Create logger package and rename errorIf to LogIf (#5678)

Remove messages from error logging
Replace errors.Trace with LogIf
kannappanr 2018-04-05 15:04:40 -07:00 committed by GitHub
parent 91fd8ffeb7
commit f8a3fd0c2a
119 changed files with 2608 additions and 1860 deletions
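
The pattern introduced here replaces free-form log messages with a request-scoped context: call sites attach structured fields (peer address, bucket, prefix) to a context.Context via logger.ReqInfo, then hand only the error to logger.LogIf. Below is a minimal sketch of a migrated call site, assuming the cmd/logger API exactly as it appears in the hunks that follow (LogIf, ReqInfo, SetReqInfo, AppendTags); the function and its peerAddr parameter are hypothetical, not part of this commit:

    package cmd

    import (
        "context"

        "github.com/minio/minio/cmd/logger"
    )

    // pingPeer is a hypothetical call site. Instead of
    // errorIf(err, "Unable to reach peer %s.", peerAddr), the caller
    // tags a context once and logs only the error; LogIf is expected
    // to be a no-op when err is nil, mirroring the old errorIf.
    func pingPeer(peerAddr string, dial func(string) error) error {
        reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", peerAddr)
        ctx := logger.SetReqInfo(context.Background(), reqInfo)

        err := dial(peerAddr)
        logger.LogIf(ctx, err)
        return err
    }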

View File

@@ -18,7 +18,9 @@ package cmd
 import (
     "bytes"
+    "context"
     "encoding/json"
+    "errors"
     "fmt"
     "io"
     "net/http"
@@ -27,6 +29,7 @@ import (
     "time"

     "github.com/gorilla/mux"
+    "github.com/minio/minio/cmd/logger"
     "github.com/minio/minio/pkg/auth"
     "github.com/minio/minio/pkg/handlers"
     "github.com/minio/minio/pkg/madmin"
@@ -71,7 +74,7 @@ func (a adminAPIHandlers) VersionHandler(w http.ResponseWriter, r *http.Request)
     jsonBytes, err := json.Marshal(adminAPIVersionInfo)
     if err != nil {
         writeErrorResponseJSON(w, ErrInternalError, r.URL)
-        errorIf(err, "Failed to marshal Admin API Version to JSON.")
+        logger.LogIf(context.Background(), err)
         return
     }
@@ -99,7 +102,7 @@ func (a adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r *http.Re
     uptime, err := getPeerUptimes(globalAdminPeers)
     if err != nil {
         writeErrorResponseJSON(w, toAPIErrorCode(err), r.URL)
-        errorIf(err, "Possibly failed to get uptime from majority of servers.")
+        logger.LogIf(context.Background(), err)
         return
     }
@@ -113,7 +116,7 @@ func (a adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r *http.Re
     jsonBytes, err := json.Marshal(serverStatus)
     if err != nil {
         writeErrorResponseJSON(w, ErrInternalError, r.URL)
-        errorIf(err, "Failed to marshal storage info into json.")
+        logger.LogIf(context.Background(), err)
         return
     }
     // Reply with storage information (across nodes in a
@@ -136,7 +139,7 @@ func (a adminAPIHandlers) ServiceStopNRestartHandler(w http.ResponseWriter, r *h
     var sa madmin.ServiceAction
     err := json.NewDecoder(r.Body).Decode(&sa)
     if err != nil {
-        errorIf(err, "Error parsing body JSON")
+        logger.LogIf(context.Background(), err)
         writeErrorResponseJSON(w, ErrRequestBodyParse, r.URL)
         return
     }
@@ -149,7 +152,7 @@ func (a adminAPIHandlers) ServiceStopNRestartHandler(w http.ResponseWriter, r *h
         serviceSig = serviceStop
     default:
         writeErrorResponseJSON(w, ErrMalformedPOSTRequest, r.URL)
-        errorIf(err, "Invalid service action received")
+        logger.LogIf(context.Background(), errors.New("Invalid service action received"))
         return
     }
@@ -243,7 +246,9 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
             serverInfoData, err := peer.cmdRunner.ServerInfoData()
             if err != nil {
-                errorIf(err, "Unable to get server info from %s.", peer.addr)
+                reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", peer.addr)
+                ctx := logger.SetReqInfo(context.Background(), reqInfo)
+                logger.LogIf(ctx, err)
                 reply[idx].Error = err.Error()
                 return
             }
@@ -258,7 +263,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
     jsonBytes, err := json.Marshal(reply)
     if err != nil {
         writeErrorResponseJSON(w, ErrInternalError, r.URL)
-        errorIf(err, "Failed to marshal storage info into json.")
+        logger.LogIf(context.Background(), err)
         return
     }
@@ -292,7 +297,7 @@ func validateLockQueryParams(vars url.Values) (string, string, time.Duration,
     }
     duration, err := time.ParseDuration(olderThanStr)
     if err != nil {
-        errorIf(err, "Failed to parse duration passed as query value.")
+        logger.LogIf(context.Background(), err)
         return "", "", time.Duration(0), ErrInvalidDuration
     }
@@ -325,7 +330,7 @@ func (a adminAPIHandlers) ListLocksHandler(w http.ResponseWriter, r *http.Reques
         duration)
     if err != nil {
         writeErrorResponseJSON(w, ErrInternalError, r.URL)
-        errorIf(err, "Failed to fetch lock information from remote nodes.")
+        logger.LogIf(context.Background(), err)
         return
     }
@@ -333,7 +338,7 @@ func (a adminAPIHandlers) ListLocksHandler(w http.ResponseWriter, r *http.Reques
     jsonBytes, err := json.Marshal(volLocks)
     if err != nil {
         writeErrorResponseJSON(w, ErrInternalError, r.URL)
-        errorIf(err, "Failed to marshal lock information into json.")
+        logger.LogIf(context.Background(), err)
         return
     }
@@ -369,7 +374,7 @@ func (a adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *http.Reque
         duration)
     if err != nil {
         writeErrorResponseJSON(w, ErrInternalError, r.URL)
-        errorIf(err, "Failed to fetch lock information from remote nodes.")
+        logger.LogIf(ctx, err)
         return
     }
@@ -377,7 +382,7 @@ func (a adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *http.Reque
     jsonBytes, err := json.Marshal(volLocks)
     if err != nil {
         writeErrorResponseJSON(w, ErrInternalError, r.URL)
-        errorIf(err, "Failed to marshal lock information into json.")
+        logger.LogIf(ctx, err)
         return
     }
@@ -425,7 +430,7 @@ func extractHealInitParams(r *http.Request) (bucket, objPrefix string,
     if clientToken == "" {
         jerr := json.NewDecoder(r.Body).Decode(&hs)
         if jerr != nil {
-            errorIf(jerr, "Error parsing body JSON")
+            logger.LogIf(context.Background(), jerr)
             err = ErrRequestBodyParse
             return
         }
@@ -583,7 +588,7 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
     // occurring on a quorum of the servers is returned.
     configBytes, err := getPeerConfig(globalAdminPeers)
     if err != nil {
-        errorIf(err, "Failed to get config from peers")
+        logger.LogIf(context.Background(), err)
         writeErrorResponseJSON(w, toAdminAPIErrCode(err), r.URL)
         return
     }
@@ -655,6 +660,7 @@ func writeSetConfigResponse(w http.ResponseWriter, peers adminPeers,
 // SetConfigHandler - PUT /minio/admin/v1/config
 func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := context.Background()
     // Get current object layer instance.
     objectAPI := newObjectLayerFn()
     if objectAPI == nil {
@@ -678,7 +684,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
             return
         }
         if err != io.ErrUnexpectedEOF {
-            errorIf(err, "Failed to read config from request body.")
+            logger.LogIf(ctx, err)
             writeErrorResponse(w, toAPIErrorCode(err), r.URL)
             return
         }
@@ -688,7 +694,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
     // Validate JSON provided in the request body: check the
     // client has not sent JSON objects with duplicate keys.
     if err = checkDupJSONKeys(string(configBytes)); err != nil {
-        errorIf(err, "config contains duplicate JSON entries.")
+        logger.LogIf(ctx, err)
         writeErrorResponse(w, ErrAdminConfigBadJSON, r.URL)
         return
     }
@@ -696,7 +702,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
     var config serverConfig
     err = json.Unmarshal(configBytes, &config)
     if err != nil {
-        errorIf(err, "Failed to unmarshal JSON configuration", err)
+        logger.LogIf(ctx, err)
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
     }
@@ -718,7 +724,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
     errs := writeTmpConfigPeers(globalAdminPeers, tmpFileName, configBytes)

     // Check if the operation succeeded in quorum or more nodes.
-    rErr := reduceWriteQuorumErrs(errs, nil, len(globalAdminPeers)/2+1)
+    rErr := reduceWriteQuorumErrs(ctx, errs, nil, len(globalAdminPeers)/2+1)
     if rErr != nil {
         writeSetConfigResponse(w, globalAdminPeers, errs, false, r.URL)
         return
@@ -736,7 +742,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
     // Rename the temporary config file to config.json
     errs = commitConfigPeers(globalAdminPeers, tmpFileName)
-    rErr = reduceWriteQuorumErrs(errs, nil, len(globalAdminPeers)/2+1)
+    rErr = reduceWriteQuorumErrs(ctx, errs, nil, len(globalAdminPeers)/2+1)
     if rErr != nil {
         writeSetConfigResponse(w, globalAdminPeers, errs, false, r.URL)
         return
@@ -777,7 +783,7 @@ func (a adminAPIHandlers) UpdateCredentialsHandler(w http.ResponseWriter,
     var req madmin.SetCredsReq
     err := json.NewDecoder(r.Body).Decode(&req)
     if err != nil {
-        errorIf(err, "Error parsing body JSON")
+        logger.LogIf(context.Background(), err)
         writeErrorResponseJSON(w, ErrRequestBodyParse, r.URL)
         return
     }
@@ -804,7 +810,9 @@ func (a adminAPIHandlers) UpdateCredentialsHandler(w http.ResponseWriter,
     // Notify all other Minio peers to update credentials
     updateErrs := updateCredsOnPeers(creds)
     for peer, err := range updateErrs {
-        errorIf(err, "Unable to update credentials on peer %s.", peer)
+        reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", peer)
+        ctx := logger.SetReqInfo(context.Background(), reqInfo)
+        logger.LogIf(ctx, err)
     }

     // Update local credentials in memory.
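
Handlers that already carry a request context extend it rather than rebuild it: later hunks in this commit append per-iteration tags to the context's existing ReqInfo before logging. A sketch of that loop shape, reusing the package and imports from the sketch above; the helper name and the map of peer errors are hypothetical:

    // logPeerErrors tags each failing peer on the request's ReqInfo
    // and logs its error; ctx is assumed to have been prepared with
    // logger.SetReqInfo by the calling handler.
    func logPeerErrors(ctx context.Context, updateErrs map[string]error) {
        for peer, err := range updateErrs {
            logger.GetReqInfo(ctx).AppendTags("remotePeer", peer)
            logger.LogIf(ctx, err)
        }
    }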

View File

@@ -33,7 +33,6 @@ import (
     router "github.com/gorilla/mux"
     "github.com/minio/minio/pkg/auth"
-    "github.com/minio/minio/pkg/errors"
     "github.com/minio/minio/pkg/madmin"
 )
@@ -264,7 +263,7 @@ func initTestXLObjLayer() (ObjectLayer, []string, error) {
         return nil, nil, err
     }
     endpoints := mustGetNewEndpointList(xlDirs...)
-    format, err := waitForFormatXL(true, endpoints, 1, 16)
+    format, err := waitForFormatXL(context.Background(), true, endpoints, 1, 16)
     if err != nil {
         removeRoots(xlDirs)
         return nil, nil, err
@@ -762,13 +761,13 @@ func buildAdminRequest(queryVal url.Values, method, path string,
         "/minio/admin/v1"+path+"?"+queryVal.Encode(),
         contentLength, bodySeeker)
     if err != nil {
-        return nil, errors.Trace(err)
+        return nil, err
     }
     cred := globalServerConfig.GetCredential()
     err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
     if err != nil {
-        return nil, errors.Trace(err)
+        return nil, err
     }
     return req, nil

View File

@@ -222,7 +222,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
         StartTime: h.startTime,
     })
     if err != nil {
-        errorIf(err, "Failed to marshal heal result into json.")
+        logger.LogIf(context.Background(), err)
         return nil, ErrInternalError, ""
     }
     return b, ErrNone, ""
@@ -270,7 +270,7 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
     jbytes, err := json.Marshal(h.currentStatus)
     if err != nil {
-        errorIf(err, "Failed to marshal heal result into json.")
+        logger.LogIf(context.Background(), err)
         return nil, ErrInternalError
     }
@@ -321,7 +321,9 @@ type healSequence struct {
 func newHealSequence(bucket, objPrefix, clientAddr string,
     numDisks int, hs madmin.HealOpts, forceStart bool) *healSequence {

-    ctx := logger.SetContext(context.Background(), &logger.ReqInfo{clientAddr, "", "", "Heal", bucket, objPrefix, nil})
+    reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
+    reqInfo.AppendTags("prefix", objPrefix)
+    ctx := logger.SetReqInfo(context.Background(), reqInfo)

     return &healSequence{
         bucket: bucket,

View File

@@ -29,7 +29,7 @@ import (
     "time"

     "github.com/minio/minio-go/pkg/set"
-    "github.com/minio/minio/pkg/errors"
+    "github.com/minio/minio/cmd/logger"
 )

 const (
@@ -201,7 +201,7 @@ func (rc remoteAdminClient) WriteTmpConfig(tmpFileName string, configBytes []byt
     err := rc.Call(writeTmpConfigRPC, &wArgs, &WriteConfigReply{})
     if err != nil {
-        errorIf(err, "Failed to write temporary config file.")
+        logger.LogIf(context.Background(), err)
         return err
     }
@@ -215,7 +215,10 @@ func (lc localAdminClient) CommitConfig(tmpFileName string) error {
     tmpConfigFile := filepath.Join(getConfigDir(), tmpFileName)

     err := os.Rename(tmpConfigFile, configFile)
-    errorIf(err, fmt.Sprintf("Failed to rename %s to %s", tmpConfigFile, configFile))
+    reqInfo := (&logger.ReqInfo{}).AppendTags("tmpConfigFile", tmpConfigFile)
+    reqInfo.AppendTags("configFile", configFile)
+    ctx := logger.SetReqInfo(context.Background(), reqInfo)
+    logger.LogIf(ctx, err)
     return err
 }
@@ -228,7 +231,7 @@ func (rc remoteAdminClient) CommitConfig(tmpFileName string) error {
     cReply := CommitConfigReply{}
     err := rc.Call(commitConfigRPC, &cArgs, &cReply)
     if err != nil {
-        errorIf(err, "Failed to rename config file.")
+        logger.LogIf(context.Background(), err)
         return err
     }
@@ -436,7 +439,7 @@ func getPeerUptimes(peers adminPeers) (time.Duration, error) {
     latestUptime := time.Duration(0)
     for _, uptime := range uptimes {
         if uptime.err != nil {
-            errorIf(uptime.err, "Unable to fetch uptime")
+            logger.LogIf(context.Background(), uptime.err)
             continue
         }
@@ -489,15 +492,17 @@ func getPeerConfig(peers adminPeers) ([]byte, error) {
         // Unmarshal the received config files.
         err := json.Unmarshal(configBytes, &serverConfigs[i])
         if err != nil {
-            errorIf(err, "Failed to unmarshal serverConfig from ", peers[i].addr)
+            reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", peers[i].addr)
+            ctx := logger.SetReqInfo(context.Background(), reqInfo)
+            logger.LogIf(ctx, err)
             return nil, err
         }
     }

     configJSON, err := getValidServerConfig(serverConfigs, errs)
     if err != nil {
-        errorIf(err, "Unable to find a valid server config")
-        return nil, errors.Trace(err)
+        logger.LogIf(context.Background(), err)
+        return nil, err
     }

     // Return the config.json that was present quorum or more

View File

@@ -26,7 +26,7 @@ import (
     "time"

     router "github.com/gorilla/mux"
-    "github.com/minio/minio/pkg/errors"
+    "github.com/minio/minio/cmd/logger"
 )

 const adminPath = "/admin"
@@ -176,7 +176,9 @@ type WriteConfigReply struct {
 func writeTmpConfigCommon(tmpFileName string, configBytes []byte) error {
     tmpConfigFile := filepath.Join(getConfigDir(), tmpFileName)

     err := ioutil.WriteFile(tmpConfigFile, configBytes, 0666)
-    errorIf(err, fmt.Sprintf("Failed to write to temporary config file %s", tmpConfigFile))
+    reqInfo := (&logger.ReqInfo{}).AppendTags("tmpConfigFile", tmpConfigFile)
+    ctx := logger.SetReqInfo(context.Background(), reqInfo)
+    logger.LogIf(ctx, err)
     return err
 }
@@ -209,7 +211,10 @@ func (s *adminCmd) CommitConfig(cArgs *CommitConfigArgs, cReply *CommitConfigRep
     tmpConfigFile := filepath.Join(getConfigDir(), cArgs.FileName)

     err := os.Rename(tmpConfigFile, configFile)
-    errorIf(err, fmt.Sprintf("Failed to rename %s to %s", tmpConfigFile, configFile))
+    reqInfo := (&logger.ReqInfo{}).AppendTags("tmpConfigFile", tmpConfigFile)
+    reqInfo.AppendTags("configFile", configFile)
+    ctx := logger.SetReqInfo(context.Background(), reqInfo)
+    logger.LogIf(ctx, err)
     return err
 }
@@ -220,7 +225,8 @@ func registerAdminRPCRouter(mux *router.Router) error {
     adminRPCServer := newRPCServer()
     err := adminRPCServer.RegisterName("Admin", adminRPCHandler)
     if err != nil {
-        return errors.Trace(err)
+        logger.LogIf(context.Background(), err)
+        return err
     }
     adminRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
     adminRouter.Path(adminPath).Handler(adminRPCServer)

View File

@@ -793,7 +793,7 @@ var errorCodeResponse = map[APIErrorCode]APIError{
         HTTPStatusCode: http.StatusBadRequest,
     },
     // Generic Invalid-Request error. Should be used for response errors only for unlikely
-    // corner case errors for which introducing new APIErrorCode is not worth it. errorIf()
+    // corner case errors for which introducing new APIErrorCode is not worth it. LogIf()
     // should be used to log the error at the source of the error for debugging purposes.
     ErrInvalidRequest: {
         Code: "InvalidRequest",

View File

@@ -20,6 +20,7 @@ import (
     "net/http"

     router "github.com/gorilla/mux"
+    "github.com/minio/minio/cmd/logger"
 )

 // objectAPIHandler implements and provides http handlers for S3 API.
@@ -35,7 +36,7 @@ func registerAPIRouter(mux *router.Router) {
     if len(cacheConfig.Drives) > 0 {
         // initialize the new disk cache objects.
         globalCacheObjectAPI, err = newServerCacheObjects(cacheConfig)
-        fatalIf(err, "Unable to initialize disk caching")
+        logger.FatalIf(err, "Unable to initialize disk caching")
     }

     // Initialize API.

View File

@@ -18,6 +18,7 @@ package cmd
 import (
     "bytes"
+    "context"
     "encoding/base64"
     "encoding/hex"
     "errors"
@@ -25,6 +26,7 @@ import (
     "net/http"
     "strings"

+    "github.com/minio/minio/cmd/logger"
     "github.com/minio/minio/pkg/handlers"
 )
@@ -114,12 +116,14 @@ func checkAdminRequestAuthType(r *http.Request, region string) APIErrorCode {
         s3Err = isReqAuthenticated(r, region)
     }
     if s3Err != ErrNone {
-        errorIf(errors.New(getAPIError(s3Err).Description), "%s", dumpRequest(r))
+        reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
+        ctx := logger.SetReqInfo(context.Background(), reqInfo)
+        logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description))
     }
     return s3Err
 }

-func checkRequestAuthType(r *http.Request, bucket, policyAction, region string) APIErrorCode {
+func checkRequestAuthType(ctx context.Context, r *http.Request, bucket, policyAction, region string) APIErrorCode {
     reqAuthType := getRequestAuthType(r)

     switch reqAuthType {
@@ -136,7 +140,7 @@ func checkRequestAuthType(r *http.Request, bucket, policyAction, region string)
         if err != nil {
             return ErrInternalError
         }
-        return enforceBucketPolicy(bucket, policyAction, resource,
+        return enforceBucketPolicy(ctx, bucket, policyAction, resource,
             r.Referer(), handlers.GetSourceIP(r), r.URL.Query())
     }
@@ -176,7 +180,7 @@ func isReqAuthenticated(r *http.Request, region string) (s3Error APIErrorCode) {
     payload, err := ioutil.ReadAll(r.Body)
     if err != nil {
-        errorIf(err, "Unable to read request body for signature verification")
+        logger.LogIf(context.Background(), err)
         return ErrInternalError
     }

View File

@@ -18,6 +18,7 @@ package cmd
 import (
     "bufio"
+    "context"
     "crypto/tls"
     "crypto/x509"
     "errors"
@@ -29,6 +30,8 @@ import (
     "strings"
     "sync"
     "time"
+
+    "github.com/minio/minio/cmd/logger"
 )

 // Attempt to retry only this many number of times before
@@ -264,7 +267,9 @@ func rpcDial(serverAddr, serviceEndpoint string, secureConn bool) (netRPCClient
         // Print RPC connection errors that are worthy to display in log.
         switch err.(type) {
         case x509.HostnameError:
-            errorIf(err, "Unable to establish secure connection to %s", serverAddr)
+            reqInfo := (&logger.ReqInfo{}).AppendTags("serverAddr", serverAddr)
+            ctx := logger.SetReqInfo(context.Background(), reqInfo)
+            logger.LogIf(ctx, err)
         }

         return nil, &net.OpError{

View File

@@ -17,11 +17,13 @@
 package cmd

 import (
+    "context"
     "fmt"
     "path"
    "sync"
     "time"

+    "github.com/minio/minio/cmd/logger"
     "github.com/minio/minio/pkg/auth"
 )
@@ -62,7 +64,7 @@ func (br *browserPeerAPIHandlers) SetAuthPeer(args SetAuthPeerArgs, reply *AuthR
         // Save the current creds when failed to update.
         globalServerConfig.SetCredential(prevCred)

-        errorIf(err, "Unable to update the config with new credentials sent from browser RPC.")
+        logger.LogIf(context.Background(), err)
         return err
     }

View File

@@ -17,9 +17,10 @@
 package cmd

 import (
-    router "github.com/gorilla/mux"
-    "github.com/minio/minio/pkg/errors"
+    "context"
+    router "github.com/gorilla/mux"
+    "github.com/minio/minio/cmd/logger"
 )

 // Set up an RPC endpoint that receives browser related calls. The
@@ -42,7 +43,8 @@ func registerBrowserPeerRPCRouter(mux *router.Router) error {
     bpRPCServer := newRPCServer()
     err := bpRPCServer.RegisterName("BrowserPeer", bpHandlers)
     if err != nil {
-        return errors.Trace(err)
+        logger.LogIf(context.Background(), err)
+        return err
     }

     bpRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()

View File

@@ -64,7 +64,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
         return
     }

-    if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone {
+    if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone {
         writeErrorResponse(w, s3Error, r.URL)
         return
     }
@@ -134,7 +134,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
         return
     }

-    if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone {
+    if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone {
         writeErrorResponse(w, s3Error, r.URL)
         return
     }

View File

@@ -33,6 +33,7 @@ import (
     mux "github.com/gorilla/mux"
     "github.com/minio/minio-go/pkg/policy"
     "github.com/minio/minio-go/pkg/set"
+    "github.com/minio/minio/cmd/logger"
     "github.com/minio/minio/pkg/errors"
     "github.com/minio/minio/pkg/event"
     "github.com/minio/minio/pkg/hash"
@@ -40,10 +41,10 @@ import (
 // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
 // Enforces bucket policies for a bucket for a given tatusaction.
-func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, queryParams url.Values) (s3Error APIErrorCode) {
+func enforceBucketPolicy(ctx context.Context, bucket, action, resource, referer, sourceIP string, queryParams url.Values) (s3Error APIErrorCode) {
     // Verify if bucket actually exists
     objAPI := newObjectLayerFn()
-    if err := checkBucketExist(bucket, objAPI); err != nil {
+    if err := checkBucketExist(ctx, bucket, objAPI); err != nil {
         err = errors.Cause(err)
         switch err.(type) {
         case BucketNameInvalid:
@@ -53,13 +54,12 @@ func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, que
             // For no bucket found we return NoSuchBucket instead.
             return ErrNoSuchBucket
         }
-        errorIf(err, "Unable to read bucket policy.")
         // Return internal error for any other errors so that we can investigate.
         return ErrInternalError
     }

     // Fetch bucket policy, if policy is not set return access denied.
-    p, err := objAPI.GetBucketPolicy(context.Background(), bucket)
+    p, err := objAPI.GetBucketPolicy(ctx, bucket)
     if err != nil {
         return ErrAccessDenied
     }
@@ -92,7 +92,10 @@ func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, que
 // Check if the action is allowed on the bucket/prefix.
 func isBucketActionAllowed(action, bucket, prefix string, objectAPI ObjectLayer) bool {
-    bp, err := objectAPI.GetBucketPolicy(context.Background(), bucket)
+    reqInfo := &logger.ReqInfo{BucketName: bucket}
+    reqInfo.AppendTags("prefix", prefix)
+    ctx := logger.SetReqInfo(context.Background(), reqInfo)
+    bp, err := objectAPI.GetBucketPolicy(ctx, bucket)
     if err != nil {
         return false
     }
@@ -120,10 +123,10 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
         return
     }

-    s3Error := checkRequestAuthType(r, bucket, "s3:GetBucketLocation", globalMinioDefaultRegion)
+    s3Error := checkRequestAuthType(ctx, r, bucket, "s3:GetBucketLocation", globalMinioDefaultRegion)
     if s3Error == ErrInvalidRegion {
         // Clients like boto3 send getBucketLocation() call signed with region that is configured.
-        s3Error = checkRequestAuthType(r, "", "s3:GetBucketLocation", globalServerConfig.GetRegion())
+        s3Error = checkRequestAuthType(ctx, r, "", "s3:GetBucketLocation", globalServerConfig.GetRegion())
     }
     if s3Error != ErrNone {
         writeErrorResponse(w, s3Error, r.URL)
@@ -179,7 +182,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
         return
     }

-    if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucketMultipartUploads", globalServerConfig.GetRegion()); s3Error != ErrNone {
+    if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListBucketMultipartUploads", globalServerConfig.GetRegion()); s3Error != ErrNone {
         writeErrorResponse(w, s3Error, r.URL)
         return
     }
@@ -228,10 +231,10 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
         listBuckets = api.CacheAPI().ListBuckets
     }
     // ListBuckets does not have any bucket action.
-    s3Error := checkRequestAuthType(r, "", "", globalMinioDefaultRegion)
+    s3Error := checkRequestAuthType(ctx, r, "", "", globalMinioDefaultRegion)
     if s3Error == ErrInvalidRegion {
         // Clients like boto3 send listBuckets() call signed with region that is configured.
-        s3Error = checkRequestAuthType(r, "", "", globalServerConfig.GetRegion())
+        s3Error = checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion())
     }
     if s3Error != ErrNone {
         writeErrorResponse(w, s3Error, r.URL)
@@ -266,7 +269,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
     }

     var authError APIErrorCode
-    if authError = checkRequestAuthType(r, bucket, "s3:DeleteObject", globalServerConfig.GetRegion()); authError != ErrNone {
+    if authError = checkRequestAuthType(ctx, r, bucket, "s3:DeleteObject", globalServerConfig.GetRegion()); authError != ErrNone {
         // In the event access is denied, a 200 response should still be returned
         // http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
         if authError != ErrAccessDenied {
@@ -294,7 +297,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
     // Read incoming body XML bytes.
     if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
-        errorIf(err, "Unable to read HTTP body.")
+        logger.LogIf(ctx, err)
         writeErrorResponse(w, ErrInternalError, r.URL)
         return
     }
@@ -302,7 +305,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
     // Unmarshal list of keys to be deleted.
     deleteObjects := &DeleteObjectsRequest{}
     if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
-        errorIf(err, "Unable to unmarshal delete objects request XML.")
+        logger.LogIf(ctx, err)
         writeErrorResponse(w, ErrMalformedXML, r.URL)
         return
     }
@@ -411,7 +414,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
     }

     // PutBucket does not have any bucket action.
-    s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion())
+    s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion())
     if s3Error != ErrNone {
         writeErrorResponse(w, s3Error, r.URL)
         return
@@ -490,7 +493,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
     // be loaded in memory, the remaining being put in temporary files.
     reader, err := r.MultipartReader()
     if err != nil {
-        errorIf(err, "Unable to initialize multipart reader.")
+        logger.LogIf(ctx, err)
         writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
         return
     }
@@ -498,7 +501,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
     // Read multipart data and save in memory and in the disk if needed
     form, err := reader.ReadForm(maxFormMemory)
     if err != nil {
-        errorIf(err, "Unable to initialize multipart reader.")
+        logger.LogIf(ctx, err)
         writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
         return
     }
@@ -507,9 +510,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
     defer form.RemoveAll()

     // Extract all form fields
-    fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(form)
+    fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(ctx, form)
     if err != nil {
-        errorIf(err, "Unable to parse form values.")
+        logger.LogIf(ctx, err)
         writeErrorResponse(w, ErrMalformedPOSTRequest, r.URL)
         return
     }
@@ -584,16 +587,15 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
     }

     // Extract metadata to be saved from received Form.
-    metadata, err := extractMetadataFromHeader(formValues)
+    metadata, err := extractMetadataFromHeader(ctx, formValues)
     if err != nil {
-        errorIf(err, "found invalid http request header")
         writeErrorResponse(w, ErrInternalError, r.URL)
         return
     }

     hashReader, err := hash.NewReader(fileBody, fileSize, "", "")
     if err != nil {
-        errorIf(err, "Unable to initialize hashReader.")
+        logger.LogIf(ctx, err)
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
     }
@@ -690,7 +692,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
         return
     }

-    if s3Error := checkRequestAuthType(r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone {
+    if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListBucket", globalServerConfig.GetRegion()); s3Error != ErrNone {
         writeErrorResponseHeadersOnly(w, s3Error)
         return
     }
@@ -717,7 +719,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
     }

     // DeleteBucket does not have any bucket action.
-    if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
+    if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
         writeErrorResponse(w, s3Error, r.URL)
         return
     }
@@ -736,12 +738,14 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.

     // Notify all peers (including self) to update in-memory state
     for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) {
-        errorIf(err, "unable to update policy change in remote peer %v", addr)
+        logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name)
+        logger.LogIf(ctx, err)
     }

     globalNotificationSys.RemoveNotification(bucket)
     for addr, err := range globalNotificationSys.DeleteBucket(bucket) {
-        errorIf(err, "unable to delete bucket in remote peer %v", addr)
+        logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name)
+        logger.LogIf(ctx, err)
     }

     // Write success response.

View File

@@ -23,6 +23,7 @@ import (
     "net/http"

     "github.com/gorilla/mux"
+    "github.com/minio/minio/cmd/logger"
     xerrors "github.com/minio/minio/pkg/errors"
     "github.com/minio/minio/pkg/event"
     "github.com/minio/minio/pkg/event/target"
@@ -53,7 +54,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
         writeErrorResponse(w, ErrNotImplemented, r.URL)
         return
     }
-    if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
+    if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
         writeErrorResponse(w, s3Error, r.URL)
         return
     }
@@ -63,17 +64,15 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
     _, err := objAPI.GetBucketInfo(ctx, bucketName)
     if err != nil {
-        errorIf(err, "Unable to find bucket info.")
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
     }

     // Attempt to successfully load notification config.
-    nConfig, err := readNotificationConfig(objAPI, bucketName)
+    nConfig, err := readNotificationConfig(ctx, objAPI, bucketName)
     if err != nil {
         // Ignore errNoSuchNotifications to comply with AWS S3.
         if xerrors.Cause(err) != errNoSuchNotifications {
-            errorIf(err, "Unable to read notification configuration.")
             writeErrorResponse(w, toAPIErrorCode(err), r.URL)
             return
         }
@@ -83,7 +82,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
     notificationBytes, err := xml.Marshal(nConfig)
     if err != nil {
-        errorIf(err, "Unable to marshal notification configuration into XML.", err)
+        logger.LogIf(ctx, err)
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
     }
@@ -106,7 +105,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
         writeErrorResponse(w, ErrNotImplemented, r.URL)
         return
     }
-    if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
+    if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
         writeErrorResponse(w, s3Error, r.URL)
         return
     }
@@ -146,7 +145,8 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
     rulesMap := config.ToRulesMap()
     globalNotificationSys.AddRulesMap(bucketName, rulesMap)
     for addr, err := range globalNotificationSys.PutBucketNotification(bucketName, rulesMap) {
-        errorIf(err, "unable to put bucket notification to remote peer %v", addr)
+        logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name)
+        logger.LogIf(ctx, err)
     }

     writeSuccessResponseHeadersOnly(w)
@@ -167,7 +167,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
         writeErrorResponse(w, ErrNotImplemented, r.URL)
         return
     }
-    if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
+    if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
         writeErrorResponse(w, s3Error, r.URL)
         return
     }
@@ -217,7 +217,6 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
     }

     if _, err := objAPI.GetBucketInfo(ctx, bucketName); err != nil {
-        errorIf(err, "Unable to get bucket info.")
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
     }
@@ -227,7 +226,8 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
     rulesMap := event.NewRulesMap(eventNames, pattern, target.ID())

     if err := globalNotificationSys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
-        errorIf(err, "Unable to add httpclient target %v to globalNotificationSys.targetList.", target)
+        logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
+        logger.LogIf(ctx, err)
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
     }
@@ -236,20 +236,23 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
     thisAddr := xnet.MustParseHost(GetLocalPeer(globalEndpoints))

     if err := SaveListener(objAPI, bucketName, eventNames, pattern, target.ID(), *thisAddr); err != nil {
-        errorIf(err, "Unable to save HTTP listener %v", target)
+        logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
+        logger.LogIf(ctx, err)
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
     }

     errors := globalNotificationSys.ListenBucketNotification(bucketName, eventNames, pattern, target.ID(), *thisAddr)
     for addr, err := range errors {
-        errorIf(err, "unable to call listen bucket notification to remote peer %v", addr)
+        logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name)
+        logger.LogIf(ctx, err)
     }

     <-target.DoneCh

     if err := RemoveListener(objAPI, bucketName, target.ID(), *thisAddr); err != nil {
-        errorIf(err, "Unable to save HTTP listener %v", target)
+        logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
+        logger.LogIf(ctx, err)
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
     }

View File

@@ -28,6 +28,7 @@ import (
     humanize "github.com/dustin/go-humanize"
     mux "github.com/gorilla/mux"
     "github.com/minio/minio-go/pkg/policy"
+    "github.com/minio/minio/cmd/logger"
     "github.com/minio/minio/pkg/errors"
     "github.com/minio/minio/pkg/wildcard"
 )
@@ -228,7 +229,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
         return
     }

-    if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
+    if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
         writeErrorResponse(w, s3Error, r.URL)
         return
     }
@@ -260,7 +261,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
     // bucket policies are limited to 20KB in size, using a limit reader.
     policyBytes, err := ioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize))
     if err != nil {
-        errorIf(err, "Unable to read from client.")
+        logger.LogIf(ctx, err)
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
     }
@@ -288,7 +289,8 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
     }

     for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) {
-        errorIf(err, "unable to update policy change in remote peer %v", addr)
+        logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name)
+        logger.LogIf(ctx, err)
     }

     // Success.
@@ -308,7 +310,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
         return
     }

-    if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
+    if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
         writeErrorResponse(w, s3Error, r.URL)
         return
     }
@@ -331,7 +333,8 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
     }

     for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) {
-        errorIf(err, "unable to update policy change in remote peer %v", addr)
+        logger.GetReqInfo(ctx).AppendTags("remotePeer", addr.Name)
+        logger.LogIf(ctx, err)
     }

     // Success.
@@ -351,7 +354,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
         return
     }

-    if s3Error := checkRequestAuthType(r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
+    if s3Error := checkRequestAuthType(ctx, r, "", "", globalServerConfig.GetRegion()); s3Error != ErrNone {
         writeErrorResponse(w, s3Error, r.URL)
         return
     }
@@ -375,7 +378,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
     policyBytes, err := json.Marshal(&policy)
     if err != nil {
-        errorIf(err, "Unable to marshal bucket policy.")
+        logger.LogIf(ctx, err)
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
     }

View File

@@ -25,6 +25,7 @@ import (
     "sync"

     "github.com/minio/minio-go/pkg/policy"
+    "github.com/minio/minio/cmd/logger"
     "github.com/minio/minio/pkg/errors"
     "github.com/minio/minio/pkg/hash"
 )
@@ -119,12 +120,12 @@ func readBucketPolicyJSON(bucket string, objAPI ObjectLayer) (bucketPolicyReader
     policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig)
     var buffer bytes.Buffer
-    err = objAPI.GetObject(context.Background(), minioMetaBucket, policyPath, 0, -1, &buffer, "")
+    ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{BucketName: bucket})
+    err = objAPI.GetObject(ctx, minioMetaBucket, policyPath, 0, -1, &buffer, "")
     if err != nil {
         if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
             return nil, PolicyNotFound{Bucket: bucket}
         }
-        errorIf(err, "Unable to load policy for the bucket %s.", bucket)
         return nil, errors.Cause(err)
     }
@@ -151,9 +152,9 @@ func ReadBucketPolicy(bucket string, objAPI ObjectLayer) (policy.BucketAccessPol

 // removeBucketPolicy - removes any previously written bucket policy. Returns BucketPolicyNotFound
 // if no policies are found.
-func removeBucketPolicy(bucket string, objAPI ObjectLayer) error {
+func removeBucketPolicy(ctx context.Context, bucket string, objAPI ObjectLayer) error {
     policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig)
-    err := objAPI.DeleteObject(context.Background(), minioMetaBucket, policyPath)
+    err := objAPI.DeleteObject(ctx, minioMetaBucket, policyPath)
     if err != nil {
         err = errors.Cause(err)
         if _, ok := err.(ObjectNotFound); ok {
@@ -165,21 +166,21 @@ func removeBucketPolicy(bucket string, objAPI ObjectLayer) error {
 }

 // writeBucketPolicy - save a bucket policy that is assumed to be validated.
-func writeBucketPolicy(bucket string, objAPI ObjectLayer, bpy policy.BucketAccessPolicy) error {
+func writeBucketPolicy(ctx context.Context, bucket string, objAPI ObjectLayer, bpy policy.BucketAccessPolicy) error {
     buf, err := json.Marshal(bpy)
     if err != nil {
-        errorIf(err, "Unable to marshal bucket policy '%#v' to JSON", bpy)
+        logger.LogIf(ctx, err)
         return err
     }
     policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig)
     hashReader, err := hash.NewReader(bytes.NewReader(buf), int64(len(buf)), "", getSHA256Hash(buf))
     if err != nil {
-        errorIf(err, "Unable to set policy for the bucket %s", bucket)
+        logger.LogIf(ctx, err)
         return errors.Cause(err)
     }

-    if _, err = objAPI.PutObject(context.Background(), minioMetaBucket, policyPath, hashReader, nil); err != nil {
-        errorIf(err, "Unable to set policy for the bucket %s", bucket)
+    if _, err = objAPI.PutObject(ctx, minioMetaBucket, policyPath, hashReader, nil); err != nil {
         return errors.Cause(err)
     }
     return nil
@@ -188,9 +189,9 @@ func writeBucketPolicy(bucket string, objAPI ObjectLayer, bpy policy.BucketAcces
 // persistAndNotifyBucketPolicyChange - takes a policyChange argument,
 // persists it to storage, and notify nodes in the cluster about the
 // change. In-memory state is updated in response to the notification.
-func persistAndNotifyBucketPolicyChange(bucket string, isRemove bool, bktPolicy policy.BucketAccessPolicy, objAPI ObjectLayer) error {
+func persistAndNotifyBucketPolicyChange(ctx context.Context, bucket string, isRemove bool, bktPolicy policy.BucketAccessPolicy, objAPI ObjectLayer) error {
     if isRemove {
-        err := removeBucketPolicy(bucket, objAPI)
+        err := removeBucketPolicy(ctx, bucket, objAPI)
         if err != nil {
             return err
         }
@@ -198,7 +199,7 @@ func persistAndNotifyBucketPolicyChange(bucket string, isRemove bool, bktPolicy
         if reflect.DeepEqual(bktPolicy, emptyBucketPolicy) {
             return errInvalidArgument
         }
-        if err := writeBucketPolicy(bucket, objAPI, bktPolicy); err != nil {
+        if err := writeBucketPolicy(ctx, bucket, objAPI, bktPolicy); err != nil {
             return err
         }
     }

View File

@@ -25,6 +25,7 @@ import (
     "time"

     "github.com/minio/cli"
+    "github.com/minio/minio/cmd/logger"
     "github.com/minio/minio/pkg/auth"
 )
@@ -33,9 +34,9 @@ func checkUpdate(mode string) {
     // Its OK to ignore any errors during doUpdate() here.
     if updateMsg, _, currentReleaseTime, latestReleaseTime, err := getUpdateInfo(2*time.Second, mode); err == nil {
         if globalInplaceUpdateDisabled {
-            log.Println(updateMsg)
+            logger.Println(updateMsg)
         } else {
-            log.Println(prepareUpdateMessage("Run `minio update`", latestReleaseTime.Sub(currentReleaseTime)))
+            logger.Println(prepareUpdateMessage("Run `minio update`", latestReleaseTime.Sub(currentReleaseTime)))
         }
     }
 }
@@ -43,11 +44,11 @@ func checkUpdate(mode string) {
 func initConfig() {
     // Config file does not exist, we create it fresh and return upon success.
     if isFile(getConfigFile()) {
-        fatalIf(migrateConfig(), "Config migration failed.")
-        fatalIf(loadConfig(), "Unable to load config version: '%s'.", serverConfigVersion)
+        logger.FatalIf(migrateConfig(), "Config migration failed.")
+        logger.FatalIf(loadConfig(), "Unable to load config version: '%s'.", serverConfigVersion)
     } else {
-        fatalIf(newConfig(), "Unable to initialize minio config for the first time.")
-        log.Println("Created minio configuration file successfully at " + getConfigDir())
+        logger.FatalIf(newConfig(), "Unable to initialize minio config for the first time.")
+        logger.Println("Created minio configuration file successfully at " + getConfigDir())
     }
 }
@@ -70,17 +71,17 @@ func handleCommonCmdArgs(ctx *cli.Context) {
         // default config directory.
         configDir = getConfigDir()
         if configDir == "" {
-            fatalIf(errors.New("missing option"), "config-dir option must be provided.")
+            logger.FatalIf(errors.New("missing option"), "config-dir option must be provided.")
         }
     }

     if configDir == "" {
-        fatalIf(errors.New("empty directory"), "Configuration directory cannot be empty.")
+        logger.FatalIf(errors.New("empty directory"), "Configuration directory cannot be empty.")
     }

     // Disallow relative paths, figure out absolute paths.
     configDirAbs, err := filepath.Abs(configDir)
-    fatalIf(err, "Unable to fetch absolute path for config directory %s", configDir)
+    logger.FatalIf(err, "Unable to fetch absolute path for config directory %s", configDir)

     setConfigDir(configDirAbs)
 }
@@ -94,7 +95,7 @@ func handleCommonEnvVars() {
     secretKey := os.Getenv("MINIO_SECRET_KEY")
     if accessKey != "" && secretKey != "" {
         cred, err := auth.CreateCredentials(accessKey, secretKey)
-        fatalIf(err, "Invalid access/secret Key set in environment.")
+        logger.FatalIf(err, "Invalid access/secret Key set in environment.")

         // credential Envs are set globally.
         globalIsEnvCreds = true
@@ -104,7 +105,7 @@ func handleCommonEnvVars() {
     if browser := os.Getenv("MINIO_BROWSER"); browser != "" {
         browserFlag, err := ParseBrowserFlag(browser)
         if err != nil {
-            fatalIf(errors.New("invalid value"), "Unknown value %s in MINIO_BROWSER environment variable.", browser)
+            logger.FatalIf(errors.New("invalid value"), "Unknown value %s in MINIO_BROWSER environment variable.", browser)
         }

         // browser Envs are set globally, this does not represent
@@ -117,7 +118,7 @@ func handleCommonEnvVars() {
     if traceFile != "" {
         var err error
         globalHTTPTraceFile, err = os.OpenFile(traceFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0660)
-        fatalIf(err, "error opening file %s", traceFile)
+        logger.FatalIf(err, "error opening file %s", traceFile)
     }

     globalDomainName = os.Getenv("MINIO_DOMAIN")
@@ -127,18 +128,18 @@ func handleCommonEnvVars() {
     if drives := os.Getenv("MINIO_CACHE_DRIVES"); drives != "" {
         driveList, err := parseCacheDrives(strings.Split(drives, cacheEnvDelimiter))
-        fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_DRIVES %s.", drives)
+        logger.FatalIf(err, "Invalid value set in environment variable MINIO_CACHE_DRIVES %s.", drives)
         globalCacheDrives = driveList
         globalIsDiskCacheEnabled = true
     }
     if excludes := os.Getenv("MINIO_CACHE_EXCLUDE"); excludes != "" {
         excludeList, err := parseCacheExcludes(strings.Split(excludes, cacheEnvDelimiter))
-        fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_EXCLUDE %s.", excludes)
+        logger.FatalIf(err, "Invalid value set in environment variable MINIO_CACHE_EXCLUDE %s.", excludes)
         globalCacheExcludes = excludeList
     }
     if expiryStr := os.Getenv("MINIO_CACHE_EXPIRY"); expiryStr != "" {
         expiry, err := strconv.Atoi(expiryStr)
-        fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_EXPIRY %s.", expiryStr)
+        logger.FatalIf(err, "Invalid value set in environment variable MINIO_CACHE_EXPIRY %s.", expiryStr)
globalCacheExpiry = expiry globalCacheExpiry = expiry
} }
@ -154,25 +155,25 @@ func handleCommonEnvVars() {
// Check for environment variables and parse into storageClass struct // Check for environment variables and parse into storageClass struct
if ssc := os.Getenv(standardStorageClassEnv); ssc != "" { if ssc := os.Getenv(standardStorageClassEnv); ssc != "" {
globalStandardStorageClass, err = parseStorageClass(ssc) globalStandardStorageClass, err = parseStorageClass(ssc)
fatalIf(err, "Invalid value set in environment variable %s.", standardStorageClassEnv) logger.FatalIf(err, "Invalid value set in environment variable %s.", standardStorageClassEnv)
} }
if rrsc := os.Getenv(reducedRedundancyStorageClassEnv); rrsc != "" { if rrsc := os.Getenv(reducedRedundancyStorageClassEnv); rrsc != "" {
globalRRStorageClass, err = parseStorageClass(rrsc) globalRRStorageClass, err = parseStorageClass(rrsc)
fatalIf(err, "Invalid value set in environment variable %s.", reducedRedundancyStorageClassEnv) logger.FatalIf(err, "Invalid value set in environment variable %s.", reducedRedundancyStorageClassEnv)
} }
// Validation is done after parsing both the storage classes. This is needed because we need one // Validation is done after parsing both the storage classes. This is needed because we need one
// storage class value to deduce the correct value of the other storage class. // storage class value to deduce the correct value of the other storage class.
if globalRRStorageClass.Scheme != "" { if globalRRStorageClass.Scheme != "" {
err = validateParity(globalStandardStorageClass.Parity, globalRRStorageClass.Parity) err = validateParity(globalStandardStorageClass.Parity, globalRRStorageClass.Parity)
fatalIf(err, "Invalid value set in environment variable %s.", reducedRedundancyStorageClassEnv) logger.FatalIf(err, "Invalid value set in environment variable %s.", reducedRedundancyStorageClassEnv)
globalIsStorageClass = true globalIsStorageClass = true
} }
if globalStandardStorageClass.Scheme != "" { if globalStandardStorageClass.Scheme != "" {
err = validateParity(globalStandardStorageClass.Parity, globalRRStorageClass.Parity) err = validateParity(globalStandardStorageClass.Parity, globalRRStorageClass.Parity)
fatalIf(err, "Invalid value set in environment variable %s.", standardStorageClassEnv) logger.FatalIf(err, "Invalid value set in environment variable %s.", standardStorageClassEnv)
globalIsStorageClass = true globalIsStorageClass = true
} }
} }
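Note the ordering the comment insists on for the storage-class envs: both values are parsed before either is validated, because each class's parity constrains the valid range of the other. Reduced to its shape (the wrapper function below is a hypothetical stand-in around the real parseStorageClass/validateParity used above):

// Parse both classes first, then cross-validate.
func loadStorageClassesSketch(stdVal, rrsVal string) error {
	var std, rrs storageClass
	var err error
	if stdVal != "" {
		if std, err = parseStorageClass(stdVal); err != nil {
			return err
		}
	}
	if rrsVal != "" {
		if rrs, err = parseStorageClass(rrsVal); err != nil {
			return err
		}
	}
	// Only now is cross-validation meaningful: one class's parity bounds
	// the valid range of the other.
	return validateParity(std.Parity, rrs.Parity)
}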

View File

@ -21,6 +21,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/event/target" "github.com/minio/minio/pkg/event/target"
@ -194,7 +195,7 @@ func purgeV1() error {
} }
os.RemoveAll(configFile) os.RemoveAll(configFile)
log.Println("Removed unsupported config version 1.") logger.Println("Removed unsupported config version 1.")
return nil return nil
} }
@ -252,7 +253,7 @@ func migrateV2ToV3() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv2.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv2.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv2.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv2.Version, srvConfig.Version)
return nil return nil
} }
@ -290,7 +291,7 @@ func migrateV3ToV4() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv3.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv3.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv3.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv3.Version, srvConfig.Version)
return nil return nil
} }
@ -331,7 +332,7 @@ func migrateV4ToV5() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv4.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv4.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv4.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv4.Version, srvConfig.Version)
return nil return nil
} }
@ -420,7 +421,7 @@ func migrateV5ToV6() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv5.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv5.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv5.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv5.Version, srvConfig.Version)
return nil return nil
} }
@ -476,7 +477,7 @@ func migrateV6ToV7() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv6.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv6.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv6.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv6.Version, srvConfig.Version)
return nil return nil
} }
@ -539,7 +540,7 @@ func migrateV7ToV8() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv7.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv7.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv7.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv7.Version, srvConfig.Version)
return nil return nil
} }
@ -609,7 +610,7 @@ func migrateV8ToV9() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv8.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv8.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv8.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv8.Version, srvConfig.Version)
return nil return nil
} }
@ -677,7 +678,7 @@ func migrateV9ToV10() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv9.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv9.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv9.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv9.Version, srvConfig.Version)
return nil return nil
} }
@ -748,7 +749,7 @@ func migrateV10ToV11() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv10.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv10.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv10.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv10.Version, srvConfig.Version)
return nil return nil
} }
@ -846,7 +847,7 @@ func migrateV11ToV12() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv11.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv11.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv11.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv11.Version, srvConfig.Version)
return nil return nil
} }
@ -926,7 +927,7 @@ func migrateV12ToV13() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv12.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv12.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv12.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv12.Version, srvConfig.Version)
return nil return nil
} }
@ -1011,7 +1012,7 @@ func migrateV13ToV14() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv13.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv13.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv13.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv13.Version, srvConfig.Version)
return nil return nil
} }
@ -1100,7 +1101,7 @@ func migrateV14ToV15() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv14.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv14.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv14.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv14.Version, srvConfig.Version)
return nil return nil
} }
@ -1190,7 +1191,7 @@ func migrateV15ToV16() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv15.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv15.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv15.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv15.Version, srvConfig.Version)
return nil return nil
} }
@ -1311,7 +1312,7 @@ func migrateV16ToV17() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv16.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv16.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv16.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv16.Version, srvConfig.Version)
return nil return nil
} }
@ -1415,7 +1416,7 @@ func migrateV17ToV18() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv17.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv17.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv17.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv17.Version, srvConfig.Version)
return nil return nil
} }
@ -1521,7 +1522,7 @@ func migrateV18ToV19() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv18.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv18.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv18.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv18.Version, srvConfig.Version)
return nil return nil
} }
@ -1626,7 +1627,7 @@ func migrateV19ToV20() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv19.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv19.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv19.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv19.Version, srvConfig.Version)
return nil return nil
} }
@ -1730,7 +1731,7 @@ func migrateV20ToV21() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv20.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv20.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv20.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv20.Version, srvConfig.Version)
return nil return nil
} }
@ -1834,7 +1835,7 @@ func migrateV21ToV22() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv21.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv21.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv21.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv21.Version, srvConfig.Version)
return nil return nil
} }
@ -1947,6 +1948,6 @@ func migrateV22ToV23() error {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv22.Version, srvConfig.Version, err) return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv22.Version, srvConfig.Version, err)
} }
log.Printf(configMigrateMSGTemplate, configFile, cv22.Version, srvConfig.Version) logger.Printf(configMigrateMSGTemplate, configFile, cv22.Version, srvConfig.Version)
return nil return nil
} }
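Each migrateVnToVn+1 above follows one template: decode the version-n config, copy fields into the version n+1 struct, save, then logger.Printf the migration message. The driver that chains the steps is outside this diff; sketched under that assumption:

// Hypothetical driver shape; the real migrateConfig in this codebase may
// differ in how it detects the current version before stepping.
func migrateConfigSketch() error {
	steps := []func() error{
		purgeV1,
		migrateV2ToV3, migrateV3ToV4, migrateV4ToV5, migrateV5ToV6,
		migrateV6ToV7, migrateV7ToV8, migrateV8ToV9, migrateV9ToV10,
		// ...continuing one hop per config version, up to...
		migrateV22ToV23,
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}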

View File

@ -28,6 +28,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/disk" "github.com/minio/minio/pkg/disk"
errors2 "github.com/minio/minio/pkg/errors" errors2 "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
@ -93,7 +94,7 @@ func newCacheFSObjects(dir string, expiry int, maxDiskUsagePct int) (*cacheFSObj
appendFileMap: make(map[string]*fsAppendFile), appendFileMap: make(map[string]*fsAppendFile),
} }
go fsObjects.cleanupStaleMultipartUploads(globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh) go fsObjects.cleanupStaleMultipartUploads(context.Background(), globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
cacheFS := cacheFSObjects{ cacheFS := cacheFSObjects{
FSObjects: fsObjects, FSObjects: fsObjects,
@ -116,7 +117,9 @@ func (cfs *cacheFSObjects) diskUsageLow() bool {
minUsage := cfs.maxDiskUsagePct * 80 / 100 minUsage := cfs.maxDiskUsagePct * 80 / 100
di, err := disk.GetInfo(cfs.dir) di, err := disk.GetInfo(cfs.dir)
if err != nil { if err != nil {
errorIf(err, "Error getting disk information on %s", cfs.dir) reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", cfs.dir)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return false return false
} }
usedPercent := (di.Total - di.Free) * 100 / di.Total usedPercent := (di.Total - di.Free) * 100 / di.Total
@ -128,7 +131,9 @@ func (cfs *cacheFSObjects) diskUsageLow() bool {
func (cfs *cacheFSObjects) diskUsageHigh() bool { func (cfs *cacheFSObjects) diskUsageHigh() bool {
di, err := disk.GetInfo(cfs.dir) di, err := disk.GetInfo(cfs.dir)
if err != nil { if err != nil {
errorIf(err, "Error getting disk information on %s", cfs.dir) reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", cfs.dir)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return true return true
} }
usedPercent := (di.Total - di.Free) * 100 / di.Total usedPercent := (di.Total - di.Free) * 100 / di.Total
@ -140,7 +145,9 @@ func (cfs *cacheFSObjects) diskUsageHigh() bool {
func (cfs *cacheFSObjects) diskAvailable(size int64) bool { func (cfs *cacheFSObjects) diskAvailable(size int64) bool {
di, err := disk.GetInfo(cfs.dir) di, err := disk.GetInfo(cfs.dir)
if err != nil { if err != nil {
errorIf(err, "Error getting disk information on %s", cfs.dir) reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", cfs.dir)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return false return false
} }
usedPercent := (di.Total - (di.Free - uint64(size))) * 100 / di.Total usedPercent := (di.Total - (di.Free - uint64(size))) * 100 / di.Total
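All three disk-usage helpers replace errorIf's printf-style message with the same two-step idiom: tag a ReqInfo, attach it to a fresh context, then log the bare error against it. The ReqInfo type itself is not shown in this section; a shape consistent with every call in the commit (SetReqInfo, GetReqInfo, chained AppendTags), offered purely as a sketch, would be:

// Assumed shape, not the committed implementation.
package logger

import "context"

type tag struct {
	key string
	val string
}

// ReqInfo carries request-scoped metadata for log entries.
type ReqInfo struct {
	tags []tag
}

// AppendTags adds a key/value tag and returns the receiver for chaining,
// matching calls like (&logger.ReqInfo{}).AppendTags("cachePath", dir).
func (r *ReqInfo) AppendTags(key, val string) *ReqInfo {
	if r == nil {
		return nil
	}
	r.tags = append(r.tags, tag{key, val})
	return r
}

type contextKeyType struct{}

// SetReqInfo stores the request info in a derived context.
func SetReqInfo(ctx context.Context, req *ReqInfo) context.Context {
	return context.WithValue(ctx, contextKeyType{}, req)
}

// GetReqInfo returns the stored request info, or nil if none was attached.
func GetReqInfo(ctx context.Context) *ReqInfo {
	r, _ := ctx.Value(contextKeyType{}).(*ReqInfo)
	return r
}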
@ -163,14 +170,15 @@ func (cfs *cacheFSObjects) purgeTrash() {
return return
} }
for _, entry := range entries { for _, entry := range entries {
fi, err := fsStatVolume(pathJoin(trashPath, entry)) ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{})
fi, err := fsStatVolume(ctx, pathJoin(trashPath, entry))
if err != nil { if err != nil {
continue continue
} }
dir := path.Join(trashPath, fi.Name()) dir := path.Join(trashPath, fi.Name())
// Delete all expired cache content. // Delete all expired cache content.
fsRemoveAll(dir) fsRemoveAll(ctx, dir)
} }
} }
} }
@ -193,7 +201,7 @@ func (cfs *cacheFSObjects) purge() {
deletedCount := 0 deletedCount := 0
buckets, err := cfs.ListBuckets(ctx) buckets, err := cfs.ListBuckets(ctx)
if err != nil { if err != nil {
errorIf(err, "Unable to list buckets.") logger.LogIf(ctx, err)
} }
// Reset cache online status if drive was offline earlier. // Reset cache online status if drive was offline earlier.
if !cfs.IsOnline() { if !cfs.IsOnline() {
@ -221,7 +229,7 @@ func (cfs *cacheFSObjects) purge() {
continue continue
} }
if err = cfs.DeleteObject(ctx, bucket.Name, object.Name); err != nil { if err = cfs.DeleteObject(ctx, bucket.Name, object.Name); err != nil {
errorIf(err, "Unable to remove cache entry in dir %s/%s", bucket.Name, object.Name) logger.LogIf(ctx, err)
continue continue
} }
deletedCount++ deletedCount++
@ -313,7 +321,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
var err error var err error
// Validate if bucket name is valid and exists. // Validate if bucket name is valid and exists.
if _, err = fs.statBucketDir(bucket); err != nil { if _, err = fs.statBucketDir(ctx, bucket); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket) return ObjectInfo{}, toObjectErr(err, bucket)
} }
@ -325,31 +333,32 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
// and return success. // and return success.
if isObjectDir(object, data.Size()) { if isObjectDir(object, data.Size()) {
// Check if an object is present as one of the parent dir. // Check if an object is present as one of the parent dir.
if fs.parentDirIsObject(bucket, path.Dir(object)) { if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errors2.Trace(errFileAccessDenied), bucket, object) return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
} }
if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil { if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
var fi os.FileInfo var fi os.FileInfo
if fi, err = fsStatDir(pathJoin(fs.fsPath, bucket, object)); err != nil { if fi, err = fsStatDir(ctx, pathJoin(fs.fsPath, bucket, object)); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
return fsMeta.ToObjectInfo(bucket, object, fi), nil return fsMeta.ToObjectInfo(bucket, object, fi), nil
} }
if err = checkPutObjectArgs(bucket, object, fs, data.Size()); err != nil { if err = checkPutObjectArgs(ctx, bucket, object, fs, data.Size()); err != nil {
return ObjectInfo{}, err return ObjectInfo{}, err
} }
// Check if an object is present as one of the parent dir. // Check if an object is present as one of the parent dir.
if fs.parentDirIsObject(bucket, path.Dir(object)) { if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errors2.Trace(errFileAccessDenied), bucket, object) return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
} }
// Validate input data size and it can never be less than zero. // Validate input data size and it can never be less than zero.
if data.Size() < 0 { if data.Size() < 0 {
return ObjectInfo{}, errors2.Trace(errInvalidArgument) logger.LogIf(ctx, errInvalidArgument)
return ObjectInfo{}, errInvalidArgument
} }
var wlk *lock.LockedFile var wlk *lock.LockedFile
@ -359,7 +368,8 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
wlk, err = fs.rwPool.Create(fsMetaPath) wlk, err = fs.rwPool.Create(fsMetaPath)
if err != nil { if err != nil {
return ObjectInfo{}, toObjectErr(errors2.Trace(err), bucket, object) logger.LogIf(ctx, err)
return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
// This close will allow for locks to be synchronized on `fs.json`. // This close will allow for locks to be synchronized on `fs.json`.
defer wlk.Close() defer wlk.Close()
@ -367,7 +377,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
// Remove meta file when PutObject encounters any error // Remove meta file when PutObject encounters any error
if retErr != nil { if retErr != nil {
tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID) tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)
fsRemoveMeta(bucketMetaDir, fsMetaPath, tmpDir) fsRemoveMeta(ctx, bucketMetaDir, fsMetaPath, tmpDir)
} }
}() }()
} }
@ -385,10 +395,9 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
buf := make([]byte, int(bufSize)) buf := make([]byte, int(bufSize))
fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj) fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj)
bytesWritten, err := fsCreateFile(fsTmpObjPath, data, buf, data.Size()) bytesWritten, err := fsCreateFile(ctx, fsTmpObjPath, data, buf, data.Size())
if err != nil { if err != nil {
fsRemoveFile(fsTmpObjPath) fsRemoveFile(ctx, fsTmpObjPath)
errorIf(err, "Failed to create object %s/%s", bucket, object)
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
if fsMeta.Meta["etag"] == "" { if fsMeta.Meta["etag"] == "" {
@ -397,18 +406,18 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
// Should return IncompleteBody{} error when reader has fewer // Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header. // bytes than specified in request header.
if bytesWritten < data.Size() { if bytesWritten < data.Size() {
fsRemoveFile(fsTmpObjPath) fsRemoveFile(ctx, fsTmpObjPath)
return ObjectInfo{}, errors2.Trace(IncompleteBody{}) return ObjectInfo{}, IncompleteBody{}
} }
// Delete the temporary object in the case of a // Delete the temporary object in the case of a
// failure. If PutObject succeeds, then there would be // failure. If PutObject succeeds, then there would be
// nothing to delete. // nothing to delete.
defer fsRemoveFile(fsTmpObjPath) defer fsRemoveFile(ctx, fsTmpObjPath)
// Entire object was written to the temp location, now it's safe to rename it to the actual location. // Entire object was written to the temp location, now it's safe to rename it to the actual location.
fsNSObjPath := pathJoin(fs.fsPath, bucket, object) fsNSObjPath := pathJoin(fs.fsPath, bucket, object)
if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil { if err = fsRenameFile(ctx, fsTmpObjPath, fsNSObjPath); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
@ -420,7 +429,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
} }
// Stat the file to fetch timestamp, size. // Stat the file to fetch timestamp, size.
fi, err := fsStatFile(pathJoin(fs.fsPath, bucket, object)) fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
if err != nil { if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
@ -447,11 +456,11 @@ func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, objec
} }
} }
fs := cfs.FSObjects fs := cfs.FSObjects
if err := checkNewMultipartArgs(bucket, object, fs); err != nil { if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil {
return "", toObjectErr(err, bucket) return "", toObjectErr(err, bucket)
} }
if _, err := fs.statBucketDir(bucket); err != nil { if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return "", toObjectErr(err, bucket) return "", toObjectErr(err, bucket)
} }
@ -459,7 +468,8 @@ func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, objec
err := mkdirAll(uploadIDDir, 0755) err := mkdirAll(uploadIDDir, 0755)
if err != nil { if err != nil {
return "", errors2.Trace(err) logger.LogIf(ctx, err)
return "", err
} }
// Initialize fs.json values. // Initialize fs.json values.
@ -468,11 +478,13 @@ func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, objec
fsMetaBytes, err := json.Marshal(fsMeta) fsMetaBytes, err := json.Marshal(fsMeta)
if err != nil { if err != nil {
return "", errors2.Trace(err) logger.LogIf(ctx, err)
return "", err
} }
if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0644); err != nil { if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0644); err != nil {
return "", errors2.Trace(err) logger.LogIf(ctx, err)
return "", err
} }
return uploadID, nil return uploadID, nil
} }
@ -485,7 +497,7 @@ func (cfs *cacheFSObjects) moveBucketToTrash(ctx context.Context, bucket string)
return err return err
} }
defer bucketLock.Unlock() defer bucketLock.Unlock()
bucketDir, err := fs.getBucketDir(bucket) bucketDir, err := fs.getBucketDir(ctx, bucket)
if err != nil { if err != nil {
return toObjectErr(err, bucket) return toObjectErr(err, bucket)
} }
@ -493,12 +505,13 @@ func (cfs *cacheFSObjects) moveBucketToTrash(ctx context.Context, bucket string)
expiredDir := path.Join(trashPath, bucket) expiredDir := path.Join(trashPath, bucket)
// Attempt to move regular bucket to expired directory. // Attempt to move regular bucket to expired directory.
if err = fsRenameDir(bucketDir, expiredDir); err != nil { if err = fsRenameDir(bucketDir, expiredDir); err != nil {
logger.LogIf(ctx, err)
return toObjectErr(err, bucket) return toObjectErr(err, bucket)
} }
// Cleanup all the bucket metadata. // Cleanup all the bucket metadata.
ominioMetadataBucketDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket) ominioMetadataBucketDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket)
nminioMetadataBucketDir := pathJoin(trashPath, MustGetUUID()) nminioMetadataBucketDir := pathJoin(trashPath, MustGetUUID())
_ = fsRenameDir(ominioMetadataBucketDir, nminioMetadataBucketDir) logger.LogIf(ctx, fsRenameDir(ominioMetadataBucketDir, nminioMetadataBucketDir))
return nil return nil
} }
@ -506,22 +519,22 @@ func (cfs *cacheFSObjects) moveBucketToTrash(ctx context.Context, bucket string)
// paths for windows automatically. // paths for windows automatically.
func fsRenameDir(dirPath, newPath string) (err error) { func fsRenameDir(dirPath, newPath string) (err error) {
if dirPath == "" || newPath == "" { if dirPath == "" || newPath == "" {
return errors2.Trace(errInvalidArgument) return errInvalidArgument
} }
if err = checkPathLength(dirPath); err != nil { if err = checkPathLength(dirPath); err != nil {
return errors2.Trace(err) return err
} }
if err = checkPathLength(newPath); err != nil { if err = checkPathLength(newPath); err != nil {
return errors2.Trace(err) return err
} }
if err = os.Rename(dirPath, newPath); err != nil { if err = os.Rename(dirPath, newPath); err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
return errors2.Trace(errVolumeNotFound) return errVolumeNotFound
} else if isSysErrNotEmpty(err) { } else if isSysErrNotEmpty(err) {
return errors2.Trace(errVolumeNotEmpty) return errVolumeNotEmpty
} }
return errors2.Trace(err) return err
} }
return nil return nil
} }
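fsRenameDir also illustrates the second half of the commit: errors2.Trace wrappers are dropped so functions return bare sentinels. One practical consequence, sketched below with assumed paths, is that callers can compare errors directly again instead of unwrapping with errors2.Cause first:

// Sketch: direct sentinel comparison on the un-traced return value.
func moveToTrashSketch() {
	switch err := fsRenameDir("/data/mybucket", "/data/.trash/mybucket"); err {
	case nil: // bucket moved to trash
	case errVolumeNotFound: // source directory was already gone
	case errVolumeNotEmpty: // rename target exists and is not empty
	default:
		_ = err // some other plain os error; handle or log upstream
	}
}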

View File

@ -31,6 +31,7 @@ import (
"github.com/djherbis/atime" "github.com/djherbis/atime"
"github.com/minio/minio/cmd/logger"
errors2 "github.com/minio/minio/pkg/errors" errors2 "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/wildcard" "github.com/minio/minio/pkg/wildcard"
@ -345,12 +346,12 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark
if err != nil { if err != nil {
return false return false
} }
_, err = fs.getObjectInfo(bucket, object) _, err = fs.getObjectInfo(ctx, bucket, object)
return err == nil return err == nil
} }
listDir := listDirCacheFactory(isLeaf, cacheTreeWalkIgnoredErrs, c.cache.cfs) listDir := listDirCacheFactory(isLeaf, cacheTreeWalkIgnoredErrs, c.cache.cfs)
walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh) walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
} }
for i := 0; i < maxKeys; { for i := 0; i < maxKeys; {
@ -383,7 +384,7 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark
} }
return result, toObjectErr(err, bucket, prefix) return result, toObjectErr(err, bucket, prefix)
} }
objInfo, err = fs.getObjectInfo(bucket, entry) objInfo, err = fs.getObjectInfo(ctx, bucket, entry)
if err != nil { if err != nil {
// Ignore errFileNotFound // Ignore errFileNotFound
if errors2.Cause(err) == errFileNotFound { if errors2.Cause(err) == errFileNotFound {
@ -754,7 +755,8 @@ func (c cacheObjects) StorageInfo(ctx context.Context) (storageInfo StorageInfo)
continue continue
} }
info, err := getDiskInfo((cfs.fsPath)) info, err := getDiskInfo((cfs.fsPath))
errorIf(err, "Unable to get disk info %#v", cfs.fsPath) logger.GetReqInfo(ctx).AppendTags("cachePath", cfs.fsPath)
logger.LogIf(ctx, err)
total += info.Total total += info.Total
free += info.Free free += info.Free
} }
@ -791,7 +793,8 @@ func (c cacheObjects) DeleteBucket(ctx context.Context, bucket string) (err erro
// or the global env overrides. // or the global env overrides.
func newCache(config CacheConfig) (*diskCache, error) { func newCache(config CacheConfig) (*diskCache, error) {
var cfsObjects []*cacheFSObjects var cfsObjects []*cacheFSObjects
formats, err := loadAndValidateCacheFormat(config.Drives) ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{})
formats, err := loadAndValidateCacheFormat(ctx, config.Drives)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -27,6 +27,7 @@ import (
"strings" "strings"
"github.com/minio/minio-go/pkg/set" "github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/mountinfo" "github.com/minio/minio/pkg/mountinfo"
) )
@ -399,7 +400,7 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
} }
ipList, err := getHostIP4(host) ipList, err := getHostIP4(host)
fatalIf(err, "unexpected error when resolving host '%s'", host) logger.FatalIf(err, "unexpected error when resolving host '%s'", host)
// Filter ipList by IPs that start with '127.'. // Filter ipList by IPs that start with '127.'.
loopBackIPs := ipList.FuncMatch(func(ip string, matchString string) bool { loopBackIPs := ipList.FuncMatch(func(ip string, matchString string) bool {

View File

@ -17,18 +17,20 @@
package cmd package cmd
import ( import (
"context"
"hash" "hash"
"io" "io"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
) )
// CreateFile creates a new bitrot encoded file spread over all available disks. CreateFile will create // CreateFile creates a new bitrot encoded file spread over all available disks. CreateFile will create
// the file at the given volume and path. It will read from src until an io.EOF occurs. The given algorithm will // the file at the given volume and path. It will read from src until an io.EOF occurs. The given algorithm will
// be used to protect the erasure encoded file. // be used to protect the erasure encoded file.
func (s *ErasureStorage) CreateFile(src io.Reader, volume, path string, buffer []byte, algorithm BitrotAlgorithm, writeQuorum int) (f ErasureFileInfo, err error) { func (s *ErasureStorage) CreateFile(ctx context.Context, src io.Reader, volume, path string, buffer []byte, algorithm BitrotAlgorithm, writeQuorum int) (f ErasureFileInfo, err error) {
if !algorithm.Available() { if !algorithm.Available() {
return f, errors.Trace(errBitrotHashAlgoInvalid) logger.LogIf(ctx, errBitrotHashAlgoInvalid)
return f, errBitrotHashAlgoInvalid
} }
f.Checksums = make([][]byte, len(s.disks)) f.Checksums = make([][]byte, len(s.disks))
hashers := make([]hash.Hash, len(s.disks)) hashers := make([]hash.Hash, len(s.disks))
@ -50,21 +52,22 @@ func (s *ErasureStorage) CreateFile(src io.Reader, volume, path string, buffer [
} }
blocks = make([][]byte, len(s.disks)) // write empty block blocks = make([][]byte, len(s.disks)) // write empty block
} else if err == nil || (n > 0 && err == io.ErrUnexpectedEOF) { } else if err == nil || (n > 0 && err == io.ErrUnexpectedEOF) {
blocks, err = s.ErasureEncode(buffer[:n]) blocks, err = s.ErasureEncode(ctx, buffer[:n])
if err != nil { if err != nil {
return f, err return f, err
} }
} else { } else {
return f, errors.Trace(err) logger.LogIf(ctx, err)
return f, err
} }
for i := range errChans { // spawn workers for i := range errChans { // spawn workers
go erasureAppendFile(s.disks[i], volume, path, hashers[i], blocks[i], errChans[i]) go erasureAppendFile(ctx, s.disks[i], volume, path, hashers[i], blocks[i], errChans[i])
} }
for i := range errChans { // wait until all workers are finished for i := range errChans { // wait until all workers are finished
errs[i] = <-errChans[i] errs[i] = <-errChans[i]
} }
if err = reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, writeQuorum); err != nil { if err = reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum); err != nil {
return f, err return f, err
} }
s.disks = evalDisks(s.disks, errs) s.disks = evalDisks(s.disks, errs)
@ -83,9 +86,10 @@ func (s *ErasureStorage) CreateFile(src io.Reader, volume, path string, buffer [
// erasureAppendFile appends the content of buf to the file on the given disk and computes // erasureAppendFile appends the content of buf to the file on the given disk and computes
// the hash of the written data. It sends the write error (or nil) over the error channel. // the hash of the written data. It sends the write error (or nil) over the error channel.
func erasureAppendFile(disk StorageAPI, volume, path string, hash hash.Hash, buf []byte, errChan chan<- error) { func erasureAppendFile(ctx context.Context, disk StorageAPI, volume, path string, hash hash.Hash, buf []byte, errChan chan<- error) {
if disk == OfflineDisk { if disk == OfflineDisk {
errChan <- errors.Trace(errDiskNotFound) logger.LogIf(ctx, errDiskNotFound)
errChan <- errDiskNotFound
return return
} }
err := disk.AppendFile(volume, path, buf) err := disk.AppendFile(volume, path, buf)
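CreateFile drives erasureAppendFile as a fan-out/fan-in: one goroutine and one error channel per disk, then a quorum reduction over the collected errors. The coordination skeleton, stripped of hashing and encoding (a sketch of the code above, not new behavior; the channel buffering is an assumption):

// One worker and one channel per disk; collect all results, then reduce.
errChans := make([]chan error, len(s.disks))
for i := range errChans {
	errChans[i] = make(chan error, 1)
}
for i := range errChans {
	go erasureAppendFile(ctx, s.disks[i], volume, path, hashers[i], blocks[i], errChans[i])
}
errs := make([]error, len(s.disks))
for i := range errChans {
	errs[i] = <-errChans[i] // block until every worker reports
}
// Up to len(s.disks)-writeQuorum failures are tolerated.
if err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum); err != nil {
	return f, err
}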

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"crypto/rand" "crypto/rand"
"io" "io"
"testing" "testing"
@ -70,7 +71,7 @@ func TestErasureCreateFile(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Test %d: failed to create test setup: %v", i, err) t.Fatalf("Test %d: failed to create test setup: %v", i, err)
} }
storage, err := NewErasureStorage(setup.disks, test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize) storage, err := NewErasureStorage(context.Background(), setup.disks, test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
if err != nil { if err != nil {
setup.Remove() setup.Remove()
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err) t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
@ -82,7 +83,7 @@ func TestErasureCreateFile(t *testing.T) {
setup.Remove() setup.Remove()
t.Fatalf("Test %d: failed to generate random test data: %v", i, err) t.Fatalf("Test %d: failed to generate random test data: %v", i, err)
} }
file, err := storage.CreateFile(bytes.NewReader(data[test.offset:]), "testbucket", "object", buffer, test.algorithm, test.dataBlocks+1) file, err := storage.CreateFile(context.Background(), bytes.NewReader(data[test.offset:]), "testbucket", "object", buffer, test.algorithm, test.dataBlocks+1)
if err != nil && !test.shouldFail { if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but failed with: %v", i, err) t.Errorf("Test %d: should pass but failed with: %v", i, err)
} }
@ -100,7 +101,7 @@ func TestErasureCreateFile(t *testing.T) {
if test.offDisks > 0 { if test.offDisks > 0 {
storage.disks[0] = OfflineDisk storage.disks[0] = OfflineDisk
} }
file, err = storage.CreateFile(bytes.NewReader(data[test.offset:]), "testbucket", "object2", buffer, test.algorithm, test.dataBlocks+1) file, err = storage.CreateFile(context.Background(), bytes.NewReader(data[test.offset:]), "testbucket", "object2", buffer, test.algorithm, test.dataBlocks+1)
if err != nil && !test.shouldFailQuorum { if err != nil && !test.shouldFailQuorum {
t.Errorf("Test %d: should pass but failed with: %v", i, err) t.Errorf("Test %d: should pass but failed with: %v", i, err)
} }
@ -125,7 +126,7 @@ func benchmarkErasureWrite(data, parity, dataDown, parityDown int, size int64, b
b.Fatalf("failed to create test setup: %v", err) b.Fatalf("failed to create test setup: %v", err)
} }
defer setup.Remove() defer setup.Remove()
storage, err := NewErasureStorage(setup.disks, data, parity, blockSizeV1) storage, err := NewErasureStorage(context.Background(), setup.disks, data, parity, blockSizeV1)
if err != nil { if err != nil {
b.Fatalf("failed to create ErasureStorage: %v", err) b.Fatalf("failed to create ErasureStorage: %v", err)
} }
@ -143,7 +144,7 @@ func benchmarkErasureWrite(data, parity, dataDown, parityDown int, size int64, b
b.SetBytes(size) b.SetBytes(size)
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
_, err := storage.CreateFile(bytes.NewReader(content), "testbucket", "object", buffer, DefaultBitrotAlgorithm, data+1) _, err := storage.CreateFile(context.Background(), bytes.NewReader(content), "testbucket", "object", buffer, DefaultBitrotAlgorithm, data+1)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -17,11 +17,12 @@
package cmd package cmd
import ( import (
"context"
"fmt" "fmt"
"hash" "hash"
"strings" "strings"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
) )
// HealFile tries to reconstruct an erasure-coded file spread over all // HealFile tries to reconstruct an erasure-coded file spread over all
@ -45,12 +46,13 @@ import (
// //
// It returns bitrot checksums for the non-nil staleDisks on which // It returns bitrot checksums for the non-nil staleDisks on which
// healing succeeded. // healing succeeded.
func (s ErasureStorage) HealFile(staleDisks []StorageAPI, volume, path string, blocksize int64, func (s ErasureStorage) HealFile(ctx context.Context, staleDisks []StorageAPI, volume, path string, blocksize int64,
dstVol, dstPath string, size int64, alg BitrotAlgorithm, checksums [][]byte) ( dstVol, dstPath string, size int64, alg BitrotAlgorithm, checksums [][]byte) (
f ErasureFileInfo, err error) { f ErasureFileInfo, err error) {
if !alg.Available() { if !alg.Available() {
return f, errors.Trace(errBitrotHashAlgoInvalid) logger.LogIf(ctx, errBitrotHashAlgoInvalid)
return f, errBitrotHashAlgoInvalid
} }
// Initialization // Initialization
@ -84,7 +86,7 @@ func (s ErasureStorage) HealFile(staleDisks []StorageAPI, volume, path string, b
} }
readLen += lastChunkSize readLen += lastChunkSize
var buffers [][]byte var buffers [][]byte
buffers, _, err = s.readConcurrent(volume, path, 0, readLen, verifiers) buffers, _, err = s.readConcurrent(ctx, volume, path, 0, readLen, verifiers)
if err != nil { if err != nil {
return f, err return f, err
} }
@ -131,7 +133,7 @@ func (s ErasureStorage) HealFile(staleDisks []StorageAPI, volume, path string, b
} }
buffOffset += csize buffOffset += csize
if err = s.ErasureDecodeDataAndParityBlocks(blocks); err != nil { if err = s.ErasureDecodeDataAndParityBlocks(ctx, blocks); err != nil {
return f, err return f, err
} }
@ -155,7 +157,9 @@ func (s ErasureStorage) HealFile(staleDisks []StorageAPI, volume, path string, b
// If all disks had write errors we quit. // If all disks had write errors we quit.
if !writeSucceeded { if !writeSucceeded {
// build error from all write errors // build error from all write errors
return f, errors.Trace(joinWriteErrors(writeErrors)) err := joinWriteErrors(writeErrors)
logger.LogIf(ctx, err)
return f, err
} }
} }
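Per the doc comment above, healing targets arrive as a sparse slice aligned with s.disks: nil for healthy disks, the handle for each stale one. A call site might be set up like this (the disk index, names, and sizes are assumed for illustration):

// Sketch: rebuild only disk 2, writing the healed file to the same name.
staleDisks := make([]StorageAPI, len(s.disks))
staleDisks[2] = s.disks[2] // e.g. this disk failed bitrot verification

info, err := s.HealFile(ctx, staleDisks, "testbucket", "testobject", blocksize,
	"testbucket", "testobject", size, DefaultBitrotAlgorithm, checksums)
if err == nil {
	_ = info.Checksums[2] // fresh bitrot checksum for the healed copy
}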

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"crypto/rand" "crypto/rand"
"io" "io"
"reflect" "reflect"
@ -74,7 +75,7 @@ func TestErasureHealFile(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Test %d: failed to setup XL environment: %v", i, err) t.Fatalf("Test %d: failed to setup XL environment: %v", i, err)
} }
storage, err := NewErasureStorage(setup.disks, test.dataBlocks, test.disks-test.dataBlocks, test.blocksize) storage, err := NewErasureStorage(context.Background(), setup.disks, test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
if err != nil { if err != nil {
setup.Remove() setup.Remove()
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err) t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
@ -89,7 +90,7 @@ func TestErasureHealFile(t *testing.T) {
algorithm = DefaultBitrotAlgorithm algorithm = DefaultBitrotAlgorithm
} }
buffer := make([]byte, test.blocksize, 2*test.blocksize) buffer := make([]byte, test.blocksize, 2*test.blocksize)
file, err := storage.CreateFile(bytes.NewReader(data), "testbucket", "testobject", buffer, algorithm, test.dataBlocks+1) file, err := storage.CreateFile(context.Background(), bytes.NewReader(data), "testbucket", "testobject", buffer, algorithm, test.dataBlocks+1)
if err != nil { if err != nil {
setup.Remove() setup.Remove()
t.Fatalf("Test %d: failed to create random test data: %v", i, err) t.Fatalf("Test %d: failed to create random test data: %v", i, err)
@ -113,7 +114,7 @@ func TestErasureHealFile(t *testing.T) {
} }
// test case setup is complete - now call Healfile() // test case setup is complete - now call Healfile()
info, err := storage.HealFile(staleDisks, "testbucket", "testobject", test.blocksize, "testbucket", "healedobject", test.size, test.algorithm, file.Checksums) info, err := storage.HealFile(context.Background(), staleDisks, "testbucket", "testobject", test.blocksize, "testbucket", "healedobject", test.size, test.algorithm, file.Checksums)
if err != nil && !test.shouldFail { if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but it failed with: %v", i, err) t.Errorf("Test %d: should pass but it failed with: %v", i, err)
} }

View File

@ -17,9 +17,10 @@
package cmd package cmd
import ( import (
"context"
"io" "io"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
) )
type errIdx struct { type errIdx struct {
@ -27,7 +28,7 @@ type errIdx struct {
err error err error
} }
func (s ErasureStorage) readConcurrent(volume, path string, offset, length int64, func (s ErasureStorage) readConcurrent(ctx context.Context, volume, path string, offset, length int64,
verifiers []*BitrotVerifier) (buffers [][]byte, needsReconstruction bool, verifiers []*BitrotVerifier) (buffers [][]byte, needsReconstruction bool,
err error) { err error) {
@ -39,7 +40,8 @@ func (s ErasureStorage) readConcurrent(volume, path string, offset, length int64
stageBuffers[i] = make([]byte, length) stageBuffers[i] = make([]byte, length)
disk := s.disks[i] disk := s.disks[i]
if disk == OfflineDisk { if disk == OfflineDisk {
errChan <- errIdx{i, errors.Trace(errDiskNotFound)} logger.LogIf(ctx, errDiskNotFound)
errChan <- errIdx{i, errDiskNotFound}
return return
} }
_, rerr := disk.ReadFile(volume, path, offset, stageBuffers[i], verifiers[i]) _, rerr := disk.ReadFile(volume, path, offset, stageBuffers[i], verifiers[i])
@ -75,7 +77,8 @@ func (s ErasureStorage) readConcurrent(volume, path string, offset, length int64
} }
if successCount != s.dataBlocks { if successCount != s.dataBlocks {
// Not enough disks returned data. // Not enough disks returned data.
err = errors.Trace(errXLReadQuorum) err = errXLReadQuorum
logger.LogIf(ctx, err)
} }
return return
} }
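The quorum check in readConcurrent is simple counting: dataBlocks successful reads are necessary and sufficient to reconstruct, so anything less yields errXLReadQuorum. With assumed numbers:

// With 4 data and 2 parity shards, any 4 healthy disks satisfy a read.
func readQuorumExample() {
	dataBlocks, parityBlocks := 4, 2
	totalDisks := dataBlocks + parityBlocks // 6 disks, one shard each
	needed := dataBlocks                    // minimum successful reads: 4
	tolerated := totalDisks - needed        // up to 2 disks may be bad
	fmt.Println(needed, tolerated)          // prints: 4 2
}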
@ -86,18 +89,21 @@ func (s ErasureStorage) readConcurrent(volume, path string, offset, length int64
// integrity of the given file. ReadFile will read data from the given // integrity of the given file. ReadFile will read data from the given
// offset up to the given length. If parts of the file are corrupted // offset up to the given length. If parts of the file are corrupted
// ReadFile tries to reconstruct the data. // ReadFile tries to reconstruct the data.
func (s ErasureStorage) ReadFile(writer io.Writer, volume, path string, offset, func (s ErasureStorage) ReadFile(ctx context.Context, writer io.Writer, volume, path string, offset,
length, totalLength int64, checksums [][]byte, algorithm BitrotAlgorithm, length, totalLength int64, checksums [][]byte, algorithm BitrotAlgorithm,
blocksize int64) (f ErasureFileInfo, err error) { blocksize int64) (f ErasureFileInfo, err error) {
if offset < 0 || length < 0 { if offset < 0 || length < 0 {
return f, errors.Trace(errUnexpected) logger.LogIf(ctx, errUnexpected)
return f, errUnexpected
} }
if offset+length > totalLength { if offset+length > totalLength {
return f, errors.Trace(errUnexpected) logger.LogIf(ctx, errUnexpected)
return f, errUnexpected
} }
if !algorithm.Available() { if !algorithm.Available() {
return f, errors.Trace(errBitrotHashAlgoInvalid) logger.LogIf(ctx, errBitrotHashAlgoInvalid)
return f, errBitrotHashAlgoInvalid
} }
f.Checksums = make([][]byte, len(s.disks)) f.Checksums = make([][]byte, len(s.disks))
@ -145,7 +151,7 @@ func (s ErasureStorage) ReadFile(writer io.Writer, volume, path string, offset,
var buffers [][]byte var buffers [][]byte
var needsReconstruction bool var needsReconstruction bool
buffers, needsReconstruction, err = s.readConcurrent(volume, path, buffers, needsReconstruction, err = s.readConcurrent(ctx, volume, path,
partDataStartIndex, partDataLength, verifiers) partDataStartIndex, partDataLength, verifiers)
if err != nil { if err != nil {
// Could not read enough disks. // Could not read enough disks.
@ -194,7 +200,8 @@ func (s ErasureStorage) ReadFile(writer io.Writer, volume, path string, offset,
if needsReconstruction { if needsReconstruction {
if err = s.ErasureDecodeDataBlocks(blocks); err != nil { if err = s.ErasureDecodeDataBlocks(blocks); err != nil {
return f, errors.Trace(err) logger.LogIf(ctx, err)
return f, err
} }
} }
@ -210,7 +217,7 @@ func (s ErasureStorage) ReadFile(writer io.Writer, volume, path string, offset,
writeLength = lastBlockLength - writeStart writeLength = lastBlockLength - writeStart
} }
} }
n, err := writeDataBlocks(writer, blocks, s.dataBlocks, writeStart, writeLength) n, err := writeDataBlocks(ctx, writer, blocks, s.dataBlocks, writeStart, writeLength)
if err != nil { if err != nil {
return f, err return f, err
} }

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
crand "crypto/rand" crand "crypto/rand"
"io" "io"
"math/rand" "math/rand"
@ -86,7 +87,7 @@ func TestErasureReadFile(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Test %d: failed to create test setup: %v", i, err) t.Fatalf("Test %d: failed to create test setup: %v", i, err)
} }
storage, err := NewErasureStorage(setup.disks, test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize) storage, err := NewErasureStorage(context.Background(), setup.disks, test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
if err != nil { if err != nil {
setup.Remove() setup.Remove()
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err) t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
@ -102,13 +103,13 @@ func TestErasureReadFile(t *testing.T) {
writeAlgorithm = DefaultBitrotAlgorithm writeAlgorithm = DefaultBitrotAlgorithm
} }
buffer := make([]byte, test.blocksize, 2*test.blocksize) buffer := make([]byte, test.blocksize, 2*test.blocksize)
file, err := storage.CreateFile(bytes.NewReader(data[:]), "testbucket", "object", buffer, writeAlgorithm, test.dataBlocks+1) file, err := storage.CreateFile(context.Background(), bytes.NewReader(data[:]), "testbucket", "object", buffer, writeAlgorithm, test.dataBlocks+1)
if err != nil { if err != nil {
setup.Remove() setup.Remove()
t.Fatalf("Test %d: failed to create erasure test file: %v", i, err) t.Fatalf("Test %d: failed to create erasure test file: %v", i, err)
} }
writer := bytes.NewBuffer(nil) writer := bytes.NewBuffer(nil)
readInfo, err := storage.ReadFile(writer, "testbucket", "object", test.offset, test.length, test.data, file.Checksums, test.algorithm, test.blocksize) readInfo, err := storage.ReadFile(context.Background(), writer, "testbucket", "object", test.offset, test.length, test.data, file.Checksums, test.algorithm, test.blocksize)
if err != nil && !test.shouldFail { if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but failed with: %v", i, err) t.Errorf("Test %d: should pass but failed with: %v", i, err)
} }
@ -134,7 +135,7 @@ func TestErasureReadFile(t *testing.T) {
if test.offDisks > 0 { if test.offDisks > 0 {
storage.disks[0] = OfflineDisk storage.disks[0] = OfflineDisk
} }
readInfo, err = storage.ReadFile(writer, "testbucket", "object", test.offset, test.length, test.data, file.Checksums, test.algorithm, test.blocksize) readInfo, err = storage.ReadFile(context.Background(), writer, "testbucket", "object", test.offset, test.length, test.data, file.Checksums, test.algorithm, test.blocksize)
if err != nil && !test.shouldFailQuorum { if err != nil && !test.shouldFailQuorum {
t.Errorf("Test %d: should pass but failed with: %v", i, err) t.Errorf("Test %d: should pass but failed with: %v", i, err)
} }
@ -174,7 +175,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
} }
defer setup.Remove() defer setup.Remove()
storage, err := NewErasureStorage(setup.disks, dataBlocks, parityBlocks, blockSize) storage, err := NewErasureStorage(context.Background(), setup.disks, dataBlocks, parityBlocks, blockSize)
if err != nil { if err != nil {
t.Fatalf("failed to create ErasureStorage: %v", err) t.Fatalf("failed to create ErasureStorage: %v", err)
} }
@ -191,7 +192,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
// Create a test file to read from. // Create a test file to read from.
buffer := make([]byte, blockSize, 2*blockSize) buffer := make([]byte, blockSize, 2*blockSize)
file, err := storage.CreateFile(bytes.NewReader(data), "testbucket", "testobject", buffer, DefaultBitrotAlgorithm, dataBlocks+1) file, err := storage.CreateFile(context.Background(), bytes.NewReader(data), "testbucket", "testobject", buffer, DefaultBitrotAlgorithm, dataBlocks+1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -211,7 +212,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
expected := data[offset : offset+readLen] expected := data[offset : offset+readLen]
_, err = storage.ReadFile(buf, "testbucket", "testobject", offset, readLen, length, file.Checksums, DefaultBitrotAlgorithm, blockSize) _, err = storage.ReadFile(context.Background(), buf, "testbucket", "testobject", offset, readLen, length, file.Checksums, DefaultBitrotAlgorithm, blockSize)
if err != nil { if err != nil {
t.Fatal(err, offset, readLen) t.Fatal(err, offset, readLen)
} }
@ -231,14 +232,14 @@ func benchmarkErasureRead(data, parity, dataDown, parityDown int, size int64, b
b.Fatalf("failed to create test setup: %v", err) b.Fatalf("failed to create test setup: %v", err)
} }
defer setup.Remove() defer setup.Remove()
storage, err := NewErasureStorage(setup.disks, data, parity, blockSizeV1) storage, err := NewErasureStorage(context.Background(), setup.disks, data, parity, blockSizeV1)
if err != nil { if err != nil {
b.Fatalf("failed to create ErasureStorage: %v", err) b.Fatalf("failed to create ErasureStorage: %v", err)
} }
content := make([]byte, size) content := make([]byte, size)
buffer := make([]byte, blockSizeV1, 2*blockSizeV1) buffer := make([]byte, blockSizeV1, 2*blockSizeV1)
file, err := storage.CreateFile(bytes.NewReader(content), "testbucket", "object", buffer, DefaultBitrotAlgorithm, data+1) file, err := storage.CreateFile(context.Background(), bytes.NewReader(content), "testbucket", "object", buffer, DefaultBitrotAlgorithm, data+1)
if err != nil { if err != nil {
b.Fatalf("failed to create erasure test file: %v", err) b.Fatalf("failed to create erasure test file: %v", err)
} }
@ -255,7 +256,7 @@ func benchmarkErasureRead(data, parity, dataDown, parityDown int, size int64, b
b.SetBytes(size) b.SetBytes(size)
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
if file, err = storage.ReadFile(bytes.NewBuffer(content[:0]), "testbucket", "object", 0, size, size, checksums, DefaultBitrotAlgorithm, blockSizeV1); err != nil { if file, err = storage.ReadFile(context.Background(), bytes.NewBuffer(content[:0]), "testbucket", "object", 0, size, size, checksums, DefaultBitrotAlgorithm, blockSizeV1); err != nil {
panic(err) panic(err)
} }
} }

View File

@ -18,10 +18,11 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"io" "io"
"github.com/klauspost/reedsolomon" "github.com/klauspost/reedsolomon"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
) )
// getDataBlockLen - get length of data blocks from encoded blocks. // getDataBlockLen - get length of data blocks from encoded blocks.
@ -36,20 +37,23 @@ func getDataBlockLen(enBlocks [][]byte, dataBlocks int) int {
// Writes all the data blocks from encoded blocks until the requested // Writes all the data blocks from encoded blocks until the requested
// output length. Provides a way to skip bytes until the offset. // output length. Provides a way to skip bytes until the offset.
func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset int64, length int64) (int64, error) { func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, dataBlocks int, offset int64, length int64) (int64, error) {
// Offset and out size cannot be negative. // Offset and out size cannot be negative.
if offset < 0 || length < 0 { if offset < 0 || length < 0 {
return 0, errors.Trace(errUnexpected) logger.LogIf(ctx, errUnexpected)
return 0, errUnexpected
} }
// Do we have enough blocks? // Do we have enough blocks?
if len(enBlocks) < dataBlocks { if len(enBlocks) < dataBlocks {
return 0, errors.Trace(reedsolomon.ErrTooFewShards) logger.LogIf(ctx, reedsolomon.ErrTooFewShards)
return 0, reedsolomon.ErrTooFewShards
} }
// Do we have enough data? // Do we have enough data?
if int64(getDataBlockLen(enBlocks, dataBlocks)) < length { if int64(getDataBlockLen(enBlocks, dataBlocks)) < length {
return 0, errors.Trace(reedsolomon.ErrShortData) logger.LogIf(ctx, reedsolomon.ErrShortData)
return 0, reedsolomon.ErrShortData
} }
// Counter to decrement total left to write. // Counter to decrement total left to write.
@ -77,7 +81,8 @@ func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset in
if write < int64(len(block)) { if write < int64(len(block)) {
n, err := io.Copy(dst, bytes.NewReader(block[:write])) n, err := io.Copy(dst, bytes.NewReader(block[:write]))
if err != nil { if err != nil {
return 0, errors.Trace(err) logger.LogIf(ctx, err)
return 0, err
} }
totalWritten += n totalWritten += n
break break
@ -85,7 +90,8 @@ func writeDataBlocks(dst io.Writer, enBlocks [][]byte, dataBlocks int, offset in
// Copy the block. // Copy the block.
n, err := io.Copy(dst, bytes.NewReader(block)) n, err := io.Copy(dst, bytes.NewReader(block))
if err != nil { if err != nil {
return 0, errors.Trace(err) logger.LogIf(ctx, err)
return 0, err
} }
// Decrement output size. // Decrement output size.
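The offset/length walk above is the heart of writeDataBlocks: skip whole blocks that lie before the requested offset, trim the first block actually read from, then copy until length bytes are written. A standalone sketch of the same logic (function name and demo values are illustrative):

package main

import (
	"bytes"
	"io"
	"os"
)

// writeBlocks mirrors the skip-then-copy walk in writeDataBlocks.
func writeBlocks(dst io.Writer, blocks [][]byte, offset, length int64) (int64, error) {
	var totalWritten int64
	write := length
	for _, block := range blocks {
		if offset >= int64(len(block)) {
			offset -= int64(len(block)) // skip blocks entirely before the offset
			continue
		}
		block = block[offset:] // trim the first block we copy from
		offset = 0
		if write < int64(len(block)) { // final, partial block
			n, err := io.Copy(dst, bytes.NewReader(block[:write]))
			if err != nil {
				return 0, err
			}
			return totalWritten + n, nil
		}
		n, err := io.Copy(dst, bytes.NewReader(block))
		if err != nil {
			return 0, err
		}
		write -= n
		totalWritten += n
	}
	return totalWritten, nil
}

func main() {
	blocks := [][]byte{[]byte("hello "), []byte("erasure "), []byte("world")}
	writeBlocks(os.Stdout, blocks, 3, 10) // prints "lo erasure"
}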

View File

@ -17,11 +17,12 @@
package cmd package cmd
import ( import (
"context"
"crypto/subtle" "crypto/subtle"
"hash" "hash"
"github.com/klauspost/reedsolomon" "github.com/klauspost/reedsolomon"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
) )
// OfflineDisk represents an unavailable disk. // OfflineDisk represents an unavailable disk.
@ -44,11 +45,12 @@ type ErasureStorage struct {
// NewErasureStorage creates a new ErasureStorage. The storage erasure codes and protects all data written to // NewErasureStorage creates a new ErasureStorage. The storage erasure codes and protects all data written to
// the disks. // the disks.
func NewErasureStorage(disks []StorageAPI, dataBlocks, parityBlocks int, blockSize int64) (s ErasureStorage, err error) { func NewErasureStorage(ctx context.Context, disks []StorageAPI, dataBlocks, parityBlocks int, blockSize int64) (s ErasureStorage, err error) {
shardsize := (int(blockSize) + dataBlocks - 1) / dataBlocks shardsize := (int(blockSize) + dataBlocks - 1) / dataBlocks
erasure, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(shardsize)) erasure, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(shardsize))
if err != nil { if err != nil {
return s, errors.Tracef("failed to create erasure coding: %v", err) logger.LogIf(ctx, err)
return s, err
} }
s = ErasureStorage{ s = ErasureStorage{
disks: make([]StorageAPI, len(disks)), disks: make([]StorageAPI, len(disks)),
@ -62,13 +64,15 @@ func NewErasureStorage(disks []StorageAPI, dataBlocks, parityBlocks int, blockSi
// ErasureEncode encodes the given data and returns the erasure-coded data. // ErasureEncode encodes the given data and returns the erasure-coded data.
// It returns an error if the erasure coding failed. // It returns an error if the erasure coding failed.
func (s *ErasureStorage) ErasureEncode(data []byte) ([][]byte, error) { func (s *ErasureStorage) ErasureEncode(ctx context.Context, data []byte) ([][]byte, error) {
encoded, err := s.erasure.Split(data) encoded, err := s.erasure.Split(data)
if err != nil { if err != nil {
return nil, errors.Tracef("failed to split data: %v", err) logger.LogIf(ctx, err)
return nil, err
} }
if err = s.erasure.Encode(encoded); err != nil { if err = s.erasure.Encode(encoded); err != nil {
return nil, errors.Tracef("failed to encode data: %v", err) logger.LogIf(ctx, err)
return nil, err
} }
return encoded, nil return encoded, nil
} }
@ -78,16 +82,17 @@ func (s *ErasureStorage) ErasureEncode(data []byte) ([][]byte, error) {
// It returns an error if the decoding failed. // It returns an error if the decoding failed.
func (s *ErasureStorage) ErasureDecodeDataBlocks(data [][]byte) error { func (s *ErasureStorage) ErasureDecodeDataBlocks(data [][]byte) error {
if err := s.erasure.ReconstructData(data); err != nil { if err := s.erasure.ReconstructData(data); err != nil {
return errors.Tracef("failed to reconstruct data: %v", err) return err
} }
return nil return nil
} }
// ErasureDecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it. // ErasureDecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it.
// It returns an error if the decoding failed. // It returns an error if the decoding failed.
func (s *ErasureStorage) ErasureDecodeDataAndParityBlocks(data [][]byte) error { func (s *ErasureStorage) ErasureDecodeDataAndParityBlocks(ctx context.Context, data [][]byte) error {
if err := s.erasure.Reconstruct(data); err != nil { if err := s.erasure.Reconstruct(data); err != nil {
return errors.Tracef("failed to reconstruct data: %v", err) logger.LogIf(ctx, err)
return err
} }
return nil return nil
} }
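Under the hood NewErasureStorage wraps klauspost/reedsolomon, and the two decode methods above map directly onto its API: Reconstruct rebuilds data and parity shards, while ReconstructData rebuilds only the data shards. A small standalone sketch of that API, with illustrative shard counts:

package main

import (
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	const dataShards, parityShards = 4, 2
	enc, err := reedsolomon.New(dataShards, parityShards)
	if err != nil {
		panic(err)
	}
	// Split pads the payload and slices it into dataShards pieces.
	shards, err := enc.Split([]byte("some object payload to protect"))
	if err != nil {
		panic(err)
	}
	// Encode computes the parityShards parity pieces in place.
	if err = enc.Encode(shards); err != nil {
		panic(err)
	}
	shards[0], shards[5] = nil, nil // simulate two lost disks
	// Reconstruct rebuilds both; ReconstructData would rebuild data only.
	if err = enc.Reconstruct(shards); err != nil {
		panic(err)
	}
	ok, err := enc.Verify(shards)
	fmt.Println("verified:", ok, err)
}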

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"crypto/rand" "crypto/rand"
"io" "io"
"os" "os"
@ -52,11 +53,11 @@ func TestErasureDecode(t *testing.T) {
copy(buffer, data) copy(buffer, data)
disks := make([]StorageAPI, test.dataBlocks+test.parityBlocks) disks := make([]StorageAPI, test.dataBlocks+test.parityBlocks)
storage, err := NewErasureStorage(disks, test.dataBlocks, test.parityBlocks, blockSizeV1) storage, err := NewErasureStorage(context.Background(), disks, test.dataBlocks, test.parityBlocks, blockSizeV1)
if err != nil { if err != nil {
t.Fatalf("Test %d: failed to create erasure storage: %v", i, err) t.Fatalf("Test %d: failed to create erasure storage: %v", i, err)
} }
encoded, err := storage.ErasureEncode(buffer) encoded, err := storage.ErasureEncode(context.Background(), buffer)
if err != nil { if err != nil {
t.Fatalf("Test %d: failed to encode data: %v", i, err) t.Fatalf("Test %d: failed to encode data: %v", i, err)
} }
@ -69,7 +70,7 @@ func TestErasureDecode(t *testing.T) {
} }
if test.reconstructParity { if test.reconstructParity {
err = storage.ErasureDecodeDataAndParityBlocks(encoded) err = storage.ErasureDecodeDataAndParityBlocks(context.Background(), encoded)
} else { } else {
err = storage.ErasureDecodeDataBlocks(encoded) err = storage.ErasureDecodeDataBlocks(encoded)
} }
@ -98,7 +99,7 @@ func TestErasureDecode(t *testing.T) {
} }
decodedData := new(bytes.Buffer) decodedData := new(bytes.Buffer)
if _, err = writeDataBlocks(decodedData, decoded, test.dataBlocks, 0, int64(len(data))); err != nil { if _, err = writeDataBlocks(context.Background(), decodedData, decoded, test.dataBlocks, 0, int64(len(data))); err != nil {
t.Errorf("Test %d: failed to write data blocks: %v", i, err) t.Errorf("Test %d: failed to write data blocks: %v", i, err)
} }
if !bytes.Equal(decodedData.Bytes(), data) { if !bytes.Equal(decodedData.Bytes(), data) {
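This test file also shows the payoff of the commit's central change: because logger.LogIf only logs and never wraps, functions return bare sentinel errors that callers can compare directly, making the errors.Cause indirection that errors.Trace required unnecessary over time. A minimal sketch of such a helper; the real cmd/logger records much richer call-site and request metadata:

package main

import (
	"context"
	"errors"
	"log"
	"runtime"
)

// logIf is a sketch of logger.LogIf: log a non-nil error with the
// file:line of the failure site, without wrapping it.
func logIf(ctx context.Context, err error) {
	if err == nil {
		return
	}
	_, file, line, _ := runtime.Caller(1)
	log.Printf("%s:%d: %v", file, line, err)
}

var errShortData = errors.New("short data")

func decode(ctx context.Context) error {
	logIf(ctx, errShortData) // log at the failure site...
	return errShortData      // ...but return the bare sentinel
}

func main() {
	if err := decode(context.Background()); err == errShortData {
		log.Println("sentinel comparison still works:", err)
	}
}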

View File

@ -17,13 +17,14 @@
package cmd package cmd
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"os" "os"
"reflect" "reflect"
errors2 "github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
) )
const ( const (
@ -100,14 +101,14 @@ func createFormatCache(fsFormatPath string, format *formatCacheV1) error {
// open file using READ & WRITE permission // open file using READ & WRITE permission
var file, err = os.OpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0600) var file, err = os.OpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0600)
if err != nil { if err != nil {
return errors2.Trace(err) return err
} }
// Close the locked file upon return. // Close the locked file upon return.
defer file.Close() defer file.Close()
fi, err := file.Stat() fi, err := file.Stat()
if err != nil { if err != nil {
return errors2.Trace(err) return err
} }
if fi.Size() != 0 { if fi.Size() != 0 {
// format.json already got created because of another minio process's createFormatCache() // format.json already got created because of another minio process's createFormatCache()
@ -118,7 +119,7 @@ func createFormatCache(fsFormatPath string, format *formatCacheV1) error {
// This function creates a cache format file on disk and returns a slice // This function creates a cache format file on disk and returns a slice
// of format cache config // of format cache config
func initFormatCache(drives []string) (formats []*formatCacheV1, err error) { func initFormatCache(ctx context.Context, drives []string) (formats []*formatCacheV1, err error) {
nformats := newFormatCacheV1(drives) nformats := newFormatCacheV1(drives)
for _, drive := range drives { for _, drive := range drives {
_, err = os.Stat(drive) _, err = os.Stat(drive)
@ -126,28 +127,36 @@ func initFormatCache(drives []string) (formats []*formatCacheV1, err error) {
continue continue
} }
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
logger.GetReqInfo(ctx).AppendTags("drive", drive)
logger.LogIf(ctx, err)
return nil, err return nil, err
} }
if err = os.Mkdir(drive, 0777); err != nil { if err = os.Mkdir(drive, 0777); err != nil {
logger.GetReqInfo(ctx).AppendTags("drive", drive)
logger.LogIf(ctx, err)
return nil, err return nil, err
} }
} }
for i, drive := range drives { for i, drive := range drives {
if err = os.Mkdir(pathJoin(drive, minioMetaBucket), 0777); err != nil { if err = os.Mkdir(pathJoin(drive, minioMetaBucket), 0777); err != nil {
if !os.IsExist(err) { if !os.IsExist(err) {
logger.GetReqInfo(ctx).AppendTags("drive", drive)
logger.LogIf(ctx, err)
return nil, err return nil, err
} }
} }
cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile) cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile)
// Fresh disk - create format.json for this cfs // Fresh disk - create format.json for this cfs
if err = createFormatCache(cacheFormatPath, nformats[i]); err != nil { if err = createFormatCache(cacheFormatPath, nformats[i]); err != nil {
logger.GetReqInfo(ctx).AppendTags("drive", drive)
logger.LogIf(ctx, err)
return nil, err return nil, err
} }
} }
return nformats, nil return nformats, nil
} }
func loadFormatCache(drives []string) ([]*formatCacheV1, error) { func loadFormatCache(ctx context.Context, drives []string) ([]*formatCacheV1, error) {
formats := make([]*formatCacheV1, len(drives)) formats := make([]*formatCacheV1, len(drives))
for i, drive := range drives { for i, drive := range drives {
cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile) cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile)
@ -156,6 +165,7 @@ func loadFormatCache(drives []string) ([]*formatCacheV1, error) {
if os.IsNotExist(err) { if os.IsNotExist(err) {
continue continue
} }
logger.LogIf(ctx, err)
return nil, err return nil, err
} }
defer f.Close() defer f.Close()
@ -268,7 +278,7 @@ func findCacheDiskIndex(disk string, disks []string) int {
} }
// validate whether cache drives order has changed // validate whether cache drives order has changed
func validateCacheFormats(formats []*formatCacheV1) error { func validateCacheFormats(ctx context.Context, formats []*formatCacheV1) error {
count := 0 count := 0
for _, format := range formats { for _, format := range formats {
if format == nil { if format == nil {
@ -279,12 +289,16 @@ func validateCacheFormats(formats []*formatCacheV1) error {
return errors.New("Cache format files missing on all drives") return errors.New("Cache format files missing on all drives")
} }
if _, err := checkFormatCacheValues(formats); err != nil { if _, err := checkFormatCacheValues(formats); err != nil {
logger.LogIf(ctx, err)
return err return err
} }
if err := checkCacheDisksSliceConsistency(formats); err != nil { if err := checkCacheDisksSliceConsistency(formats); err != nil {
logger.LogIf(ctx, err)
return err return err
} }
return checkCacheDiskConsistency(formats) err := checkCacheDiskConsistency(formats)
logger.LogIf(ctx, err)
return err
} }
// return true if all of the list of cache drives are // return true if all of the list of cache drives are
@ -303,16 +317,16 @@ func cacheDrivesUnformatted(drives []string) bool {
// create format.json for each cache drive if fresh disk or load format from disk // create format.json for each cache drive if fresh disk or load format from disk
// Then validate the format for all drives in the cache to ensure order // Then validate the format for all drives in the cache to ensure order
// of cache drives has not changed. // of cache drives has not changed.
func loadAndValidateCacheFormat(drives []string) (formats []*formatCacheV1, err error) { func loadAndValidateCacheFormat(ctx context.Context, drives []string) (formats []*formatCacheV1, err error) {
if cacheDrivesUnformatted(drives) { if cacheDrivesUnformatted(drives) {
formats, err = initFormatCache(drives) formats, err = initFormatCache(ctx, drives)
} else { } else {
formats, err = loadFormatCache(drives) formats, err = loadFormatCache(ctx, drives)
} }
if err != nil { if err != nil {
return nil, err return nil, err
} }
if err = validateCacheFormats(formats); err != nil { if err = validateCacheFormats(ctx, formats); err != nil {
return nil, err return nil, err
} }
return formats, nil return formats, nil
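A detail worth noting in these new error paths: before each LogIf, the failing drive is recorded with logger.GetReqInfo(ctx).AppendTags("drive", drive), so the log entry names the drive without changing the returned error. A self-contained sketch of that tagging pattern using local stand-ins for the logger types:

package main

import (
	"context"
	"errors"
	"fmt"
)

// reqInfo is a stand-in for logger.ReqInfo: a tag bag carried in the context.
type reqInfo struct{ tags map[string]string }

func (r *reqInfo) appendTags(key, val string) *reqInfo {
	r.tags[key] = val
	return r
}

type ctxKey int

const reqInfoKey ctxKey = 0

func getReqInfo(ctx context.Context) *reqInfo {
	if r, ok := ctx.Value(reqInfoKey).(*reqInfo); ok {
		return r
	}
	return &reqInfo{tags: map[string]string{}}
}

// logIf is a stand-in for logger.LogIf: emit the error plus request tags.
func logIf(ctx context.Context, err error) {
	if err != nil {
		fmt.Printf("%v tags=%v\n", err, getReqInfo(ctx).tags)
	}
}

func main() {
	ctx := context.WithValue(context.Background(),
		reqInfoKey, &reqInfo{tags: map[string]string{}})
	drive := "/mnt/cache1"
	err := errors.New("mkdir " + drive + ": permission denied") // simulated
	// Mirrors the error path added to initFormatCache: tag, log once, return.
	getReqInfo(ctx).appendTags("drive", drive)
	logIf(ctx, err)
}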

View File

@ -17,18 +17,20 @@
package cmd package cmd
import ( import (
"context"
"os" "os"
"testing" "testing"
) )
// TestDiskCacheFormat - tests initFormatCache, formatMetaGetFormatBackendCache, formatCacheGetVersion. // TestDiskCacheFormat - tests initFormatCache, formatMetaGetFormatBackendCache, formatCacheGetVersion.
func TestDiskCacheFormat(t *testing.T) { func TestDiskCacheFormat(t *testing.T) {
ctx := context.Background()
fsDirs, err := getRandomDisks(1) fsDirs, err := getRandomDisks(1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
_, err = initFormatCache(fsDirs) _, err = initFormatCache(ctx, fsDirs)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -56,7 +58,7 @@ func TestDiskCacheFormat(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if _, err = loadAndValidateCacheFormat(fsDirs); err == nil { if _, err = loadAndValidateCacheFormat(context.Background(), fsDirs); err == nil {
t.Fatal("expected to fail") t.Fatal("expected to fail")
} }
@ -69,7 +71,7 @@ func TestDiskCacheFormat(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if _, err = loadAndValidateCacheFormat(fsDirs); err == nil { if _, err = loadAndValidateCacheFormat(context.Background(), fsDirs); err == nil {
t.Fatal("expected to fail") t.Fatal("expected to fail")
} }
} }
@ -307,7 +309,7 @@ func TestFormatCache(t *testing.T) {
} }
for i, testCase := range testCases { for i, testCase := range testCases {
err := validateCacheFormats(testCase.formatConfigs) err := validateCacheFormats(context.Background(), testCase.formatConfigs)
if err != nil && testCase.shouldPass { if err != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass but failed with %s", i+1, err) t.Errorf("Test %d: Expected to pass but failed with %s", i+1, err)
} }
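The corruption cases in this test follow a simple negative-test shape: damage format.json on disk, then assert that loading and validation fail. A distilled, runnable sketch of that shape (loadConfig is a hypothetical stand-in for loadAndValidateCacheFormat):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// loadConfig rejects anything that is not the expected format marker.
func loadConfig(path string) error {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	if string(b) != "format-cache-v1" {
		return fmt.Errorf("unrecognized format: %q", b)
	}
	return nil
}

func main() {
	f, _ := ioutil.TempFile("", "format.json")
	defer os.Remove(f.Name())
	f.WriteString("garbage") // corrupt the file
	f.Close()
	if err := loadConfig(f.Name()); err == nil {
		fmt.Println("expected to fail")
	} else {
		fmt.Println("rejected as expected:", err)
	}
}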

View File

@ -17,13 +17,14 @@
package cmd package cmd
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"os" "os"
"path" "path"
"time" "time"
errors2 "github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/lock" "github.com/minio/minio/pkg/lock"
) )
@ -97,7 +98,7 @@ func formatFSGetVersion(r io.ReadSeeker) (string, error) {
// Migrate from V1 to V2. V2 implements new backend format for multipart // Migrate from V1 to V2. V2 implements new backend format for multipart
// uploads. Delete the previous multipart directory. // uploads. Delete the previous multipart directory.
func formatFSMigrateV1ToV2(wlk *lock.LockedFile, fsPath string) error { func formatFSMigrateV1ToV2(ctx context.Context, wlk *lock.LockedFile, fsPath string) error {
version, err := formatFSGetVersion(wlk) version, err := formatFSGetVersion(wlk)
if err != nil { if err != nil {
return err return err
@ -107,11 +108,12 @@ func formatFSMigrateV1ToV2(wlk *lock.LockedFile, fsPath string) error {
return fmt.Errorf(`format.json version expected %s, found %s`, formatFSVersionV1, version) return fmt.Errorf(`format.json version expected %s, found %s`, formatFSVersionV1, version)
} }
if err = fsRemoveAll(path.Join(fsPath, minioMetaMultipartBucket)); err != nil { if err = fsRemoveAll(ctx, path.Join(fsPath, minioMetaMultipartBucket)); err != nil {
return err return err
} }
if err = os.MkdirAll(path.Join(fsPath, minioMetaMultipartBucket), 0755); err != nil { if err = os.MkdirAll(path.Join(fsPath, minioMetaMultipartBucket), 0755); err != nil {
logger.LogIf(ctx, err)
return err return err
} }
@ -122,7 +124,7 @@ func formatFSMigrateV1ToV2(wlk *lock.LockedFile, fsPath string) error {
// Migration should happen when formatFSV1.FS.Version changes. This version // Migration should happen when formatFSV1.FS.Version changes. This version
// can change when there is a change to the struct formatFSV1.FS or if there // can change when there is a change to the struct formatFSV1.FS or if there
// is any change in the backend file system tree structure. // is any change in the backend file system tree structure.
func formatFSMigrate(wlk *lock.LockedFile, fsPath string) error { func formatFSMigrate(ctx context.Context, wlk *lock.LockedFile, fsPath string) error {
// Add any migration code here in case we bump format.FS.Version // Add any migration code here in case we bump format.FS.Version
version, err := formatFSGetVersion(wlk) version, err := formatFSGetVersion(wlk)
if err != nil { if err != nil {
@ -131,7 +133,7 @@ func formatFSMigrate(wlk *lock.LockedFile, fsPath string) error {
switch version { switch version {
case formatFSVersionV1: case formatFSVersionV1:
if err = formatFSMigrateV1ToV2(wlk, fsPath); err != nil { if err = formatFSMigrateV1ToV2(ctx, wlk, fsPath); err != nil {
return err return err
} }
fallthrough fallthrough
@ -151,19 +153,21 @@ func formatFSMigrate(wlk *lock.LockedFile, fsPath string) error {
} }
// Creates a new format.json if unformatted. // Creates a new format.json if unformatted.
func createFormatFS(fsFormatPath string) error { func createFormatFS(ctx context.Context, fsFormatPath string) error {
// Attempt a write lock on formatConfigFile `format.json` // Attempt a write lock on formatConfigFile `format.json`
// file stored in minioMetaBucket(.minio.sys) directory. // file stored in minioMetaBucket(.minio.sys) directory.
lk, err := lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0600) lk, err := lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0600)
if err != nil { if err != nil {
return errors2.Trace(err) logger.LogIf(ctx, err)
return err
} }
// Close the locked file upon return. // Close the locked file upon return.
defer lk.Close() defer lk.Close()
fi, err := lk.Stat() fi, err := lk.Stat()
if err != nil { if err != nil {
return errors2.Trace(err) logger.LogIf(ctx, err)
return err
} }
if fi.Size() != 0 { if fi.Size() != 0 {
// format.json already got created because of another minio process's createFormatFS() // format.json already got created because of another minio process's createFormatFS()
@ -177,7 +181,7 @@ func createFormatFS(fsFormatPath string) error {
// The file descriptor should be kept open throughout the life // The file descriptor should be kept open throughout the life
// of the process so that another minio process does not try to // of the process so that another minio process does not try to
// migrate the backend when we are actively working on the backend. // migrate the backend when we are actively working on the backend.
func initFormatFS(fsPath string) (rlk *lock.RLockedFile, err error) { func initFormatFS(ctx context.Context, fsPath string) (rlk *lock.RLockedFile, err error) {
fsFormatPath := pathJoin(fsPath, minioMetaBucket, formatConfigFile) fsFormatPath := pathJoin(fsPath, minioMetaBucket, formatConfigFile)
// Any read on format.json should be done with read-lock. // Any read on format.json should be done with read-lock.
// Any write on format.json should be done with write-lock. // Any write on format.json should be done with write-lock.
@ -191,7 +195,8 @@ func initFormatFS(fsPath string) (rlk *lock.RLockedFile, err error) {
var fi os.FileInfo var fi os.FileInfo
fi, err = rlk.Stat() fi, err = rlk.Stat()
if err != nil { if err != nil {
return nil, errors2.Trace(err) logger.LogIf(ctx, err)
return nil, err
} }
isEmpty = fi.Size() == 0 isEmpty = fi.Size() == 0
} }
@ -200,7 +205,7 @@ func initFormatFS(fsPath string) (rlk *lock.RLockedFile, err error) {
rlk.Close() rlk.Close()
} }
// Fresh disk - create format.json // Fresh disk - create format.json
err = createFormatFS(fsFormatPath) err = createFormatFS(ctx, fsFormatPath)
if err == lock.ErrAlreadyLocked { if err == lock.ErrAlreadyLocked {
// Lock already present, sleep and attempt again. // Lock already present, sleep and attempt again.
// Can happen in a rare situation when a parallel minio process // Can happen in a rare situation when a parallel minio process
@ -209,19 +214,22 @@ func initFormatFS(fsPath string) (rlk *lock.RLockedFile, err error) {
continue continue
} }
if err != nil { if err != nil {
return nil, errors2.Trace(err) logger.LogIf(ctx, err)
return nil, err
} }
// After successfully creating format.json try to hold a read-lock on // After successfully creating format.json try to hold a read-lock on
// the file. // the file.
continue continue
} }
if err != nil { if err != nil {
return nil, errors2.Trace(err) logger.LogIf(ctx, err)
return nil, err
} }
formatBackend, err := formatMetaGetFormatBackendFS(rlk) formatBackend, err := formatMetaGetFormatBackendFS(rlk)
if err != nil { if err != nil {
return nil, errors2.Trace(err) logger.LogIf(ctx, err)
return nil, err
} }
if formatBackend != formatBackendFS { if formatBackend != formatBackendFS {
return nil, fmt.Errorf(`%s file: expected format-type: %s, found: %s`, formatConfigFile, formatBackendFS, formatBackend) return nil, fmt.Errorf(`%s file: expected format-type: %s, found: %s`, formatConfigFile, formatBackendFS, formatBackend)
@ -244,7 +252,7 @@ func initFormatFS(fsPath string) (rlk *lock.RLockedFile, err error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
err = formatFSMigrate(wlk, fsPath) err = formatFSMigrate(ctx, wlk, fsPath)
wlk.Close() wlk.Close()
if err != nil { if err != nil {
// Migration failed, bail out so that the user can observe what happened. // Migration failed, bail out so that the user can observe what happened.
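createFormatFS and initFormatFS coordinate concurrent minio processes through pkg/lock: creation is attempted under a try-lock, and if another process already holds it the caller sleeps and retries until that process finishes initializing format.json. A sketch of that loop, assuming the pkg/lock API used in this file (the sleep interval is illustrative):

package main

import (
	"os"
	"time"

	"github.com/minio/minio/pkg/lock"
)

// createWithRetry keeps attempting an exclusive locked create of path until
// whichever competing process initialized it first releases its lock.
func createWithRetry(path string) (*lock.LockedFile, error) {
	for {
		lk, err := lock.TryLockedOpenFile(path, os.O_RDWR|os.O_CREATE, 0600)
		if err == lock.ErrAlreadyLocked {
			// Another process is initializing format.json; back off, retry.
			time.Sleep(100 * time.Millisecond)
			continue
		}
		return lk, err
	}
}

func main() {
	lk, err := createWithRetry("format.json")
	if err != nil {
		panic(err)
	}
	defer lk.Close()
}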

View File

@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
@ -38,7 +39,7 @@ func TestFSFormatFS(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
rlk, err := initFormatFS(disk) rlk, err := initFormatFS(context.Background(), disk)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -81,7 +82,7 @@ func TestFSFormatFS(t *testing.T) {
if _, err = formatFSGetVersion(rlk); err == nil { if _, err = formatFSGetVersion(rlk); err == nil {
t.Fatal("expected to fail") t.Fatal("expected to fail")
} }
if _, err = initFormatFS(disk); err == nil { if _, err = initFormatFS(context.Background(), disk); err == nil {
t.Fatal("expected to fail") t.Fatal("expected to fail")
} }
@ -96,7 +97,7 @@ func TestFSFormatFS(t *testing.T) {
if _, err = formatMetaGetFormatBackendFS(f); err == nil { if _, err = formatMetaGetFormatBackendFS(f); err == nil {
t.Fatal("expected to fail") t.Fatal("expected to fail")
} }
if _, err = initFormatFS(disk); err == nil { if _, err = initFormatFS(context.Background(), disk); err == nil {
t.Fatal("expected to fail") t.Fatal("expected to fail")
} }
} }

View File

@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@ -520,7 +521,7 @@ func formatXLV3Check(reference *formatXLV3, format *formatXLV3) error {
} }
// saveFormatXLAll - populates `format.json` on disks in its order. // saveFormatXLAll - populates `format.json` on disks in its order.
func saveFormatXLAll(storageDisks []StorageAPI, formats []*formatXLV3) error { func saveFormatXLAll(ctx context.Context, storageDisks []StorageAPI, formats []*formatXLV3) error {
var errs = make([]error, len(storageDisks)) var errs = make([]error, len(storageDisks))
var wg = &sync.WaitGroup{} var wg = &sync.WaitGroup{}
@ -542,7 +543,7 @@ func saveFormatXLAll(storageDisks []StorageAPI, formats []*formatXLV3) error {
wg.Wait() wg.Wait()
writeQuorum := len(storageDisks)/2 + 1 writeQuorum := len(storageDisks)/2 + 1
return reduceWriteQuorumErrs(errs, nil, writeQuorum) return reduceWriteQuorumErrs(ctx, errs, nil, writeQuorum)
} }
// relinquishes the underlying connection for all storage disks. // relinquishes the underlying connection for all storage disks.
@ -614,7 +615,7 @@ func fixFormatXLV3(storageDisks []StorageAPI, endpoints EndpointList, formats []
} }
// initFormatXL - save XL format configuration on all disks. // initFormatXL - save XL format configuration on all disks.
func initFormatXL(storageDisks []StorageAPI, setCount, disksPerSet int) (format *formatXLV3, err error) { func initFormatXL(ctx context.Context, storageDisks []StorageAPI, setCount, disksPerSet int) (format *formatXLV3, err error) {
format = newFormatXLV3(setCount, disksPerSet) format = newFormatXLV3(setCount, disksPerSet)
formats := make([]*formatXLV3, len(storageDisks)) formats := make([]*formatXLV3, len(storageDisks))
@ -632,7 +633,7 @@ func initFormatXL(storageDisks []StorageAPI, setCount, disksPerSet int) (format
} }
// Save formats `format.json` across all disks. // Save formats `format.json` across all disks.
if err = saveFormatXLAll(storageDisks, formats); err != nil { if err = saveFormatXLAll(ctx, storageDisks, formats); err != nil {
return nil, err return nil, err
} }

View File

@ -17,11 +17,13 @@
package cmd package cmd
import ( import (
"context"
"io" "io"
"os" "os"
pathutil "path" pathutil "path"
"runtime" "runtime"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/lock" "github.com/minio/minio/pkg/lock"
) )
@ -29,17 +31,23 @@ import (
// Removes only the file at the given path, does not remove // Removes only the file at the given path, does not remove
// any parent directories, handles long paths for // any parent directories, handles long paths for
// windows automatically. // windows automatically.
func fsRemoveFile(filePath string) (err error) { func fsRemoveFile(ctx context.Context, filePath string) (err error) {
if filePath == "" { if filePath == "" {
return errors.Trace(errInvalidArgument) logger.LogIf(ctx, errInvalidArgument)
return errInvalidArgument
} }
if err = checkPathLength(filePath); err != nil { if err = checkPathLength(filePath); err != nil {
return errors.Trace(err) logger.LogIf(ctx, err)
return err
} }
if err = os.Remove((filePath)); err != nil { if err = os.Remove((filePath)); err != nil {
return osErrToFSFileErr(err) fsErr := osErrToFSFileErr(err)
if fsErr != errFileNotFound {
logger.LogIf(ctx, err)
}
return fsErr
} }
return nil return nil
@ -47,22 +55,27 @@ func fsRemoveFile(filePath string) (err error) {
// Removes all files and folders at a given path, handles // Removes all files and folders at a given path, handles
// long paths for windows automatically. // long paths for windows automatically.
func fsRemoveAll(dirPath string) (err error) { func fsRemoveAll(ctx context.Context, dirPath string) (err error) {
if dirPath == "" { if dirPath == "" {
return errors.Trace(errInvalidArgument) logger.LogIf(ctx, errInvalidArgument)
return errInvalidArgument
} }
if err = checkPathLength(dirPath); err != nil { if err = checkPathLength(dirPath); err != nil {
return errors.Trace(err) logger.LogIf(ctx, err)
return err
} }
if err = os.RemoveAll(dirPath); err != nil { if err = os.RemoveAll(dirPath); err != nil {
if os.IsPermission(err) { if os.IsPermission(err) {
return errors.Trace(errVolumeAccessDenied) logger.LogIf(ctx, errVolumeAccessDenied)
return errVolumeAccessDenied
} else if isSysErrNotEmpty(err) { } else if isSysErrNotEmpty(err) {
return errors.Trace(errVolumeNotEmpty) logger.LogIf(ctx, errVolumeNotEmpty)
return errVolumeNotEmpty
} }
return errors.Trace(err) logger.LogIf(ctx, err)
return err
} }
return nil return nil
@ -70,22 +83,27 @@ func fsRemoveAll(dirPath string) (err error) {
// Removes a directory only if its empty, handles long // Removes a directory only if its empty, handles long
// paths for windows automatically. // paths for windows automatically.
func fsRemoveDir(dirPath string) (err error) { func fsRemoveDir(ctx context.Context, dirPath string) (err error) {
if dirPath == "" { if dirPath == "" {
return errors.Trace(errInvalidArgument) logger.LogIf(ctx, errInvalidArgument)
return errInvalidArgument
} }
if err = checkPathLength(dirPath); err != nil { if err = checkPathLength(dirPath); err != nil {
return errors.Trace(err) logger.LogIf(ctx, err)
return err
} }
if err = os.Remove((dirPath)); err != nil { if err = os.Remove((dirPath)); err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
return errors.Trace(errVolumeNotFound) logger.LogIf(ctx, errVolumeNotFound)
return errVolumeNotFound
} else if isSysErrNotEmpty(err) { } else if isSysErrNotEmpty(err) {
return errors.Trace(errVolumeNotEmpty) logger.LogIf(ctx, errVolumeNotEmpty)
return errVolumeNotEmpty
} }
return errors.Trace(err) logger.LogIf(ctx, err)
return err
} }
return nil return nil
@ -95,29 +113,36 @@ func fsRemoveDir(dirPath string) (err error) {
// otherwise returns an error. If directory already // otherwise returns an error. If directory already
// exists returns an error. Windows long paths // exists returns an error. Windows long paths
// are handled automatically. // are handled automatically.
func fsMkdir(dirPath string) (err error) { func fsMkdir(ctx context.Context, dirPath string) (err error) {
if dirPath == "" { if dirPath == "" {
return errors.Trace(errInvalidArgument) logger.LogIf(ctx, errInvalidArgument)
return errInvalidArgument
} }
if err = checkPathLength(dirPath); err != nil { if err = checkPathLength(dirPath); err != nil {
return errors.Trace(err) logger.LogIf(ctx, err)
return err
} }
if err = os.Mkdir((dirPath), 0777); err != nil { if err = os.Mkdir((dirPath), 0777); err != nil {
if os.IsExist(err) { if os.IsExist(err) {
return errors.Trace(errVolumeExists) logger.LogIf(ctx, errVolumeExists)
return errVolumeExists
} else if os.IsPermission(err) { } else if os.IsPermission(err) {
return errors.Trace(errDiskAccessDenied) logger.LogIf(ctx, errDiskAccessDenied)
return errDiskAccessDenied
} else if isSysErrNotDir(err) { } else if isSysErrNotDir(err) {
// File path cannot be verified since // File path cannot be verified since
// one of the parents is a file. // one of the parents is a file.
return errors.Trace(errDiskAccessDenied) logger.LogIf(ctx, errDiskAccessDenied)
return errDiskAccessDenied
} else if isSysErrPathNotFound(err) { } else if isSysErrPathNotFound(err) {
// Add specific case for windows. // Add specific case for windows.
return errors.Trace(errDiskAccessDenied) logger.LogIf(ctx, errDiskAccessDenied)
return errDiskAccessDenied
} }
return errors.Trace(err) logger.LogIf(ctx, err)
return err
} }
return nil return nil
@ -128,36 +153,40 @@ func fsMkdir(dirPath string) (err error) {
// not perform any higher layer interpretation of files v/s // not perform any higher layer interpretation of files v/s
// directories. For higher level interpretation look at // directories. For higher level interpretation look at
// fsStatFileDir, fsStatFile, fsStatDir. // fsStatFileDir, fsStatFile, fsStatDir.
func fsStat(statLoc string) (os.FileInfo, error) { func fsStat(ctx context.Context, statLoc string) (os.FileInfo, error) {
if statLoc == "" { if statLoc == "" {
return nil, errors.Trace(errInvalidArgument) logger.LogIf(ctx, errInvalidArgument)
return nil, errInvalidArgument
} }
if err := checkPathLength(statLoc); err != nil { if err := checkPathLength(statLoc); err != nil {
return nil, errors.Trace(err) logger.LogIf(ctx, err)
return nil, err
} }
fi, err := os.Stat((statLoc)) fi, err := os.Stat((statLoc))
if err != nil { if err != nil {
return nil, errors.Trace(err) logger.LogIf(ctx, err)
return nil, err
} }
return fi, nil return fi, nil
} }
// Lookup if volume exists, returns volume attributes upon success. // Lookup if volume exists, returns volume attributes upon success.
func fsStatVolume(volume string) (os.FileInfo, error) { func fsStatVolume(ctx context.Context, volume string) (os.FileInfo, error) {
fi, err := fsStat(volume) fi, err := fsStat(ctx, volume)
if err != nil { if err != nil {
err = errors.Cause(err) err = errors.Cause(err)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return nil, errors.Trace(errVolumeNotFound) return nil, errVolumeNotFound
} else if os.IsPermission(err) { } else if os.IsPermission(err) {
return nil, errors.Trace(errVolumeAccessDenied) return nil, errVolumeAccessDenied
} }
return nil, errors.Trace(err) return nil, err
} }
if !fi.IsDir() { if !fi.IsDir() {
return nil, errors.Trace(errVolumeAccessDenied) logger.LogIf(ctx, errVolumeAccessDenied)
return nil, errVolumeAccessDenied
} }
return fi, nil return fi, nil
@ -173,52 +202,55 @@ func osErrToFSFileErr(err error) error {
} }
err = errors.Cause(err) err = errors.Cause(err)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return errors.Trace(errFileNotFound) return errFileNotFound
} }
if os.IsPermission(err) { if os.IsPermission(err) {
return errors.Trace(errFileAccessDenied) return errFileAccessDenied
} }
if isSysErrNotDir(err) { if isSysErrNotDir(err) {
return errors.Trace(errFileAccessDenied) return errFileAccessDenied
} }
if isSysErrPathNotFound(err) { if isSysErrPathNotFound(err) {
return errors.Trace(errFileNotFound) return errFileNotFound
} }
return err return err
} }
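osErrToFSFileErr collapses platform-specific os errors into the backend's sentinel errors, which is what lets callers compare against errFileNotFound and friends. A standalone analogue with local sentinels (the real function also handles the not-a-directory and path-not-found syscall cases):

package main

import (
	"errors"
	"fmt"
	"os"
)

var (
	errFileNotFound     = errors.New("file not found")
	errFileAccessDenied = errors.New("file access denied")
)

// toFSErr maps os-level errors onto domain sentinels, like osErrToFSFileErr.
func toFSErr(err error) error {
	switch {
	case os.IsNotExist(err):
		return errFileNotFound
	case os.IsPermission(err):
		return errFileAccessDenied
	}
	return err
}

func main() {
	_, err := os.Open("/no/such/file")
	fmt.Println(toFSErr(err) == errFileNotFound) // true
}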
// Lookup if directory exists, returns directory attributes upon success. // Lookup if directory exists, returns directory attributes upon success.
func fsStatDir(statDir string) (os.FileInfo, error) { func fsStatDir(ctx context.Context, statDir string) (os.FileInfo, error) {
fi, err := fsStat(statDir) fi, err := fsStat(ctx, statDir)
if err != nil { if err != nil {
return nil, osErrToFSFileErr(err) return nil, osErrToFSFileErr(err)
} }
if !fi.IsDir() { if !fi.IsDir() {
return nil, errors.Trace(errFileAccessDenied) return nil, errFileAccessDenied
} }
return fi, nil return fi, nil
} }
// Lookup if file exists, returns file attributes upon success. // Lookup if file exists, returns file attributes upon success.
func fsStatFile(statFile string) (os.FileInfo, error) { func fsStatFile(ctx context.Context, statFile string) (os.FileInfo, error) {
fi, err := fsStat(statFile) fi, err := fsStat(ctx, statFile)
if err != nil { if err != nil {
return nil, osErrToFSFileErr(err) return nil, osErrToFSFileErr(err)
} }
if fi.IsDir() { if fi.IsDir() {
return nil, errors.Trace(errFileAccessDenied) logger.LogIf(ctx, errFileAccessDenied)
return nil, errFileAccessDenied
} }
return fi, nil return fi, nil
} }
// Opens the file at given path, optionally from an offset. Upon success returns // Opens the file at given path, optionally from an offset. Upon success returns
// a readable stream and the size of the readable stream. // a readable stream and the size of the readable stream.
func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) { func fsOpenFile(ctx context.Context, readPath string, offset int64) (io.ReadCloser, int64, error) {
if readPath == "" || offset < 0 { if readPath == "" || offset < 0 {
return nil, 0, errors.Trace(errInvalidArgument) logger.LogIf(ctx, errInvalidArgument)
return nil, 0, errInvalidArgument
} }
if err := checkPathLength(readPath); err != nil { if err := checkPathLength(readPath); err != nil {
return nil, 0, errors.Trace(err) logger.LogIf(ctx, err)
return nil, 0, err
} }
fr, err := os.Open((readPath)) fr, err := os.Open((readPath))
@ -229,19 +261,22 @@ func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
// Stat to get the size of the file at path. // Stat to get the size of the file at path.
st, err := os.Stat((readPath)) st, err := os.Stat((readPath))
if err != nil { if err != nil {
return nil, 0, errors.Trace(err) logger.LogIf(ctx, err)
return nil, 0, err
} }
// Verify if it's not a regular file, since subsequent Seek is undefined. // Verify if it's not a regular file, since subsequent Seek is undefined.
if !st.Mode().IsRegular() { if !st.Mode().IsRegular() {
return nil, 0, errors.Trace(errIsNotRegular) logger.LogIf(ctx, errIsNotRegular)
return nil, 0, errIsNotRegular
} }
// Seek to the requested offset. // Seek to the requested offset.
if offset > 0 { if offset > 0 {
_, err = fr.Seek(offset, os.SEEK_SET) _, err = fr.Seek(offset, os.SEEK_SET)
if err != nil { if err != nil {
return nil, 0, errors.Trace(err) logger.LogIf(ctx, err)
return nil, 0, err
} }
} }
@ -250,21 +285,25 @@ func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {
} }
// Creates a file and copies data from incoming reader. Staging buffer is used by io.CopyBuffer. // Creates a file and copies data from incoming reader. Staging buffer is used by io.CopyBuffer.
func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int64) (int64, error) { func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, buf []byte, fallocSize int64) (int64, error) {
if filePath == "" || reader == nil { if filePath == "" || reader == nil {
return 0, errors.Trace(errInvalidArgument) logger.LogIf(ctx, errInvalidArgument)
return 0, errInvalidArgument
} }
if err := checkPathLength(filePath); err != nil { if err := checkPathLength(filePath); err != nil {
return 0, errors.Trace(err) logger.LogIf(ctx, err)
return 0, err
} }
if err := mkdirAll(pathutil.Dir(filePath), 0777); err != nil { if err := mkdirAll(pathutil.Dir(filePath), 0777); err != nil {
return 0, errors.Trace(err) logger.LogIf(ctx, err)
return 0, err
} }
if err := checkDiskFree(pathutil.Dir(filePath), fallocSize); err != nil { if err := checkDiskFree(pathutil.Dir(filePath), fallocSize); err != nil {
return 0, errors.Trace(err) logger.LogIf(ctx, err)
return 0, err
} }
writer, err := lock.Open(filePath, os.O_CREATE|os.O_WRONLY, 0666) writer, err := lock.Open(filePath, os.O_CREATE|os.O_WRONLY, 0666)
@ -276,7 +315,8 @@ func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int6
// Fallocate only if the size of the final object is known. // Fallocate only if the size of the final object is known.
if fallocSize > 0 { if fallocSize > 0 {
if err = fsFAllocate(int(writer.Fd()), 0, fallocSize); err != nil { if err = fsFAllocate(int(writer.Fd()), 0, fallocSize); err != nil {
return 0, errors.Trace(err) logger.LogIf(ctx, err)
return 0, err
} }
} }
@ -284,12 +324,14 @@ func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int6
if buf != nil { if buf != nil {
bytesWritten, err = io.CopyBuffer(writer, reader, buf) bytesWritten, err = io.CopyBuffer(writer, reader, buf)
if err != nil { if err != nil {
return 0, errors.Trace(err) logger.LogIf(ctx, err)
return 0, err
} }
} else { } else {
bytesWritten, err = io.Copy(writer, reader) bytesWritten, err = io.Copy(writer, reader)
if err != nil { if err != nil {
return 0, errors.Trace(err) logger.LogIf(ctx, err)
return 0, err
} }
} }
@ -320,12 +362,14 @@ func fsFAllocate(fd int, offset int64, len int64) (err error) {
// Renames source path to destination path, creates all the // Renames source path to destination path, creates all the
// missing parents if they don't exist. // missing parents if they don't exist.
func fsRenameFile(sourcePath, destPath string) error { func fsRenameFile(ctx context.Context, sourcePath, destPath string) error {
if err := checkPathLength(sourcePath); err != nil { if err := checkPathLength(sourcePath); err != nil {
return errors.Trace(err) logger.LogIf(ctx, err)
return err
} }
if err := checkPathLength(destPath); err != nil { if err := checkPathLength(destPath); err != nil {
return errors.Trace(err) logger.LogIf(ctx, err)
return err
} }
// Verify if source path exists. // Verify if source path exists.
@ -334,27 +378,34 @@ func fsRenameFile(sourcePath, destPath string) error {
} }
if err := renameAll(sourcePath, destPath); err != nil { if err := renameAll(sourcePath, destPath); err != nil {
return errors.Trace(err) logger.LogIf(ctx, err)
return err
} }
return nil return nil
} }
// fsDeleteFile is a wrapper for deleteFile(), after checking the path length. // fsDeleteFile is a wrapper for deleteFile(), after checking the path length.
func fsDeleteFile(basePath, deletePath string) error { func fsDeleteFile(ctx context.Context, basePath, deletePath string) error {
if err := checkPathLength(basePath); err != nil { if err := checkPathLength(basePath); err != nil {
return errors.Trace(err) logger.LogIf(ctx, err)
return err
} }
if err := checkPathLength(deletePath); err != nil { if err := checkPathLength(deletePath); err != nil {
return errors.Trace(err) logger.LogIf(ctx, err)
return err
} }
return deleteFile(basePath, deletePath) if err := deleteFile(basePath, deletePath); err != nil {
logger.LogIf(ctx, err)
return err
}
return nil
} }
// fsRemoveMeta safely removes a locked file and takes care of the Windows special case // fsRemoveMeta safely removes a locked file and takes care of the Windows special case
func fsRemoveMeta(basePath, deletePath, tmpDir string) error { func fsRemoveMeta(ctx context.Context, basePath, deletePath, tmpDir string) error {
// Special case for windows please read through. // Special case for windows please read through.
if runtime.GOOS == globalWindowsOSName { if runtime.GOOS == globalWindowsOSName {
// Ordinarily windows does not permit deletion or renaming of files still // Ordinarily windows does not permit deletion or renaming of files still
@ -388,13 +439,13 @@ func fsRemoveMeta(basePath, deletePath, tmpDir string) error {
tmpPath := pathJoin(tmpDir, mustGetUUID()) tmpPath := pathJoin(tmpDir, mustGetUUID())
fsRenameFile(deletePath, tmpPath) fsRenameFile(ctx, deletePath, tmpPath)
// Proceed to deleting the directory if empty // Proceed to deleting the directory if empty
fsDeleteFile(basePath, pathutil.Dir(deletePath)) fsDeleteFile(ctx, basePath, pathutil.Dir(deletePath))
// Finally delete the renamed file. // Finally delete the renamed file.
return fsDeleteFile(tmpDir, tmpPath) return fsDeleteFile(ctx, tmpDir, tmpPath)
} }
return fsDeleteFile(basePath, deletePath) return fsDeleteFile(ctx, basePath, deletePath)
} }
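The Windows branch of fsRemoveMeta works around open-handle semantics: a file that is still open often cannot be deleted in place, but it can be renamed, so the file is first renamed into the tmp directory and deleted there, freeing its original name immediately. A standalone sketch of that rename-then-delete dance using plain os calls; the real helper names the temp file with mustGetUUID() and also prunes the emptied parent directory:

package main

import (
	"os"
	"path/filepath"
)

// removeByRename renames the target into tmpDir before deleting it, so the
// original path is freed even while another handle remains open.
func removeByRename(deletePath, tmpDir string) error {
	tmpPath := filepath.Join(tmpDir, "deleting-1f2e3d") // real code uses a UUID
	if err := os.Rename(deletePath, tmpPath); err != nil {
		return err
	}
	return os.Remove(tmpPath)
}

func main() {
	f, _ := os.Create("victim.txt")
	f.Close()
	os.MkdirAll("tmp", 0755)
	if err := removeByRename("victim.txt", "tmp"); err != nil {
		panic(err)
	}
}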

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
@ -36,19 +37,19 @@ func TestFSRenameFile(t *testing.T) {
} }
defer os.RemoveAll(path) defer os.RemoveAll(path)
if err = fsMkdir(pathJoin(path, "testvolume1")); err != nil { if err = fsMkdir(context.Background(), pathJoin(path, "testvolume1")); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); err != nil { if err = fsRenameFile(context.Background(), pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); errors.Cause(err) != errFileNotFound { if err = fsRenameFile(context.Background(), pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); errors.Cause(err) != errFileNotFound {
t.Fatal(err) t.Fatal(err)
} }
if err = fsRenameFile(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), pathJoin(path, "testvolume2")); errors.Cause(err) != errFileNameTooLong { if err = fsRenameFile(context.Background(), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), pathJoin(path, "testvolume2")); errors.Cause(err) != errFileNameTooLong {
t.Fatal("Unexpected error", err) t.Fatal("Unexpected error", err)
} }
if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errors.Cause(err) != errFileNameTooLong { if err = fsRenameFile(context.Background(), pathJoin(path, "testvolume1"), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errors.Cause(err) != errFileNameTooLong {
t.Fatal("Unexpected error", err) t.Fatal("Unexpected error", err)
} }
} }
@ -63,30 +64,30 @@ func TestFSStats(t *testing.T) {
// Setup test environment. // Setup test environment.
if err = fsMkdir(""); errors.Cause(err) != errInvalidArgument { if err = fsMkdir(context.Background(), ""); errors.Cause(err) != errInvalidArgument {
t.Fatal("Unexpected error", err) t.Fatal("Unexpected error", err)
} }
if err = fsMkdir(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errors.Cause(err) != errFileNameTooLong { if err = fsMkdir(context.Background(), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errors.Cause(err) != errFileNameTooLong {
t.Fatal("Unexpected error", err) t.Fatal("Unexpected error", err)
} }
if err = fsMkdir(pathJoin(path, "success-vol")); err != nil { if err = fsMkdir(context.Background(), pathJoin(path, "success-vol")); err != nil {
t.Fatalf("Unable to create volume, %s", err) t.Fatalf("Unable to create volume, %s", err)
} }
var reader = bytes.NewReader([]byte("Hello, world")) var reader = bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil { if _, err = fsCreateFile(context.Background(), pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
t.Fatalf("Unable to create file, %s", err) t.Fatalf("Unable to create file, %s", err)
} }
// Seek back. // Seek back.
reader.Seek(0, 0) reader.Seek(0, 0)
if err = fsMkdir(pathJoin(path, "success-vol", "success-file")); errors.Cause(err) != errVolumeExists { if err = fsMkdir(context.Background(), pathJoin(path, "success-vol", "success-file")); errors.Cause(err) != errVolumeExists {
t.Fatal("Unexpected error", err) t.Fatal("Unexpected error", err)
} }
if _, err = fsCreateFile(pathJoin(path, "success-vol", "path/to/success-file"), reader, nil, 0); err != nil { if _, err = fsCreateFile(context.Background(), pathJoin(path, "success-vol", "path/to/success-file"), reader, nil, 0); err != nil {
t.Fatalf("Unable to create file, %s", err) t.Fatalf("Unable to create file, %s", err)
} }
// Seek back. // Seek back.
@ -169,12 +170,12 @@ func TestFSStats(t *testing.T) {
for i, testCase := range testCases { for i, testCase := range testCases {
if testCase.srcPath != "" { if testCase.srcPath != "" {
if _, err := fsStatFile(pathJoin(testCase.srcFSPath, testCase.srcVol, if _, err := fsStatFile(context.Background(), pathJoin(testCase.srcFSPath, testCase.srcVol,
testCase.srcPath)); errors.Cause(err) != testCase.expectedErr { testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
} }
} else { } else {
if _, err := fsStatVolume(pathJoin(testCase.srcFSPath, testCase.srcVol)); errors.Cause(err) != testCase.expectedErr { if _, err := fsStatVolume(context.Background(), pathJoin(testCase.srcFSPath, testCase.srcVol)); errors.Cause(err) != testCase.expectedErr {
t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
} }
} }
@ -189,20 +190,20 @@ func TestFSCreateAndOpen(t *testing.T) {
} }
defer os.RemoveAll(path) defer os.RemoveAll(path)
if err = fsMkdir(pathJoin(path, "success-vol")); err != nil { if err = fsMkdir(context.Background(), pathJoin(path, "success-vol")); err != nil {
t.Fatalf("Unable to create directory, %s", err) t.Fatalf("Unable to create directory, %s", err)
} }
if _, err = fsCreateFile("", nil, nil, 0); errors.Cause(err) != errInvalidArgument { if _, err = fsCreateFile(context.Background(), "", nil, nil, 0); errors.Cause(err) != errInvalidArgument {
t.Fatal("Unexpected error", err) t.Fatal("Unexpected error", err)
} }
if _, _, err = fsOpenFile("", -1); errors.Cause(err) != errInvalidArgument { if _, _, err = fsOpenFile(context.Background(), "", -1); errors.Cause(err) != errInvalidArgument {
t.Fatal("Unexpected error", err) t.Fatal("Unexpected error", err)
} }
var reader = bytes.NewReader([]byte("Hello, world")) var reader = bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil { if _, err = fsCreateFile(context.Background(), pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
t.Fatalf("Unable to create file, %s", err) t.Fatalf("Unable to create file, %s", err)
} }
// Seek back. // Seek back.
@ -230,18 +231,18 @@ func TestFSCreateAndOpen(t *testing.T) {
} }
for i, testCase := range testCases { for i, testCase := range testCases {
_, err = fsCreateFile(pathJoin(path, testCase.srcVol, testCase.srcPath), reader, nil, 0) _, err = fsCreateFile(context.Background(), pathJoin(path, testCase.srcVol, testCase.srcPath), reader, nil, 0)
if errors.Cause(err) != testCase.expectedErr { if errors.Cause(err) != testCase.expectedErr {
t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
} }
_, _, err = fsOpenFile(pathJoin(path, testCase.srcVol, testCase.srcPath), 0) _, _, err = fsOpenFile(context.Background(), pathJoin(path, testCase.srcVol, testCase.srcPath), 0)
if errors.Cause(err) != testCase.expectedErr { if errors.Cause(err) != testCase.expectedErr {
t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
} }
} }
// Attempt to open a directory. // Attempt to open a directory.
if _, _, err = fsOpenFile(pathJoin(path), 0); errors.Cause(err) != errIsNotRegular { if _, _, err = fsOpenFile(context.Background(), pathJoin(path), 0); errors.Cause(err) != errIsNotRegular {
t.Fatal("Unexpected error", err) t.Fatal("Unexpected error", err)
} }
} }
@ -255,20 +256,20 @@ func TestFSDeletes(t *testing.T) {
defer os.RemoveAll(path) defer os.RemoveAll(path)
// Setup test environment. // Setup test environment.
if err = fsMkdir(pathJoin(path, "success-vol")); err != nil { if err = fsMkdir(context.Background(), pathJoin(path, "success-vol")); err != nil {
t.Fatalf("Unable to create directory, %s", err) t.Fatalf("Unable to create directory, %s", err)
} }
var buf = make([]byte, 4096) var buf = make([]byte, 4096)
var reader = bytes.NewReader([]byte("Hello, world")) var reader = bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, buf, reader.Size()); err != nil { if _, err = fsCreateFile(context.Background(), pathJoin(path, "success-vol", "success-file"), reader, buf, reader.Size()); err != nil {
t.Fatalf("Unable to create file, %s", err) t.Fatalf("Unable to create file, %s", err)
} }
// Seek back. // Seek back.
reader.Seek(0, io.SeekStart) reader.Seek(0, io.SeekStart)
// folder is not empty // folder is not empty
err = fsMkdir(pathJoin(path, "success-vol", "not-empty")) err = fsMkdir(context.Background(), pathJoin(path, "success-vol", "not-empty"))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -278,10 +279,10 @@ func TestFSDeletes(t *testing.T) {
} }
// recursive // recursive
if err = fsMkdir(pathJoin(path, "success-vol", "parent")); err != nil { if err = fsMkdir(context.Background(), pathJoin(path, "success-vol", "parent")); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err = fsMkdir(pathJoin(path, "success-vol", "parent", "dir")); err != nil { if err = fsMkdir(context.Background(), pathJoin(path, "success-vol", "parent", "dir")); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -343,7 +344,7 @@ func TestFSDeletes(t *testing.T) {
} }
for i, testCase := range testCases { for i, testCase := range testCases {
if err = fsDeleteFile(testCase.basePath, pathJoin(testCase.basePath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr { if err = fsDeleteFile(context.Background(), testCase.basePath, pathJoin(testCase.basePath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
} }
} }
@ -358,7 +359,7 @@ func BenchmarkFSDeleteFile(b *testing.B) {
defer os.RemoveAll(path) defer os.RemoveAll(path)
// Setup test environment. // Setup test environment.
if err = fsMkdir(pathJoin(path, "benchmark")); err != nil { if err = fsMkdir(context.Background(), pathJoin(path, "benchmark")); err != nil {
b.Fatalf("Unable to create directory, %s", err) b.Fatalf("Unable to create directory, %s", err)
} }
@ -375,7 +376,7 @@ func BenchmarkFSDeleteFile(b *testing.B) {
} }
b.StartTimer() b.StartTimer()
err = fsDeleteFile(benchDir, filename) err = fsDeleteFile(context.Background(), benchDir, filename)
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
@ -392,18 +393,18 @@ func TestFSRemoves(t *testing.T) {
defer os.RemoveAll(path) defer os.RemoveAll(path)
// Setup test environment. // Setup test environment.
if err = fsMkdir(pathJoin(path, "success-vol")); err != nil { if err = fsMkdir(context.Background(), pathJoin(path, "success-vol")); err != nil {
t.Fatalf("Unable to create directory, %s", err) t.Fatalf("Unable to create directory, %s", err)
} }
var reader = bytes.NewReader([]byte("Hello, world")) var reader = bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil { if _, err = fsCreateFile(context.Background(), pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
t.Fatalf("Unable to create file, %s", err) t.Fatalf("Unable to create file, %s", err)
} }
// Seek back. // Seek back.
reader.Seek(0, 0) reader.Seek(0, 0)
if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file-new"), reader, nil, 0); err != nil { if _, err = fsCreateFile(context.Background(), pathJoin(path, "success-vol", "success-file-new"), reader, nil, 0); err != nil {
t.Fatalf("Unable to create file, %s", err) t.Fatalf("Unable to create file, %s", err)
} }
// Seek back. // Seek back.
@ -477,25 +478,25 @@ func TestFSRemoves(t *testing.T) {
for i, testCase := range testCases { for i, testCase := range testCases {
if testCase.srcPath != "" { if testCase.srcPath != "" {
if err = fsRemoveFile(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr { if err = fsRemoveFile(context.Background(), pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
} }
} else { } else {
if err = fsRemoveDir(pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr { if err = fsRemoveDir(context.Background(), pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); errors.Cause(err) != testCase.expectedErr {
t.Error(err) t.Error(err)
} }
} }
} }
if err = fsRemoveAll(pathJoin(path, "success-vol")); err != nil { if err = fsRemoveAll(context.Background(), pathJoin(path, "success-vol")); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err = fsRemoveAll(""); errors.Cause(err) != errInvalidArgument { if err = fsRemoveAll(context.Background(), ""); errors.Cause(err) != errInvalidArgument {
t.Fatal(err) t.Fatal(err)
} }
if err = fsRemoveAll("my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); errors.Cause(err) != errFileNameTooLong { if err = fsRemoveAll(context.Background(), "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); errors.Cause(err) != errFileNameTooLong {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -509,14 +510,14 @@ func TestFSRemoveMeta(t *testing.T) {
defer os.RemoveAll(fsPath) defer os.RemoveAll(fsPath)
// Setup test environment. // Setup test environment.
if err = fsMkdir(pathJoin(fsPath, "success-vol")); err != nil { if err = fsMkdir(context.Background(), pathJoin(fsPath, "success-vol")); err != nil {
t.Fatalf("Unable to create directory, %s", err) t.Fatalf("Unable to create directory, %s", err)
} }
filePath := pathJoin(fsPath, "success-vol", "success-file") filePath := pathJoin(fsPath, "success-vol", "success-file")
var reader = bytes.NewReader([]byte("Hello, world")) var reader = bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(filePath, reader, nil, 0); err != nil { if _, err = fsCreateFile(context.Background(), filePath, reader, nil, 0); err != nil {
t.Fatalf("Unable to create file, %s", err) t.Fatalf("Unable to create file, %s", err)
} }
@ -535,7 +536,7 @@ func TestFSRemoveMeta(t *testing.T) {
t.Fatal(tmpErr) t.Fatal(tmpErr)
} }
if err := fsRemoveMeta(fsPath, filePath, tmpDir); err != nil { if err := fsRemoveMeta(context.Background(), fsPath, filePath, tmpDir); err != nil {
t.Fatalf("Unable to remove file, %s", err) t.Fatalf("Unable to remove file, %s", err)
} }

View File

@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"io" "io"
@ -25,7 +26,7 @@ import (
pathutil "path" pathutil "path"
"strings" "strings"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/lock" "github.com/minio/minio/pkg/lock"
"github.com/minio/minio/pkg/mimedb" "github.com/minio/minio/pkg/mimedb"
"github.com/tidwall/gjson" "github.com/tidwall/gjson"
@ -237,20 +238,23 @@ func parseFSPartsArray(fsMetaBuf []byte) []objectPartInfo {
return partsArray return partsArray
} }
func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) { func (m *fsMetaV1) ReadFrom(ctx context.Context, lk *lock.LockedFile) (n int64, err error) {
var fsMetaBuf []byte var fsMetaBuf []byte
fi, err := lk.Stat() fi, err := lk.Stat()
if err != nil { if err != nil {
return 0, errors.Trace(err) logger.LogIf(ctx, err)
return 0, err
} }
fsMetaBuf, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size())) fsMetaBuf, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
if err != nil { if err != nil {
return 0, errors.Trace(err) logger.LogIf(ctx, err)
return 0, err
} }
if len(fsMetaBuf) == 0 { if len(fsMetaBuf) == 0 {
return 0, errors.Trace(io.EOF) logger.LogIf(ctx, io.EOF)
return 0, io.EOF
} }
// obtain version. // obtain version.
@ -259,7 +263,9 @@ func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
// Verify if the format is valid, return corrupted format // Verify if the format is valid, return corrupted format
// for unrecognized formats. // for unrecognized formats.
if !isFSMetaValid(m.Version) { if !isFSMetaValid(m.Version) {
return 0, errors.Trace(errCorruptedFormat) logger.GetReqInfo(ctx).AppendTags("file", lk.Name())
logger.LogIf(ctx, errCorruptedFormat)
return 0, errCorruptedFormat
} }
// obtain parts information // obtain parts information
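
This hunk is the template the commit applies throughout: log once at the failure site, with whatever request information the context carries, and return the plain error instead of an errors.Trace wrapper. A minimal sketch of the convention (readConfigFile is a hypothetical helper, not part of this change):

package cmd

import (
	"context"
	"io"
	"io/ioutil"

	"github.com/minio/minio/cmd/logger"
)

// readConfigFile illustrates the post-migration error convention.
func readConfigFile(ctx context.Context, path string) ([]byte, error) {
	buf, err := ioutil.ReadFile(path)
	if err != nil {
		logger.LogIf(ctx, err) // logged once here, tagged via ctx's ReqInfo
		return nil, err        // caller receives the untraced error
	}
	if len(buf) == 0 {
		logger.LogIf(ctx, io.EOF)
		return nil, io.EOF
	}
	return buf, nil
}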

View File

@ -70,7 +70,7 @@ func TestReadFSMetadata(t *testing.T) {
// Regular fs metadata reading, no errors expected // Regular fs metadata reading, no errors expected
fsMeta := fsMetaV1{} fsMeta := fsMetaV1{}
if _, err = fsMeta.ReadFrom(rlk.LockedFile); err != nil { if _, err = fsMeta.ReadFrom(context.Background(), rlk.LockedFile); err != nil {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
} }
@ -105,7 +105,7 @@ func TestWriteFSMetadata(t *testing.T) {
// FS metadata reading, no errors expected (healthy disk) // FS metadata reading, no errors expected (healthy disk)
fsMeta := fsMetaV1{} fsMeta := fsMetaV1{}
_, err = fsMeta.ReadFrom(rlk.LockedFile) _, err = fsMeta.ReadFrom(context.Background(), rlk.LockedFile)
if err != nil { if err != nil {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }

View File

@ -29,6 +29,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
mioutil "github.com/minio/minio/pkg/ioutil" mioutil "github.com/minio/minio/pkg/ioutil"
@ -64,8 +65,9 @@ func (fs *FSObjects) decodePartFile(name string) (partNumber int, etag string, e
} }
// Appends parts to an appendFile sequentially. // Appends parts to an appendFile sequentially.
func (fs *FSObjects) backgroundAppend(bucket, object, uploadID string) { func (fs *FSObjects) backgroundAppend(ctx context.Context, bucket, object, uploadID string) {
fs.appendFileMapMu.Lock() fs.appendFileMapMu.Lock()
logger.GetReqInfo(ctx).AppendTags("uploadID", uploadID)
file := fs.appendFileMap[uploadID] file := fs.appendFileMap[uploadID]
if file == nil { if file == nil {
file = &fsAppendFile{ file = &fsAppendFile{
@ -84,7 +86,8 @@ func (fs *FSObjects) backgroundAppend(bucket, object, uploadID string) {
entries, err := readDir(uploadIDDir) entries, err := readDir(uploadIDDir)
if err != nil { if err != nil {
errorIf(err, "error reading directory %s", uploadIDDir) logger.GetReqInfo(ctx).AppendTags("uploadIDDir", uploadIDDir)
logger.LogIf(ctx, err)
return return
} }
sort.Strings(entries) sort.Strings(entries)
@ -95,7 +98,8 @@ func (fs *FSObjects) backgroundAppend(bucket, object, uploadID string) {
} }
partNumber, etag, err := fs.decodePartFile(entry) partNumber, etag, err := fs.decodePartFile(entry)
if err != nil { if err != nil {
errorIf(err, "unable to split the file name into partNumber and etag: %s", entry) logger.GetReqInfo(ctx).AppendTags("entry", entry)
logger.LogIf(ctx, err)
return return
} }
if partNumber < nextPartNumber { if partNumber < nextPartNumber {
@ -110,7 +114,9 @@ func (fs *FSObjects) backgroundAppend(bucket, object, uploadID string) {
partPath := pathJoin(uploadIDDir, entry) partPath := pathJoin(uploadIDDir, entry)
err = mioutil.AppendFile(file.filePath, partPath) err = mioutil.AppendFile(file.filePath, partPath)
if err != nil { if err != nil {
errorIf(err, "Unable to append %s to %s", partPath, file.filePath) reqInfo := logger.GetReqInfo(ctx).AppendTags("partPath", partPath)
reqInfo.AppendTags("filepath", file.filePath)
logger.LogIf(ctx, err)
return return
} }
@ -122,12 +128,12 @@ func (fs *FSObjects) backgroundAppend(bucket, object, uploadID string) {
// ListMultipartUploads - lists all the uploadIDs for the specified object. // ListMultipartUploads - lists all the uploadIDs for the specified object.
// We do not support prefix based listing. // We do not support prefix based listing.
func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) { func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) {
if err := checkListMultipartArgs(bucket, object, keyMarker, uploadIDMarker, delimiter, fs); err != nil { if err := checkListMultipartArgs(ctx, bucket, object, keyMarker, uploadIDMarker, delimiter, fs); err != nil {
return result, toObjectErr(errors.Trace(err)) return result, toObjectErr(err)
} }
if _, err := fs.statBucketDir(bucket); err != nil { if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return result, toObjectErr(errors.Trace(err), bucket) return result, toObjectErr(err, bucket)
} }
result.MaxUploads = maxUploads result.MaxUploads = maxUploads
@ -143,7 +149,8 @@ func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, k
result.IsTruncated = false result.IsTruncated = false
return result, nil return result, nil
} }
return result, toObjectErr(errors.Trace(err)) logger.LogIf(ctx, err)
return result, toObjectErr(err)
} }
// S3 spec says uploadIDs should be sorted based on initiated time. ModTime of fs.json // S3 spec says uploadIDs should be sorted based on initiated time. ModTime of fs.json
@ -151,7 +158,7 @@ func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, k
var uploads []MultipartInfo var uploads []MultipartInfo
for _, uploadID := range uploadIDs { for _, uploadID := range uploadIDs {
metaFilePath := pathJoin(fs.getMultipartSHADir(bucket, object), uploadID, fs.metaJSONFile) metaFilePath := pathJoin(fs.getMultipartSHADir(bucket, object), uploadID, fs.metaJSONFile)
fi, err := fsStatFile(metaFilePath) fi, err := fsStatFile(ctx, metaFilePath)
if err != nil { if err != nil {
return result, toObjectErr(err, bucket, object) return result, toObjectErr(err, bucket, object)
} }
@ -204,11 +211,11 @@ func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, k
// //
// Implements S3 compatible initiate multipart API. // Implements S3 compatible initiate multipart API.
func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string) (string, error) { func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string) (string, error) {
if err := checkNewMultipartArgs(bucket, object, fs); err != nil { if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil {
return "", toObjectErr(err, bucket) return "", toObjectErr(err, bucket)
} }
if _, err := fs.statBucketDir(bucket); err != nil { if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return "", toObjectErr(err, bucket) return "", toObjectErr(err, bucket)
} }
@ -217,7 +224,8 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
err := mkdirAll(uploadIDDir, 0755) err := mkdirAll(uploadIDDir, 0755)
if err != nil { if err != nil {
return "", errors.Trace(err) logger.LogIf(ctx, err)
return "", err
} }
// Initialize fs.json values. // Initialize fs.json values.
@ -226,11 +234,13 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
fsMetaBytes, err := json.Marshal(fsMeta) fsMetaBytes, err := json.Marshal(fsMeta)
if err != nil { if err != nil {
return "", errors.Trace(err) logger.LogIf(ctx, err)
return "", err
} }
if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0644); err != nil { if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0644); err != nil {
return "", errors.Trace(err) logger.LogIf(ctx, err)
return "", err
} }
return uploadID, nil return uploadID, nil
@ -242,22 +252,22 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
startOffset int64, length int64, srcInfo ObjectInfo) (pi PartInfo, e error) { startOffset int64, length int64, srcInfo ObjectInfo) (pi PartInfo, e error) {
if err := checkNewMultipartArgs(srcBucket, srcObject, fs); err != nil { if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, fs); err != nil {
return pi, toObjectErr(errors.Trace(err)) return pi, toObjectErr(err)
} }
// Initialize pipe. // Initialize pipe.
go func() { go func() {
if gerr := fs.GetObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil { if gerr := fs.GetObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil {
if gerr = srcInfo.Writer.Close(); gerr != nil { if gerr = srcInfo.Writer.Close(); gerr != nil {
errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject) logger.LogIf(ctx, gerr)
return return
} }
return return
} }
// Close writer explicitly signalling we wrote all data. // Close writer explicitly signalling we wrote all data.
if gerr := srcInfo.Writer.Close(); gerr != nil { if gerr := srcInfo.Writer.Close(); gerr != nil {
errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject) logger.LogIf(ctx, gerr)
return return
} }
}() }()
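
The goroutine above streams the source object into srcInfo.Writer, closing it either to abort on a read failure or to signal that all data was written; the consumer on the other end of the pipe unblocks in both cases. A standalone sketch of that close-to-signal pattern with io.Pipe (illustrative only, not the repo's writer type):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	pr, pw := io.Pipe()

	go func() {
		// Producer: stream the "object" into the pipe.
		if _, err := io.Copy(pw, strings.NewReader("hello, world")); err != nil {
			pw.CloseWithError(err) // abort: the reader observes this error
			return
		}
		pw.Close() // success: signals EOF, i.e. "we wrote all data"
	}()

	// Consumer blocks until the producer closes its end.
	data, err := ioutil.ReadAll(pr)
	fmt.Println(string(data), err) // hello, world <nil>
}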
@ -275,26 +285,27 @@ func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d
// written to '.minio.sys/tmp' location and safely renamed to // written to '.minio.sys/tmp' location and safely renamed to
// '.minio.sys/multipart' for each part. // '.minio.sys/multipart' for each part.
func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) { func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
if err := checkPutObjectPartArgs(bucket, object, fs); err != nil { if err := checkPutObjectPartArgs(ctx, bucket, object, fs); err != nil {
return pi, toObjectErr(errors.Trace(err), bucket) return pi, toObjectErr(err, bucket)
} }
if _, err := fs.statBucketDir(bucket); err != nil { if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return pi, toObjectErr(errors.Trace(err), bucket) return pi, toObjectErr(err, bucket)
} }
// Validate input data size and it can never be less than zero. // Validate input data size and it can never be less than zero.
if data.Size() < 0 { if data.Size() < 0 {
return pi, toObjectErr(errors.Trace(errInvalidArgument)) logger.LogIf(ctx, errInvalidArgument)
return pi, toObjectErr(errInvalidArgument)
} }
uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
// Just check if the uploadID exists to avoid copy if it doesn't. // Just check if the uploadID exists to avoid copy if it doesn't.
_, err := fsStatFile(pathJoin(uploadIDDir, fs.metaJSONFile)) _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil { if err != nil {
if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied { if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
return pi, errors.Trace(InvalidUploadID{UploadID: uploadID}) return pi, InvalidUploadID{UploadID: uploadID}
} }
return pi, toObjectErr(err, bucket, object) return pi, toObjectErr(err, bucket, object)
} }
@ -306,23 +317,23 @@ func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
buf := make([]byte, bufSize) buf := make([]byte, bufSize)
tmpPartPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID+"."+mustGetUUID()+"."+strconv.Itoa(partID)) tmpPartPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID+"."+mustGetUUID()+"."+strconv.Itoa(partID))
bytesWritten, err := fsCreateFile(tmpPartPath, data, buf, data.Size()) bytesWritten, err := fsCreateFile(ctx, tmpPartPath, data, buf, data.Size())
if err != nil { if err != nil {
fsRemoveFile(tmpPartPath) fsRemoveFile(ctx, tmpPartPath)
return pi, toObjectErr(err, minioMetaTmpBucket, tmpPartPath) return pi, toObjectErr(err, minioMetaTmpBucket, tmpPartPath)
} }
// Should return IncompleteBody{} error when reader has fewer // Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header. // bytes than specified in request header.
if bytesWritten < data.Size() { if bytesWritten < data.Size() {
fsRemoveFile(tmpPartPath) fsRemoveFile(ctx, tmpPartPath)
return pi, errors.Trace(IncompleteBody{}) return pi, IncompleteBody{}
} }
// Delete temporary part in case of failure. If // Delete temporary part in case of failure. If
// PutObjectPart succeeds then there would be nothing to // PutObjectPart succeeds then there would be nothing to
// delete in which case we just ignore the error. // delete in which case we just ignore the error.
defer fsRemoveFile(tmpPartPath) defer fsRemoveFile(ctx, tmpPartPath)
etag := hex.EncodeToString(data.MD5Current()) etag := hex.EncodeToString(data.MD5Current())
if etag == "" { if etag == "" {
@ -330,13 +341,13 @@ func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
} }
partPath := pathJoin(uploadIDDir, fs.encodePartFile(partID, etag)) partPath := pathJoin(uploadIDDir, fs.encodePartFile(partID, etag))
if err = fsRenameFile(tmpPartPath, partPath); err != nil { if err = fsRenameFile(ctx, tmpPartPath, partPath); err != nil {
return pi, toObjectErr(err, minioMetaMultipartBucket, partPath) return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
} }
go fs.backgroundAppend(bucket, object, uploadID) go fs.backgroundAppend(ctx, bucket, object, uploadID)
fi, err := fsStatFile(partPath) fi, err := fsStatFile(ctx, partPath)
if err != nil { if err != nil {
return pi, toObjectErr(err, minioMetaMultipartBucket, partPath) return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
} }
@ -356,8 +367,8 @@ func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
// ListPartsInfo structure is marshalled directly into XML and // ListPartsInfo structure is marshalled directly into XML and
// replied back to the client. // replied back to the client.
func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListPartsInfo, e error) { func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListPartsInfo, e error) {
if err := checkListPartsArgs(bucket, object, fs); err != nil { if err := checkListPartsArgs(ctx, bucket, object, fs); err != nil {
return result, toObjectErr(errors.Trace(err)) return result, toObjectErr(err)
} }
result.Bucket = bucket result.Bucket = bucket
result.Object = object result.Object = object
@ -366,22 +377,23 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
result.PartNumberMarker = partNumberMarker result.PartNumberMarker = partNumberMarker
// Check if bucket exists // Check if bucket exists
if _, err := fs.statBucketDir(bucket); err != nil { if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return result, toObjectErr(errors.Trace(err), bucket) return result, toObjectErr(err, bucket)
} }
uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
_, err := fsStatFile(pathJoin(uploadIDDir, fs.metaJSONFile)) _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil { if err != nil {
if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied { if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
return result, errors.Trace(InvalidUploadID{UploadID: uploadID}) return result, InvalidUploadID{UploadID: uploadID}
} }
return result, toObjectErr(errors.Trace(err), bucket, object) return result, toObjectErr(err, bucket, object)
} }
entries, err := readDir(uploadIDDir) entries, err := readDir(uploadIDDir)
if err != nil { if err != nil {
return result, toObjectErr(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return result, toObjectErr(err, bucket)
} }
partsMap := make(map[int]string) partsMap := make(map[int]string)
@ -391,20 +403,21 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
} }
partNumber, etag1, derr := fs.decodePartFile(entry) partNumber, etag1, derr := fs.decodePartFile(entry)
if derr != nil { if derr != nil {
return result, toObjectErr(errors.Trace(derr)) logger.LogIf(ctx, derr)
return result, toObjectErr(derr)
} }
etag2, ok := partsMap[partNumber] etag2, ok := partsMap[partNumber]
if !ok { if !ok {
partsMap[partNumber] = etag1 partsMap[partNumber] = etag1
continue continue
} }
stat1, serr := fsStatFile(pathJoin(uploadIDDir, fs.encodePartFile(partNumber, etag1))) stat1, serr := fsStatFile(ctx, pathJoin(uploadIDDir, fs.encodePartFile(partNumber, etag1)))
if serr != nil { if serr != nil {
return result, toObjectErr(errors.Trace(serr)) return result, toObjectErr(serr)
} }
stat2, serr := fsStatFile(pathJoin(uploadIDDir, fs.encodePartFile(partNumber, etag2))) stat2, serr := fsStatFile(ctx, pathJoin(uploadIDDir, fs.encodePartFile(partNumber, etag2)))
if serr != nil { if serr != nil {
return result, toObjectErr(errors.Trace(serr)) return result, toObjectErr(serr)
} }
if stat1.ModTime().After(stat2.ModTime()) { if stat1.ModTime().After(stat2.ModTime()) {
partsMap[partNumber] = etag1 partsMap[partNumber] = etag1
@ -443,9 +456,9 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
} }
for i, part := range result.Parts { for i, part := range result.Parts {
var stat os.FileInfo var stat os.FileInfo
stat, err = fsStatFile(pathJoin(uploadIDDir, fs.encodePartFile(part.PartNumber, part.ETag))) stat, err = fsStatFile(ctx, pathJoin(uploadIDDir, fs.encodePartFile(part.PartNumber, part.ETag)))
if err != nil { if err != nil {
return result, toObjectErr(errors.Trace(err)) return result, toObjectErr(err)
} }
result.Parts[i].LastModified = stat.ModTime() result.Parts[i].LastModified = stat.ModTime()
result.Parts[i].Size = stat.Size() result.Parts[i].Size = stat.Size()
@ -453,7 +466,8 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
fsMetaBytes, err := ioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile)) fsMetaBytes, err := ioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil { if err != nil {
return result, errors.Trace(err) logger.LogIf(ctx, err)
return result, err
} }
result.UserDefined = parseFSMetaMap(fsMetaBytes) result.UserDefined = parseFSMetaMap(fsMetaBytes)
@ -467,31 +481,31 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
// //
// Implements S3 compatible Complete multipart API. // Implements S3 compatible Complete multipart API.
func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart) (oi ObjectInfo, e error) { func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart) (oi ObjectInfo, e error) {
if err := checkCompleteMultipartArgs(bucket, object, fs); err != nil { if err := checkCompleteMultipartArgs(ctx, bucket, object, fs); err != nil {
return oi, toObjectErr(err) return oi, toObjectErr(err)
} }
// Check if an object is present as one of the parent dirs. // Check if an object is present as one of the parent dirs.
if fs.parentDirIsObject(bucket, pathutil.Dir(object)) { if fs.parentDirIsObject(ctx, bucket, pathutil.Dir(object)) {
return oi, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object) return oi, toObjectErr(errFileAccessDenied, bucket, object)
} }
if _, err := fs.statBucketDir(bucket); err != nil { if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return oi, toObjectErr(err, bucket) return oi, toObjectErr(err, bucket)
} }
uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
// Just check if the uploadID exists to avoid copy if it doesn't. // Just check if the uploadID exists to avoid copy if it doesn't.
_, err := fsStatFile(pathJoin(uploadIDDir, fs.metaJSONFile)) _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil { if err != nil {
if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied { if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
return oi, errors.Trace(InvalidUploadID{UploadID: uploadID}) return oi, InvalidUploadID{UploadID: uploadID}
} }
return oi, toObjectErr(err, bucket, object) return oi, toObjectErr(err, bucket, object)
} }
// Calculate s3 compatible md5sum for complete multipart. // Calculate s3 compatible md5sum for complete multipart.
s3MD5, err := getCompleteMultipartMD5(parts) s3MD5, err := getCompleteMultipartMD5(ctx, parts)
if err != nil { if err != nil {
return oi, err return oi, err
} }
@ -507,12 +521,12 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
for i, part := range parts { for i, part := range parts {
partPath := pathJoin(uploadIDDir, fs.encodePartFile(part.PartNumber, part.ETag)) partPath := pathJoin(uploadIDDir, fs.encodePartFile(part.PartNumber, part.ETag))
var fi os.FileInfo var fi os.FileInfo
fi, err = fsStatFile(partPath) fi, err = fsStatFile(ctx, partPath)
if err != nil { if err != nil {
if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied { if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
return oi, errors.Trace(InvalidPart{}) return oi, InvalidPart{}
} }
return oi, errors.Trace(err) return oi, err
} }
if partSize == -1 { if partSize == -1 {
partSize = fi.Size() partSize = fi.Size()
@ -530,11 +544,13 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
// All parts except the last part have to be at least 5MB. // All parts except the last part have to be at least 5MB.
if !isMinAllowedPartSize(fi.Size()) { if !isMinAllowedPartSize(fi.Size()) {
return oi, errors.Trace(PartTooSmall{ err = PartTooSmall{
PartNumber: part.PartNumber, PartNumber: part.PartNumber,
PartSize: fi.Size(), PartSize: fi.Size(),
PartETag: part.ETag, PartETag: part.ETag,
}) }
logger.LogIf(ctx, err)
return oi, err
} }
// TODO: Make necessary changes in future as explained in the below comment. // TODO: Make necessary changes in future as explained in the below comment.
@ -545,7 +561,8 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
// CompleteMultipartUpload we already have the full file available which can be // CompleteMultipartUpload we already have the full file available which can be
// renamed to the main name-space. // renamed to the main name-space.
if partSize != fi.Size() { if partSize != fi.Size() {
return oi, errors.Trace(PartsSizeUnequal{}) logger.LogIf(ctx, PartsSizeUnequal{})
return oi, PartsSizeUnequal{}
} }
} }
@ -557,7 +574,7 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
// 1. The last PutObjectPart triggers the go-routine fs.backgroundAppend, but it has not started yet. // 1. The last PutObjectPart triggers the go-routine fs.backgroundAppend, but it has not started yet.
// 2. Now CompleteMultipartUpload gets called which sees that lastPart is not appended and starts appending // 2. Now CompleteMultipartUpload gets called which sees that lastPart is not appended and starts appending
// from the beginning // from the beginning
fs.backgroundAppend(bucket, object, uploadID) fs.backgroundAppend(ctx, bucket, object, uploadID)
fs.appendFileMapMu.Lock() fs.appendFileMapMu.Lock()
file := fs.appendFileMap[uploadID] file := fs.appendFileMap[uploadID]
@ -585,12 +602,13 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
} }
if appendFallback { if appendFallback {
fsRemoveFile(file.filePath) fsRemoveFile(ctx, file.filePath)
for _, part := range parts { for _, part := range parts {
partPath := pathJoin(uploadIDDir, fs.encodePartFile(part.PartNumber, part.ETag)) partPath := pathJoin(uploadIDDir, fs.encodePartFile(part.PartNumber, part.ETag))
err = mioutil.AppendFile(appendFilePath, partPath) err = mioutil.AppendFile(appendFilePath, partPath)
if err != nil { if err != nil {
return oi, toObjectErr(errors.Trace(err)) logger.LogIf(ctx, err)
return oi, toObjectErr(err)
} }
} }
} }
@ -604,18 +622,21 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile)
metaFile, err := fs.rwPool.Create(fsMetaPath) metaFile, err := fs.rwPool.Create(fsMetaPath)
if err != nil { if err != nil {
return oi, toObjectErr(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return oi, toObjectErr(err, bucket, object)
} }
defer metaFile.Close() defer metaFile.Close()
// Read saved fs metadata for ongoing multipart. // Read saved fs metadata for ongoing multipart.
fsMetaBuf, err := ioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile)) fsMetaBuf, err := ioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil { if err != nil {
return oi, toObjectErr(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return oi, toObjectErr(err, bucket, object)
} }
err = json.Unmarshal(fsMetaBuf, &fsMeta) err = json.Unmarshal(fsMetaBuf, &fsMeta)
if err != nil { if err != nil {
return oi, toObjectErr(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return oi, toObjectErr(err, bucket, object)
} }
// Save additional metadata. // Save additional metadata.
if len(fsMeta.Meta) == 0 { if len(fsMeta.Meta) == 0 {
@ -623,24 +644,26 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
} }
fsMeta.Meta["etag"] = s3MD5 fsMeta.Meta["etag"] = s3MD5
if _, err = fsMeta.WriteTo(metaFile); err != nil { if _, err = fsMeta.WriteTo(metaFile); err != nil {
return oi, toObjectErr(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return oi, toObjectErr(err, bucket, object)
} }
// Deny if WORM is enabled // Deny if WORM is enabled
if globalWORMEnabled { if globalWORMEnabled {
if _, err = fsStatFile(pathJoin(fs.fsPath, bucket, object)); err == nil { if _, err = fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object)); err == nil {
return ObjectInfo{}, errors.Trace(ObjectAlreadyExists{Bucket: bucket, Object: object}) return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
} }
} }
err = fsRenameFile(appendFilePath, pathJoin(fs.fsPath, bucket, object)) err = fsRenameFile(ctx, appendFilePath, pathJoin(fs.fsPath, bucket, object))
if err != nil { if err != nil {
return oi, toObjectErr(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return oi, toObjectErr(err, bucket, object)
} }
fsRemoveAll(uploadIDDir) fsRemoveAll(ctx, uploadIDDir)
fi, err := fsStatFile(pathJoin(fs.fsPath, bucket, object)) fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
if err != nil { if err != nil {
return oi, toObjectErr(errors.Trace(err), bucket, object) return oi, toObjectErr(err, bucket, object)
} }
return fsMeta.ToObjectInfo(bucket, object, fi), nil return fsMeta.ToObjectInfo(bucket, object, fi), nil
@ -659,12 +682,12 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
// no effect and further requests to the same uploadID would not be // no effect and further requests to the same uploadID would not be
// honored. // honored.
func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
if err := checkAbortMultipartArgs(bucket, object, fs); err != nil { if err := checkAbortMultipartArgs(ctx, bucket, object, fs); err != nil {
return err return err
} }
if _, err := fs.statBucketDir(bucket); err != nil { if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return toObjectErr(errors.Trace(err), bucket) return toObjectErr(err, bucket)
} }
fs.appendFileMapMu.Lock() fs.appendFileMapMu.Lock()
@ -673,16 +696,16 @@ func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, u
uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
// Just check if the uploadID exists to avoid copy if it doesn't. // Just check if the uploadID exists to avoid copy if it doesn't.
_, err := fsStatFile(pathJoin(uploadIDDir, fs.metaJSONFile)) _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil { if err != nil {
if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied { if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
return errors.Trace(InvalidUploadID{UploadID: uploadID}) return InvalidUploadID{UploadID: uploadID}
} }
return toObjectErr(errors.Trace(err), bucket, object) return toObjectErr(err, bucket, object)
} }
// Ignore the error returned as Windows fails to remove a directory if a file in it // Ignore the error returned as Windows fails to remove a directory if a file in it
// is Open()ed by the backgroundAppend() // is Open()ed by the backgroundAppend()
fsRemoveAll(uploadIDDir) fsRemoveAll(ctx, uploadIDDir)
return nil return nil
} }
@ -690,7 +713,7 @@ func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, u
// Removes multipart uploads if any older than `expiry` duration // Removes multipart uploads if any older than `expiry` duration
// on all buckets for every `cleanupInterval`, this function is // on all buckets for every `cleanupInterval`, this function is
// blocking and should be run in a go-routine. // blocking and should be run in a go-routine.
func (fs *FSObjects) cleanupStaleMultipartUploads(cleanupInterval, expiry time.Duration, doneCh chan struct{}) { func (fs *FSObjects) cleanupStaleMultipartUploads(ctx context.Context, cleanupInterval, expiry time.Duration, doneCh chan struct{}) {
ticker := time.NewTicker(cleanupInterval) ticker := time.NewTicker(cleanupInterval)
for { for {
select { select {
@ -710,12 +733,12 @@ func (fs *FSObjects) cleanupStaleMultipartUploads(cleanupInterval, expiry time.D
continue continue
} }
for _, uploadID := range uploadIDs { for _, uploadID := range uploadIDs {
fi, err := fsStatDir(pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID)) fi, err := fsStatDir(ctx, pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID))
if err != nil { if err != nil {
continue continue
} }
if now.Sub(fi.ModTime()) > expiry { if now.Sub(fi.ModTime()) > expiry {
fsRemoveAll(pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID)) fsRemoveAll(ctx, pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID))
} }
} }
} }
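
cleanupStaleMultipartUploads blocks on its ticker until doneCh fires, which is why NewFSObjectLayer (below) launches it on its own goroutine. A self-contained sketch of the same ticker-plus-done shape, with illustrative durations:

package main

import (
	"fmt"
	"time"
)

// cleanupLoop mirrors the control flow of cleanupStaleMultipartUploads.
func cleanupLoop(cleanupInterval, expiry time.Duration, doneCh chan struct{}) {
	ticker := time.NewTicker(cleanupInterval)
	defer ticker.Stop()
	for {
		select {
		case <-doneCh:
			return
		case now := <-ticker.C:
			// The real code walks minioMetaMultipartBucket here and removes
			// any upload whose ModTime is older than expiry.
			fmt.Println("sweep at", now.Format(time.StampMilli))
		}
	}
}

func main() {
	doneCh := make(chan struct{})
	go cleanupLoop(20*time.Millisecond, time.Hour, doneCh)
	time.Sleep(50 * time.Millisecond) // allow a couple of sweeps
	close(doneCh)
}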

View File

@ -49,7 +49,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
t.Fatal("Unexpected err: ", err) t.Fatal("Unexpected err: ", err)
} }
go fs.cleanupStaleMultipartUploads(20*time.Millisecond, 0, globalServiceDoneCh) go fs.cleanupStaleMultipartUploads(context.Background(), 20*time.Millisecond, 0, globalServiceDoneCh)
// Wait for 40ms such that - we have given enough time for // Wait for 40ms such that - we have given enough time for
// cleanup routine to kick in. // cleanup routine to kick in.

View File

@ -17,10 +17,12 @@
package cmd package cmd
import ( import (
"context"
"os" "os"
pathutil "path" pathutil "path"
"sync" "sync"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/lock" "github.com/minio/minio/pkg/lock"
) )
@ -48,7 +50,9 @@ func (fsi *fsIOPool) lookupToRead(path string) (*lock.RLockedFile, bool) {
// If the file is closed but not removed from the map, that is a bug. // If the file is closed but not removed from the map, that is a bug.
if rlkFile.IsClosed() { if rlkFile.IsClosed() {
// Log this as an error. // Log this as an error.
errorIf(errUnexpected, "Unexpected entry found on the map %s", path) reqInfo := (&logger.ReqInfo{}).AppendTags("path", path)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, errUnexpected)
// Purge the cached lock path from map. // Purge the cached lock path from map.
delete(fsi.readersMap, path) delete(fsi.readersMap, path)
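
Call sites like this one have no request-scoped context to hand, so the commit builds a fresh ReqInfo, attaches it with SetReqInfo, and lets LogIf pull the tags back out when it emits the entry. A compact sketch of that flow (the helper and error value are hypothetical):

package cmd

import (
	"context"
	"errors"

	"github.com/minio/minio/cmd/logger"
)

var errUnexpectedEntry = errors.New("unexpected entry found on the map")

// logUnexpectedEntry shows how a tag travels from ReqInfo into LogIf's output.
func logUnexpectedEntry(path string) {
	reqInfo := (&logger.ReqInfo{}).AppendTags("path", path)
	ctx := logger.SetReqInfo(context.Background(), reqInfo)
	logger.LogIf(ctx, errUnexpectedEntry)
}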

View File

@ -32,6 +32,7 @@ import (
"time" "time"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/lock" "github.com/minio/minio/pkg/lock"
@ -97,6 +98,7 @@ func initMetaVolumeFS(fsPath, fsUUID string) error {
// NewFSObjectLayer - initialize new fs object layer. // NewFSObjectLayer - initialize new fs object layer.
func NewFSObjectLayer(fsPath string) (ObjectLayer, error) { func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
ctx := context.Background()
if fsPath == "" { if fsPath == "" {
return nil, errInvalidArgument return nil, errInvalidArgument
} }
@ -142,7 +144,7 @@ func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
} }
// Initialize `format.json`, this function also returns. // Initialize `format.json`, this function also returns.
rlk, err := initFormatFS(fsPath) rlk, err := initFormatFS(ctx, fsPath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -177,7 +179,7 @@ func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
return nil, fmt.Errorf("Unable to initialize event notification. %s", err) return nil, fmt.Errorf("Unable to initialize event notification. %s", err)
} }
go fs.cleanupStaleMultipartUploads(globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh) go fs.cleanupStaleMultipartUploads(ctx, globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
// Return successfully initialized object layer. // Return successfully initialized object layer.
return fs, nil return fs, nil
@ -188,13 +190,14 @@ func (fs *FSObjects) Shutdown(ctx context.Context) error {
fs.fsFormatRlk.Close() fs.fsFormatRlk.Close()
// Cleanup and delete tmp uuid. // Cleanup and delete tmp uuid.
return fsRemoveAll(pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)) return fsRemoveAll(ctx, pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID))
} }
// StorageInfo - returns underlying storage statistics. // StorageInfo - returns underlying storage statistics.
func (fs *FSObjects) StorageInfo(ctx context.Context) StorageInfo { func (fs *FSObjects) StorageInfo(ctx context.Context) StorageInfo {
info, err := getDiskInfo((fs.fsPath)) info, err := getDiskInfo((fs.fsPath))
errorIf(err, "Unable to get disk info %#v", fs.fsPath) logger.GetReqInfo(ctx).AppendTags("path", fs.fsPath)
logger.LogIf(ctx, err)
storageInfo := StorageInfo{ storageInfo := StorageInfo{
Total: info.Total, Total: info.Total,
Free: info.Free, Free: info.Free,
@ -220,22 +223,24 @@ func (fs *FSObjects) ClearLocks(ctx context.Context, info []VolumeLockInfo) erro
// getBucketDir - will convert incoming bucket names to // getBucketDir - will convert incoming bucket names to
// corresponding valid bucket names on the backend in a platform // corresponding valid bucket names on the backend in a platform
// compatible way for all operating systems. // compatible way for all operating systems.
func (fs *FSObjects) getBucketDir(bucket string) (string, error) { func (fs *FSObjects) getBucketDir(ctx context.Context, bucket string) (string, error) {
// Verify if bucket is valid. // Verify if bucket is valid.
if !IsValidBucketName(bucket) { if !IsValidBucketName(bucket) {
return "", errors.Trace(BucketNameInvalid{Bucket: bucket}) err := BucketNameInvalid{Bucket: bucket}
logger.LogIf(ctx, err)
return "", err
} }
bucketDir := pathJoin(fs.fsPath, bucket) bucketDir := pathJoin(fs.fsPath, bucket)
return bucketDir, nil return bucketDir, nil
} }
func (fs *FSObjects) statBucketDir(bucket string) (os.FileInfo, error) { func (fs *FSObjects) statBucketDir(ctx context.Context, bucket string) (os.FileInfo, error) {
bucketDir, err := fs.getBucketDir(bucket) bucketDir, err := fs.getBucketDir(ctx, bucket)
if err != nil { if err != nil {
return nil, err return nil, err
} }
st, err := fsStatVolume(bucketDir) st, err := fsStatVolume(ctx, bucketDir)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -250,12 +255,12 @@ func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket, locatio
return err return err
} }
defer bucketLock.Unlock() defer bucketLock.Unlock()
bucketDir, err := fs.getBucketDir(bucket) bucketDir, err := fs.getBucketDir(ctx, bucket)
if err != nil { if err != nil {
return toObjectErr(err, bucket) return toObjectErr(err, bucket)
} }
if err = fsMkdir(bucketDir); err != nil { if err = fsMkdir(ctx, bucketDir); err != nil {
return toObjectErr(err, bucket) return toObjectErr(err, bucket)
} }
@ -269,7 +274,7 @@ func (fs *FSObjects) GetBucketInfo(ctx context.Context, bucket string) (bi Bucke
return bi, e return bi, e
} }
defer bucketLock.RUnlock() defer bucketLock.RUnlock()
st, err := fs.statBucketDir(bucket) st, err := fs.statBucketDir(ctx, bucket)
if err != nil { if err != nil {
return bi, toObjectErr(err, bucket) return bi, toObjectErr(err, bucket)
} }
@ -285,12 +290,14 @@ func (fs *FSObjects) GetBucketInfo(ctx context.Context, bucket string) (bi Bucke
// ListBuckets - list all s3 compatible buckets (directories) at fsPath. // ListBuckets - list all s3 compatible buckets (directories) at fsPath.
func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) { func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
if err := checkPathLength(fs.fsPath); err != nil { if err := checkPathLength(fs.fsPath); err != nil {
return nil, errors.Trace(err) logger.LogIf(ctx, err)
return nil, err
} }
var bucketInfos []BucketInfo var bucketInfos []BucketInfo
entries, err := readDir((fs.fsPath)) entries, err := readDir((fs.fsPath))
if err != nil { if err != nil {
return nil, toObjectErr(errors.Trace(errDiskNotFound)) logger.LogIf(ctx, errDiskNotFound)
return nil, toObjectErr(errDiskNotFound)
} }
for _, entry := range entries { for _, entry := range entries {
@ -299,7 +306,7 @@ func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
continue continue
} }
var fi os.FileInfo var fi os.FileInfo
fi, err = fsStatVolume(pathJoin(fs.fsPath, entry)) fi, err = fsStatVolume(ctx, pathJoin(fs.fsPath, entry))
// There seems to be no practical reason to check for errors // There seems to be no practical reason to check for errors
// at this point, if there are indeed errors we can simply // at this point, if there are indeed errors we can simply
// just ignore such buckets and list only those which // just ignore such buckets and list only those which
@ -327,27 +334,28 @@ func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string) error { func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string) error {
bucketLock := fs.nsMutex.NewNSLock(bucket, "") bucketLock := fs.nsMutex.NewNSLock(bucket, "")
if err := bucketLock.GetLock(globalObjectTimeout); err != nil { if err := bucketLock.GetLock(globalObjectTimeout); err != nil {
logger.LogIf(ctx, err)
return err return err
} }
defer bucketLock.Unlock() defer bucketLock.Unlock()
bucketDir, err := fs.getBucketDir(bucket) bucketDir, err := fs.getBucketDir(ctx, bucket)
if err != nil { if err != nil {
return toObjectErr(err, bucket) return toObjectErr(err, bucket)
} }
// Attempt to delete regular bucket. // Attempt to delete regular bucket.
if err = fsRemoveDir(bucketDir); err != nil { if err = fsRemoveDir(ctx, bucketDir); err != nil {
return toObjectErr(err, bucket) return toObjectErr(err, bucket)
} }
// Cleanup all the bucket metadata. // Cleanup all the bucket metadata.
minioMetadataBucketDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket) minioMetadataBucketDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket)
if err = fsRemoveAll(minioMetadataBucketDir); err != nil { if err = fsRemoveAll(ctx, minioMetadataBucketDir); err != nil {
return toObjectErr(err, bucket) return toObjectErr(err, bucket)
} }
// Delete all bucket metadata. // Delete all bucket metadata.
deleteBucketMetadata(bucket, fs) deleteBucketMetadata(ctx, bucket, fs)
return nil return nil
} }
@ -380,7 +388,7 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
} }
defer objectSRLock.RUnlock() defer objectSRLock.RUnlock()
} }
if _, err := fs.statBucketDir(srcBucket); err != nil { if _, err := fs.statBucketDir(ctx, srcBucket); err != nil {
return oi, toObjectErr(err, srcBucket) return oi, toObjectErr(err, srcBucket)
} }
@ -391,14 +399,15 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, srcBucket, srcObject, fs.metaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, srcBucket, srcObject, fs.metaJSONFile)
wlk, err := fs.rwPool.Write(fsMetaPath) wlk, err := fs.rwPool.Write(fsMetaPath)
if err != nil { if err != nil {
return oi, toObjectErr(errors.Trace(err), srcBucket, srcObject) logger.LogIf(ctx, err)
return oi, toObjectErr(err, srcBucket, srcObject)
} }
// This close will allow for locks to be synchronized on `fs.json`. // This close will allow for locks to be synchronized on `fs.json`.
defer wlk.Close() defer wlk.Close()
// Save objects' metadata in `fs.json`. // Save objects' metadata in `fs.json`.
fsMeta := newFSMetaV1() fsMeta := newFSMetaV1()
if _, err = fsMeta.ReadFrom(wlk); err != nil { if _, err = fsMeta.ReadFrom(ctx, wlk); err != nil {
return oi, toObjectErr(err, srcBucket, srcObject) return oi, toObjectErr(err, srcBucket, srcObject)
} }
@ -409,7 +418,7 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
} }
// Stat the file to get file size. // Stat the file to get file size.
fi, err := fsStatFile(pathJoin(fs.fsPath, srcBucket, srcObject)) fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, srcBucket, srcObject))
if err != nil { if err != nil {
return oi, toObjectErr(err, srcBucket, srcObject) return oi, toObjectErr(err, srcBucket, srcObject)
} }
@ -419,20 +428,20 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
} }
go func() { go func() {
if gerr := fs.getObject(srcBucket, srcObject, 0, srcInfo.Size, srcInfo.Writer, srcInfo.ETag, !cpSrcDstSame); gerr != nil { if gerr := fs.getObject(ctx, srcBucket, srcObject, 0, srcInfo.Size, srcInfo.Writer, srcInfo.ETag, !cpSrcDstSame); gerr != nil {
if gerr = srcInfo.Writer.Close(); gerr != nil { if gerr = srcInfo.Writer.Close(); gerr != nil {
errorIf(gerr, "Unable to read the object %s/%s.", srcBucket, srcObject) logger.LogIf(ctx, gerr)
} }
return return
} }
// Close writer explicitly signalling we wrote all data. // Close writer explicitly signalling we wrote all data.
if gerr := srcInfo.Writer.Close(); gerr != nil { if gerr := srcInfo.Writer.Close(); gerr != nil {
errorIf(gerr, "Unable to read the object %s/%s.", srcBucket, srcObject) logger.LogIf(ctx, gerr)
return return
} }
}() }()
objInfo, err := fs.putObject(dstBucket, dstObject, srcInfo.Reader, srcInfo.UserDefined) objInfo, err := fs.putObject(ctx, dstBucket, dstObject, srcInfo.Reader, srcInfo.UserDefined)
if err != nil { if err != nil {
return oi, toObjectErr(err, dstBucket, dstObject) return oi, toObjectErr(err, dstBucket, dstObject)
} }
@ -447,39 +456,43 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
// startOffset indicates the starting read location of the object. // startOffset indicates the starting read location of the object.
// length indicates the total length of the object. // length indicates the total length of the object.
func (fs *FSObjects) GetObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string) (err error) { func (fs *FSObjects) GetObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string) (err error) {
if err = checkGetObjArgs(bucket, object); err != nil { if err = checkGetObjArgs(ctx, bucket, object); err != nil {
return err return err
} }
// Lock the object before reading. // Lock the object before reading.
objectLock := fs.nsMutex.NewNSLock(bucket, object) objectLock := fs.nsMutex.NewNSLock(bucket, object)
if err := objectLock.GetRLock(globalObjectTimeout); err != nil { if err := objectLock.GetRLock(globalObjectTimeout); err != nil {
logger.LogIf(ctx, err)
return err return err
} }
defer objectLock.RUnlock() defer objectLock.RUnlock()
return fs.getObject(bucket, object, offset, length, writer, etag, true) return fs.getObject(ctx, bucket, object, offset, length, writer, etag, true)
} }
// getObject - wrapper for GetObject // getObject - wrapper for GetObject
func (fs *FSObjects) getObject(bucket, object string, offset int64, length int64, writer io.Writer, etag string, lock bool) (err error) { func (fs *FSObjects) getObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string, lock bool) (err error) {
if _, err = fs.statBucketDir(bucket); err != nil { if _, err = fs.statBucketDir(ctx, bucket); err != nil {
return toObjectErr(err, bucket) return toObjectErr(err, bucket)
} }
// Offset cannot be negative. // Offset cannot be negative.
if offset < 0 { if offset < 0 {
return toObjectErr(errors.Trace(errUnexpected), bucket, object) logger.LogIf(ctx, errUnexpected)
return toObjectErr(errUnexpected, bucket, object)
} }
// Writer cannot be nil. // Writer cannot be nil.
if writer == nil { if writer == nil {
return toObjectErr(errors.Trace(errUnexpected), bucket, object) logger.LogIf(ctx, errUnexpected)
return toObjectErr(errUnexpected, bucket, object)
} }
// If it's a directory request, we return an empty body. // If it's a directory request, we return an empty body.
if hasSuffix(object, slashSeparator) { if hasSuffix(object, slashSeparator) {
_, err = writer.Write([]byte("")) _, err = writer.Write([]byte(""))
return toObjectErr(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return toObjectErr(err, bucket, object)
} }
if bucket != minioMetaBucket { if bucket != minioMetaBucket {
@ -487,25 +500,27 @@ func (fs *FSObjects) getObject(bucket, object string, offset int64, length int64
if lock { if lock {
_, err = fs.rwPool.Open(fsMetaPath) _, err = fs.rwPool.Open(fsMetaPath)
if err != nil && err != errFileNotFound { if err != nil && err != errFileNotFound {
return toObjectErr(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return toObjectErr(err, bucket, object)
} }
defer fs.rwPool.Close(fsMetaPath) defer fs.rwPool.Close(fsMetaPath)
} }
} }
if etag != "" { if etag != "" {
objEtag, perr := fs.getObjectETag(bucket, object, lock) objEtag, perr := fs.getObjectETag(ctx, bucket, object, lock)
if perr != nil { if perr != nil {
return toObjectErr(errors.Trace(perr), bucket, object) return toObjectErr(perr, bucket, object)
} }
if objEtag != etag { if objEtag != etag {
return toObjectErr(errors.Trace(InvalidETag{}), bucket, object) logger.LogIf(ctx, InvalidETag{})
return toObjectErr(InvalidETag{}, bucket, object)
} }
} }
// Read the object, doesn't exist returns an s3 compatible error. // Read the object, doesn't exist returns an s3 compatible error.
fsObjPath := pathJoin(fs.fsPath, bucket, object) fsObjPath := pathJoin(fs.fsPath, bucket, object)
reader, size, err := fsOpenFile(fsObjPath, offset) reader, size, err := fsOpenFile(ctx, fsObjPath, offset)
if err != nil { if err != nil {
return toObjectErr(err, bucket, object) return toObjectErr(err, bucket, object)
} }
@ -523,21 +538,23 @@ func (fs *FSObjects) getObject(bucket, object string, offset int64, length int64
// Reply back invalid range if the input offset and length fall out of range. // Reply back invalid range if the input offset and length fall out of range.
if offset > size || offset+length > size { if offset > size || offset+length > size {
return errors.Trace(InvalidRange{offset, length, size}) err = InvalidRange{offset, length, size}
logger.LogIf(ctx, err)
return err
} }
// Allocate a staging buffer. // Allocate a staging buffer.
buf := make([]byte, int(bufSize)) buf := make([]byte, int(bufSize))
_, err = io.CopyBuffer(writer, io.LimitReader(reader, length), buf) _, err = io.CopyBuffer(writer, io.LimitReader(reader, length), buf)
logger.LogIf(ctx, err)
return toObjectErr(errors.Trace(err), bucket, object) return toObjectErr(err, bucket, object)
} }
// getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo. // getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
func (fs *FSObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e error) { func (fs *FSObjects) getObjectInfo(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) {
fsMeta := fsMetaV1{} fsMeta := fsMetaV1{}
fi, err := fsStatDir(pathJoin(fs.fsPath, bucket, object)) fi, err := fsStatDir(ctx, pathJoin(fs.fsPath, bucket, object))
if err != nil && errors.Cause(err) != errFileAccessDenied { if err != nil && errors.Cause(err) != errFileAccessDenied {
return oi, toObjectErr(err, bucket, object) return oi, toObjectErr(err, bucket, object)
} }
@ -547,6 +564,7 @@ func (fs *FSObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e erro
if hasSuffix(object, slashSeparator) { if hasSuffix(object, slashSeparator) {
return fsMeta.ToObjectInfo(bucket, object, fi), nil return fsMeta.ToObjectInfo(bucket, object, fi), nil
} }
logger.LogIf(ctx, errFileNotFound)
return oi, toObjectErr(errFileNotFound, bucket, object) return oi, toObjectErr(errFileNotFound, bucket, object)
} }
@ -558,7 +576,7 @@ func (fs *FSObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e erro
if err == nil { if err == nil {
// Read from fs metadata only if it exists. // Read from fs metadata only if it exists.
defer fs.rwPool.Close(fsMetaPath) defer fs.rwPool.Close(fsMetaPath)
if _, rerr := fsMeta.ReadFrom(rlk.LockedFile); rerr != nil { if _, rerr := fsMeta.ReadFrom(ctx, rlk.LockedFile); rerr != nil {
// `fs.json` can be empty due to previously failed // `fs.json` can be empty due to previously failed
// PutObject() transaction, if we arrive at such // PutObject() transaction, if we arrive at such
// a situation we just ignore and continue. // a situation we just ignore and continue.
@ -570,11 +588,12 @@ func (fs *FSObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e erro
// Ignore if `fs.json` is not available, this is true for pre-existing data. // Ignore if `fs.json` is not available, this is true for pre-existing data.
if err != nil && err != errFileNotFound { if err != nil && err != errFileNotFound {
return oi, toObjectErr(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return oi, toObjectErr(err, bucket, object)
} }
// Stat the file to get file size. // Stat the file to get file size.
fi, err = fsStatFile(pathJoin(fs.fsPath, bucket, object)) fi, err = fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
if err != nil { if err != nil {
return oi, toObjectErr(err, bucket, object) return oi, toObjectErr(err, bucket, object)
} }
@ -591,27 +610,27 @@ func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string) (
} }
defer objectLock.RUnlock() defer objectLock.RUnlock()
if err := checkGetObjArgs(bucket, object); err != nil { if err := checkGetObjArgs(ctx, bucket, object); err != nil {
return oi, err return oi, err
} }
if _, err := fs.statBucketDir(bucket); err != nil { if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return oi, toObjectErr(err, bucket) return oi, toObjectErr(err, bucket)
} }
return fs.getObjectInfo(bucket, object) return fs.getObjectInfo(ctx, bucket, object)
} }
// This function does the following check, suppose // This function does the following check, suppose
// object is "a/b/c/d", stat makes sure that objects ""a/b/c"" // object is "a/b/c/d", stat makes sure that objects ""a/b/c""
// "a/b" and "a" do not exist. // "a/b" and "a" do not exist.
func (fs *FSObjects) parentDirIsObject(bucket, parent string) bool { func (fs *FSObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
var isParentDirObject func(string) bool var isParentDirObject func(string) bool
isParentDirObject = func(p string) bool { isParentDirObject = func(p string) bool {
if p == "." || p == "/" { if p == "." || p == "/" {
return false return false
} }
if _, err := fsStatFile(pathJoin(fs.fsPath, bucket, p)); err == nil { if _, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, p)); err == nil {
// If there is already a file at prefix "p", return true. // If there is already a file at prefix "p", return true.
return true return true
} }
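
The recursion above walks the object's lineage from the deepest prefix upward: for "a/b/c/d" it stats "a/b/c", then "a/b", then "a", answering true as soon as any prefix is a regular file. An iterative sketch of the same walk over plain strings (isFile is a stand-in for the fsStatFile probe):

package main

import (
	"fmt"
	pathutil "path"
)

// parentDirIsObject reports whether any parent prefix of parent is a file.
func parentDirIsObject(parent string, isFile func(string) bool) bool {
	for p := parent; p != "." && p != "/"; p = pathutil.Dir(p) {
		if isFile(p) {
			return true // an existing file blocks the whole subtree
		}
	}
	return false
}

func main() {
	existing := map[string]bool{"a/b": true} // pretend object "a/b" exists
	isFile := func(p string) bool { return existing[p] }
	// Creating "a/b/c/d" must fail: its parent prefix "a/b" is an object.
	fmt.Println(parentDirIsObject(pathutil.Dir("a/b/c/d"), isFile)) // true
}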
@ -627,20 +646,21 @@ func (fs *FSObjects) parentDirIsObject(bucket, parent string) bool {
// Additionally writes `fs.json` which carries the necessary metadata // Additionally writes `fs.json` which carries the necessary metadata
// for future object operations. // for future object operations.
func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) { func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
if err := checkPutObjectArgs(bucket, object, fs, data.Size()); err != nil { if err := checkPutObjectArgs(ctx, bucket, object, fs, data.Size()); err != nil {
return ObjectInfo{}, err return ObjectInfo{}, err
} }
// Lock the object. // Lock the object.
objectLock := fs.nsMutex.NewNSLock(bucket, object) objectLock := fs.nsMutex.NewNSLock(bucket, object)
if err := objectLock.GetLock(globalObjectTimeout); err != nil { if err := objectLock.GetLock(globalObjectTimeout); err != nil {
logger.LogIf(ctx, err)
return objInfo, err return objInfo, err
} }
defer objectLock.Unlock() defer objectLock.Unlock()
return fs.putObject(bucket, object, data, metadata) return fs.putObject(ctx, bucket, object, data, metadata)
} }
// putObject - wrapper for PutObject // putObject - wrapper for PutObject
func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) { func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
// No metadata is set, allocate a new one. // No metadata is set, allocate a new one.
meta := make(map[string]string) meta := make(map[string]string)
for k, v := range metadata { for k, v := range metadata {
@ -649,7 +669,7 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
var err error var err error
// Validate if bucket name is valid and exists. // Validate if bucket name is valid and exists.
if _, err = fs.statBucketDir(bucket); err != nil { if _, err = fs.statBucketDir(ctx, bucket); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket) return ObjectInfo{}, toObjectErr(err, bucket)
} }
@ -661,31 +681,35 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
// and return success. // and return success.
if isObjectDir(object, data.Size()) { if isObjectDir(object, data.Size()) {
// Check if an object is present as one of the parent dirs. // Check if an object is present as one of the parent dirs.
if fs.parentDirIsObject(bucket, path.Dir(object)) { if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object) logger.LogIf(ctx, errFileAccessDenied)
return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
} }
if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil { if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil {
logger.LogIf(ctx, err)
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
var fi os.FileInfo var fi os.FileInfo
if fi, err = fsStatDir(pathJoin(fs.fsPath, bucket, object)); err != nil { if fi, err = fsStatDir(ctx, pathJoin(fs.fsPath, bucket, object)); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
return fsMeta.ToObjectInfo(bucket, object, fi), nil return fsMeta.ToObjectInfo(bucket, object, fi), nil
} }
if err = checkPutObjectArgs(bucket, object, fs, data.Size()); err != nil { if err = checkPutObjectArgs(ctx, bucket, object, fs, data.Size()); err != nil {
return ObjectInfo{}, err return ObjectInfo{}, err
} }
// Check if an object is present as one of the parent dirs. // Check if an object is present as one of the parent dirs.
if fs.parentDirIsObject(bucket, path.Dir(object)) { if fs.parentDirIsObject(ctx, bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object) logger.LogIf(ctx, errFileAccessDenied)
return ObjectInfo{}, toObjectErr(errFileAccessDenied, bucket, object)
} }
// Validate input data size; it can never be less than zero. // Validate input data size; it can never be less than zero.
if data.Size() < 0 { if data.Size() < 0 {
return ObjectInfo{}, errors.Trace(errInvalidArgument) logger.LogIf(ctx, errInvalidArgument)
return ObjectInfo{}, errInvalidArgument
} }
var wlk *lock.LockedFile var wlk *lock.LockedFile
@ -695,7 +719,8 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fs.metaJSONFile) fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fs.metaJSONFile)
wlk, err = fs.rwPool.Create(fsMetaPath) wlk, err = fs.rwPool.Create(fsMetaPath)
if err != nil { if err != nil {
return ObjectInfo{}, toObjectErr(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
// This close will allow for locks to be synchronized on `fs.json`. // This close will allow for locks to be synchronized on `fs.json`.
defer wlk.Close() defer wlk.Close()
@ -703,7 +728,7 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
// Remove meta file when PutObject encounters any error // Remove meta file when PutObject encounters any error
if retErr != nil { if retErr != nil {
tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID) tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)
fsRemoveMeta(bucketMetaDir, fsMetaPath, tmpDir) fsRemoveMeta(ctx, bucketMetaDir, fsMetaPath, tmpDir)
} }
}() }()
} }
@ -721,9 +746,9 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
buf := make([]byte, int(bufSize)) buf := make([]byte, int(bufSize))
fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj) fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj)
bytesWritten, err := fsCreateFile(fsTmpObjPath, data, buf, data.Size()) bytesWritten, err := fsCreateFile(ctx, fsTmpObjPath, data, buf, data.Size())
if err != nil { if err != nil {
fsRemoveFile(fsTmpObjPath) fsRemoveFile(ctx, fsTmpObjPath)
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
@ -732,24 +757,24 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
// Should return IncompleteBody{} error when reader has fewer // Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header. // bytes than specified in request header.
if bytesWritten < data.Size() { if bytesWritten < data.Size() {
fsRemoveFile(fsTmpObjPath) fsRemoveFile(ctx, fsTmpObjPath)
return ObjectInfo{}, errors.Trace(IncompleteBody{}) return ObjectInfo{}, IncompleteBody{}
} }
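
The short-write guard in isolation, as a sketch; errIncompleteBody stands in for minio's IncompleteBody{} type.

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

var errIncompleteBody = errors.New("incomplete body")

// writeFull copies exactly size bytes from r to w and mirrors the
// bytesWritten < data.Size() check above when the reader runs short.
func writeFull(w io.Writer, r io.Reader, size int64) error {
	n, err := io.Copy(w, io.LimitReader(r, size))
	if err != nil {
		return err
	}
	if n < size {
		return errIncompleteBody
	}
	return nil
}

func main() {
	var buf bytes.Buffer
	// Client claimed Content-Length 10 but sent only 5 bytes.
	err := writeFull(&buf, bytes.NewReader([]byte("hello")), 10)
	fmt.Println(err) // incomplete body
}
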
// Delete the temporary object in the case of a // Delete the temporary object in the case of a
// failure. If PutObject succeeds, then there would be // failure. If PutObject succeeds, then there would be
// nothing to delete. // nothing to delete.
defer fsRemoveFile(fsTmpObjPath) defer fsRemoveFile(ctx, fsTmpObjPath)
// Entire object was written to the temp location; now it's safe to rename it to the actual location. // Entire object was written to the temp location; now it's safe to rename it to the actual location.
fsNSObjPath := pathJoin(fs.fsPath, bucket, object) fsNSObjPath := pathJoin(fs.fsPath, bucket, object)
// Deny if WORM is enabled // Deny if WORM is enabled
if globalWORMEnabled { if globalWORMEnabled {
if _, err = fsStatFile(fsNSObjPath); err == nil { if _, err = fsStatFile(ctx, fsNSObjPath); err == nil {
return ObjectInfo{}, errors.Trace(ObjectAlreadyExists{Bucket: bucket, Object: object}) return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
} }
} }
if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil { if err = fsRenameFile(ctx, fsTmpObjPath, fsNSObjPath); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
@ -761,7 +786,7 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
} }
// Stat the file to fetch timestamp, size. // Stat the file to fetch timestamp, size.
fi, err := fsStatFile(pathJoin(fs.fsPath, bucket, object)) fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
if err != nil { if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
} }
@ -780,11 +805,11 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) er
} }
defer objectLock.Unlock() defer objectLock.Unlock()
if err := checkDelObjArgs(bucket, object); err != nil { if err := checkDelObjArgs(ctx, bucket, object); err != nil {
return err return err
} }
if _, err := fs.statBucketDir(bucket); err != nil { if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return toObjectErr(err, bucket) return toObjectErr(err, bucket)
} }
@ -797,18 +822,19 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) er
defer rwlk.Close() defer rwlk.Close()
} }
if lerr != nil && lerr != errFileNotFound { if lerr != nil && lerr != errFileNotFound {
return toObjectErr(errors.Trace(lerr), bucket, object) logger.LogIf(ctx, lerr)
return toObjectErr(lerr, bucket, object)
} }
} }
// Delete the object. // Delete the object.
if err := fsDeleteFile(pathJoin(fs.fsPath, bucket), pathJoin(fs.fsPath, bucket, object)); err != nil { if err := fsDeleteFile(ctx, pathJoin(fs.fsPath, bucket), pathJoin(fs.fsPath, bucket, object)); err != nil {
return toObjectErr(err, bucket, object) return toObjectErr(err, bucket, object)
} }
if bucket != minioMetaBucket { if bucket != minioMetaBucket {
// Delete the metadata object. // Delete the metadata object.
err := fsDeleteFile(minioMetaBucketDir, fsMetaPath) err := fsDeleteFile(ctx, minioMetaBucketDir, fsMetaPath)
if err != nil && errors.Cause(err) != errFileNotFound { if err != nil && errors.Cause(err) != errFileNotFound {
return toObjectErr(err, bucket, object) return toObjectErr(err, bucket, object)
} }
@ -836,7 +862,7 @@ func (fs *FSObjects) listDirFactory(isLeaf isLeafFunc) listDirFunc {
// getObjectETag is a helper function, which returns only the md5sum // getObjectETag is a helper function, which returns only the md5sum
// of the file on the disk. // of the file on the disk.
func (fs *FSObjects) getObjectETag(bucket, entry string, lock bool) (string, error) { func (fs *FSObjects) getObjectETag(ctx context.Context, bucket, entry string, lock bool) (string, error) {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, entry, fs.metaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, entry, fs.metaJSONFile)
var reader io.Reader var reader io.Reader
@ -848,7 +874,8 @@ func (fs *FSObjects) getObjectETag(bucket, entry string, lock bool) (string, err
rlk, err := fs.rwPool.Open(fsMetaPath) rlk, err := fs.rwPool.Open(fsMetaPath)
// Ignore if `fs.json` is not available; this is true for pre-existing data. // Ignore if `fs.json` is not available; this is true for pre-existing data.
if err != nil && err != errFileNotFound { if err != nil && err != errFileNotFound {
return "", toObjectErr(errors.Trace(err), bucket, entry) logger.LogIf(ctx, err)
return "", toObjectErr(err, bucket, entry)
} }
// If file is not found, we don't need to proceed forward. // If file is not found, we don't need to proceed forward.
@ -862,16 +889,17 @@ func (fs *FSObjects) getObjectETag(bucket, entry string, lock bool) (string, err
// Fetch the size of the underlying file. // Fetch the size of the underlying file.
fi, err = rlk.LockedFile.Stat() fi, err = rlk.LockedFile.Stat()
if err != nil { if err != nil {
return "", toObjectErr(errors.Trace(err), bucket, entry) logger.LogIf(ctx, err)
return "", toObjectErr(err, bucket, entry)
} }
size = fi.Size() size = fi.Size()
reader = io.NewSectionReader(rlk.LockedFile, 0, fi.Size()) reader = io.NewSectionReader(rlk.LockedFile, 0, fi.Size())
} else { } else {
var err error var err error
reader, size, err = fsOpenFile(fsMetaPath, 0) reader, size, err = fsOpenFile(ctx, fsMetaPath, 0)
if err != nil { if err != nil {
return "", toObjectErr(errors.Trace(err), bucket, entry) return "", toObjectErr(err, bucket, entry)
} }
} }
@ -884,12 +912,14 @@ func (fs *FSObjects) getObjectETag(bucket, entry string, lock bool) (string, err
fsMetaBuf, err := ioutil.ReadAll(reader) fsMetaBuf, err := ioutil.ReadAll(reader)
if err != nil { if err != nil {
return "", toObjectErr(errors.Trace(err), bucket, entry) logger.LogIf(ctx, err)
return "", toObjectErr(err, bucket, entry)
} }
// Check if FS metadata is valid, if not return error. // Check if FS metadata is valid, if not return error.
if !isFSMetaValid(parseFSVersion(fsMetaBuf)) { if !isFSMetaValid(parseFSVersion(fsMetaBuf)) {
return "", toObjectErr(errors.Trace(errCorruptedFormat), bucket, entry) logger.LogIf(ctx, errCorruptedFormat)
return "", toObjectErr(errCorruptedFormat, bucket, entry)
} }
return extractETag(parseFSMetaMap(fsMetaBuf)), nil return extractETag(parseFSMetaMap(fsMetaBuf)), nil
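
A toy version of the getObjectETag flow this hunk touches: read the fs.json bytes, validate them, and pull the ETag out of the metadata map. The struct fields below are illustrative, not the exact fs.json schema.

package main

import (
	"encoding/json"
	"fmt"
)

// fsMetaLite is a hypothetical, simplified shape for fs.json.
type fsMetaLite struct {
	Version string            `json:"version"`
	Meta    map[string]string `json:"meta"`
}

// extractObjectETag parses the metadata blob, rejects it if the
// version marker is missing, and returns the stored etag.
func extractObjectETag(buf []byte) (string, error) {
	var m fsMetaLite
	if err := json.Unmarshal(buf, &m); err != nil {
		return "", err
	}
	if m.Version == "" {
		return "", fmt.Errorf("corrupted format")
	}
	return m.Meta["etag"], nil
}

func main() {
	buf := []byte(`{"version":"1.0.1","meta":{"etag":"d41d8cd98f00b204e9800998ecf8427e"}}`)
	etag, err := extractObjectETag(buf)
	fmt.Println(etag, err)
}
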
@ -898,7 +928,7 @@ func (fs *FSObjects) getObjectETag(bucket, entry string, lock bool) (string, err
// ListObjects - list all objects at prefix up to maxKeys, optionally delimited by '/'. Maintains the list pool // ListObjects - list all objects at prefix up to maxKeys, optionally delimited by '/'. Maintains the list pool
// state for future re-entrant list requests. // state for future re-entrant list requests.
func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
if err := checkListObjsArgs(bucket, prefix, marker, delimiter, fs); err != nil { if err := checkListObjsArgs(ctx, bucket, prefix, marker, delimiter, fs); err != nil {
return loi, err return loi, err
} }
// If marker is set, validate the pre-condition. // If marker is set, validate the pre-condition.
@ -908,7 +938,7 @@ func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, de
return ListObjectsInfo{}, e return ListObjectsInfo{}, e
} }
} }
if _, err := fs.statBucketDir(bucket); err != nil { if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return loi, err return loi, err
} }
@ -942,10 +972,11 @@ func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, de
// Protect the entry from concurrent deletes, or renames. // Protect the entry from concurrent deletes, or renames.
objectLock := fs.nsMutex.NewNSLock(bucket, entry) objectLock := fs.nsMutex.NewNSLock(bucket, entry)
if err = objectLock.GetRLock(globalListingTimeout); err != nil { if err = objectLock.GetRLock(globalListingTimeout); err != nil {
logger.LogIf(ctx, err)
return ObjectInfo{}, err return ObjectInfo{}, err
} }
defer objectLock.RUnlock() defer objectLock.RUnlock()
return fs.getObjectInfo(bucket, entry) return fs.getObjectInfo(ctx, bucket, entry)
} }
heal := false // true only for xl.ListObjectsHeal() heal := false // true only for xl.ListObjectsHeal()
@ -959,7 +990,7 @@ func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, de
return !hasSuffix(object, slashSeparator) return !hasSuffix(object, slashSeparator)
} }
listDir := fs.listDirFactory(isLeaf) listDir := fs.listDirFactory(isLeaf)
walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh) walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
} }
var objInfos []ObjectInfo var objInfos []ObjectInfo
@ -984,7 +1015,6 @@ func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, de
} }
objInfo, err := entryToObjectInfo(walkResult.entry) objInfo, err := entryToObjectInfo(walkResult.entry)
if err != nil { if err != nil {
errorIf(err, "Unable to fetch object info for %s", walkResult.entry)
return loi, nil return loi, nil
} }
nextMarker = objInfo.Name nextMarker = objInfo.Name
@ -1018,34 +1048,39 @@ func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, de
// HealFormat - no-op for fs, valid only for XL. // HealFormat - no-op for fs, valid only for XL.
func (fs *FSObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { func (fs *FSObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
return madmin.HealResultItem{}, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return madmin.HealResultItem{}, NotImplemented{}
} }
// HealObject - no-op for fs. Valid only for XL. // HealObject - no-op for fs. Valid only for XL.
func (fs *FSObjects) HealObject(ctx context.Context, bucket, object string, dryRun bool) ( func (fs *FSObjects) HealObject(ctx context.Context, bucket, object string, dryRun bool) (
res madmin.HealResultItem, err error) { res madmin.HealResultItem, err error) {
return res, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return res, NotImplemented{}
} }
// HealBucket - no-op for fs, valid only for XL. // HealBucket - no-op for fs, valid only for XL.
func (fs *FSObjects) HealBucket(ctx context.Context, bucket string, dryRun bool) ([]madmin.HealResultItem, func (fs *FSObjects) HealBucket(ctx context.Context, bucket string, dryRun bool) ([]madmin.HealResultItem,
error) { error) {
return nil, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return nil, NotImplemented{}
} }
// ListObjectsHeal - list all objects to be healed. Valid only for XL // ListObjectsHeal - list all objects to be healed. Valid only for XL
func (fs *FSObjects) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { func (fs *FSObjects) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return loi, NotImplemented{}
} }
// ListBucketsHeal - list all buckets to be healed. Valid only for XL // ListBucketsHeal - list all buckets to be healed. Valid only for XL
func (fs *FSObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { func (fs *FSObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
return []BucketInfo{}, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return []BucketInfo{}, NotImplemented{}
} }
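
Each heal stub above now follows the same two-line shape: log NotImplemented{} with the request context, then return the plain error value instead of a traced wrapper. A compressed sketch of that shape, with toy stand-ins for the minio types:

package main

import (
	"context"
	"log"
)

// NotImplemented mirrors the minio error type: a struct with Error().
type NotImplemented struct{}

func (NotImplemented) Error() string { return "Not Implemented" }

// logIf is a toy stand-in for logger.LogIf.
func logIf(ctx context.Context, err error) {
	if err != nil {
		log.Printf("%v", err)
	}
}

// healFormat shows the stub pattern: log with context, return plain error.
func healFormat(ctx context.Context) error {
	logIf(ctx, NotImplemented{})
	return NotImplemented{}
}

func main() {
	_ = healFormat(context.Background())
}
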
// SetBucketPolicy sets policy on bucket // SetBucketPolicy sets policy on bucket
func (fs *FSObjects) SetBucketPolicy(ctx context.Context, bucket string, policy policy.BucketAccessPolicy) error { func (fs *FSObjects) SetBucketPolicy(ctx context.Context, bucket string, policy policy.BucketAccessPolicy) error {
return persistAndNotifyBucketPolicyChange(bucket, false, policy, fs) return persistAndNotifyBucketPolicyChange(ctx, bucket, false, policy, fs)
} }
// GetBucketPolicy will get policy on bucket // GetBucketPolicy will get policy on bucket
@ -1059,7 +1094,7 @@ func (fs *FSObjects) GetBucketPolicy(ctx context.Context, bucket string) (policy
// DeleteBucketPolicy deletes all policies on bucket // DeleteBucketPolicy deletes all policies on bucket
func (fs *FSObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error { func (fs *FSObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
return persistAndNotifyBucketPolicyChange(bucket, true, emptyBucketPolicy, fs) return persistAndNotifyBucketPolicyChange(ctx, bucket, true, emptyBucketPolicy, fs)
} }
// ListObjectsV2 lists all blobs in bucket filtered by prefix // ListObjectsV2 lists all blobs in bucket filtered by prefix

View File

@ -91,7 +91,7 @@ func TestFSParentDirIsObject(t *testing.T) {
}, },
} }
for i, testCase := range testCases { for i, testCase := range testCases {
gotValue := fs.parentDirIsObject(bucketName, testCase.objectName) gotValue := fs.parentDirIsObject(context.Background(), bucketName, testCase.objectName)
if testCase.parentIsObject != gotValue { if testCase.parentIsObject != gotValue {
t.Errorf("Test %d: Unexpected value returned got %t, expected %t", i+1, gotValue, testCase.parentIsObject) t.Errorf("Test %d: Unexpected value returned got %t, expected %t", i+1, gotValue, testCase.parentIsObject)
} }

View File

@ -19,7 +19,6 @@ package cmd
import ( import (
"net/http" "net/http"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio-go" minio "github.com/minio/minio-go"
@ -31,12 +30,6 @@ var (
// MustGetUUID function alias. // MustGetUUID function alias.
MustGetUUID = mustGetUUID MustGetUUID = mustGetUUID
// ErrorIf provides errorIf function alias.
ErrorIf = errorIf
// FatalIf provides fatalIf function alias.
FatalIf = fatalIf
) )
// AnonErrToObjectErr - converts standard http codes into meaningful object layer errors. // AnonErrToObjectErr - converts standard http codes into meaningful object layer errors.
@ -262,16 +255,6 @@ func ErrorRespToObjectError(err error, params ...string) error {
return nil return nil
} }
e, ok := err.(*errors.Error)
if !ok {
// Code should be fixed if this function is called without doing traceError()
// Else handling different situations in this function makes this function complicated.
errorIf(err, "Expected type *Error")
return err
}
err = e.Cause
bucket := "" bucket := ""
object := "" object := ""
if len(params) >= 1 { if len(params) >= 1 {
@ -282,15 +265,14 @@ func ErrorRespToObjectError(err error, params ...string) error {
} }
if isNetworkOrHostDown(err) { if isNetworkOrHostDown(err) {
e.Cause = BackendDown{} return BackendDown{}
return e
} }
minioErr, ok := err.(minio.ErrorResponse) minioErr, ok := err.(minio.ErrorResponse)
if !ok { if !ok {
// We don't interpret non-Minio errors, as Minio errors will // We don't interpret non-Minio errors, as Minio errors will
// have a StatusCode to help convert to object errors. // have a StatusCode to help convert to object errors.
return e return err
} }
switch minioErr.Code { switch minioErr.Code {
@ -325,6 +307,5 @@ func ErrorRespToObjectError(err error, params ...string) error {
err = PartTooSmall{} err = PartTooSmall{}
} }
e.Cause = err return err
return e
} }
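
With the *errors.Error unwrapping removed, the conversion now inspects the plain error directly. A compressed sketch of the new control flow; isHostDown stands in for isNetworkOrHostDown, and the minio.ErrorResponse switch is elided.

package main

import (
	"errors"
	"fmt"
)

// BackendDown mirrors the object-layer error of the same name.
type BackendDown struct{}

func (BackendDown) Error() string { return "Backend down" }

var errHostDown = errors.New("host down")

// isHostDown is a toy stand-in for isNetworkOrHostDown.
func isHostDown(err error) bool { return err == errHostDown }

// errorRespToObjectError, post-change: no Cause field to set, the
// converted error is returned directly.
func errorRespToObjectError(err error) error {
	if err == nil {
		return nil
	}
	if isHostDown(err) {
		return BackendDown{}
	}
	// Non-Minio errors pass through untouched; Minio error responses
	// would be switched on their Code here.
	return err
}

func main() {
	fmt.Println(errorRespToObjectError(errHostDown)) // Backend down
}
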

View File

@ -17,6 +17,8 @@
package cmd package cmd
import ( import (
"context"
"errors"
"fmt" "fmt"
"net/url" "net/url"
"os" "os"
@ -28,7 +30,7 @@ import (
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/minio/cli" "github.com/minio/cli"
miniohttp "github.com/minio/minio/cmd/http" miniohttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
) )
var ( var (
@ -100,10 +102,14 @@ func ValidateGatewayArguments(serverAddr, endpointAddr string) error {
return nil return nil
} }
func init() {
logger.Init(GOPATH)
}
// StartGateway - handler for 'minio gateway <name>'. // StartGateway - handler for 'minio gateway <name>'.
func StartGateway(ctx *cli.Context, gw Gateway) { func StartGateway(ctx *cli.Context, gw Gateway) {
if gw == nil { if gw == nil {
fatalIf(errUnexpected, "Gateway implementation not initialized, exiting.") logger.FatalIf(errUnexpected, "Gateway implementation not initialized, exiting.")
} }
// Validate if we have access, secret set through environment. // Validate if we have access, secret set through environment.
@ -116,13 +122,13 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// enable json and quiet modes if json flag is turned on. // enable json and quiet modes if json flag is turned on.
jsonFlag := ctx.IsSet("json") || ctx.GlobalIsSet("json") jsonFlag := ctx.IsSet("json") || ctx.GlobalIsSet("json")
if jsonFlag { if jsonFlag {
log.EnableJSON() logger.EnableJSON()
} }
// Get quiet flag from command line argument. // Get quiet flag from command line argument.
quietFlag := ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet") quietFlag := ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
if quietFlag { if quietFlag {
log.EnableQuiet() logger.EnableQuiet()
} }
// Fetch address option // Fetch address option
@ -139,35 +145,34 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Validate if we have access, secret set through environment. // Validate if we have access, secret set through environment.
if !globalIsEnvCreds { if !globalIsEnvCreds {
errorIf(fmt.Errorf("Access and secret keys not set"), "Access and Secret keys should be set through ENVs for backend [%s]", gatewayName) reqInfo := (&logger.ReqInfo{}).AppendTags("gatewayName", gatewayName)
contxt := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(contxt, errors.New("Access and Secret keys should be set through ENVs for backend"))
cli.ShowCommandHelpAndExit(ctx, gatewayName, 1) cli.ShowCommandHelpAndExit(ctx, gatewayName, 1)
} }
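
The two lines above show the new request-tagging idiom: attach a ReqInfo to the context, then let LogIf print its tags alongside the error. A stripped-down analogue that assumes nothing about the real logger internals:

package main

import (
	"context"
	"fmt"
)

// reqInfo is a toy stand-in for logger.ReqInfo: a bag of tags.
type reqInfo struct{ tags map[string]string }

func (r *reqInfo) appendTags(k, v string) *reqInfo {
	if r.tags == nil {
		r.tags = map[string]string{}
	}
	r.tags[k] = v
	return r
}

type ctxKey struct{}

// setReqInfo mimics logger.SetReqInfo: stash the tags on the context.
func setReqInfo(ctx context.Context, r *reqInfo) context.Context {
	return context.WithValue(ctx, ctxKey{}, r)
}

// logIf prints the error together with whatever tags the context carries.
func logIf(ctx context.Context, err error) {
	if err == nil {
		return
	}
	if r, ok := ctx.Value(ctxKey{}).(*reqInfo); ok {
		fmt.Printf("tags=%v err=%v\n", r.tags, err)
		return
	}
	fmt.Printf("err=%v\n", err)
}

func main() {
	ri := (&reqInfo{}).appendTags("gatewayName", "azure")
	ctx := setReqInfo(context.Background(), ri)
	logIf(ctx, fmt.Errorf("Access and Secret keys should be set through ENVs for backend"))
}
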
// Create certs path. // Create certs path.
fatalIf(createConfigDir(), "Unable to create configuration directories.") logger.FatalIf(createConfigDir(), "Unable to create configuration directories.")
// Initialize gateway config. // Initialize gateway config.
initConfig() initConfig()
// Init the error tracing module.
errors.Init(GOPATH, "github.com/minio/minio")
// Check and load SSL certificates. // Check and load SSL certificates.
var err error var err error
globalPublicCerts, globalRootCAs, globalTLSCertificate, globalIsSSL, err = getSSLConfig() globalPublicCerts, globalRootCAs, globalTLSCertificate, globalIsSSL, err = getSSLConfig()
fatalIf(err, "Invalid SSL certificate file") logger.FatalIf(err, "Invalid SSL certificate file")
// Set system resources to maximum. // Set system resources to maximum.
errorIf(setMaxResources(), "Unable to change resource limit") logger.LogIf(context.Background(), setMaxResources())
initNSLock(false) // Enable local namespace lock. initNSLock(false) // Enable local namespace lock.
// Initialize notification system. // Initialize notification system.
globalNotificationSys, err = NewNotificationSys(globalServerConfig, EndpointList{}) globalNotificationSys, err = NewNotificationSys(globalServerConfig, EndpointList{})
fatalIf(err, "Unable to initialize notification system.") logger.FatalIf(err, "Unable to initialize notification system.")
newObject, err := gw.NewGatewayLayer(globalServerConfig.GetCredential()) newObject, err := gw.NewGatewayLayer(globalServerConfig.GetCredential())
fatalIf(err, "Unable to initialize gateway layer") logger.FatalIf(err, "Unable to initialize gateway layer")
router := mux.NewRouter().SkipClean(true) router := mux.NewRouter().SkipClean(true)
@ -176,7 +181,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Register web router when its enabled. // Register web router when its enabled.
if globalIsBrowserEnabled { if globalIsBrowserEnabled {
fatalIf(registerWebRouter(router), "Unable to configure web browser") logger.FatalIf(registerWebRouter(router), "Unable to configure web browser")
} }
// Add API router. // Add API router.
@ -204,7 +209,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Print a warning message if gateway is not ready for production before the startup banner. // Print a warning message if gateway is not ready for production before the startup banner.
if !gw.Production() { if !gw.Production() {
log.Println(colorYellow("\n *** Warning: Not Ready for Production ***")) logger.Println(colorYellow("\n *** Warning: Not Ready for Production ***"))
} }
// Print gateway startup message. // Print gateway startup message.

View File

@ -20,6 +20,8 @@ import (
"context" "context"
"fmt" "fmt"
"strings" "strings"
"github.com/minio/minio/cmd/logger"
) )
// Prints the formatted startup message. // Prints the formatted startup message.
@ -54,12 +56,12 @@ func printGatewayCommonMsg(apiEndpoints []string) {
apiEndpointStr := strings.Join(apiEndpoints, " ") apiEndpointStr := strings.Join(apiEndpoints, " ")
// Colorize the message and print. // Colorize the message and print.
log.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr))) logger.Println(colorBlue("\nEndpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
log.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey))) logger.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey)))
log.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey))) logger.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey)))
if globalIsBrowserEnabled { if globalIsBrowserEnabled {
log.Println(colorBlue("\nBrowser Access:")) logger.Println(colorBlue("\nBrowser Access:"))
log.Println(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr)) logger.Println(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr))
} }
} }

View File

@ -21,7 +21,7 @@ import (
"time" "time"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/madmin" "github.com/minio/minio/pkg/madmin"
) )
@ -31,105 +31,125 @@ type GatewayUnsupported struct{}
// ListMultipartUploads lists all multipart uploads. // ListMultipartUploads lists all multipart uploads.
func (a GatewayUnsupported) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) { func (a GatewayUnsupported) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) {
return lmi, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return lmi, NotImplemented{}
} }
// NewMultipartUpload upload object in multiple parts // NewMultipartUpload upload object in multiple parts
func (a GatewayUnsupported) NewMultipartUpload(ctx context.Context, bucket string, object string, metadata map[string]string) (uploadID string, err error) { func (a GatewayUnsupported) NewMultipartUpload(ctx context.Context, bucket string, object string, metadata map[string]string) (uploadID string, err error) {
return "", errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return "", NotImplemented{}
} }
// CopyObjectPart copy part of object to uploadID for another object // CopyObjectPart copy part of object to uploadID for another object
func (a GatewayUnsupported) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, partID int, startOffset, length int64, srcInfo ObjectInfo) (pi PartInfo, err error) { func (a GatewayUnsupported) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, partID int, startOffset, length int64, srcInfo ObjectInfo) (pi PartInfo, err error) {
return pi, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return pi, NotImplemented{}
} }
// PutObjectPart puts a part of object in bucket // PutObjectPart puts a part of object in bucket
func (a GatewayUnsupported) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, err error) { func (a GatewayUnsupported) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, err error) {
return pi, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return pi, NotImplemented{}
} }
// ListObjectParts returns all object parts for specified object in specified bucket // ListObjectParts returns all object parts for specified object in specified bucket
func (a GatewayUnsupported) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, err error) { func (a GatewayUnsupported) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, err error) {
return lpi, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return lpi, NotImplemented{}
} }
// AbortMultipartUpload aborts an ongoing multipart upload // AbortMultipartUpload aborts an ongoing multipart upload
func (a GatewayUnsupported) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string) error { func (a GatewayUnsupported) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string) error {
return errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return NotImplemented{}
} }
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object // CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (a GatewayUnsupported) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, err error) { func (a GatewayUnsupported) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, err error) {
return oi, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return oi, NotImplemented{}
} }
// SetBucketPolicy sets policy on bucket // SetBucketPolicy sets policy on bucket
func (a GatewayUnsupported) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error { func (a GatewayUnsupported) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error {
return errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return NotImplemented{}
} }
// GetBucketPolicy will get policy on bucket // GetBucketPolicy will get policy on bucket
func (a GatewayUnsupported) GetBucketPolicy(ctx context.Context, bucket string) (bal policy.BucketAccessPolicy, err error) { func (a GatewayUnsupported) GetBucketPolicy(ctx context.Context, bucket string) (bal policy.BucketAccessPolicy, err error) {
return bal, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return bal, NotImplemented{}
} }
// DeleteBucketPolicy deletes all policies on bucket // DeleteBucketPolicy deletes all policies on bucket
func (a GatewayUnsupported) DeleteBucketPolicy(ctx context.Context, bucket string) error { func (a GatewayUnsupported) DeleteBucketPolicy(ctx context.Context, bucket string) error {
return errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return NotImplemented{}
} }
// HealFormat - Not implemented stub // HealFormat - Not implemented stub
func (a GatewayUnsupported) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { func (a GatewayUnsupported) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
return madmin.HealResultItem{}, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return madmin.HealResultItem{}, NotImplemented{}
} }
// HealBucket - Not implemented stub // HealBucket - Not implemented stub
func (a GatewayUnsupported) HealBucket(ctx context.Context, bucket string, dryRun bool) ([]madmin.HealResultItem, error) { func (a GatewayUnsupported) HealBucket(ctx context.Context, bucket string, dryRun bool) ([]madmin.HealResultItem, error) {
return nil, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return nil, NotImplemented{}
} }
// ListBucketsHeal - Not implemented stub // ListBucketsHeal - Not implemented stub
func (a GatewayUnsupported) ListBucketsHeal(ctx context.Context) (buckets []BucketInfo, err error) { func (a GatewayUnsupported) ListBucketsHeal(ctx context.Context) (buckets []BucketInfo, err error) {
return nil, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return nil, NotImplemented{}
} }
// HealObject - Not implemented stub // HealObject - Not implemented stub
func (a GatewayUnsupported) HealObject(ctx context.Context, bucket, object string, dryRun bool) (h madmin.HealResultItem, e error) { func (a GatewayUnsupported) HealObject(ctx context.Context, bucket, object string, dryRun bool) (h madmin.HealResultItem, e error) {
return h, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return h, NotImplemented{}
} }
// ListObjectsV2 - Not implemented stub // ListObjectsV2 - Not implemented stub
func (a GatewayUnsupported) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { func (a GatewayUnsupported) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
return result, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return result, NotImplemented{}
} }
// ListObjectsHeal - Not implemented stub // ListObjectsHeal - Not implemented stub
func (a GatewayUnsupported) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { func (a GatewayUnsupported) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return loi, NotImplemented{}
} }
// CopyObject copies a blob from source container to destination container. // CopyObject copies a blob from source container to destination container.
func (a GatewayUnsupported) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string, func (a GatewayUnsupported) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
srcInfo ObjectInfo) (objInfo ObjectInfo, err error) { srcInfo ObjectInfo) (objInfo ObjectInfo, err error) {
return objInfo, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return objInfo, NotImplemented{}
} }
// Locking operations // Locking operations
// ListLocks lists namespace locks held in object layer // ListLocks lists namespace locks held in object layer
func (a GatewayUnsupported) ListLocks(ctx context.Context, bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) { func (a GatewayUnsupported) ListLocks(ctx context.Context, bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
return []VolumeLockInfo{}, errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return []VolumeLockInfo{}, NotImplemented{}
} }
// ClearLocks clears namespace locks held in object layer // ClearLocks clears namespace locks held in object layer
func (a GatewayUnsupported) ClearLocks(ctx context.Context, info []VolumeLockInfo) error { func (a GatewayUnsupported) ClearLocks(ctx context.Context, info []VolumeLockInfo) error {
return errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return NotImplemented{}
} }
// RefreshBucketPolicy refreshes cache policy with what's on disk. // RefreshBucketPolicy refreshes cache policy with what's on disk.
func (a GatewayUnsupported) RefreshBucketPolicy(ctx context.Context, bucket string) error { func (a GatewayUnsupported) RefreshBucketPolicy(ctx context.Context, bucket string) error {
return errors.Trace(NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return NotImplemented{}
} }
// IsNotificationSupported returns whether bucket notification is applicable for this layer. // IsNotificationSupported returns whether bucket notification is applicable for this layer.

View File

@ -35,6 +35,7 @@ import (
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
@ -118,7 +119,7 @@ func azureGatewayMain(ctx *cli.Context) {
// Validate gateway arguments. // Validate gateway arguments.
host := ctx.Args().First() host := ctx.Args().First()
// Validate gateway arguments. // Validate gateway arguments.
minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument") logger.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
minio.StartGateway(ctx, &Azure{host}) minio.StartGateway(ctx, &Azure{host})
} }
@ -181,11 +182,12 @@ func (g *Azure) Production() bool {
// copied into BlobProperties. // copied into BlobProperties.
// //
// Header names are canonicalized as in http.Header. // Header names are canonicalized as in http.Header.
func s3MetaToAzureProperties(s3Metadata map[string]string) (storage.BlobMetadata, func s3MetaToAzureProperties(ctx context.Context, s3Metadata map[string]string) (storage.BlobMetadata,
storage.BlobProperties, error) { storage.BlobProperties, error) {
for k := range s3Metadata { for k := range s3Metadata {
if strings.Contains(k, "--") { if strings.Contains(k, "--") {
return storage.BlobMetadata{}, storage.BlobProperties{}, errors.Trace(minio.UnsupportedMetadata{}) logger.LogIf(ctx, minio.UnsupportedMetadata{})
return storage.BlobMetadata{}, storage.BlobProperties{}, minio.UnsupportedMetadata{}
} }
} }
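
The loop above rejects metadata keys containing "--" before mapping them to Azure blob metadata, presumably because the gateway's dash encoding makes such keys ambiguous on the way back. That guard in isolation, as a sketch:

package main

import (
	"fmt"
	"strings"
)

// validateS3Meta mirrors the "--" check in s3MetaToAzureProperties;
// the error text is illustrative, the real code returns
// minio.UnsupportedMetadata{}.
func validateS3Meta(s3Metadata map[string]string) error {
	for k := range s3Metadata {
		if strings.Contains(k, "--") {
			return fmt.Errorf("unsupported metadata key: %s", k)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateS3Meta(map[string]string{"X-Amz-Meta-a--b": "v"}))
}
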
@ -300,15 +302,6 @@ func azureToObjectError(err error, params ...string) error {
return nil return nil
} }
e, ok := err.(*errors.Error)
if !ok {
// Code should be fixed if this function is called without doing errors.Trace()
// Else handling different situations in this function makes this function complicated.
minio.ErrorIf(err, "Expected type *Error")
return err
}
err = e.Cause
bucket := "" bucket := ""
object := "" object := ""
if len(params) >= 1 { if len(params) >= 1 {
@ -322,7 +315,7 @@ func azureToObjectError(err error, params ...string) error {
if !ok { if !ok {
// We don't interpret non-Azure errors, as Azure errors will // We don't interpret non-Azure errors, as Azure errors will
// have a StatusCode to help convert to object errors. // have a StatusCode to help convert to object errors.
return e return err
} }
switch azureErr.Code { switch azureErr.Code {
@ -349,8 +342,7 @@ func azureToObjectError(err error, params ...string) error {
err = minio.BucketNameInvalid{Bucket: bucket} err = minio.BucketNameInvalid{Bucket: bucket}
} }
} }
e.Cause = err return err
return e
} }
// mustGetAzureUploadID - returns a new upload ID, a hex-encoded 8-byte random value. // mustGetAzureUploadID - returns a new upload ID, a hex-encoded 8-byte random value.
@ -371,17 +363,23 @@ func mustGetAzureUploadID() string {
} }
// checkAzureUploadID - returns an error if the given string is not a valid upload ID. // checkAzureUploadID - returns an error if the given string is not a valid upload ID.
func checkAzureUploadID(uploadID string) (err error) { func checkAzureUploadID(ctx context.Context, uploadID string) (err error) {
if len(uploadID) != 16 { if len(uploadID) != 16 {
return errors.Trace(minio.MalformedUploadID{ logger.LogIf(ctx, minio.MalformedUploadID{
UploadID: uploadID, UploadID: uploadID,
}) })
return minio.MalformedUploadID{
UploadID: uploadID,
}
} }
if _, err = hex.DecodeString(uploadID); err != nil { if _, err = hex.DecodeString(uploadID); err != nil {
return errors.Trace(minio.MalformedUploadID{ logger.LogIf(ctx, minio.MalformedUploadID{
UploadID: uploadID, UploadID: uploadID,
}) })
return minio.MalformedUploadID{
UploadID: uploadID,
}
} }
return nil return nil
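
checkAzureUploadID in miniature: gateway upload IDs are 16 hex characters (the 8 random bytes from mustGetAzureUploadID, hex encoded), so both the length and the hex alphabet are validated. A self-contained sketch:

package main

import (
	"encoding/hex"
	"fmt"
)

// checkUploadID mirrors the two checks above; the error text is
// illustrative, the real code returns minio.MalformedUploadID.
func checkUploadID(uploadID string) error {
	if len(uploadID) != 16 {
		return fmt.Errorf("malformed upload id %s", uploadID)
	}
	if _, err := hex.DecodeString(uploadID); err != nil {
		return fmt.Errorf("malformed upload id %s", uploadID)
	}
	return nil
}

func main() {
	fmt.Println(checkUploadID("0123456789abcdef")) // <nil>
	fmt.Println(checkUploadID("zzzzzzzzzzzzzzzz")) // malformed upload id
}
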
@ -438,7 +436,8 @@ func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket, locat
err := container.Create(&storage.CreateContainerOptions{ err := container.Create(&storage.CreateContainerOptions{
Access: storage.ContainerAccessTypePrivate, Access: storage.ContainerAccessTypePrivate,
}) })
return azureToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return azureToObjectError(err, bucket)
} }
// GetBucketInfo - Get bucket metadata.. // GetBucketInfo - Get bucket metadata..
@ -448,7 +447,8 @@ func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi min
// in azure documentation, so we will simply use the same function here. // in azure documentation, so we will simply use the same function here.
// Ref - https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata // Ref - https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata
if !minio.IsValidBucketName(bucket) { if !minio.IsValidBucketName(bucket) {
return bi, errors.Trace(minio.BucketNameInvalid{Bucket: bucket}) logger.LogIf(ctx, minio.BucketNameInvalid{Bucket: bucket})
return bi, minio.BucketNameInvalid{Bucket: bucket}
} }
// Azure does not have an equivalent call, hence use // Azure does not have an equivalent call, hence use
@ -457,7 +457,8 @@ func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi min
Prefix: bucket, Prefix: bucket,
}) })
if err != nil { if err != nil {
return bi, azureToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return bi, azureToObjectError(err, bucket)
} }
for _, container := range resp.Containers { for _, container := range resp.Containers {
if container.Name == bucket { if container.Name == bucket {
@ -470,19 +471,22 @@ func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi min
} // else continue } // else continue
} }
} }
return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket}) logger.LogIf(ctx, minio.BucketNotFound{Bucket: bucket})
return bi, minio.BucketNotFound{Bucket: bucket}
} }
// ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers. // ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers.
func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) { func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
resp, err := a.client.ListContainers(storage.ListContainersParameters{}) resp, err := a.client.ListContainers(storage.ListContainersParameters{})
if err != nil { if err != nil {
return nil, azureToObjectError(errors.Trace(err)) logger.LogIf(ctx, err)
return nil, azureToObjectError(err)
} }
for _, container := range resp.Containers { for _, container := range resp.Containers {
t, e := time.Parse(time.RFC1123, container.Properties.LastModified) t, e := time.Parse(time.RFC1123, container.Properties.LastModified)
if e != nil { if e != nil {
return nil, errors.Trace(e) logger.LogIf(ctx, e)
return nil, e
} }
buckets = append(buckets, minio.BucketInfo{ buckets = append(buckets, minio.BucketInfo{
Name: container.Name, Name: container.Name,
@ -495,7 +499,9 @@ func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketI
// DeleteBucket - delete a container on azure, uses Azure equivalent DeleteContainer. // DeleteBucket - delete a container on azure, uses Azure equivalent DeleteContainer.
func (a *azureObjects) DeleteBucket(ctx context.Context, bucket string) error { func (a *azureObjects) DeleteBucket(ctx context.Context, bucket string) error {
container := a.client.GetContainerReference(bucket) container := a.client.GetContainerReference(bucket)
return azureToObjectError(errors.Trace(container.Delete(nil)), bucket) err := container.Delete(nil)
logger.LogIf(ctx, err)
return azureToObjectError(err, bucket)
} }
// ListObjects - lists all blobs on azure within a container filtered by prefix // ListObjects - lists all blobs on azure within a container filtered by prefix
@ -512,7 +518,8 @@ func (a *azureObjects) ListObjects(ctx context.Context, bucket, prefix, marker,
MaxResults: uint(maxKeys), MaxResults: uint(maxKeys),
}) })
if err != nil { if err != nil {
return result, azureToObjectError(errors.Trace(err), bucket, prefix) logger.LogIf(ctx, err)
return result, azureToObjectError(err, bucket, prefix)
} }
for _, object := range resp.Blobs { for _, object := range resp.Blobs {
@ -580,7 +587,8 @@ func (a *azureObjects) ListObjectsV2(ctx context.Context, bucket, prefix, contin
func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error { func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
// startOffset cannot be negative. // startOffset cannot be negative.
if startOffset < 0 { if startOffset < 0 {
return azureToObjectError(errors.Trace(minio.InvalidRange{}), bucket, object) logger.LogIf(ctx, minio.InvalidRange{})
return azureToObjectError(minio.InvalidRange{}, bucket, object)
} }
blobRange := &storage.BlobRange{Start: uint64(startOffset)} blobRange := &storage.BlobRange{Start: uint64(startOffset)}
@ -599,11 +607,13 @@ func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, sta
}) })
} }
if err != nil { if err != nil {
return azureToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return azureToObjectError(err, bucket, object)
} }
_, err = io.Copy(writer, rc) _, err = io.Copy(writer, rc)
rc.Close() rc.Close()
return errors.Trace(err) logger.LogIf(ctx, err)
return err
} }
// GetObjectInfo - reads blob metadata properties and replies back minio.ObjectInfo, // GetObjectInfo - reads blob metadata properties and replies back minio.ObjectInfo,
@ -612,7 +622,8 @@ func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string)
blob := a.client.GetContainerReference(bucket).GetBlobReference(object) blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err = blob.GetProperties(nil) err = blob.GetProperties(nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, object)
} }
return minio.ObjectInfo{ return minio.ObjectInfo{
@ -631,13 +642,14 @@ func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string)
// uses Azure equivalent CreateBlockBlobFromReader. // uses Azure equivalent CreateBlockBlobFromReader.
func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) { func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
blob := a.client.GetContainerReference(bucket).GetBlobReference(object) blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
blob.Metadata, blob.Properties, err = s3MetaToAzureProperties(metadata) blob.Metadata, blob.Properties, err = s3MetaToAzureProperties(ctx, metadata)
if err != nil { if err != nil {
return objInfo, azureToObjectError(err, bucket, object) return objInfo, azureToObjectError(err, bucket, object)
} }
err = blob.CreateBlockBlobFromReader(data, nil) err = blob.CreateBlockBlobFromReader(data, nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, object)
} }
return a.GetObjectInfo(ctx, bucket, object) return a.GetObjectInfo(ctx, bucket, object)
} }
@ -647,19 +659,21 @@ func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, dat
func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo) (objInfo minio.ObjectInfo, err error) { func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo) (objInfo minio.ObjectInfo, err error) {
srcBlobURL := a.client.GetContainerReference(srcBucket).GetBlobReference(srcObject).GetURL() srcBlobURL := a.client.GetContainerReference(srcBucket).GetBlobReference(srcObject).GetURL()
destBlob := a.client.GetContainerReference(destBucket).GetBlobReference(destObject) destBlob := a.client.GetContainerReference(destBucket).GetBlobReference(destObject)
azureMeta, props, err := s3MetaToAzureProperties(srcInfo.UserDefined) azureMeta, props, err := s3MetaToAzureProperties(ctx, srcInfo.UserDefined)
if err != nil { if err != nil {
return objInfo, azureToObjectError(err, srcBucket, srcObject) return objInfo, azureToObjectError(err, srcBucket, srcObject)
} }
destBlob.Metadata = azureMeta destBlob.Metadata = azureMeta
err = destBlob.Copy(srcBlobURL, nil) err = destBlob.Copy(srcBlobURL, nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), srcBucket, srcObject) logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, srcBucket, srcObject)
} }
destBlob.Properties = props destBlob.Properties = props
err = destBlob.SetProperties(nil) err = destBlob.SetProperties(nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), srcBucket, srcObject) logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, srcBucket, srcObject)
} }
return a.GetObjectInfo(ctx, destBucket, destObject) return a.GetObjectInfo(ctx, destBucket, destObject)
} }
@ -670,7 +684,7 @@ func (a *azureObjects) DeleteObject(ctx context.Context, bucket, object string)
blob := a.client.GetContainerReference(bucket).GetBlobReference(object) blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err := blob.Delete(nil) err := blob.Delete(nil)
if err != nil { if err != nil {
return azureToObjectError(errors.Trace(err), bucket, object) return azureToObjectError(err, bucket, object)
} }
return nil return nil
} }
@ -690,19 +704,21 @@ func getAzureMetadataObjectName(objectName, uploadID string) string {
return fmt.Sprintf(metadataObjectNameTemplate, uploadID, sha256.Sum256([]byte(objectName))) return fmt.Sprintf(metadataObjectNameTemplate, uploadID, sha256.Sum256([]byte(objectName)))
} }
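
getAzureMetadataObjectName derives a deterministic blob name from the upload ID and a SHA-256 of the object name, so the multipart metadata for an in-progress upload can always be found again. A sketch with an illustrative template string, not the gateway's exact one:

package main

import (
	"crypto/sha256"
	"fmt"
)

// metadataObjectNameTemplate is hypothetical; only the shape
// (uploadID plus hashed object name) matches the code above.
const metadataObjectNameTemplate = "multipart/v1/%s/%x"

func metadataObjectName(objectName, uploadID string) string {
	return fmt.Sprintf(metadataObjectNameTemplate, uploadID, sha256.Sum256([]byte(objectName)))
}

func main() {
	fmt.Println(metadataObjectName("photos/a.jpg", "0123456789abcdef"))
}
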
func (a *azureObjects) checkUploadIDExists(bucketName, objectName, uploadID string) (err error) { func (a *azureObjects) checkUploadIDExists(ctx context.Context, bucketName, objectName, uploadID string) (err error) {
blob := a.client.GetContainerReference(bucketName).GetBlobReference( blob := a.client.GetContainerReference(bucketName).GetBlobReference(
getAzureMetadataObjectName(objectName, uploadID)) getAzureMetadataObjectName(objectName, uploadID))
err = blob.GetMetadata(nil) err = blob.GetMetadata(nil)
err = azureToObjectError(errors.Trace(err), bucketName, objectName) logger.LogIf(ctx, err)
err = azureToObjectError(err, bucketName, objectName)
oerr := minio.ObjectNotFound{ oerr := minio.ObjectNotFound{
Bucket: bucketName, Bucket: bucketName,
Object: objectName, Object: objectName,
} }
if errors.Cause(err) == oerr { if errors.Cause(err) == oerr {
err = errors.Trace(minio.InvalidUploadID{ logger.LogIf(ctx, minio.InvalidUploadID{UploadID: uploadID})
err = minio.InvalidUploadID{
UploadID: uploadID, UploadID: uploadID,
}) }
} }
return err return err
} }
@ -714,13 +730,15 @@ func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object st
var jsonData []byte var jsonData []byte
if jsonData, err = json.Marshal(azureMultipartMetadata{Name: object, Metadata: metadata}); err != nil { if jsonData, err = json.Marshal(azureMultipartMetadata{Name: object, Metadata: metadata}); err != nil {
return "", errors.Trace(err) logger.LogIf(ctx, err)
return "", err
} }
blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject) blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
err = blob.CreateBlockBlobFromReader(bytes.NewBuffer(jsonData), nil) err = blob.CreateBlockBlobFromReader(bytes.NewBuffer(jsonData), nil)
if err != nil { if err != nil {
return "", azureToObjectError(errors.Trace(err), bucket, metadataObject) logger.LogIf(ctx, err)
return "", azureToObjectError(err, bucket, metadataObject)
} }
return uploadID, nil return uploadID, nil
@ -728,11 +746,11 @@ func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object st
// PutObjectPart - Use Azure equivalent PutBlockWithLength. // PutObjectPart - Use Azure equivalent PutBlockWithLength.
func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info minio.PartInfo, err error) { func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info minio.PartInfo, err error) {
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil { if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return info, err return info, err
} }
if err = checkAzureUploadID(uploadID); err != nil { if err = checkAzureUploadID(ctx, uploadID); err != nil {
return info, err return info, err
} }
@ -756,7 +774,8 @@ func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, upload
blob := a.client.GetContainerReference(bucket).GetBlobReference(object) blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err = blob.PutBlockWithLength(id, uint64(subPartSize), io.LimitReader(data, subPartSize), nil) err = blob.PutBlockWithLength(id, uint64(subPartSize), io.LimitReader(data, subPartSize), nil)
if err != nil { if err != nil {
return info, azureToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return info, azureToObjectError(err, bucket, object)
} }
subPartNumber++ subPartNumber++
} }
@ -770,7 +789,7 @@ func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, upload
// ListObjectParts - Use Azure equivalent GetBlockList. // ListObjectParts - Use Azure equivalent GetBlockList.
func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result minio.ListPartsInfo, err error) { func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result minio.ListPartsInfo, err error) {
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil { if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return result, err return result, err
} }
@ -787,7 +806,8 @@ func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uplo
return result, nil return result, nil
} }
if err != nil { if err != nil {
return result, azureToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return result, azureToObjectError(err, bucket, object)
} }
// Build a sorted list of parts and return the requested entries. // Build a sorted list of parts and return the requested entries.
partsMap := make(map[int]minio.PartInfo) partsMap := make(map[int]minio.PartInfo)
@ -796,7 +816,8 @@ func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uplo
var parsedUploadID string var parsedUploadID string
var md5Hex string var md5Hex string
if partNumber, _, parsedUploadID, md5Hex, err = azureParseBlockID(block.Name); err != nil { if partNumber, _, parsedUploadID, md5Hex, err = azureParseBlockID(block.Name); err != nil {
return result, azureToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object) logger.LogIf(ctx, fmt.Errorf("Unexpected error"))
return result, azureToObjectError(fmt.Errorf("Unexpected error"), bucket, object)
} }
if parsedUploadID != uploadID { if parsedUploadID != uploadID {
continue continue
@ -813,7 +834,8 @@ func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uplo
if part.ETag != md5Hex { if part.ETag != md5Hex {
// If two parts of same partNumber were uploaded with different contents // If two parts of same partNumber were uploaded with different contents
// return error as we won't be able to decide which the latest part is. // return error as we won't be able to decide which the latest part is.
return result, azureToObjectError(errors.Trace(fmt.Errorf("Unexpected error")), bucket, object) logger.LogIf(ctx, fmt.Errorf("Unexpected error"))
return result, azureToObjectError(fmt.Errorf("Unexpected error"), bucket, object)
} }
part.Size += block.Size part.Size += block.Size
partsMap[partNumber] = part partsMap[partNumber] = part
@ -856,7 +878,7 @@ func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uplo
// There is no corresponding API in azure to abort an incomplete upload. The uncommitted blocks // There is no corresponding API in azure to abort an incomplete upload. The uncommitted blocks
// get deleted after one week. // get deleted after one week.
func (a *azureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) (err error) { func (a *azureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) (err error) {
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil { if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return err return err
} }
@ -868,23 +890,25 @@ func (a *azureObjects) AbortMultipartUpload(ctx context.Context, bucket, object,
// CompleteMultipartUpload - Use Azure equivalent PutBlockList. // CompleteMultipartUpload - Use Azure equivalent PutBlockList.
func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart) (objInfo minio.ObjectInfo, err error) { func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart) (objInfo minio.ObjectInfo, err error) {
metadataObject := getAzureMetadataObjectName(object, uploadID) metadataObject := getAzureMetadataObjectName(object, uploadID)
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil { if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return objInfo, err return objInfo, err
} }
if err = checkAzureUploadID(uploadID); err != nil { if err = checkAzureUploadID(ctx, uploadID); err != nil {
return objInfo, err return objInfo, err
} }
var metadataReader io.Reader var metadataReader io.Reader
blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject) blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
if metadataReader, err = blob.Get(nil); err != nil { if metadataReader, err = blob.Get(nil); err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, metadataObject) logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, metadataObject)
} }
var metadata azureMultipartMetadata var metadata azureMultipartMetadata
if err = json.NewDecoder(metadataReader).Decode(&metadata); err != nil { if err = json.NewDecoder(metadataReader).Decode(&metadata); err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, metadataObject) logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, metadataObject)
} }
defer func() { defer func() {
@ -894,13 +918,15 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject) blob := a.client.GetContainerReference(bucket).GetBlobReference(metadataObject)
derr := blob.Delete(nil) derr := blob.Delete(nil)
minio.ErrorIf(derr, "unable to remove meta data object for upload ID %s", uploadID) logger.GetReqInfo(ctx).AppendTags("uploadID", uploadID)
logger.LogIf(ctx, derr)
}() }()
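
The deferred cleanup above also shows how contextual detail moves out of the message string: the old ErrorIf formatted the upload ID into the text, while the new code attaches it as a tag on the request info carried by ctx. A sketch of that pattern, assuming logger.GetReqInfo returns the mutable *ReqInfo stored in the context (as the call above implies):

package sketch

import (
	"context"

	"github.com/minio/minio/cmd/logger"
)

// logCleanup tags the request info with the upload ID instead of
// formatting it into a message; LogIf then emits the tag with the error.
func logCleanup(ctx context.Context, uploadID string, derr error) {
	logger.GetReqInfo(ctx).AppendTags("uploadID", uploadID)
	logger.LogIf(ctx, derr)
}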
objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object) objBlob := a.client.GetContainerReference(bucket).GetBlobReference(object)
resp, err := objBlob.GetBlockList(storage.BlockListTypeUncommitted, nil) resp, err := objBlob.GetBlockList(storage.BlockListTypeUncommitted, nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, object)
} }
getBlocks := func(partNumber int, etag string) (blocks []storage.Block, size int64, err error) { getBlocks := func(partNumber int, etag string) (blocks []storage.Block, size int64, err error) {
@ -936,7 +962,8 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
var size int64 var size int64
blocks, size, err = getBlocks(part.PartNumber, part.ETag) blocks, size, err = getBlocks(part.PartNumber, part.ETag)
if err != nil { if err != nil {
return objInfo, errors.Trace(err) logger.LogIf(ctx, err)
return objInfo, err
} }
allBlocks = append(allBlocks, blocks...) allBlocks = append(allBlocks, blocks...)
@ -946,30 +973,39 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
// Error out if parts except last part sizing < 5MiB. // Error out if parts except last part sizing < 5MiB.
for i, size := range partSizes[:len(partSizes)-1] { for i, size := range partSizes[:len(partSizes)-1] {
if size < azureS3MinPartSize { if size < azureS3MinPartSize {
return objInfo, errors.Trace(minio.PartTooSmall{ logger.LogIf(ctx, minio.PartTooSmall{
PartNumber: uploadedParts[i].PartNumber, PartNumber: uploadedParts[i].PartNumber,
PartSize: size, PartSize: size,
PartETag: uploadedParts[i].ETag, PartETag: uploadedParts[i].ETag,
}) })
return objInfo, minio.PartTooSmall{
PartNumber: uploadedParts[i].PartNumber,
PartSize: size,
PartETag: uploadedParts[i].ETag,
}
} }
} }
err = objBlob.PutBlockList(allBlocks, nil) err = objBlob.PutBlockList(allBlocks, nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, object)
} }
if len(metadata.Metadata) > 0 { if len(metadata.Metadata) > 0 {
objBlob.Metadata, objBlob.Properties, err = s3MetaToAzureProperties(metadata.Metadata) objBlob.Metadata, objBlob.Properties, err = s3MetaToAzureProperties(ctx, metadata.Metadata)
if err != nil { if err != nil {
logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, object) return objInfo, azureToObjectError(err, bucket, object)
} }
err = objBlob.SetProperties(nil) err = objBlob.SetProperties(nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, object)
} }
err = objBlob.SetMetadata(nil) err = objBlob.SetMetadata(nil)
if err != nil { if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return objInfo, azureToObjectError(err, bucket, object)
} }
} }
return a.GetObjectInfo(ctx, bucket, object) return a.GetObjectInfo(ctx, bucket, object)
@ -992,13 +1028,16 @@ func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, polic
} }
prefix := bucket + "/*" // For all objects inside the bucket. prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 { if len(policies) != 1 {
return errors.Trace(minio.NotImplemented{}) logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
} }
if policies[0].Prefix != prefix { if policies[0].Prefix != prefix {
return errors.Trace(minio.NotImplemented{}) logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
} }
if policies[0].Policy != policy.BucketPolicyReadOnly { if policies[0].Policy != policy.BucketPolicyReadOnly {
return errors.Trace(minio.NotImplemented{}) logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
} }
perm := storage.ContainerPermissions{ perm := storage.ContainerPermissions{
AccessType: storage.ContainerAccessTypeContainer, AccessType: storage.ContainerAccessTypeContainer,
@ -1006,7 +1045,8 @@ func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, polic
} }
container := a.client.GetContainerReference(bucket) container := a.client.GetContainerReference(bucket)
err := container.SetPermissions(perm, nil) err := container.SetPermissions(perm, nil)
return azureToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return azureToObjectError(err, bucket)
} }
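
The guard at the top of SetBucketPolicy is the same in the Azure, B2, and GCS gateways: exactly one policy statement, covering the whole bucket, and read-only; anything else is logged and rejected as NotImplemented. A condensed sketch (the rule struct is a hypothetical stand-in for the entries the gateways derive from policyInfo):

package sketch

import (
	"context"

	"github.com/minio/minio-go/pkg/policy"
	minio "github.com/minio/minio/cmd"
	"github.com/minio/minio/cmd/logger"
)

// rule is a hypothetical stand-in for a per-prefix policy entry.
type rule struct {
	Prefix string
	Policy policy.BucketPolicy
}

// checkReadOnlyPolicy condenses the shared guard: one statement,
// whole-bucket prefix, read-only; everything else is unsupported.
func checkReadOnlyPolicy(ctx context.Context, bucket string, policies []rule) error {
	prefix := bucket + "/*" // for all objects inside the bucket
	if len(policies) != 1 || policies[0].Prefix != prefix || policies[0].Policy != policy.BucketPolicyReadOnly {
		logger.LogIf(ctx, minio.NotImplemented{})
		return minio.NotImplemented{}
	}
	return nil
}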
// GetBucketPolicy - Get the container ACL and convert it to canonical []bucketAccessPolicy // GetBucketPolicy - Get the container ACL and convert it to canonical []bucketAccessPolicy
@ -1015,15 +1055,18 @@ func (a *azureObjects) GetBucketPolicy(ctx context.Context, bucket string) (poli
container := a.client.GetContainerReference(bucket) container := a.client.GetContainerReference(bucket)
perm, err := container.GetPermissions(nil) perm, err := container.GetPermissions(nil)
if err != nil { if err != nil {
return policy.BucketAccessPolicy{}, azureToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return policy.BucketAccessPolicy{}, azureToObjectError(err, bucket)
} }
switch perm.AccessType { switch perm.AccessType {
case storage.ContainerAccessTypePrivate: case storage.ContainerAccessTypePrivate:
return policy.BucketAccessPolicy{}, errors.Trace(minio.PolicyNotFound{Bucket: bucket}) logger.LogIf(ctx, minio.PolicyNotFound{Bucket: bucket})
return policy.BucketAccessPolicy{}, minio.PolicyNotFound{Bucket: bucket}
case storage.ContainerAccessTypeContainer: case storage.ContainerAccessTypeContainer:
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "") policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "")
default: default:
return policy.BucketAccessPolicy{}, azureToObjectError(errors.Trace(minio.NotImplemented{})) logger.LogIf(ctx, minio.NotImplemented{})
return policy.BucketAccessPolicy{}, azureToObjectError(minio.NotImplemented{})
} }
return policyInfo, nil return policyInfo, nil
} }
@ -1036,5 +1079,6 @@ func (a *azureObjects) DeleteBucketPolicy(ctx context.Context, bucket string) er
} }
container := a.client.GetContainerReference(bucket) container := a.client.GetContainerReference(bucket)
err := container.SetPermissions(perm, nil) err := container.SetPermissions(perm, nil)
return azureToObjectError(errors.Trace(err)) logger.LogIf(ctx, err)
return azureToObjectError(err)
} }

@ -17,6 +17,7 @@
package azure package azure
import ( import (
"context"
"fmt" "fmt"
"net/http" "net/http"
"reflect" "reflect"
@ -55,7 +56,7 @@ func TestS3MetaToAzureProperties(t *testing.T) {
"X_Amz_Matdesc": "{}", "X_Amz_Matdesc": "{}",
"X_Amz_Iv": "eWmyryl8kq+EVnnsE7jpOg==", "X_Amz_Iv": "eWmyryl8kq+EVnnsE7jpOg==",
} }
meta, _, err := s3MetaToAzureProperties(headers) meta, _, err := s3MetaToAzureProperties(context.Background(), headers)
if err != nil { if err != nil {
t.Fatalf("Test failed, with %s", err) t.Fatalf("Test failed, with %s", err)
} }
@ -65,7 +66,7 @@ func TestS3MetaToAzureProperties(t *testing.T) {
headers = map[string]string{ headers = map[string]string{
"invalid--meta": "value", "invalid--meta": "value",
} }
_, _, err = s3MetaToAzureProperties(headers) _, _, err = s3MetaToAzureProperties(context.Background(), headers)
if err = errors.Cause(err); err != nil { if err = errors.Cause(err); err != nil {
if _, ok := err.(minio.UnsupportedMetadata); !ok { if _, ok := err.(minio.UnsupportedMetadata); !ok {
t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err) t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err)
@ -75,7 +76,7 @@ func TestS3MetaToAzureProperties(t *testing.T) {
headers = map[string]string{ headers = map[string]string{
"content-md5": "Dce7bmCX61zvxzP5QmfelQ==", "content-md5": "Dce7bmCX61zvxzP5QmfelQ==",
} }
_, props, err := s3MetaToAzureProperties(headers) _, props, err := s3MetaToAzureProperties(context.Background(), headers)
if err != nil { if err != nil {
t.Fatalf("Test failed, with %s", err) t.Fatalf("Test failed, with %s", err)
} }
@ -137,53 +138,46 @@ func TestAzureToObjectError(t *testing.T) {
nil, nil, "", "", nil, nil, "", "",
}, },
{ {
errors.Trace(fmt.Errorf("Non azure error")), fmt.Errorf("Non azure error"),
fmt.Errorf("Non azure error"), "", "", fmt.Errorf("Non azure error"), "", "",
}, },
{ {
storage.AzureStorageServiceError{ storage.AzureStorageServiceError{
Code: "ContainerAlreadyExists", Code: "ContainerAlreadyExists",
}, storage.AzureStorageServiceError{ }, minio.BucketExists{Bucket: "bucket"}, "bucket", "",
Code: "ContainerAlreadyExists",
}, "bucket", "",
}, },
{ {
errors.Trace(storage.AzureStorageServiceError{ storage.AzureStorageServiceError{
Code: "ContainerAlreadyExists",
}), minio.BucketExists{Bucket: "bucket"}, "bucket", "",
},
{
errors.Trace(storage.AzureStorageServiceError{
Code: "InvalidResourceName", Code: "InvalidResourceName",
}), minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "", }, minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
}, },
{ {
errors.Trace(storage.AzureStorageServiceError{ storage.AzureStorageServiceError{
Code: "RequestBodyTooLarge", Code: "RequestBodyTooLarge",
}), minio.PartTooBig{}, "", "", }, minio.PartTooBig{}, "", "",
}, },
{ {
errors.Trace(storage.AzureStorageServiceError{ storage.AzureStorageServiceError{
Code: "InvalidMetadata", Code: "InvalidMetadata",
}), minio.UnsupportedMetadata{}, "", "", }, minio.UnsupportedMetadata{}, "", "",
}, },
{ {
errors.Trace(storage.AzureStorageServiceError{ storage.AzureStorageServiceError{
StatusCode: http.StatusNotFound, StatusCode: http.StatusNotFound,
}), minio.ObjectNotFound{ }, minio.ObjectNotFound{
Bucket: "bucket", Bucket: "bucket",
Object: "object", Object: "object",
}, "bucket", "object", }, "bucket", "object",
}, },
{ {
errors.Trace(storage.AzureStorageServiceError{ storage.AzureStorageServiceError{
StatusCode: http.StatusNotFound, StatusCode: http.StatusNotFound,
}), minio.BucketNotFound{Bucket: "bucket"}, "bucket", "", }, minio.BucketNotFound{Bucket: "bucket"}, "bucket", "",
}, },
{ {
errors.Trace(storage.AzureStorageServiceError{ storage.AzureStorageServiceError{
StatusCode: http.StatusBadRequest, StatusCode: http.StatusBadRequest,
}), minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "", }, minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
}, },
} }
for i, testCase := range testCases { for i, testCase := range testCases {
@ -307,7 +301,7 @@ func TestCheckAzureUploadID(t *testing.T) {
} }
for _, uploadID := range invalidUploadIDs { for _, uploadID := range invalidUploadIDs {
if err := checkAzureUploadID(uploadID); err == nil { if err := checkAzureUploadID(context.Background(), uploadID); err == nil {
t.Fatalf("%s: expected: <error>, got: <nil>", uploadID) t.Fatalf("%s: expected: <error>, got: <nil>", uploadID)
} }
} }
@ -318,7 +312,7 @@ func TestCheckAzureUploadID(t *testing.T) {
} }
for _, uploadID := range validUploadIDs { for _, uploadID := range validUploadIDs {
if err := checkAzureUploadID(uploadID); err != nil { if err := checkAzureUploadID(context.Background(), uploadID); err != nil {
t.Fatalf("%s: expected: <nil>, got: %s", uploadID, err) t.Fatalf("%s: expected: <nil>, got: %s", uploadID, err)
} }
} }
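
These tests keep their errors.Cause assertions even though the gateways no longer wrap errors, which is safe because Cause on a bare error is the error itself. A small test sketching that property:

package sketch

import (
	"testing"

	minio "github.com/minio/minio/cmd"
	"github.com/minio/minio/pkg/errors"
)

// TestBareErrorCause illustrates why existing errors.Cause assertions
// keep working after the Trace removal: Cause of an untraced error is
// simply the error itself.
func TestBareErrorCause(t *testing.T) {
	var err error = minio.UnsupportedMetadata{}
	if errors.Cause(err) != err {
		t.Fatal("Cause of a bare error should be the error itself")
	}
}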

@ -31,8 +31,8 @@ import (
b2 "github.com/minio/blazer/base" b2 "github.com/minio/blazer/base"
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
h2 "github.com/minio/minio/pkg/hash" h2 "github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio/cmd" minio "github.com/minio/minio/cmd"
@ -146,16 +146,6 @@ func b2ToObjectError(err error, params ...string) error {
if err == nil { if err == nil {
return nil return nil
} }
e, ok := err.(*errors.Error)
if !ok {
// Code should be fixed if this function is called without doing errors.Trace()
// Else handling different situations in this function makes this function complicated.
minio.ErrorIf(err, "Expected type *Error")
return err
}
err = e.Cause
bucket := "" bucket := ""
object := "" object := ""
uploadID := "" uploadID := ""
@ -177,7 +167,7 @@ func b2ToObjectError(err error, params ...string) error {
if statusCode == 0 { if statusCode == 0 {
// We don't interpret non B2 errors. B2 errors have statusCode // We don't interpret non B2 errors. B2 errors have statusCode
// to help us convert them to S3 object errors. // to help us convert them to S3 object errors.
return e return err
} }
switch code { switch code {
@ -208,8 +198,7 @@ func b2ToObjectError(err error, params ...string) error {
err = minio.InvalidUploadID{UploadID: uploadID} err = minio.InvalidUploadID{UploadID: uploadID}
} }
e.Cause = err return err
return e
} }
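
With the *errors.Error unwrapping gone, b2ToObjectError can inspect the concrete blazer error directly. A condensed sketch of the resulting shape, using two of the mappings exercised by the test table later in this diff:

package sketch

import (
	b2 "github.com/minio/blazer/base"
	minio "github.com/minio/minio/cmd"
)

// classifyB2 sketches the simplified converter: no wrapper unwrapping,
// just a direct look at the concrete b2.Error value.
func classifyB2(err error, bucket string) error {
	if err == nil {
		return nil
	}
	b2Err, ok := err.(b2.Error)
	if !ok {
		return err // non-B2 errors pass through unchanged
	}
	if b2Err.StatusCode == 0 {
		return err // only coded B2 errors are interpreted
	}
	switch b2Err.Code {
	case "duplicate_bucket_name":
		return minio.BucketAlreadyOwnedByYou{Bucket: bucket}
	case "bad_bucket_id":
		return minio.BucketNotFound{Bucket: bucket}
	}
	return err
}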
// Shutdown saves any gateway metadata to disk // Shutdown saves any gateway metadata to disk
@ -230,7 +219,8 @@ func (l *b2Objects) MakeBucketWithLocation(ctx context.Context, bucket, location
// All buckets are set to private by default. // All buckets are set to private by default.
_, err := l.b2Client.CreateBucket(l.ctx, bucket, bucketTypePrivate, nil, nil) _, err := l.b2Client.CreateBucket(l.ctx, bucket, bucketTypePrivate, nil, nil)
return b2ToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket)
} }
func (l *b2Objects) reAuthorizeAccount(ctx context.Context) error { func (l *b2Objects) reAuthorizeAccount(ctx context.Context) error {
@ -271,14 +261,16 @@ func (l *b2Objects) listBuckets(ctx context.Context, err error) ([]*b2.Bucket, e
func (l *b2Objects) Bucket(ctx context.Context, bucket string) (*b2.Bucket, error) { func (l *b2Objects) Bucket(ctx context.Context, bucket string) (*b2.Bucket, error) {
bktList, err := l.listBuckets(ctx, nil) bktList, err := l.listBuckets(ctx, nil)
if err != nil { if err != nil {
return nil, b2ToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return nil, b2ToObjectError(err, bucket)
} }
for _, bkt := range bktList { for _, bkt := range bktList {
if bkt.Name == bucket { if bkt.Name == bucket {
return bkt, nil return bkt, nil
} }
} }
return nil, errors.Trace(minio.BucketNotFound{Bucket: bucket}) logger.LogIf(ctx, minio.BucketNotFound{Bucket: bucket})
return nil, minio.BucketNotFound{Bucket: bucket}
} }
// GetBucketInfo gets bucket metadata. // GetBucketInfo gets bucket metadata.
@ -315,7 +307,8 @@ func (l *b2Objects) DeleteBucket(ctx context.Context, bucket string) error {
return err return err
} }
err = bkt.DeleteBucket(l.ctx) err = bkt.DeleteBucket(l.ctx)
return b2ToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket)
} }
// ListObjects lists all objects in B2 bucket filtered by prefix, returns up to 1000 entries at a time. // ListObjects lists all objects in B2 bucket filtered by prefix, returns up to 1000 entries at a time.
@ -326,7 +319,8 @@ func (l *b2Objects) ListObjects(ctx context.Context, bucket string, prefix strin
} }
files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter) files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
if lerr != nil { if lerr != nil {
return loi, b2ToObjectError(errors.Trace(lerr), bucket) logger.LogIf(ctx, lerr)
return loi, b2ToObjectError(lerr, bucket)
} }
loi.IsTruncated = next != "" loi.IsTruncated = next != ""
loi.NextMarker = next loi.NextMarker = next
@ -359,7 +353,8 @@ func (l *b2Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuat
} }
files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, continuationToken, prefix, delimiter) files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, continuationToken, prefix, delimiter)
if lerr != nil { if lerr != nil {
return loi, b2ToObjectError(errors.Trace(lerr), bucket) logger.LogIf(ctx, lerr)
return loi, b2ToObjectError(lerr, bucket)
} }
loi.IsTruncated = next != "" loi.IsTruncated = next != ""
loi.ContinuationToken = continuationToken loi.ContinuationToken = continuationToken
@ -396,11 +391,13 @@ func (l *b2Objects) GetObject(ctx context.Context, bucket string, object string,
} }
reader, err := bkt.DownloadFileByName(l.ctx, object, startOffset, length) reader, err := bkt.DownloadFileByName(l.ctx, object, startOffset, length)
if err != nil { if err != nil {
return b2ToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object)
} }
defer reader.Close() defer reader.Close()
_, err = io.Copy(writer, reader) _, err = io.Copy(writer, reader)
return b2ToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object)
} }
// GetObjectInfo reads object info and replies back ObjectInfo // GetObjectInfo reads object info and replies back ObjectInfo
@ -411,12 +408,14 @@ func (l *b2Objects) GetObjectInfo(ctx context.Context, bucket string, object str
} }
f, err := bkt.DownloadFileByName(l.ctx, object, 0, 1) f, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
} }
f.Close() f.Close()
fi, err := bkt.File(f.ID, object).GetFileInfo(l.ctx) fi, err := bkt.File(f.ID, object).GetFileInfo(l.ctx)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
} }
return minio.ObjectInfo{ return minio.ObjectInfo{
Bucket: bucket, Bucket: bucket,
@ -504,20 +503,23 @@ func (l *b2Objects) PutObject(ctx context.Context, bucket string, object string,
var u *b2.URL var u *b2.URL
u, err = bkt.GetUploadURL(l.ctx) u, err = bkt.GetUploadURL(l.ctx)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
} }
hr := newB2Reader(data, data.Size()) hr := newB2Reader(data, data.Size())
var f *b2.File var f *b2.File
f, err = u.UploadFile(l.ctx, hr, int(hr.Size()), object, contentType, sha1AtEOF, metadata) f, err = u.UploadFile(l.ctx, hr, int(hr.Size()), object, contentType, sha1AtEOF, metadata)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
} }
var fi *b2.FileInfo var fi *b2.FileInfo
fi, err = f.GetFileInfo(l.ctx) fi, err = f.GetFileInfo(l.ctx)
if err != nil { if err != nil {
return objInfo, b2ToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return objInfo, b2ToObjectError(err, bucket, object)
} }
return minio.ObjectInfo{ return minio.ObjectInfo{
@ -539,12 +541,14 @@ func (l *b2Objects) DeleteObject(ctx context.Context, bucket string, object stri
} }
reader, err := bkt.DownloadFileByName(l.ctx, object, 0, 1) reader, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
if err != nil { if err != nil {
return b2ToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object)
} }
io.Copy(ioutil.Discard, reader) io.Copy(ioutil.Discard, reader)
reader.Close() reader.Close()
err = bkt.File(reader.ID, object).DeleteFileVersion(l.ctx) err = bkt.File(reader.ID, object).DeleteFileVersion(l.ctx)
return b2ToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object)
} }
// ListMultipartUploads lists all multipart uploads. // ListMultipartUploads lists all multipart uploads.
@ -563,7 +567,8 @@ func (l *b2Objects) ListMultipartUploads(ctx context.Context, bucket string, pre
} }
largeFiles, nextMarker, err := bkt.ListUnfinishedLargeFiles(l.ctx, uploadIDMarker, maxUploads) largeFiles, nextMarker, err := bkt.ListUnfinishedLargeFiles(l.ctx, uploadIDMarker, maxUploads)
if err != nil { if err != nil {
return lmi, b2ToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return lmi, b2ToObjectError(err, bucket)
} }
lmi = minio.ListMultipartsInfo{ lmi = minio.ListMultipartsInfo{
MaxUploads: maxUploads, MaxUploads: maxUploads,
@ -598,7 +603,8 @@ func (l *b2Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
delete(metadata, "content-type") delete(metadata, "content-type")
lf, err := bkt.StartLargeFile(l.ctx, object, contentType, metadata) lf, err := bkt.StartLargeFile(l.ctx, object, contentType, metadata)
if err != nil { if err != nil {
return uploadID, b2ToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return uploadID, b2ToObjectError(err, bucket, object)
} }
return lf.ID, nil return lf.ID, nil
@ -613,13 +619,15 @@ func (l *b2Objects) PutObjectPart(ctx context.Context, bucket string, object str
fc, err := bkt.File(uploadID, object).CompileParts(0, nil).GetUploadPartURL(l.ctx) fc, err := bkt.File(uploadID, object).CompileParts(0, nil).GetUploadPartURL(l.ctx)
if err != nil { if err != nil {
return pi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID) logger.LogIf(ctx, err)
return pi, b2ToObjectError(err, bucket, object, uploadID)
} }
hr := newB2Reader(data, data.Size()) hr := newB2Reader(data, data.Size())
sha1, err := fc.UploadPart(l.ctx, hr, sha1AtEOF, int(hr.Size()), partID) sha1, err := fc.UploadPart(l.ctx, hr, sha1AtEOF, int(hr.Size()), partID)
if err != nil { if err != nil {
return pi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID) logger.LogIf(ctx, err)
return pi, b2ToObjectError(err, bucket, object, uploadID)
} }
return minio.PartInfo{ return minio.PartInfo{
@ -647,7 +655,8 @@ func (l *b2Objects) ListObjectParts(ctx context.Context, bucket string, object s
partNumberMarker++ partNumberMarker++
partsList, next, err := bkt.File(uploadID, object).ListParts(l.ctx, partNumberMarker, maxParts) partsList, next, err := bkt.File(uploadID, object).ListParts(l.ctx, partNumberMarker, maxParts)
if err != nil { if err != nil {
return lpi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID) logger.LogIf(ctx, err)
return lpi, b2ToObjectError(err, bucket, object, uploadID)
} }
if next != 0 { if next != 0 {
lpi.IsTruncated = true lpi.IsTruncated = true
@ -670,7 +679,8 @@ func (l *b2Objects) AbortMultipartUpload(ctx context.Context, bucket string, obj
return err return err
} }
err = bkt.File(uploadID, object).CompileParts(0, nil).CancelLargeFile(l.ctx) err = bkt.File(uploadID, object).CompileParts(0, nil).CancelLargeFile(l.ctx)
return b2ToObjectError(errors.Trace(err), bucket, object, uploadID) logger.LogIf(ctx, err)
return b2ToObjectError(err, bucket, object, uploadID)
} }
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object, uses B2's LargeFile upload API. // CompleteMultipartUpload completes ongoing multipart upload and finalizes object, uses B2's LargeFile upload API.
@ -684,7 +694,8 @@ func (l *b2Objects) CompleteMultipartUpload(ctx context.Context, bucket string,
// B2 requires contiguous part numbers starting with 1, they do not support // B2 requires contiguous part numbers starting with 1, they do not support
// hand picking part numbers, we return an S3 compatible error instead. // hand picking part numbers, we return an S3 compatible error instead.
if i+1 != uploadedPart.PartNumber { if i+1 != uploadedPart.PartNumber {
return oi, b2ToObjectError(errors.Trace(minio.InvalidPart{}), bucket, object, uploadID) logger.LogIf(ctx, minio.InvalidPart{})
return oi, b2ToObjectError(minio.InvalidPart{}, bucket, object, uploadID)
} }
// Trim "-1" suffix in ETag as PutObjectPart() treats B2 returned SHA1 as ETag. // Trim "-1" suffix in ETag as PutObjectPart() treats B2 returned SHA1 as ETag.
@ -692,7 +703,8 @@ func (l *b2Objects) CompleteMultipartUpload(ctx context.Context, bucket string,
} }
if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil { if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil {
return oi, b2ToObjectError(errors.Trace(err), bucket, object, uploadID) logger.LogIf(ctx, err)
return oi, b2ToObjectError(err, bucket, object, uploadID)
} }
return l.GetObjectInfo(ctx, bucket, object) return l.GetObjectInfo(ctx, bucket, object)
@ -713,13 +725,16 @@ func (l *b2Objects) SetBucketPolicy(ctx context.Context, bucket string, policyIn
} }
prefix := bucket + "/*" // For all objects inside the bucket. prefix := bucket + "/*" // For all objects inside the bucket.
if len(policies) != 1 { if len(policies) != 1 {
return errors.Trace(minio.NotImplemented{}) logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
} }
if policies[0].Prefix != prefix { if policies[0].Prefix != prefix {
return errors.Trace(minio.NotImplemented{}) logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
} }
if policies[0].Policy != policy.BucketPolicyReadOnly { if policies[0].Policy != policy.BucketPolicyReadOnly {
return errors.Trace(minio.NotImplemented{}) logger.LogIf(ctx, minio.NotImplemented{})
return minio.NotImplemented{}
} }
bkt, err := l.Bucket(ctx, bucket) bkt, err := l.Bucket(ctx, bucket)
if err != nil { if err != nil {
@ -727,7 +742,8 @@ func (l *b2Objects) SetBucketPolicy(ctx context.Context, bucket string, policyIn
} }
bkt.Type = bucketTypeReadOnly bkt.Type = bucketTypeReadOnly
_, err = bkt.Update(l.ctx) _, err = bkt.Update(l.ctx)
return b2ToObjectError(errors.Trace(err)) logger.LogIf(ctx, err)
return b2ToObjectError(err)
} }
// GetBucketPolicy, returns the current bucketType from B2 backend and convert // GetBucketPolicy, returns the current bucketType from B2 backend and convert
@ -745,7 +761,8 @@ func (l *b2Objects) GetBucketPolicy(ctx context.Context, bucket string) (policy.
// bkt.Type can also be snapshot, but it is only allowed through B2 browser console, // bkt.Type can also be snapshot, but it is only allowed through B2 browser console,
// just return back as policy not found for all cases. // just return back as policy not found for all cases.
// CreateBucket always sets the value to allPrivate by default. // CreateBucket always sets the value to allPrivate by default.
return policy.BucketAccessPolicy{}, errors.Trace(minio.PolicyNotFound{Bucket: bucket}) logger.LogIf(ctx, minio.PolicyNotFound{Bucket: bucket})
return policy.BucketAccessPolicy{}, minio.PolicyNotFound{Bucket: bucket}
} }
// DeleteBucketPolicy - resets the bucketType of bucket on B2 to 'allPrivate'. // DeleteBucketPolicy - resets the bucketType of bucket on B2 to 'allPrivate'.
@ -756,5 +773,6 @@ func (l *b2Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error
} }
bkt.Type = bucketTypePrivate bkt.Type = bucketTypePrivate
_, err = bkt.Update(l.ctx) _, err = bkt.Update(l.ctx)
return b2ToObjectError(errors.Trace(err)) logger.LogIf(ctx, err)
return b2ToObjectError(err)
} }

@ -21,7 +21,6 @@ import (
"testing" "testing"
b2 "github.com/minio/blazer/base" b2 "github.com/minio/blazer/base"
"github.com/minio/minio/pkg/errors"
minio "github.com/minio/minio/cmd" minio "github.com/minio/minio/cmd"
) )
@ -40,70 +39,70 @@ func TestB2ObjectError(t *testing.T) {
[]string{}, fmt.Errorf("Not *Error"), fmt.Errorf("Not *Error"), []string{}, fmt.Errorf("Not *Error"), fmt.Errorf("Not *Error"),
}, },
{ {
[]string{}, errors.Trace(fmt.Errorf("Non B2 Error")), fmt.Errorf("Non B2 Error"), []string{}, fmt.Errorf("Non B2 Error"), fmt.Errorf("Non B2 Error"),
}, },
{ {
[]string{"bucket"}, errors.Trace(b2.Error{ []string{"bucket"}, b2.Error{
StatusCode: 1, StatusCode: 1,
Code: "duplicate_bucket_name", Code: "duplicate_bucket_name",
}), minio.BucketAlreadyOwnedByYou{ }, minio.BucketAlreadyOwnedByYou{
Bucket: "bucket", Bucket: "bucket",
}, },
}, },
{ {
[]string{"bucket"}, errors.Trace(b2.Error{ []string{"bucket"}, b2.Error{
StatusCode: 1, StatusCode: 1,
Code: "bad_request", Code: "bad_request",
}), minio.BucketNotFound{ }, minio.BucketNotFound{
Bucket: "bucket", Bucket: "bucket",
}, },
}, },
{ {
[]string{"bucket", "object"}, errors.Trace(b2.Error{ []string{"bucket", "object"}, b2.Error{
StatusCode: 1, StatusCode: 1,
Code: "bad_request", Code: "bad_request",
}), minio.ObjectNameInvalid{ }, minio.ObjectNameInvalid{
Bucket: "bucket", Bucket: "bucket",
Object: "object", Object: "object",
}, },
}, },
{ {
[]string{"bucket"}, errors.Trace(b2.Error{ []string{"bucket"}, b2.Error{
StatusCode: 1, StatusCode: 1,
Code: "bad_bucket_id", Code: "bad_bucket_id",
}), minio.BucketNotFound{Bucket: "bucket"}, }, minio.BucketNotFound{Bucket: "bucket"},
}, },
{ {
[]string{"bucket", "object"}, errors.Trace(b2.Error{ []string{"bucket", "object"}, b2.Error{
StatusCode: 1, StatusCode: 1,
Code: "file_not_present", Code: "file_not_present",
}), minio.ObjectNotFound{ }, minio.ObjectNotFound{
Bucket: "bucket", Bucket: "bucket",
Object: "object", Object: "object",
}, },
}, },
{ {
[]string{"bucket", "object"}, errors.Trace(b2.Error{ []string{"bucket", "object"}, b2.Error{
StatusCode: 1, StatusCode: 1,
Code: "not_found", Code: "not_found",
}), minio.ObjectNotFound{ }, minio.ObjectNotFound{
Bucket: "bucket", Bucket: "bucket",
Object: "object", Object: "object",
}, },
}, },
{ {
[]string{"bucket"}, errors.Trace(b2.Error{ []string{"bucket"}, b2.Error{
StatusCode: 1, StatusCode: 1,
Code: "cannot_delete_non_empty_bucket", Code: "cannot_delete_non_empty_bucket",
}), minio.BucketNotEmpty{ }, minio.BucketNotEmpty{
Bucket: "bucket", Bucket: "bucket",
}, },
}, },
{ {
[]string{"bucket", "object", "uploadID"}, errors.Trace(b2.Error{ []string{"bucket", "object", "uploadID"}, b2.Error{
StatusCode: 1, StatusCode: 1,
Message: "No active upload for", Message: "No active upload for",
}), minio.InvalidUploadID{ }, minio.InvalidUploadID{
UploadID: "uploadID", UploadID: "uploadID",
}, },
}, },

@ -34,8 +34,8 @@ import (
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
@ -154,11 +154,13 @@ EXAMPLES:
func gcsGatewayMain(ctx *cli.Context) { func gcsGatewayMain(ctx *cli.Context) {
projectID := ctx.Args().First() projectID := ctx.Args().First()
if projectID == "" && os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" { if projectID == "" && os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" {
minio.ErrorIf(errGCSProjectIDNotFound, "project-id should be provided as argument or GOOGLE_APPLICATION_CREDENTIALS should be set with path to credentials.json") logger.LogIf(context.Background(), errGCSProjectIDNotFound)
cli.ShowCommandHelpAndExit(ctx, "gcs", 1) cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
} }
if projectID != "" && !isValidGCSProjectIDFormat(projectID) { if projectID != "" && !isValidGCSProjectIDFormat(projectID) {
minio.ErrorIf(errGCSInvalidProjectID, "Unable to start GCS gateway with %s", ctx.Args().First()) reqInfo := (&logger.ReqInfo{}).AppendTags("projectID", ctx.Args().First())
contxt := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(contxt, errGCSInvalidProjectID)
cli.ShowCommandHelpAndExit(ctx, "gcs", 1) cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
} }
@ -237,16 +239,6 @@ func gcsToObjectError(err error, params ...string) error {
return nil return nil
} }
e, ok := err.(*errors.Error)
if !ok {
// Code should be fixed if this function is called without doing errors.Trace()
// Else handling different situations in this function makes this function complicated.
minio.ErrorIf(err, "Expected type *Error")
return err
}
err = e.Cause
bucket := "" bucket := ""
object := "" object := ""
uploadID := "" uploadID := ""
@ -266,8 +258,7 @@ func gcsToObjectError(err error, params ...string) error {
err = minio.BucketNotFound{ err = minio.BucketNotFound{
Bucket: bucket, Bucket: bucket,
} }
e.Cause = err return err
return e
case "storage: object doesn't exist": case "storage: object doesn't exist":
if uploadID != "" { if uploadID != "" {
err = minio.InvalidUploadID{ err = minio.InvalidUploadID{
@ -279,21 +270,18 @@ func gcsToObjectError(err error, params ...string) error {
Object: object, Object: object,
} }
} }
e.Cause = err return err
return e
} }
googleAPIErr, ok := err.(*googleapi.Error) googleAPIErr, ok := err.(*googleapi.Error)
if !ok { if !ok {
// We don't interpret non-Minio errors, as Minio errors will // We don't interpret non-Minio errors, as Minio errors will
// have StatusCode to help convert them to object errors. // have StatusCode to help convert them to object errors.
e.Cause = err return err
return e
} }
if len(googleAPIErr.Errors) == 0 { if len(googleAPIErr.Errors) == 0 {
e.Cause = err return err
return e
} }
reason := googleAPIErr.Errors[0].Reason reason := googleAPIErr.Errors[0].Reason
@ -337,8 +325,7 @@ func gcsToObjectError(err error, params ...string) error {
err = fmt.Errorf("Unsupported error reason: %s", reason) err = fmt.Errorf("Unsupported error reason: %s", reason)
} }
e.Cause = err return err
return e
} }
// gcsProjectIDRegex defines a valid gcs project id format // gcsProjectIDRegex defines a valid gcs project id format
@ -381,7 +368,9 @@ func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(bucket string) {
attrs, err := it.Next() attrs, err := it.Next()
if err != nil { if err != nil {
if err != iterator.Done { if err != iterator.Done {
minio.ErrorIf(err, "Object listing error on bucket %s during purging of old files in minio.sys.tmp", bucket) reqInfo := &logger.ReqInfo{BucketName: bucket}
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
} }
return return
} }
@ -389,7 +378,9 @@ func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(bucket string) {
// Delete files older than 2 weeks. // Delete files older than 2 weeks.
err := l.client.Bucket(bucket).Object(attrs.Name).Delete(l.ctx) err := l.client.Bucket(bucket).Object(attrs.Name).Delete(l.ctx)
if err != nil { if err != nil {
minio.ErrorIf(err, "Unable to delete %s/%s during purging of old files in minio.sys.tmp", bucket, attrs.Name) reqInfo := &logger.ReqInfo{BucketName: bucket, ObjectName: attrs.Name}
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return return
} }
} }
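
The cleanup loops above run outside any HTTP request, so there is no request context to log with; instead a fresh context is seeded with the identifiers at hand. A sketch of that pattern, using the ReqInfo fields shown above:

package sketch

import (
	"context"

	"github.com/minio/minio/cmd/logger"
)

// logSweepError shows how the GCS background sweeps report failures:
// a fresh context is seeded with the bucket/object before logging.
func logSweepError(bucket, object string, err error) {
	reqInfo := &logger.ReqInfo{BucketName: bucket, ObjectName: object}
	ctx := logger.SetReqInfo(context.Background(), reqInfo)
	logger.LogIf(ctx, err)
}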
@ -404,7 +395,8 @@ func (l *gcsGateway) CleanupGCSMinioSysTmp() {
attrs, err := it.Next() attrs, err := it.Next()
if err != nil { if err != nil {
if err != iterator.Done { if err != iterator.Done {
minio.ErrorIf(err, "Bucket listing error during purging of old files in minio.sys.tmp") ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{})
logger.LogIf(ctx, err)
} }
break break
} }
@ -438,15 +430,16 @@ func (l *gcsGateway) MakeBucketWithLocation(ctx context.Context, bucket, locatio
err := bkt.Create(l.ctx, l.projectID, &storage.BucketAttrs{ err := bkt.Create(l.ctx, l.projectID, &storage.BucketAttrs{
Location: location, Location: location,
}) })
logger.LogIf(ctx, err)
return gcsToObjectError(errors.Trace(err), bucket) return gcsToObjectError(err, bucket)
} }
// GetBucketInfo - Get bucket metadata.. // GetBucketInfo - Get bucket metadata..
func (l *gcsGateway) GetBucketInfo(ctx context.Context, bucket string) (minio.BucketInfo, error) { func (l *gcsGateway) GetBucketInfo(ctx context.Context, bucket string) (minio.BucketInfo, error) {
attrs, err := l.client.Bucket(bucket).Attrs(l.ctx) attrs, err := l.client.Bucket(bucket).Attrs(l.ctx)
if err != nil { if err != nil {
return minio.BucketInfo{}, gcsToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return minio.BucketInfo{}, gcsToObjectError(err, bucket)
} }
return minio.BucketInfo{ return minio.BucketInfo{
@ -467,7 +460,8 @@ func (l *gcsGateway) ListBuckets(ctx context.Context) (buckets []minio.BucketInf
} }
if ierr != nil { if ierr != nil {
return buckets, gcsToObjectError(errors.Trace(ierr)) logger.LogIf(ctx, ierr)
return buckets, gcsToObjectError(ierr)
} }
buckets = append(buckets, minio.BucketInfo{ buckets = append(buckets, minio.BucketInfo{
@ -495,7 +489,8 @@ func (l *gcsGateway) DeleteBucket(ctx context.Context, bucket string) error {
break break
} }
if err != nil { if err != nil {
return gcsToObjectError(errors.Trace(err)) logger.LogIf(ctx, err)
return gcsToObjectError(err)
} }
if objAttrs.Prefix == minio.GatewayMinioSysTmp { if objAttrs.Prefix == minio.GatewayMinioSysTmp {
gcsMinioPathFound = true gcsMinioPathFound = true
@ -505,7 +500,8 @@ func (l *gcsGateway) DeleteBucket(ctx context.Context, bucket string) error {
break break
} }
if nonGCSMinioPathFound { if nonGCSMinioPathFound {
return gcsToObjectError(errors.Trace(minio.BucketNotEmpty{})) logger.LogIf(ctx, minio.BucketNotEmpty{})
return gcsToObjectError(minio.BucketNotEmpty{})
} }
if gcsMinioPathFound { if gcsMinioPathFound {
// Remove minio.sys.tmp before deleting the bucket. // Remove minio.sys.tmp before deleting the bucket.
@ -516,16 +512,19 @@ func (l *gcsGateway) DeleteBucket(ctx context.Context, bucket string) error {
break break
} }
if err != nil { if err != nil {
return gcsToObjectError(errors.Trace(err)) logger.LogIf(ctx, err)
return gcsToObjectError(err)
} }
err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(l.ctx) err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(l.ctx)
if err != nil { if err != nil {
return gcsToObjectError(errors.Trace(err)) logger.LogIf(ctx, err)
return gcsToObjectError(err)
} }
} }
} }
err := l.client.Bucket(bucket).Delete(l.ctx) err := l.client.Bucket(bucket).Delete(l.ctx)
return gcsToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket)
} }
func toGCSPageToken(name string) string { func toGCSPageToken(name string) string {
@ -607,7 +606,8 @@ func (l *gcsGateway) ListObjects(ctx context.Context, bucket string, prefix stri
break break
} }
if err != nil { if err != nil {
return minio.ListObjectsInfo{}, gcsToObjectError(errors.Trace(err), bucket, prefix) logger.LogIf(ctx, err)
return minio.ListObjectsInfo{}, gcsToObjectError(err, bucket, prefix)
} }
nextMarker = toGCSPageToken(attrs.Name) nextMarker = toGCSPageToken(attrs.Name)
@ -689,7 +689,8 @@ func (l *gcsGateway) ListObjectsV2(ctx context.Context, bucket, prefix, continua
} }
if err != nil { if err != nil {
return minio.ListObjectsV2Info{}, gcsToObjectError(errors.Trace(err), bucket, prefix) logger.LogIf(ctx, err)
return minio.ListObjectsV2Info{}, gcsToObjectError(err, bucket, prefix)
} }
if attrs.Prefix == minio.GatewayMinioSysTmp { if attrs.Prefix == minio.GatewayMinioSysTmp {
@ -733,18 +734,21 @@ func (l *gcsGateway) GetObject(ctx context.Context, bucket string, key string, s
// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
// otherwise gcs will just return object not exist in case of non-existing bucket // otherwise gcs will just return object not exist in case of non-existing bucket
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return gcsToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket)
} }
object := l.client.Bucket(bucket).Object(key) object := l.client.Bucket(bucket).Object(key)
r, err := object.NewRangeReader(l.ctx, startOffset, length) r, err := object.NewRangeReader(l.ctx, startOffset, length)
if err != nil { if err != nil {
return gcsToObjectError(errors.Trace(err), bucket, key) logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket, key)
} }
defer r.Close() defer r.Close()
if _, err := io.Copy(writer, r); err != nil { if _, err := io.Copy(writer, r); err != nil {
return gcsToObjectError(errors.Trace(err), bucket, key) logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket, key)
} }
return nil return nil
@ -771,12 +775,14 @@ func (l *gcsGateway) GetObjectInfo(ctx context.Context, bucket string, object st
// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
// otherwise gcs will just return object not exist in case of non-existing bucket // otherwise gcs will just return object not exist in case of non-existing bucket
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket)
} }
attrs, err := l.client.Bucket(bucket).Object(object).Attrs(l.ctx) attrs, err := l.client.Bucket(bucket).Object(object).Attrs(l.ctx)
if err != nil { if err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, object)
} }
return fromGCSAttrsToObjectInfo(attrs), nil return fromGCSAttrsToObjectInfo(attrs), nil
@ -787,7 +793,8 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, d
// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
// otherwise gcs will just return object not exist in case of non-existing bucket // otherwise gcs will just return object not exist in case of non-existing bucket
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil { if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket)
} }
object := l.client.Bucket(bucket).Object(key) object := l.client.Bucket(bucket).Object(key)
@ -801,7 +808,8 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, d
if _, err := io.Copy(w, data); err != nil { if _, err := io.Copy(w, data); err != nil {
// Close the object writer upon error. // Close the object writer upon error.
w.CloseWithError(err) w.CloseWithError(err)
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key) logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
} }
// Close the object writer upon success. // Close the object writer upon success.
@ -809,7 +817,8 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, d
attrs, err := object.Attrs(l.ctx) attrs, err := object.Attrs(l.ctx)
if err != nil { if err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key) logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
} }
return fromGCSAttrsToObjectInfo(attrs), nil return fromGCSAttrsToObjectInfo(attrs), nil
@ -827,7 +836,8 @@ func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject
attrs, err := copier.Run(l.ctx) attrs, err := copier.Run(l.ctx)
if err != nil { if err != nil {
return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), destBucket, destObject) logger.LogIf(ctx, err)
return minio.ObjectInfo{}, gcsToObjectError(err, destBucket, destObject)
} }
return fromGCSAttrsToObjectInfo(attrs), nil return fromGCSAttrsToObjectInfo(attrs), nil
@ -837,7 +847,8 @@ func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject
func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object string) error { func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object string) error {
err := l.client.Bucket(bucket).Object(object).Delete(l.ctx) err := l.client.Bucket(bucket).Object(object).Delete(l.ctx)
if err != nil { if err != nil {
return gcsToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket, object)
} }
return nil return nil
@ -863,7 +874,8 @@ func (l *gcsGateway) NewMultipartUpload(ctx context.Context, bucket string, key
bucket, bucket,
key, key,
}); err != nil { }); err != nil {
return "", gcsToObjectError(errors.Trace(err), bucket, key) logger.LogIf(ctx, err)
return "", gcsToObjectError(err, bucket, key)
} }
return uploadID, nil return uploadID, nil
} }
@ -883,7 +895,8 @@ func (l *gcsGateway) ListMultipartUploads(ctx context.Context, bucket string, pr
// an object layer compatible error upon any error. // an object layer compatible error upon any error.
func (l *gcsGateway) checkUploadIDExists(ctx context.Context, bucket string, key string, uploadID string) error { func (l *gcsGateway) checkUploadIDExists(ctx context.Context, bucket string, key string, uploadID string) error {
_, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(l.ctx) _, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(l.ctx)
return gcsToObjectError(errors.Trace(err), bucket, key, uploadID) logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket, key, uploadID)
} }
// PutObjectPart puts a part of object in bucket // PutObjectPart puts a part of object in bucket
@ -904,7 +917,8 @@ func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key strin
if _, err := io.Copy(w, data); err != nil { if _, err := io.Copy(w, data); err != nil {
// Make sure to close object writer upon error. // Make sure to close object writer upon error.
w.Close() w.Close()
return minio.PartInfo{}, gcsToObjectError(errors.Trace(err), bucket, key) logger.LogIf(ctx, err)
return minio.PartInfo{}, gcsToObjectError(err, bucket, key)
} }
// Make sure to close the object writer upon success. // Make sure to close the object writer upon success.
w.Close() w.Close()
@ -923,7 +937,7 @@ func (l *gcsGateway) ListObjectParts(ctx context.Context, bucket string, key str
} }
// Called by AbortMultipartUpload and CompleteMultipartUpload for cleaning up. // Called by AbortMultipartUpload and CompleteMultipartUpload for cleaning up.
func (l *gcsGateway) cleanupMultipartUpload(bucket, key, uploadID string) error { func (l *gcsGateway) cleanupMultipartUpload(ctx context.Context, bucket, key, uploadID string) error {
 	prefix := fmt.Sprintf("%s/%s/", gcsMinioMultipartPathV1, uploadID)
 	// iterate through all parts and delete them
@@ -935,7 +949,8 @@ func (l *gcsGateway) cleanupMultipartUpload(bucket, key, uploadID string) error
 			break
 		}
 		if err != nil {
-			return gcsToObjectError(errors.Trace(err), bucket, key)
+			logger.LogIf(ctx, err)
+			return gcsToObjectError(err, bucket, key)
 		}
 		object := l.client.Bucket(bucket).Object(attrs.Name)
@@ -951,7 +966,7 @@ func (l *gcsGateway) AbortMultipartUpload(ctx context.Context, bucket string, ke
 	if err := l.checkUploadIDExists(ctx, bucket, key, uploadID); err != nil {
 		return err
 	}
-	return l.cleanupMultipartUpload(bucket, key, uploadID)
+	return l.cleanupMultipartUpload(ctx, bucket, key, uploadID)
 }
 // CompleteMultipartUpload completes ongoing multipart upload and finalizes object
@@ -968,23 +983,27 @@ func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string,
 	partZeroAttrs, err := object.Attrs(l.ctx)
 	if err != nil {
-		return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key, uploadID)
+		logger.LogIf(ctx, err)
+		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key, uploadID)
 	}
 	r, err := object.NewReader(l.ctx)
 	if err != nil {
-		return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
+		logger.LogIf(ctx, err)
+		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
 	}
 	defer r.Close()
 	// Check version compatibility of the meta file before compose()
 	multipartMeta := gcsMultipartMetaV1{}
 	if err = json.NewDecoder(r).Decode(&multipartMeta); err != nil {
-		return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
+		logger.LogIf(ctx, err)
+		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
 	}
 	if multipartMeta.Version != gcsMinioMultipartMetaCurrentVersion {
-		return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(errGCSFormat), bucket, key)
+		logger.LogIf(ctx, errGCSFormat)
+		return minio.ObjectInfo{}, gcsToObjectError(errGCSFormat, bucket, key)
 	}
 	// Validate if the gcs.json stores valid entries for the bucket and key.
@@ -1001,7 +1020,8 @@ func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string,
 			uploadedPart.PartNumber, uploadedPart.ETag)))
 		partAttr, pErr := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, uploadedPart.PartNumber, uploadedPart.ETag)).Attrs(l.ctx)
 		if pErr != nil {
-			return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(pErr), bucket, key, uploadID)
+			logger.LogIf(ctx, pErr)
+			return minio.ObjectInfo{}, gcsToObjectError(pErr, bucket, key, uploadID)
 		}
 		partSizes[i] = partAttr.Size
 	}
@@ -1009,11 +1029,16 @@ func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string,
 	// Error out if parts except last part sizing < 5MiB.
 	for i, size := range partSizes[:len(partSizes)-1] {
 		if size < 5*humanize.MiByte {
-			return minio.ObjectInfo{}, errors.Trace(minio.PartTooSmall{
+			logger.LogIf(ctx, minio.PartTooSmall{
 				PartNumber: uploadedParts[i].PartNumber,
 				PartSize:   size,
 				PartETag:   uploadedParts[i].ETag,
 			})
+			return minio.ObjectInfo{}, minio.PartTooSmall{
+				PartNumber: uploadedParts[i].PartNumber,
+				PartSize:   size,
+				PartETag:   uploadedParts[i].ETag,
+			}
 		}
 	}
@@ -1040,7 +1065,8 @@ func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string,
 		composer.Metadata = partZeroAttrs.Metadata
 		if _, err = composer.Run(l.ctx); err != nil {
-			return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
+			logger.LogIf(ctx, err)
+			return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
 		}
 	}
@@ -1053,10 +1079,11 @@ func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string,
 	composer.Metadata = partZeroAttrs.Metadata
 	attrs, err := composer.Run(l.ctx)
 	if err != nil {
-		return minio.ObjectInfo{}, gcsToObjectError(errors.Trace(err), bucket, key)
+		logger.LogIf(ctx, err)
+		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
 	}
-	if err = l.cleanupMultipartUpload(bucket, key, uploadID); err != nil {
+	if err = l.cleanupMultipartUpload(ctx, bucket, key, uploadID); err != nil {
 		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
 	}
 	return fromGCSAttrsToObjectInfo(attrs), nil
 }
@@ -1075,16 +1102,19 @@ func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, policyI
 	prefix := bucket + "/*" // For all objects inside the bucket.
 	if len(policies) != 1 {
-		return errors.Trace(minio.NotImplemented{})
+		logger.LogIf(ctx, minio.NotImplemented{})
+		return minio.NotImplemented{}
 	}
 	if policies[0].Prefix != prefix {
-		return errors.Trace(minio.NotImplemented{})
+		logger.LogIf(ctx, minio.NotImplemented{})
+		return minio.NotImplemented{}
 	}
 	acl := l.client.Bucket(bucket).ACL()
 	if policies[0].Policy == policy.BucketPolicyNone {
 		if err := acl.Delete(l.ctx, storage.AllUsers); err != nil {
-			return gcsToObjectError(errors.Trace(err), bucket)
+			logger.LogIf(ctx, err)
+			return gcsToObjectError(err, bucket)
 		}
 		return nil
 	}
@@ -1096,11 +1126,13 @@ func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, policyI
 	case policy.BucketPolicyWriteOnly:
 		role = storage.RoleWriter
 	default:
-		return errors.Trace(minio.NotImplemented{})
+		logger.LogIf(ctx, minio.NotImplemented{})
+		return minio.NotImplemented{}
 	}
 	if err := acl.Set(l.ctx, storage.AllUsers, role); err != nil {
-		return gcsToObjectError(errors.Trace(err), bucket)
+		logger.LogIf(ctx, err)
+		return gcsToObjectError(err, bucket)
 	}
 	return nil
@@ -1110,7 +1142,8 @@ func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, policyI
 func (l *gcsGateway) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) {
 	rules, err := l.client.Bucket(bucket).ACL().List(l.ctx)
 	if err != nil {
-		return policy.BucketAccessPolicy{}, gcsToObjectError(errors.Trace(err), bucket)
+		logger.LogIf(ctx, err)
+		return policy.BucketAccessPolicy{}, gcsToObjectError(err, bucket)
 	}
 	policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
 	for _, r := range rules {
@@ -1126,7 +1159,8 @@ func (l *gcsGateway) GetBucketPolicy(ctx context.Context, bucket string) (policy
 	}
 	// Return NoSuchBucketPolicy error, when policy is not set
 	if len(policyInfo.Statements) == 0 {
-		return policy.BucketAccessPolicy{}, gcsToObjectError(errors.Trace(minio.PolicyNotFound{}), bucket)
+		logger.LogIf(ctx, minio.PolicyNotFound{})
+		return policy.BucketAccessPolicy{}, gcsToObjectError(minio.PolicyNotFound{}, bucket)
 	}
 	return policyInfo, nil
 }
@@ -1135,7 +1169,8 @@ func (l *gcsGateway) GetBucketPolicy(ctx context.Context, bucket string) (policy
 func (l *gcsGateway) DeleteBucketPolicy(ctx context.Context, bucket string) error {
 	// This only removes the storage.AllUsers policies
 	if err := l.client.Bucket(bucket).ACL().Delete(l.ctx, storage.AllUsers); err != nil {
-		return gcsToObjectError(errors.Trace(err), bucket)
+		logger.LogIf(ctx, err)
+		return gcsToObjectError(err, bucket)
 	}
 	return nil

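Note: every hunk in this file applies the same mechanical rewrite: the `errors.Trace(err)` wrapper is dropped and the error is emitted through `logger.LogIf(ctx, ...)` immediately before being returned bare. A minimal sketch of the resulting shape, assuming `logger.LogIf` is a no-op for nil errors, as its name suggests; `doSomething` is a placeholder, not a function from this commit:

```go
package example

import (
	"context"

	"github.com/minio/minio/cmd/logger"
)

// doSomething stands in for any GCS client call in the hunks above.
func doSomething() error { return nil }

// Before: return gcsToObjectError(errors.Trace(err), bucket, key)
// After:  log against the request context, then return the bare error.
func sketch(ctx context.Context) error {
	if err := doSomething(); err != nil {
		logger.LogIf(ctx, err) // records the failure with any tags carried by ctx
		return err             // callers now see the unwrapped error type
	}
	return nil
}
```

One consequence worth noting: because errors are no longer wrapped with call-site information, `cleanupMultipartUpload` must now receive the caller's `ctx` explicitly (see the `AbortMultipartUpload` and `CompleteMultipartUpload` hunks) so the log entry still carries request information.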
View File

@@ -24,7 +24,6 @@ import (
 	"reflect"
 	"testing"
-	"github.com/minio/minio/pkg/errors"
 	"google.golang.org/api/googleapi"
 	miniogo "github.com/minio/minio-go"
@@ -237,14 +236,14 @@ func TestGCSToObjectError(t *testing.T) {
 		},
 		{
 			[]string{"bucket"},
-			errors.Trace(fmt.Errorf("storage: bucket doesn't exist")),
+			fmt.Errorf("storage: bucket doesn't exist"),
 			minio.BucketNotFound{
 				Bucket: "bucket",
 			},
 		},
 		{
 			[]string{"bucket", "object"},
-			errors.Trace(fmt.Errorf("storage: object doesn't exist")),
+			fmt.Errorf("storage: object doesn't exist"),
 			minio.ObjectNotFound{
 				Bucket: "bucket",
 				Object: "object",
@@ -252,76 +251,76 @@ func TestGCSToObjectError(t *testing.T) {
 			},
 		},
 		{
 			[]string{"bucket", "object", "uploadID"},
-			errors.Trace(fmt.Errorf("storage: object doesn't exist")),
+			fmt.Errorf("storage: object doesn't exist"),
 			minio.InvalidUploadID{
 				UploadID: "uploadID",
 			},
 		},
 		{
 			[]string{},
-			errors.Trace(fmt.Errorf("Unknown error")),
+			fmt.Errorf("Unknown error"),
 			fmt.Errorf("Unknown error"),
 		},
 		{
 			[]string{"bucket", "object"},
-			errors.Trace(&googleapi.Error{
+			&googleapi.Error{
 				Message: "No list of errors",
-			}),
+			},
 			&googleapi.Error{
 				Message: "No list of errors",
 			},
 		},
 		{
 			[]string{"bucket", "object"},
-			errors.Trace(&googleapi.Error{
+			&googleapi.Error{
 				Errors: []googleapi.ErrorItem{{
 					Reason:  "conflict",
 					Message: "You already own this bucket. Please select another name.",
 				}},
-			}),
+			},
 			minio.BucketAlreadyOwnedByYou{
 				Bucket: "bucket",
 			},
 		},
 		{
 			[]string{"bucket", "object"},
-			errors.Trace(&googleapi.Error{
+			&googleapi.Error{
 				Errors: []googleapi.ErrorItem{{
 					Reason:  "conflict",
 					Message: "Sorry, that name is not available. Please try a different one.",
 				}},
-			}),
+			},
 			minio.BucketAlreadyExists{
 				Bucket: "bucket",
 			},
 		},
 		{
 			[]string{"bucket", "object"},
-			errors.Trace(&googleapi.Error{
+			&googleapi.Error{
 				Errors: []googleapi.ErrorItem{{
 					Reason: "conflict",
 				}},
-			}),
+			},
 			minio.BucketNotEmpty{Bucket: "bucket"},
 		},
 		{
 			[]string{"bucket"},
-			errors.Trace(&googleapi.Error{
+			&googleapi.Error{
 				Errors: []googleapi.ErrorItem{{
 					Reason: "notFound",
 				}},
-			}),
+			},
 			minio.BucketNotFound{
 				Bucket: "bucket",
 			},
 		},
 		{
 			[]string{"bucket", "object"},
-			errors.Trace(&googleapi.Error{
+			&googleapi.Error{
 				Errors: []googleapi.ErrorItem{{
 					Reason: "notFound",
 				}},
-			}),
+			},
 			minio.ObjectNotFound{
 				Bucket: "bucket",
 				Object: "object",
@@ -329,22 +328,22 @@ func TestGCSToObjectError(t *testing.T) {
 			},
 		},
 		{
 			[]string{"bucket"},
-			errors.Trace(&googleapi.Error{
+			&googleapi.Error{
 				Errors: []googleapi.ErrorItem{{
 					Reason: "invalid",
 				}},
-			}),
+			},
 			minio.BucketNameInvalid{
 				Bucket: "bucket",
 			},
 		},
 		{
 			[]string{"bucket", "object"},
-			errors.Trace(&googleapi.Error{
+			&googleapi.Error{
 				Errors: []googleapi.ErrorItem{{
 					Reason: "forbidden",
 				}},
-			}),
+			},
 			minio.PrefixAccessDenied{
 				Bucket: "bucket",
 				Object: "object",
@@ -352,11 +351,11 @@ func TestGCSToObjectError(t *testing.T) {
 			},
 		},
 		{
 			[]string{"bucket", "object"},
-			errors.Trace(&googleapi.Error{
+			&googleapi.Error{
 				Errors: []googleapi.ErrorItem{{
 					Reason: "keyInvalid",
 				}},
-			}),
+			},
 			minio.PrefixAccessDenied{
 				Bucket: "bucket",
 				Object: "object",
@@ -364,11 +363,11 @@ func TestGCSToObjectError(t *testing.T) {
 			},
 		},
 		{
 			[]string{"bucket", "object"},
-			errors.Trace(&googleapi.Error{
+			&googleapi.Error{
 				Errors: []googleapi.ErrorItem{{
 					Reason: "required",
 				}},
-			}),
+			},
 			minio.PrefixAccessDenied{
 				Bucket: "bucket",
 				Object: "object",
@@ -376,11 +375,11 @@ func TestGCSToObjectError(t *testing.T) {
 			},
 		},
 		{
 			[]string{"bucket", "object"},
-			errors.Trace(&googleapi.Error{
+			&googleapi.Error{
 				Errors: []googleapi.ErrorItem{{
 					Reason: "unknown",
 				}},
-			}),
+			},
 			fmt.Errorf("Unsupported error reason: unknown"),
 		},
 	}

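Note: with the wrapper gone, `gcsToObjectError` now receives raw errors, so the table above only drops the `errors.Trace(...)` calls around its inputs; the expected outputs are unchanged. A hypothetical single-case version of the same check, not part of this commit:

```go
package gcs

import (
	"fmt"
	"reflect"
	"testing"

	minio "github.com/minio/minio/cmd"
)

// TestBareBucketNotFound mirrors one table entry above: a bare input error
// still maps to the same typed object-layer error.
func TestBareBucketNotFound(t *testing.T) {
	got := gcsToObjectError(fmt.Errorf("storage: bucket doesn't exist"), "bucket")
	want := minio.BucketNotFound{Bucket: "bucket"}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("gcsToObjectError() = %v, want %v", got, want)
	}
}
```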
View File

@@ -33,8 +33,8 @@ import (
 	"github.com/joyent/triton-go/storage"
 	"github.com/minio/cli"
 	minio "github.com/minio/minio/cmd"
+	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/auth"
-	"github.com/minio/minio/pkg/errors"
 	"github.com/minio/minio/pkg/hash"
 )
@@ -118,7 +118,7 @@ func mantaGatewayMain(ctx *cli.Context) {
 	// Validate gateway arguments.
 	host := ctx.Args().First()
 	// Validate gateway arguments.
-	minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
+	logger.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
 	minio.StartGateway(ctx, &Manta{host})
 }
@@ -139,6 +139,7 @@ func (g *Manta) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, erro
 	var err error
 	var signer authentication.Signer
 	var endpoint = defaultMantaURL
+	ctx := context.Background()
 	if g.host != "" {
 		endpoint, _, err = minio.ParseGatewayEndpoint(g.host)
@@ -163,7 +164,8 @@
 		}
 		signer, err = authentication.NewSSHAgentSigner(input)
 		if err != nil {
-			return nil, errors.Trace(err)
+			logger.LogIf(ctx, err)
+			return nil, err
 		}
 	} else {
 		var keyBytes []byte
@@ -200,7 +202,8 @@
 		signer, err = authentication.NewPrivateKeySigner(input)
 		if err != nil {
-			return nil, errors.Trace(err)
+			logger.LogIf(ctx, err)
+			return nil, err
 		}
 	}
@@ -352,7 +355,8 @@ func (t *tritonObjects) ListObjects(ctx context.Context, bucket, prefix, marker,
 		if terrors.IsResourceNotFoundError(err) {
 			return result, nil
 		}
-		return result, errors.Trace(err)
+		logger.LogIf(ctx, err)
+		return result, err
 	}
 	for _, obj := range objs.Entries {
@@ -362,7 +366,8 @@
 			input.DirectoryName = path.Join(mantaRoot, bucket, prefix)
 			objs, err = t.client.Dir().List(ctx, input)
 			if err != nil {
-				return result, errors.Trace(err)
+				logger.LogIf(ctx, err)
+				return result, err
 			}
 			break
 		}
@@ -428,7 +433,8 @@ func (t *tritonObjects) ListObjectsV2(ctx context.Context, bucket, prefix, conti
 		if terrors.IsResourceNotFoundError(err) {
 			return result, nil
 		}
-		return result, errors.Trace(err)
+		logger.LogIf(ctx, err)
+		return result, err
 	}
 	for _, obj := range objs.Entries {
@@ -436,7 +442,8 @@
 			input.DirectoryName = path.Join(mantaRoot, bucket, prefix)
 			objs, err = t.client.Dir().List(ctx, input)
 			if err != nil {
-				return result, errors.Trace(err)
+				logger.LogIf(ctx, err)
+				return result, err
 			}
 			break
 		}
@@ -479,7 +486,8 @@ func (t *tritonObjects) ListObjectsV2(ctx context.Context, bucket, prefix, conti
 func (t *tritonObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
 	// Start offset cannot be negative.
 	if startOffset < 0 {
-		return errors.Trace(fmt.Errorf("Unexpected error"))
+		logger.LogIf(ctx, fmt.Errorf("Unexpected error"))
+		return fmt.Errorf("Unexpected error")
 	}
 	output, err := t.client.Objects().Get(ctx, &storage.GetObjectInput{
@@ -555,11 +563,13 @@ func (t *tritonObjects) PutObject(ctx context.Context, bucket, object string, da
 		ObjectReader: dummySeeker{data},
 		ForceInsert:  true,
 	}); err != nil {
-		return objInfo, errors.Trace(err)
+		logger.LogIf(ctx, err)
+		return objInfo, err
 	}
 	if err = data.Verify(); err != nil {
 		t.DeleteObject(ctx, bucket, object)
-		return objInfo, errors.Trace(err)
+		logger.LogIf(ctx, err)
+		return objInfo, err
 	}
 	return t.GetObjectInfo(ctx, bucket, object)
@@ -574,7 +584,8 @@ func (t *tritonObjects) CopyObject(ctx context.Context, srcBucket, srcObject, de
 		SourcePath: path.Join(mantaRoot, srcBucket, srcObject),
 		LinkPath:   path.Join(mantaRoot, destBucket, destObject),
 	}); err != nil {
-		return objInfo, errors.Trace(err)
+		logger.LogIf(ctx, err)
+		return objInfo, err
 	}
 	return t.GetObjectInfo(ctx, destBucket, destObject)
@@ -587,7 +598,8 @@ func (t *tritonObjects) DeleteObject(ctx context.Context, bucket, object string)
 	if err := t.client.Objects().Delete(ctx, &storage.DeleteObjectInput{
 		ObjectPath: path.Join(mantaRoot, bucket, object),
 	}); err != nil {
-		return errors.Trace(err)
+		logger.LogIf(ctx, err)
+		return err
 	}
 	return nil

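Note: `NewGatewayLayer` runs during gateway setup, before any S3 request exists, so this file introduces `ctx := context.Background()` there solely to give `logger.LogIf` a context; the `tritonObjects` methods below it already receive a per-request `ctx` through the object-layer interface. A sketch of that setup-path pattern, with `connect` as a placeholder for the signer/client initialization:

```go
package example

import (
	"context"

	"github.com/minio/minio/cmd/logger"
)

// connect stands in for signer or client initialization; placeholder only.
func connect() error { return nil }

// Setup path: no request in flight, so a background context is used and the
// log entry carries no request-scoped tags.
func newLayerSketch() error {
	ctx := context.Background()
	if err := connect(); err != nil {
		logger.LogIf(ctx, err)
		return err
	}
	return nil
}
```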
View File

@@ -32,8 +32,8 @@ import (
 	"github.com/minio/cli"
 	"github.com/minio/minio-go/pkg/policy"
 	minio "github.com/minio/minio/cmd"
+	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/auth"
-	"github.com/minio/minio/pkg/errors"
 	"github.com/minio/minio/pkg/hash"
 )
@@ -113,7 +113,7 @@ func ossGatewayMain(ctx *cli.Context) {
 	// Validate gateway arguments.
 	host := ctx.Args().First()
-	minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
+	logger.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
 	minio.StartGateway(ctx, &OSS{host})
 }
@@ -161,7 +161,7 @@ func (g *OSS) Production() bool {
 // `X-Amz-Meta-` prefix and converted into `X-Oss-Meta-`.
 //
 // Header names are canonicalized as in http.Header.
-func appendS3MetaToOSSOptions(opts []oss.Option, s3Metadata map[string]string) ([]oss.Option, error) {
+func appendS3MetaToOSSOptions(ctx context.Context, opts []oss.Option, s3Metadata map[string]string) ([]oss.Option, error) {
 	if opts == nil {
 		opts = make([]oss.Option, 0, len(s3Metadata))
 	}
@@ -173,7 +173,8 @@ func appendS3MetaToOSSOptions(opts []oss.Option, s3Metadata map[string]string) (
 			metaKey := k[len("X-Amz-Meta-"):]
 			// NOTE(timonwong): OSS won't allow headers with underscore(_).
 			if strings.Contains(metaKey, "_") {
-				return nil, errors.Trace(minio.UnsupportedMetadata{})
+				logger.LogIf(ctx, minio.UnsupportedMetadata{})
+				return nil, minio.UnsupportedMetadata{}
 			}
 			opts = append(opts, oss.Meta(metaKey, v))
 		case k == "X-Amz-Acl":
@@ -271,15 +272,6 @@ func ossToObjectError(err error, params ...string) error {
 		return nil
 	}
-	e, ok := err.(*errors.Error)
-	if !ok {
-		// Code should be fixed if this function is called without doing errors.Trace()
-		// Else handling different situations in this function makes this function complicated.
-		minio.ErrorIf(err, "Expected type *Error")
-		return err
-	}
-	err = e.Cause
 	bucket := ""
 	object := ""
 	uploadID := ""
@@ -298,7 +290,7 @@
 	if !ok {
 		// We don't interpret non OSS errors. As oss errors will
 		// have StatusCode to help to convert to object errors.
-		return e
+		return err
 	}
 	switch ossErr.Code {
@@ -330,8 +322,7 @@
 		err = minio.InvalidPart{}
 	}
-	e.Cause = err
-	return e
+	return err
 }
 // ossObjects implements gateway for Aliyun Object Storage Service.
@@ -366,22 +357,26 @@ func ossIsValidBucketName(bucket string) bool {
 // MakeBucketWithLocation creates a new container on OSS backend.
 func (l *ossObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
 	if !ossIsValidBucketName(bucket) {
-		return errors.Trace(minio.BucketNameInvalid{Bucket: bucket})
+		logger.LogIf(ctx, minio.BucketNameInvalid{Bucket: bucket})
+		return minio.BucketNameInvalid{Bucket: bucket}
 	}
 	err := l.Client.CreateBucket(bucket)
-	return ossToObjectError(errors.Trace(err), bucket)
+	logger.LogIf(ctx, err)
+	return ossToObjectError(err, bucket)
 }
 // ossGeBucketInfo gets bucket metadata.
-func ossGeBucketInfo(client *oss.Client, bucket string) (bi minio.BucketInfo, err error) {
+func ossGeBucketInfo(ctx context.Context, client *oss.Client, bucket string) (bi minio.BucketInfo, err error) {
 	if !ossIsValidBucketName(bucket) {
-		return bi, errors.Trace(minio.BucketNameInvalid{Bucket: bucket})
+		logger.LogIf(ctx, minio.BucketNameInvalid{Bucket: bucket})
+		return bi, minio.BucketNameInvalid{Bucket: bucket}
 	}
 	bgir, err := client.GetBucketInfo(bucket)
 	if err != nil {
-		return bi, ossToObjectError(errors.Trace(err), bucket)
+		logger.LogIf(ctx, err)
+		return bi, ossToObjectError(err, bucket)
 	}
 	return minio.BucketInfo{
@@ -392,7 +387,7 @@ func ossGeBucketInfo(client *oss.Client, bucket string) (bi minio.BucketInfo, er
 // GetBucketInfo gets bucket metadata.
 func (l *ossObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) {
-	return ossGeBucketInfo(l.Client, bucket)
+	return ossGeBucketInfo(ctx, l.Client, bucket)
 }
 // ListBuckets lists all OSS buckets.
@@ -401,7 +396,8 @@ func (l *ossObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInf
 	for {
 		lbr, err := l.Client.ListBuckets(marker)
 		if err != nil {
-			return nil, ossToObjectError(errors.Trace(err))
+			logger.LogIf(ctx, err)
+			return nil, ossToObjectError(err)
 		}
 		for _, bi := range lbr.Buckets {
@@ -424,7 +420,8 @@
 func (l *ossObjects) DeleteBucket(ctx context.Context, bucket string) error {
 	err := l.Client.DeleteBucket(bucket)
 	if err != nil {
-		return ossToObjectError(errors.Trace(err), bucket)
+		logger.LogIf(ctx, err)
+		return ossToObjectError(err, bucket)
 	}
 	return nil
 }
@@ -462,10 +459,11 @@ func fromOSSClientListObjectsResult(bucket string, lor oss.ListObjectsResult) mi
 }
 // ossListObjects lists all blobs in OSS bucket filtered by prefix.
-func ossListObjects(client *oss.Client, bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
+func ossListObjects(ctx context.Context, client *oss.Client, bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
 	buck, err := client.Bucket(bucket)
 	if err != nil {
-		return loi, ossToObjectError(errors.Trace(err), bucket)
+		logger.LogIf(ctx, err)
+		return loi, ossToObjectError(err, bucket)
 	}
 	// maxKeys should default to 1000 or less.
@@ -475,19 +473,20 @@
 	lor, err := buck.ListObjects(oss.Prefix(prefix), oss.Marker(marker), oss.Delimiter(delimiter), oss.MaxKeys(maxKeys))
 	if err != nil {
-		return loi, ossToObjectError(errors.Trace(err), bucket)
+		logger.LogIf(ctx, err)
+		return loi, ossToObjectError(err, bucket)
 	}
 	return fromOSSClientListObjectsResult(bucket, lor), nil
 }
 // ossListObjectsV2 lists all blobs in OSS bucket filtered by prefix.
-func ossListObjectsV2(client *oss.Client, bucket, prefix, continuationToken, delimiter string, maxKeys int,
+func ossListObjectsV2(ctx context.Context, client *oss.Client, bucket, prefix, continuationToken, delimiter string, maxKeys int,
 	fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) {
 	// fetchOwner and startAfter are not supported and unused.
 	marker := continuationToken
-	resultV1, err := ossListObjects(client, bucket, prefix, marker, delimiter, maxKeys)
+	resultV1, err := ossListObjects(ctx, client, bucket, prefix, marker, delimiter, maxKeys)
 	if err != nil {
 		return loi, err
 	}
@@ -503,13 +502,13 @@
 // ListObjects lists all blobs in OSS bucket filtered by prefix.
 func (l *ossObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
-	return ossListObjects(l.Client, bucket, prefix, marker, delimiter, maxKeys)
+	return ossListObjects(ctx, l.Client, bucket, prefix, marker, delimiter, maxKeys)
 }
 // ListObjectsV2 lists all blobs in OSS bucket filtered by prefix
 func (l *ossObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int,
 	fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
-	return ossListObjectsV2(l.Client, bucket, prefix, continuationToken, delimiter, maxKeys, fetchOwner, startAfter)
+	return ossListObjectsV2(ctx, l.Client, bucket, prefix, continuationToken, delimiter, maxKeys, fetchOwner, startAfter)
 }
 // ossGetObject reads an object on OSS. Supports additional
@@ -518,14 +517,16 @@
 //
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
-func ossGetObject(client *oss.Client, bucket, key string, startOffset, length int64, writer io.Writer, etag string) error {
+func ossGetObject(ctx context.Context, client *oss.Client, bucket, key string, startOffset, length int64, writer io.Writer, etag string) error {
 	if length < 0 && length != -1 {
-		return ossToObjectError(errors.Trace(fmt.Errorf("Invalid argument")), bucket, key)
+		logger.LogIf(ctx, fmt.Errorf("Invalid argument"))
+		return ossToObjectError(fmt.Errorf("Invalid argument"), bucket, key)
 	}
 	bkt, err := client.Bucket(bucket)
 	if err != nil {
-		return ossToObjectError(errors.Trace(err), bucket, key)
+		logger.LogIf(ctx, err)
+		return ossToObjectError(err, bucket, key)
 	}
 	var opts []oss.Option
@@ -535,12 +536,14 @@
 	object, err := bkt.GetObject(key, opts...)
 	if err != nil {
-		return ossToObjectError(errors.Trace(err), bucket, key)
+		logger.LogIf(ctx, err)
+		return ossToObjectError(err, bucket, key)
 	}
 	defer object.Close()
 	if _, err := io.Copy(writer, object); err != nil {
-		return ossToObjectError(errors.Trace(err), bucket, key)
+		logger.LogIf(ctx, err)
+		return ossToObjectError(err, bucket, key)
 	}
 	return nil
 }
@@ -552,7 +555,7 @@
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
 func (l *ossObjects) GetObject(ctx context.Context, bucket, key string, startOffset, length int64, writer io.Writer, etag string) error {
-	return ossGetObject(l.Client, bucket, key, startOffset, length, writer, etag)
+	return ossGetObject(ctx, l.Client, bucket, key, startOffset, length, writer, etag)
 }
 func translatePlainError(err error) error {
@@ -569,15 +572,17 @@
 }
 // ossGetObjectInfo reads object info and replies back ObjectInfo.
-func ossGetObjectInfo(client *oss.Client, bucket, object string) (objInfo minio.ObjectInfo, err error) {
+func ossGetObjectInfo(ctx context.Context, client *oss.Client, bucket, object string) (objInfo minio.ObjectInfo, err error) {
 	bkt, err := client.Bucket(bucket)
 	if err != nil {
-		return objInfo, ossToObjectError(errors.Trace(err), bucket, object)
+		logger.LogIf(ctx, err)
+		return objInfo, ossToObjectError(err, bucket, object)
 	}
 	header, err := bkt.GetObjectDetailedMeta(object)
 	if err != nil {
-		return objInfo, ossToObjectError(errors.Trace(translatePlainError(err)), bucket, object)
+		logger.LogIf(ctx, translatePlainError(err))
+		return objInfo, ossToObjectError(translatePlainError(err), bucket, object)
 	}
 	// Build S3 metadata from OSS metadata
@@ -600,40 +605,43 @@
 // GetObjectInfo reads object info and replies back ObjectInfo.
 func (l *ossObjects) GetObjectInfo(ctx context.Context, bucket, object string) (objInfo minio.ObjectInfo, err error) {
-	return ossGetObjectInfo(l.Client, bucket, object)
+	return ossGetObjectInfo(ctx, l.Client, bucket, object)
 }
 // ossPutObject creates a new object with the incoming data.
-func ossPutObject(client *oss.Client, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
+func ossPutObject(ctx context.Context, client *oss.Client, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
 	bkt, err := client.Bucket(bucket)
 	if err != nil {
-		return objInfo, ossToObjectError(errors.Trace(err), bucket, object)
+		logger.LogIf(ctx, err)
+		return objInfo, ossToObjectError(err, bucket, object)
 	}
 	// Build OSS metadata
-	opts, err := appendS3MetaToOSSOptions(nil, metadata)
+	opts, err := appendS3MetaToOSSOptions(ctx, nil, metadata)
 	if err != nil {
 		return objInfo, ossToObjectError(err, bucket, object)
 	}
 	err = bkt.PutObject(object, data, opts...)
 	if err != nil {
-		return objInfo, ossToObjectError(errors.Trace(err), bucket, object)
+		logger.LogIf(ctx, err)
+		return objInfo, ossToObjectError(err, bucket, object)
 	}
-	return ossGetObjectInfo(client, bucket, object)
+	return ossGetObjectInfo(ctx, client, bucket, object)
 }
 // PutObject creates a new object with the incoming data.
 func (l *ossObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
-	return ossPutObject(l.Client, bucket, object, data, metadata)
+	return ossPutObject(ctx, l.Client, bucket, object, data, metadata)
 }
 // CopyObject copies an object from source bucket to a destination bucket.
 func (l *ossObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo minio.ObjectInfo) (objInfo minio.ObjectInfo, err error) {
 	bkt, err := l.Client.Bucket(srcBucket)
 	if err != nil {
-		return objInfo, ossToObjectError(errors.Trace(err), srcBucket, srcObject)
+		logger.LogIf(ctx, err)
+		return objInfo, ossToObjectError(err, srcBucket, srcObject)
 	}
 	opts := make([]oss.Option, 0, len(srcInfo.UserDefined)+1)
@@ -644,13 +652,14 @@
 	opts = append(opts, oss.MetadataDirective(oss.MetaReplace))
 	// Build OSS metadata
-	opts, err = appendS3MetaToOSSOptions(opts, srcInfo.UserDefined)
+	opts, err = appendS3MetaToOSSOptions(ctx, opts, srcInfo.UserDefined)
 	if err != nil {
 		return objInfo, ossToObjectError(err, srcBucket, srcObject)
 	}
 	if _, err = bkt.CopyObjectTo(dstBucket, dstObject, srcObject, opts...); err != nil {
-		return objInfo, ossToObjectError(errors.Trace(err), srcBucket, srcObject)
+		logger.LogIf(ctx, err)
+		return objInfo, ossToObjectError(err, srcBucket, srcObject)
 	}
 	return l.GetObjectInfo(ctx, dstBucket, dstObject)
 }
@@ -659,12 +668,14 @@
 func (l *ossObjects) DeleteObject(ctx context.Context, bucket, object string) error {
 	bkt, err := l.Client.Bucket(bucket)
 	if err != nil {
-		return ossToObjectError(errors.Trace(err), bucket, object)
+		logger.LogIf(ctx, err)
+		return ossToObjectError(err, bucket, object)
 	}
 	err = bkt.DeleteObject(object)
 	if err != nil {
-		return ossToObjectError(errors.Trace(err), bucket, object)
+		logger.LogIf(ctx, err)
+		return ossToObjectError(err, bucket, object)
 	}
 	return nil
 }
@@ -701,13 +712,15 @@ func fromOSSClientListMultipartsInfo(lmur oss.ListMultipartUploadResult) minio.L
 func (l *ossObjects) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, err error) {
 	bkt, err := l.Client.Bucket(bucket)
 	if err != nil {
-		return lmi, ossToObjectError(errors.Trace(err), bucket)
+		logger.LogIf(ctx, err)
+		return lmi, ossToObjectError(err, bucket)
 	}
 	lmur, err := bkt.ListMultipartUploads(oss.Prefix(prefix), oss.KeyMarker(keyMarker), oss.UploadIDMarker(uploadIDMarker),
 		oss.Delimiter(delimiter), oss.MaxUploads(maxUploads))
 	if err != nil {
-		return lmi, ossToObjectError(errors.Trace(err), bucket)
+		logger.LogIf(ctx, err)
+		return lmi, ossToObjectError(err, bucket)
 	}
 	return fromOSSClientListMultipartsInfo(lmur), nil
@@ -717,18 +730,20 @@
 func (l *ossObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
 	bkt, err := l.Client.Bucket(bucket)
 	if err != nil {
-		return uploadID, ossToObjectError(errors.Trace(err), bucket, object)
+		logger.LogIf(ctx, err)
+		return uploadID, ossToObjectError(err, bucket, object)
 	}
 	// Build OSS metadata
-	opts, err := appendS3MetaToOSSOptions(nil, metadata)
+	opts, err := appendS3MetaToOSSOptions(ctx, nil, metadata)
 	if err != nil {
 		return uploadID, ossToObjectError(err, bucket, object)
 	}
 	lmur, err := bkt.InitiateMultipartUpload(object, opts...)
 	if err != nil {
-		return uploadID, ossToObjectError(errors.Trace(err), bucket, object)
+		logger.LogIf(ctx, err)
+		return uploadID, ossToObjectError(err, bucket, object)
 	}
 	return lmur.UploadID, nil
@@ -738,7 +753,8 @@
 func (l *ossObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (pi minio.PartInfo, err error) {
 	bkt, err := l.Client.Bucket(bucket)
 	if err != nil {
-		return pi, ossToObjectError(errors.Trace(err), bucket, object)
+		logger.LogIf(ctx, err)
+		return pi, ossToObjectError(err, bucket, object)
 	}
 	imur := oss.InitiateMultipartUploadResult{
@@ -749,7 +765,8 @@
 	size := data.Size()
 	up, err := bkt.UploadPart(imur, data, size, partID)
 	if err != nil {
-		return pi, ossToObjectError(errors.Trace(err), bucket, object)
+		logger.LogIf(ctx, err)
+		return pi, ossToObjectError(err, bucket, object)
 	}
 	return minio.PartInfo{
@@ -820,11 +837,12 @@ func (l *ossObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d
 	bkt, err := l.Client.Bucket(destBucket)
 	if err != nil {
-		return p, ossToObjectError(errors.Trace(err), destBucket)
+		logger.LogIf(ctx, err)
+		return p, ossToObjectError(err, destBucket)
 	}
 	// Build OSS metadata
-	opts, err := appendS3MetaToOSSOptions(nil, srcInfo.UserDefined)
+	opts, err := appendS3MetaToOSSOptions(ctx, nil, srcInfo.UserDefined)
 	if err != nil {
 		return p, ossToObjectError(err, srcBucket, srcObject)
 	}
@@ -835,7 +853,8 @@
 	}, srcBucket, srcObject, startOffset, length, partID, opts...)
 	if err != nil {
-		return p, ossToObjectError(errors.Trace(err), srcBucket, srcObject)
+		logger.LogIf(ctx, err)
+		return p, ossToObjectError(err, srcBucket, srcObject)
 	}
 	p.PartNumber = completePart.PartNumber
@@ -847,7 +866,8 @@
 func (l *ossObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi minio.ListPartsInfo, err error) {
 	lupr, err := ossListObjectParts(l.Client, bucket, object, uploadID, partNumberMarker, maxParts)
 	if err != nil {
-		return lpi, ossToObjectError(errors.Trace(err), bucket, object, uploadID)
+		logger.LogIf(ctx, err)
+		return lpi, ossToObjectError(err, bucket, object, uploadID)
 	}
 	return fromOSSClientListPartsInfo(lupr, partNumberMarker), nil
@@ -857,7 +877,8 @@
 func (l *ossObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
 	bkt, err := l.Client.Bucket(bucket)
 	if err != nil {
-		return ossToObjectError(errors.Trace(err), bucket, object)
+		logger.LogIf(ctx, err)
+		return ossToObjectError(err, bucket, object)
 	}
 	err = bkt.AbortMultipartUpload(oss.InitiateMultipartUploadResult{
@@ -866,7 +887,8 @@
 		UploadID: uploadID,
 	})
 	if err != nil {
-		return ossToObjectError(errors.Trace(err), bucket, object)
+		logger.LogIf(ctx, err)
+		return ossToObjectError(err, bucket, object)
 	}
 	return nil
 }
@@ -876,7 +898,8 @@ func (l *ossObjects) CompleteMultipartUpload(ctx context.Context, bucket, object
 	client := l.Client
 	bkt, err := client.Bucket(bucket)
 	if err != nil {
-		return oi, ossToObjectError(errors.Trace(err), bucket, object)
+		logger.LogIf(ctx, err)
+		return oi, ossToObjectError(err, bucket, object)
 	}
 	// Error out if uploadedParts except last part sizing < 5MiB.
@@ -886,7 +909,8 @@
 	for lupr.IsTruncated {
 		lupr, err = ossListObjectParts(client, bucket, object, uploadID, partNumberMarker, ossMaxParts)
 		if err != nil {
-			return oi, ossToObjectError(errors.Trace(err), bucket, object, uploadID)
+			logger.LogIf(ctx, err)
+			return oi, ossToObjectError(err, bucket, object, uploadID)
 		}
 		uploadedParts := lupr.UploadedParts
@@ -900,11 +924,16 @@
 		for _, part := range uploadedParts {
 			if part.Size < ossS3MinPartSize {
-				return oi, errors.Trace(minio.PartTooSmall{
+				logger.LogIf(ctx, minio.PartTooSmall{
 					PartNumber: part.PartNumber,
 					PartSize:   int64(part.Size),
 					PartETag:   minio.ToS3ETag(part.ETag),
 				})
+				return oi, minio.PartTooSmall{
+					PartNumber: part.PartNumber,
+					PartSize:   int64(part.Size),
+					PartETag:   minio.ToS3ETag(part.ETag),
+				}
 			}
 		}
@@ -926,7 +955,8 @@
 	_, err = bkt.CompleteMultipartUpload(imur, parts)
 	if err != nil {
-		return oi, ossToObjectError(errors.Trace(err), bucket, object)
+		logger.LogIf(ctx, err)
+		return oi, ossToObjectError(err, bucket, object)
 	}
 	return l.GetObjectInfo(ctx, bucket, object)
@@ -940,13 +970,15 @@
 func (l *ossObjects) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error {
 	bucketPolicies := policy.GetPolicies(policyInfo.Statements, bucket, "")
 	if len(bucketPolicies) != 1 {
-		return errors.Trace(minio.NotImplemented{})
+		logger.LogIf(ctx, minio.NotImplemented{})
+		return minio.NotImplemented{}
 	}
 	prefix := bucket + "/*" // For all objects inside the bucket.
 	for policyPrefix, bucketPolicy := range bucketPolicies {
 		if policyPrefix != prefix {
-			return errors.Trace(minio.NotImplemented{})
+			logger.LogIf(ctx, minio.NotImplemented{})
+			return minio.NotImplemented{}
 		}
 		var acl oss.ACLType
@@ -958,12 +990,14 @@
 		case policy.BucketPolicyReadWrite:
 			acl = oss.ACLPublicReadWrite
 		default:
-			return errors.Trace(minio.NotImplemented{})
+			logger.LogIf(ctx, minio.NotImplemented{})
+			return minio.NotImplemented{}
 		}
 		err := l.Client.SetBucketACL(bucket, acl)
 		if err != nil {
-			return ossToObjectError(errors.Trace(err), bucket)
+			logger.LogIf(ctx, err)
+			return ossToObjectError(err, bucket)
 		}
 	}
@@ -974,20 +1008,23 @@
 func (l *ossObjects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) {
 	result, err := l.Client.GetBucketACL(bucket)
 	if err != nil {
-		return policy.BucketAccessPolicy{}, ossToObjectError(errors.Trace(err))
+		logger.LogIf(ctx, err)
+		return policy.BucketAccessPolicy{}, ossToObjectError(err)
 	}
 	policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
 	switch result.ACL {
 	case string(oss.ACLPrivate):
 		// By default, all buckets starts with a "private" policy.
-		return policy.BucketAccessPolicy{}, ossToObjectError(errors.Trace(minio.PolicyNotFound{}), bucket)
+		logger.LogIf(ctx, minio.PolicyNotFound{})
+		return policy.BucketAccessPolicy{}, ossToObjectError(minio.PolicyNotFound{}, bucket)
 	case string(oss.ACLPublicRead):
 		policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "")
 	case string(oss.ACLPublicReadWrite):
 		policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadWrite, bucket, "")
 	default:
-		return policy.BucketAccessPolicy{}, errors.Trace(minio.NotImplemented{})
+		logger.LogIf(ctx, minio.NotImplemented{})
+		return policy.BucketAccessPolicy{}, minio.NotImplemented{}
 	}
 	return policyInfo, nil
@@ -997,7 +1034,8 @@
 func (l *ossObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
 	err := l.Client.SetBucketACL(bucket, oss.ACLPrivate)
 	if err != nil {
-		return ossToObjectError(errors.Trace(err), bucket)
+		logger.LogIf(ctx, err)
+		return ossToObjectError(err, bucket)
 	}
 	return nil
 }

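Note: beyond the one-for-one LogIf substitutions, this file shows the two structural consequences of dropping the wrapper: package-level helpers (`appendS3MetaToOSSOptions`, `ossGeBucketInfo`, `ossListObjects`, `ossListObjectsV2`, `ossGetObject`, `ossGetObjectInfo`, `ossPutObject`) gain a leading `context.Context` parameter so they can log at the failure site, and `ossToObjectError` loses its `*errors.Error` type assertion and `e.Cause` plumbing, operating directly on the bare error. A sketch of the helper pattern under those assumptions; `inspectBucket` is a placeholder, not a function from this commit:

```go
package example

import (
	"context"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
	"github.com/minio/minio/cmd/logger"
)

// inspectBucket shows the ctx-threading shape: exported methods pass their
// request context down so helpers can log, then the bare error bubbles up.
func inspectBucket(ctx context.Context, client *oss.Client, bucket string) error {
	if _, err := client.GetBucketInfo(bucket); err != nil {
		logger.LogIf(ctx, err)
		return err
	}
	return nil
}
```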
View File

@@ -17,6 +17,7 @@
 package oss
 import (
+	"context"
 	"fmt"
 	"net/http"
 	"reflect"
@@ -29,9 +30,9 @@ import (
 )
 func ossErrResponse(code string) error {
-	return errors.Trace(oss.ServiceError{
+	return oss.ServiceError{
 		Code: code,
-	})
+	}
 }
 func TestOSSToObjectError(t *testing.T) {
@@ -116,7 +117,7 @@ func TestS3MetaToOSSOptions(t *testing.T) {
 	headers = map[string]string{
 		"x-amz-meta-invalid_meta": "value",
 	}
-	_, err = appendS3MetaToOSSOptions(nil, headers)
+	_, err = appendS3MetaToOSSOptions(context.Background(), nil, headers)
 	if err = errors.Cause(err); err != nil {
 		if _, ok := err.(minio.UnsupportedMetadata); !ok {
 			t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err)
@@ -133,7 +134,7 @@ func TestS3MetaToOSSOptions(t *testing.T) {
 		"X-Amz-Meta-X-Amz-Matdesc": "{}",
 		"X-Amz-Meta-X-Amz-Iv":      "eWmyryl8kq+EVnnsE7jpOg==",
 	}
-	opts, err := appendS3MetaToOSSOptions(nil, headers)
+	opts, err := appendS3MetaToOSSOptions(context.Background(), nil, headers)
 	if err != nil {
 		t.Fatalf("Test failed, with %s", err)
 	}

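Note: tests have no request context, so each call into the re-signed helper passes `context.Background()`. A condensed, hypothetical form of the underscore-metadata case above:

```go
package oss

import (
	"context"
	"testing"

	minio "github.com/minio/minio/cmd"
	"github.com/minio/minio/pkg/errors"
)

// TestUnderscoreMetaRejected condenses the case above: metadata keys
// containing '_' must surface minio.UnsupportedMetadata.
func TestUnderscoreMetaRejected(t *testing.T) {
	headers := map[string]string{"x-amz-meta-invalid_meta": "value"}
	_, err := appendS3MetaToOSSOptions(context.Background(), nil, headers)
	if _, ok := errors.Cause(err).(minio.UnsupportedMetadata); !ok {
		t.Fatalf("got %v, expected minio.UnsupportedMetadata", err)
	}
}
```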
View File

@ -24,8 +24,8 @@ import (
miniogo "github.com/minio/minio-go" miniogo "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3utils" "github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio/cmd" minio "github.com/minio/minio/cmd"
@ -101,7 +101,7 @@ func s3GatewayMain(ctx *cli.Context) {
// Validate gateway arguments. // Validate gateway arguments.
host := ctx.Args().First() host := ctx.Args().First()
// Validate gateway arguments. // Validate gateway arguments.
minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument") logger.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
minio.StartGateway(ctx, &S3{host}) minio.StartGateway(ctx, &S3{host})
} }
@ -173,7 +173,8 @@ func (l *s3Objects) StorageInfo(ctx context.Context) (si minio.StorageInfo) {
func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error { func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
err := l.Client.MakeBucket(bucket, location) err := l.Client.MakeBucket(bucket, location)
if err != nil { if err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket)
} }
return err return err
} }
@ -188,12 +189,14 @@ func (l *s3Objects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.
// access to these buckets. // access to these buckets.
// Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html // Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
if s3utils.CheckValidBucketName(bucket) != nil { if s3utils.CheckValidBucketName(bucket) != nil {
return bi, errors.Trace(minio.BucketNameInvalid{Bucket: bucket}) logger.LogIf(ctx, minio.BucketNameInvalid{Bucket: bucket})
return bi, minio.BucketNameInvalid{Bucket: bucket}
} }
buckets, err := l.Client.ListBuckets() buckets, err := l.Client.ListBuckets()
if err != nil { if err != nil {
return bi, minio.ErrorRespToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return bi, minio.ErrorRespToObjectError(err, bucket)
} }
for _, bi := range buckets { for _, bi := range buckets {
@ -207,14 +210,16 @@ func (l *s3Objects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.
}, nil }, nil
} }
return bi, errors.Trace(minio.BucketNotFound{Bucket: bucket}) logger.LogIf(ctx, minio.BucketNotFound{Bucket: bucket})
return bi, minio.BucketNotFound{Bucket: bucket}
} }
// ListBuckets lists all S3 buckets // ListBuckets lists all S3 buckets
func (l *s3Objects) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error) { func (l *s3Objects) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error) {
buckets, err := l.Client.ListBuckets() buckets, err := l.Client.ListBuckets()
if err != nil { if err != nil {
return nil, minio.ErrorRespToObjectError(errors.Trace(err)) logger.LogIf(ctx, err)
return nil, minio.ErrorRespToObjectError(err)
} }
b := make([]minio.BucketInfo, len(buckets)) b := make([]minio.BucketInfo, len(buckets))
@ -232,7 +237,8 @@ func (l *s3Objects) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error)
func (l *s3Objects) DeleteBucket(ctx context.Context, bucket string) error { func (l *s3Objects) DeleteBucket(ctx context.Context, bucket string) error {
err := l.Client.RemoveBucket(bucket) err := l.Client.RemoveBucket(bucket)
if err != nil { if err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket)
} }
return nil return nil
} }
@ -241,7 +247,8 @@ func (l *s3Objects) DeleteBucket(ctx context.Context, bucket string) error {
func (l *s3Objects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) { func (l *s3Objects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) {
result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys) result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil { if err != nil {
return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return loi, minio.ErrorRespToObjectError(err, bucket)
} }
return minio.FromMinioClientListBucketResult(bucket, result), nil return minio.FromMinioClientListBucketResult(bucket, result), nil
@ -251,7 +258,8 @@ func (l *s3Objects) ListObjects(ctx context.Context, bucket string, prefix strin
func (l *s3Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) { func (l *s3Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys) result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
if err != nil { if err != nil {
return loi, minio.ErrorRespToObjectError(errors.Trace(err), bucket) logger.LogIf(ctx, err)
return loi, minio.ErrorRespToObjectError(err, bucket)
} }
return minio.FromMinioClientListBucketV2Result(bucket, result), nil return minio.FromMinioClientListBucketV2Result(bucket, result), nil
@ -265,23 +273,27 @@ func (l *s3Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuat
// length indicates the total length of the object. // length indicates the total length of the object.
func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string) error { func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string) error {
if length < 0 && length != -1 { if length < 0 && length != -1 {
return minio.ErrorRespToObjectError(errors.Trace(minio.InvalidRange{}), bucket, key) logger.LogIf(ctx, minio.InvalidRange{})
return minio.ErrorRespToObjectError(minio.InvalidRange{}, bucket, key)
} }
opts := miniogo.GetObjectOptions{} opts := miniogo.GetObjectOptions{}
if startOffset >= 0 && length >= 0 { if startOffset >= 0 && length >= 0 {
if err := opts.SetRange(startOffset, startOffset+length-1); err != nil { if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key) logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket, key)
} }
} }
object, _, err := l.Client.GetObject(bucket, key, opts) object, _, err := l.Client.GetObject(bucket, key, opts)
if err != nil { if err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key) logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket, key)
} }
defer object.Close() defer object.Close()
if _, err := io.Copy(writer, object); err != nil { if _, err := io.Copy(writer, object); err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket, key) logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket, key)
} }
return nil return nil
} }
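
A detail worth calling out in GetObject: miniogo's SetRange takes an inclusive byte range, so a read of length bytes starting at startOffset must end at startOffset+length-1, as the hunk above computes. A one-line helper makes the arithmetic explicit (illustrative only):

    // rangeEnd returns the inclusive end offset for a read of length bytes
    // starting at startOffset, e.g. offset 50 and length 100 end at byte 149.
    func rangeEnd(startOffset, length int64) int64 {
        return startOffset + length - 1
    }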
@ -290,7 +302,8 @@ func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, st
func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object string) (objInfo minio.ObjectInfo, err error) { func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object string) (objInfo minio.ObjectInfo, err error) {
oi, err := l.Client.StatObject(bucket, object, miniogo.StatObjectOptions{}) oi, err := l.Client.StatObject(bucket, object, miniogo.StatObjectOptions{})
if err != nil { if err != nil {
return minio.ObjectInfo{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object)
} }
return minio.FromMinioClientObjectInfo(bucket, oi), nil return minio.FromMinioClientObjectInfo(bucket, oi), nil
@ -300,7 +313,8 @@ func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object str
func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) { func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata)) oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata))
if err != nil { if err != nil {
return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return objInfo, minio.ErrorRespToObjectError(err, bucket, object)
} }
return minio.FromMinioClientObjectInfo(bucket, oi), nil return minio.FromMinioClientObjectInfo(bucket, oi), nil
@ -315,7 +329,8 @@ func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject
srcInfo.UserDefined["x-amz-metadata-directive"] = "REPLACE" srcInfo.UserDefined["x-amz-metadata-directive"] = "REPLACE"
srcInfo.UserDefined["x-amz-copy-source-if-match"] = srcInfo.ETag srcInfo.UserDefined["x-amz-copy-source-if-match"] = srcInfo.ETag
if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, srcInfo.UserDefined); err != nil { if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, srcInfo.UserDefined); err != nil {
return objInfo, minio.ErrorRespToObjectError(errors.Trace(err), srcBucket, srcObject) logger.LogIf(ctx, err)
return objInfo, minio.ErrorRespToObjectError(err, srcBucket, srcObject)
} }
return l.GetObjectInfo(ctx, dstBucket, dstObject) return l.GetObjectInfo(ctx, dstBucket, dstObject)
} }
@ -324,7 +339,8 @@ func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject
func (l *s3Objects) DeleteObject(ctx context.Context, bucket string, object string) error { func (l *s3Objects) DeleteObject(ctx context.Context, bucket string, object string) error {
err := l.Client.RemoveObject(bucket, object) err := l.Client.RemoveObject(bucket, object)
if err != nil { if err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket, object)
} }
return nil return nil
@ -346,7 +362,8 @@ func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
opts := miniogo.PutObjectOptions{UserMetadata: metadata} opts := miniogo.PutObjectOptions{UserMetadata: metadata}
uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts) uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts)
if err != nil { if err != nil {
return uploadID, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return uploadID, minio.ErrorRespToObjectError(err, bucket, object)
} }
return uploadID, nil return uploadID, nil
} }
@ -355,7 +372,8 @@ func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi minio.PartInfo, e error) { func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi minio.PartInfo, e error) {
info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5Base64String(), data.SHA256HexString()) info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5Base64String(), data.SHA256HexString())
if err != nil { if err != nil {
return pi, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return pi, minio.ErrorRespToObjectError(err, bucket, object)
} }
return minio.FromMinioClientObjectPart(info), nil return minio.FromMinioClientObjectPart(info), nil
@ -372,7 +390,8 @@ func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, de
completePart, err := l.Client.CopyObjectPart(srcBucket, srcObject, destBucket, destObject, completePart, err := l.Client.CopyObjectPart(srcBucket, srcObject, destBucket, destObject,
uploadID, partID, startOffset, length, srcInfo.UserDefined) uploadID, partID, startOffset, length, srcInfo.UserDefined)
if err != nil { if err != nil {
return p, minio.ErrorRespToObjectError(errors.Trace(err), srcBucket, srcObject) logger.LogIf(ctx, err)
return p, minio.ErrorRespToObjectError(err, srcBucket, srcObject)
} }
p.PartNumber = completePart.PartNumber p.PartNumber = completePart.PartNumber
p.ETag = completePart.ETag p.ETag = completePart.ETag
@ -392,14 +411,16 @@ func (l *s3Objects) ListObjectParts(ctx context.Context, bucket string, object s
// AbortMultipartUpload aborts an ongoing multipart upload // AbortMultipartUpload aborts an ongoing multipart upload
func (l *s3Objects) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string) error { func (l *s3Objects) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string) error {
err := l.Client.AbortMultipartUpload(bucket, object, uploadID) err := l.Client.AbortMultipartUpload(bucket, object, uploadID)
return minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket, object)
} }
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object // CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (l *s3Objects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, e error) { func (l *s3Objects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, e error) {
err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, minio.ToMinioClientCompleteParts(uploadedParts)) err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, minio.ToMinioClientCompleteParts(uploadedParts))
if err != nil { if err != nil {
return oi, minio.ErrorRespToObjectError(errors.Trace(err), bucket, object) logger.LogIf(ctx, err)
return oi, minio.ErrorRespToObjectError(err, bucket, object)
} }
return l.GetObjectInfo(ctx, bucket, object) return l.GetObjectInfo(ctx, bucket, object)
@ -408,7 +429,8 @@ func (l *s3Objects) CompleteMultipartUpload(ctx context.Context, bucket string,
// SetBucketPolicy sets policy on bucket // SetBucketPolicy sets policy on bucket
func (l *s3Objects) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error { func (l *s3Objects) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error {
if err := l.Client.PutBucketPolicy(bucket, policyInfo); err != nil { if err := l.Client.PutBucketPolicy(bucket, policyInfo); err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket, "") logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket, "")
} }
return nil return nil
@ -418,7 +440,8 @@ func (l *s3Objects) SetBucketPolicy(ctx context.Context, bucket string, policyIn
func (l *s3Objects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) { func (l *s3Objects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) {
policyInfo, err := l.Client.GetBucketPolicy(bucket) policyInfo, err := l.Client.GetBucketPolicy(bucket)
if err != nil { if err != nil {
return policy.BucketAccessPolicy{}, minio.ErrorRespToObjectError(errors.Trace(err), bucket, "") logger.LogIf(ctx, err)
return policy.BucketAccessPolicy{}, minio.ErrorRespToObjectError(err, bucket, "")
} }
return policyInfo, nil return policyInfo, nil
} }
@ -426,7 +449,8 @@ func (l *s3Objects) GetBucketPolicy(ctx context.Context, bucket string) (policy.
// DeleteBucketPolicy deletes all policies on bucket // DeleteBucketPolicy deletes all policies on bucket
func (l *s3Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error { func (l *s3Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
if err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{}); err != nil { if err := l.Client.PutBucketPolicy(bucket, policy.BucketAccessPolicy{}); err != nil {
return minio.ErrorRespToObjectError(errors.Trace(err), bucket, "") logger.LogIf(ctx, err)
return minio.ErrorRespToObjectError(err, bucket, "")
} }
return nil return nil
} }

View File

@ -119,9 +119,9 @@ func TestS3ToObjectError(t *testing.T) {
} }
for i, tc := range testCases { for i, tc := range testCases {
actualErr := minio.ErrorRespToObjectError(errors.Trace(tc.inputErr), tc.bucket, tc.object) actualErr := minio.ErrorRespToObjectError(tc.inputErr, tc.bucket, tc.object)
if e, ok := actualErr.(*errors.Error); ok && e.Cause.Error() != tc.expectedErr.Error() { if e, ok := actualErr.(*errors.Error); ok && e.Cause.Error() != tc.expectedErr.Error() {
t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, e) t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, actualErr)
} }
} }
} }
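
With the Trace wrapper removed, actualErr is the cause itself, so the surviving type assertion on *errors.Error will generally no longer match and the check is effectively skipped. The natural end state is a direct comparison; a sketch of what the loop body could reduce to:

    // Compare messages directly once ErrorRespToObjectError stops wrapping.
    if actualErr != nil && actualErr.Error() != tc.expectedErr.Error() {
        t.Errorf("Test case %d: Expected error %v but received error %v",
            i+1, tc.expectedErr, actualErr)
    }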

View File

@ -24,7 +24,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"log"
"net/http" "net/http"
"net/url" "net/url"
"os" "os"
@ -38,8 +37,8 @@ import (
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/minio-go/pkg/set" "github.com/minio/minio-go/pkg/set"
minio "github.com/minio/minio/cmd" minio "github.com/minio/minio/cmd"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
) )
@ -112,7 +111,7 @@ func siaGatewayMain(ctx *cli.Context) {
// Validate gateway arguments. // Validate gateway arguments.
host := ctx.Args().First() host := ctx.Args().First()
// Validate gateway arguments. // Validate gateway arguments.
minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument") logger.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument")
minio.StartGateway(ctx, &Sia{host}) minio.StartGateway(ctx, &Sia{host})
} }
@ -164,9 +163,9 @@ func (g *Sia) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error)
colorBlue := color.New(color.FgBlue).SprintfFunc() colorBlue := color.New(color.FgBlue).SprintfFunc()
colorBold := color.New(color.Bold).SprintFunc() colorBold := color.New(color.Bold).SprintFunc()
log.Println(colorBlue("\nSia Gateway Configuration:")) logger.Println(colorBlue("\nSia Gateway Configuration:"))
log.Println(colorBlue(" Sia Daemon API Address:") + colorBold(fmt.Sprintf(" %s\n", sia.Address))) logger.Println(colorBlue(" Sia Daemon API Address:") + colorBold(fmt.Sprintf(" %s\n", sia.Address)))
log.Println(colorBlue(" Sia Temp Directory:") + colorBold(fmt.Sprintf(" %s\n", sia.TempDir))) logger.Println(colorBlue(" Sia Temp Directory:") + colorBold(fmt.Sprintf(" %s\n", sia.TempDir)))
return sia, nil return sia, nil
} }
@ -217,10 +216,11 @@ func (s MethodNotSupported) Error() string {
// apiGet wraps a GET request with a status code check, such that if the GET does // apiGet wraps a GET request with a status code check, such that if the GET does
// not return 2xx, the error will be read and returned. The response body is // not return 2xx, the error will be read and returned. The response body is
// not closed. // not closed.
func apiGet(addr, call, apiPassword string) (*http.Response, error) { func apiGet(ctx context.Context, addr, call, apiPassword string) (*http.Response, error) {
req, err := http.NewRequest("GET", "http://"+addr+call, nil) req, err := http.NewRequest("GET", "http://"+addr+call, nil)
if err != nil { if err != nil {
return nil, errors.Trace(err) logger.LogIf(ctx, err)
return nil, err
} }
req.Header.Set("User-Agent", "Sia-Agent") req.Header.Set("User-Agent", "Sia-Agent")
if apiPassword != "" { if apiPassword != "" {
@ -228,15 +228,18 @@ func apiGet(addr, call, apiPassword string) (*http.Response, error) {
} }
resp, err := http.DefaultClient.Do(req) resp, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
return nil, errors.Trace(err) logger.LogIf(ctx, err)
return nil, err
} }
if resp.StatusCode == http.StatusNotFound { if resp.StatusCode == http.StatusNotFound {
resp.Body.Close() resp.Body.Close()
logger.LogIf(ctx, MethodNotSupported{call})
return nil, MethodNotSupported{call} return nil, MethodNotSupported{call}
} }
if non2xx(resp.StatusCode) { if non2xx(resp.StatusCode) {
err := decodeError(resp) err := decodeError(resp)
resp.Body.Close() resp.Body.Close()
logger.LogIf(ctx, err)
return nil, err return nil, err
} }
return resp, nil return resp, nil
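
Per the comment above, apiGet leaves the response body open on success, so callers own the close. A usage sketch with values borrowed from this file:

    resp, err := apiGet(ctx, s.Address, "/renter/files", s.password)
    if err != nil {
        return err
    }
    defer resp.Body.Close() // apiGet does not close the body on success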
@ -245,7 +248,7 @@ func apiGet(addr, call, apiPassword string) (*http.Response, error) {
// apiPost wraps a POST request with a status code check, such that if the POST // apiPost wraps a POST request with a status code check, such that if the POST
// does not return 2xx, the error will be read and returned. The response body // does not return 2xx, the error will be read and returned. The response body
// is not closed. // is not closed.
func apiPost(addr, call, vals, apiPassword string) (*http.Response, error) { func apiPost(ctx context.Context, addr, call, vals, apiPassword string) (*http.Response, error) {
req, err := http.NewRequest("POST", "http://"+addr+call, strings.NewReader(vals)) req, err := http.NewRequest("POST", "http://"+addr+call, strings.NewReader(vals))
if err != nil { if err != nil {
return nil, err return nil, err
@ -257,7 +260,8 @@ func apiPost(addr, call, vals, apiPassword string) (*http.Response, error) {
} }
resp, err := http.DefaultClient.Do(req) resp, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
return nil, errors.Trace(err) logger.LogIf(ctx, err)
return nil, err
} }
if resp.StatusCode == http.StatusNotFound { if resp.StatusCode == http.StatusNotFound {
@ -275,8 +279,8 @@ func apiPost(addr, call, vals, apiPassword string) (*http.Response, error) {
// post makes an API call and discards the response. An error is returned if // post makes an API call and discards the response. An error is returned if
// the response status is not 2xx. // the response status is not 2xx.
func post(addr, call, vals, apiPassword string) error { func post(ctx context.Context, addr, call, vals, apiPassword string) error {
resp, err := apiPost(addr, call, vals, apiPassword) resp, err := apiPost(ctx, addr, call, vals, apiPassword)
if err != nil { if err != nil {
return err return err
} }
@ -285,24 +289,26 @@ func post(addr, call, vals, apiPassword string) error {
} }
// list fetches all the uploaded files and decodes the JSON response. // list fetches all the uploaded files and decodes the JSON response.
func list(addr string, apiPassword string, obj *renterFiles) error { func list(ctx context.Context, addr string, apiPassword string, obj *renterFiles) error {
resp, err := apiGet(addr, "/renter/files", apiPassword) resp, err := apiGet(ctx, addr, "/renter/files", apiPassword)
if err != nil { if err != nil {
return err return err
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode == http.StatusNoContent { if resp.StatusCode == http.StatusNoContent {
logger.LogIf(ctx, fmt.Errorf("Expecting a response, but API returned %s", resp.Status))
return fmt.Errorf("Expecting a response, but API returned %s", resp.Status) return fmt.Errorf("Expecting a response, but API returned %s", resp.Status)
} }
return json.NewDecoder(resp.Body).Decode(obj) err = json.NewDecoder(resp.Body).Decode(obj)
logger.LogIf(ctx, err)
return err
} }
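
Note that the new body logs err unconditionally before returning it, which assumes LogIf treats nil as a no-op (the same assumption AbortMultipartUpload in the S3 gateway makes above). Callers pass a pointer for the decoded payload, as listRenterFiles does later in this file:

    var rf renterFiles
    if err := list(ctx, s.Address, s.password, &rf); err != nil {
        return siaObjs, err
    }
    // rf now holds the decoded /renter/files response.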
// get makes an API call and discards the response. An error is returned if the // get makes an API call and discards the response. An error is returned if the
// response status is not 2xx. // response status is not 2xx.
func get(addr, call, apiPassword string) error { func get(ctx context.Context, addr, call, apiPassword string) error {
resp, err := apiGet(addr, call, apiPassword) resp, err := apiGet(ctx, addr, call, apiPassword)
if err != nil { if err != nil {
return err return err
} }
@ -336,7 +342,7 @@ func (s *siaObjects) MakeBucketWithLocation(ctx context.Context, bucket, locatio
sha256sum := sha256.Sum256([]byte(bucket)) sha256sum := sha256.Sum256([]byte(bucket))
var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:])) var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))
return post(s.Address, "/renter/upload/"+siaObj, "source="+srcFile, s.password) return post(ctx, s.Address, "/renter/upload/"+siaObj, "source="+srcFile, s.password)
} }
// GetBucketInfo gets bucket metadata. // GetBucketInfo gets bucket metadata.
@ -347,7 +353,7 @@ func (s *siaObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio
dstFile := path.Join(s.TempDir, minio.MustGetUUID()) dstFile := path.Join(s.TempDir, minio.MustGetUUID())
defer os.Remove(dstFile) defer os.Remove(dstFile)
if err := get(s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil { if err := get(ctx, s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil {
return bi, err return bi, err
} }
return minio.BucketInfo{Name: bucket}, nil return minio.BucketInfo{Name: bucket}, nil
@ -355,7 +361,7 @@ func (s *siaObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio
// ListBuckets will detect and return existing buckets on Sia. // ListBuckets will detect and return existing buckets on Sia.
func (s *siaObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) { func (s *siaObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
sObjs, serr := s.listRenterFiles("") sObjs, serr := s.listRenterFiles(ctx, "")
if serr != nil { if serr != nil {
return buckets, serr return buckets, serr
} }
@ -388,11 +394,11 @@ func (s *siaObjects) DeleteBucket(ctx context.Context, bucket string) error {
sha256sum := sha256.Sum256([]byte(bucket)) sha256sum := sha256.Sum256([]byte(bucket))
var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:])) var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))
return post(s.Address, "/renter/delete/"+siaObj, "", s.password) return post(ctx, s.Address, "/renter/delete/"+siaObj, "", s.password)
} }
func (s *siaObjects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) { func (s *siaObjects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
siaObjs, siaErr := s.listRenterFiles(bucket) siaObjs, siaErr := s.listRenterFiles(ctx, bucket)
if siaErr != nil { if siaErr != nil {
return loi, siaErr return loi, siaErr
} }
@ -429,7 +435,7 @@ func (s *siaObjects) GetObject(ctx context.Context, bucket string, object string
defer os.Remove(dstFile) defer os.Remove(dstFile)
var siaObj = path.Join(s.RootDir, bucket, object) var siaObj = path.Join(s.RootDir, bucket, object)
if err := get(s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil { if err := get(ctx, s.Address, "/renter/download/"+siaObj+"?destination="+url.QueryEscape(dstFile), s.password); err != nil {
return err return err
} }
@ -459,11 +465,16 @@ func (s *siaObjects) GetObject(ctx context.Context, bucket string, object string
// Reply back invalid range if the input offset and length fall out of range. // Reply back invalid range if the input offset and length fall out of range.
if startOffset > size || startOffset+length > size { if startOffset > size || startOffset+length > size {
return errors.Trace(minio.InvalidRange{ logger.LogIf(ctx, minio.InvalidRange{
OffsetBegin: startOffset, OffsetBegin: startOffset,
OffsetEnd: length, OffsetEnd: length,
ResourceSize: size, ResourceSize: size,
}) })
return minio.InvalidRange{
OffsetBegin: startOffset,
OffsetEnd: length,
ResourceSize: size,
}
} }
// Allocate a staging buffer. // Allocate a staging buffer.
@ -476,10 +487,10 @@ func (s *siaObjects) GetObject(ctx context.Context, bucket string, object string
// findSiaObject retrieves the siaObjectInfo for the Sia object with the given // findSiaObject retrieves the siaObjectInfo for the Sia object with the given
// Sia path name. // Sia path name.
func (s *siaObjects) findSiaObject(bucket, object string) (siaObjectInfo, error) { func (s *siaObjects) findSiaObject(ctx context.Context, bucket, object string) (siaObjectInfo, error) {
siaPath := path.Join(s.RootDir, bucket, object) siaPath := path.Join(s.RootDir, bucket, object)
sObjs, err := s.listRenterFiles("") sObjs, err := s.listRenterFiles(ctx, "")
if err != nil { if err != nil {
return siaObjectInfo{}, err return siaObjectInfo{}, err
} }
@ -489,16 +500,19 @@ func (s *siaObjects) findSiaObject(bucket, object string) (siaObjectInfo, error)
return sObj, nil return sObj, nil
} }
} }
return siaObjectInfo{}, errors.Trace(minio.ObjectNotFound{ logger.LogIf(ctx, minio.ObjectNotFound{
Bucket: bucket, Bucket: bucket,
Object: object, Object: object,
}) })
return siaObjectInfo{}, minio.ObjectNotFound{
Bucket: bucket,
Object: object,
}
} }
// GetObjectInfo reads object info and replies back ObjectInfo // GetObjectInfo reads object info and replies back ObjectInfo
func (s *siaObjects) GetObjectInfo(ctx context.Context, bucket string, object string) (minio.ObjectInfo, error) { func (s *siaObjects) GetObjectInfo(ctx context.Context, bucket string, object string) (minio.ObjectInfo, error) {
so, err := s.findSiaObject(bucket, object) so, err := s.findSiaObject(ctx, bucket, object)
if err != nil { if err != nil {
return minio.ObjectInfo{}, err return minio.ObjectInfo{}, err
} }
@ -527,11 +541,11 @@ func (s *siaObjects) PutObject(ctx context.Context, bucket string, object string
return objInfo, err return objInfo, err
} }
if err = post(s.Address, "/renter/upload/"+path.Join(s.RootDir, bucket, object), "source="+srcFile, s.password); err != nil { if err = post(ctx, s.Address, "/renter/upload/"+path.Join(s.RootDir, bucket, object), "source="+srcFile, s.password); err != nil {
os.Remove(srcFile) os.Remove(srcFile)
return objInfo, err return objInfo, err
} }
defer s.deleteTempFileWhenUploadCompletes(srcFile, bucket, object) defer s.deleteTempFileWhenUploadCompletes(ctx, srcFile, bucket, object)
return minio.ObjectInfo{ return minio.ObjectInfo{
Name: object, Name: object,
@ -546,7 +560,7 @@ func (s *siaObjects) PutObject(ctx context.Context, bucket string, object string
func (s *siaObjects) DeleteObject(ctx context.Context, bucket string, object string) error { func (s *siaObjects) DeleteObject(ctx context.Context, bucket string, object string) error {
// Tell Sia daemon to delete the object // Tell Sia daemon to delete the object
var siaObj = path.Join(s.RootDir, bucket, object) var siaObj = path.Join(s.RootDir, bucket, object)
return post(s.Address, "/renter/delete/"+siaObj, "", s.password) return post(ctx, s.Address, "/renter/delete/"+siaObj, "", s.password)
} }
// siaObjectInfo represents object info stored on Sia // siaObjectInfo represents object info stored on Sia
@ -565,10 +579,10 @@ type renterFiles struct {
} }
// listRenterFiles will return a list of existing objects in the bucket provided // listRenterFiles will return a list of existing objects in the bucket provided
func (s *siaObjects) listRenterFiles(bucket string) (siaObjs []siaObjectInfo, err error) { func (s *siaObjects) listRenterFiles(ctx context.Context, bucket string) (siaObjs []siaObjectInfo, err error) {
// Get list of all renter files // Get list of all renter files
var rf renterFiles var rf renterFiles
if err = list(s.Address, s.password, &rf); err != nil { if err = list(ctx, s.Address, s.password, &rf); err != nil {
return siaObjs, err return siaObjs, err
} }
@ -592,16 +606,15 @@ func (s *siaObjects) listRenterFiles(bucket string) (siaObjs []siaObjectInfo, er
// deleteTempFileWhenUploadCompletes checks the status of a Sia file upload // deleteTempFileWhenUploadCompletes checks the status of a Sia file upload
// until it reaches 100% upload progress, then deletes the local temp copy from // until it reaches 100% upload progress, then deletes the local temp copy from
// the filesystem. // the filesystem.
func (s *siaObjects) deleteTempFileWhenUploadCompletes(tempFile string, bucket, object string) { func (s *siaObjects) deleteTempFileWhenUploadCompletes(ctx context.Context, tempFile string, bucket, object string) {
var soi siaObjectInfo var soi siaObjectInfo
// Wait until 100% upload instead of 1x redundancy because if we delete // Wait until 100% upload instead of 1x redundancy because if we delete
// after 1x redundancy, the user has to pay the cost of other hosts // after 1x redundancy, the user has to pay the cost of other hosts
// redistributing the file. // redistributing the file.
for soi.UploadProgress < 100.0 { for soi.UploadProgress < 100.0 {
var err error var err error
soi, err = s.findSiaObject(bucket, object) soi, err = s.findSiaObject(ctx, bucket, object)
if err != nil { if err != nil {
minio.ErrorIf(err, "Unable to find file uploaded to Sia path %s/%s", bucket, object)
break break
} }
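
The loop body is truncated by the hunk; presumably it sleeps between polls and removes the temp file once progress reaches 100%. A sketch under those assumptions, with an invented poll interval:

    for soi.UploadProgress < 100.0 {
        var err error
        soi, err = s.findSiaObject(ctx, bucket, object)
        if err != nil {
            break // object vanished or listing failed; give up on cleanup
        }
        time.Sleep(15 * time.Second) // assumed poll interval
    }
    os.Remove(tempFile) // assumed cleanup once fully uploaded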

View File

@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"io" "io"
"mime/multipart" "mime/multipart"
"net" "net"
@ -24,7 +25,7 @@ import (
"net/url" "net/url"
"strings" "strings"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
httptracer "github.com/minio/minio/pkg/handlers" httptracer "github.com/minio/minio/pkg/handlers"
) )
@ -36,7 +37,7 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error APIError
locationConstraint := createBucketLocationConfiguration{} locationConstraint := createBucketLocationConfiguration{}
err := xmlDecoder(r.Body, &locationConstraint, r.ContentLength) err := xmlDecoder(r.Body, &locationConstraint, r.ContentLength)
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
errorIf(err, "Unable to xml decode location constraint") logger.LogIf(context.Background(), err)
// Treat all other failures as XML parsing errors. // Treat all other failures as XML parsing errors.
return "", ErrMalformedXML return "", ErrMalformedXML
} // else for both err as nil or io.EOF } // else for both err as nil or io.EOF
@ -113,9 +114,10 @@ var userMetadataKeyPrefixes = []string{
} }
// extractMetadataFromHeader extracts metadata from HTTP header. // extractMetadataFromHeader extracts metadata from HTTP header.
func extractMetadataFromHeader(header http.Header) (map[string]string, error) { func extractMetadataFromHeader(ctx context.Context, header http.Header) (map[string]string, error) {
if header == nil { if header == nil {
return nil, errors.Trace(errInvalidArgument) logger.LogIf(ctx, errInvalidArgument)
return nil, errInvalidArgument
} }
metadata := make(map[string]string) metadata := make(map[string]string)
@ -134,7 +136,8 @@ func extractMetadataFromHeader(header http.Header) (map[string]string, error) {
// Go through all other headers for any additional headers that needs to be saved. // Go through all other headers for any additional headers that needs to be saved.
for key := range header { for key := range header {
if key != http.CanonicalHeaderKey(key) { if key != http.CanonicalHeaderKey(key) {
return nil, errors.Trace(errInvalidArgument) logger.LogIf(ctx, errInvalidArgument)
return nil, errInvalidArgument
} }
for _, prefix := range userMetadataKeyPrefixes { for _, prefix := range userMetadataKeyPrefixes {
if strings.HasPrefix(key, prefix) { if strings.HasPrefix(key, prefix) {
@ -187,12 +190,13 @@ func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string)
} }
// Validate form field size for s3 specification requirement. // Validate form field size for s3 specification requirement.
func validateFormFieldSize(formValues http.Header) error { func validateFormFieldSize(ctx context.Context, formValues http.Header) error {
// Iterate over form values // Iterate over form values
for k := range formValues { for k := range formValues {
// Check if value's field exceeds S3 limit // Check if value's field exceeds S3 limit
if int64(len(formValues.Get(k))) > maxFormFieldSize { if int64(len(formValues.Get(k))) > maxFormFieldSize {
return errors.Trace(errSizeUnexpected) logger.LogIf(ctx, errSizeUnexpected)
return errSizeUnexpected
} }
} }
@ -201,7 +205,7 @@ func validateFormFieldSize(formValues http.Header) error {
} }
// Extract form fields and file data from an HTTP POST Policy // Extract form fields and file data from an HTTP POST Policy
func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) { func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) {
/// HTML Form values /// HTML Form values
fileName = "" fileName = ""
@ -212,7 +216,7 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser,
} }
// Validate form values. // Validate form values.
if err = validateFormFieldSize(formValues); err != nil { if err = validateFormFieldSize(ctx, formValues); err != nil {
return nil, "", 0, nil, err return nil, "", 0, nil, err
} }
@ -221,7 +225,8 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser,
canonicalFormName := http.CanonicalHeaderKey(k) canonicalFormName := http.CanonicalHeaderKey(k)
if canonicalFormName == "File" { if canonicalFormName == "File" {
if len(v) == 0 { if len(v) == 0 {
return nil, "", 0, nil, errors.Trace(errInvalidArgument) logger.LogIf(ctx, errInvalidArgument)
return nil, "", 0, nil, errInvalidArgument
} }
// Fetch fileHeader which has the uploaded file information // Fetch fileHeader which has the uploaded file information
fileHeader := v[0] fileHeader := v[0]
@ -230,17 +235,20 @@ func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser,
// Open the uploaded part // Open the uploaded part
filePart, err = fileHeader.Open() filePart, err = fileHeader.Open()
if err != nil { if err != nil {
return nil, "", 0, nil, errors.Trace(err) logger.LogIf(ctx, err)
return nil, "", 0, nil, err
} }
// Compute file size // Compute file size
fileSize, err = filePart.(io.Seeker).Seek(0, 2) fileSize, err = filePart.(io.Seeker).Seek(0, 2)
if err != nil { if err != nil {
return nil, "", 0, nil, errors.Trace(err) logger.LogIf(ctx, err)
return nil, "", 0, nil, err
} }
// Reset Seek to the beginning // Reset Seek to the beginning
_, err = filePart.(io.Seeker).Seek(0, 0) _, err = filePart.(io.Seeker).Seek(0, 0)
if err != nil { if err != nil {
return nil, "", 0, nil, errors.Trace(err) logger.LogIf(ctx, err)
return nil, "", 0, nil, err
} }
// File found and ready for reading // File found and ready for reading
break break
@ -276,7 +284,10 @@ func getResource(path string, host string, domain string) (string, error) {
// In bucket.mydomain.com:9000, strip out :9000 // In bucket.mydomain.com:9000, strip out :9000
var err error var err error
if host, _, err = net.SplitHostPort(host); err != nil { if host, _, err = net.SplitHostPort(host); err != nil {
errorIf(err, "Unable to split %s", host) reqInfo := (&logger.ReqInfo{}).AppendTags("host", host)
reqInfo.AppendTags("path", path)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return "", err return "", err
} }
} }
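
net.SplitHostPort fails on a bare host with no ":port", which is why the error path above tags host and path before logging and returning. The strip-port behavior in isolation, as a tolerant variant (sketch only, not the getResource behavior):

    // stripPort drops an optional ":port" suffix from a Host header value;
    // SplitHostPort errors on a bare host, in which case host is kept as-is.
    func stripPort(host string) string {
        if h, _, err := net.SplitHostPort(host); err == nil {
            return h // "bucket.mydomain.com:9000" -> "bucket.mydomain.com"
        }
        return host
    }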

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"encoding/xml" "encoding/xml"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
@ -114,7 +115,7 @@ func TestValidateFormFieldSize(t *testing.T) {
// Run validate form field size check under all test cases. // Run validate form field size check under all test cases.
for i, testCase := range testCases { for i, testCase := range testCases {
err := validateFormFieldSize(testCase.header) err := validateFormFieldSize(context.Background(), testCase.header)
if err != nil { if err != nil {
if errors.Cause(err).Error() != testCase.err.Error() { if errors.Cause(err).Error() != testCase.err.Error() {
t.Errorf("Test %d: Expected error %s, got %s", i+1, testCase.err, err) t.Errorf("Test %d: Expected error %s, got %s", i+1, testCase.err, err)
@ -180,7 +181,7 @@ func TestExtractMetadataHeaders(t *testing.T) {
// Validate if the extracting headers. // Validate if the extracting headers.
for i, testCase := range testCases { for i, testCase := range testCases {
metadata, err := extractMetadataFromHeader(testCase.header) metadata, err := extractMetadataFromHeader(context.Background(), testCase.header)
if err != nil && !testCase.shouldFail { if err != nil && !testCase.shouldFail {
t.Fatalf("Test %d failed to extract metadata: %v", i+1, err) t.Fatalf("Test %d failed to extract metadata: %v", i+1, err)
} }

View File

@ -17,8 +17,8 @@
package http package http
import ( import (
"context"
"crypto/tls" "crypto/tls"
"errors"
"fmt" "fmt"
"io" "io"
"net" "net"
@ -28,6 +28,8 @@ import (
"sync" "sync"
"syscall" "syscall"
"time" "time"
"github.com/minio/minio/cmd/logger"
) )
var sslRequiredErrMsg = []byte("HTTP/1.0 403 Forbidden\r\n\r\nSSL required") var sslRequiredErrMsg = []byte("HTTP/1.0 403 Forbidden\r\n\r\nSSL required")
@ -85,9 +87,8 @@ type httpListener struct {
tcpKeepAliveTimeout time.Duration tcpKeepAliveTimeout time.Duration
readTimeout time.Duration readTimeout time.Duration
writeTimeout time.Duration writeTimeout time.Duration
updateBytesReadFunc func(int) // function to be called to update bytes read in BufConn. updateBytesReadFunc func(int) // function to be called to update bytes read in BufConn.
updateBytesWrittenFunc func(int) // function to be called to update bytes written in BufConn. updateBytesWrittenFunc func(int) // function to be called to update bytes written in BufConn.
errorLogFunc func(error, string, ...interface{}) // function to be called on errors.
} }
// isRoutineNetErr returns true if error is due to a network timeout, // isRoutineNetErr returns true if error is due to a network timeout,
@ -139,17 +140,16 @@ func (listener *httpListener) start() {
// Peek bytes of maximum length of all HTTP methods. // Peek bytes of maximum length of all HTTP methods.
data, err := bufconn.Peek(methodMaxLen) data, err := bufconn.Peek(methodMaxLen)
if err != nil { if err != nil {
if listener.errorLogFunc != nil { // Peek could fail legitimately when clients abruptly close
// Peek could fail legitimately when clients abruptly close // connection. E.g. Chrome browser opens connections speculatively to
// connection. E.g. Chrome browser opens connections speculatively to // speed up loading of a web page. Peek may also fail due to network
// speed up loading of a web page. Peek may also fail due to network // saturation on a transport with read timeout set. All other kind of
// saturation on a transport with read timeout set. All other kind of // errors should be logged for further investigation. Thanks @brendanashworth.
// errors should be logged for further investigation. Thanks @brendanashworth. if !isRoutineNetErr(err) {
if !isRoutineNetErr(err) { reqInfo := (&logger.ReqInfo{}).AppendTags("remoteAddr", bufconn.RemoteAddr().String())
listener.errorLogFunc(err, reqInfo.AppendTags("localAddr", bufconn.LocalAddr().String())
"Error in reading from new connection %s at server %s", ctx := logger.SetReqInfo(context.Background(), reqInfo)
bufconn.RemoteAddr(), bufconn.LocalAddr()) logger.LogIf(ctx, err)
}
} }
bufconn.Close() bufconn.Close()
return return
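
isRoutineNetErr itself falls outside these hunks; given this file's imports, a plausible shape is a timeout check on net.Error plus a small set of benign errors — treat the exact set as an assumption:

    // Sketch: errors that are routine for a listener accept/peek loop.
    func isRoutineNetErr(err error) bool {
        if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
            return true // read deadline expired on the transport
        }
        return err == io.EOF // client closed the connection mid-peek
    }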
@ -172,12 +172,11 @@ func (listener *httpListener) start() {
if listener.tlsConfig != nil { if listener.tlsConfig != nil {
// As the listener is configured with TLS, try to do TLS handshake, drop the connection if it fails. // As the listener is configured with TLS, try to do TLS handshake, drop the connection if it fails.
tlsConn := tls.Server(bufconn, listener.tlsConfig) tlsConn := tls.Server(bufconn, listener.tlsConfig)
if err := tlsConn.Handshake(); err != nil { if err = tlsConn.Handshake(); err != nil {
if listener.errorLogFunc != nil { reqInfo := (&logger.ReqInfo{}).AppendTags("remoteAddr", bufconn.RemoteAddr().String())
listener.errorLogFunc(err, reqInfo.AppendTags("localAddr", bufconn.LocalAddr().String())
"TLS handshake failed with new connection %s at server %s", ctx := logger.SetReqInfo(context.Background(), reqInfo)
bufconn.RemoteAddr(), bufconn.LocalAddr()) logger.LogIf(ctx, err)
}
bufconn.Close() bufconn.Close()
return return
} }
@ -187,12 +186,13 @@ func (listener *httpListener) start() {
listener.updateBytesReadFunc, listener.updateBytesWrittenFunc) listener.updateBytesReadFunc, listener.updateBytesWrittenFunc)
// Peek bytes of maximum length of all HTTP methods. // Peek bytes of maximum length of all HTTP methods.
data, err := bufconn.Peek(methodMaxLen) data, err = bufconn.Peek(methodMaxLen)
if err != nil { if err != nil {
if !isRoutineNetErr(err) && listener.errorLogFunc != nil { if !isRoutineNetErr(err) {
listener.errorLogFunc(err, reqInfo := (&logger.ReqInfo{}).AppendTags("remoteAddr", bufconn.RemoteAddr().String())
"Error in reading from new TLS connection %s at server %s", reqInfo.AppendTags("localAddr", bufconn.LocalAddr().String())
bufconn.RemoteAddr(), bufconn.LocalAddr()) ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
} }
bufconn.Close() bufconn.Close()
return return
@ -205,12 +205,10 @@ func (listener *httpListener) start() {
return return
} }
} }
reqInfo := (&logger.ReqInfo{}).AppendTags("remoteAddr", bufconn.RemoteAddr().String())
if listener.errorLogFunc != nil { reqInfo.AppendTags("localAddr", bufconn.LocalAddr().String())
listener.errorLogFunc(errors.New("junk message"), ctx := logger.SetReqInfo(context.Background(), reqInfo)
"Received non-HTTP message from new connection %s at server %s", logger.LogIf(ctx, err)
bufconn.RemoteAddr(), bufconn.LocalAddr())
}
bufconn.Close() bufconn.Close()
return return
@ -299,8 +297,7 @@ func newHTTPListener(serverAddrs []string,
readTimeout time.Duration, readTimeout time.Duration,
writeTimeout time.Duration, writeTimeout time.Duration,
updateBytesReadFunc func(int), updateBytesReadFunc func(int),
updateBytesWrittenFunc func(int), updateBytesWrittenFunc func(int)) (listener *httpListener, err error) {
errorLogFunc func(error, string, ...interface{})) (listener *httpListener, err error) {
var tcpListeners []*net.TCPListener var tcpListeners []*net.TCPListener
// Close all opened listeners on error // Close all opened listeners on error
@ -337,7 +334,6 @@ func newHTTPListener(serverAddrs []string,
writeTimeout: writeTimeout, writeTimeout: writeTimeout,
updateBytesReadFunc: updateBytesReadFunc, updateBytesReadFunc: updateBytesReadFunc,
updateBytesWrittenFunc: updateBytesWrittenFunc, updateBytesWrittenFunc: updateBytesWrittenFunc,
errorLogFunc: errorLogFunc,
} }
listener.start() listener.start()

View File

@ -19,6 +19,7 @@ package http
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"context"
"crypto/tls" "crypto/tls"
"errors" "errors"
"fmt" "fmt"
@ -205,7 +206,7 @@ func TestNewHTTPListener(t *testing.T) {
writeTimeout time.Duration writeTimeout time.Duration
updateBytesReadFunc func(int) updateBytesReadFunc func(int)
updateBytesWrittenFunc func(int) updateBytesWrittenFunc func(int)
errorLogFunc func(error, string, ...interface{}) errorLogFunc func(context.Context, error)
expectedErr error expectedErr error
}{ }{
{[]string{"93.184.216.34:65432"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, nil, errors.New(remoteAddrErrMsg)}, {[]string{"93.184.216.34:65432"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, nil, errors.New(remoteAddrErrMsg)},
@ -227,7 +228,6 @@ func TestNewHTTPListener(t *testing.T) {
testCase.writeTimeout, testCase.writeTimeout,
testCase.updateBytesReadFunc, testCase.updateBytesReadFunc,
testCase.updateBytesWrittenFunc, testCase.updateBytesWrittenFunc,
testCase.errorLogFunc,
) )
if testCase.expectedErr == nil { if testCase.expectedErr == nil {
@ -279,7 +279,6 @@ func TestHTTPListenerStartClose(t *testing.T) {
time.Duration(0), time.Duration(0),
nil, nil,
nil, nil,
nil,
) )
if err != nil { if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err) t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -327,7 +326,6 @@ func TestHTTPListenerAddr(t *testing.T) {
time.Duration(0), time.Duration(0),
nil, nil,
nil, nil,
nil,
) )
if err != nil { if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err) t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -372,7 +370,6 @@ func TestHTTPListenerAddrs(t *testing.T) {
time.Duration(0), time.Duration(0),
nil, nil,
nil, nil,
nil,
) )
if err != nil { if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err) t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -419,7 +416,6 @@ func TestHTTPListenerAccept(t *testing.T) {
time.Duration(0), time.Duration(0),
nil, nil,
nil, nil,
nil,
) )
if err != nil { if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err) t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -480,11 +476,6 @@ func TestHTTPListenerAccept(t *testing.T) {
func TestHTTPListenerAcceptPeekError(t *testing.T) { func TestHTTPListenerAcceptPeekError(t *testing.T) {
tlsConfig := getTLSConfig(t) tlsConfig := getTLSConfig(t)
nonLoopBackIP := getNonLoopBackIP(t) nonLoopBackIP := getNonLoopBackIP(t)
errorFunc := func(err error, template string, args ...interface{}) {
msg := fmt.Sprintf("error: %v. ", err)
msg += fmt.Sprintf(template, args...)
fmt.Println(msg)
}
testCases := []struct { testCases := []struct {
serverAddrs []string serverAddrs []string
@ -504,7 +495,6 @@ func TestHTTPListenerAcceptPeekError(t *testing.T) {
time.Duration(0), time.Duration(0),
nil, nil,
nil, nil,
errorFunc,
) )
if err != nil { if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err) t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -540,11 +530,6 @@ func TestHTTPListenerAcceptPeekError(t *testing.T) {
func TestHTTPListenerAcceptTLSError(t *testing.T) { func TestHTTPListenerAcceptTLSError(t *testing.T) {
tlsConfig := getTLSConfig(t) tlsConfig := getTLSConfig(t)
nonLoopBackIP := getNonLoopBackIP(t) nonLoopBackIP := getNonLoopBackIP(t)
errorFunc := func(err error, template string, args ...interface{}) {
msg := fmt.Sprintf("error: %v. ", err)
msg += fmt.Sprintf(template, args...)
fmt.Println(msg)
}
testCases := []struct { testCases := []struct {
serverAddrs []string serverAddrs []string
@ -563,7 +548,6 @@ func TestHTTPListenerAcceptTLSError(t *testing.T) {
time.Duration(0), time.Duration(0),
nil, nil,
nil, nil,
errorFunc,
) )
if err != nil { if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err) t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -609,11 +593,6 @@ func TestHTTPListenerAcceptTLSError(t *testing.T) {
func TestHTTPListenerAcceptError(t *testing.T) { func TestHTTPListenerAcceptError(t *testing.T) {
tlsConfig := getTLSConfig(t) tlsConfig := getTLSConfig(t)
nonLoopBackIP := getNonLoopBackIP(t) nonLoopBackIP := getNonLoopBackIP(t)
errorFunc := func(err error, template string, args ...interface{}) {
msg := fmt.Sprintf("error: %v. ", err)
msg += fmt.Sprintf(template, args...)
fmt.Println(msg)
}
testCases := []struct { testCases := []struct {
serverAddrs []string serverAddrs []string
@ -635,7 +614,6 @@ func TestHTTPListenerAcceptError(t *testing.T) {
time.Duration(0), time.Duration(0),
nil, nil,
nil, nil,
errorFunc,
) )
if err != nil { if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err) t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
@ -761,7 +739,6 @@ func TestHTTPListenerAcceptParallel(t *testing.T) {
time.Duration(0), time.Duration(0),
nil, nil,
nil, nil,
nil,
) )
if err != nil { if err != nil {
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err) t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)

View File

@ -50,16 +50,15 @@ const (
// Server - extended http.Server supports multiple addresses to serve and enhanced connection handling. // Server - extended http.Server supports multiple addresses to serve and enhanced connection handling.
type Server struct { type Server struct {
http.Server http.Server
Addrs []string // addresses on which the server listens for new connection. Addrs []string // addresses on which the server listens for new connection.
ShutdownTimeout time.Duration // timeout used for graceful server shutdown. ShutdownTimeout time.Duration // timeout used for graceful server shutdown.
TCPKeepAliveTimeout time.Duration // timeout used for underneath TCP connection. TCPKeepAliveTimeout time.Duration // timeout used for underneath TCP connection.
UpdateBytesReadFunc func(int) // function to be called to update bytes read in bufConn. UpdateBytesReadFunc func(int) // function to be called to update bytes read in bufConn.
UpdateBytesWrittenFunc func(int) // function to be called to update bytes written in bufConn. UpdateBytesWrittenFunc func(int) // function to be called to update bytes written in bufConn.
ErrorLogFunc func(error, string, ...interface{}) // function to be called on errors. listenerMutex *sync.Mutex // to guard 'listener' field.
listenerMutex *sync.Mutex // to guard 'listener' field. listener *httpListener // HTTP listener for all 'Addrs' field.
listener *httpListener // HTTP listener for all 'Addrs' field. inShutdown uint32 // indicates whether the server is in shutdown or not
inShutdown uint32 // indicates whether the server is in shutdown or not requestCount int32 // counter holds no. of request in process.
requestCount int32 // counter holds no. of request in process.
} }
// Start - start HTTP server // Start - start HTTP server
@ -77,7 +76,6 @@ func (srv *Server) Start() (err error) {
tcpKeepAliveTimeout := srv.TCPKeepAliveTimeout tcpKeepAliveTimeout := srv.TCPKeepAliveTimeout
updateBytesReadFunc := srv.UpdateBytesReadFunc updateBytesReadFunc := srv.UpdateBytesReadFunc
updateBytesWrittenFunc := srv.UpdateBytesWrittenFunc updateBytesWrittenFunc := srv.UpdateBytesWrittenFunc
errorLogFunc := srv.ErrorLogFunc // if srv.ErrorLogFunc holds non-synced state -> possible data race
// Create new HTTP listener. // Create new HTTP listener.
var listener *httpListener var listener *httpListener
@ -89,7 +87,6 @@ func (srv *Server) Start() (err error) {
writeTimeout, writeTimeout,
updateBytesReadFunc, updateBytesReadFunc,
updateBytesWrittenFunc, updateBytesWrittenFunc,
errorLogFunc,
) )
if err != nil { if err != nil {
return err return err

View File

@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
@ -24,6 +25,7 @@ import (
jwtgo "github.com/dgrijalva/jwt-go" jwtgo "github.com/dgrijalva/jwt-go"
jwtreq "github.com/dgrijalva/jwt-go/request" jwtreq "github.com/dgrijalva/jwt-go/request"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/auth"
) )
@ -97,11 +99,11 @@ func isAuthTokenValid(tokenString string) bool {
var claims jwtgo.StandardClaims var claims jwtgo.StandardClaims
jwtToken, err := jwtgo.ParseWithClaims(tokenString, &claims, keyFuncCallback) jwtToken, err := jwtgo.ParseWithClaims(tokenString, &claims, keyFuncCallback)
if err != nil { if err != nil {
errorIf(err, "Unable to parse JWT token string") logger.LogIf(context.Background(), err)
return false return false
} }
if err = claims.Valid(); err != nil { if err = claims.Valid(); err != nil {
errorIf(err, "Invalid claims in JWT token string") logger.LogIf(context.Background(), err)
return false return false
} }
return jwtToken.Valid && claims.Subject == globalServerConfig.GetCredential().AccessKey return jwtToken.Valid && claims.Subject == globalServerConfig.GetCredential().AccessKey
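
For context, ParseWithClaims verifies the signature via the key callback before claims.Valid() checks expiry, and the final Subject comparison ties the token to the configured access key. A sketch of the callback, assuming an HMAC-signed token whose secret comes from the server credentials (both assumptions):

    keyFuncCallback := func(token *jwtgo.Token) (interface{}, error) {
        if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok {
            return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
        }
        return []byte(globalServerConfig.GetCredential().SecretKey), nil
    }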

View File

@ -17,10 +17,11 @@
package cmd package cmd
import ( import (
"context"
"fmt" "fmt"
"time" "time"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
) )
type statusType string type statusType string
@ -112,29 +113,34 @@ func (n *nsLockMap) initLockInfoForVolumePath(param nsParam) {
// Change the state of the lock from Blocked to Running. // Change the state of the lock from Blocked to Running.
func (n *nsLockMap) statusBlockedToRunning(param nsParam, lockSource, opsID string, readLock bool) error { func (n *nsLockMap) statusBlockedToRunning(param nsParam, lockSource, opsID string, readLock bool) error {
// This function is called outside nsLockMap.mutex.Lock(), so must be held explicitly. // This function is called outside nsLockMap.mutex.Lock(), so must be held explicitly.
ctx := context.Background()
n.lockMapMutex.Lock() n.lockMapMutex.Lock()
defer n.lockMapMutex.Unlock() defer n.lockMapMutex.Unlock()
// Check whether the lock info entry for <volume, path> pair already exists. // Check whether the lock info entry for <volume, path> pair already exists.
_, ok := n.debugLockMap[param] _, ok := n.debugLockMap[param]
if !ok { if !ok {
return errors.Trace(LockInfoVolPathMissing{param.volume, param.path}) logger.LogIf(ctx, LockInfoVolPathMissing{param.volume, param.path})
return LockInfoVolPathMissing{param.volume, param.path}
} }
// Check whether lock info entry for the given `opsID` exists. // Check whether lock info entry for the given `opsID` exists.
lockInfo, ok := n.debugLockMap[param].lockInfo[opsID] lockInfo, ok := n.debugLockMap[param].lockInfo[opsID]
if !ok { if !ok {
return errors.Trace(LockInfoOpsIDNotFound{param.volume, param.path, opsID}) logger.LogIf(ctx, LockInfoOpsIDNotFound{param.volume, param.path, opsID})
+		return LockInfoOpsIDNotFound{param.volume, param.path, opsID}
 	}
 	// Check whether lockSource is same.
 	if lockInfo.lockSource != lockSource {
-		return errors.Trace(LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource})
+		logger.LogIf(ctx, LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource})
+		return LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource}
 	}
 	// Status of the lock should be set to "Blocked".
 	if lockInfo.status != blockedStatus {
-		return errors.Trace(LockInfoStateNotBlocked{param.volume, param.path, opsID})
+		logger.LogIf(ctx, LockInfoStateNotBlocked{param.volume, param.path, opsID})
+		return LockInfoStateNotBlocked{param.volume, param.path, opsID}
 	}
 	// Change lock status to running and update the time.
 	n.debugLockMap[param].lockInfo[opsID] = newDebugLockInfo(lockSource, runningStatus, readLock)
@ -182,24 +188,29 @@ func (n *nsLockMap) statusNoneToBlocked(param nsParam, lockSource, opsID string,
 // Change the state of the lock from Blocked to none.
 func (n *nsLockMap) statusBlockedToNone(param nsParam, lockSource, opsID string, readLock bool) error {
 	_, ok := n.debugLockMap[param]
+	ctx := context.Background()
 	if !ok {
-		return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
+		logger.LogIf(ctx, LockInfoVolPathMissing{param.volume, param.path})
+		return LockInfoVolPathMissing{param.volume, param.path}
 	}
 	// Check whether lock info entry for the given `opsID` exists.
 	lockInfo, ok := n.debugLockMap[param].lockInfo[opsID]
 	if !ok {
-		return errors.Trace(LockInfoOpsIDNotFound{param.volume, param.path, opsID})
+		logger.LogIf(ctx, LockInfoOpsIDNotFound{param.volume, param.path, opsID})
+		return LockInfoOpsIDNotFound{param.volume, param.path, opsID}
 	}
 	// Check whether lockSource is same.
 	if lockInfo.lockSource != lockSource {
-		return errors.Trace(LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource})
+		logger.LogIf(ctx, LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource})
+		return LockInfoOriginMismatch{param.volume, param.path, opsID, lockSource}
 	}
 	// Status of the lock should be set to "Blocked".
 	if lockInfo.status != blockedStatus {
-		return errors.Trace(LockInfoStateNotBlocked{param.volume, param.path, opsID})
+		logger.LogIf(ctx, LockInfoStateNotBlocked{param.volume, param.path, opsID})
+		return LockInfoStateNotBlocked{param.volume, param.path, opsID}
 	}
 	// Update global lock stats.
@ -214,7 +225,8 @@ func (n *nsLockMap) statusBlockedToNone(param nsParam, lockSource, opsID string,
 func (n *nsLockMap) deleteLockInfoEntryForVolumePath(param nsParam) error {
 	// delete the lock info for the given operation.
 	if _, found := n.debugLockMap[param]; !found {
-		return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
+		logger.LogIf(context.Background(), LockInfoVolPathMissing{param.volume, param.path})
+		return LockInfoVolPathMissing{param.volume, param.path}
 	}
 	// The following stats update is relevant only in case of a
@ -235,17 +247,20 @@ func (n *nsLockMap) deleteLockInfoEntryForVolumePath(param nsParam) error {
 // Called when the nsLk ref count for the given (volume, path) is
 // not 0.
 func (n *nsLockMap) deleteLockInfoEntryForOps(param nsParam, opsID string) error {
+	ctx := context.Background()
 	// delete the lock info for the given operation.
 	infoMap, found := n.debugLockMap[param]
 	if !found {
-		return errors.Trace(LockInfoVolPathMissing{param.volume, param.path})
+		logger.LogIf(ctx, LockInfoVolPathMissing{param.volume, param.path})
+		return LockInfoVolPathMissing{param.volume, param.path}
 	}
 	// The operation finished holding the lock on the resource, remove
 	// the entry for the given operation with the operation ID.
 	opsIDLock, foundInfo := infoMap.lockInfo[opsID]
 	if !foundInfo {
 		// Unlock request with invalid operation ID not accepted.
-		return errors.Trace(LockInfoOpsIDNotFound{param.volume, param.path, opsID})
+		logger.LogIf(ctx, LockInfoOpsIDNotFound{param.volume, param.path, opsID})
+		return LockInfoOpsIDNotFound{param.volume, param.path, opsID}
 	}
 	// Update global and (volume, path) lock status.
 	granted := opsIDLock.status == runningStatus

View File

@ -17,8 +17,11 @@
 package cmd
 
 import (
+	"context"
 	"errors"
 	"time"
+
+	"github.com/minio/minio/cmd/logger"
 )
 
 // Similar to removeEntry but only removes an entry if the lock entry exists in map.
@ -29,7 +32,10 @@ func (l *localLocker) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
 		// Remove failed, in case it is a:
 		if nlrip.lri.writer {
 			// Writer: this should never happen as the whole (mapped) entry should have been deleted
-			errorIf(errors.New(""), "Lock maintenance failed to remove entry for write lock (should never happen) %#v %#v %#v", nlrip.name, nlrip.lri.uid, lri)
+			reqInfo := (&logger.ReqInfo{}).AppendTags("name", nlrip.name)
+			reqInfo.AppendTags("uid", nlrip.lri.uid)
+			ctx := logger.SetReqInfo(context.Background(), reqInfo)
+			logger.LogIf(ctx, errors.New("Lock maintenance failed to remove entry for write lock (should never happen)"))
 		} // Reader: this can happen if multiple read locks were active and
 		// the one we are looking for has been released concurrently (so it is fine).
 	} // Removal went okay, all is fine.
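
Worth noting for reviewers: where errorIf interpolated values into a printf-style message, LogIf expects those values to be attached to the context as ReqInfo tags beforehand, and it is a no-op when the error is nil. A minimal runnable sketch of that call shape (removeEntry and every value below are invented for illustration, not from this commit):

package main

import (
	"context"
	"errors"

	"github.com/minio/minio/cmd/logger"
)

// removeEntry is a hypothetical helper standing in for the real lock-map removal.
func removeEntry() error {
	return errors.New("lock entry not found")
}

func main() {
	// Tags carry what errorIf used to interpolate into the message string.
	reqInfo := (&logger.ReqInfo{}).AppendTags("name", "mybucket/myobject")
	reqInfo.AppendTags("uid", "c291dd55-1a33-4e48-956e-171d14b49157")
	ctx := logger.SetReqInfo(context.Background(), reqInfo)
	logger.LogIf(ctx, removeEntry()) // logs only when the error is non-nil
}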

View File

@ -17,6 +17,7 @@
 package cmd
 
 import (
+	"context"
 	"fmt"
 	"math/rand"
 	"sync"
@ -24,7 +25,7 @@ import (
 	router "github.com/gorilla/mux"
 	"github.com/minio/dsync"
-	"github.com/minio/minio/pkg/errors"
+	"github.com/minio/minio/cmd/logger"
 )
 
 const (
@ -98,7 +99,8 @@ func registerDistNSLockRouter(mux *router.Router, endpoints EndpointList) error
 func registerStorageLockers(mux *router.Router, lkSrv *lockServer) error {
 	lockRPCServer := newRPCServer()
 	if err := lockRPCServer.RegisterName(lockServiceName, lkSrv); err != nil {
-		return errors.Trace(err)
+		logger.LogIf(context.Background(), err)
+		return err
 	}
 	lockRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()
 	lockRouter.Path(lockServicePath).Handler(lockRPCServer)
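
For readers unfamiliar with the surrounding code: registerStorageLockers wires an RPC server into a gorilla/mux route. A self-contained sketch of that general shape using the standard library's net/rpc (MinIO's newRPCServer and lockServer are its own types; everything below is a simplified stand-in, and net/rpc serves over HTTP CONNECT):

package main

import (
	"log"
	"net/http"
	"net/rpc"

	"github.com/gorilla/mux"
)

type Locker struct{}

type Args struct{ Name string }

// TryLock is an illustrative RPC method; MinIO's real lockServer exposes its own set.
func (l *Locker) TryLock(args *Args, reply *bool) error {
	*reply = true
	return nil
}

func main() {
	rpcServer := rpc.NewServer()
	if err := rpcServer.RegisterName("Dsync", new(Locker)); err != nil {
		log.Fatal(err) // mirrors the log-then-return pattern above
	}
	router := mux.NewRouter()
	router.Path("/minio/lock").Handler(rpcServer)
	log.Fatal(http.ListenAndServe(":9000", router))
}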

View File

@ -16,9 +16,13 @@
 package logger
 
-import "context"
+import (
+	"context"
+	"fmt"
+	"sync"
+)
 
-// Key used for Get/SetContext
+// Key used for Get/SetReqInfo
 type contextKeyType string
 
 const contextLogKey = contextKeyType("miniolog")
@ -37,27 +41,59 @@ type ReqInfo struct {
 	API        string // API name - GetObject PutObject NewMultipartUpload etc.
 	BucketName string // Bucket name
 	ObjectName string // Object name
-	Tags       []KeyVal // Any additional info not accommodated by above fields
+	tags       []KeyVal // Any additional info not accommodated by above fields
+	sync.RWMutex
 }
 
-// AppendTags - appends key/val to ReqInfo.Tags
-func (r *ReqInfo) AppendTags(key string, val string) {
+// NewReqInfo :
+func NewReqInfo(remoteHost, userAgent, requestID, api, bucket, object string) *ReqInfo {
+	req := ReqInfo{}
+	req.RemoteHost = remoteHost
+	req.UserAgent = userAgent
+	req.API = api
+	req.RequestID = requestID
+	req.BucketName = bucket
+	req.ObjectName = object
+	return &req
+}
+
+// AppendTags - appends key/val to ReqInfo.tags
+func (r *ReqInfo) AppendTags(key string, val string) *ReqInfo {
 	if r == nil {
-		return
+		return nil
 	}
-	r.Tags = append(r.Tags, KeyVal{key, val})
+	r.Lock()
+	defer r.Unlock()
+	r.tags = append(r.tags, KeyVal{key, val})
+	return r
 }
 
-// SetContext sets ReqInfo in the context.
-func SetContext(ctx context.Context, req *ReqInfo) context.Context {
+// GetTags - returns the user defined tags
+func (r *ReqInfo) GetTags() []KeyVal {
+	if r == nil {
+		return nil
+	}
+	r.RLock()
+	defer r.RUnlock()
+	return append([]KeyVal(nil), r.tags...)
+}
+
+// SetReqInfo sets ReqInfo in the context.
+func SetReqInfo(ctx context.Context, req *ReqInfo) context.Context {
+	if ctx == nil {
+		LogIf(context.Background(), fmt.Errorf("context is nil"))
+		return nil
+	}
 	return context.WithValue(ctx, contextLogKey, req)
 }
 
-// GetContext returns ReqInfo if set.
-func GetContext(ctx context.Context) *ReqInfo {
-	r, ok := ctx.Value(contextLogKey).(*ReqInfo)
-	if ok {
-		return r
+// GetReqInfo returns ReqInfo if set.
+func GetReqInfo(ctx context.Context) *ReqInfo {
+	if ctx != nil {
+		r, ok := ctx.Value(contextLogKey).(*ReqInfo)
+		if ok {
+			return r
+		}
 	}
 	return nil
 }
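
Two design points in this rewrite are easy to miss: AppendTags now returns its receiver, so tag calls chain (the style used throughout this commit), and GetTags hands back a copy taken under RLock, so iterating tags is safe against concurrent appends. A small runnable sketch (the tag values are invented):

package main

import (
	"context"
	"fmt"

	"github.com/minio/minio/cmd/logger"
)

func main() {
	// AppendTags returns *ReqInfo, so calls chain.
	reqInfo := (&logger.ReqInfo{}).AppendTags("host", "node-1").AppendTags("elapsedTime", "2s")
	ctx := logger.SetReqInfo(context.Background(), reqInfo)

	// GetTags returns a snapshot copy; safe to range over even if
	// another goroutine appends tags concurrently.
	for _, kv := range logger.GetReqInfo(ctx).GetTags() {
		fmt.Println(kv.Key, kv.Val)
	}
}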

View File

@ -1,5 +1,5 @@
 /*
- * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc.
+ * Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -14,9 +14,10 @@
 * limitations under the License.
 */
 
-package cmd
+package logger
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"go/build"
@ -26,11 +27,17 @@ import (
 	"strings"
 	"time"
 
+	"github.com/fatih/color"
 	"github.com/minio/mc/pkg/console"
-	"github.com/minio/minio/pkg/errors"
 )
 
-var log = NewLogger()
+// global colors.
+var (
+	colorBold   = color.New(color.Bold).SprintFunc()
+	colorYellow = color.New(color.FgYellow).SprintfFunc()
+	colorRed    = color.New(color.FgRed).SprintfFunc()
+)
 
 var trimStrings []string
 
 // Level type
@ -42,6 +49,15 @@ const (
 	Fatal
 )
 
+const loggerTimeFormat string = "15:04:05 MST 01/02/2006"
+
+var matchingFuncNames = [...]string{
+	"http.HandlerFunc.ServeHTTP",
+	"cmd.serverMain",
+	"cmd.StartGateway",
+	// add more here ..
+}
+
 func (level Level) String() string {
 	var lvlStr string
 	switch level {
@ -53,61 +69,78 @@ func (level Level) String() string {
 	return lvlStr
 }
+type traceEntry struct {
+	Message   string            `json:"message"`
+	Source    []string          `json:"source"`
+	Variables map[string]string `json:"variables,omitempty"`
+}
+
+type args struct {
+	Bucket string `json:"bucket,omitempty"`
+	Object string `json:"object,omitempty"`
+}
+
+type api struct {
+	Name string `json:"name,omitempty"`
+	Args args   `json:"args,omitempty"`
+}
+
 type logEntry struct {
-	Level   string   `json:"level"`
-	Message string   `json:"message"`
-	Time    string   `json:"time"`
-	Cause   string   `json:"cause"`
-	Trace   []string `json:"trace"`
+	Level      string     `json:"level"`
+	Time       string     `json:"time"`
+	API        api        `json:"api,omitempty"`
+	RemoteHost string     `json:"remotehost,omitempty"`
+	RequestID  string     `json:"requestID,omitempty"`
+	UserAgent  string     `json:"userAgent,omitempty"`
+	Cause      string     `json:"cause,omitempty"`
+	Trace      traceEntry `json:"error"`
 }
 
-// Logger - for console messages
-type Logger struct {
-	quiet bool
-	json  bool
-}
-
-// NewLogger - to create a new Logger object
-func NewLogger() *Logger {
-	return &Logger{}
-}
+// quiet: Hide startup messages if enabled
+// jsonFlag: Display in JSON format, if enabled
+var (
+	quiet, jsonFlag bool
+)
 
 // EnableQuiet - turns quiet option on.
-func (log *Logger) EnableQuiet() {
-	log.quiet = true
+func EnableQuiet() {
+	quiet = true
 }
 
 // EnableJSON - outputs logs in json format.
-func (log *Logger) EnableJSON() {
-	log.json = true
-	log.quiet = true
+func EnableJSON() {
+	jsonFlag = true
+	quiet = true
 }
 
 // Println - wrapper to console.Println() with quiet flag.
-func (log *Logger) Println(args ...interface{}) {
-	if !log.quiet {
+func Println(args ...interface{}) {
+	if !quiet {
 		console.Println(args...)
 	}
 }
 
 // Printf - wrapper to console.Printf() with quiet flag.
-func (log *Logger) Printf(format string, args ...interface{}) {
-	if !log.quiet {
+func Printf(format string, args ...interface{}) {
+	if !quiet {
 		console.Printf(format, args...)
 	}
 }
 
-func init() {
+// Init sets the trimStrings to possible GOPATHs
+// and GOROOT directories. Also append github.com/minio/minio
+// This is done to clean up the filename, when stack trace is
+// displayed when an error happens.
+func Init(goPath string) {
 	var goPathList []string
 	var defaultgoPathList []string
 	// Add all possible GOPATH paths into trimStrings
 	// Split GOPATH depending on the OS type
 	if runtime.GOOS == "windows" {
-		goPathList = strings.Split(GOPATH, ";")
+		goPathList = strings.Split(goPath, ";")
 		defaultgoPathList = strings.Split(build.Default.GOPATH, ";")
 	} else {
 		// All other types of OSs
-		goPathList = strings.Split(GOPATH, ":")
+		goPathList = strings.Split(goPath, ":")
 		defaultgoPathList = strings.Split(build.Default.GOPATH, ":")
 	}
@ -155,6 +188,13 @@ func getTrace(traceLevel int) []string {
 		// Form and append a line of stack trace into a
 		// collection, 'trace', to build full stack trace
 		trace = append(trace, fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName))
+
+		// Ignore trace logs beyond the following conditions
+		for _, name := range matchingFuncNames {
+			if funcName == name {
+				return trace
+			}
+		}
 	}
 	traceLevel++
 	// Read stack trace information from PC
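
The loop added above stops collecting frames once it reaches a well-known entry point such as http.HandlerFunc.ServeHTTP, keeping traces focused on MinIO code instead of runtime plumbing. A standalone, runnable sketch of the same idea built on runtime.Caller (the stop list and names here are illustrative):

package main

import (
	"fmt"
	"runtime"
	"strings"
)

// stopAt plays the role of matchingFuncNames: unwinding halts at these frames.
var stopAt = []string{"main.main"}

func shortTrace() []string {
	var trace []string
	for skip := 1; ; skip++ {
		pc, file, line, ok := runtime.Caller(skip)
		if !ok {
			return trace // ran out of frames
		}
		fn := runtime.FuncForPC(pc).Name()
		trace = append(trace, fmt.Sprintf("%v:%v:%v()", file, line, fn))
		for _, name := range stopAt {
			if strings.HasSuffix(fn, name) {
				return trace // reached a known entry point, stop here
			}
		}
	}
}

func main() {
	fmt.Println(strings.Join(shortTrace(), "\n"))
}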
@ -165,21 +205,7 @@ func getTrace(traceLevel int) []string {
 func logIf(level Level, err error, msg string,
 	data ...interface{}) {
-	isErrIgnored := func(err error) (ok bool) {
-		err = errors.Cause(err)
-		switch err.(type) {
-		case BucketNotFound, BucketNotEmpty, BucketExists:
-			ok = true
-		case ObjectNotFound, ObjectExistsAsDirectory:
-			ok = true
-		case BucketPolicyNotFound, InvalidUploadID:
-			ok = true
-		}
-		return ok
-	}
-	if err == nil || isErrIgnored(err) {
+	if err == nil {
 		return
 	}
 	// Get the cause for the Error
@ -187,17 +213,16 @@ func logIf(level Level, err error, msg string,
 	// Get full stack trace
 	trace := getTrace(3)
 	// Get time
-	timeOfError := UTCNow().Format(time.RFC3339Nano)
+	timeOfError := time.Now().UTC().Format(time.RFC3339Nano)
 	// Output the formatted log message at console
 	var output string
 	message := fmt.Sprintf(msg, data...)
-	if log.json {
+	if jsonFlag {
 		logJSON, err := json.Marshal(&logEntry{
 			Level:   level.String(),
-			Message: message,
 			Time:    timeOfError,
 			Cause:   cause,
-			Trace:   trace,
+			Trace:   traceEntry{Source: trace, Message: message},
 		})
 		if err != nil {
 			panic("json marshal of logEntry failed: " + err.Error())
@ -224,10 +249,102 @@ func logIf(level Level, err error, msg string,
 	}
 }
 
-func errorIf(err error, msg string, data ...interface{}) {
-	logIf(Error, err, msg, data...)
-}
-
-func fatalIf(err error, msg string, data ...interface{}) {
+// FatalIf :
+func FatalIf(err error, msg string, data ...interface{}) {
 	logIf(Fatal, err, msg, data...)
 }
+// LogIf :
+func LogIf(ctx context.Context, err error) {
+	if err == nil {
+		return
+	}
+
+	req := GetReqInfo(ctx)
+	if req == nil {
+		req = &ReqInfo{API: "SYSTEM"}
+	}
+
+	API := "SYSTEM"
+	if req.API != "" {
+		API = req.API
+	}
+
+	tags := make(map[string]string)
+	for _, entry := range req.GetTags() {
+		tags[entry.Key] = entry.Val
+	}
+
+	// Get the cause for the Error
+	message := err.Error()
+	// Get full stack trace
+	trace := getTrace(2)
+	// Output the formatted log message at console
+	var output string
+	if jsonFlag {
+		logJSON, err := json.Marshal(&logEntry{
+			Level:      Error.String(),
+			RemoteHost: req.RemoteHost,
+			RequestID:  req.RequestID,
+			UserAgent:  req.UserAgent,
+			Time:       time.Now().UTC().Format(time.RFC3339Nano),
+			API:        api{Name: API, Args: args{Bucket: req.BucketName, Object: req.ObjectName}},
+			Trace:      traceEntry{Message: message, Source: trace, Variables: tags},
+		})
+		if err != nil {
+			panic(err)
+		}
+		output = string(logJSON)
+	} else {
+		// Add a sequence number and formatting for each stack trace
+		// No formatting is required for the first entry
+		for i, element := range trace {
+			trace[i] = fmt.Sprintf("%8v: %s", i+1, element)
+		}
+
+		tagString := ""
+		for key, value := range tags {
+			if value != "" {
+				if tagString != "" {
+					tagString += ", "
+				}
+				tagString += key + "=" + value
+			}
+		}
+
+		apiString := "API: " + API + "("
+		if req.BucketName != "" {
+			apiString = apiString + "bucket=" + req.BucketName
+		}
+		if req.ObjectName != "" {
+			apiString = apiString + ", object=" + req.ObjectName
+		}
+		apiString += ")"
+		timeString := "Time: " + time.Now().Format(loggerTimeFormat)
+
+		var requestID string
+		if req.RequestID != "" {
+			requestID = "\nRequestID: " + req.RequestID
+		}
+
+		var remoteHost string
+		if req.RemoteHost != "" {
+			remoteHost = "\nRemoteHost: " + req.RemoteHost
+		}
+
+		var userAgent string
+		if req.UserAgent != "" {
+			userAgent = "\nUserAgent: " + req.UserAgent
+		}
+
+		if len(tags) > 0 {
+			tagString = "\n " + tagString
+		}
+
+		output = fmt.Sprintf("\n%s\n%s%s%s%s\nError: %s%s\n%s",
+			apiString, timeString, requestID, remoteHost, userAgent,
+			colorRed(colorBold(message)), tagString, strings.Join(trace, "\n"))
+	}
+	fmt.Println(output)
+}
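
For reference, with EnableJSON set a LogIf call emits a single logEntry object, while the default console mode prints the API/Time/RequestID block assembled above. Roughly, the JSON form for a hypothetical failed PutObject would look like the following (every value here is invented, and field order follows the struct tags shown earlier):

{"level":"ERROR","time":"2018-04-05T22:04:40.000Z","api":{"name":"PutObject","args":{"bucket":"mybucket","object":"photos/1.jpg"}},"remotehost":"10.0.0.1","requestID":"3L137","userAgent":"curl/7.54","error":{"message":"disk not found","source":["object-handlers.go:123:cmd.PutObjectHandler()"],"variables":{"diskPath":"/mnt/disk1"}}}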

View File

@ -17,6 +17,7 @@
 package cmd
 
 import (
+	"context"
 	"errors"
 	pathutil "path"
 	"runtime"
@ -29,6 +30,7 @@ import (
 	"github.com/minio/dsync"
 	"github.com/minio/lsync"
 	"github.com/minio/minio-go/pkg/set"
+	"github.com/minio/minio/cmd/logger"
 )
 
 // Global name space lock.
@ -170,9 +172,7 @@ func (n *nsLockMap) lock(volume, path string, lockSource, opsID string, readLock
 	// pair of <volume, path> and <OperationID> till the lock
 	// unblocks. The lock for accessing `globalNSMutex` is held inside
 	// the function itself.
-	if err := n.statusNoneToBlocked(param, lockSource, opsID, readLock); err != nil {
-		errorIf(err, fmt.Sprintf("Failed to set lock state to blocked (param = %v; opsID = %s)", param, opsID))
-	}
+	n.statusNoneToBlocked(param, lockSource, opsID, readLock)
 
 	// Unlock map before Locking NS which might block.
 	n.lockMapMutex.Unlock()
@ -188,26 +188,19 @@ func (n *nsLockMap) lock(volume, path string, lockSource, opsID string, readLock
 		n.lockMapMutex.Lock()
 		defer n.lockMapMutex.Unlock()
 		// Changing the status of the operation from blocked to none
-		if err := n.statusBlockedToNone(param, lockSource, opsID, readLock); err != nil {
-			errorIf(err, fmt.Sprintf("Failed to clear the lock state (param = %v; opsID = %s)", param, opsID))
-		}
+		n.statusBlockedToNone(param, lockSource, opsID, readLock)
 
 		nsLk.ref-- // Decrement ref count since we failed to get the lock
 		// delete the lock state entry for given operation ID.
-		err := n.deleteLockInfoEntryForOps(param, opsID)
-		if err != nil {
-			errorIf(err, fmt.Sprintf("Failed to delete lock info entry (param = %v; opsID = %s)", param, opsID))
-		}
+		n.deleteLockInfoEntryForOps(param, opsID)
 		if nsLk.ref == 0 {
 			// Remove from the map if there are no more references.
 			delete(n.lockMap, param)
 
 			// delete the lock state entry for given
 			// <volume, path> pair.
-			err := n.deleteLockInfoEntryForVolumePath(param)
-			if err != nil {
-				errorIf(err, fmt.Sprintf("Failed to delete lock info entry (param = %v)", param))
-			}
+			n.deleteLockInfoEntryForVolumePath(param)
 		}
 		return
 	}
@ -215,9 +208,7 @@ func (n *nsLockMap) lock(volume, path string, lockSource, opsID string, readLock
 	// Changing the status of the operation from blocked to
 	// running. change the state of the lock to be running (from
 	// blocked) for the given pair of <volume, path> and <OperationID>.
-	if err := n.statusBlockedToRunning(param, lockSource, opsID, readLock); err != nil {
-		errorIf(err, "Failed to set the lock state to running")
-	}
+	n.statusBlockedToRunning(param, lockSource, opsID, readLock)
 	return
 }
@ -236,17 +227,13 @@ func (n *nsLockMap) unlock(volume, path, opsID string, readLock bool) {
 		nsLk.Unlock()
 	}
 	if nsLk.ref == 0 {
-		errorIf(errors.New("Namespace reference count cannot be 0"),
-			"Invalid reference count detected")
+		logger.LogIf(context.Background(), errors.New("Namespace reference count cannot be 0"))
 	}
 	if nsLk.ref != 0 {
 		nsLk.ref--
 		// delete the lock state entry for given operation ID.
-		err := n.deleteLockInfoEntryForOps(param, opsID)
-		if err != nil {
-			errorIf(err, "Failed to delete lock info entry")
-		}
+		n.deleteLockInfoEntryForOps(param, opsID)
 	}
 	if nsLk.ref == 0 {
 		// Remove from the map if there are no more references.
@ -254,10 +241,7 @@ func (n *nsLockMap) unlock(volume, path, opsID string, readLock bool) {
 
 		// delete the lock state entry for given
 		// <volume, path> pair.
-		err := n.deleteLockInfoEntryForVolumePath(param)
-		if err != nil {
-			errorIf(err, "Failed to delete lock info entry")
-		}
+		n.deleteLockInfoEntryForVolumePath(param)
 		}
 	}
 }

View File

@ -17,6 +17,7 @@
 package cmd
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"net"
@ -30,6 +31,7 @@ import (
 	humanize "github.com/dustin/go-humanize"
 	"github.com/minio/minio-go/pkg/set"
+	"github.com/minio/minio/cmd/logger"
 )
 
 // IPv4 addresses of local host.
@ -38,7 +40,7 @@ var localIP4 = mustGetLocalIP4()
 // mustSplitHostPort is a wrapper to net.SplitHostPort() where error is assumed to be a fatal.
 func mustSplitHostPort(hostPort string) (host, port string) {
 	host, port, err := net.SplitHostPort(hostPort)
-	fatalIf(err, "Unable to split host port %s", hostPort)
+	logger.FatalIf(err, "Unable to split host port %s", hostPort)
 	return host, port
 }
@ -46,7 +48,7 @@ func mustSplitHostPort(hostPort string) (host, port string) {
 func mustGetLocalIP4() (ipList set.StringSet) {
 	ipList = set.NewStringSet()
 	addrs, err := net.InterfaceAddrs()
-	fatalIf(err, "Unable to get IP addresses of this host.")
+	logger.FatalIf(err, "Unable to get IP addresses of this host.")
 
 	for _, addr := range addrs {
 		var ip net.IP
@ -98,8 +100,10 @@ func getHostIP4(host string) (ipList set.StringSet, err error) {
 		if timeElapsed > time.Second {
 			// log the message to console about the host not being
 			// resolveable.
-			errorIf(err, "Unable to resolve host %s (%s)", host,
-				humanize.RelTime(startTime, startTime.Add(timeElapsed), "elapsed", ""))
+			reqInfo := (&logger.ReqInfo{}).AppendTags("host", host)
+			reqInfo.AppendTags("elapsedTime", humanize.RelTime(startTime, startTime.Add(timeElapsed), "elapsed", ""))
+			ctx := logger.SetReqInfo(context.Background(), reqInfo)
+			logger.LogIf(ctx, err)
 		}
 	}
 }

View File

@ -26,6 +26,7 @@ import (
 	"path"
 	"sync"
 
+	"github.com/minio/minio/cmd/logger"
 	xerrors "github.com/minio/minio/pkg/errors"
 	"github.com/minio/minio/pkg/event"
 	"github.com/minio/minio/pkg/hash"
@ -162,7 +163,7 @@ func (sys *NotificationSys) RemoteTargetExist(bucketName string, targetID event.
 }
 
 // initListeners - initializes PeerRPC clients available in listener.json.
-func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string) error {
+func (sys *NotificationSys) initListeners(ctx context.Context, objAPI ObjectLayer, bucketName string) error {
 	// listener.json is available/applicable only in DistXL mode.
 	if !globalIsDistXL {
 		return nil
@ -181,7 +182,7 @@ func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string)
 	}
 	defer objLock.Unlock()
 
-	reader, err := readConfig(objAPI, configFile)
+	reader, err := readConfig(ctx, objAPI, configFile)
 	if err != nil && !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
 		return err
 	}
@ -189,8 +190,8 @@ func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string)
 	listenerList := []ListenBucketNotificationArgs{}
 	if reader != nil {
 		if err = json.NewDecoder(reader).Decode(&listenerList); err != nil {
-			errorIf(err, "Unable to parse listener.json.")
-			return xerrors.Trace(err)
+			logger.LogIf(ctx, err)
+			return err
 		}
 	}
@ -203,7 +204,8 @@ func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string)
 	for _, args := range listenerList {
 		var found bool
 		if found, err = isLocalHost(args.Addr.Name); err != nil {
-			errorIf(err, "unable to check address %v is local host", args.Addr)
+			logger.GetReqInfo(ctx).AppendTags("host", args.Addr.Name)
+			logger.LogIf(ctx, err)
 			return err
 		}
 		if found {
@ -218,6 +220,8 @@ func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string)
 		var exist bool
 		if exist, err = rpcClient.RemoteTargetExist(bucketName, args.TargetID); err != nil {
+			logger.GetReqInfo(ctx).AppendTags("targetID", args.TargetID.Name)
+			logger.LogIf(ctx, err)
 			return err
 		}
 		if !exist {
@ -228,6 +232,8 @@ func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string)
 		target := NewPeerRPCClientTarget(bucketName, args.TargetID, rpcClient)
 		rulesMap := event.NewRulesMap(args.EventNames, args.Pattern, target.ID())
 		if err = sys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
+			logger.GetReqInfo(ctx).AppendTags("targetID", target.id.Name)
+			logger.LogIf(ctx, err)
 			return err
 		}
 		activeListenerList = append(activeListenerList, args)
@ -235,6 +241,7 @@ func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string)
 	data, err := json.Marshal(activeListenerList)
 	if err != nil {
+		logger.LogIf(ctx, err)
 		return err
 	}
@ -253,18 +260,17 @@ func (sys *NotificationSys) Init(objAPI ObjectLayer) error {
 	}
 
 	for _, bucket := range buckets {
-		config, err := readNotificationConfig(objAPI, bucket.Name)
+		ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{BucketName: bucket.Name})
+		config, err := readNotificationConfig(ctx, objAPI, bucket.Name)
 		if err != nil {
 			if !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
-				errorIf(err, "Unable to load notification configuration of bucket %v", bucket.Name)
 				return err
 			}
 		} else {
 			sys.AddRulesMap(bucket.Name, config.ToRulesMap())
 		}
-		if err = sys.initListeners(objAPI, bucket.Name); err != nil {
-			errorIf(err, "Unable to initialize HTTP listener for bucket %v", bucket.Name)
+		if err = sys.initListeners(ctx, objAPI, bucket.Name); err != nil {
 			return err
 		}
 	}
@ -325,7 +331,9 @@ func (sys *NotificationSys) RemoveAllRemoteTargets() {
 // RemoveRemoteTarget - closes and removes target by target ID.
 func (sys *NotificationSys) RemoveRemoteTarget(bucketName string, targetID event.TargetID) {
 	for id, err := range sys.targetList.Remove(targetID) {
-		errorIf(err, "unable to close target ID %v", id)
+		reqInfo := (&logger.ReqInfo{}).AppendTags("targetID", id.Name)
+		ctx := logger.SetReqInfo(context.Background(), reqInfo)
+		logger.LogIf(ctx, err)
 	}
 
 	sys.Lock()
@ -457,8 +465,11 @@ func sendEvent(args eventArgs) {
 	}
 
 	for targetID, err := range globalNotificationSys.Send(args) {
-		errorIf(err, "unable to send event %v of bucket: %v, object: %v to target %v",
-			args.EventName, args.BucketName, args.Object.Name, targetID)
+		reqInfo := &logger.ReqInfo{BucketName: args.BucketName, ObjectName: args.Object.Name}
+		reqInfo.AppendTags("EventName", args.EventName.String())
+		reqInfo.AppendTags("targetID", targetID.Name)
+		ctx := logger.SetReqInfo(context.Background(), reqInfo)
+		logger.LogIf(ctx, err)
 	}
 }
@ -472,36 +483,39 @@ func saveConfig(objAPI ObjectLayer, configFile string, data []byte) error {
 	return err
 }
 
-func readConfig(objAPI ObjectLayer, configFile string) (*bytes.Buffer, error) {
+func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) (*bytes.Buffer, error) {
 	var buffer bytes.Buffer
 	// Read entire content by setting size to -1
-	err := objAPI.GetObject(context.Background(), minioMetaBucket, configFile, 0, -1, &buffer, "")
+	err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "")
 	if err != nil {
 		// Ignore if err is ObjectNotFound or IncompleteBody when bucket is not configured with notification
 		if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
-			return nil, xerrors.Trace(errNoSuchNotifications)
+			return nil, errNoSuchNotifications
 		}
-		errorIf(err, "Unable to read file %v", configFile)
+		logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
+		logger.LogIf(ctx, err)
 		return nil, err
 	}
 
 	// Return NoSuchNotifications on empty content.
 	if buffer.Len() == 0 {
-		return nil, xerrors.Trace(errNoSuchNotifications)
+		return nil, errNoSuchNotifications
 	}
 
 	return &buffer, nil
 }
 
-func readNotificationConfig(objAPI ObjectLayer, bucketName string) (*event.Config, error) {
+func readNotificationConfig(ctx context.Context, objAPI ObjectLayer, bucketName string) (*event.Config, error) {
 	// Construct path to notification.xml for the given bucket.
 	configFile := path.Join(bucketConfigPrefix, bucketName, bucketNotificationConfig)
-	reader, err := readConfig(objAPI, configFile)
+	reader, err := readConfig(ctx, objAPI, configFile)
 	if err != nil {
 		return nil, err
 	}
 
-	return event.ParseConfig(reader, globalServerConfig.GetRegion(), globalNotificationSys.targetList)
+	config, err := event.ParseConfig(reader, globalServerConfig.GetRegion(), globalNotificationSys.targetList)
+	logger.LogIf(ctx, err)
+	return config, err
 }
 
 func saveNotificationConfig(objAPI ObjectLayer, bucketName string, config *event.Config) error {
@ -521,6 +535,8 @@ func SaveListener(objAPI ObjectLayer, bucketName string, eventNames []event.Name
 		return nil
 	}
 
+	ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{BucketName: bucketName})
+
 	// Construct path to listener.json for the given bucket.
 	configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig)
 	transactionConfigFile := configFile + ".transaction"
@ -534,7 +550,7 @@ func SaveListener(objAPI ObjectLayer, bucketName string, eventNames []event.Name
 	}
 	defer objLock.Unlock()
 
-	reader, err := readConfig(objAPI, configFile)
+	reader, err := readConfig(ctx, objAPI, configFile)
 	if err != nil && !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
 		return err
 	}
@ -542,8 +558,8 @@ func SaveListener(objAPI ObjectLayer, bucketName string, eventNames []event.Name
 	listenerList := []ListenBucketNotificationArgs{}
 	if reader != nil {
 		if err = json.NewDecoder(reader).Decode(&listenerList); err != nil {
-			errorIf(err, "Unable to parse listener.json.")
-			return xerrors.Trace(err)
+			logger.LogIf(ctx, err)
+			return err
 		}
 	}
@ -556,6 +572,7 @@ func SaveListener(objAPI ObjectLayer, bucketName string, eventNames []event.Name
 	data, err := json.Marshal(listenerList)
 	if err != nil {
+		logger.LogIf(ctx, err)
 		return err
 	}
@ -569,6 +586,8 @@ func RemoveListener(objAPI ObjectLayer, bucketName string, targetID event.Target
 		return nil
 	}
 
+	ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{BucketName: bucketName})
+
 	// Construct path to listener.json for the given bucket.
 	configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig)
 	transactionConfigFile := configFile + ".transaction"
@ -582,7 +601,7 @@ func RemoveListener(objAPI ObjectLayer, bucketName string, targetID event.Target
 	}
 	defer objLock.Unlock()
 
-	reader, err := readConfig(objAPI, configFile)
+	reader, err := readConfig(ctx, objAPI, configFile)
 	if err != nil && !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
 		return err
 	}
@ -590,8 +609,8 @@ func RemoveListener(objAPI ObjectLayer, bucketName string, targetID event.Target
 	listenerList := []ListenBucketNotificationArgs{}
 	if reader != nil {
 		if err = json.NewDecoder(reader).Decode(&listenerList); err != nil {
-			errorIf(err, "Unable to parse listener.json.")
-			return xerrors.Trace(err)
+			logger.LogIf(ctx, err)
+			return err
 		}
 	}
@ -612,6 +631,7 @@ func RemoveListener(objAPI ObjectLayer, bucketName string, targetID event.Target
 	data, err := json.Marshal(activeListenerList)
 	if err != nil {
+		logger.LogIf(ctx, err)
 		return err
 	}

View File

@ -22,7 +22,7 @@ import (
 	"sync"
 
 	humanize "github.com/dustin/go-humanize"
-	"github.com/minio/minio/pkg/errors"
+	"github.com/minio/minio/cmd/logger"
 )
 
 const (
@ -83,15 +83,15 @@ func dirObjectInfo(bucket, object string, size int64, metadata map[string]string
 	}
 }
 
-func deleteBucketMetadata(bucket string, objAPI ObjectLayer) {
+func deleteBucketMetadata(ctx context.Context, bucket string, objAPI ObjectLayer) {
 	// Delete bucket access policy, if present - ignore any errors.
-	_ = removeBucketPolicy(bucket, objAPI)
+	removeBucketPolicy(ctx, bucket, objAPI)
 
 	// Delete notification config, if present - ignore any errors.
-	_ = removeNotificationConfig(objAPI, bucket)
+	removeNotificationConfig(ctx, objAPI, bucket)
 
 	// Delete listener config, if present - ignore any errors.
-	_ = removeListenerConfig(objAPI, bucket)
+	removeListenerConfig(ctx, objAPI, bucket)
 }
 
 // Depending on the disk type network or local, initialize storage API.
@ -104,13 +104,15 @@ func newStorageAPI(endpoint Endpoint) (storage StorageAPI, err error) {
 }
 
 // Cleanup a directory recursively.
-func cleanupDir(storage StorageAPI, volume, dirPath string) error {
+func cleanupDir(ctx context.Context, storage StorageAPI, volume, dirPath string) error {
 	var delFunc func(string) error
 	// Function to delete entries recursively.
 	delFunc = func(entryPath string) error {
 		if !hasSuffix(entryPath, slashSeparator) {
 			// Delete the file entry.
-			return errors.Trace(storage.DeleteFile(volume, entryPath))
+			err := storage.DeleteFile(volume, entryPath)
+			logger.LogIf(ctx, err)
+			return err
 		}
 
 		// If it's a directory, list and call delFunc() for each entry.
@ -119,12 +121,15 @@ func cleanupDir(storage StorageAPI, volume, dirPath string) error {
 		if err == errFileNotFound {
 			return nil
 		} else if err != nil { // For any other errors fail.
-			return errors.Trace(err)
+			logger.LogIf(ctx, err)
+			return err
 		} // else on success..
 
 		// Entry path is empty, just delete it.
 		if len(entries) == 0 {
-			return errors.Trace(storage.DeleteFile(volume, path.Clean(entryPath)))
+			err = storage.DeleteFile(volume, path.Clean(entryPath))
+			logger.LogIf(ctx, err)
+			return err
 		}
 
 		// Recurse and delete all other entries.
@ -140,21 +145,19 @@ func cleanupDir(storage StorageAPI, volume, dirPath string) error {
 }
 
 // Removes notification.xml for a given bucket, only used during DeleteBucket.
-func removeNotificationConfig(objAPI ObjectLayer, bucket string) error {
+func removeNotificationConfig(ctx context.Context, objAPI ObjectLayer, bucket string) error {
 	// Verify bucket is valid.
 	if !IsValidBucketName(bucket) {
 		return BucketNameInvalid{Bucket: bucket}
 	}
 
 	ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
-	return objAPI.DeleteObject(context.Background(), minioMetaBucket, ncPath)
+	return objAPI.DeleteObject(ctx, minioMetaBucket, ncPath)
 }
 
 // Remove listener configuration from storage layer. Used when a bucket is deleted.
-func removeListenerConfig(objAPI ObjectLayer, bucket string) error {
+func removeListenerConfig(ctx context.Context, objAPI ObjectLayer, bucket string) error {
 	// make the path
 	lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig)
-	return objAPI.DeleteObject(context.Background(), minioMetaBucket, lcPath)
+	return objAPI.DeleteObject(ctx, minioMetaBucket, lcPath)
 }

View File

@ -19,165 +19,200 @@ package cmd
 import (
 	"context"
 
+	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/errors"
 	"github.com/skyrings/skyring-common/tools/uuid"
 )
 
 // Checks on GetObject arguments, bucket and object.
-func checkGetObjArgs(bucket, object string) error {
-	return checkBucketAndObjectNames(bucket, object)
+func checkGetObjArgs(ctx context.Context, bucket, object string) error {
+	return checkBucketAndObjectNames(ctx, bucket, object)
 }
 
 // Checks on DeleteObject arguments, bucket and object.
-func checkDelObjArgs(bucket, object string) error {
-	return checkBucketAndObjectNames(bucket, object)
+func checkDelObjArgs(ctx context.Context, bucket, object string) error {
+	return checkBucketAndObjectNames(ctx, bucket, object)
 }
 
 // Checks bucket and object name validity, returns nil if both are valid.
-func checkBucketAndObjectNames(bucket, object string) error {
+func checkBucketAndObjectNames(ctx context.Context, bucket, object string) error {
 	// Verify if bucket is valid.
 	if !IsValidBucketName(bucket) {
-		return errors.Trace(BucketNameInvalid{Bucket: bucket})
+		logger.LogIf(ctx, BucketNameInvalid{Bucket: bucket})
+		return BucketNameInvalid{Bucket: bucket}
 	}
 	// Verify if object is valid.
 	if len(object) == 0 {
-		return errors.Trace(ObjectNameInvalid{Bucket: bucket, Object: object})
+		logger.LogIf(ctx, ObjectNameInvalid{Bucket: bucket, Object: object})
+		return ObjectNameInvalid{Bucket: bucket, Object: object}
 	}
 	if !IsValidObjectPrefix(object) {
-		return errors.Trace(ObjectNameInvalid{Bucket: bucket, Object: object})
+		logger.LogIf(ctx, ObjectNameInvalid{Bucket: bucket, Object: object})
+		return ObjectNameInvalid{Bucket: bucket, Object: object}
 	}
 	return nil
 }
 
 // Checks for all ListObjects arguments validity.
-func checkListObjsArgs(bucket, prefix, marker, delimiter string, obj ObjectLayer) error {
+func checkListObjsArgs(ctx context.Context, bucket, prefix, marker, delimiter string, obj ObjectLayer) error {
 	// Verify if bucket exists before validating object name.
 	// This is done on purpose since the order of errors is
 	// important here bucket does not exist error should
 	// happen before we return an error for invalid object name.
 	// FIXME: should be moved to handler layer.
-	if err := checkBucketExist(bucket, obj); err != nil {
-		return errors.Trace(err)
+	if err := checkBucketExist(ctx, bucket, obj); err != nil {
+		return err
 	}
 	// Validates object prefix validity after bucket exists.
 	if !IsValidObjectPrefix(prefix) {
-		return errors.Trace(ObjectNameInvalid{
+		logger.LogIf(ctx, ObjectNameInvalid{
 			Bucket: bucket,
 			Object: prefix,
 		})
+		return ObjectNameInvalid{
+			Bucket: bucket,
+			Object: prefix,
+		}
 	}
 	// Verify if delimiter is anything other than '/', which we do not support.
 	if delimiter != "" && delimiter != slashSeparator {
-		return errors.Trace(UnsupportedDelimiter{
+		logger.LogIf(ctx, UnsupportedDelimiter{
 			Delimiter: delimiter,
 		})
+		return UnsupportedDelimiter{
+			Delimiter: delimiter,
+		}
 	}
 	// Verify if marker has prefix.
 	if marker != "" && !hasPrefix(marker, prefix) {
-		return errors.Trace(InvalidMarkerPrefixCombination{
			Marker: marker,
			Prefix: prefix,
		})
+		logger.LogIf(ctx, InvalidMarkerPrefixCombination{
+			Marker: marker,
+			Prefix: prefix,
+		})
+		return InvalidMarkerPrefixCombination{
+			Marker: marker,
+			Prefix: prefix,
+		}
 	}
 	return nil
 }
 // Checks for all ListMultipartUploads arguments validity.
-func checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, obj ObjectLayer) error {
-	if err := checkListObjsArgs(bucket, prefix, keyMarker, delimiter, obj); err != nil {
+func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, obj ObjectLayer) error {
+	if err := checkListObjsArgs(ctx, bucket, prefix, keyMarker, delimiter, obj); err != nil {
 		return err
 	}
 	if uploadIDMarker != "" {
 		if hasSuffix(keyMarker, slashSeparator) {
-			return errors.Trace(InvalidUploadIDKeyCombination{
+			logger.LogIf(ctx, InvalidUploadIDKeyCombination{
 				UploadIDMarker: uploadIDMarker,
 				KeyMarker:      keyMarker,
 			})
+			return InvalidUploadIDKeyCombination{
+				UploadIDMarker: uploadIDMarker,
+				KeyMarker:      keyMarker,
+			}
 		}
 		id, err := uuid.Parse(uploadIDMarker)
 		if err != nil {
-			return errors.Trace(err)
+			logger.LogIf(ctx, err)
+			return err
 		}
 		if id.IsZero() {
-			return errors.Trace(MalformedUploadID{
+			logger.LogIf(ctx, MalformedUploadID{
 				UploadID: uploadIDMarker,
 			})
+			return MalformedUploadID{
+				UploadID: uploadIDMarker,
+			}
 		}
 	}
 	return nil
 }
 
 // Checks for NewMultipartUpload arguments validity, also validates if bucket exists.
-func checkNewMultipartArgs(bucket, object string, obj ObjectLayer) error {
-	return checkObjectArgs(bucket, object, obj)
+func checkNewMultipartArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
+	return checkObjectArgs(ctx, bucket, object, obj)
 }
 
 // Checks for PutObjectPart arguments validity, also validates if bucket exists.
-func checkPutObjectPartArgs(bucket, object string, obj ObjectLayer) error {
-	return checkObjectArgs(bucket, object, obj)
+func checkPutObjectPartArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
+	return checkObjectArgs(ctx, bucket, object, obj)
 }
 
 // Checks for ListParts arguments validity, also validates if bucket exists.
-func checkListPartsArgs(bucket, object string, obj ObjectLayer) error {
-	return checkObjectArgs(bucket, object, obj)
+func checkListPartsArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
+	return checkObjectArgs(ctx, bucket, object, obj)
 }
 
 // Checks for CompleteMultipartUpload arguments validity, also validates if bucket exists.
-func checkCompleteMultipartArgs(bucket, object string, obj ObjectLayer) error {
-	return checkObjectArgs(bucket, object, obj)
+func checkCompleteMultipartArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
+	return checkObjectArgs(ctx, bucket, object, obj)
 }
 
 // Checks for AbortMultipartUpload arguments validity, also validates if bucket exists.
-func checkAbortMultipartArgs(bucket, object string, obj ObjectLayer) error {
-	return checkObjectArgs(bucket, object, obj)
+func checkAbortMultipartArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
+	return checkObjectArgs(ctx, bucket, object, obj)
 }
 
 // Checks Object arguments validity, also validates if bucket exists.
-func checkObjectArgs(bucket, object string, obj ObjectLayer) error {
+func checkObjectArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
 	// Verify if bucket exists before validating object name.
 	// This is done on purpose since the order of errors is
 	// important here bucket does not exist error should
 	// happen before we return an error for invalid object name.
 	// FIXME: should be moved to handler layer.
-	if err := checkBucketExist(bucket, obj); err != nil {
-		return errors.Trace(err)
+	if err := checkBucketExist(ctx, bucket, obj); err != nil {
+		return err
 	}
 	// Validates object name validity after bucket exists.
 	if !IsValidObjectName(object) {
-		return errors.Trace(ObjectNameInvalid{
+		logger.LogIf(ctx, ObjectNameInvalid{
 			Bucket: bucket,
 			Object: object,
 		})
+		return ObjectNameInvalid{
+			Bucket: bucket,
+			Object: object,
+		}
 	}
 	return nil
 }
 
 // Checks for PutObject arguments validity, also validates if bucket exists.
-func checkPutObjectArgs(bucket, object string, obj ObjectLayer, size int64) error {
+func checkPutObjectArgs(ctx context.Context, bucket, object string, obj ObjectLayer, size int64) error {
 	// Verify if bucket exists before validating object name.
 	// This is done on purpose since the order of errors is
 	// important here bucket does not exist error should
 	// happen before we return an error for invalid object name.
 	// FIXME: should be moved to handler layer.
-	if err := checkBucketExist(bucket, obj); err != nil {
-		return errors.Trace(err)
+	if err := checkBucketExist(ctx, bucket, obj); err != nil {
+		return err
 	}
 	if len(object) == 0 ||
 		hasPrefix(object, slashSeparator) ||
 		(hasSuffix(object, slashSeparator) && size != 0) ||
 		!IsValidObjectPrefix(object) {
-		return errors.Trace(ObjectNameInvalid{
+		logger.LogIf(ctx, ObjectNameInvalid{
 			Bucket: bucket,
 			Object: object,
 		})
+		return ObjectNameInvalid{
+			Bucket: bucket,
+			Object: object,
+		}
 	}
 	return nil
 }
 
 // Checks whether bucket exists and returns appropriate error if not.
-func checkBucketExist(bucket string, obj ObjectLayer) error {
-	_, err := obj.GetBucketInfo(context.Background(), bucket)
+func checkBucketExist(ctx context.Context, bucket string, obj ObjectLayer) error {
+	_, err := obj.GetBucketInfo(ctx, bucket)
 	if err != nil {
 		return errors.Cause(err)
 	}

View File

@ -1847,7 +1847,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
 			},
 		},
 	}
-	s3MD5, err := getCompleteMultipartMD5(inputParts[3].parts)
+	s3MD5, err := getCompleteMultipartMD5(context.Background(), inputParts[3].parts)
 	if err != nil {
 		t.Fatalf("Obtaining S3MD5 failed")
 	}

View File

@ -17,6 +17,7 @@
 package cmd
 
 import (
+	"context"
 	"encoding/hex"
 	"fmt"
 	"path"
@ -24,7 +25,7 @@ import (
 	"strings"
 	"unicode/utf8"
 
-	"github.com/minio/minio/pkg/errors"
+	"github.com/minio/minio/cmd/logger"
 	"github.com/skyrings/skyring-common/tools/uuid"
 )
@ -174,12 +175,13 @@ func mustGetUUID() string {
 }
 
 // Create an s3 compatible MD5sum for complete multipart transaction.
-func getCompleteMultipartMD5(parts []CompletePart) (string, error) {
+func getCompleteMultipartMD5(ctx context.Context, parts []CompletePart) (string, error) {
 	var finalMD5Bytes []byte
 	for _, part := range parts {
 		md5Bytes, err := hex.DecodeString(part.ETag)
 		if err != nil {
-			return "", errors.Trace(err)
+			logger.LogIf(ctx, err)
+			return "", err
 		}
 		finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
 	}
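
For context, the S3-style multipart ETag that this helper feeds is conventionally the MD5 of the concatenated per-part MD5 digests, suffixed with the part count. A standalone, runnable sketch of that convention (this is the general S3 convention, not MinIO's exact code beyond what is shown above):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strconv"
)

// completeMultipartETag follows the widely used S3 convention:
// md5(concat(md5(part1), ..., md5(partN))) + "-" + N.
func completeMultipartETag(partETags []string) (string, error) {
	var all []byte
	for _, etag := range partETags {
		b, err := hex.DecodeString(etag) // each part ETag is a hex-encoded MD5
		if err != nil {
			return "", err
		}
		all = append(all, b...)
	}
	sum := md5.Sum(all)
	return hex.EncodeToString(sum[:]) + "-" + strconv.Itoa(len(partETags)), nil
}

func main() {
	etag, _ := completeMultipartETag([]string{
		"5d41402abc4b2a76b9719d911017c592", // md5("hello")
		"7d793037a0760186574b0282f2f435e7", // md5("world")
	})
	fmt.Println(etag)
}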

View File

@ -17,6 +17,7 @@
 package cmd
 
 import (
+	"context"
 	"reflect"
 	"testing"
 )
@ -148,7 +149,7 @@ func TestGetCompleteMultipartMD5(t *testing.T) {
 	}
 
 	for i, test := range testCases {
-		result, err := getCompleteMultipartMD5(test.parts)
+		result, err := getCompleteMultipartMD5(context.Background(), test.parts)
 		if result != test.expectedResult {
 			t.Fatalf("test %d failed: expected: result=%v, got=%v", i+1, test.expectedResult, result)
 		}

View File

@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"crypto/hmac" "crypto/hmac"
"encoding/binary" "encoding/binary"
"encoding/hex" "encoding/hex"
@ -31,6 +32,7 @@ import (
"strconv" "strconv"
mux "github.com/gorilla/mux" mux "github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/handlers" "github.com/minio/minio/pkg/handlers"
@ -63,12 +65,12 @@ func setHeadGetRespHeaders(w http.ResponseWriter, reqParams url.Values) {
// this is in keeping with the permissions sections of the docs of both: // this is in keeping with the permissions sections of the docs of both:
// HEAD Object: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html // HEAD Object: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
// GET Object: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html // GET Object: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
func errAllowableObjectNotFound(bucket string, r *http.Request) APIErrorCode { func errAllowableObjectNotFound(ctx context.Context, bucket string, r *http.Request) APIErrorCode {
if getRequestAuthType(r) == authTypeAnonymous { if getRequestAuthType(r) == authTypeAnonymous {
// We care about the bucket as a whole, not a particular resource. // We care about the bucket as a whole, not a particular resource.
resource := "/" + bucket resource := "/" + bucket
sourceIP := handlers.GetSourceIP(r) sourceIP := handlers.GetSourceIP(r)
if s3Error := enforceBucketPolicy(bucket, "s3:ListBucket", resource, if s3Error := enforceBucketPolicy(ctx, bucket, "s3:ListBucket", resource,
r.Referer(), sourceIP, r.URL.Query()); s3Error != ErrNone { r.Referer(), sourceIP, r.URL.Query()); s3Error != ErrNone {
return ErrAccessDenied return ErrAccessDenied
} }
@ -95,7 +97,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
return return
} }
if s3Error := checkRequestAuthType(r, bucket, "s3:GetObject", globalServerConfig.GetRegion()); s3Error != ErrNone { if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:GetObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL) writeErrorResponse(w, s3Error, r.URL)
return return
} }
@ -109,7 +111,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
if err != nil { if err != nil {
apiErr := toAPIErrorCode(err) apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey { if apiErr == ErrNoSuchKey {
apiErr = errAllowableObjectNotFound(bucket, r) apiErr = errAllowableObjectNotFound(ctx, bucket, r)
} }
writeErrorResponse(w, apiErr, r.URL) writeErrorResponse(w, apiErr, r.URL)
return return
@ -135,7 +137,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
} }
// log the error. // log the error.
errorIf(err, "Invalid request range") logger.LogIf(ctx, err)
} }
} }
@ -182,7 +184,6 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
// Reads the object at startOffset and writes to mw. // Reads the object at startOffset and writes to mw.
if err = getObject(ctx, bucket, object, startOffset, length, httpWriter, objInfo.ETag); err != nil { if err = getObject(ctx, bucket, object, startOffset, length, httpWriter, objInfo.ETag); err != nil {
errorIf(err, "Unable to write to client.")
if !httpWriter.HasWritten() { // write error response only if no data has been written to client yet if !httpWriter.HasWritten() { // write error response only if no data has been written to client yet
writeErrorResponse(w, toAPIErrorCode(err), r.URL) writeErrorResponse(w, toAPIErrorCode(err), r.URL)
} }
@ -232,7 +233,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
return return
} }
if s3Error := checkRequestAuthType(r, bucket, "s3:GetObject", globalServerConfig.GetRegion()); s3Error != ErrNone { if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:GetObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponseHeadersOnly(w, s3Error) writeErrorResponseHeadersOnly(w, s3Error)
return return
} }
@ -246,7 +247,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
if err != nil { if err != nil {
apiErr := toAPIErrorCode(err) apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey { if apiErr == ErrNoSuchKey {
apiErr = errAllowableObjectNotFound(bucket, r) apiErr = errAllowableObjectNotFound(ctx, bucket, r)
} }
writeErrorResponseHeadersOnly(w, apiErr) writeErrorResponseHeadersOnly(w, apiErr)
return return
@ -300,7 +301,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
// Extract metadata relevant for a CopyObject operation based on conditional // Extract metadata relevant for a CopyObject operation based on conditional
// header values specified in X-Amz-Metadata-Directive. // header values specified in X-Amz-Metadata-Directive.
func getCpObjMetadataFromHeader(header http.Header, userMeta map[string]string) (map[string]string, error) { func getCpObjMetadataFromHeader(ctx context.Context, header http.Header, userMeta map[string]string) (map[string]string, error) {
// Make a copy of the supplied metadata to avoid // Make a copy of the supplied metadata to avoid
// changing the original one. // changing the original one.
defaultMeta := make(map[string]string, len(userMeta)) defaultMeta := make(map[string]string, len(userMeta))
@ -311,7 +312,7 @@ func getCpObjMetadataFromHeader(header http.Header, userMeta map[string]string)
// if x-amz-metadata-directive says REPLACE then // if x-amz-metadata-directive says REPLACE then
// we extract metadata from the input headers. // we extract metadata from the input headers.
if isMetadataReplace(header) { if isMetadataReplace(header) {
return extractMetadataFromHeader(header) return extractMetadataFromHeader(ctx, header)
} }
// if x-amz-metadata-directive says COPY then we // if x-amz-metadata-directive says COPY then we
@ -340,7 +341,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
return return
} }
if s3Error := checkRequestAuthType(r, dstBucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone { if s3Error := checkRequestAuthType(ctx, r, dstBucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL) writeErrorResponse(w, s3Error, r.URL)
return return
} }
@ -501,10 +502,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
} }
srcInfo.Writer = writer srcInfo.Writer = writer
srcInfo.UserDefined, err = getCpObjMetadataFromHeader(r.Header, srcInfo.UserDefined) srcInfo.UserDefined, err = getCpObjMetadataFromHeader(ctx, r.Header, srcInfo.UserDefined)
if err != nil { if err != nil {
pipeWriter.CloseWithError(err) pipeWriter.CloseWithError(err)
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL) writeErrorResponse(w, ErrInternalError, r.URL)
return return
} }
@ -628,9 +628,8 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
} }
// Extract metadata to be saved from incoming HTTP header. // Extract metadata to be saved from incoming HTTP header.
metadata, err := extractMetadataFromHeader(r.Header) metadata, err := extractMetadataFromHeader(ctx, r.Header)
if err != nil { if err != nil {
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL) writeErrorResponse(w, ErrInternalError, r.URL)
return return
} }
@ -670,7 +669,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
case authTypeAnonymous: case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html // http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
sourceIP := handlers.GetSourceIP(r) sourceIP := handlers.GetSourceIP(r)
if s3Err = enforceBucketPolicy(bucket, "s3:PutObject", r.URL.Path, r.Referer(), sourceIP, r.URL.Query()); s3Err != ErrNone { if s3Err = enforceBucketPolicy(ctx, bucket, "s3:PutObject", r.URL.Path, r.Referer(), sourceIP, r.URL.Query()); s3Err != ErrNone {
writeErrorResponse(w, s3Err, r.URL) writeErrorResponse(w, s3Err, r.URL)
return return
} }
@ -782,7 +781,8 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
writeErrorResponse(w, ErrServerNotInitialized, r.URL) writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return return
} }
if s3Error := checkRequestAuthType(r, bucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone { if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL) writeErrorResponse(w, s3Error, r.URL)
return return
} }
@ -825,9 +825,8 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
} }
// Extract metadata that needs to be saved. // Extract metadata that needs to be saved.
metadata, err := extractMetadataFromHeader(r.Header) metadata, err := extractMetadataFromHeader(ctx, r.Header)
if err != nil { if err != nil {
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL) writeErrorResponse(w, ErrInternalError, r.URL)
return return
} }
@ -869,7 +868,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
return return
} }
if s3Error := checkRequestAuthType(r, dstBucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone { if s3Error := checkRequestAuthType(ctx, r, dstBucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL) writeErrorResponse(w, s3Error, r.URL)
return return
} }
@ -931,7 +930,8 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
if hrange, err = parseCopyPartRange(rangeHeader, srcInfo.Size); err != nil { if hrange, err = parseCopyPartRange(rangeHeader, srcInfo.Size); err != nil {
// Handle only errInvalidRange // Handle only errInvalidRange
// Ignore other parse errors and treat it as a regular Get request, like Amazon S3. // Ignore other parse errors and treat it as a regular Get request, like Amazon S3.
errorIf(err, "Unable to extract range %s", rangeHeader) logger.GetReqInfo(ctx).AppendTags("rangeHeader", rangeHeader)
logger.LogIf(ctx, err)
writeCopyPartErr(w, err, r.URL) writeCopyPartErr(w, err, r.URL)
return return
} }
@ -1135,7 +1135,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return return
case authTypeAnonymous: case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html // http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
if s3Error := enforceBucketPolicy(bucket, "s3:PutObject", r.URL.Path, if s3Error := enforceBucketPolicy(ctx, bucket, "s3:PutObject", r.URL.Path,
r.Referer(), handlers.GetSourceIP(r), r.URL.Query()); s3Error != ErrNone { r.Referer(), handlers.GetSourceIP(r), r.URL.Query()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL) writeErrorResponse(w, s3Error, r.URL)
return return
@ -1263,7 +1263,7 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
if api.CacheAPI() != nil { if api.CacheAPI() != nil {
abortMultipartUpload = api.CacheAPI().AbortMultipartUpload abortMultipartUpload = api.CacheAPI().AbortMultipartUpload
} }
if s3Error := checkRequestAuthType(r, bucket, "s3:AbortMultipartUpload", globalServerConfig.GetRegion()); s3Error != ErrNone { if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:AbortMultipartUpload", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL) writeErrorResponse(w, s3Error, r.URL)
return return
} }
@ -1278,7 +1278,6 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
uploadID, _, _, _ := getObjectResources(r.URL.Query()) uploadID, _, _, _ := getObjectResources(r.URL.Query())
if err := abortMultipartUpload(ctx, bucket, object, uploadID); err != nil { if err := abortMultipartUpload(ctx, bucket, object, uploadID); err != nil {
errorIf(err, "AbortMultipartUpload failed")
writeErrorResponse(w, toAPIErrorCode(err), r.URL) writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return return
} }
@ -1299,7 +1298,7 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht
return return
} }
if s3Error := checkRequestAuthType(r, bucket, "s3:ListMultipartUploadParts", globalServerConfig.GetRegion()); s3Error != ErrNone { if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:ListMultipartUploadParts", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL) writeErrorResponse(w, s3Error, r.URL)
return return
} }
@ -1339,7 +1338,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
return return
} }
if s3Error := checkRequestAuthType(r, bucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone { if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL) writeErrorResponse(w, s3Error, r.URL)
return return
} }
@ -1449,7 +1448,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
return return
} }
if s3Error := checkRequestAuthType(r, bucket, "s3:DeleteObject", globalServerConfig.GetRegion()); s3Error != ErrNone { if s3Error := checkRequestAuthType(ctx, r, bucket, "s3:DeleteObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL) writeErrorResponse(w, s3Error, r.URL)
return return
} }
@ -1466,8 +1465,6 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
// Ignore delete object errors while replying to the client, since we are // Ignore delete object errors while replying to the client, since we are
// supposed to reply only 204. Additionally log the error for // supposed to reply only 204. Additionally log the error for
// investigation. // investigation.
if err := deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r); err != nil { deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r)
errorIf(err, "Unable to delete an object %s", pathJoin(bucket, object))
}
writeSuccessNoContent(w) writeSuccessNoContent(w)
} }

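One detail in GetObjectHandler above deserves a note: the "Unable to write to client" log line is gone, and the error response remains gated on httpWriter.HasWritten(), because once body bytes have reached the client an error document can no longer be sent safely. The wrapper itself is not part of this diff, so the sketch below is an assumed shape, not MinIO's httpWriter:

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
)

// trackingWriter records whether any body bytes were sent, so a handler
// can decide whether an error response is still safe to emit.
type trackingWriter struct {
    http.ResponseWriter
    wrote bool
}

func (t *trackingWriter) Write(p []byte) (int, error) {
    if len(p) > 0 {
        t.wrote = true
    }
    return t.ResponseWriter.Write(p)
}

func (t *trackingWriter) HasWritten() bool { return t.wrote }

func main() {
    w := &trackingWriter{ResponseWriter: httptest.NewRecorder()}
    fmt.Fprint(w, "partial body")
    if !w.HasWritten() { // mirrors the guard in GetObjectHandler
        http.Error(w, "internal error", http.StatusInternalServerError)
    }
    fmt.Println("has written:", w.HasWritten())
}
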
View File

@ -2206,7 +2206,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
} }
// on successful complete multipart operation the s3MD5 for the parts uploaded will be returned. // on successful complete multipart operation the s3MD5 for the parts uploaded will be returned.
s3MD5, err := getCompleteMultipartMD5(inputParts[3].parts) s3MD5, err := getCompleteMultipartMD5(context.Background(), inputParts[3].parts)
if err != nil { if err != nil {
t.Fatalf("Obtaining S3MD5 failed") t.Fatalf("Obtaining S3MD5 failed")
} }

View File

@ -22,7 +22,7 @@ import (
"path" "path"
"github.com/gorilla/mux" "github.com/gorilla/mux"
xerrors "github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/event"
xnet "github.com/minio/minio/pkg/net" xnet "github.com/minio/minio/pkg/net"
) )
@ -103,7 +103,10 @@ func (receiver *PeerRPCReceiver) ListenBucketNotification(args *ListenBucketNoti
target := NewPeerRPCClientTarget(args.BucketName, args.TargetID, rpcClient) target := NewPeerRPCClientTarget(args.BucketName, args.TargetID, rpcClient)
rulesMap := event.NewRulesMap(args.EventNames, args.Pattern, target.ID()) rulesMap := event.NewRulesMap(args.EventNames, args.Pattern, target.ID())
if err := globalNotificationSys.AddRemoteTarget(args.BucketName, target, rulesMap); err != nil { if err := globalNotificationSys.AddRemoteTarget(args.BucketName, target, rulesMap); err != nil {
errorIf(err, "Unable to add PeerRPCClientTarget %v to globalNotificationSys.targetList.", target) reqInfo := &logger.ReqInfo{BucketName: target.bucketName}
reqInfo.AppendTags("target", target.id.Name)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return err return err
} }
return nil return nil
@ -158,7 +161,10 @@ func (receiver *PeerRPCReceiver) SendEvent(args *SendEventArgs, reply *SendEvent
} }
if err != nil { if err != nil {
errorIf(err, "unable to send event %v to target %v", args.Event, args.TargetID) reqInfo := (&logger.ReqInfo{}).AppendTags("Event", args.Event.EventName.String())
reqInfo.AppendTags("target", args.TargetID.Name)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
} }
reply.Error = err reply.Error = err
@ -169,7 +175,8 @@ func (receiver *PeerRPCReceiver) SendEvent(args *SendEventArgs, reply *SendEvent
func registerS3PeerRPCRouter(router *mux.Router) error { func registerS3PeerRPCRouter(router *mux.Router) error {
peerRPCServer := newRPCServer() peerRPCServer := newRPCServer()
if err := peerRPCServer.RegisterName("Peer", &PeerRPCReceiver{}); err != nil { if err := peerRPCServer.RegisterName("Peer", &PeerRPCReceiver{}); err != nil {
return xerrors.Trace(err) logger.LogIf(context.Background(), err)
return err
} }
subrouter := router.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter() subrouter := router.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
@ -250,7 +257,11 @@ func (rpcClient *PeerRPCClient) SendEvent(bucketName string, targetID, remoteTar
} }
if reply.Error != nil { if reply.Error != nil {
errorIf(reply.Error, "unable to send event %v to rpc target %v of bucket %v", args, targetID, bucketName) reqInfo := &logger.ReqInfo{BucketName: bucketName}
reqInfo.AppendTags("targetID", targetID.Name)
reqInfo.AppendTags("event", eventData.EventName.String())
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, reply.Error)
globalNotificationSys.RemoveRemoteTarget(bucketName, targetID) globalNotificationSys.RemoveRemoteTarget(bucketName, targetID)
} }

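The peer-RPC changes above replace format strings like "unable to send event %v to target %v" with tags attached to a logger.ReqInfo that travels inside the context. A self-contained sketch of that tagging flow, with the AppendTags/SetReqInfo/LogIf signatures inferred from the call sites in this commit:

package main

import (
    "context"
    "errors"
    "fmt"
)

type keyVal struct{ key, val string }

// ReqInfo is a stand-in for logger.ReqInfo; the real type also carries
// the API name, remote host, object name and more.
type ReqInfo struct {
    BucketName string
    tags       []keyVal
}

// AppendTags returns the receiver so calls chain, matching the
// (&logger.ReqInfo{}).AppendTags(...) style used throughout this commit.
func (r *ReqInfo) AppendTags(key, val string) *ReqInfo {
    r.tags = append(r.tags, keyVal{key, val})
    return r
}

type ctxKey struct{}

func SetReqInfo(ctx context.Context, r *ReqInfo) context.Context {
    return context.WithValue(ctx, ctxKey{}, r)
}

// LogIf is a no-op on nil errors; otherwise it emits the error together
// with whatever request info the context carries.
func LogIf(ctx context.Context, err error) {
    if err == nil {
        return
    }
    r, _ := ctx.Value(ctxKey{}).(*ReqInfo)
    fmt.Printf("error: %v, reqInfo: %+v\n", err, r)
}

func main() {
    reqInfo := (&ReqInfo{BucketName: "testbucket"}).AppendTags("target", "peer-1")
    ctx := SetReqInfo(context.Background(), reqInfo)
    LogIf(ctx, errors.New("unable to add remote target"))
}
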
View File

@ -19,10 +19,13 @@
package cmd package cmd
import ( import (
"context"
"io" "io"
"os" "os"
"path" "path"
"strings" "strings"
"github.com/minio/minio/cmd/logger"
) )
// Return all the entries at the directory dirPath. // Return all the entries at the directory dirPath.
@ -57,7 +60,9 @@ func readDir(dirPath string) (entries []string, err error) {
var st os.FileInfo var st os.FileInfo
st, err = os.Stat((path.Join(dirPath, fi.Name()))) st, err = os.Stat((path.Join(dirPath, fi.Name())))
if err != nil { if err != nil {
errorIf(err, "Unable to stat path %s", path.Join(dirPath, fi.Name())) reqInfo := (&logger.ReqInfo{}).AppendTags("path", path.Join(dirPath, fi.Name()))
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
continue continue
} }
// Append to entries if symbolic link exists and is valid. // Append to entries if symbolic link exists and is valid.

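readDir above keeps its tolerant behavior through the rewrite: a failed stat on a single entry (a dangling symlink, say) is logged with the offending path attached as a tag, and the listing continues. A reduced, self-contained sketch of that log-and-skip loop (listValid is a hypothetical name):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// listValid returns the entries of dirPath that still stat cleanly,
// logging and skipping the rest, as readDir does above.
func listValid(dirPath string) ([]string, error) {
    f, err := os.Open(dirPath)
    if err != nil {
        return nil, err
    }
    defer f.Close()
    names, err := f.Readdirnames(-1)
    if err != nil {
        return nil, err
    }
    var entries []string
    for _, name := range names {
        if _, err := os.Stat(filepath.Join(dirPath, name)); err != nil {
            fmt.Println("skipping:", err) // logger.LogIf(ctx, err) in the real code
            continue
        }
        entries = append(entries, name)
    }
    return entries, nil
}

func main() {
    entries, err := listValid(os.TempDir())
    fmt.Println(len(entries), err)
}
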
View File

@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"encoding/hex" "encoding/hex"
"io" "io"
"io/ioutil" "io/ioutil"
@ -30,6 +31,7 @@ import (
"syscall" "syscall"
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/disk" "github.com/minio/minio/pkg/disk"
) )
@ -75,7 +77,7 @@ func isDirEmpty(dirname string) bool {
f, err := os.Open((dirname)) f, err := os.Open((dirname))
if err != nil { if err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
errorIf(err, "Unable to access directory") logger.LogIf(context.Background(), err)
} }
return false return false
@ -85,7 +87,7 @@ func isDirEmpty(dirname string) bool {
_, err = f.Readdirnames(1) _, err = f.Readdirnames(1)
if err != io.EOF { if err != io.EOF {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
errorIf(err, "Unable to list directory") logger.LogIf(context.Background(), err)
} }
return false return false

View File

@ -17,11 +17,13 @@
package cmd package cmd
import ( import (
"context"
"fmt" "fmt"
"os" "os"
"time" "time"
"github.com/minio/mc/pkg/console" "github.com/minio/mc/pkg/console"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
) )
@ -29,19 +31,21 @@ var printEndpointError = func() func(Endpoint, error) {
printOnce := make(map[Endpoint]map[string]bool) printOnce := make(map[Endpoint]map[string]bool)
return func(endpoint Endpoint, err error) { return func(endpoint Endpoint, err error) {
reqInfo := (&logger.ReqInfo{}).AppendTags("endpoint", endpoint.Host)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
m, ok := printOnce[endpoint] m, ok := printOnce[endpoint]
if !ok { if !ok {
m = make(map[string]bool) m = make(map[string]bool)
m[err.Error()] = true m[err.Error()] = true
printOnce[endpoint] = m printOnce[endpoint] = m
errorIf(err, "%s: %s", endpoint, err) logger.LogIf(ctx, err)
return return
} }
if m[err.Error()] { if m[err.Error()] {
return return
} }
m[err.Error()] = true m[err.Error()] = true
errorIf(err, "%s: %s", endpoint, err) logger.LogIf(ctx, err)
} }
}() }()
@ -147,7 +151,7 @@ func connectLoadInitFormats(firstDisk bool, endpoints EndpointList, setCount, dr
if !firstDisk { if !firstDisk {
return nil, errNotFirstDisk return nil, errNotFirstDisk
} }
return initFormatXL(storageDisks, setCount, drivesPerSet) return initFormatXL(context.Background(), storageDisks, setCount, drivesPerSet)
} }
// The following function is added to fix a regression that was introduced // The following function is added to fix a regression that was introduced
@ -178,7 +182,7 @@ func connectLoadInitFormats(firstDisk bool, endpoints EndpointList, setCount, dr
} }
// Format disks before initialization of object layer. // Format disks before initialization of object layer.
func waitForFormatXL(firstDisk bool, endpoints EndpointList, setCount, disksPerSet int) (format *formatXLV3, err error) { func waitForFormatXL(ctx context.Context, firstDisk bool, endpoints EndpointList, setCount, disksPerSet int) (format *formatXLV3, err error) {
if len(endpoints) == 0 || setCount == 0 || disksPerSet == 0 { if len(endpoints) == 0 || setCount == 0 || disksPerSet == 0 {
return nil, errInvalidArgument return nil, errInvalidArgument
} }

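printEndpointError's logic is unchanged by the logging swap: a closure captures a map from endpoint to the set of error strings already reported, so repeated connection failures against the same endpoint are logged only once. The same dedup closure, reduced to a standalone form with plain strings for endpoints:

package main

import (
    "errors"
    "fmt"
)

// newPrintOnce returns a function that reports each (endpoint, error)
// pair at most once, mirroring the printEndpointError closure above.
func newPrintOnce() func(endpoint string, err error) {
    printOnce := make(map[string]map[string]bool)
    return func(endpoint string, err error) {
        m, ok := printOnce[endpoint]
        if !ok {
            m = make(map[string]bool)
            printOnce[endpoint] = m
        }
        if m[err.Error()] {
            return // already reported for this endpoint
        }
        m[err.Error()] = true
        fmt.Printf("%s: %v\n", endpoint, err)
    }
}

func main() {
    report := newPrintOnce()
    report("node1:9000", errors.New("connection refused"))
    report("node1:9000", errors.New("connection refused")) // suppressed
    report("node2:9000", errors.New("connection refused"))
}
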
View File

@ -17,11 +17,13 @@
package cmd package cmd
import ( import (
"context"
"io" "io"
"net/http" "net/http"
"net/rpc" "net/rpc"
miniohttp "github.com/minio/minio/cmd/http" miniohttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
) )
// ServeHTTP implements an http.Handler that answers RPC requests, // ServeHTTP implements an http.Handler that answers RPC requests,
@ -34,7 +36,9 @@ func (server *rpcServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
conn, _, err := w.(http.Hijacker).Hijack() conn, _, err := w.(http.Hijacker).Hijack()
if err != nil { if err != nil {
errorIf(err, "rpc hijacking failed for: %s", req.RemoteAddr) reqInfo := (&logger.ReqInfo{}).AppendTags("remoteaddr", req.RemoteAddr)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return return
} }

View File

@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"net/http" "net/http"
"os" "os"
"os/signal" "os/signal"
@ -26,7 +27,7 @@ import (
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/dsync" "github.com/minio/dsync"
miniohttp "github.com/minio/minio/cmd/http" miniohttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
) )
var serverFlags = []cli.Flag{ var serverFlags = []cli.Flag{
@ -128,17 +129,17 @@ func serverHandleCmdArgs(ctx *cli.Context) {
// Server address. // Server address.
serverAddr := ctx.String("address") serverAddr := ctx.String("address")
fatalIf(CheckLocalServerAddr(serverAddr), "Invalid address %s in command line argument.", serverAddr) logger.FatalIf(CheckLocalServerAddr(serverAddr), "Invalid address %s in command line argument.", serverAddr)
var setupType SetupType var setupType SetupType
var err error var err error
if len(ctx.Args()) > serverCommandLineArgsMax { if len(ctx.Args()) > serverCommandLineArgsMax {
fatalIf(errInvalidArgument, "Invalid total number of arguments (%d) passed, supported up to 32 unique arguments", len(ctx.Args())) logger.FatalIf(errInvalidArgument, "Invalid total number of arguments (%d) passed, supported up to 32 unique arguments", len(ctx.Args()))
} }
globalMinioAddr, globalEndpoints, setupType, globalXLSetCount, globalXLSetDriveCount, err = createServerEndpoints(serverAddr, ctx.Args()...) globalMinioAddr, globalEndpoints, setupType, globalXLSetCount, globalXLSetDriveCount, err = createServerEndpoints(serverAddr, ctx.Args()...)
fatalIf(err, "Invalid command line arguments server=%s, args=%s", serverAddr, ctx.Args()) logger.FatalIf(err, "Invalid command line arguments server=%s, args=%s", serverAddr, ctx.Args())
globalMinioHost, globalMinioPort = mustSplitHostPort(globalMinioAddr) globalMinioHost, globalMinioPort = mustSplitHostPort(globalMinioAddr)
if runtime.GOOS == "darwin" { if runtime.GOOS == "darwin" {
@ -146,7 +147,7 @@ func serverHandleCmdArgs(ctx *cli.Context) {
// to an IPv6 address, i.e. minio will start listening on the IPv6 address whereas another // to an IPv6 address, i.e. minio will start listening on the IPv6 address whereas another
// (non-)minio process is listening on the IPv4 side of the given port. // (non-)minio process is listening on the IPv4 side of the given port.
// To avoid this error situation we check for port availability only for macOS. // To avoid this error situation we check for port availability only for macOS.
fatalIf(checkPortAvailability(globalMinioPort), "Port %d already in use", globalMinioPort) logger.FatalIf(checkPortAvailability(globalMinioPort), "Port %d already in use", globalMinioPort)
} }
globalIsXL = (setupType == XLSetupType) globalIsXL = (setupType == XLSetupType)
@ -168,6 +169,10 @@ func serverHandleEnvVars() {
} }
func init() {
logger.Init(GOPATH)
}
// serverMain handler called for 'minio server' command. // serverMain handler called for 'minio server' command.
func serverMain(ctx *cli.Context) { func serverMain(ctx *cli.Context) {
if (!ctx.IsSet("sets") && !ctx.Args().Present()) || ctx.Args().First() == "help" { if (!ctx.IsSet("sets") && !ctx.Args().Present()) || ctx.Args().First() == "help" {
@ -178,13 +183,13 @@ func serverMain(ctx *cli.Context) {
// enable json and quiet modes if the json flag is turned on. // enable json and quiet modes if the json flag is turned on.
jsonFlag := ctx.IsSet("json") || ctx.GlobalIsSet("json") jsonFlag := ctx.IsSet("json") || ctx.GlobalIsSet("json")
if jsonFlag { if jsonFlag {
log.EnableJSON() logger.EnableJSON()
} }
// Get quiet flag from command line argument. // Get quiet flag from command line argument.
quietFlag := ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet") quietFlag := ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
if quietFlag { if quietFlag {
log.EnableQuiet() logger.EnableQuiet()
} }
// Handle all server command args. // Handle all server command args.
@ -194,22 +199,19 @@ func serverMain(ctx *cli.Context) {
serverHandleEnvVars() serverHandleEnvVars()
// Create certs path. // Create certs path.
fatalIf(createConfigDir(), "Unable to create configuration directories.") logger.FatalIf(createConfigDir(), "Unable to create configuration directories.")
// Initialize server config. // Initialize server config.
initConfig() initConfig()
// Init the error tracing module.
errors.Init(GOPATH, "github.com/minio/minio")
// Check and load SSL certificates. // Check and load SSL certificates.
var err error var err error
globalPublicCerts, globalRootCAs, globalTLSCertificate, globalIsSSL, err = getSSLConfig() globalPublicCerts, globalRootCAs, globalTLSCertificate, globalIsSSL, err = getSSLConfig()
fatalIf(err, "Invalid SSL certificate file") logger.FatalIf(err, "Invalid SSL certificate file")
// Is distributed setup, error out if no certificates are found for HTTPS endpoints. // Is distributed setup, error out if no certificates are found for HTTPS endpoints.
if globalIsDistXL && globalEndpoints.IsHTTPS() && !globalIsSSL { if globalIsDistXL && globalEndpoints.IsHTTPS() && !globalIsSSL {
fatalIf(errInvalidArgument, "No certificates found for HTTPS endpoints (%s)", globalEndpoints) logger.FatalIf(errInvalidArgument, "No certificates found for HTTPS endpoints (%s)", globalEndpoints)
} }
if !quietFlag { if !quietFlag {
@ -224,12 +226,12 @@ func serverMain(ctx *cli.Context) {
} }
// Set system resources to maximum. // Set system resources to maximum.
errorIf(setMaxResources(), "Unable to change resource limit") logger.LogIf(context.Background(), setMaxResources())
// Set nodes for dsync for distributed setup. // Set nodes for dsync for distributed setup.
if globalIsDistXL { if globalIsDistXL {
globalDsync, err = dsync.New(newDsyncNodes(globalEndpoints)) globalDsync, err = dsync.New(newDsyncNodes(globalEndpoints))
fatalIf(err, "Unable to initialize distributed locking on %s", globalEndpoints) logger.FatalIf(err, "Unable to initialize distributed locking on %s", globalEndpoints)
} }
// Initialize name space lock. // Initialize name space lock.
@ -241,11 +243,11 @@ func serverMain(ctx *cli.Context) {
// Configure server. // Configure server.
var handler http.Handler var handler http.Handler
handler, err = configureServerHandler(globalEndpoints) handler, err = configureServerHandler(globalEndpoints)
fatalIf(err, "Unable to configure one of server's RPC services.") logger.FatalIf(err, "Unable to configure one of server's RPC services.")
// Initialize notification system. // Initialize notification system.
globalNotificationSys, err = NewNotificationSys(globalServerConfig, globalEndpoints) globalNotificationSys, err = NewNotificationSys(globalServerConfig, globalEndpoints)
fatalIf(err, "Unable to initialize notification system.") logger.FatalIf(err, "Unable to initialize notification system.")
// Initialize Admin Peers inter-node communication only in distributed setup. // Initialize Admin Peers inter-node communication only in distributed setup.
initGlobalAdminPeers(globalEndpoints) initGlobalAdminPeers(globalEndpoints)
@ -255,7 +257,6 @@ func serverMain(ctx *cli.Context) {
globalHTTPServer.WriteTimeout = globalConnWriteTimeout globalHTTPServer.WriteTimeout = globalConnWriteTimeout
globalHTTPServer.UpdateBytesReadFunc = globalConnStats.incInputBytes globalHTTPServer.UpdateBytesReadFunc = globalConnStats.incInputBytes
globalHTTPServer.UpdateBytesWrittenFunc = globalConnStats.incOutputBytes globalHTTPServer.UpdateBytesWrittenFunc = globalConnStats.incOutputBytes
globalHTTPServer.ErrorLogFunc = errorIf
go func() { go func() {
globalHTTPServerErrorCh <- globalHTTPServer.Start() globalHTTPServerErrorCh <- globalHTTPServer.Start()
}() }()
@ -264,9 +265,9 @@ func serverMain(ctx *cli.Context) {
newObject, err := newObjectLayer(globalEndpoints) newObject, err := newObjectLayer(globalEndpoints)
if err != nil { if err != nil {
errorIf(err, "Initializing object layer failed") logger.LogIf(context.Background(), err)
err = globalHTTPServer.Shutdown() err = globalHTTPServer.Shutdown()
errorIf(err, "Unable to shutdown http server") logger.LogIf(context.Background(), err)
os.Exit(1) os.Exit(1)
} }
@ -294,7 +295,7 @@ func newObjectLayer(endpoints EndpointList) (newObject ObjectLayer, err error) {
return NewFSObjectLayer(endpoints[0].Path) return NewFSObjectLayer(endpoints[0].Path)
} }
format, err := waitForFormatXL(endpoints[0].IsLocal, endpoints, globalXLSetCount, globalXLSetDriveCount) format, err := waitForFormatXL(context.Background(), endpoints[0].IsLocal, endpoints, globalXLSetCount, globalXLSetDriveCount)
if err != nil { if err != nil {
return nil, err return nil, err
} }

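server-main.go now touches most of the new logger package surface visible in this commit: Init, EnableJSON, EnableQuiet, Println, FatalIf for unrecoverable startup errors, and LogIf for errors the server can survive. A compact stand-in contrasting the two error helpers; the exit-on-error semantics of FatalIf are assumed from how the startup path uses it:

package main

import (
    "errors"
    "log"
    "os"
)

// FatalIf logs the error with its message context and aborts the process;
// startup code uses it because there is nothing to serve if setup fails.
func FatalIf(err error, msg string, args ...interface{}) {
    if err == nil {
        return
    }
    log.Printf(msg+": %v", append(args, err)...)
    os.Exit(1)
}

// LogIf records the error and lets the caller continue; request-path and
// cleanup code uses it because one failure must not take the server down.
func LogIf(err error) {
    if err != nil {
        log.Printf("%v", err)
    }
}

func main() {
    LogIf(errors.New("unable to change resource limit")) // logged, continues
    FatalIf(nil, "Unable to create configuration directories.")
    log.Println("server starting")
}
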
View File

@ -25,6 +25,7 @@ import (
"strings" "strings"
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/logger"
) )
// Documentation links, these are part of message printing code. // Documentation links, these are part of message printing code.
@ -114,17 +115,17 @@ func printServerCommonMsg(apiEndpoints []string) {
apiEndpointStr := strings.Join(apiEndpoints, " ") apiEndpointStr := strings.Join(apiEndpoints, " ")
// Colorize the message and print. // Colorize the message and print.
log.Println(colorBlue("Endpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr))) logger.Println(colorBlue("Endpoint: ") + colorBold(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 1), apiEndpointStr)))
log.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey))) logger.Println(colorBlue("AccessKey: ") + colorBold(fmt.Sprintf("%s ", cred.AccessKey)))
log.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey))) logger.Println(colorBlue("SecretKey: ") + colorBold(fmt.Sprintf("%s ", cred.SecretKey)))
if region != "" { if region != "" {
log.Println(colorBlue("Region: ") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region))) logger.Println(colorBlue("Region: ") + colorBold(fmt.Sprintf(getFormatStr(len(region), 3), region)))
} }
printEventNotifiers() printEventNotifiers()
if globalIsBrowserEnabled { if globalIsBrowserEnabled {
log.Println(colorBlue("\nBrowser Access:")) logger.Println(colorBlue("\nBrowser Access:"))
log.Println(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr)) logger.Println(fmt.Sprintf(getFormatStr(len(apiEndpointStr), 3), apiEndpointStr))
} }
} }
@ -140,7 +141,7 @@ func printEventNotifiers() {
arnMsg += colorBold(fmt.Sprintf(getFormatStr(len(arn), 1), arn)) arnMsg += colorBold(fmt.Sprintf(getFormatStr(len(arn), 1), arn))
} }
log.Println(arnMsg) logger.Println(arnMsg)
} }
// Prints startup message for command line access. Prints link to our documentation // Prints startup message for command line access. Prints link to our documentation
@ -150,24 +151,24 @@ func printCLIAccessMsg(endPoint string, alias string) {
cred := globalServerConfig.GetCredential() cred := globalServerConfig.GetCredential()
// Configure 'mc'; the following block prints platform-specific information for the minio client. // Configure 'mc'; the following block prints platform-specific information for the minio client.
log.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide) logger.Println(colorBlue("\nCommand-line Access: ") + mcQuickStartGuide)
if runtime.GOOS == globalWindowsOSName { if runtime.GOOS == globalWindowsOSName {
mcMessage := fmt.Sprintf("$ mc.exe config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey) mcMessage := fmt.Sprintf("$ mc.exe config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey)
log.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage)) logger.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
} else { } else {
mcMessage := fmt.Sprintf("$ mc config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey) mcMessage := fmt.Sprintf("$ mc config host add %s %s %s %s", alias, endPoint, cred.AccessKey, cred.SecretKey)
log.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage)) logger.Println(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
} }
} }
// Prints startup message for Object API access, prints link to our SDK documentation. // Prints startup message for Object API access, prints link to our SDK documentation.
func printObjectAPIMsg() { func printObjectAPIMsg() {
log.Println(colorBlue("\nObject API (Amazon S3 compatible):")) logger.Println(colorBlue("\nObject API (Amazon S3 compatible):"))
log.Println(colorBlue(" Go: ") + fmt.Sprintf(getFormatStr(len(goQuickStartGuide), 8), goQuickStartGuide)) logger.Println(colorBlue(" Go: ") + fmt.Sprintf(getFormatStr(len(goQuickStartGuide), 8), goQuickStartGuide))
log.Println(colorBlue(" Java: ") + fmt.Sprintf(getFormatStr(len(javaQuickStartGuide), 6), javaQuickStartGuide)) logger.Println(colorBlue(" Java: ") + fmt.Sprintf(getFormatStr(len(javaQuickStartGuide), 6), javaQuickStartGuide))
log.Println(colorBlue(" Python: ") + fmt.Sprintf(getFormatStr(len(pyQuickStartGuide), 4), pyQuickStartGuide)) logger.Println(colorBlue(" Python: ") + fmt.Sprintf(getFormatStr(len(pyQuickStartGuide), 4), pyQuickStartGuide))
log.Println(colorBlue(" JavaScript: ") + jsQuickStartGuide) logger.Println(colorBlue(" JavaScript: ") + jsQuickStartGuide)
log.Println(colorBlue(" .NET: ") + fmt.Sprintf(getFormatStr(len(dotnetQuickStartGuide), 6), dotnetQuickStartGuide)) logger.Println(colorBlue(" .NET: ") + fmt.Sprintf(getFormatStr(len(dotnetQuickStartGuide), 6), dotnetQuickStartGuide))
} }
// Get formatted disk/storage info message. // Get formatted disk/storage info message.
@ -184,15 +185,15 @@ func getStorageInfoMsg(storageInfo StorageInfo) string {
// Prints startup message of storage capacity and erasure information. // Prints startup message of storage capacity and erasure information.
func printStorageInfo(storageInfo StorageInfo) { func printStorageInfo(storageInfo StorageInfo) {
log.Println(getStorageInfoMsg(storageInfo)) logger.Println(getStorageInfoMsg(storageInfo))
log.Println() logger.Println()
} }
func printCacheStorageInfo(storageInfo StorageInfo) { func printCacheStorageInfo(storageInfo StorageInfo) {
msg := fmt.Sprintf("%s %s Free, %s Total", colorBlue("Cache Capacity:"), msg := fmt.Sprintf("%s %s Free, %s Total", colorBlue("Cache Capacity:"),
humanize.IBytes(uint64(storageInfo.Free)), humanize.IBytes(uint64(storageInfo.Free)),
humanize.IBytes(uint64(storageInfo.Total))) humanize.IBytes(uint64(storageInfo.Total)))
log.Println(msg) logger.Println(msg)
} }
// Prints certificate expiry date warning // Prints certificate expiry date warning
@ -215,5 +216,5 @@ func getCertificateChainMsg(certs []*x509.Certificate) string {
// Prints the certificate expiry message. // Prints the certificate expiry message.
func printCertificateMsg(certs []*x509.Certificate) { func printCertificateMsg(certs []*x509.Certificate) {
log.Println(getCertificateChainMsg(certs)) logger.Println(getCertificateChainMsg(certs))
} }

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"crypto/tls" "crypto/tls"
"crypto/x509" "crypto/x509"
"encoding/xml" "encoding/xml"
@ -2732,7 +2733,7 @@ func (s *TestSuiteCommon) TestObjectMultipart(c *check) {
part.ETag = canonicalizeETag(part.ETag) part.ETag = canonicalizeETag(part.ETag)
parts = append(parts, part) parts = append(parts, part)
} }
etag, err := getCompleteMultipartMD5(parts) etag, err := getCompleteMultipartMD5(context.Background(), parts)
c.Assert(err, nil) c.Assert(err, nil)
c.Assert(canonicalizeETag(response.Header.Get("Etag")), etag) c.Assert(canonicalizeETag(response.Header.Get("Etag")), etag)
} }

View File

@ -19,6 +19,8 @@ package cmd
import ( import (
"context" "context"
"os" "os"
"github.com/minio/minio/cmd/logger"
) )
func handleSignals() { func handleSignals() {
@ -44,11 +46,11 @@ func handleSignals() {
} }
err = globalHTTPServer.Shutdown() err = globalHTTPServer.Shutdown()
errorIf(err, "Unable to shutdown http server") logger.LogIf(context.Background(), err)
if objAPI := newObjectLayerFn(); objAPI != nil { if objAPI := newObjectLayerFn(); objAPI != nil {
oerr = objAPI.Shutdown(context.Background()) oerr = objAPI.Shutdown(context.Background())
errorIf(oerr, "Unable to shutdown object layer") logger.LogIf(context.Background(), oerr)
} }
return (err == nil && oerr == nil) return (err == nil && oerr == nil)
@ -57,33 +59,32 @@ func handleSignals() {
for { for {
select { select {
case err := <-globalHTTPServerErrorCh: case err := <-globalHTTPServerErrorCh:
errorIf(err, "http server exited abnormally") logger.LogIf(context.Background(), err)
var oerr error var oerr error
if objAPI := newObjectLayerFn(); objAPI != nil { if objAPI := newObjectLayerFn(); objAPI != nil {
oerr = objAPI.Shutdown(context.Background()) oerr = objAPI.Shutdown(context.Background())
errorIf(oerr, "Unable to shutdown object layer")
} }
exit(err == nil && oerr == nil) exit(err == nil && oerr == nil)
case osSignal := <-globalOSSignalCh: case osSignal := <-globalOSSignalCh:
stopHTTPTrace() stopHTTPTrace()
log.Printf("Exiting on signal %v\n", osSignal) logger.Printf("Exiting on signal %v\n", osSignal)
exit(stopProcess()) exit(stopProcess())
case signal := <-globalServiceSignalCh: case signal := <-globalServiceSignalCh:
switch signal { switch signal {
case serviceStatus: case serviceStatus:
// Ignore this at the moment. // Ignore this at the moment.
case serviceRestart: case serviceRestart:
log.Println("Restarting on service signal") logger.Println("Restarting on service signal")
err := globalHTTPServer.Shutdown() err := globalHTTPServer.Shutdown()
errorIf(err, "Unable to shutdown http server") logger.LogIf(context.Background(), err)
stopHTTPTrace() stopHTTPTrace()
rerr := restartProcess() rerr := restartProcess()
errorIf(rerr, "Unable to restart the server") logger.LogIf(context.Background(), rerr)
exit(err == nil && rerr == nil) exit(err == nil && rerr == nil)
case serviceStop: case serviceStop:
log.Println("Stopping on service signal") logger.Println("Stopping on service signal")
stopHTTPTrace() stopHTTPTrace()
exit(stopProcess()) exit(stopProcess())
} }

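handleSignals above preserves one contract through the rewrite: the process exit code reports success only when both the HTTP server and the object layer shut down cleanly, and each failure is logged without aborting the other shutdown. The shape of that check, with stand-in types:

package main

import (
    "errors"
    "fmt"
)

type shutdowner interface{ Shutdown() error }

type fakeServer struct{ err error }

func (f fakeServer) Shutdown() error { return f.err }

// stopProcess mirrors the pattern above: log-and-continue on each error,
// report success only if every component stopped cleanly.
func stopProcess(parts ...shutdowner) bool {
    ok := true
    for _, p := range parts {
        if err := p.Shutdown(); err != nil {
            fmt.Println("shutdown error:", err) // logger.LogIf in the real code
            ok = false
        }
    }
    return ok
}

func main() {
    fmt.Println(stopProcess(fakeServer{}, fakeServer{errors.New("busy")})) // false
}
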
View File

@ -193,7 +193,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
parts1, errs1 := readAllXLMetadata(xlDisks, bucket, object1) parts1, errs1 := readAllXLMetadata(context.Background(), xlDisks, bucket, object1)
// Object for test case 2 - No StorageClass defined, MetaData in PutObject requesting RRS Class // Object for test case 2 - No StorageClass defined, MetaData in PutObject requesting RRS Class
object2 := "object2" object2 := "object2"
@ -204,7 +204,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
parts2, errs2 := readAllXLMetadata(xlDisks, bucket, object2) parts2, errs2 := readAllXLMetadata(context.Background(), xlDisks, bucket, object2)
// Object for test case 3 - No StorageClass defined, MetaData in PutObject requesting Standard Storage Class // Object for test case 3 - No StorageClass defined, MetaData in PutObject requesting Standard Storage Class
object3 := "object3" object3 := "object3"
@ -215,7 +215,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
parts3, errs3 := readAllXLMetadata(xlDisks, bucket, object3) parts3, errs3 := readAllXLMetadata(context.Background(), xlDisks, bucket, object3)
// Object for test case 4 - Standard StorageClass defined as Parity 6, MetaData in PutObject requesting Standard Storage Class // Object for test case 4 - Standard StorageClass defined as Parity 6, MetaData in PutObject requesting Standard Storage Class
object4 := "object4" object4 := "object4"
@ -231,7 +231,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
parts4, errs4 := readAllXLMetadata(xlDisks, bucket, object4) parts4, errs4 := readAllXLMetadata(context.Background(), xlDisks, bucket, object4)
// Object for test case 5 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting RRS Class // Object for test case 5 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting RRS Class
// Reset global storage class flags // Reset global storage class flags
@ -249,7 +249,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
parts5, errs5 := readAllXLMetadata(xlDisks, bucket, object5) parts5, errs5 := readAllXLMetadata(context.Background(), xlDisks, bucket, object5)
// Object for test case 6 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting Standard Storage Class // Object for test case 6 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting Standard Storage Class
// Reset global storage class flags // Reset global storage class flags
@ -267,7 +267,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
parts6, errs6 := readAllXLMetadata(xlDisks, bucket, object6) parts6, errs6 := readAllXLMetadata(context.Background(), xlDisks, bucket, object6)
// Object for test case 7 - Standard StorageClass defined as Parity 5, MetaData in PutObject requesting RRS Class // Object for test case 7 - Standard StorageClass defined as Parity 5, MetaData in PutObject requesting RRS Class
// Reset global storage class flags // Reset global storage class flags
@ -285,7 +285,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
parts7, errs7 := readAllXLMetadata(xlDisks, bucket, object7) parts7, errs7 := readAllXLMetadata(context.Background(), xlDisks, bucket, object7)
tests := []struct { tests := []struct {
parts []xlMetaV1 parts []xlMetaV1

View File

@ -17,13 +17,14 @@
package cmd package cmd
import ( import (
"context"
"io" "io"
"path" "path"
"time" "time"
router "github.com/gorilla/mux" router "github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/disk" "github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/errors"
) )
// Storage server implements rpc primitives to facilitate exporting a // Storage server implements rpc primitives to facilitate exporting a
@ -224,7 +225,8 @@ func registerStorageRPCRouters(mux *router.Router, endpoints EndpointList) error
// Initialize storage rpc servers for every disk that is hosted on this node. // Initialize storage rpc servers for every disk that is hosted on this node.
storageRPCs, err := newStorageRPCServer(endpoints) storageRPCs, err := newStorageRPCServer(endpoints)
if err != nil { if err != nil {
return errors.Trace(err) logger.LogIf(context.Background(), err)
return err
} }
// Create a unique route for each disk exported from this node. // Create a unique route for each disk exported from this node.
@ -232,7 +234,8 @@ func registerStorageRPCRouters(mux *router.Router, endpoints EndpointList) error
storageRPCServer := newRPCServer() storageRPCServer := newRPCServer()
err = storageRPCServer.RegisterName("Storage", stServer) err = storageRPCServer.RegisterName("Storage", stServer)
if err != nil { if err != nil {
return errors.Trace(err) logger.LogIf(context.Background(), err)
return err
} }
// Add minio storage routes. // Add minio storage routes.
storageRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter() storageRouter := mux.PathPrefix(minioReservedBucketPath).Subrouter()

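registerStorageRPCRouters shows the commit's second recurring move: rather than wrapping failures in errors.Trace(err) so a call stack can be reconstructed later, the error is logged once at its origin and returned bare. A minimal before/after sketch; the %w wrap below is only a stand-in for the removed pkg/errors tracing:

package main

import (
    "errors"
    "fmt"
    "log"
)

var errRegister = errors.New("rpc: service already defined")

// before: callers unwrap a traced error later to find where it happened.
func registerTraced() error {
    return fmt.Errorf("trace[registerTraced]: %w", errRegister) // stand-in for errors.Trace
}

// after: log with context at the failure site, return the plain error.
func registerLogged() error {
    log.Printf("register failed: %v", errRegister) // stand-in for logger.LogIf
    return errRegister
}

func main() {
    fmt.Println(registerTraced())
    fmt.Println(registerLogged())
}
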
View File

@ -55,6 +55,7 @@ import (
router "github.com/gorilla/mux" router "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3signer" "github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/bpool" "github.com/minio/minio/pkg/bpool"
"github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/hash"
@ -74,8 +75,7 @@ func init() {
// Set system resources to maximum. // Set system resources to maximum.
setMaxResources() setMaxResources()
log = NewLogger() logger.EnableQuiet()
log.EnableQuiet()
} }
// concurrency level for certain parallel tests. // concurrency level for certain parallel tests.
@ -187,7 +187,7 @@ func prepareXL32() (ObjectLayer, []string, error) {
endpoints := append(endpoints1, endpoints2...) endpoints := append(endpoints1, endpoints2...)
fsDirs := append(fsDirs1, fsDirs2...) fsDirs := append(fsDirs1, fsDirs2...)
format, err := waitForFormatXL(true, endpoints, 2, 16) format, err := waitForFormatXL(context.Background(), true, endpoints, 2, 16)
if err != nil { if err != nil {
removeRoots(fsDirs) removeRoots(fsDirs)
return nil, nil, err return nil, nil, err
@ -1685,7 +1685,7 @@ func newTestObjectLayer(endpoints EndpointList) (newObject ObjectLayer, err erro
return NewFSObjectLayer(endpoints[0].Path) return NewFSObjectLayer(endpoints[0].Path)
} }
_, err = waitForFormatXL(endpoints[0].IsLocal, endpoints, 1, 16) _, err = waitForFormatXL(context.Background(), endpoints[0].IsLocal, endpoints, 1, 16)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -2423,12 +2423,12 @@ func generateTLSCertKey(host string) ([]byte, []byte, error) {
func mustGetNewEndpointList(args ...string) (endpoints EndpointList) { func mustGetNewEndpointList(args ...string) (endpoints EndpointList) {
if len(args) == 1 { if len(args) == 1 {
endpoint, err := NewEndpoint(args[0]) endpoint, err := NewEndpoint(args[0])
fatalIf(err, "unable to create new endpoint") logger.FatalIf(err, "unable to create new endpoint")
endpoints = append(endpoints, endpoint) endpoints = append(endpoints, endpoint)
} else { } else {
var err error var err error
endpoints, err = NewEndpointList(args...) endpoints, err = NewEndpointList(args...)
fatalIf(err, "unable to create new endpoint list") logger.FatalIf(err, "unable to create new endpoint list")
} }
return endpoints return endpoints
} }

View File

@ -17,10 +17,11 @@
package cmd package cmd
import ( import (
"context"
"sort" "sort"
"strings" "strings"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/cmd/logger"
) )
// Tree walk result carries results of tree walking. // Tree walk result carries results of tree walking.
@ -124,7 +125,7 @@ func filterListEntries(bucket, prefixDir string, entries []string, prefixEntry s
} }
// treeWalk walks the directory tree recursively, pushing treeWalkResult into the channel as it encounters files. func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bool, listDir listDirFunc, isLeaf isLeafFunc, resultCh chan treeWalkResult, endWalkCh chan struct{}, isEnd bool) error { func doTreeWalk(ctx context.Context, bucket, prefixDir, entryPrefixMatch, marker string, recursive bool, listDir listDirFunc, isLeaf isLeafFunc, resultCh chan treeWalkResult, endWalkCh chan struct{}, isEnd bool) error {
func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bool, listDir listDirFunc, isLeaf isLeafFunc, resultCh chan treeWalkResult, endWalkCh chan struct{}, isEnd bool) error { func doTreeWalk(ctx context.Context, bucket, prefixDir, entryPrefixMatch, marker string, recursive bool, listDir listDirFunc, isLeaf isLeafFunc, resultCh chan treeWalkResult, endWalkCh chan struct{}, isEnd bool) error {
// Example: // Example:
// if prefixDir="one/two/three/" and marker="four/five.txt" treeWalk is recursively // if prefixDir="one/two/three/" and marker="four/five.txt" treeWalk is recursively
// called with prefixDir="one/two/three/four/" and marker="five.txt" // called with prefixDir="one/two/three/four/" and marker="five.txt"
@ -143,7 +144,8 @@ func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bo
if err != nil { if err != nil {
select { select {
case <-endWalkCh: case <-endWalkCh:
return errors.Trace(errWalkAbort) logger.LogIf(ctx, errWalkAbort)
return errWalkAbort
case resultCh <- treeWalkResult{err: err}: case resultCh <- treeWalkResult{err: err}:
return err return err
} }
@ -196,7 +198,7 @@ func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bo
// markIsEnd is passed to this entry's treeWalk() so that treeWalker.end can be marked // markIsEnd is passed to this entry's treeWalk() so that treeWalker.end can be marked
// true at the end of the treeWalk stream. // true at the end of the treeWalk stream.
markIsEnd := i == len(entries)-1 && isEnd markIsEnd := i == len(entries)-1 && isEnd
if tErr := doTreeWalk(bucket, pathJoin(prefixDir, entry), prefixMatch, markerArg, recursive, listDir, isLeaf, resultCh, endWalkCh, markIsEnd); tErr != nil { if tErr := doTreeWalk(ctx, bucket, pathJoin(prefixDir, entry), prefixMatch, markerArg, recursive, listDir, isLeaf, resultCh, endWalkCh, markIsEnd); tErr != nil {
return tErr return tErr
} }
continue continue
@ -205,7 +207,8 @@ func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bo
isEOF := ((i == len(entries)-1) && isEnd) isEOF := ((i == len(entries)-1) && isEnd)
select { select {
case <-endWalkCh: case <-endWalkCh:
return errors.Trace(errWalkAbort) logger.LogIf(ctx, errWalkAbort)
return errWalkAbort
case resultCh <- treeWalkResult{entry: pathJoin(prefixDir, entry), end: isEOF}: case resultCh <- treeWalkResult{entry: pathJoin(prefixDir, entry), end: isEOF}:
} }
} }
@ -215,7 +218,7 @@ func doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker string, recursive bo
} }
// Initiate a new treeWalk in a goroutine. // Initiate a new treeWalk in a goroutine.
func startTreeWalk(bucket, prefix, marker string, recursive bool, listDir listDirFunc, isLeaf isLeafFunc, endWalkCh chan struct{}) chan treeWalkResult { func startTreeWalk(ctx context.Context, bucket, prefix, marker string, recursive bool, listDir listDirFunc, isLeaf isLeafFunc, endWalkCh chan struct{}) chan treeWalkResult {
// Example 1 // Example 1
// If prefix is "one/two/three/" and marker is "one/two/three/four/five.txt" // If prefix is "one/two/three/" and marker is "one/two/three/four/five.txt"
// treeWalk is called with prefixDir="one/two/three/" and marker="four/five.txt" // treeWalk is called with prefixDir="one/two/three/" and marker="four/five.txt"
@ -237,7 +240,7 @@ func startTreeWalk(bucket, prefix, marker string, recursive bool, listDir listDi
marker = strings.TrimPrefix(marker, prefixDir) marker = strings.TrimPrefix(marker, prefixDir)
go func() { go func() {
isEnd := true // Indication to start walking the tree with end as true. isEnd := true // Indication to start walking the tree with end as true.
doTreeWalk(bucket, prefixDir, entryPrefixMatch, marker, recursive, listDir, isLeaf, resultCh, endWalkCh, isEnd) doTreeWalk(ctx, bucket, prefixDir, entryPrefixMatch, marker, recursive, listDir, isLeaf, resultCh, endWalkCh, isEnd)
close(resultCh) close(resultCh)
}() }()
return resultCh return resultCh

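The abort path in doTreeWalk is worth isolating: each send into resultCh races against endWalkCh inside a select, so a listener that has closed endWalkCh stops the walker instead of leaving it blocked on a full channel. A reduced sketch of that send-or-abort step:

package main

import (
    "errors"
    "fmt"
)

var errWalkAbort = errors.New("treeWalk abort")

// emit tries to push an entry to resultCh, giving up if the consumer has
// signalled shutdown by closing endWalkCh.
func emit(entry string, resultCh chan string, endWalkCh chan struct{}) error {
    select {
    case <-endWalkCh:
        return errWalkAbort
    case resultCh <- entry:
        return nil
    }
}

func main() {
    resultCh := make(chan string, 1)
    endWalkCh := make(chan struct{})
    fmt.Println(emit("one/two", resultCh, endWalkCh)) // <nil>
    close(endWalkCh)
    // the buffered slot is full and endWalkCh is closed: abort wins.
    fmt.Println(emit("three", resultCh, endWalkCh))
}
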
View File

@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
@ -131,7 +132,7 @@ func testTreeWalkPrefix(t *testing.T, listDir listDirFunc, isLeaf isLeafFunc) {
// Start the tree walk go-routine. // Start the tree walk go-routine.
prefix := "d/" prefix := "d/"
endWalkCh := make(chan struct{}) endWalkCh := make(chan struct{})
twResultCh := startTreeWalk(volume, prefix, "", true, listDir, isLeaf, endWalkCh) twResultCh := startTreeWalk(context.Background(), volume, prefix, "", true, listDir, isLeaf, endWalkCh)
// Check if all entries received on the channel match the prefix. // Check if all entries received on the channel match the prefix.
for res := range twResultCh { for res := range twResultCh {
@ -146,7 +147,7 @@ func testTreeWalkMarker(t *testing.T, listDir listDirFunc, isLeaf isLeafFunc) {
// Start the tree walk go-routine. // Start the tree walk go-routine.
prefix := "" prefix := ""
endWalkCh := make(chan struct{}) endWalkCh := make(chan struct{})
twResultCh := startTreeWalk(volume, prefix, "d/g", true, listDir, isLeaf, endWalkCh) twResultCh := startTreeWalk(context.Background(), volume, prefix, "d/g", true, listDir, isLeaf, endWalkCh)
// Check if only 3 entries, namely d/g/h, i/j/k, lmn are received on the channel. // Check if only 3 entries, namely d/g/h, i/j/k, lmn are received on the channel.
expectedCount := 3 expectedCount := 3
@ -186,7 +187,7 @@ func TestTreeWalk(t *testing.T) {
isLeaf := func(volume, prefix string) bool { isLeaf := func(volume, prefix string) bool {
return !hasSuffix(prefix, slashSeparator) return !hasSuffix(prefix, slashSeparator)
} }
listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk) listDir := listDirFactory(context.Background(), isLeaf, xlTreeWalkIgnoredErrs, disk)
// Simple test for prefix based walk. // Simple test for prefix based walk.
testTreeWalkPrefix(t, listDir, isLeaf) testTreeWalkPrefix(t, listDir, isLeaf)
// Simple test when marker is set. // Simple test when marker is set.
@ -221,7 +222,7 @@ func TestTreeWalkTimeout(t *testing.T) {
isLeaf := func(volume, prefix string) bool { isLeaf := func(volume, prefix string) bool {
return !hasSuffix(prefix, slashSeparator) return !hasSuffix(prefix, slashSeparator)
} }
-	listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk)
+	listDir := listDirFactory(context.Background(), isLeaf, xlTreeWalkIgnoredErrs, disk)
 	// TreeWalk pool with 2 seconds timeout for tree-walk go routines.
 	pool := newTreeWalkPool(2 * time.Second)
@@ -230,7 +231,7 @@ func TestTreeWalkTimeout(t *testing.T) {
 	prefix := ""
 	marker := ""
 	recursive := true
-	resultCh := startTreeWalk(volume, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
+	resultCh := startTreeWalk(context.Background(), volume, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
 	params := listParams{
 		bucket: volume,
@@ -294,7 +295,7 @@ func TestListDir(t *testing.T) {
 	}
 	// create listDir function.
-	listDir := listDirFactory(func(volume, prefix string) bool {
+	listDir := listDirFactory(context.Background(), func(volume, prefix string) bool {
 		return !hasSuffix(prefix, slashSeparator)
 	}, xlTreeWalkIgnoredErrs, disk1, disk2)
@@ -373,7 +374,7 @@ func TestRecursiveTreeWalk(t *testing.T) {
 	}
 	// Create listDir function.
-	listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk1)
+	listDir := listDirFactory(context.Background(), isLeaf, xlTreeWalkIgnoredErrs, disk1)
 	// Create the namespace.
 	var files = []string{
@@ -447,7 +448,7 @@ func TestRecursiveTreeWalk(t *testing.T) {
 		}},
 	}
 	for i, testCase := range testCases {
-		for entry := range startTreeWalk(volume,
+		for entry := range startTreeWalk(context.Background(), volume,
 			testCase.prefix, testCase.marker, testCase.recursive,
 			listDir, isLeaf, endWalkCh) {
 			if _, found := testCase.expected[entry.entry]; !found {
@@ -479,7 +480,7 @@ func TestSortedness(t *testing.T) {
 		return !hasSuffix(prefix, slashSeparator)
 	}
 	// Create listDir function.
-	listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk1)
+	listDir := listDirFactory(context.Background(), isLeaf, xlTreeWalkIgnoredErrs, disk1)
 	// Create the namespace.
 	var files = []string{
@@ -519,7 +520,7 @@ func TestSortedness(t *testing.T) {
 	}
 	for i, test := range testCases {
 		var actualEntries []string
-		for entry := range startTreeWalk(volume,
+		for entry := range startTreeWalk(context.Background(), volume,
 			test.prefix, test.marker, test.recursive,
 			listDir, isLeaf, endWalkCh) {
 			actualEntries = append(actualEntries, entry.entry)
@@ -553,7 +554,7 @@ func TestTreeWalkIsEnd(t *testing.T) {
 		return !hasSuffix(prefix, slashSeparator)
 	}
 	// Create listDir function.
-	listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs, disk1)
+	listDir := listDirFactory(context.Background(), isLeaf, xlTreeWalkIgnoredErrs, disk1)
 	// Create the namespace.
 	var files = []string{
@@ -594,7 +595,7 @@ func TestTreeWalkIsEnd(t *testing.T) {
 	}
 	for i, test := range testCases {
 		var entry treeWalkResult
-		for entry = range startTreeWalk(volume, test.prefix, test.marker, test.recursive, listDir, isLeaf, endWalkCh) {
+		for entry = range startTreeWalk(context.Background(), volume, test.prefix, test.marker, test.recursive, listDir, isLeaf, endWalkCh) {
 		}
 		if entry.entry != test.expectedEntry {
 			t.Errorf("Test %d: Expected entry %s, but received %s with the EOF marker", i, test.expectedEntry, entry.entry)
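Taken together, the hunks above thread a context.Context through both tree-walk entry points. A minimal sketch of the post-change calling convention, assuming only the signatures visible in this diff (walkAllEntries itself is a hypothetical helper, not part of the commit):

	// walkAllEntries drains a recursive tree walk over one disk and
	// collects every entry it yields.
	func walkAllEntries(disk StorageAPI, volume string) (entries []string) {
		isLeaf := func(volume, prefix string) bool {
			// Entries without a trailing slash are leaves (objects).
			return !hasSuffix(prefix, slashSeparator)
		}
		ctx := context.Background()
		listDir := listDirFactory(ctx, isLeaf, xlTreeWalkIgnoredErrs, disk)
		endWalkCh := make(chan struct{})
		defer close(endWalkCh) // signals the walker goroutine to stop early
		recursive := true
		for result := range startTreeWalk(ctx, volume, "", "", recursive, listDir, isLeaf, endWalkCh) {
			entries = append(entries, result.entry)
		}
		return entries
	}

The same ctx is handed to the factory and the walker, so an error logged deep inside the walk carries whatever request metadata the context holds.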


@@ -18,6 +18,7 @@ package cmd
 import (
 	"bufio"
+	"context"
 	"crypto"
 	"encoding/hex"
 	"fmt"
@@ -32,6 +33,7 @@ import (
 	"github.com/fatih/color"
 	"github.com/inconshreveable/go-update"
 	"github.com/minio/cli"
+	"github.com/minio/minio/cmd/logger"
 	_ "github.com/minio/sha256-simd" // Needed for sha256 hash verifier.
 	"github.com/segmentio/go-prompt"
 )
@@ -154,7 +156,7 @@ func IsDocker() bool {
 	}
 	// Log error, as we will not propagate it to caller
-	errorIf(err, "Error in docker check.")
+	logger.LogIf(context.Background(), err)
 	return err == nil
 }
@@ -184,7 +186,7 @@ func IsBOSH() bool {
 	}
 	// Log error, as we will not propagate it to caller
-	errorIf(err, "Error in BOSH check.")
+	logger.LogIf(context.Background(), err)
 	return err == nil
 }
@@ -199,7 +201,9 @@ func getHelmVersion(helmInfoFilePath string) string {
 	// Log errors and return "" as Minio can be deployed
 	// without Helm charts as well.
 	if !os.IsNotExist(err) {
-		errorIf(err, "Unable to read %s", helmInfoFilePath)
+		reqInfo := (&logger.ReqInfo{}).AppendTags("helmInfoFilePath", helmInfoFilePath)
+		ctx := logger.SetReqInfo(context.Background(), reqInfo)
+		logger.LogIf(ctx, err)
 	}
 	return ""
 }
@@ -491,33 +495,33 @@ func mainUpdate(ctx *cli.Context) {
 	quiet := ctx.Bool("quiet") || ctx.GlobalBool("quiet")
 	if quiet {
-		log.EnableQuiet()
+		logger.EnableQuiet()
 	}
 	minioMode := ""
 	updateMsg, sha256Hex, _, latestReleaseTime, err := getUpdateInfo(10*time.Second, minioMode)
 	if err != nil {
-		log.Println(err)
+		logger.Println(err)
 		os.Exit(-1)
 	}
 	// Nothing to update running the latest release.
 	if updateMsg == "" {
-		log.Println(greenColorSprintf("You are already running the most recent version of minio."))
+		logger.Println(greenColorSprintf("You are already running the most recent version of minio."))
 		os.Exit(0)
 	}
-	log.Println(updateMsg)
+	logger.Println(updateMsg)
 	// if the in-place update is disabled then we shouldn't ask the
 	// user to update the binaries.
 	if strings.Contains(updateMsg, minioReleaseURL) && !globalInplaceUpdateDisabled {
 		var successMsg string
 		successMsg, err = doUpdate(sha256Hex, latestReleaseTime, shouldUpdate(quiet, sha256Hex, latestReleaseTime))
 		if err != nil {
-			log.Println(err)
+			logger.Println(err)
 			os.Exit(-1)
 		}
-		log.Println(successMsg)
+		logger.Println(successMsg)
 		os.Exit(1)
 	}
 }
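The getHelmVersion hunk above is the commit's replacement for errorIf's printf-style message: instead of formatting the failing path into a string, the path is attached to the context as a tag and the bare error goes to the logger. In isolation the pattern looks like this (a sketch reusing only the logger calls introduced by this commit; readHelmInfo is a hypothetical caller):

	// readHelmInfo reads a Helm info file and logs any unexpected failure
	// with the path recorded as a context tag.
	func readHelmInfo(helmInfoFilePath string) ([]byte, error) {
		data, err := ioutil.ReadFile(helmInfoFilePath)
		if err != nil && !os.IsNotExist(err) {
			reqInfo := (&logger.ReqInfo{}).AppendTags("helmInfoFilePath", helmInfoFilePath)
			ctx := logger.SetReqInfo(context.Background(), reqInfo)
			logger.LogIf(ctx, err) // mirrors errorIf: a nil error is ignored
		}
		return data, err
	}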


@@ -44,7 +44,9 @@ import (
 // Close Http tracing file.
 func stopHTTPTrace() {
 	if globalHTTPTraceFile != nil {
-		errorIf(globalHTTPTraceFile.Close(), "Unable to close httpTraceFile %s", globalHTTPTraceFile.Name())
+		reqInfo := (&logger.ReqInfo{}).AppendTags("traceFile", globalHTTPTraceFile.Name())
+		ctx := logger.SetReqInfo(context.Background(), reqInfo)
+		logger.LogIf(ctx, globalHTTPTraceFile.Close())
 		globalHTTPTraceFile = nil
 	}
 }
@@ -331,8 +333,8 @@ func newContext(r *http.Request, api string) context.Context {
 	if prefix != "" {
 		object = prefix
 	}
-	return logger.SetContext(context.Background(), &logger.ReqInfo{r.RemoteAddr, r.Header.Get("user-agent"), "", api, bucket, object, nil})
+	reqInfo := &logger.ReqInfo{RemoteHost: r.RemoteAddr, UserAgent: r.Header.Get("user-agent"), API: api, BucketName: bucket, ObjectName: object}
+	return logger.SetReqInfo(context.Background(), reqInfo)
 }
 // isNetworkOrHostDown - if there was a network error or if the host is down.
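Two things change in newContext: logger.SetContext becomes logger.SetReqInfo, and the ReqInfo literal switches from positional to named fields, so it stays valid and readable if ReqInfo grows new fields. A hypothetical handler shows how such a context then reaches the logger (getObjectHandlerSketch and the simulated error are illustrative, not code from this commit):

	func getObjectHandlerSketch(w http.ResponseWriter, r *http.Request) {
		// ctx now carries remote host, user agent, API name, bucket and object.
		ctx := newContext(r, "GetObject")
		err := errors.New("simulated failure")
		logger.LogIf(ctx, err) // the log entry inherits every ReqInfo field
	}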


@@ -35,6 +35,7 @@ import (
 	"github.com/gorilla/rpc/v2/json2"
 	"github.com/minio/minio-go/pkg/policy"
 	"github.com/minio/minio/browser"
+	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/auth"
 	"github.com/minio/minio/pkg/errors"
 	"github.com/minio/minio/pkg/event"
@@ -384,7 +385,9 @@ func (web *webAPIHandlers) Login(r *http.Request, args *LoginArgs, reply *LoginR
 	if err != nil {
 		// Make sure to log errors related to browser login,
 		// for security and auditing reasons.
-		errorIf(err, "Unable to login request from %s", r.RemoteAddr)
+		reqInfo := (&logger.ReqInfo{}).AppendTags("remoteAddr", r.RemoteAddr)
+		ctx := logger.SetReqInfo(context.Background(), reqInfo)
+		logger.LogIf(ctx, err)
 		return toJSONError(err)
 	}
@@ -463,7 +466,7 @@ func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *Se
 	reply.PeerErrMsgs = make(map[string]string)
 	for svr, errVal := range errsMap {
 		tErr := fmt.Errorf("Unable to change credentials on %s: %v", svr, errVal)
-		errorIf(tErr, "Credentials change could not be propagated successfully!")
+		logger.LogIf(context.Background(), tErr)
 		reply.PeerErrMsgs[svr] = errVal.Error()
 	}
@@ -571,7 +574,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 	}
 	// Extract incoming metadata if any.
-	metadata, err := extractMetadataFromHeader(r.Header)
+	metadata, err := extractMetadataFromHeader(context.Background(), r.Header)
 	if err != nil {
 		writeErrorResponse(w, ErrInternalError, r.URL)
 		return
@@ -1095,7 +1098,7 @@ func toWebAPIError(err error) APIError {
 	}
 	// Log unexpected and unhandled errors.
-	errorIf(err, errUnexpected.Error())
+	logger.LogIf(context.Background(), err)
 	return APIError{
 		Code: "InternalError",
 		HTTPStatusCode: http.StatusInternalServerError,
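toWebAPIError keeps its classification logic; only the fall-through changes, logging whatever it could not map before masking it as InternalError. Reduced to its shape (a sketch: the sentinel and function name are hypothetical, Code and HTTPStatusCode appear in the hunk above, and a Description field is assumed alongside them):

	var errKnownSketch = errors.New("a known, classifiable error")

	func toWebAPIErrorSketch(err error) APIError {
		if err == errKnownSketch { // known errors map to specific API codes
			return APIError{Code: "AccessDenied", HTTPStatusCode: http.StatusForbidden, Description: err.Error()}
		}
		// Unexpected and unhandled errors are logged before being masked.
		logger.LogIf(context.Background(), err)
		return APIError{Code: "InternalError", HTTPStatusCode: http.StatusInternalServerError, Description: err.Error()}
	}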


@@ -1286,7 +1286,7 @@ func testWebGetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE
 			},
 		},
 	}
-	if err := writeBucketPolicy(bucketName, obj, policyVal); err != nil {
+	if err := writeBucketPolicy(context.Background(), bucketName, obj, policyVal); err != nil {
 		t.Fatal("Unexpected error: ", err)
 	}
@@ -1380,7 +1380,7 @@ func testWebListAllBucketPoliciesHandler(obj ObjectLayer, instanceType string, t
 			},
 		},
 	}
-	if err := writeBucketPolicy(bucketName, obj, policyVal); err != nil {
+	if err := writeBucketPolicy(context.Background(), bucketName, obj, policyVal); err != nil {
 		t.Fatal("Unexpected error: ", err)
 	}


@@ -28,6 +28,7 @@ import (
 	"time"
 	"github.com/minio/minio-go/pkg/policy"
+	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/bpool"
 	"github.com/minio/minio/pkg/errors"
 	"github.com/minio/minio/pkg/hash"
@@ -214,7 +215,7 @@ func newXLSets(endpoints EndpointList, format *formatXLV3, setCount int, drivesP
 			nsMutex: mutex,
 			bp: bpool.NewBytePoolCap(setCount*drivesPerSet, blockSizeV1, blockSizeV1*2),
 		}
-		go s.sets[i].cleanupStaleMultipartUploads(globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
+		go s.sets[i].cleanupStaleMultipartUploads(context.Background(), globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
 	}
 	// Connect disks right away.
@@ -431,7 +432,7 @@ func (s *xlSets) ListObjectsV2(ctx context.Context, bucket, prefix, continuation
 // SetBucketPolicy persist the new policy on the bucket.
 func (s *xlSets) SetBucketPolicy(ctx context.Context, bucket string, policy policy.BucketAccessPolicy) error {
-	return persistAndNotifyBucketPolicyChange(bucket, false, policy, s)
+	return persistAndNotifyBucketPolicyChange(ctx, bucket, false, policy, s)
 }
 // GetBucketPolicy will return a policy on a bucket
@@ -446,7 +447,7 @@ func (s *xlSets) GetBucketPolicy(ctx context.Context, bucket string) (policy.Buc
 // DeleteBucketPolicy deletes all policies on bucket
 func (s *xlSets) DeleteBucketPolicy(ctx context.Context, bucket string) error {
-	return persistAndNotifyBucketPolicyChange(bucket, true, emptyBucketPolicy, s)
+	return persistAndNotifyBucketPolicyChange(ctx, bucket, true, emptyBucketPolicy, s)
 }
 // RefreshBucketPolicy refreshes policy cache from disk
@@ -498,7 +499,7 @@ func (s *xlSets) DeleteBucket(ctx context.Context, bucket string) error {
 	}
 	// Delete all bucket metadata.
-	deleteBucketMetadata(bucket, s)
+	deleteBucketMetadata(ctx, bucket, s)
 	// Success.
 	return nil
@@ -585,26 +586,26 @@ func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, destBucke
 	}
 	go func() {
-		if gerr := srcSet.getObject(srcBucket, srcObject, 0, srcInfo.Size, srcInfo.Writer, srcInfo.ETag); gerr != nil {
+		if gerr := srcSet.getObject(ctx, srcBucket, srcObject, 0, srcInfo.Size, srcInfo.Writer, srcInfo.ETag); gerr != nil {
 			if gerr = srcInfo.Writer.Close(); gerr != nil {
-				errorIf(gerr, "Unable to read the object %s/%s.", srcBucket, srcObject)
+				logger.LogIf(ctx, gerr)
 			}
 			return
 		}
 		// Close writer explicitly signalling we wrote all data.
 		if gerr := srcInfo.Writer.Close(); gerr != nil {
-			errorIf(gerr, "Unable to read the object %s/%s.", srcBucket, srcObject)
+			logger.LogIf(ctx, gerr)
 			return
 		}
 	}()
-	return destSet.putObject(destBucket, destObject, srcInfo.Reader, srcInfo.UserDefined)
+	return destSet.putObject(ctx, destBucket, destObject, srcInfo.Reader, srcInfo.UserDefined)
 }
 // Returns function "listDir" of the type listDirFunc.
 // isLeaf - is used by listDir function to check if an entry is a leaf or non-leaf entry.
 // disks - used for doing disk.ListDir(). Sets passes set of disks.
-func listDirSetsFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, sets ...[]StorageAPI) listDirFunc {
+func listDirSetsFactory(ctx context.Context, isLeaf isLeafFunc, treeWalkIgnoredErrs []error, sets ...[]StorageAPI) listDirFunc {
 	listDirInternal := func(bucket, prefixDir, prefixEntry string, disks []StorageAPI) (mergedEntries []string, err error) {
 		for _, disk := range disks {
 			if disk == nil {
@@ -620,7 +621,8 @@ func listDirSetsFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, sets ...
 			if errors.IsErrIgnored(err, treeWalkIgnoredErrs...) {
 				continue
 			}
-			return nil, errors.Trace(err)
+			logger.LogIf(ctx, err)
+			return nil, err
 		}
 		// Find elements in entries which are not in mergedEntries
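The listDirSetsFactory hunk is the clearest statement of the commit's error-handling shift: errors.Trace(err) used to wrap the error with a stack trace and leave logging to some caller, while the new code logs at the failure site, with the request-scoped context, and returns the plain error. Condensed (a sketch; loadEntries is hypothetical, disk.ListDir is the call named in the comment above):

	func loadEntries(ctx context.Context, disk StorageAPI, bucket, prefixDir string) ([]string, error) {
		entries, err := disk.ListDir(bucket, prefixDir)
		if err != nil {
			// Old: return nil, errors.Trace(err) — wrap and defer logging.
			// New: log here with ctx, return the unwrapped error.
			logger.LogIf(ctx, err)
			return nil, err
		}
		return entries, nil
	}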
@@ -679,7 +681,7 @@ func listDirSetsFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, sets ...
 // value through the walk channel receives the data properly lexically sorted.
 func (s *xlSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
 	// validate all the inputs for listObjects
-	if err = checkListObjsArgs(bucket, prefix, marker, delimiter, s); err != nil {
+	if err = checkListObjsArgs(ctx, bucket, prefix, marker, delimiter, s); err != nil {
 		return result, err
 	}
@@ -707,8 +709,8 @@ func (s *xlSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimi
 			setDisks = append(setDisks, set.getLoadBalancedDisks())
 		}
-		listDir := listDirSetsFactory(isLeaf, xlTreeWalkIgnoredErrs, setDisks...)
-		walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
+		listDir := listDirSetsFactory(ctx, isLeaf, xlTreeWalkIgnoredErrs, setDisks...)
+		walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, isLeaf, endWalkCh)
 	}
 	for i := 0; i < maxKeys; {
@@ -726,9 +728,9 @@ func (s *xlSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimi
 		var objInfo ObjectInfo
 		var err error
 		if hasSuffix(walkResult.entry, slashSeparator) {
-			objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfoDir(bucket, walkResult.entry)
+			objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfoDir(ctx, bucket, walkResult.entry)
 		} else {
-			objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfo(bucket, walkResult.entry)
+			objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfo(ctx, bucket, walkResult.entry)
 		}
 		if err != nil {
 			// Ignore errFileNotFound as the object might have got
@@ -787,12 +789,12 @@ func (s *xlSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destB
 	go func() {
 		if gerr := srcSet.GetObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil {
 			if gerr = srcInfo.Writer.Close(); gerr != nil {
-				errorIf(gerr, "Unable to read %s of the object `%s/%s`.", srcBucket, srcObject)
+				logger.LogIf(ctx, gerr)
 				return
 			}
 		}
 		if gerr := srcInfo.Writer.Close(); gerr != nil {
-			errorIf(gerr, "Unable to read %s of the object `%s/%s`.", srcBucket, srcObject)
+			logger.LogIf(ctx, gerr)
 			return
 		}
 	}()
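CopyObject and CopyObjectPart both run the read side in a goroutine that feeds srcInfo.Writer, and both now log read and close failures through the same ctx instead of formatting bucket and object names into a message (the old errorIf calls here even had more format verbs than arguments). The underlying pattern, reduced to standard-library pieces (a sketch, not the commit's code):

	// copyThroughPipe connects a producer and a consumer with an io.Pipe,
	// logging any producer failure against the request context.
	func copyThroughPipe(ctx context.Context, get func(io.Writer) error, put func(io.Reader) error) error {
		pr, pw := io.Pipe()
		go func() {
			err := get(pw)
			logger.LogIf(ctx, err) // records the read failure, if any
			pw.CloseWithError(err) // a nil err closes the pipe cleanly
		}()
		return put(pr)
	}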
@@ -1034,7 +1036,7 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResult
 	}
 	// Save formats `format.json` across all disks.
-	if err = saveFormatXLAll(storageDisks, tmpNewFormats); err != nil {
+	if err = saveFormatXLAll(ctx, storageDisks, tmpNewFormats); err != nil {
 		return madmin.HealResultItem{}, err
 	}
@@ -1228,7 +1230,7 @@ func listDirSetsHealFactory(isLeaf isLeafFunc, sets ...[]StorageAPI) listDirFunc
 }
 // listObjectsHeal - wrapper function implemented over file tree walk.
-func (s *xlSets) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
+func (s *xlSets) listObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
 	// Default is recursive, if delimiter is set then list non recursive.
 	recursive := true
 	if delimiter == slashSeparator {
@@ -1252,7 +1254,7 @@ func (s *xlSets) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKe
 		}
 		listDir := listDirSetsHealFactory(isLeaf, setDisks...)
-		walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, nil, endWalkCh)
+		walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, nil, endWalkCh)
 	}
 	var objInfos []ObjectInfo
@@ -1272,9 +1274,9 @@ func (s *xlSets) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKe
 		var objInfo ObjectInfo
 		var err error
 		if hasSuffix(walkResult.entry, slashSeparator) {
-			objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfoDir(bucket, walkResult.entry)
+			objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfoDir(ctx, bucket, walkResult.entry)
 		} else {
-			objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfo(bucket, walkResult.entry)
+			objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfo(ctx, bucket, walkResult.entry)
 		}
 		if err != nil {
 			// Ignore errFileNotFound
@@ -1320,7 +1322,7 @@ func (s *xlSets) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKe
 // This is not implemented yet, will be implemented later to comply with Admin API refactor.
 func (s *xlSets) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
-	if err = checkListObjsArgs(bucket, prefix, marker, delimiter, s); err != nil {
+	if err = checkListObjsArgs(ctx, bucket, prefix, marker, delimiter, s); err != nil {
 		return loi, err
 	}
@@ -1343,7 +1345,7 @@ func (s *xlSets) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, de
 	}
 	// Initiate a list operation, if successful filter and return quickly.
-	listObjInfo, err := s.listObjectsHeal(bucket, prefix, marker, delimiter, maxKeys)
+	listObjInfo, err := s.listObjectsHeal(ctx, bucket, prefix, marker, delimiter, maxKeys)
 	if err == nil {
 		// We got the entries successfully return.
 		return listObjInfo, nil
