logging: Log only for unhandled errors, remove all the debug logging. (#1652)

This patch brings in the removal of debug logging altogether, instead
we bring in the functionality of being able to trace the errors properly
pointing back to the origination of the problem.

To enable tracing, set the "MINIO_TRACE" environment variable to "1" or
"true"; this prints back traces whenever an error is unhandled or
reaches the handler layer.

By default this tracing is turned off and only user level logging is
provided.
This commit is contained in:
Harshavardhana 2016-05-16 14:31:28 -07:00 committed by Anand Babu (AB) Periasamy
parent 8828fd1e5c
commit 9472299308
38 changed files with 166 additions and 731 deletions

View File

@ -50,7 +50,7 @@ var isValidAccessKey = regexp.MustCompile(`^[a-zA-Z0-9\\-\\.\\_\\~]{5,20}$`)
// mustGenAccessKeys - must generate access credentials. // mustGenAccessKeys - must generate access credentials.
func mustGenAccessKeys() (creds credential) { func mustGenAccessKeys() (creds credential) {
creds, err := genAccessKeys() creds, err := genAccessKeys()
fatalIf(err, "Unable to generate access keys.", nil) fatalIf(err, "Unable to generate access keys.")
return creds return creds
} }

View File

@ -58,9 +58,9 @@ type LogMessage struct {
func (h *accessLogHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { func (h *accessLogHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
message, err := getLogMessage(w, req) message, err := getLogMessage(w, req)
fatalIf(err, "Unable to extract http message.", nil) fatalIf(err, "Unable to parse HTTP request and response fields.")
_, err = h.accessLogFile.Write(message) _, err = h.accessLogFile.Write(message)
fatalIf(err, "Writing to log file failed.", nil) fatalIf(err, "Unable to log HTTP access.")
h.Handler.ServeHTTP(w, req) h.Handler.ServeHTTP(w, req)
} }
@ -112,7 +112,7 @@ func getLogMessage(w http.ResponseWriter, req *http.Request) ([]byte, error) {
// setAccessLogHandler logs requests // setAccessLogHandler logs requests
func setAccessLogHandler(h http.Handler) http.Handler { func setAccessLogHandler(h http.Handler) http.Handler {
file, err := os.OpenFile("access.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) file, err := os.OpenFile("access.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
fatalIf(err, "Unable to open access log.", nil) fatalIf(err, "Failed to open access log.")
return &accessLogHandler{Handler: h, accessLogFile: file} return &accessLogHandler{Handler: h, accessLogFile: file}
} }

View File

@ -111,12 +111,10 @@ func sumMD5(data []byte) []byte {
// Verify if request has valid AWS Signature Version '4'. // Verify if request has valid AWS Signature Version '4'.
func isReqAuthenticated(r *http.Request) (s3Error APIErrorCode) { func isReqAuthenticated(r *http.Request) (s3Error APIErrorCode) {
if r == nil { if r == nil {
errorIf(errInvalidArgument, "HTTP request cannot be empty.", nil)
return ErrInternalError return ErrInternalError
} }
payload, err := ioutil.ReadAll(r.Body) payload, err := ioutil.ReadAll(r.Body)
if err != nil { if err != nil {
errorIf(err, "Unable to read HTTP body.", nil)
return ErrInternalError return ErrInternalError
} }
// Verify Content-Md5, if payload is set. // Verify Content-Md5, if payload is set.

View File

@ -36,7 +36,7 @@ func enforceBucketPolicy(action string, bucket string, reqURL *url.URL) (s3Error
// Read saved bucket policy. // Read saved bucket policy.
policy, err := readBucketPolicy(bucket) policy, err := readBucketPolicy(bucket)
if err != nil { if err != nil {
errorIf(err, "GetBucketPolicy failed.", nil) errorIf(err, "Unable to read bucket policy.")
switch err.(type) { switch err.(type) {
case BucketNotFound: case BucketNotFound:
return ErrNoSuchBucket return ErrNoSuchBucket
@ -50,7 +50,7 @@ func enforceBucketPolicy(action string, bucket string, reqURL *url.URL) (s3Error
// Parse the saved policy. // Parse the saved policy.
bucketPolicy, err := parseBucketPolicy(policy) bucketPolicy, err := parseBucketPolicy(policy)
if err != nil { if err != nil {
errorIf(err, "Parse policy failed.", nil) errorIf(err, "Unable to parse bucket policy.")
return ErrAccessDenied return ErrAccessDenied
} }
@ -117,7 +117,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
} }
if _, err := api.ObjectAPI.GetBucketInfo(bucket); err != nil { if _, err := api.ObjectAPI.GetBucketInfo(bucket); err != nil {
errorIf(err, "GetBucketInfo failed.", nil) errorIf(err, "Unable to fetch bucket info.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
} }
@ -180,7 +180,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
listMultipartsInfo, err := api.ObjectAPI.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) listMultipartsInfo, err := api.ObjectAPI.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
if err != nil { if err != nil {
errorIf(err, "ListMultipartUploads failed.", nil) errorIf(err, "Unable to list multipart uploads.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
} }
@ -252,7 +252,7 @@ func (api objectAPIHandlers) ListObjectsHandler(w http.ResponseWriter, r *http.R
writeSuccessResponse(w, encodedSuccessResponse) writeSuccessResponse(w, encodedSuccessResponse)
return return
} }
errorIf(err, "ListObjects failed.", nil) errorIf(err, "Unable to list objects.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
} }
@ -306,7 +306,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
writeSuccessResponse(w, encodedSuccessResponse) writeSuccessResponse(w, encodedSuccessResponse)
return return
} }
errorIf(err, "ListBuckets failed.", nil) errorIf(err, "Unable to list buckets.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
} }
@ -352,7 +352,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Read incoming body XML bytes. // Read incoming body XML bytes.
if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil { if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
errorIf(err, "DeleteMultipleObjects failed.", nil) errorIf(err, "Unable to read HTTP body.")
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return return
} }
@ -360,7 +360,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Unmarshal list of keys to be deleted. // Unmarshal list of keys to be deleted.
deleteObjects := &DeleteObjectsRequest{} deleteObjects := &DeleteObjectsRequest{}
if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil { if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
errorIf(err, "DeleteMultipartObjects xml decoding failed.", nil) errorIf(err, "Unable to unmarshal delete objects request XML.")
writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path) writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path)
return return
} }
@ -375,7 +375,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
ObjectName: object.ObjectName, ObjectName: object.ObjectName,
}) })
} else { } else {
errorIf(err, "DeleteObject failed.", nil) errorIf(err, "Unable to delete object.")
deleteErrors = append(deleteErrors, DeleteError{ deleteErrors = append(deleteErrors, DeleteError{
Code: errorCodeResponse[toAPIErrorCode(err)].Code, Code: errorCodeResponse[toAPIErrorCode(err)].Code,
Message: errorCodeResponse[toAPIErrorCode(err)].Description, Message: errorCodeResponse[toAPIErrorCode(err)].Description,
@ -423,7 +423,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
// Make bucket. // Make bucket.
err := api.ObjectAPI.MakeBucket(bucket) err := api.ObjectAPI.MakeBucket(bucket)
if err != nil { if err != nil {
errorIf(err, "MakeBucket failed.", nil) errorIf(err, "Unable to create a bucket.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
} }
@ -467,14 +467,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// be loaded in memory, the remaining being put in temporary files. // be loaded in memory, the remaining being put in temporary files.
reader, err := r.MultipartReader() reader, err := r.MultipartReader()
if err != nil { if err != nil {
errorIf(err, "Unable to initialize multipart reader.", nil) errorIf(err, "Unable to initialize multipart reader.")
writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path) writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path)
return return
} }
fileBody, formValues, err := extractHTTPFormValues(reader) fileBody, formValues, err := extractHTTPFormValues(reader)
if err != nil { if err != nil {
errorIf(err, "Unable to parse form values.", nil) errorIf(err, "Unable to parse form values.")
writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path) writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path)
return return
} }
@ -494,7 +494,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
} }
md5Sum, err := api.ObjectAPI.PutObject(bucket, object, -1, fileBody, nil) md5Sum, err := api.ObjectAPI.PutObject(bucket, object, -1, fileBody, nil)
if err != nil { if err != nil {
errorIf(err, "PutObject failed.", nil) errorIf(err, "Unable to create object.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
} }
@ -540,7 +540,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
} }
if _, err := api.ObjectAPI.GetBucketInfo(bucket); err != nil { if _, err := api.ObjectAPI.GetBucketInfo(bucket); err != nil {
errorIf(err, "GetBucketInfo failed.", nil) errorIf(err, "Unable to fetch bucket info.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
} }
@ -565,7 +565,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
} }
if err := api.ObjectAPI.DeleteBucket(bucket); err != nil { if err := api.ObjectAPI.DeleteBucket(bucket); err != nil {
errorIf(err, "DeleteBucket failed.", nil) errorIf(err, "Unable to delete a bucket.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
} }

View File

@ -67,7 +67,7 @@ func bucketPolicyActionMatch(action string, statement policyStatement) bool {
for _, policyAction := range statement.Actions { for _, policyAction := range statement.Actions {
// Policy action can be a regex, validate the action with matching string. // Policy action can be a regex, validate the action with matching string.
matched, err := regexp.MatchString(policyAction, action) matched, err := regexp.MatchString(policyAction, action)
fatalIf(err, "Invalid pattern, please verify the pattern string.", nil) fatalIf(err, "Invalid action \"%s\" in bucket policy.", action)
if matched { if matched {
return true return true
} }
@ -190,7 +190,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
// bucket policies are limited to 20KB in size, using a limit reader. // bucket policies are limited to 20KB in size, using a limit reader.
bucketPolicyBuf, err := ioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize)) bucketPolicyBuf, err := ioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize))
if err != nil { if err != nil {
errorIf(err, "Reading policy failed.", nil) errorIf(err, "Unable to read bucket policy.")
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return return
} }
@ -198,7 +198,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
// Parse bucket policy. // Parse bucket policy.
bucketPolicy, err := parseBucketPolicy(bucketPolicyBuf) bucketPolicy, err := parseBucketPolicy(bucketPolicyBuf)
if err != nil { if err != nil {
errorIf(err, "Unable to parse bucket policy.", nil) errorIf(err, "Unable to parse bucket policy.")
writeErrorResponse(w, r, ErrInvalidPolicyDocument, r.URL.Path) writeErrorResponse(w, r, ErrInvalidPolicyDocument, r.URL.Path)
return return
} }
@ -211,7 +211,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
// Save bucket policy. // Save bucket policy.
if err := writeBucketPolicy(bucket, bucketPolicyBuf); err != nil { if err := writeBucketPolicy(bucket, bucketPolicyBuf); err != nil {
errorIf(err, "SaveBucketPolicy failed.", nil) errorIf(err, "Unable to write bucket policy.")
switch err.(type) { switch err.(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
@ -245,7 +245,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
// Delete bucket access policy. // Delete bucket access policy.
if err := removeBucketPolicy(bucket); err != nil { if err := removeBucketPolicy(bucket); err != nil {
errorIf(err, "DeleteBucketPolicy failed.", nil) errorIf(err, "Unable to remove bucket policy.")
switch err.(type) { switch err.(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
@ -282,7 +282,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
// Read bucket access policy. // Read bucket access policy.
p, err := readBucketPolicy(bucket) p, err := readBucketPolicy(bucket)
if err != nil { if err != nil {
errorIf(err, "GetBucketPolicy failed.", nil) errorIf(err, "Unable to read bucket policy.")
switch err.(type) { switch err.(type) {
case BucketNameInvalid: case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path) writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)

View File

@ -45,7 +45,7 @@ func getCertsPath() (string, error) {
// mustGetCertsPath must get certs path. // mustGetCertsPath must get certs path.
func mustGetCertsPath() string { func mustGetCertsPath() string {
certsPath, err := getCertsPath() certsPath, err := getCertsPath()
fatalIf(err, "Unable to retrieve certs path.", nil) fatalIf(err, "Failed to get certificate path.")
return certsPath return certsPath
} }

View File

@ -31,6 +31,10 @@ var globalFlags = []cli.Flag{
Value: mustGetConfigPath(), Value: mustGetConfigPath(),
Usage: "Path to configuration folder.", Usage: "Path to configuration folder.",
}, },
cli.BoolFlag{
Name: "quiet",
Usage: "Suppress chatty output.",
},
} }
// registerCommand registers a cli command. // registerCommand registers a cli command.

View File

@ -40,18 +40,18 @@ func purgeV1() {
if err != nil && os.IsNotExist(err) { if err != nil && os.IsNotExist(err) {
return return
} }
fatalIf(err, "Unable to load config version 1.", nil) fatalIf(err, "Unable to load config version 1.")
if cv1.Version == "1" { if cv1.Version == "1" {
console.Println("Unsupported config version 1 found, removed successfully.") console.Println("Removed unsupported config version 1.")
/// Purge old fsUsers.json file /// Purge old fsUsers.json file
configPath, err := getConfigPath() configPath, err := getConfigPath()
fatalIf(err, "Unable to retrieve config path.", nil) fatalIf(err, "Unable to retrieve config path.")
configFile := filepath.Join(configPath, "fsUsers.json") configFile := filepath.Join(configPath, "fsUsers.json")
os.RemoveAll(configFile) os.RemoveAll(configFile)
} }
fatalIf(errors.New(""), "Unexpected version found "+cv1.Version+", cannot migrate.", nil) fatalIf(errors.New(""), "Failed to migrate unrecognized config version "+cv1.Version+".")
} }
// Version '2' to '3' config migration adds new fields and re-orders // Version '2' to '3' config migration adds new fields and re-orders
@ -61,7 +61,7 @@ func migrateV2ToV3() {
if err != nil && os.IsNotExist(err) { if err != nil && os.IsNotExist(err) {
return return
} }
fatalIf(err, "Unable to load config version 2.", nil) fatalIf(err, "Unable to load config version 2.")
if cv2.Version != "2" { if cv2.Version != "2" {
return return
} }
@ -98,14 +98,14 @@ func migrateV2ToV3() {
srvConfig.Logger.Syslog = slogger srvConfig.Logger.Syslog = slogger
qc, err := quick.New(srvConfig) qc, err := quick.New(srvConfig)
fatalIf(err, "Unable to initialize config.", nil) fatalIf(err, "Unable to initialize config.")
configFile, err := getConfigFile() configFile, err := getConfigFile()
fatalIf(err, "Unable to get config file.", nil) fatalIf(err, "Unable to get config file.")
// Migrate the config. // Migrate the config.
err = qc.Save(configFile) err = qc.Save(configFile)
fatalIf(err, "Migrating from version "+cv2.Version+" to "+srvConfig.Version+" failed.", nil) fatalIf(err, "Failed to migrate config from "+cv2.Version+" to "+srvConfig.Version+".")
console.Println("Migration from version " + cv2.Version + " to " + srvConfig.Version + " completed successfully.") console.Println("Migration from version " + cv2.Version + " to " + srvConfig.Version + " completed successfully.")
} }
@ -118,7 +118,7 @@ func migrateV3ToV4() {
if err != nil && os.IsNotExist(err) { if err != nil && os.IsNotExist(err) {
return return
} }
fatalIf(err, "Unable to load config version 3.", nil) fatalIf(err, "Unable to load config version 3.")
if cv3.Version != "3" { if cv3.Version != "3" {
return return
} }
@ -137,12 +137,12 @@ func migrateV3ToV4() {
srvConfig.Logger.Syslog = cv3.Logger.Syslog srvConfig.Logger.Syslog = cv3.Logger.Syslog
qc, err := quick.New(srvConfig) qc, err := quick.New(srvConfig)
fatalIf(err, "Unable to initialize the quick config.", nil) fatalIf(err, "Unable to initialize the quick config.")
configFile, err := getConfigFile() configFile, err := getConfigFile()
fatalIf(err, "Unable to get config file.", nil) fatalIf(err, "Unable to get config file.")
err = qc.Save(configFile) err = qc.Save(configFile)
fatalIf(err, "Migrating from version "+cv3.Version+" to "+srvConfig.Version+" failed.", nil) fatalIf(err, "Failed to migrate config from "+cv3.Version+" to "+srvConfig.Version+".")
console.Println("Migration from version " + cv3.Version + " to " + srvConfig.Version + " completed successfully.") console.Println("Migration from version " + cv3.Version + " to " + srvConfig.Version + " completed successfully.")
} }

View File

@ -47,7 +47,7 @@ func getConfigPath() (string, error) {
// mustGetConfigPath must get server config path. // mustGetConfigPath must get server config path.
func mustGetConfigPath() string { func mustGetConfigPath() string {
configPath, err := getConfigPath() configPath, err := getConfigPath()
fatalIf(err, "Unable to get config path.", nil) fatalIf(err, "Unable to get config path.")
return configPath return configPath
} }
@ -73,7 +73,7 @@ func isConfigFileExists() bool {
// mustGetConfigFile must get server config file. // mustGetConfigFile must get server config file.
func mustGetConfigFile() string { func mustGetConfigFile() string {
configFile, err := getConfigFile() configFile, err := getConfigFile()
fatalIf(err, "Unable to get config file.", nil) fatalIf(err, "Unable to get config file.")
return configFile return configFile
} }

View File

@ -1,11 +1,9 @@
### Logging. ### Logging.
- `log.Fatalf` - `fatalIf` - wrapper function which takes error and prints jsonic error messages.
- `log.Errorf` - `errorIf` - similar to fatalIf but doesn't exit on err != nil.
- `log.Warnf`
- `log.Infof`
Logging is enabled across the codebase. There are three types of logging supported. Supported logging targets.
- console - console
- file - file
@ -20,11 +18,11 @@ Sample logger section from `~/.minio/config.json`
"file": { "file": {
"enable": false, "enable": false,
"fileName": "", "fileName": "",
"level": "trace" "level": "error"
}, },
"syslog": { "syslog": {
"enable": false, "enable": false,
"address": "", "address": "",
"level": "info" "level": "error"
} }
``` ```

View File

@ -16,11 +16,7 @@
package main package main
import ( import "github.com/fatih/color"
"github.com/fatih/color"
"github.com/minio/cli"
"github.com/minio/mc/pkg/console"
)
// Global constants for Minio. // Global constants for Minio.
const ( const (
@ -41,7 +37,7 @@ const (
var ( var (
globalQuiet = false // Quiet flag set via command line globalQuiet = false // Quiet flag set via command line
globalDebug = false // Debug flag set via command line globalTrace = false // Trace flag set via environment setting.
// Add new global flags here. // Add new global flags here.
) )
@ -51,22 +47,3 @@ var (
colorWhite = color.New(color.FgWhite, color.Bold).SprintfFunc() colorWhite = color.New(color.FgWhite, color.Bold).SprintfFunc()
colorGreen = color.New(color.FgGreen, color.Bold).SprintfFunc() colorGreen = color.New(color.FgGreen, color.Bold).SprintfFunc()
) )
// Set global states. NOTE: It is deliberately kept monolithic to
// ensure we dont miss out any flags.
func setGlobals(quiet, debug bool) {
globalQuiet = quiet
globalDebug = debug
// Enable debug messages if requested.
if globalDebug {
console.DebugPrint = true
}
}
// Set global states. NOTE: It is deliberately kept monolithic to
// ensure we dont miss out any flags.
func setGlobalsFromContext(ctx *cli.Context) {
quiet := ctx.Bool("quiet") || ctx.GlobalBool("quiet")
debug := ctx.Bool("debug") || ctx.GlobalBool("debug")
setGlobals(quiet, debug)
}

View File

@ -36,10 +36,11 @@ func enableConsoleLogger() {
log.Out = ioutil.Discard log.Out = ioutil.Discard
return return
} }
// log.Out and log.Formatter use the default versions. // log.Out and log.Formatter use the default versions.
// Only set specific log level. // Only set specific log level.
lvl, err := logrus.ParseLevel(clogger.Level) lvl, err := logrus.ParseLevel(clogger.Level)
fatalIf(err, "Unknown log level detected, please fix your console logger configuration.", nil) fatalIf(err, "Unknown log level found in the config file.")
log.Level = lvl log.Level = lvl
} }

View File

@ -40,13 +40,13 @@ func enableFileLogger() {
} }
file, err := os.OpenFile(flogger.Filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) file, err := os.OpenFile(flogger.Filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
fatalIf(err, "Unable to open log file.", nil) fatalIf(err, "Unable to open log file.")
// Add a local file hook. // Add a local file hook.
log.Hooks.Add(&localFile{file}) log.Hooks.Add(&localFile{file})
lvl, err := logrus.ParseLevel(flogger.Level) lvl, err := logrus.ParseLevel(flogger.Level)
fatalIf(err, "Unknown log level detected, please fix your console logger configuration.", nil) fatalIf(err, "Unknown log level found in the config file.")
// Set default JSON formatter. // Set default JSON formatter.
log.Formatter = new(logrus.JSONFormatter) log.Formatter = new(logrus.JSONFormatter)
@ -64,14 +64,11 @@ func (l *localFile) Fire(entry *logrus.Entry) error {
return nil return nil
} }
// Levels - // Levels - indicate log levels supported.
func (l *localFile) Levels() []logrus.Level { func (l *localFile) Levels() []logrus.Level {
return []logrus.Level{ return []logrus.Level{
logrus.PanicLevel, logrus.PanicLevel,
logrus.FatalLevel, logrus.FatalLevel,
logrus.ErrorLevel, logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
} }
} }

View File

@ -41,11 +41,11 @@ type syslogHook struct {
// enableSyslogLogger - enable logger at raddr. // enableSyslogLogger - enable logger at raddr.
func enableSyslogLogger(raddr string) { func enableSyslogLogger(raddr string) {
syslogHook, err := newSyslog("udp", raddr, syslog.LOG_ERR, "MINIO") syslogHook, err := newSyslog("udp", raddr, syslog.LOG_ERR, "MINIO")
fatalIf(err, "Unable to instantiate syslog.", nil) fatalIf(err, "Unable to initialize syslog logger.")
log.Hooks.Add(syslogHook) // Add syslog hook. log.Hooks.Add(syslogHook) // Add syslog hook.
log.Formatter = &logrus.JSONFormatter{} // JSON formatted log. log.Formatter = &logrus.JSONFormatter{} // JSON formatted log.
log.Level = logrus.InfoLevel // Minimum log level. log.Level = logrus.ErrorLevel // Minimum log level.
} }
// newSyslog - Creates a hook to be added to an instance of logger. // newSyslog - Creates a hook to be added to an instance of logger.
@ -67,12 +67,6 @@ func (hook *syslogHook) Fire(entry *logrus.Entry) error {
return hook.writer.Crit(line) return hook.writer.Crit(line)
case logrus.ErrorLevel: case logrus.ErrorLevel:
return hook.writer.Err(line) return hook.writer.Err(line)
case logrus.WarnLevel:
return hook.writer.Warning(line)
case logrus.InfoLevel:
return hook.writer.Info(line)
case logrus.DebugLevel:
return hook.writer.Debug(line)
default: default:
return nil return nil
} }
@ -84,8 +78,5 @@ func (hook *syslogHook) Levels() []logrus.Level {
logrus.PanicLevel, logrus.PanicLevel,
logrus.FatalLevel, logrus.FatalLevel,
logrus.ErrorLevel, logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
} }
} }

View File

@ -26,5 +26,5 @@ type syslogLogger struct {
// enableSyslogLogger - unsupported on windows. // enableSyslogLogger - unsupported on windows.
func enableSyslogLogger(raddr string) { func enableSyslogLogger(raddr string) {
fatalIf(errSyslogNotSupported, "Unable to enable syslog.", nil) fatalIf(errSyslogNotSupported, "Unable to enable syslog.")
} }

View File

@ -18,8 +18,10 @@ package main
import ( import (
"reflect" "reflect"
"runtime/debug"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/minio/minio/pkg/probe"
) )
type fields map[string]interface{} type fields map[string]interface{}
@ -41,38 +43,37 @@ type logger struct {
} }
// errorIf synonymous with fatalIf but doesn't exit on error != nil // errorIf synonymous with fatalIf but doesn't exit on error != nil
func errorIf(err error, msg string, fields logrus.Fields) { func errorIf(err error, msg string, data ...interface{}) {
if err == nil { if err == nil {
return return
} }
if fields == nil { sysInfo := probe.GetSysInfo()
fields = make(logrus.Fields) fields := logrus.Fields{
"cause": err.Error(),
"type": reflect.TypeOf(err),
"sysInfo": sysInfo,
} }
fields["Error"] = struct { if globalTrace {
Cause string `json:"cause,omitempty"` stack := debug.Stack()
Type string `json:"type,omitempty"` fields["stack"] = string(stack)
}{
err.Error(),
reflect.TypeOf(err).String(),
} }
log.WithFields(fields).Error(msg) log.WithFields(fields).Errorf(msg, data...)
} }
// fatalIf wrapper function which takes error and prints jsonic error messages. // fatalIf wrapper function which takes error and prints jsonic error messages.
func fatalIf(err error, msg string, fields logrus.Fields) { func fatalIf(err error, msg string, data ...interface{}) {
if err == nil { if err == nil {
return return
} }
if fields == nil { sysInfo := probe.GetSysInfo()
fields = make(logrus.Fields) fields := logrus.Fields{
"cause": err.Error(),
"type": reflect.TypeOf(err),
"sysInfo": sysInfo,
} }
if globalTrace {
fields["Error"] = struct { stack := debug.Stack()
Cause string `json:"cause,omitempty"` fields["stack"] = string(stack)
Type string `json:"type,omitempty"`
}{
err.Error(),
reflect.TypeOf(err).String(),
} }
log.WithFields(fields).Fatal(msg) log.WithFields(fields).Fatalf(msg, data...)
} }

View File

@ -36,12 +36,12 @@ func (s *LoggerSuite) TestLogger(c *C) {
log.Out = &buffer log.Out = &buffer
log.Formatter = new(logrus.JSONFormatter) log.Formatter = new(logrus.JSONFormatter)
errorIf(errors.New("Fake error"), "Failed with error.", nil) errorIf(errors.New("Fake error"), "Failed with error.")
err := json.Unmarshal(buffer.Bytes(), &fields) err := json.Unmarshal(buffer.Bytes(), &fields)
c.Assert(err, IsNil) c.Assert(err, IsNil)
c.Assert(fields["level"], Equals, "error") c.Assert(fields["level"], Equals, "error")
msg, ok := fields["Error"] msg, ok := fields["cause"]
c.Assert(ok, Equals, true) c.Assert(ok, Equals, true)
c.Assert(msg.(map[string]interface{})["cause"], Equals, "Fake error") c.Assert(msg, Equals, "Fake error")
} }

57
main.go
View File

@ -20,15 +20,11 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"sort" "sort"
"strconv"
"github.com/dustin/go-humanize"
"github.com/minio/cli" "github.com/minio/cli"
"github.com/minio/mc/pkg/console" "github.com/minio/mc/pkg/console"
"github.com/minio/minio/pkg/probe" "github.com/minio/minio/pkg/probe"
"github.com/olekukonko/ts"
"github.com/pkg/profile" "github.com/pkg/profile"
) )
@ -60,10 +56,7 @@ FLAGS:
{{end}}{{end}} {{end}}{{end}}
VERSION: VERSION:
` + minioVersion + ` + minioVersion +
`{{ "\n"}}{{range $key, $value := ExtraInfo}} `{{ "\n"}}`
{{$key}}:
{{$value}}
{{end}}`
// init - check the environment before main starts // init - check the environment before main starts
func init() { func init() {
@ -92,32 +85,6 @@ func enableLoggers() {
// Add your logger here. // Add your logger here.
} }
// Tries to get os/arch/platform specific information
// Returns a map of current os/arch/platform/memstats
func getSystemData() map[string]string {
host, err := os.Hostname()
if err != nil {
host = ""
}
memstats := &runtime.MemStats{}
runtime.ReadMemStats(memstats)
mem := fmt.Sprintf("Used: %s | Allocated: %s | Used-Heap: %s | Allocated-Heap: %s",
humanize.Bytes(memstats.Alloc),
humanize.Bytes(memstats.TotalAlloc),
humanize.Bytes(memstats.HeapAlloc),
humanize.Bytes(memstats.HeapSys))
platform := fmt.Sprintf("Host: %s | OS: %s | Arch: %s",
host,
runtime.GOOS,
runtime.GOARCH)
goruntime := fmt.Sprintf("Version: %s | CPUs: %s", runtime.Version(), strconv.Itoa(runtime.NumCPU()))
return map[string]string{
"PLATFORM": platform,
"RUNTIME": goruntime,
"MEM": mem,
}
}
func findClosestCommands(command string) []string { func findClosestCommands(command string) []string {
var closestCommands []string var closestCommands []string
for _, value := range commandsTree.PrefixMatch(command) { for _, value := range commandsTree.PrefixMatch(command) {
@ -195,15 +162,16 @@ func main() {
defer profile.Start(profile.BlockProfile, profile.ProfilePath(mustGetProfilePath())).Stop() defer profile.Start(profile.BlockProfile, profile.ProfilePath(mustGetProfilePath())).Stop()
} }
// Set global trace flag.
trace := os.Getenv("MINIO_TRACE")
globalTrace = trace == "1"
probe.Init() // Set project's root source path. probe.Init() // Set project's root source path.
probe.SetAppInfo("Release-Tag", minioReleaseTag) probe.SetAppInfo("Release-Tag", minioReleaseTag)
probe.SetAppInfo("Commit-ID", minioShortCommitID) probe.SetAppInfo("Commit-ID", minioShortCommitID)
app := registerApp() app := registerApp()
app.Before = func(c *cli.Context) error { app.Before = func(c *cli.Context) error {
// Set global flags.
setGlobalsFromContext(c)
// Sets new config folder. // Sets new config folder.
setGlobalConfigPath(c.GlobalString("config-dir")) setGlobalConfigPath(c.GlobalString("config-dir"))
@ -215,7 +183,7 @@ func main() {
// Initialize config. // Initialize config.
err := initConfig() err := initConfig()
fatalIf(err, "Unable to initialize minio config.", nil) fatalIf(err, "Unable to initialize minio config.")
// Enable all loggers by now. // Enable all loggers by now.
enableLoggers() enableLoggers()
@ -223,6 +191,9 @@ func main() {
// Initialize name space lock. // Initialize name space lock.
initNSLock() initNSLock()
// Set global quiet flag.
globalQuiet = c.Bool("quiet") || c.GlobalBool("quiet")
// Do not print update messages, if quiet flag is set. // Do not print update messages, if quiet flag is set.
if !globalQuiet { if !globalQuiet {
// Do not print any errors in release update function. // Do not print any errors in release update function.
@ -236,16 +207,6 @@ func main() {
// Return here. // Return here.
return nil return nil
} }
app.ExtraInfo = func() map[string]string {
if _, e := ts.GetSize(); e != nil {
globalQuiet = true
}
// Enable if debug is enabled.
if globalDebug {
return getSystemData()
}
return make(map[string]string)
}
// Run the app - exit on error. // Run the app - exit on error.
app.RunAndExitOnError() app.RunAndExitOnError()
} }

View File

@ -17,9 +17,8 @@
package main package main
import ( import (
"errors"
"sync" "sync"
"github.com/Sirupsen/logrus"
) )
// nsParam - carries name space resource. // nsParam - carries name space resource.
@ -91,10 +90,7 @@ func (n *nsLockMap) unlock(volume, path string, readLock bool) {
nsLk.Unlock() nsLk.Unlock()
} }
if nsLk.ref == 0 { if nsLk.ref == 0 {
log.WithFields(logrus.Fields{ errorIf(errors.New("Namespace reference count cannot be 0."), "Invalid reference count detected.")
"volume": volume,
"path": path,
}).Error("ref count in NS lock can not be 0.")
} }
if nsLk.ref != 0 { if nsLk.ref != 0 {
nsLk.ref-- nsLk.ref--

View File

@ -27,7 +27,6 @@ import (
"strconv" "strconv"
"strings" "strings"
"github.com/Sirupsen/logrus"
"github.com/skyrings/skyring-common/tools/uuid" "github.com/skyrings/skyring-common/tools/uuid"
) )
@ -306,12 +305,6 @@ func listMetaBucketMultipartFiles(layer ObjectLayer, prefixPath string, markerPa
} }
// For any walk error return right away. // For any walk error return right away.
if walkResult.err != nil { if walkResult.err != nil {
log.WithFields(logrus.Fields{
"bucket": minioMetaBucket,
"prefix": prefixPath,
"marker": markerPath,
"recursive": recursive,
}).Debugf("Walk resulted in an error %s", walkResult.err)
// File not found or Disk not found is a valid case. // File not found or Disk not found is a valid case.
if walkResult.err == errFileNotFound || walkResult.err == errDiskNotFound { if walkResult.err == errFileNotFound || walkResult.err == errDiskNotFound {
return nil, true, nil return nil, true, nil
@ -326,9 +319,6 @@ func listMetaBucketMultipartFiles(layer ObjectLayer, prefixPath string, markerPa
// entries are empty. // entries are empty.
entries, err = listLeafEntries(storage, fi.Name) entries, err = listLeafEntries(storage, fi.Name)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"prefixPath": fi.Name,
}).Errorf("%s", err)
return nil, false, err return nil, false, err
} }
} }
@ -442,12 +432,6 @@ func listMultipartUploadsCommon(layer ObjectLayer, bucket, prefix, keyMarker, up
// List all the multipart files at prefixPath, starting with marker keyMarkerPath. // List all the multipart files at prefixPath, starting with marker keyMarkerPath.
fileInfos, eof, err := listMetaBucketMultipartFiles(layer, multipartPrefixPath, multipartMarkerPath, recursive, maxUploads) fileInfos, eof, err := listMetaBucketMultipartFiles(layer, multipartPrefixPath, multipartMarkerPath, recursive, maxUploads)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"prefixPath": multipartPrefixPath,
"markerPath": multipartMarkerPath,
"recursive": recursive,
"maxUploads": maxUploads,
}).Errorf("listMetaBucketMultipartFiles failed with %s", err)
return ListMultipartsInfo{}, err return ListMultipartsInfo{}, err
} }
@ -552,9 +536,8 @@ func isUploadIDExists(storage StorageAPI, bucket, object, uploadID string) bool
if err == errFileNotFound { if err == errFileNotFound {
return false return false
} }
log.Errorf("StatFile failed wtih %s", err) errorIf(err, "Stat failed on "+minioMetaBucket+"/"+uploadIDPath+".")
return false return false
} }
log.Debugf("FileInfo: %v", st)
return st.Mode.IsRegular() return st.Mode.IsRegular()
} }

View File

@ -19,8 +19,6 @@ package main
import ( import (
"sort" "sort"
"strings" "strings"
"github.com/Sirupsen/logrus"
) )
// Common initialization needed for both object layers. // Common initialization needed for both object layers.
@ -198,7 +196,6 @@ func listObjectsCommon(layer ObjectLayer, bucket, prefix, marker, delimiter stri
var fileInfos []FileInfo var fileInfos []FileInfo
var eof bool var eof bool
var nextMarker string var nextMarker string
log.Debugf("Reading from the tree walk channel has begun.")
for i := 0; i < maxKeys; { for i := 0; i < maxKeys; {
walkResult, ok := <-walker.ch walkResult, ok := <-walker.ch
if !ok { if !ok {
@ -208,12 +205,6 @@ func listObjectsCommon(layer ObjectLayer, bucket, prefix, marker, delimiter stri
} }
// For any walk error return right away. // For any walk error return right away.
if walkResult.err != nil { if walkResult.err != nil {
log.WithFields(logrus.Fields{
"bucket": bucket,
"prefix": prefix,
"marker": marker,
"recursive": recursive,
}).Debugf("Walk resulted in an error %s", walkResult.err)
// File not found is a valid case. // File not found is a valid case.
if walkResult.err == errFileNotFound { if walkResult.err == errFileNotFound {
return ListObjectsInfo{}, nil return ListObjectsInfo{}, nil
@ -230,12 +221,6 @@ func listObjectsCommon(layer ObjectLayer, bucket, prefix, marker, delimiter stri
i++ i++
} }
params := listParams{bucket, recursive, nextMarker, prefix} params := listParams{bucket, recursive, nextMarker, prefix}
log.WithFields(logrus.Fields{
"bucket": params.bucket,
"recursive": params.recursive,
"marker": params.marker,
"prefix": params.prefix,
}).Debugf("Save the tree walk into map for subsequent requests.")
if !eof { if !eof {
saveTreeWalk(layer, params, walker) saveTreeWalk(layer, params, walker)
} }
@ -268,7 +253,7 @@ func isBucketExist(storage StorageAPI, bucketName string) bool {
if err == errVolumeNotFound { if err == errVolumeNotFound {
return false return false
} }
log.Errorf("StatVol failed with %s", err) errorIf(err, "Stat failed on bucket "+bucketName+".")
return false return false
} }
return true return true

View File

@ -98,7 +98,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
// Fetch object stat info. // Fetch object stat info.
objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object) objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
if err != nil { if err != nil {
errorIf(err, "GetObjectInfo failed.", nil) errorIf(err, "Unable to fetch object info.")
apiErr := toAPIErrorCode(err) apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey { if apiErr == ErrNoSuchKey {
apiErr = errAllowableObjectNotFound(bucket, r) apiErr = errAllowableObjectNotFound(bucket, r)
@ -128,7 +128,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
startOffset := hrange.start startOffset := hrange.start
readCloser, err := api.ObjectAPI.GetObject(bucket, object, startOffset) readCloser, err := api.ObjectAPI.GetObject(bucket, object, startOffset)
if err != nil { if err != nil {
errorIf(err, "GetObject failed.", nil) errorIf(err, "Unable to read object.")
apiErr := toAPIErrorCode(err) apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey { if apiErr == ErrNoSuchKey {
apiErr = errAllowableObjectNotFound(bucket, r) apiErr = errAllowableObjectNotFound(bucket, r)
@ -146,13 +146,13 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
if hrange.length > 0 { if hrange.length > 0 {
if _, err := io.CopyN(w, readCloser, hrange.length); err != nil { if _, err := io.CopyN(w, readCloser, hrange.length); err != nil {
errorIf(err, "Writing to client failed", nil) errorIf(err, "Writing to client failed.")
// Do not send error response here, since client could have died. // Do not send error response here, since client could have died.
return return
} }
} else { } else {
if _, err := io.Copy(w, readCloser); err != nil { if _, err := io.Copy(w, readCloser); err != nil {
errorIf(err, "Writing to client failed", nil) errorIf(err, "Writing to client failed.")
// Do not send error response here, since client could have died. // Do not send error response here, since client could have died.
return return
} }
@ -282,7 +282,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object) objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
if err != nil { if err != nil {
errorIf(err, "GetObjectInfo failed.", nil) errorIf(err, "Unable to fetch object info.")
apiErr := toAPIErrorCode(err) apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey { if apiErr == ErrNoSuchKey {
apiErr = errAllowableObjectNotFound(bucket, r) apiErr = errAllowableObjectNotFound(bucket, r)
@ -368,7 +368,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
objInfo, err := api.ObjectAPI.GetObjectInfo(sourceBucket, sourceObject) objInfo, err := api.ObjectAPI.GetObjectInfo(sourceBucket, sourceObject)
if err != nil { if err != nil {
errorIf(err, "GetObjectInfo failed.", nil) errorIf(err, "Unable to fetch object info.")
writeErrorResponse(w, r, toAPIErrorCode(err), objectSource) writeErrorResponse(w, r, toAPIErrorCode(err), objectSource)
return return
} }
@ -397,7 +397,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// Get the object. // Get the object.
readCloser, err := api.ObjectAPI.GetObject(sourceBucket, sourceObject, startOffset) readCloser, err := api.ObjectAPI.GetObject(sourceBucket, sourceObject, startOffset)
if err != nil { if err != nil {
errorIf(err, "Reading "+objectSource+" failed.", nil) errorIf(err, "Unable to read an object.")
writeErrorResponse(w, r, toAPIErrorCode(err), objectSource) writeErrorResponse(w, r, toAPIErrorCode(err), objectSource)
return return
} }
@ -407,14 +407,14 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// Create the object. // Create the object.
md5Sum, err := api.ObjectAPI.PutObject(bucket, object, size, readCloser, nil) md5Sum, err := api.ObjectAPI.PutObject(bucket, object, size, readCloser, nil)
if err != nil { if err != nil {
errorIf(err, "PutObject failed.", nil) errorIf(err, "Unable to create an object.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
} }
objInfo, err = api.ObjectAPI.GetObjectInfo(bucket, object) objInfo, err = api.ObjectAPI.GetObjectInfo(bucket, object)
if err != nil { if err != nil {
errorIf(err, "GetObjectInfo failed.", nil) errorIf(err, "Unable to fetch object info.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
} }
@ -538,7 +538,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
// Get Content-Md5 sent by client and verify if valid // Get Content-Md5 sent by client and verify if valid
md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5")) md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
if err != nil { if err != nil {
errorIf(err, "Decoding md5 failed.", nil) errorIf(err, "Unable to validate content-md5 format.")
writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path) writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
return return
} }
@ -583,7 +583,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
if wErr == io.ErrClosedPipe { if wErr == io.ErrClosedPipe {
return return
} }
errorIf(wErr, "Unable to read HTTP body.", nil) errorIf(wErr, "Unable to read from HTTP body.")
writer.CloseWithError(wErr) writer.CloseWithError(wErr)
return return
} }
@ -620,7 +620,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
wg.Wait() wg.Wait()
} }
if err != nil { if err != nil {
errorIf(err, "PutObject failed.", nil) errorIf(err, "Unable to create an object.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
} }
@ -659,7 +659,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
uploadID, err := api.ObjectAPI.NewMultipartUpload(bucket, object) uploadID, err := api.ObjectAPI.NewMultipartUpload(bucket, object)
if err != nil { if err != nil {
errorIf(err, "NewMultipartUpload failed.", nil) errorIf(err, "Unable to initiate new multipart upload id.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
} }
@ -681,7 +681,6 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
// get Content-Md5 sent by client and verify if valid // get Content-Md5 sent by client and verify if valid
md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5")) md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
if err != nil { if err != nil {
errorIf(err, "Decoding md5 failed.", nil)
writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path) writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
return return
} }
@ -739,7 +738,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
if wErr == io.ErrClosedPipe { if wErr == io.ErrClosedPipe {
return return
} }
errorIf(wErr, "Unable to read HTTP body.", nil) errorIf(wErr, "Unable to read from HTTP request body.")
writer.CloseWithError(wErr) writer.CloseWithError(wErr)
return return
} }
@ -771,7 +770,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
wg.Wait() wg.Wait()
} }
if err != nil { if err != nil {
errorIf(err, "PutObjectPart failed.", nil) errorIf(err, "Unable to create object part.")
// Verify if the underlying error is signature mismatch. // Verify if the underlying error is signature mismatch.
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
@ -808,7 +807,7 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
uploadID, _, _, _ := getObjectResources(r.URL.Query()) uploadID, _, _, _ := getObjectResources(r.URL.Query())
if err := api.ObjectAPI.AbortMultipartUpload(bucket, object, uploadID); err != nil { if err := api.ObjectAPI.AbortMultipartUpload(bucket, object, uploadID); err != nil {
errorIf(err, "AbortMutlipartUpload failed.", nil) errorIf(err, "Unable to abort multipart upload.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
} }
@ -854,7 +853,7 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht
listPartsInfo, err := api.ObjectAPI.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts) listPartsInfo, err := api.ObjectAPI.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
if err != nil { if err != nil {
errorIf(err, "ListObjectParts failed.", nil) errorIf(err, "Unable to list uploaded parts.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
} }
@ -896,13 +895,13 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
} }
completeMultipartBytes, err := ioutil.ReadAll(r.Body) completeMultipartBytes, err := ioutil.ReadAll(r.Body)
if err != nil { if err != nil {
errorIf(err, "CompleteMultipartUpload failed.", nil) errorIf(err, "Unable to complete multipart upload.")
writeErrorResponse(w, r, ErrInternalError, r.URL.Path) writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return return
} }
complMultipartUpload := &completeMultipartUpload{} complMultipartUpload := &completeMultipartUpload{}
if err = xml.Unmarshal(completeMultipartBytes, complMultipartUpload); err != nil { if err = xml.Unmarshal(completeMultipartBytes, complMultipartUpload); err != nil {
errorIf(err, "XML Unmarshal failed", nil) errorIf(err, "Unable to parse complete multipart upload XML.")
writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path) writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path)
return return
} }
@ -920,7 +919,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
// Complete multipart upload. // Complete multipart upload.
md5Sum, err = api.ObjectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts) md5Sum, err = api.ObjectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts)
if err != nil { if err != nil {
errorIf(err, "CompleteMultipartUpload failed.", nil) errorIf(err, "Unable to complete multipart upload.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path) writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return return
} }

View File

@ -25,8 +25,6 @@ import (
"strings" "strings"
"syscall" "syscall"
"unsafe" "unsafe"
"github.com/Sirupsen/logrus"
) )
const ( const (
@ -114,10 +112,6 @@ func readDir(dirPath string) (entries []string, err error) {
buf := make([]byte, readDirentBufSize) buf := make([]byte, readDirentBufSize)
d, err := os.Open(dirPath) d, err := os.Open(dirPath)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"dirPath": dirPath,
}).Debugf("Open failed with %s", err)
// File is really not found. // File is really not found.
if os.IsNotExist(err) { if os.IsNotExist(err) {
return nil, errFileNotFound return nil, errFileNotFound

View File

@ -22,18 +22,12 @@ import (
"io" "io"
"os" "os"
"strings" "strings"
"github.com/Sirupsen/logrus"
) )
// Return all the entries at the directory dirPath. // Return all the entries at the directory dirPath.
func readDir(dirPath string) (entries []string, err error) { func readDir(dirPath string) (entries []string, err error) {
d, err := os.Open(dirPath) d, err := os.Open(dirPath)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"dirPath": dirPath,
}).Debugf("Open failed with %s", err)
// File is really not found. // File is really not found.
if os.IsNotExist(err) { if os.IsNotExist(err) {
return nil, errFileNotFound return nil, errFileNotFound

151
posix.go
View File

@ -24,7 +24,6 @@ import (
"strings" "strings"
"syscall" "syscall"
"github.com/Sirupsen/logrus"
"github.com/minio/minio/pkg/disk" "github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/safe" "github.com/minio/minio/pkg/safe"
) )
@ -68,7 +67,7 @@ func checkPathLength(pathName string) error {
func isDirEmpty(dirname string) bool { func isDirEmpty(dirname string) bool {
f, err := os.Open(dirname) f, err := os.Open(dirname)
if err != nil { if err != nil {
log.Errorf("Unable to access directory %s, failed with %s", dirname, err) errorIf(err, "Unable to access directory.")
return false return false
} }
defer f.Close() defer f.Close()
@ -76,11 +75,10 @@ func isDirEmpty(dirname string) bool {
_, err = f.Readdirnames(1) _, err = f.Readdirnames(1)
if err != nil { if err != nil {
if err == io.EOF { if err == io.EOF {
// Returns true if we have reached EOF, directory is // Returns true if we have reached EOF, directory is indeed empty.
// indeed empty.
return true return true
} }
log.Errorf("Unable to list directory %s, failed with %s", dirname, err) errorIf(err, "Unable to list directory.")
return false return false
} }
// Directory is not empty. // Directory is not empty.
@ -90,7 +88,6 @@ func isDirEmpty(dirname string) bool {
// Initialize a new storage disk. // Initialize a new storage disk.
func newPosix(diskPath string) (StorageAPI, error) { func newPosix(diskPath string) (StorageAPI, error) {
if diskPath == "" { if diskPath == "" {
log.Error("Disk cannot be empty")
return nil, errInvalidArgument return nil, errInvalidArgument
} }
fs := fsStorage{ fs := fsStorage{
@ -99,24 +96,14 @@ func newPosix(diskPath string) (StorageAPI, error) {
} }
st, err := os.Stat(diskPath) st, err := os.Stat(diskPath)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": diskPath,
}).Debugf("Stat failed, with error %s.", err)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return fs, errDiskNotFound return fs, errDiskNotFound
} }
return fs, err return fs, err
} }
if !st.IsDir() { if !st.IsDir() {
log.WithFields(logrus.Fields{
"diskPath": diskPath,
}).Debugf("Disk %s.", syscall.ENOTDIR)
return fs, syscall.ENOTDIR return fs, syscall.ENOTDIR
} }
log.WithFields(logrus.Fields{
"diskPath": diskPath,
"minFreeDisk": fsMinSpacePercent,
}).Debugf("Successfully configured FS storage API.")
return fs, nil return fs, nil
} }
@ -127,9 +114,6 @@ func checkDiskFree(diskPath string, minFreeDisk int64) (err error) {
} }
di, err := disk.GetInfo(diskPath) di, err := disk.GetInfo(diskPath)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": diskPath,
}).Debugf("Failed to get disk info, %s", err)
return err return err
} }
@ -137,10 +121,6 @@ func checkDiskFree(diskPath string, minFreeDisk int64) (err error) {
// space used for journalling, inodes etc. // space used for journalling, inodes etc.
availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100 availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
if int64(availableDiskSpace) <= minFreeDisk { if int64(availableDiskSpace) <= minFreeDisk {
log.WithFields(logrus.Fields{
"availableDiskSpace": int64(availableDiskSpace),
"minFreeDiskSpace": minFreeDisk,
}).Debugf("Disk free space has reached its limit.")
return errDiskFull return errDiskFull
} }
@ -175,9 +155,6 @@ func getAllUniqueVols(dirPath string) ([]VolInfo, error) {
} }
entries, err := readDir(dirPath) entries, err := readDir(dirPath)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"dirPath": dirPath,
}).Debugf("readDir failed with error %s", err)
return nil, errDiskNotFound return nil, errDiskNotFound
} }
var volsInfo []VolInfo var volsInfo []VolInfo
@ -189,9 +166,6 @@ func getAllUniqueVols(dirPath string) ([]VolInfo, error) {
var fi os.FileInfo var fi os.FileInfo
fi, err = os.Stat(pathJoin(dirPath, entry)) fi, err = os.Stat(pathJoin(dirPath, entry))
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"path": pathJoin(dirPath, entry),
}).Debugf("Stat failed with error %s", err)
// If the file does not exist, skip the entry. // If the file does not exist, skip the entry.
if os.IsNotExist(err) { if os.IsNotExist(err) {
continue continue
@ -241,14 +215,8 @@ func (s fsStorage) getVolumeDir(volume string) (string, error) {
} }
return volumeDir, errVolumeNotFound return volumeDir, errVolumeNotFound
} else if os.IsPermission(err) { } else if os.IsPermission(err) {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
}).Debugf("Stat failed with error %s", err)
return volumeDir, errVolumeAccessDenied return volumeDir, errVolumeAccessDenied
} }
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
}).Debugf("Stat failed with error %s", err)
return volumeDir, err return volumeDir, err
} }
@ -271,11 +239,6 @@ func (s fsStorage) MakeVol(volume string) (err error) {
return os.Mkdir(volumeDir, 0700) return os.Mkdir(volumeDir, 0700)
} }
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("MakeVol failed with %s", err)
// For all other errors return here. // For all other errors return here.
return err return err
} }
@ -286,16 +249,10 @@ func (s fsStorage) ListVols() (volsInfo []VolInfo, err error) {
var diskInfo disk.Info var diskInfo disk.Info
diskInfo, err = disk.GetInfo(s.diskPath) diskInfo, err = disk.GetInfo(s.diskPath)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
}).Debugf("Failed to get disk info, %s", err)
return nil, err return nil, err
} }
volsInfo, err = getAllUniqueVols(s.diskPath) volsInfo, err = getAllUniqueVols(s.diskPath)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
}).Debugf("getAllUniqueVols failed with %s", err)
return nil, err return nil, err
} }
for i, vol := range volsInfo { for i, vol := range volsInfo {
@ -320,20 +277,12 @@ func (s fsStorage) StatVol(volume string) (volInfo VolInfo, err error) {
// Verify if volume is valid and it exists. // Verify if volume is valid and it exists.
volumeDir, err := s.getVolumeDir(volume) volumeDir, err := s.getVolumeDir(volume)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("getVolumeDir failed with %s", err)
return VolInfo{}, err return VolInfo{}, err
} }
// Stat a volume entry. // Stat a volume entry.
var st os.FileInfo var st os.FileInfo
st, err = os.Stat(volumeDir) st, err = os.Stat(volumeDir)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("Stat on the volume failed with %s", err)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return VolInfo{}, errVolumeNotFound return VolInfo{}, errVolumeNotFound
} }
@ -343,10 +292,6 @@ func (s fsStorage) StatVol(volume string) (volInfo VolInfo, err error) {
var diskInfo disk.Info var diskInfo disk.Info
diskInfo, err = disk.GetInfo(s.diskPath) diskInfo, err = disk.GetInfo(s.diskPath)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("Failed to get disk info, %s", err)
return VolInfo{}, err return VolInfo{}, err
} }
// As os.Stat() doesn't carry other than ModTime(), use ModTime() // As os.Stat() doesn't carry other than ModTime(), use ModTime()
@ -366,18 +311,10 @@ func (s fsStorage) DeleteVol(volume string) error {
// Verify if volume is valid and it exists. // Verify if volume is valid and it exists.
volumeDir, err := s.getVolumeDir(volume) volumeDir, err := s.getVolumeDir(volume)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("getVolumeDir failed with %s", err)
return err return err
} }
err = os.Remove(volumeDir) err = os.Remove(volumeDir)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("Volume remove failed with %s", err)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return errVolumeNotFound return errVolumeNotFound
} else if strings.Contains(err.Error(), "directory is not empty") { } else if strings.Contains(err.Error(), "directory is not empty") {
@ -400,19 +337,11 @@ func (s fsStorage) ListDir(volume, dirPath string) ([]string, error) {
// Verify if volume is valid and it exists. // Verify if volume is valid and it exists.
volumeDir, err := s.getVolumeDir(volume) volumeDir, err := s.getVolumeDir(volume)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("getVolumeDir failed with %s", err)
return nil, err return nil, err
} }
// Stat a volume entry. // Stat a volume entry.
_, err = os.Stat(volumeDir) _, err = os.Stat(volumeDir)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("Stat on the volume failed with %s", err)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return nil, errVolumeNotFound return nil, errVolumeNotFound
} }
@ -425,10 +354,6 @@ func (s fsStorage) ListDir(volume, dirPath string) ([]string, error) {
func (s fsStorage) ReadFile(volume string, path string, offset int64) (readCloser io.ReadCloser, err error) { func (s fsStorage) ReadFile(volume string, path string, offset int64) (readCloser io.ReadCloser, err error) {
volumeDir, err := s.getVolumeDir(volume) volumeDir, err := s.getVolumeDir(volume)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("getVolumeDir failed with %s", err)
return nil, err return nil, err
} }
@ -443,36 +368,19 @@ func (s fsStorage) ReadFile(volume string, path string, offset int64) (readClose
} else if os.IsPermission(err) { } else if os.IsPermission(err) {
return nil, errFileAccessDenied return nil, errFileAccessDenied
} }
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"filePath": filePath,
}).Debugf("Opening a file failed with %s", err)
return nil, err return nil, err
} }
st, err := file.Stat() st, err := file.Stat()
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"filePath": filePath,
}).Debugf("Stat failed with %s", err)
return nil, err return nil, err
} }
// Verify if its not a regular file, since subsequent Seek is undefined. // Verify if its not a regular file, since subsequent Seek is undefined.
if !st.Mode().IsRegular() { if !st.Mode().IsRegular() {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"filePath": filePath,
}).Debugf("Unexpected type %s", errIsNotRegular)
return nil, errFileNotFound return nil, errFileNotFound
} }
// Seek to requested offset. // Seek to requested offset.
_, err = file.Seek(offset, os.SEEK_SET) _, err = file.Seek(offset, os.SEEK_SET)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"filePath": filePath,
"offset": offset,
}).Debugf("Seek failed with %s", err)
return nil, err return nil, err
} }
return file, nil return file, nil
@ -482,10 +390,6 @@ func (s fsStorage) ReadFile(volume string, path string, offset int64) (readClose
func (s fsStorage) CreateFile(volume, path string) (writeCloser io.WriteCloser, err error) { func (s fsStorage) CreateFile(volume, path string) (writeCloser io.WriteCloser, err error) {
volumeDir, err := s.getVolumeDir(volume) volumeDir, err := s.getVolumeDir(volume)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("getVolumeDir failed with %s", err)
return nil, err return nil, err
} }
if err = checkDiskFree(s.diskPath, s.minFreeDisk); err != nil { if err = checkDiskFree(s.diskPath, s.minFreeDisk); err != nil {
@ -499,10 +403,6 @@ func (s fsStorage) CreateFile(volume, path string) (writeCloser io.WriteCloser,
var st os.FileInfo var st os.FileInfo
if st, err = os.Stat(filePath); err == nil { if st, err = os.Stat(filePath); err == nil {
if st.IsDir() { if st.IsDir() {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"filePath": filePath,
}).Debugf("Unexpected type %s", errIsNotRegular)
return nil, errIsNotRegular return nil, errIsNotRegular
} }
} }
@ -521,10 +421,6 @@ func (s fsStorage) CreateFile(volume, path string) (writeCloser io.WriteCloser,
func (s fsStorage) StatFile(volume, path string) (file FileInfo, err error) { func (s fsStorage) StatFile(volume, path string) (file FileInfo, err error) {
volumeDir, err := s.getVolumeDir(volume) volumeDir, err := s.getVolumeDir(volume)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("getVolumeDir failed with %s", err)
return FileInfo{}, err return FileInfo{}, err
} }
@ -534,11 +430,6 @@ func (s fsStorage) StatFile(volume, path string) (file FileInfo, err error) {
} }
st, err := os.Stat(filePath) st, err := os.Stat(filePath)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"filePath": filePath,
}).Debugf("Stat failed with %s", err)
// File is really not found. // File is really not found.
if os.IsNotExist(err) { if os.IsNotExist(err) {
return FileInfo{}, errFileNotFound return FileInfo{}, errFileNotFound
@ -555,10 +446,6 @@ func (s fsStorage) StatFile(volume, path string) (file FileInfo, err error) {
// If its a directory its not a regular file. // If its a directory its not a regular file.
if st.Mode().IsDir() { if st.Mode().IsDir() {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"filePath": filePath,
}).Debugf("File is %s.", errIsNotRegular)
return FileInfo{}, errFileNotFound return FileInfo{}, errFileNotFound
} }
return FileInfo{ return FileInfo{
@ -578,9 +465,6 @@ func deleteFile(basePath, deletePath string) error {
// Verify if the path exists. // Verify if the path exists.
pathSt, err := os.Stat(deletePath) pathSt, err := os.Stat(deletePath)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"deletePath": deletePath,
}).Debugf("Stat failed with %s", err)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return errFileNotFound return errFileNotFound
} else if os.IsPermission(err) { } else if os.IsPermission(err) {
@ -594,17 +478,10 @@ func deleteFile(basePath, deletePath string) error {
} }
// Attempt to remove path. // Attempt to remove path.
if err := os.Remove(deletePath); err != nil { if err := os.Remove(deletePath); err != nil {
log.WithFields(logrus.Fields{
"deletePath": deletePath,
}).Debugf("Remove failed with %s", err)
return err return err
} }
// Recursively go down the next path and delete again. // Recursively go down the next path and delete again.
if err := deleteFile(basePath, slashpath.Dir(deletePath)); err != nil { if err := deleteFile(basePath, slashpath.Dir(deletePath)); err != nil {
log.WithFields(logrus.Fields{
"basePath": basePath,
"deleteDir": slashpath.Dir(deletePath),
}).Debugf("deleteFile failed with %s", err)
return err return err
} }
return nil return nil
@ -614,10 +491,6 @@ func deleteFile(basePath, deletePath string) error {
func (s fsStorage) DeleteFile(volume, path string) error { func (s fsStorage) DeleteFile(volume, path string) error {
volumeDir, err := s.getVolumeDir(volume) volumeDir, err := s.getVolumeDir(volume)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("getVolumeDir failed with %s", err)
return err return err
} }
@ -636,39 +509,25 @@ func (s fsStorage) DeleteFile(volume, path string) error {
func (s fsStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error { func (s fsStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error {
srcVolumeDir, err := s.getVolumeDir(srcVolume) srcVolumeDir, err := s.getVolumeDir(srcVolume)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": srcVolume,
}).Errorf("getVolumeDir failed with %s", err)
return err return err
} }
dstVolumeDir, err := s.getVolumeDir(dstVolume) dstVolumeDir, err := s.getVolumeDir(dstVolume)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": dstVolume,
}).Errorf("getVolumeDir failed with %s", err)
return err return err
} }
srcIsDir := strings.HasSuffix(srcPath, slashSeparator) srcIsDir := strings.HasSuffix(srcPath, slashSeparator)
dstIsDir := strings.HasSuffix(dstPath, slashSeparator) dstIsDir := strings.HasSuffix(dstPath, slashSeparator)
// for XL src and dst are always directories. // Either src and dst have to be directories or files, else return error.
// for FS src and dst are always files.
if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) { if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) {
// Either src and dst have to be directories or files, else return error.
log.Errorf("source and destination are not of same file type. source=%s, destination=%s", srcPath, dstPath)
return errFileAccessDenied return errFileAccessDenied
} }
if srcIsDir { if srcIsDir {
// If source is a directory we expect the destination to be non-existent always. // If source is a directory we expect the destination to be non-existent always.
_, err = os.Stat(slashpath.Join(dstVolumeDir, dstPath)) _, err = os.Stat(slashpath.Join(dstVolumeDir, dstPath))
if err == nil { if err == nil {
log.Errorf("Source is a directory and destination exists. source=%s, destination=%s", srcPath, dstPath)
return errFileAccessDenied return errFileAccessDenied
} }
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
// Return error for any error other than ENOENT.
log.Errorf("Stat failed with %s", err)
return err return err
} }
// Destination does not exist, hence proceed with the rename. // Destination does not exist, hence proceed with the rename.
@ -678,7 +537,6 @@ func (s fsStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) err
if strings.Contains(err.Error(), "not a directory") { if strings.Contains(err.Error(), "not a directory") {
return errFileAccessDenied return errFileAccessDenied
} }
log.Errorf("os.MkdirAll failed with %s", err)
return err return err
} }
err = os.Rename(slashpath.Join(srcVolumeDir, srcPath), slashpath.Join(dstVolumeDir, dstPath)) err = os.Rename(slashpath.Join(srcVolumeDir, srcPath), slashpath.Join(dstVolumeDir, dstPath))
@ -686,7 +544,6 @@ func (s fsStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) err
if os.IsNotExist(err) { if os.IsNotExist(err) {
return errFileNotFound return errFileNotFound
} }
log.Errorf("os.Rename failed with %s", err)
return err return err
} }
return nil return nil

View File

@ -42,11 +42,11 @@ func newObjectLayer(exportPaths ...string) (ObjectLayer, error) {
// configureServer handler returns final handler for the http server. // configureServer handler returns final handler for the http server.
func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler { func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler {
objAPI, err := newObjectLayer(srvCmdConfig.exportPaths...) objAPI, err := newObjectLayer(srvCmdConfig.exportPaths...)
fatalIf(err, "Initializing object layer failed.", nil) fatalIf(err, "Unable to intialize object layer.")
// Initialize storage rpc server. // Initialize storage rpc server.
storageRPC, err := newRPCServer(srvCmdConfig.exportPaths[0]) // FIXME: should only have one path. storageRPC, err := newRPCServer(srvCmdConfig.exportPaths[0]) // FIXME: should only have one path.
fatalIf(err, "Initializing storage rpc server failed.", nil) fatalIf(err, "Unable to initialize storage RPC server.")
// Initialize API. // Initialize API.
apiHandlers := objectAPIHandlers{ apiHandlers := objectAPIHandlers{

View File

@ -27,8 +27,6 @@ import (
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/Sirupsen/logrus"
) )
type networkStorage struct { type networkStorage struct {
@ -80,9 +78,6 @@ func toStorageErr(err error) error {
func newRPCClient(networkPath string) (StorageAPI, error) { func newRPCClient(networkPath string) (StorageAPI, error) {
// Input validation. // Input validation.
if networkPath == "" || strings.LastIndex(networkPath, ":") == -1 { if networkPath == "" || strings.LastIndex(networkPath, ":") == -1 {
log.WithFields(logrus.Fields{
"networkPath": networkPath,
}).Debugf("Network path is malformed, should be of form <ip>:<port>:<export_dir>")
return nil, errInvalidArgument return nil, errInvalidArgument
} }
@ -92,10 +87,6 @@ func newRPCClient(networkPath string) (StorageAPI, error) {
// Dial minio rpc storage http path. // Dial minio rpc storage http path.
rpcClient, err := rpc.DialHTTPPath("tcp", netAddr, storageRPCPath) rpcClient, err := rpc.DialHTTPPath("tcp", netAddr, storageRPCPath)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"netAddr": netAddr,
"storageRPCPath": storageRPCPath,
}).Debugf("RPC HTTP dial failed with %s", err)
return nil, err return nil, err
} }
@ -125,9 +116,6 @@ func newRPCClient(networkPath string) (StorageAPI, error) {
func (n networkStorage) MakeVol(volume string) error { func (n networkStorage) MakeVol(volume string) error {
reply := GenericReply{} reply := GenericReply{}
if err := n.rpcClient.Call("Storage.MakeVolHandler", volume, &reply); err != nil { if err := n.rpcClient.Call("Storage.MakeVolHandler", volume, &reply); err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
}).Debugf("Storage.MakeVolHandler returned an error %s", err)
return toStorageErr(err) return toStorageErr(err)
} }
return nil return nil
@ -138,7 +126,6 @@ func (n networkStorage) ListVols() (vols []VolInfo, err error) {
ListVols := ListVolsReply{} ListVols := ListVolsReply{}
err = n.rpcClient.Call("Storage.ListVolsHandler", "", &ListVols) err = n.rpcClient.Call("Storage.ListVolsHandler", "", &ListVols)
if err != nil { if err != nil {
log.Debugf("Storage.ListVolsHandler returned an error %s", err)
return nil, err return nil, err
} }
return ListVols.Vols, nil return ListVols.Vols, nil
@ -147,9 +134,6 @@ func (n networkStorage) ListVols() (vols []VolInfo, err error) {
// StatVol - get current Stat volume info. // StatVol - get current Stat volume info.
func (n networkStorage) StatVol(volume string) (volInfo VolInfo, err error) { func (n networkStorage) StatVol(volume string) (volInfo VolInfo, err error) {
if err = n.rpcClient.Call("Storage.StatVolHandler", volume, &volInfo); err != nil { if err = n.rpcClient.Call("Storage.StatVolHandler", volume, &volInfo); err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
}).Debugf("Storage.StatVolHandler returned an error %s", err)
return VolInfo{}, toStorageErr(err) return VolInfo{}, toStorageErr(err)
} }
return volInfo, nil return volInfo, nil
@ -159,9 +143,6 @@ func (n networkStorage) StatVol(volume string) (volInfo VolInfo, err error) {
func (n networkStorage) DeleteVol(volume string) error { func (n networkStorage) DeleteVol(volume string) error {
reply := GenericReply{} reply := GenericReply{}
if err := n.rpcClient.Call("Storage.DeleteVolHandler", volume, &reply); err != nil { if err := n.rpcClient.Call("Storage.DeleteVolHandler", volume, &reply); err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
}).Debugf("Storage.DeleteVolHandler returned an error %s", err)
return toStorageErr(err) return toStorageErr(err)
} }
return nil return nil
@ -181,10 +162,6 @@ func (n networkStorage) CreateFile(volume, path string) (writeCloser io.WriteClo
go func() { go func() {
resp, err := n.httpClient.Post(writeURL.String(), contentType, readCloser) resp, err := n.httpClient.Post(writeURL.String(), contentType, readCloser)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Debugf("CreateFile HTTP POST failed to upload data with error %s", err)
readCloser.CloseWithError(err) readCloser.CloseWithError(err)
return return
} }
@ -210,10 +187,6 @@ func (n networkStorage) StatFile(volume, path string) (fileInfo FileInfo, err er
Vol: volume, Vol: volume,
Path: path, Path: path,
}, &fileInfo); err != nil { }, &fileInfo); err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Debugf("Storage.StatFileHandler failed with %s", err)
return FileInfo{}, toStorageErr(err) return FileInfo{}, toStorageErr(err)
} }
return fileInfo, nil return fileInfo, nil
@ -230,10 +203,6 @@ func (n networkStorage) ReadFile(volume string, path string, offset int64) (read
readURL.RawQuery = readQuery.Encode() readURL.RawQuery = readQuery.Encode()
resp, err := n.httpClient.Get(readURL.String()) resp, err := n.httpClient.Get(readURL.String())
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Debugf("ReadFile http Get failed with error %s", err)
return nil, err return nil, err
} }
if resp != nil { if resp != nil {
@ -253,10 +222,6 @@ func (n networkStorage) ListDir(volume, path string) (entries []string, err erro
Vol: volume, Vol: volume,
Path: path, Path: path,
}, &entries); err != nil { }, &entries); err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Debugf("Storage.ListDirHandlers failed with %s", err)
return nil, toStorageErr(err) return nil, toStorageErr(err)
} }
// Return successfully unmarshalled results. // Return successfully unmarshalled results.
@ -270,10 +235,6 @@ func (n networkStorage) DeleteFile(volume, path string) (err error) {
Vol: volume, Vol: volume,
Path: path, Path: path,
}, &reply); err != nil { }, &reply); err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Debugf("Storage.DeleteFileHandler failed with %s", err)
return toStorageErr(err) return toStorageErr(err)
} }
return nil return nil
@ -288,12 +249,6 @@ func (n networkStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string
DstVol: dstVolume, DstVol: dstVolume,
DstPath: dstPath, DstPath: dstPath,
}, &reply); err != nil { }, &reply); err != nil {
log.WithFields(logrus.Fields{
"srcVolume": srcVolume,
"srcPath": srcPath,
"dstVolume": dstVolume,
"dstPath": dstPath,
}).Errorf("Storage.RenameFileHandler failed with %s", err)
return toStorageErr(err) return toStorageErr(err)
} }
return nil return nil

View File

@ -6,7 +6,6 @@ import (
"net/rpc" "net/rpc"
"strconv" "strconv"
"github.com/Sirupsen/logrus"
router "github.com/gorilla/mux" router "github.com/gorilla/mux"
) )
@ -22,9 +21,6 @@ type storageServer struct {
func (s *storageServer) MakeVolHandler(arg *string, reply *GenericReply) error { func (s *storageServer) MakeVolHandler(arg *string, reply *GenericReply) error {
err := s.storage.MakeVol(*arg) err := s.storage.MakeVol(*arg)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": *arg,
}).Debugf("MakeVol failed with error %s", err)
return err return err
} }
return nil return nil
@ -34,7 +30,6 @@ func (s *storageServer) MakeVolHandler(arg *string, reply *GenericReply) error {
func (s *storageServer) ListVolsHandler(arg *string, reply *ListVolsReply) error { func (s *storageServer) ListVolsHandler(arg *string, reply *ListVolsReply) error {
vols, err := s.storage.ListVols() vols, err := s.storage.ListVols()
if err != nil { if err != nil {
log.Debugf("Listsvols failed with error %s", err)
return err return err
} }
reply.Vols = vols reply.Vols = vols
@ -45,9 +40,6 @@ func (s *storageServer) ListVolsHandler(arg *string, reply *ListVolsReply) error
func (s *storageServer) StatVolHandler(arg *string, reply *VolInfo) error { func (s *storageServer) StatVolHandler(arg *string, reply *VolInfo) error {
volInfo, err := s.storage.StatVol(*arg) volInfo, err := s.storage.StatVol(*arg)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": *arg,
}).Debugf("StatVol failed with error %s", err)
return err return err
} }
*reply = volInfo *reply = volInfo
@ -59,9 +51,6 @@ func (s *storageServer) StatVolHandler(arg *string, reply *VolInfo) error {
func (s *storageServer) DeleteVolHandler(arg *string, reply *GenericReply) error { func (s *storageServer) DeleteVolHandler(arg *string, reply *GenericReply) error {
err := s.storage.DeleteVol(*arg) err := s.storage.DeleteVol(*arg)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": *arg,
}).Debugf("DeleteVol failed with error %s", err)
return err return err
} }
return nil return nil
@ -73,10 +62,6 @@ func (s *storageServer) DeleteVolHandler(arg *string, reply *GenericReply) error
func (s *storageServer) StatFileHandler(arg *StatFileArgs, reply *FileInfo) error { func (s *storageServer) StatFileHandler(arg *StatFileArgs, reply *FileInfo) error {
fileInfo, err := s.storage.StatFile(arg.Vol, arg.Path) fileInfo, err := s.storage.StatFile(arg.Vol, arg.Path)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": arg.Vol,
"path": arg.Path,
}).Debugf("StatFile failed with error %s", err)
return err return err
} }
*reply = fileInfo *reply = fileInfo
@ -87,10 +72,6 @@ func (s *storageServer) StatFileHandler(arg *StatFileArgs, reply *FileInfo) erro
func (s *storageServer) ListDirHandler(arg *ListDirArgs, reply *[]string) error { func (s *storageServer) ListDirHandler(arg *ListDirArgs, reply *[]string) error {
entries, err := s.storage.ListDir(arg.Vol, arg.Path) entries, err := s.storage.ListDir(arg.Vol, arg.Path)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": arg.Vol,
"path": arg.Path,
}).Debugf("ListDir failed with error %s", err)
return err return err
} }
*reply = entries *reply = entries
@ -101,10 +82,6 @@ func (s *storageServer) ListDirHandler(arg *ListDirArgs, reply *[]string) error
func (s *storageServer) DeleteFileHandler(arg *DeleteFileArgs, reply *GenericReply) error { func (s *storageServer) DeleteFileHandler(arg *DeleteFileArgs, reply *GenericReply) error {
err := s.storage.DeleteFile(arg.Vol, arg.Path) err := s.storage.DeleteFile(arg.Vol, arg.Path)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": arg.Vol,
"path": arg.Path,
}).Debugf("DeleteFile failed with error %s", err)
return err return err
} }
return nil return nil
@ -114,12 +91,6 @@ func (s *storageServer) DeleteFileHandler(arg *DeleteFileArgs, reply *GenericRep
func (s *storageServer) RenameFileHandler(arg *RenameFileArgs, reply *GenericReply) error { func (s *storageServer) RenameFileHandler(arg *RenameFileArgs, reply *GenericReply) error {
err := s.storage.RenameFile(arg.SrcVol, arg.SrcPath, arg.DstVol, arg.DstPath) err := s.storage.RenameFile(arg.SrcVol, arg.SrcPath, arg.DstVol, arg.DstPath)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"srcVolume": arg.SrcVol,
"srcPath": arg.SrcPath,
"dstVolume": arg.DstVol,
"dstPath": arg.DstPath,
}).Errorf("RenameFile failed with error %s", err)
return err return err
} }
return nil return nil
@ -151,10 +122,6 @@ func registerStorageRPCRouter(mux *router.Router, stServer *storageServer) {
path := vars["path"] path := vars["path"]
writeCloser, err := stServer.storage.CreateFile(volume, path) writeCloser, err := stServer.storage.CreateFile(volume, path)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Debugf("CreateFile failed with error %s", err)
httpErr := http.StatusInternalServerError httpErr := http.StatusInternalServerError
if err == errVolumeNotFound { if err == errVolumeNotFound {
httpErr = http.StatusNotFound httpErr = http.StatusNotFound
@ -166,10 +133,6 @@ func registerStorageRPCRouter(mux *router.Router, stServer *storageServer) {
} }
reader := r.Body reader := r.Body
if _, err = io.Copy(writeCloser, reader); err != nil { if _, err = io.Copy(writeCloser, reader); err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Debugf("Copying incoming reader to writer failed %s", err)
safeCloseAndRemove(writeCloser) safeCloseAndRemove(writeCloser)
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
@ -184,19 +147,11 @@ func registerStorageRPCRouter(mux *router.Router, stServer *storageServer) {
path := vars["path"] path := vars["path"]
offset, err := strconv.ParseInt(r.URL.Query().Get("offset"), 10, 64) offset, err := strconv.ParseInt(r.URL.Query().Get("offset"), 10, 64)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Debugf("Parse offset failure with error %s", err)
http.Error(w, err.Error(), http.StatusBadRequest) http.Error(w, err.Error(), http.StatusBadRequest)
return return
} }
readCloser, err := stServer.storage.ReadFile(volume, path, offset) readCloser, err := stServer.storage.ReadFile(volume, path, offset)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Debugf("ReadFile failed with error %s", err)
httpErr := http.StatusBadRequest httpErr := http.StatusBadRequest
if err == errVolumeNotFound { if err == errVolumeNotFound {
httpErr = http.StatusNotFound httpErr = http.StatusNotFound

View File

@ -91,14 +91,14 @@ func configureServer(srvCmdConfig serverCmdConfig) *http.Server {
// getListenIPs - gets all the ips to listen on. // getListenIPs - gets all the ips to listen on.
func getListenIPs(httpServerConf *http.Server) (hosts []string, port string) { func getListenIPs(httpServerConf *http.Server) (hosts []string, port string) {
host, port, err := net.SplitHostPort(httpServerConf.Addr) host, port, err := net.SplitHostPort(httpServerConf.Addr)
fatalIf(err, "Unable to split host port.", nil) fatalIf(err, "Unable to parse host port.")
switch { switch {
case host != "": case host != "":
hosts = append(hosts, host) hosts = append(hosts, host)
default: default:
addrs, err := net.InterfaceAddrs() addrs, err := net.InterfaceAddrs()
fatalIf(err, "Unable to get interface address.", nil) fatalIf(err, "Unable to determine network interface address.")
for _, addr := range addrs { for _, addr := range addrs {
if addr.Network() == "ip+net" { if addr.Network() == "ip+net" {
host := strings.Split(addr.String(), "/")[0] host := strings.Split(addr.String(), "/")[0]
@ -126,7 +126,7 @@ func printListenIPs(tls bool, hosts []string, port string) {
func initServerConfig(c *cli.Context) { func initServerConfig(c *cli.Context) {
// Save new config. // Save new config.
err := serverConfig.Save() err := serverConfig.Save()
fatalIf(err, "Unable to save config.", nil) fatalIf(err, "Unable to save config.")
// Fetch access keys from environment variables if any and update the config. // Fetch access keys from environment variables if any and update the config.
accessKey := os.Getenv("MINIO_ACCESS_KEY") accessKey := os.Getenv("MINIO_ACCESS_KEY")
@ -135,10 +135,10 @@ func initServerConfig(c *cli.Context) {
// Validate if both keys are specified and they are valid save them. // Validate if both keys are specified and they are valid save them.
if accessKey != "" && secretKey != "" { if accessKey != "" && secretKey != "" {
if !isValidAccessKey.MatchString(accessKey) { if !isValidAccessKey.MatchString(accessKey) {
fatalIf(errInvalidArgument, "Access key does not have required length", nil) fatalIf(errInvalidArgument, "Invalid access key.")
} }
if !isValidSecretKey.MatchString(secretKey) { if !isValidSecretKey.MatchString(secretKey) {
fatalIf(errInvalidArgument, "Secret key does not have required length", nil) fatalIf(errInvalidArgument, "Invalid secret key.")
} }
serverConfig.SetCredential(credential{ serverConfig.SetCredential(credential{
AccessKeyID: accessKey, AccessKeyID: accessKey,
@ -162,9 +162,9 @@ func checkServerSyntax(c *cli.Context) {
// Extract port number from address address should be of the form host:port. // Extract port number from address address should be of the form host:port.
func getPort(address string) int { func getPort(address string) int {
_, portStr, err := net.SplitHostPort(address) _, portStr, err := net.SplitHostPort(address)
fatalIf(err, "Unable to split host port.", nil) fatalIf(err, "Unable to parse host port.")
portInt, err := strconv.Atoi(portStr) portInt, err := strconv.Atoi(portStr)
fatalIf(err, "Invalid port number.", nil) fatalIf(err, "Invalid port number.")
return portInt return portInt
} }
@ -201,17 +201,17 @@ func checkPortAvailability(port int) {
} }
ifcs, err := net.Interfaces() ifcs, err := net.Interfaces()
if err != nil { if err != nil {
fatalIf(err, "Unable to list interfaces.", nil) fatalIf(err, "Unable to list interfaces.")
} }
for _, ifc := range ifcs { for _, ifc := range ifcs {
addrs, err := ifc.Addrs() addrs, err := ifc.Addrs()
if err != nil { if err != nil {
fatalIf(err, fmt.Sprintf("Unable to list addresses on interface %s.", ifc.Name), nil) fatalIf(err, "Unable to list addresses on interface %s.", ifc.Name)
} }
for _, addr := range addrs { for _, addr := range addrs {
ipnet, ok := addr.(*net.IPNet) ipnet, ok := addr.(*net.IPNet)
if !ok { if !ok {
errorIf(errors.New(""), "Interface type assertion to (*net.IPNet) failed.", nil) errorIf(errors.New(""), "Failed to assert type on (*net.IPNet) interface.")
continue continue
} }
ip := ipnet.IP ip := ipnet.IP
@ -224,14 +224,14 @@ func checkPortAvailability(port int) {
if err != nil { if err != nil {
if isAddrInUse(err) { if isAddrInUse(err) {
// Fail if port is already in use. // Fail if port is already in use.
fatalIf(err, fmt.Sprintf("Unable to listen on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil) fatalIf(err, "Unable to listen on %s:%.d.", tcpAddr.IP, tcpAddr.Port)
} else { } else {
// Ignore other errors. // Ignore other errors.
continue continue
} }
} }
if err = l.Close(); err != nil { if err = l.Close(); err != nil {
fatalIf(err, fmt.Sprintf("Unable to close listener on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil) fatalIf(err, "Unable to close listener on %s:%.d.", tcpAddr.IP, tcpAddr.Port)
} }
} }
} }
@ -316,5 +316,5 @@ func serverMain(c *cli.Context) {
// Fallback to http. // Fallback to http.
err = apiServer.ListenAndServe() err = apiServer.ListenAndServe()
} }
errorIf(err, "Failed to start the minio server.", nil) errorIf(err, "Failed to start minio server.")
} }

View File

@ -23,8 +23,6 @@ import (
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/Sirupsen/logrus"
) )
// listParams - list object params used for list object map // listParams - list object params used for list object map
@ -258,18 +256,10 @@ func saveTreeWalk(layer ObjectLayer, params listParams, walker *treeWalker) {
listObjectMapMutex.Lock() listObjectMapMutex.Lock()
defer listObjectMapMutex.Unlock() defer listObjectMapMutex.Unlock()
log.WithFields(logrus.Fields{
"bucket": params.bucket,
"recursive": params.recursive,
"marker": params.marker,
"prefix": params.prefix,
}).Debugf("saveTreeWalk has been invoked.")
walkers, _ := listObjectMap[params] walkers, _ := listObjectMap[params]
walkers = append(walkers, walker) walkers = append(walkers, walker)
listObjectMap[params] = walkers listObjectMap[params] = walkers
log.Debugf("Successfully saved in listObjectMap.")
} }
// Lookup the goroutine reference from map // Lookup the goroutine reference from map
@ -287,12 +277,6 @@ func lookupTreeWalk(layer ObjectLayer, params listParams) *treeWalker {
listObjectMapMutex.Lock() listObjectMapMutex.Lock()
defer listObjectMapMutex.Unlock() defer listObjectMapMutex.Unlock()
log.WithFields(logrus.Fields{
"bucket": params.bucket,
"recursive": params.recursive,
"marker": params.marker,
"prefix": params.prefix,
}).Debugf("lookupTreeWalk has been invoked.")
if walkChs, ok := listObjectMap[params]; ok { if walkChs, ok := listObjectMap[params]; ok {
for i, walkCh := range walkChs { for i, walkCh := range walkChs {
if !walkCh.timedOut { if !walkCh.timedOut {
@ -302,12 +286,6 @@ func lookupTreeWalk(layer ObjectLayer, params listParams) *treeWalker {
} else { } else {
delete(listObjectMap, params) delete(listObjectMap, params)
} }
log.WithFields(logrus.Fields{
"bucket": params.bucket,
"recursive": params.recursive,
"marker": params.marker,
"prefix": params.prefix,
}).Debugf("Found the previous saved listsObjects params.")
return walkCh return walkCh
} }
} }

View File

@ -89,7 +89,7 @@ func (u updateMessage) String() string {
return updateMessage("You are already running the most recent version of minio.") return updateMessage("You are already running the most recent version of minio.")
} }
msg, err := colorizeUpdateMessage(u.Download) msg, err := colorizeUpdateMessage(u.Download)
fatalIf(err, "Unable to colorize experimental update notification string "+msg+".", nil) fatalIf(err, "Unable to colorize update notice "+msg+".")
return msg return msg
} }
@ -97,7 +97,7 @@ func (u updateMessage) String() string {
func (u updateMessage) JSON() string { func (u updateMessage) JSON() string {
u.Status = "success" u.Status = "success"
updateMessageJSONBytes, err := json.Marshal(u) updateMessageJSONBytes, err := json.Marshal(u)
fatalIf((err), "Unable to marshal into JSON.", nil) fatalIf((err), "Unable to marshal into JSON.")
return string(updateMessageJSONBytes) return string(updateMessageJSONBytes)
} }
@ -166,12 +166,12 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {
if err != nil && noError { if err != nil && noError {
return updateMsg return updateMsg
} }
fatalIf((err), "Unable to read from update URL "+newUpdateURL+".", nil) fatalIf((err), "Unable to read from update URL "+newUpdateURL+".")
// Error out if 'update' command is issued for development based builds. // Error out if 'update' command is issued for development based builds.
if minioVersion == "DEVELOPMENT.GOGET" && !noError { if minioVersion == "DEVELOPMENT.GOGET" && !noError {
fatalIf((errors.New("")), fatalIf((errors.New("")),
"Update mechanism is not supported for go get based binary builds. Please download official releases from https://minio.io/#minio", nil) "Update mechanism is not supported for go get based binary builds. Please download official releases from https://minio.io/#minio")
} }
// Parse current minio version into RFC3339. // Parse current minio version into RFC3339.
@ -179,12 +179,12 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {
if err != nil && noError { if err != nil && noError {
return updateMsg return updateMsg
} }
fatalIf((err), "Unable to parse version string as time.", nil) fatalIf((err), "Unable to parse version string as time.")
// Verify if current minio version is zero. // Verify if current minio version is zero.
if current.IsZero() && !noError { if current.IsZero() && !noError {
fatalIf((errors.New("")), fatalIf((errors.New("")),
"Updates not supported for custom builds. Version field is empty. Please download official releases from https://minio.io/#minio", nil) "Updates mechanism is not supported for custom builds. Please download official releases from https://minio.io/#minio")
} }
// Verify if we have a valid http response i.e http.StatusOK. // Verify if we have a valid http response i.e http.StatusOK.
@ -194,7 +194,7 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {
if noError { if noError {
return updateMsg return updateMsg
} }
fatalIf((errors.New("")), "Update server responsed with "+data.Status, nil) fatalIf((errors.New("")), "Failed to retrieve update notice. "+data.Status)
} }
} }
@ -203,19 +203,19 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {
if err != nil && noError { if err != nil && noError {
return updateMsg return updateMsg
} }
fatalIf((err), "Fetching updates failed. Please try again.", nil) fatalIf((err), "Failed to retrieve update notice. Please try again later.")
// Parse the date if its valid. // Parse the date if its valid.
latest, err := parseReleaseData(string(updateBody)) latest, err := parseReleaseData(string(updateBody))
if err != nil && noError { if err != nil && noError {
return updateMsg return updateMsg
} }
fatalIf(err, "Please report this issue at https://github.com/minio/minio/issues.", nil) errMsg := "Failed to retrieve update notice. Please try again later. Please report this issue at https://github.com/minio/minio/issues"
fatalIf(err, errMsg)
// Verify if the date is not zero. // Verify if the date is not zero.
if latest.IsZero() && !noError { if latest.IsZero() && !noError {
fatalIf((errors.New("")), fatalIf((errors.New("")), errMsg)
"Unable to validate any update available at this time. Please open an issue at https://github.com/minio/minio/issues", nil)
} }
// Is the update latest?. // Is the update latest?.

View File

@ -20,8 +20,6 @@ import (
"errors" "errors"
slashpath "path" slashpath "path"
"sync" "sync"
"github.com/Sirupsen/logrus"
) )
// Get the highest integer from a given integer slice. // Get the highest integer from a given integer slice.
@ -125,12 +123,6 @@ func (xl XL) listOnlineDisks(volume, path string) (onlineDisks []StorageAPI, mda
// Verify if online disks count are lesser than readQuorum // Verify if online disks count are lesser than readQuorum
// threshold, return an error if yes. // threshold, return an error if yes.
if onlineDiskCount < xl.readQuorum { if onlineDiskCount < xl.readQuorum {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
"onlineDiskCount": onlineDiskCount,
"readQuorumCount": xl.readQuorum,
}).Errorf("%s", errReadQuorum)
return nil, xlMetaV1{}, false, errReadQuorum return nil, xlMetaV1{}, false, errReadQuorum
} }
} }

View File

@ -22,8 +22,6 @@ import (
slashpath "path" slashpath "path"
"sync" "sync"
"time" "time"
"github.com/Sirupsen/logrus"
) )
// Erasure block size. // Erasure block size.
@ -35,10 +33,7 @@ func (xl XL) cleanupCreateFileOps(volume, path string, writers ...io.WriteCloser
closeAndRemoveWriters(writers...) closeAndRemoveWriters(writers...)
for _, disk := range xl.storageDisks { for _, disk := range xl.storageDisks {
if err := disk.DeleteFile(volume, path); err != nil { if err := disk.DeleteFile(volume, path); err != nil {
log.WithFields(logrus.Fields{ errorIf(err, "Unable to delete file.")
"volume": volume,
"path": path,
}).Errorf("DeleteFile failed with %s", err)
} }
} }
} }
@ -47,7 +42,7 @@ func (xl XL) cleanupCreateFileOps(volume, path string, writers ...io.WriteCloser
func closeAndRemoveWriters(writers ...io.WriteCloser) { func closeAndRemoveWriters(writers ...io.WriteCloser) {
for _, writer := range writers { for _, writer := range writers {
if err := safeCloseAndRemove(writer); err != nil { if err := safeCloseAndRemove(writer); err != nil {
log.Errorf("Closing writer failed with %s", err) errorIf(err, "Failed to close writer.")
} }
} }
} }
@ -67,10 +62,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
// based on total number of errors and read quorum. // based on total number of errors and read quorum.
err := xl.errsToStorageErr(errs) err := xl.errsToStorageErr(errs)
if err != nil && err != errFileNotFound { if err != nil && err != errFileNotFound {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("%s", err)
reader.CloseWithError(err) reader.CloseWithError(err)
return return
} }
@ -96,12 +87,7 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
var writer io.WriteCloser var writer io.WriteCloser
writer, err = disk.CreateFile(volume, erasurePart) writer, err = disk.CreateFile(volume, erasurePart)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{ // Treat errFileNameTooLong specially
"volume": volume,
"path": path,
}).Errorf("CreateFile failed with %s", err)
// treat errFileNameTooLong specially
if err == errFileNameTooLong { if err == errFileNameTooLong {
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...) xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
reader.CloseWithError(err) reader.CloseWithError(err)
@ -122,14 +108,10 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
return return
} }
// create meta data file // Create meta data file.
var metadataWriter io.WriteCloser var metadataWriter io.WriteCloser
metadataWriter, err = disk.CreateFile(volume, xlMetaV1FilePath) metadataWriter, err = disk.CreateFile(volume, xlMetaV1FilePath)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("CreateFile failed with %s", err)
createFileError++ createFileError++
// We can safely allow CreateFile errors up to // We can safely allow CreateFile errors up to
@ -158,10 +140,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
if err != nil { if err != nil {
// Any unexpected errors, close the pipe reader with error. // Any unexpected errors, close the pipe reader with error.
if err != io.ErrUnexpectedEOF && err != io.EOF { if err != io.ErrUnexpectedEOF && err != io.EOF {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("io.ReadFull failed with %s", err)
// Remove all temp writers. // Remove all temp writers.
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...) xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
reader.CloseWithError(err) reader.CloseWithError(err)
@ -177,10 +155,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
var dataBlocks [][]byte var dataBlocks [][]byte
dataBlocks, err = xl.ReedSolomon.Split(dataBuffer[0:n]) dataBlocks, err = xl.ReedSolomon.Split(dataBuffer[0:n])
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("Splitting data buffer into erasure data blocks failed with %s", err)
// Remove all temp writers. // Remove all temp writers.
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...) xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
reader.CloseWithError(err) reader.CloseWithError(err)
@ -190,10 +164,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
// Encode parity blocks using data blocks. // Encode parity blocks using data blocks.
err = xl.ReedSolomon.Encode(dataBlocks) err = xl.ReedSolomon.Encode(dataBlocks)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("Encoding erasure data blocks failed with %s", err)
// Remove all temp writers upon error. // Remove all temp writers upon error.
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...) xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
reader.CloseWithError(err) reader.CloseWithError(err)
@ -220,10 +190,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
if wErr == nil { if wErr == nil {
continue continue
} }
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("Writing encoded blocks failed with %s", wErr)
// Remove all temp writers upon error. // Remove all temp writers upon error.
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...) xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
reader.CloseWithError(wErr) reader.CloseWithError(wErr)
@ -255,7 +221,7 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
// Case: when storageDisks is 16 and write quorumDisks is 13, // Case: when storageDisks is 16 and write quorumDisks is 13,
// meta data write failure up to 2 can be considered. // meta data write failure up to 2 can be considered.
// currently we fail for any meta data writes // currently we fail for any meta data writes
for index, metadataWriter := range metadataWriters { for _, metadataWriter := range metadataWriters {
if metadataWriter == nil { if metadataWriter == nil {
continue continue
} }
@ -263,11 +229,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
// Write metadata. // Write metadata.
err = metadata.Write(metadataWriter) err = metadata.Write(metadataWriter)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
"diskIndex": index,
}).Errorf("Writing metadata failed with %s", err)
// Remove temporary files. // Remove temporary files.
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...) xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
reader.CloseWithError(err) reader.CloseWithError(err)
@ -286,11 +247,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
} }
// Safely wrote, now rename to its actual location. // Safely wrote, now rename to its actual location.
if err = writer.Close(); err != nil { if err = writer.Close(); err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
"diskIndex": index,
}).Errorf("Safely committing part failed with %s", err)
// Remove all temp writers upon error. // Remove all temp writers upon error.
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...) xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
reader.CloseWithError(err) reader.CloseWithError(err)
@ -302,11 +258,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
} }
// Safely wrote, now rename to its actual location. // Safely wrote, now rename to its actual location.
if err = metadataWriters[index].Close(); err != nil { if err = metadataWriters[index].Close(); err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
"diskIndex": index,
}).Errorf("Safely committing metadata failed with %s", err)
// Remove all temp writers upon error. // Remove all temp writers upon error.
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...) xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
reader.CloseWithError(err) reader.CloseWithError(err)

View File

@ -21,8 +21,6 @@ import (
"fmt" "fmt"
"io" "io"
slashpath "path" slashpath "path"
"github.com/Sirupsen/logrus"
) )
// healHeal - heals the file at path. // healHeal - heals the file at path.
@ -39,10 +37,6 @@ func (xl XL) healFile(volume string, path string) error {
// List all online disks to verify if we need to heal. // List all online disks to verify if we need to heal.
onlineDisks, metadata, heal, err := xl.listOnlineDisks(volume, path) onlineDisks, metadata, heal, err := xl.listOnlineDisks(volume, path)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("List online disks failed with %s", err)
return err return err
} }
if !heal { if !heal {
@ -87,10 +81,6 @@ func (xl XL) healFile(volume string, path string) error {
writers[index], err = xl.storageDisks[index].CreateFile(volume, erasurePart) writers[index], err = xl.storageDisks[index].CreateFile(volume, erasurePart)
if err != nil { if err != nil {
needsHeal[index] = false needsHeal[index] = false
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("CreateFile failed with error %s", err)
safeCloseAndRemove(writers[index]) safeCloseAndRemove(writers[index])
continue continue
} }
@ -129,20 +119,12 @@ func (xl XL) healFile(volume string, path string) error {
// Check blocks if they are all zero in length. // Check blocks if they are all zero in length.
if checkBlockSize(enBlocks) == 0 { if checkBlockSize(enBlocks) == 0 {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("%s", errDataCorrupt)
return errDataCorrupt return errDataCorrupt
} }
// Verify the blocks. // Verify the blocks.
ok, err := xl.ReedSolomon.Verify(enBlocks) ok, err := xl.ReedSolomon.Verify(enBlocks)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("ReedSolomon verify failed with %s", err)
closeAndRemoveWriters(writers...) closeAndRemoveWriters(writers...)
return err return err
} }
@ -157,30 +139,18 @@ func (xl XL) healFile(volume string, path string) error {
} }
err = xl.ReedSolomon.Reconstruct(enBlocks) err = xl.ReedSolomon.Reconstruct(enBlocks)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("ReedSolomon reconstruct failed with %s", err)
closeAndRemoveWriters(writers...) closeAndRemoveWriters(writers...)
return err return err
} }
// Verify reconstructed blocks again. // Verify reconstructed blocks again.
ok, err = xl.ReedSolomon.Verify(enBlocks) ok, err = xl.ReedSolomon.Verify(enBlocks)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("ReedSolomon verify failed with %s", err)
closeAndRemoveWriters(writers...) closeAndRemoveWriters(writers...)
return err return err
} }
if !ok { if !ok {
// Blocks cannot be reconstructed, corrupted data. // Blocks cannot be reconstructed, corrupted data.
err = errors.New("Verification failed after reconstruction, data likely corrupted.") err = errors.New("Verification failed after reconstruction, data likely corrupted.")
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("%s", err)
closeAndRemoveWriters(writers...) closeAndRemoveWriters(writers...)
return err return err
} }
@ -191,10 +161,6 @@ func (xl XL) healFile(volume string, path string) error {
} }
_, err := writers[index].Write(enBlocks[index]) _, err := writers[index].Write(enBlocks[index])
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("Write failed with %s", err)
safeCloseAndRemove(writers[index]) safeCloseAndRemove(writers[index])
continue continue
} }

View File

@ -22,8 +22,6 @@ import (
"io" "io"
slashpath "path" slashpath "path"
"sync" "sync"
"github.com/Sirupsen/logrus"
) )
// ReadFile - read file // ReadFile - read file
@ -41,10 +39,6 @@ func (xl XL) ReadFile(volume, path string, startOffset int64) (io.ReadCloser, er
onlineDisks, metadata, heal, err := xl.listOnlineDisks(volume, path) onlineDisks, metadata, heal, err := xl.listOnlineDisks(volume, path)
nsMutex.RUnlock(volume, path) nsMutex.RUnlock(volume, path)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("Get readable disks failed with %s", err)
return nil, err return nil, err
} }
@ -52,13 +46,8 @@ func (xl XL) ReadFile(volume, path string, startOffset int64) (io.ReadCloser, er
// Heal in background safely, since we already have read // Heal in background safely, since we already have read
// quorum disks. Let the reads continue. // quorum disks. Let the reads continue.
go func() { go func() {
if hErr := xl.healFile(volume, path); hErr != nil { hErr := xl.healFile(volume, path)
log.WithFields(logrus.Fields{ errorIf(hErr, "Unable to heal file "+volume+"/"+path+".")
"volume": volume,
"path": path,
}).Errorf("healFile failed with %s", hErr)
return
}
}() }()
} }
@ -120,10 +109,6 @@ func (xl XL) ReadFile(volume, path string, startOffset int64) (io.ReadCloser, er
// Check blocks if they are all zero in length. // Check blocks if they are all zero in length.
if checkBlockSize(enBlocks) == 0 { if checkBlockSize(enBlocks) == 0 {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("%s", errDataCorrupt)
pipeWriter.CloseWithError(errDataCorrupt) pipeWriter.CloseWithError(errDataCorrupt)
return return
} }
@ -132,10 +117,6 @@ func (xl XL) ReadFile(volume, path string, startOffset int64) (io.ReadCloser, er
var ok bool var ok bool
ok, err = xl.ReedSolomon.Verify(enBlocks) ok, err = xl.ReedSolomon.Verify(enBlocks)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("ReedSolomon verify failed with %s", err)
pipeWriter.CloseWithError(err) pipeWriter.CloseWithError(err)
return return
} }
@ -150,30 +131,18 @@ func (xl XL) ReadFile(volume, path string, startOffset int64) (io.ReadCloser, er
} }
err = xl.ReedSolomon.Reconstruct(enBlocks) err = xl.ReedSolomon.Reconstruct(enBlocks)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("ReedSolomon reconstruct failed with %s", err)
pipeWriter.CloseWithError(err) pipeWriter.CloseWithError(err)
return return
} }
// Verify reconstructed blocks again. // Verify reconstructed blocks again.
ok, err = xl.ReedSolomon.Verify(enBlocks) ok, err = xl.ReedSolomon.Verify(enBlocks)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("ReedSolomon verify failed with %s", err)
pipeWriter.CloseWithError(err) pipeWriter.CloseWithError(err)
return return
} }
if !ok { if !ok {
// Blocks cannot be reconstructed, corrupted data. // Blocks cannot be reconstructed, corrupted data.
err = errors.New("Verification failed after reconstruction, data likely corrupted.") err = errors.New("Verification failed after reconstruction, data likely corrupted.")
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("%s", err)
pipeWriter.CloseWithError(err) pipeWriter.CloseWithError(err)
return return
} }
@ -199,10 +168,6 @@ func (xl XL) ReadFile(volume, path string, startOffset int64) (io.ReadCloser, er
// Write safely the necessary blocks. // Write safely the necessary blocks.
_, err = pipeWriter.Write(dataBlocks[int(startOffset):]) _, err = pipeWriter.Write(dataBlocks[int(startOffset):])
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("ReedSolomon joining decoded blocks failed with %s", err)
pipeWriter.CloseWithError(err) pipeWriter.CloseWithError(err)
return return
} }

View File

@ -26,7 +26,6 @@ import (
"path" "path"
"sync" "sync"
"github.com/Sirupsen/logrus"
"github.com/klauspost/reedsolomon" "github.com/klauspost/reedsolomon"
) )
@ -141,9 +140,6 @@ func (xl XL) MakeVol(volume string) error {
if err != nil && err != errVolumeNotFound { if err != nil && err != errVolumeNotFound {
errCount++ errCount++
if errCount > xl.readQuorum { if errCount > xl.readQuorum {
log.WithFields(logrus.Fields{
"volume": volume,
}).Errorf("%s", err)
return err return err
} }
} }
@ -182,9 +178,6 @@ func (xl XL) MakeVol(volume string) error {
// Loop through all the concocted errors. // Loop through all the concocted errors.
for _, err := range dErrs { for _, err := range dErrs {
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
}).Errorf("MakeVol failed with %s", err)
// if volume already exists, count them. // if volume already exists, count them.
if err == errVolumeExists { if err == errVolumeExists {
volumeExistsErrCnt++ volumeExistsErrCnt++
@ -241,9 +234,6 @@ func (xl XL) DeleteVol(volume string) error {
// Loop through concocted errors and return anything unusual. // Loop through concocted errors and return anything unusual.
for _, err := range dErrs { for _, err := range dErrs {
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
}).Errorf("DeleteVol failed with %s", err)
// We ignore error if errVolumeNotFound or errDiskNotFound // We ignore error if errVolumeNotFound or errDiskNotFound
if err == errVolumeNotFound || err == errDiskNotFound { if err == errVolumeNotFound || err == errDiskNotFound {
volumeNotFoundErrCnt++ volumeNotFoundErrCnt++
@ -388,11 +378,6 @@ func (xl XL) listAllVolumeInfo(volume string) ([]VolInfo, bool, error) {
// Verify if online disks count are lesser than readQuorum // Verify if online disks count are lesser than readQuorum
// threshold, return an error if yes. // threshold, return an error if yes.
if onlineDiskCount < xl.readQuorum { if onlineDiskCount < xl.readQuorum {
log.WithFields(logrus.Fields{
"volume": volume,
"onlineDiskCount": onlineDiskCount,
"readQuorumCount": xl.readQuorum,
}).Errorf("%s", errReadQuorum)
return nil, false, errReadQuorum return nil, false, errReadQuorum
} }
} }
@ -410,9 +395,6 @@ func (xl XL) healVolume(volume string) error {
// Lists volume info for all online disks. // Lists volume info for all online disks.
volsInfo, heal, err := xl.listAllVolumeInfo(volume) volsInfo, heal, err := xl.listAllVolumeInfo(volume)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
}).Errorf("List online disks failed with %s", err)
return err return err
} }
if !heal { if !heal {
@ -425,11 +407,6 @@ func (xl XL) healVolume(volume string) error {
} }
// Volinfo name would be an empty string, create it. // Volinfo name would be an empty string, create it.
if err = xl.storageDisks[index].MakeVol(volume); err != nil { if err = xl.storageDisks[index].MakeVol(volume); err != nil {
if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
}).Errorf("MakeVol failed with error %s", err)
}
continue continue
} }
} }
@ -447,20 +424,13 @@ func (xl XL) StatVol(volume string) (volInfo VolInfo, err error) {
volsInfo, heal, err := xl.listAllVolumeInfo(volume) volsInfo, heal, err := xl.listAllVolumeInfo(volume)
nsMutex.RUnlock(volume, "") nsMutex.RUnlock(volume, "")
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
}).Errorf("listOnlineVolsInfo failed with %s", err)
return VolInfo{}, err return VolInfo{}, err
} }
if heal { if heal {
go func() { go func() {
if hErr := xl.healVolume(volume); hErr != nil { hErr := xl.healVolume(volume)
log.WithFields(logrus.Fields{ errorIf(hErr, "Unable to heal volume "+volume+".")
"volume": volume,
}).Errorf("healVolume failed with %s", hErr)
return
}
}() }()
} }
@ -534,23 +504,14 @@ func (xl XL) StatFile(volume, path string) (FileInfo, error) {
_, metadata, heal, err := xl.listOnlineDisks(volume, path) _, metadata, heal, err := xl.listOnlineDisks(volume, path)
nsMutex.RUnlock(volume, path) nsMutex.RUnlock(volume, path)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("listOnlineDisks failed with %s", err)
return FileInfo{}, err return FileInfo{}, err
} }
if heal { if heal {
// Heal in background safely, since we already have read quorum disks. // Heal in background safely, since we already have read quorum disks.
go func() { go func() {
if hErr := xl.healFile(volume, path); hErr != nil { hErr := xl.healFile(volume, path)
log.WithFields(logrus.Fields{ errorIf(hErr, "Unable to heal file "+volume+"/"+path+".")
"volume": volume,
"path": path,
}).Errorf("healFile failed with %s", hErr)
return
}
}() }()
} }
@ -582,11 +543,6 @@ func (xl XL) DeleteFile(volume, path string) error {
erasureFilePart := slashpath.Join(path, fmt.Sprintf("file.%d", index)) erasureFilePart := slashpath.Join(path, fmt.Sprintf("file.%d", index))
err := disk.DeleteFile(volume, erasureFilePart) err := disk.DeleteFile(volume, erasureFilePart)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("DeleteFile failed with %s", err)
errCount++ errCount++
// We can safely allow DeleteFile errors up to len(xl.storageDisks) - xl.writeQuorum // We can safely allow DeleteFile errors up to len(xl.storageDisks) - xl.writeQuorum
@ -601,11 +557,6 @@ func (xl XL) DeleteFile(volume, path string) error {
xlMetaV1FilePath := slashpath.Join(path, "file.json") xlMetaV1FilePath := slashpath.Join(path, "file.json")
err = disk.DeleteFile(volume, xlMetaV1FilePath) err = disk.DeleteFile(volume, xlMetaV1FilePath)
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Errorf("DeleteFile failed with %s", err)
errCount++ errCount++
// We can safely allow DeleteFile errors up to len(xl.storageDisks) - xl.writeQuorum // We can safely allow DeleteFile errors up to len(xl.storageDisks) - xl.writeQuorum
@ -653,13 +604,6 @@ func (xl XL) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error {
// not rename the part and metadata files separately. // not rename the part and metadata files separately.
err := disk.RenameFile(srcVolume, retainSlash(srcPath), dstVolume, retainSlash(dstPath)) err := disk.RenameFile(srcVolume, retainSlash(srcPath), dstVolume, retainSlash(dstPath))
if err != nil { if err != nil {
log.WithFields(logrus.Fields{
"srcVolume": srcVolume,
"srcPath": srcPath,
"dstVolume": dstVolume,
"dstPath": dstPath,
}).Errorf("RenameFile failed with %s", err)
errCount++ errCount++
// We can safely allow RenameFile errors up to len(xl.storageDisks) - xl.writeQuorum // We can safely allow RenameFile errors up to len(xl.storageDisks) - xl.writeQuorum
// otherwise return failure. // otherwise return failure.

View File

@ -223,9 +223,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
src := path.Join(mpartMetaPrefix, bucket, object, uploadID, partSuffix) src := path.Join(mpartMetaPrefix, bucket, object, uploadID, partSuffix)
dst := path.Join(mpartMetaPrefix, bucket, object, uploadID, partNumToPartFileName(part.PartNumber)) dst := path.Join(mpartMetaPrefix, bucket, object, uploadID, partNumToPartFileName(part.PartNumber))
errs[index] = xl.storage.RenameFile(minioMetaBucket, src, minioMetaBucket, dst) errs[index] = xl.storage.RenameFile(minioMetaBucket, src, minioMetaBucket, dst)
if errs[index] != nil { errorIf(errs[index], "Unable to rename file %s to %s.", src, dst)
log.Errorf("Unable to rename file %s to %s, failed with %s", src, dst, errs[index])
}
}(index, part) }(index, part)
} }
@ -240,10 +238,10 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
} }
// Delete the incomplete file place holder. // Delete the incomplete file place holder.
uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID, incompleteFile) uploadIDIncompletePath := path.Join(mpartMetaPrefix, bucket, object, uploadID, incompleteFile)
err = xl.storage.DeleteFile(minioMetaBucket, uploadIDPath) err = xl.storage.DeleteFile(minioMetaBucket, uploadIDIncompletePath)
if err != nil { if err != nil {
return "", toObjectErr(err, minioMetaBucket, uploadIDPath) return "", toObjectErr(err, minioMetaBucket, uploadIDIncompletePath)
} }
// Delete if an object already exists. // Delete if an object already exists.
@ -255,7 +253,8 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
return "", toObjectErr(err, bucket, object) return "", toObjectErr(err, bucket, object)
} }
if err = xl.storage.RenameFile(minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object, uploadID), bucket, object); err != nil { uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
if err = xl.storage.RenameFile(minioMetaBucket, uploadIDPath, bucket, object); err != nil {
return "", toObjectErr(err, bucket, object) return "", toObjectErr(err, bucket, object)
} }
// Validate if there are other incomplete upload-id's present for // Validate if there are other incomplete upload-id's present for

View File

@ -49,20 +49,17 @@ func isValidFormat(storage StorageAPI, exportPaths ...string) bool {
// Load saved XL format.json and validate. // Load saved XL format.json and validate.
xl, err := loadFormatXL(storage) xl, err := loadFormatXL(storage)
if err != nil { if err != nil {
log.Errorf("loadFormatXL failed with %s", err) errorIf(err, "Unable to load format file 'format.json'.")
return false return false
} }
if xl.Version != "1" { if xl.Version != "1" {
log.Errorf("Unsupported XL backend format found [%s]", xl.Version)
return false return false
} }
if len(exportPaths) != len(xl.Disks) { if len(exportPaths) != len(xl.Disks) {
log.Errorf("Number of disks %d passed at the command-line did not match the backend format %d", len(exportPaths), len(xl.Disks))
return false return false
} }
for index, disk := range xl.Disks { for index, disk := range xl.Disks {
if exportPaths[index] != disk { if exportPaths[index] != disk {
log.Errorf("Invalid order of disks detected %s. Required order is %s.", exportPaths, xl.Disks)
return false return false
} }
} }
@ -73,7 +70,6 @@ func isValidFormat(storage StorageAPI, exportPaths ...string) bool {
func newXLObjects(exportPaths ...string) (ObjectLayer, error) { func newXLObjects(exportPaths ...string) (ObjectLayer, error) {
storage, err := newXL(exportPaths...) storage, err := newXL(exportPaths...)
if err != nil { if err != nil {
log.Errorf("newXL failed with %s", err)
return nil, err return nil, err
} }
@ -90,13 +86,11 @@ func newXLObjects(exportPaths ...string) (ObjectLayer, error) {
Disks: exportPaths, Disks: exportPaths,
}) })
if errSave != nil { if errSave != nil {
log.Errorf("saveFormatXL failed with %s", errSave)
return nil, errSave return nil, errSave
} }
} else { } else {
log.Errorf("Unable to check backend format %s", err)
if err == errReadQuorum { if err == errReadQuorum {
errMsg := fmt.Sprintf("Not all disks %s are available, did not meet read quroum.", exportPaths) errMsg := fmt.Sprintf("Disks %s are offline. Unable to establish quorum.", exportPaths)
err = errors.New(errMsg) err = errors.New(errMsg)
} else if err == errDiskNotFound { } else if err == errDiskNotFound {
errMsg := fmt.Sprintf("Disks %s not found.", exportPaths) errMsg := fmt.Sprintf("Disks %s not found.", exportPaths)