Mirror of https://github.com/minio/minio.git (synced 2024-12-24 06:05:55 -05:00)
logging: Log only for unhandled errors, remove all the debug logging. (#1652)
This patch removes debug logging altogether and instead adds the ability to trace errors back to where they originated. To enable tracing, set the "MINIO_TRACE" environment variable to "1" or "true"; stack traces are then printed whenever an error is unhandled or surfaces at the handler layer. By default tracing is turned off and only user-level logging is provided.
This commit is contained in:
parent: 8828fd1e5c
commit: 9472299308
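For orientation, here is a minimal, self-contained sketch of the behavior described above, assembled from the logger.go and main.go hunks in this diff. The `traceEnabled` helper and the standalone `main` are illustrative only (the patch itself sets a `globalTrace` variable once at startup), and the sketch assumes the github.com/sirupsen/logrus package (the patch imports the older Sirupsen path).

```go
package main

import (
	"errors"
	"os"
	"reflect"
	"runtime/debug"

	"github.com/sirupsen/logrus" // the patch imports the older Sirupsen path
)

// traceEnabled mirrors how the patch derives its globalTrace flag from the
// MINIO_TRACE environment variable (the main.go hunk checks for "1"; the
// commit message also mentions "true", so both are accepted here).
func traceEnabled() bool {
	v := os.Getenv("MINIO_TRACE")
	return v == "1" || v == "true"
}

// errorIf is a simplified sketch of the new helper: log only when err is
// non-nil, and attach a stack trace only when tracing is turned on.
func errorIf(err error, msg string, data ...interface{}) {
	if err == nil {
		return
	}
	fields := logrus.Fields{
		"cause": err.Error(),
		"type":  reflect.TypeOf(err),
	}
	if traceEnabled() {
		fields["stack"] = string(debug.Stack())
	}
	logrus.WithFields(fields).Errorf(msg, data...)
}

func main() {
	errorIf(errors.New("disk offline"), "Unable to list objects in bucket %s.", "photos")
}
```

Running it with MINIO_TRACE=1 attaches a stack field to the log entry; without the variable only the cause and type fields are emitted.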
@@ -50,7 +50,7 @@ var isValidAccessKey = regexp.MustCompile(`^[a-zA-Z0-9\\-\\.\\_\\~]{5,20}$`)
// mustGenAccessKeys - must generate access credentials.
func mustGenAccessKeys() (creds credential) {
creds, err := genAccessKeys()
fatalIf(err, "Unable to generate access keys.", nil)
fatalIf(err, "Unable to generate access keys.")
return creds
}

@@ -58,9 +58,9 @@ type LogMessage struct {
func (h *accessLogHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
message, err := getLogMessage(w, req)
fatalIf(err, "Unable to extract http message.", nil)
fatalIf(err, "Unable to parse HTTP request and response fields.")
_, err = h.accessLogFile.Write(message)
fatalIf(err, "Writing to log file failed.", nil)
fatalIf(err, "Unable to log HTTP access.")
h.Handler.ServeHTTP(w, req)
}

@@ -112,7 +112,7 @@ func getLogMessage(w http.ResponseWriter, req *http.Request) ([]byte, error) {
// setAccessLogHandler logs requests
func setAccessLogHandler(h http.Handler) http.Handler {
file, err := os.OpenFile("access.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
fatalIf(err, "Unable to open access log.", nil)
fatalIf(err, "Failed top open access log.")
return &accessLogHandler{Handler: h, accessLogFile: file}
}

@@ -111,12 +111,10 @@ func sumMD5(data []byte) []byte {
// Verify if request has valid AWS Signature Version '4'.
func isReqAuthenticated(r *http.Request) (s3Error APIErrorCode) {
if r == nil {
errorIf(errInvalidArgument, "HTTP request cannot be empty.", nil)
return ErrInternalError
}
payload, err := ioutil.ReadAll(r.Body)
if err != nil {
errorIf(err, "Unable to read HTTP body.", nil)
return ErrInternalError
}
// Verify Content-Md5, if payload is set.

@@ -36,7 +36,7 @@ func enforceBucketPolicy(action string, bucket string, reqURL *url.URL) (s3Error
// Read saved bucket policy.
policy, err := readBucketPolicy(bucket)
if err != nil {
errorIf(err, "GetBucketPolicy failed.", nil)
errorIf(err, "Unable read bucket policy.")
switch err.(type) {
case BucketNotFound:
return ErrNoSuchBucket

@@ -50,7 +50,7 @@ func enforceBucketPolicy(action string, bucket string, reqURL *url.URL) (s3Error
// Parse the saved policy.
bucketPolicy, err := parseBucketPolicy(policy)
if err != nil {
errorIf(err, "Parse policy failed.", nil)
errorIf(err, "Unable to parse bucket policy.")
return ErrAccessDenied
}

@@ -117,7 +117,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
}
if _, err := api.ObjectAPI.GetBucketInfo(bucket); err != nil {
errorIf(err, "GetBucketInfo failed.", nil)
errorIf(err, "Unable to fetch bucket info.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}

@@ -180,7 +180,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
listMultipartsInfo, err := api.ObjectAPI.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
if err != nil {
errorIf(err, "ListMultipartUploads failed.", nil)
errorIf(err, "Unable to list multipart uploads.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}

@@ -252,7 +252,7 @@ func (api objectAPIHandlers) ListObjectsHandler(w http.ResponseWriter, r *http.R
writeSuccessResponse(w, encodedSuccessResponse)
return
}
errorIf(err, "ListObjects failed.", nil)
errorIf(err, "Unable to list objects.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
}

@@ -306,7 +306,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
writeSuccessResponse(w, encodedSuccessResponse)
return
}
errorIf(err, "ListBuckets failed.", nil)
errorIf(err, "Unable to list buckets.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
}

@@ -352,7 +352,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Read incoming body XML bytes.
if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
errorIf(err, "DeleteMultipleObjects failed.", nil)
errorIf(err, "Unable to read HTTP body.")
writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return
}

@@ -360,7 +360,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Unmarshal list of keys to be deleted.
deleteObjects := &DeleteObjectsRequest{}
if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
errorIf(err, "DeleteMultipartObjects xml decoding failed.", nil)
errorIf(err, "Unable to unmarshal delete objects request XML.")
writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path)
return
}

@@ -375,7 +375,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
ObjectName: object.ObjectName,
})
} else {
errorIf(err, "DeleteObject failed.", nil)
errorIf(err, "Unable to delete object.")
deleteErrors = append(deleteErrors, DeleteError{
Code: errorCodeResponse[toAPIErrorCode(err)].Code,
Message: errorCodeResponse[toAPIErrorCode(err)].Description,

@@ -423,7 +423,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
// Make bucket.
err := api.ObjectAPI.MakeBucket(bucket)
if err != nil {
errorIf(err, "MakeBucket failed.", nil)
errorIf(err, "Unable to create a bucket.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}

@@ -467,14 +467,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// be loaded in memory, the remaining being put in temporary files.
reader, err := r.MultipartReader()
if err != nil {
errorIf(err, "Unable to initialize multipart reader.", nil)
errorIf(err, "Unable to initialize multipart reader.")
writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path)
return
}
fileBody, formValues, err := extractHTTPFormValues(reader)
if err != nil {
errorIf(err, "Unable to parse form values.", nil)
errorIf(err, "Unable to parse form values.")
writeErrorResponse(w, r, ErrMalformedPOSTRequest, r.URL.Path)
return
}

@@ -494,7 +494,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
md5Sum, err := api.ObjectAPI.PutObject(bucket, object, -1, fileBody, nil)
if err != nil {
errorIf(err, "PutObject failed.", nil)
errorIf(err, "Unable to create object.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}

@@ -540,7 +540,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
}
if _, err := api.ObjectAPI.GetBucketInfo(bucket); err != nil {
errorIf(err, "GetBucketInfo failed.", nil)
errorIf(err, "Unable to fetch bucket info.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}

@@ -565,7 +565,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
}
if err := api.ObjectAPI.DeleteBucket(bucket); err != nil {
errorIf(err, "DeleteBucket failed.", nil)
errorIf(err, "Unable to delete a bucket.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}

@@ -67,7 +67,7 @@ func bucketPolicyActionMatch(action string, statement policyStatement) bool {
for _, policyAction := range statement.Actions {
// Policy action can be a regex, validate the action with matching string.
matched, err := regexp.MatchString(policyAction, action)
fatalIf(err, "Invalid pattern, please verify the pattern string.", nil)
fatalIf(err, "Invalid action \"%s\" in bucket policy.", action)
if matched {
return true
}

@@ -190,7 +190,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
// bucket policies are limited to 20KB in size, using a limit reader.
bucketPolicyBuf, err := ioutil.ReadAll(io.LimitReader(r.Body, maxAccessPolicySize))
if err != nil {
errorIf(err, "Reading policy failed.", nil)
errorIf(err, "Unable to read bucket policy.")
writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return
}

@@ -198,7 +198,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
// Parse bucket policy.
bucketPolicy, err := parseBucketPolicy(bucketPolicyBuf)
if err != nil {
errorIf(err, "Unable to parse bucket policy.", nil)
errorIf(err, "Unable to parse bucket policy.")
writeErrorResponse(w, r, ErrInvalidPolicyDocument, r.URL.Path)
return
}

@@ -211,7 +211,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
// Save bucket policy.
if err := writeBucketPolicy(bucket, bucketPolicyBuf); err != nil {
errorIf(err, "SaveBucketPolicy failed.", nil)
errorIf(err, "Unable to write bucket policy.")
switch err.(type) {
case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)

@@ -245,7 +245,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
// Delete bucket access policy.
if err := removeBucketPolicy(bucket); err != nil {
errorIf(err, "DeleteBucketPolicy failed.", nil)
errorIf(err, "Unable to remove bucket policy.")
switch err.(type) {
case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)

@@ -282,7 +282,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
// Read bucket access policy.
p, err := readBucketPolicy(bucket)
if err != nil {
errorIf(err, "GetBucketPolicy failed.", nil)
errorIf(err, "Unable to read bucket policy.")
switch err.(type) {
case BucketNameInvalid:
writeErrorResponse(w, r, ErrInvalidBucketName, r.URL.Path)
certs.go

@@ -45,7 +45,7 @@ func getCertsPath() (string, error) {
// mustGetCertsPath must get certs path.
func mustGetCertsPath() string {
certsPath, err := getCertsPath()
fatalIf(err, "Unable to retrieve certs path.", nil)
fatalIf(err, "Failed to get certificate path.")
return certsPath
}

@@ -31,6 +31,10 @@ var globalFlags = []cli.Flag{
Value: mustGetConfigPath(),
Usage: "Path to configuration folder.",
},
cli.BoolFlag{
Name: "quiet",
Usage: "Suppress chatty output.",
},
}
// registerCommand registers a cli command.

@@ -40,18 +40,18 @@ func purgeV1() {
if err != nil && os.IsNotExist(err) {
return
}
fatalIf(err, "Unable to load config version ‘1’.", nil)
fatalIf(err, "Unable to load config version ‘1’.")
if cv1.Version == "1" {
console.Println("Unsupported config version ‘1’ found, removed successfully.")
console.Println("Removed unsupported config version ‘1’.")
/// Purge old fsUsers.json file
configPath, err := getConfigPath()
fatalIf(err, "Unable to retrieve config path.", nil)
fatalIf(err, "Unable to retrieve config path.")
configFile := filepath.Join(configPath, "fsUsers.json")
os.RemoveAll(configFile)
}
fatalIf(errors.New(""), "Unexpected version found ‘"+cv1.Version+"’, cannot migrate.", nil)
fatalIf(errors.New(""), "Failed to migrate unrecognized config version ‘"+cv1.Version+"’.")
}
// Version '2' to '3' config migration adds new fields and re-orders

@@ -61,7 +61,7 @@ func migrateV2ToV3() {
if err != nil && os.IsNotExist(err) {
return
}
fatalIf(err, "Unable to load config version ‘2’.", nil)
fatalIf(err, "Unable to load config version ‘2’.")
if cv2.Version != "2" {
return
}

@@ -98,14 +98,14 @@ func migrateV2ToV3() {
srvConfig.Logger.Syslog = slogger
qc, err := quick.New(srvConfig)
fatalIf(err, "Unable to initialize config.", nil)
fatalIf(err, "Unable to initialize config.")
configFile, err := getConfigFile()
fatalIf(err, "Unable to get config file.", nil)
fatalIf(err, "Unable to get config file.")
// Migrate the config.
err = qc.Save(configFile)
fatalIf(err, "Migrating from version ‘"+cv2.Version+"’ to ‘"+srvConfig.Version+"’ failed.", nil)
fatalIf(err, "Failed to migrate config from ‘"+cv2.Version+"’ to ‘"+srvConfig.Version+"’ failed.")
console.Println("Migration from version ‘" + cv2.Version + "’ to ‘" + srvConfig.Version + "’ completed successfully.")
}

@@ -118,7 +118,7 @@ func migrateV3ToV4() {
if err != nil && os.IsNotExist(err) {
return
}
fatalIf(err, "Unable to load config version ‘3’.", nil)
fatalIf(err, "Unable to load config version ‘3’.")
if cv3.Version != "3" {
return
}

@@ -137,12 +137,12 @@ func migrateV3ToV4() {
srvConfig.Logger.Syslog = cv3.Logger.Syslog
qc, err := quick.New(srvConfig)
fatalIf(err, "Unable to initialize the quick config.", nil)
fatalIf(err, "Unable to initialize the quick config.")
configFile, err := getConfigFile()
fatalIf(err, "Unable to get config file.", nil)
fatalIf(err, "Unable to get config file.")
err = qc.Save(configFile)
fatalIf(err, "Migrating from version ‘"+cv3.Version+"’ to ‘"+srvConfig.Version+"’ failed.", nil)
fatalIf(err, "Failed to migrate config from ‘"+cv3.Version+"’ to ‘"+srvConfig.Version+"’ failed.")
console.Println("Migration from version ‘" + cv3.Version + "’ to ‘" + srvConfig.Version + "’ completed successfully.")
}

@@ -47,7 +47,7 @@ func getConfigPath() (string, error) {
// mustGetConfigPath must get server config path.
func mustGetConfigPath() string {
configPath, err := getConfigPath()
fatalIf(err, "Unable to get config path.", nil)
fatalIf(err, "Unable to get config path.")
return configPath
}

@@ -73,7 +73,7 @@ func isConfigFileExists() bool {
// mustGetConfigFile must get server config file.
func mustGetConfigFile() string {
configFile, err := getConfigFile()
fatalIf(err, "Unable to get config file.", nil)
fatalIf(err, "Unable to get config file.")
return configFile
}

@@ -1,11 +1,9 @@
### Logging.
- `log.Fatalf`
- `log.Errorf`
- `log.Warnf`
- `log.Infof`
- `fatalIf` - wrapper function which takes error and prints jsonic error messages.
- `errorIf` - similar to fatalIf but doesn't exit on err != nil.
Logging is enabled across the codebase. There are three types of logging supported.
Supported logging targets.
- console
- file

@@ -20,11 +18,11 @@ Sample logger section from `~/.minio/config.json`
"file": {
"enable": false,
"fileName": "",
"level": "trace"
"level": "error"
},
"syslog": {
"enable": false,
"address": "",
"level": "info"
"level": "error"
}
```
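As a side note on the logger configuration shown above: the level string from each target block is parsed with logrus.ParseLevel and applied to the logger, which is what the enableConsoleLogger/enableFileLogger hunks below do. A minimal sketch of that wiring, assuming a simplified stand-in for the config struct (the `consoleLogger` type here is hypothetical, not MinIO's actual type):

```go
package main

import (
	"io/ioutil"

	"github.com/sirupsen/logrus" // the patch imports the older Sirupsen path
)

// consoleLogger is a simplified, hypothetical stand-in for the "console"
// block of the logger section in ~/.minio/config.json.
type consoleLogger struct {
	Enable bool
	Level  string // e.g. "error", as in the sample above
}

// enableConsoleLogger mirrors the shape of the patched helper: keep the
// default output and formatter, and only apply the configured level.
func enableConsoleLogger(log *logrus.Logger, clogger consoleLogger) {
	if !clogger.Enable {
		log.Out = ioutil.Discard
		return
	}
	lvl, err := logrus.ParseLevel(clogger.Level)
	if err != nil {
		// The patch calls fatalIf(err, "Unknown log level found in the config file.") here.
		log.Fatalf("Unknown log level %q found in the config file.", clogger.Level)
	}
	log.Level = lvl
}

func main() {
	log := logrus.New()
	enableConsoleLogger(log, consoleLogger{Enable: true, Level: "error"})
	log.Error("emitted: level is error")
	log.Info("suppressed: below the configured level")
}
```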
globals.go

@@ -16,11 +16,7 @@
package main
import (
"github.com/fatih/color"
"github.com/minio/cli"
"github.com/minio/mc/pkg/console"
)
import "github.com/fatih/color"
// Global constants for Minio.
const (

@@ -41,7 +37,7 @@ const (
var (
globalQuiet = false // Quiet flag set via command line
globalDebug = false // Debug flag set via command line
globalTrace = false // Trace flag set via environment setting.
// Add new global flags here.
)

@@ -51,22 +47,3 @@ var (
colorWhite = color.New(color.FgWhite, color.Bold).SprintfFunc()
colorGreen = color.New(color.FgGreen, color.Bold).SprintfFunc()
)
// Set global states. NOTE: It is deliberately kept monolithic to
// ensure we dont miss out any flags.
func setGlobals(quiet, debug bool) {
globalQuiet = quiet
globalDebug = debug
// Enable debug messages if requested.
if globalDebug {
console.DebugPrint = true
}
}
// Set global states. NOTE: It is deliberately kept monolithic to
// ensure we dont miss out any flags.
func setGlobalsFromContext(ctx *cli.Context) {
quiet := ctx.Bool("quiet") || ctx.GlobalBool("quiet")
debug := ctx.Bool("debug") || ctx.GlobalBool("debug")
setGlobals(quiet, debug)
}

@@ -36,10 +36,11 @@ func enableConsoleLogger() {
log.Out = ioutil.Discard
return
}
// log.Out and log.Formatter use the default versions.
// Only set specific log level.
lvl, err := logrus.ParseLevel(clogger.Level)
fatalIf(err, "Unknown log level detected, please fix your console logger configuration.", nil)
fatalIf(err, "Unknown log level found in the config file.")
log.Level = lvl
}

@@ -40,13 +40,13 @@ func enableFileLogger() {
}
file, err := os.OpenFile(flogger.Filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
fatalIf(err, "Unable to open log file.", nil)
fatalIf(err, "Unable to open log file.")
// Add a local file hook.
log.Hooks.Add(&localFile{file})
lvl, err := logrus.ParseLevel(flogger.Level)
fatalIf(err, "Unknown log level detected, please fix your console logger configuration.", nil)
fatalIf(err, "Unknown log level found in the config file.")
// Set default JSON formatter.
log.Formatter = new(logrus.JSONFormatter)

@@ -64,14 +64,11 @@ func (l *localFile) Fire(entry *logrus.Entry) error {
return nil
}
// Levels -
// Levels - indicate log levels supported.
func (l *localFile) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
}
}

@@ -41,11 +41,11 @@ type syslogHook struct {
// enableSyslogLogger - enable logger at raddr.
func enableSyslogLogger(raddr string) {
syslogHook, err := newSyslog("udp", raddr, syslog.LOG_ERR, "MINIO")
fatalIf(err, "Unable to instantiate syslog.", nil)
fatalIf(err, "Unable to initialize syslog logger.")
log.Hooks.Add(syslogHook) // Add syslog hook.
log.Formatter = &logrus.JSONFormatter{} // JSON formatted log.
log.Level = logrus.InfoLevel // Minimum log level.
log.Level = logrus.ErrorLevel // Minimum log level.
}
// newSyslog - Creates a hook to be added to an instance of logger.

@@ -67,12 +67,6 @@ func (hook *syslogHook) Fire(entry *logrus.Entry) error {
return hook.writer.Crit(line)
case logrus.ErrorLevel:
return hook.writer.Err(line)
case logrus.WarnLevel:
return hook.writer.Warning(line)
case logrus.InfoLevel:
return hook.writer.Info(line)
case logrus.DebugLevel:
return hook.writer.Debug(line)
default:
return nil
}

@@ -84,8 +78,5 @@ func (hook *syslogHook) Levels() []logrus.Level {
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
}
}

@@ -26,5 +26,5 @@ type syslogLogger struct {
// enableSyslogLogger - unsupported on windows.
func enableSyslogLogger(raddr string) {
fatalIf(errSyslogNotSupported, "Unable to enable syslog.", nil)
fatalIf(errSyslogNotSupported, "Unable to enable syslog.")
}
logger.go

@@ -18,8 +18,10 @@ package main
import (
"reflect"
"runtime/debug"
"github.com/Sirupsen/logrus"
"github.com/minio/minio/pkg/probe"
)
type fields map[string]interface{}

@@ -41,38 +43,37 @@ type logger struct {
}
// errorIf synonymous with fatalIf but doesn't exit on error != nil
func errorIf(err error, msg string, fields logrus.Fields) {
func errorIf(err error, msg string, data ...interface{}) {
if err == nil {
return
}
if fields == nil {
fields = make(logrus.Fields)
sysInfo := probe.GetSysInfo()
fields := logrus.Fields{
"cause": err.Error(),
"type": reflect.TypeOf(err),
"sysInfo": sysInfo,
}
fields["Error"] = struct {
Cause string `json:"cause,omitempty"`
Type string `json:"type,omitempty"`
}{
err.Error(),
reflect.TypeOf(err).String(),
if globalTrace {
stack := debug.Stack()
fields["stack"] = string(stack)
}
log.WithFields(fields).Error(msg)
log.WithFields(fields).Errorf(msg, data...)
}
// fatalIf wrapper function which takes error and prints jsonic error messages.
func fatalIf(err error, msg string, fields logrus.Fields) {
func fatalIf(err error, msg string, data ...interface{}) {
if err == nil {
return
}
if fields == nil {
fields = make(logrus.Fields)
sysInfo := probe.GetSysInfo()
fields := logrus.Fields{
"cause": err.Error(),
"type": reflect.TypeOf(err),
"sysInfo": sysInfo,
}
fields["Error"] = struct {
Cause string `json:"cause,omitempty"`
Type string `json:"type,omitempty"`
}{
err.Error(),
reflect.TypeOf(err).String(),
if globalTrace {
stack := debug.Stack()
fields["stack"] = string(stack)
}
log.WithFields(fields).Fatal(msg)
log.WithFields(fields).Fatalf(msg, data...)
}
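The rewritten helpers above flatten the log entry: instead of a nested "Error" struct, cause, type, and sysInfo become top-level logrus fields, and the message is now treated as a format string for the variadic arguments. A small runnable sketch of the resulting JSON shape (using logrus directly; sysInfo is omitted and the type is stringified here for readability, whereas the patch stores probe.GetSysInfo() and the reflect.Type value itself):

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"reflect"

	"github.com/sirupsen/logrus" // the patch imports the older Sirupsen path
)

func main() {
	log := logrus.New()
	buf := &bytes.Buffer{}
	log.Out = buf
	log.Formatter = new(logrus.JSONFormatter)

	err := errors.New("Fake error")
	log.WithFields(logrus.Fields{
		"cause": err.Error(),
		"type":  reflect.TypeOf(err).String(), // the patch keeps the reflect.Type value itself
	}).Errorf("Failed with error.")

	// One JSON line with top-level "cause" and "type" keys, roughly:
	// {"cause":"Fake error","level":"error","msg":"Failed with error.","type":"*errors.errorString",...}
	fmt.Print(buf.String())
}
```

This flattening is also why the test in the next hunk asserts on fields["cause"] instead of the old nested fields["Error"] map.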
@@ -36,12 +36,12 @@ func (s *LoggerSuite) TestLogger(c *C) {
log.Out = &buffer
log.Formatter = new(logrus.JSONFormatter)
errorIf(errors.New("Fake error"), "Failed with error.", nil)
errorIf(errors.New("Fake error"), "Failed with error.")
err := json.Unmarshal(buffer.Bytes(), &fields)
c.Assert(err, IsNil)
c.Assert(fields["level"], Equals, "error")
msg, ok := fields["Error"]
msg, ok := fields["cause"]
c.Assert(ok, Equals, true)
c.Assert(msg.(map[string]interface{})["cause"], Equals, "Fake error")
c.Assert(msg, Equals, "Fake error")
}

main.go

@@ -20,15 +20,11 @@ import (
"fmt"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"github.com/dustin/go-humanize"
"github.com/minio/cli"
"github.com/minio/mc/pkg/console"
"github.com/minio/minio/pkg/probe"
"github.com/olekukonko/ts"
"github.com/pkg/profile"
)

@@ -60,10 +56,7 @@ FLAGS:
{{end}}{{end}}
VERSION:
` + minioVersion +
`{{ "\n"}}{{range $key, $value := ExtraInfo}}
{{$key}}:
{{$value}}
{{end}}`
`{{ "\n"}}`
// init - check the environment before main starts
func init() {

@@ -92,32 +85,6 @@ func enableLoggers() {
// Add your logger here.
}
// Tries to get os/arch/platform specific information
// Returns a map of current os/arch/platform/memstats
func getSystemData() map[string]string {
host, err := os.Hostname()
if err != nil {
host = ""
}
memstats := &runtime.MemStats{}
runtime.ReadMemStats(memstats)
mem := fmt.Sprintf("Used: %s | Allocated: %s | Used-Heap: %s | Allocated-Heap: %s",
humanize.Bytes(memstats.Alloc),
humanize.Bytes(memstats.TotalAlloc),
humanize.Bytes(memstats.HeapAlloc),
humanize.Bytes(memstats.HeapSys))
platform := fmt.Sprintf("Host: %s | OS: %s | Arch: %s",
host,
runtime.GOOS,
runtime.GOARCH)
goruntime := fmt.Sprintf("Version: %s | CPUs: %s", runtime.Version(), strconv.Itoa(runtime.NumCPU()))
return map[string]string{
"PLATFORM": platform,
"RUNTIME": goruntime,
"MEM": mem,
}
}
func findClosestCommands(command string) []string {
var closestCommands []string
for _, value := range commandsTree.PrefixMatch(command) {

@@ -195,15 +162,16 @@ func main() {
defer profile.Start(profile.BlockProfile, profile.ProfilePath(mustGetProfilePath())).Stop()
}
// Set global trace flag.
trace := os.Getenv("MINIO_TRACE")
globalTrace = trace == "1"
probe.Init() // Set project's root source path.
probe.SetAppInfo("Release-Tag", minioReleaseTag)
probe.SetAppInfo("Commit-ID", minioShortCommitID)
app := registerApp()
app.Before = func(c *cli.Context) error {
// Set global flags.
setGlobalsFromContext(c)
// Sets new config folder.
setGlobalConfigPath(c.GlobalString("config-dir"))

@@ -215,7 +183,7 @@ func main() {
// Initialize config.
err := initConfig()
fatalIf(err, "Unable to initialize minio config.", nil)
fatalIf(err, "Unable to initialize minio config.")
// Enable all loggers by now.
enableLoggers()

@@ -223,6 +191,9 @@ func main() {
// Initialize name space lock.
initNSLock()
// Set global quiet flag.
globalQuiet = c.Bool("quiet") || c.GlobalBool("quiet")
// Do not print update messages, if quiet flag is set.
if !globalQuiet {
// Do not print any errors in release update function.

@@ -236,16 +207,6 @@ func main() {
// Return here.
return nil
}
app.ExtraInfo = func() map[string]string {
if _, e := ts.GetSize(); e != nil {
globalQuiet = true
}
// Enable if debug is enabled.
if globalDebug {
return getSystemData()
}
return make(map[string]string)
}
// Run the app - exit on error.
app.RunAndExitOnError()
}
@@ -17,9 +17,8 @@
package main
import (
"errors"
"sync"
"github.com/Sirupsen/logrus"
)
// nsParam - carries name space resource.

@@ -91,10 +90,7 @@ func (n *nsLockMap) unlock(volume, path string, readLock bool) {
nsLk.Unlock()
}
if nsLk.ref == 0 {
log.WithFields(logrus.Fields{
"volume": volume,
"path": path,
}).Error("ref count in NS lock can not be 0.")
errorIf(errors.New("Namespace reference count cannot be 0."), "Invalid reference count detected.")
}
if nsLk.ref != 0 {
nsLk.ref--

@@ -27,7 +27,6 @@ import (
"strconv"
"strings"
"github.com/Sirupsen/logrus"
"github.com/skyrings/skyring-common/tools/uuid"
)

@@ -306,12 +305,6 @@ func listMetaBucketMultipartFiles(layer ObjectLayer, prefixPath string, markerPa
}
// For any walk error return right away.
if walkResult.err != nil {
log.WithFields(logrus.Fields{
"bucket": minioMetaBucket,
"prefix": prefixPath,
"marker": markerPath,
"recursive": recursive,
}).Debugf("Walk resulted in an error %s", walkResult.err)
// File not found or Disk not found is a valid case.
if walkResult.err == errFileNotFound || walkResult.err == errDiskNotFound {
return nil, true, nil

@@ -326,9 +319,6 @@ func listMetaBucketMultipartFiles(layer ObjectLayer, prefixPath string, markerPa
// entries are empty.
entries, err = listLeafEntries(storage, fi.Name)
if err != nil {
log.WithFields(logrus.Fields{
"prefixPath": fi.Name,
}).Errorf("%s", err)
return nil, false, err
}
}

@@ -442,12 +432,6 @@ func listMultipartUploadsCommon(layer ObjectLayer, bucket, prefix, keyMarker, up
// List all the multipart files at prefixPath, starting with marker keyMarkerPath.
fileInfos, eof, err := listMetaBucketMultipartFiles(layer, multipartPrefixPath, multipartMarkerPath, recursive, maxUploads)
if err != nil {
log.WithFields(logrus.Fields{
"prefixPath": multipartPrefixPath,
"markerPath": multipartMarkerPath,
"recursive": recursive,
"maxUploads": maxUploads,
}).Errorf("listMetaBucketMultipartFiles failed with %s", err)
return ListMultipartsInfo{}, err
}

@@ -552,9 +536,8 @@ func isUploadIDExists(storage StorageAPI, bucket, object, uploadID string) bool
if err == errFileNotFound {
return false
}
log.Errorf("StatFile failed wtih %s", err)
errorIf(err, "Stat failed on "+minioMetaBucket+"/"+uploadIDPath+".")
return false
}
log.Debugf("FileInfo: %v", st)
return st.Mode.IsRegular()
}

@@ -19,8 +19,6 @@ package main
import (
"sort"
"strings"
"github.com/Sirupsen/logrus"
)
// Common initialization needed for both object layers.

@@ -198,7 +196,6 @@ func listObjectsCommon(layer ObjectLayer, bucket, prefix, marker, delimiter stri
var fileInfos []FileInfo
var eof bool
var nextMarker string
log.Debugf("Reading from the tree walk channel has begun.")
for i := 0; i < maxKeys; {
walkResult, ok := <-walker.ch
if !ok {

@@ -208,12 +205,6 @@ func listObjectsCommon(layer ObjectLayer, bucket, prefix, marker, delimiter stri
}
// For any walk error return right away.
if walkResult.err != nil {
log.WithFields(logrus.Fields{
"bucket": bucket,
"prefix": prefix,
"marker": marker,
"recursive": recursive,
}).Debugf("Walk resulted in an error %s", walkResult.err)
// File not found is a valid case.
if walkResult.err == errFileNotFound {
return ListObjectsInfo{}, nil

@@ -230,12 +221,6 @@ func listObjectsCommon(layer ObjectLayer, bucket, prefix, marker, delimiter stri
i++
}
params := listParams{bucket, recursive, nextMarker, prefix}
log.WithFields(logrus.Fields{
"bucket": params.bucket,
"recursive": params.recursive,
"marker": params.marker,
"prefix": params.prefix,
}).Debugf("Save the tree walk into map for subsequent requests.")
if !eof {
saveTreeWalk(layer, params, walker)
}

@@ -268,7 +253,7 @@ func isBucketExist(storage StorageAPI, bucketName string) bool {
if err == errVolumeNotFound {
return false
}
log.Errorf("StatVol failed with %s", err)
errorIf(err, "Stat failed on bucket "+bucketName+".")
return false
}
return true
@@ -98,7 +98,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
// Fetch object stat info.
objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
if err != nil {
errorIf(err, "GetObjectInfo failed.", nil)
errorIf(err, "Unable to fetch object info.")
apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey {
apiErr = errAllowableObjectNotFound(bucket, r)

@@ -128,7 +128,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
startOffset := hrange.start
readCloser, err := api.ObjectAPI.GetObject(bucket, object, startOffset)
if err != nil {
errorIf(err, "GetObject failed.", nil)
errorIf(err, "Unable to read object.")
apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey {
apiErr = errAllowableObjectNotFound(bucket, r)

@@ -146,13 +146,13 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
if hrange.length > 0 {
if _, err := io.CopyN(w, readCloser, hrange.length); err != nil {
errorIf(err, "Writing to client failed", nil)
errorIf(err, "Writing to client failed.")
// Do not send error response here, since client could have died.
return
}
} else {
if _, err := io.Copy(w, readCloser); err != nil {
errorIf(err, "Writing to client failed", nil)
errorIf(err, "Writing to client failed.")
// Do not send error response here, since client could have died.
return
}

@@ -282,7 +282,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
objInfo, err := api.ObjectAPI.GetObjectInfo(bucket, object)
if err != nil {
errorIf(err, "GetObjectInfo failed.", nil)
errorIf(err, "Unable to fetch object info.")
apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey {
apiErr = errAllowableObjectNotFound(bucket, r)

@@ -368,7 +368,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
objInfo, err := api.ObjectAPI.GetObjectInfo(sourceBucket, sourceObject)
if err != nil {
errorIf(err, "GetObjectInfo failed.", nil)
errorIf(err, "Unable to fetch object info.")
writeErrorResponse(w, r, toAPIErrorCode(err), objectSource)
return
}

@@ -397,7 +397,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// Get the object.
readCloser, err := api.ObjectAPI.GetObject(sourceBucket, sourceObject, startOffset)
if err != nil {
errorIf(err, "Reading "+objectSource+" failed.", nil)
errorIf(err, "Unable to read an object.")
writeErrorResponse(w, r, toAPIErrorCode(err), objectSource)
return
}

@@ -407,14 +407,14 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// Create the object.
md5Sum, err := api.ObjectAPI.PutObject(bucket, object, size, readCloser, nil)
if err != nil {
errorIf(err, "PutObject failed.", nil)
errorIf(err, "Unable to create an object.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}
objInfo, err = api.ObjectAPI.GetObjectInfo(bucket, object)
if err != nil {
errorIf(err, "GetObjectInfo failed.", nil)
errorIf(err, "Unable to fetch object info.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}

@@ -538,7 +538,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
// Get Content-Md5 sent by client and verify if valid
md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
if err != nil {
errorIf(err, "Decoding md5 failed.", nil)
errorIf(err, "Unable to validate content-md5 format.")
writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
return
}

@@ -583,7 +583,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
if wErr == io.ErrClosedPipe {
return
}
errorIf(wErr, "Unable to read HTTP body.", nil)
errorIf(wErr, "Unable to read from HTTP body.")
writer.CloseWithError(wErr)
return
}

@@ -620,7 +620,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
wg.Wait()
}
if err != nil {
errorIf(err, "PutObject failed.", nil)
errorIf(err, "Unable to create an object.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}

@@ -659,7 +659,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
uploadID, err := api.ObjectAPI.NewMultipartUpload(bucket, object)
if err != nil {
errorIf(err, "NewMultipartUpload failed.", nil)
errorIf(err, "Unable to initiate new multipart upload id.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}

@@ -681,7 +681,6 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
// get Content-Md5 sent by client and verify if valid
md5Bytes, err := checkValidMD5(r.Header.Get("Content-Md5"))
if err != nil {
errorIf(err, "Decoding md5 failed.", nil)
writeErrorResponse(w, r, ErrInvalidDigest, r.URL.Path)
return
}

@@ -739,7 +738,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
if wErr == io.ErrClosedPipe {
return
}
errorIf(wErr, "Unable to read HTTP body.", nil)
errorIf(wErr, "Unable to read from HTTP request body.")
writer.CloseWithError(wErr)
return
}

@@ -771,7 +770,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
wg.Wait()
}
if err != nil {
errorIf(err, "PutObjectPart failed.", nil)
errorIf(err, "Unable to create object part.")
// Verify if the underlying error is signature mismatch.
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return

@@ -808,7 +807,7 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
uploadID, _, _, _ := getObjectResources(r.URL.Query())
if err := api.ObjectAPI.AbortMultipartUpload(bucket, object, uploadID); err != nil {
errorIf(err, "AbortMutlipartUpload failed.", nil)
errorIf(err, "Unable to abort multipart upload.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}

@@ -854,7 +853,7 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht
listPartsInfo, err := api.ObjectAPI.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
if err != nil {
errorIf(err, "ListObjectParts failed.", nil)
errorIf(err, "Unable to list uploaded parts.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}

@@ -896,13 +895,13 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
}
completeMultipartBytes, err := ioutil.ReadAll(r.Body)
if err != nil {
errorIf(err, "CompleteMultipartUpload failed.", nil)
errorIf(err, "Unable to complete multipart upload.")
writeErrorResponse(w, r, ErrInternalError, r.URL.Path)
return
}
complMultipartUpload := &completeMultipartUpload{}
if err = xml.Unmarshal(completeMultipartBytes, complMultipartUpload); err != nil {
errorIf(err, "XML Unmarshal failed", nil)
errorIf(err, "Unable to parse complete multipart upload XML.")
writeErrorResponse(w, r, ErrMalformedXML, r.URL.Path)
return
}

@@ -920,7 +919,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
// Complete multipart upload.
md5Sum, err = api.ObjectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts)
if err != nil {
errorIf(err, "CompleteMultipartUpload failed.", nil)
errorIf(err, "Unable to complete multipart upload.")
writeErrorResponse(w, r, toAPIErrorCode(err), r.URL.Path)
return
}
@@ -25,8 +25,6 @@ import (
"strings"
"syscall"
"unsafe"
"github.com/Sirupsen/logrus"
)
const (

@@ -114,10 +112,6 @@ func readDir(dirPath string) (entries []string, err error) {
buf := make([]byte, readDirentBufSize)
d, err := os.Open(dirPath)
if err != nil {
log.WithFields(logrus.Fields{
"dirPath": dirPath,
}).Debugf("Open failed with %s", err)
// File is really not found.
if os.IsNotExist(err) {
return nil, errFileNotFound

@@ -22,18 +22,12 @@ import (
"io"
"os"
"strings"
"github.com/Sirupsen/logrus"
)
// Return all the entries at the directory dirPath.
func readDir(dirPath string) (entries []string, err error) {
d, err := os.Open(dirPath)
if err != nil {
log.WithFields(logrus.Fields{
"dirPath": dirPath,
}).Debugf("Open failed with %s", err)
// File is really not found.
if os.IsNotExist(err) {
return nil, errFileNotFound
posix.go

@@ -24,7 +24,6 @@ import (
"strings"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/safe"
)

@@ -68,7 +67,7 @@ func checkPathLength(pathName string) error {
func isDirEmpty(dirname string) bool {
f, err := os.Open(dirname)
if err != nil {
log.Errorf("Unable to access directory %s, failed with %s", dirname, err)
errorIf(err, "Unable to access directory.")
return false
}
defer f.Close()

@@ -76,11 +75,10 @@ func isDirEmpty(dirname string) bool {
_, err = f.Readdirnames(1)
if err != nil {
if err == io.EOF {
// Returns true if we have reached EOF, directory is
// indeed empty.
// Returns true if we have reached EOF, directory is indeed empty.
return true
}
log.Errorf("Unable to list directory %s, failed with %s", dirname, err)
errorIf(err, "Unable to list directory.")
return false
}
// Directory is not empty.

@@ -90,7 +88,6 @@ func isDirEmpty(dirname string) bool {
// Initialize a new storage disk.
func newPosix(diskPath string) (StorageAPI, error) {
if diskPath == "" {
log.Error("Disk cannot be empty")
return nil, errInvalidArgument
}
fs := fsStorage{

@@ -99,24 +96,14 @@ func newPosix(diskPath string) (StorageAPI, error) {
}
st, err := os.Stat(diskPath)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": diskPath,
}).Debugf("Stat failed, with error %s.", err)
if os.IsNotExist(err) {
return fs, errDiskNotFound
}
return fs, err
}
if !st.IsDir() {
log.WithFields(logrus.Fields{
"diskPath": diskPath,
}).Debugf("Disk %s.", syscall.ENOTDIR)
return fs, syscall.ENOTDIR
}
log.WithFields(logrus.Fields{
"diskPath": diskPath,
"minFreeDisk": fsMinSpacePercent,
}).Debugf("Successfully configured FS storage API.")
return fs, nil
}

@@ -127,9 +114,6 @@ func checkDiskFree(diskPath string, minFreeDisk int64) (err error) {
}
di, err := disk.GetInfo(diskPath)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": diskPath,
}).Debugf("Failed to get disk info, %s", err)
return err
}

@@ -137,10 +121,6 @@ func checkDiskFree(diskPath string, minFreeDisk int64) (err error) {
// space used for journalling, inodes etc.
availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
if int64(availableDiskSpace) <= minFreeDisk {
log.WithFields(logrus.Fields{
"availableDiskSpace": int64(availableDiskSpace),
"minFreeDiskSpace": minFreeDisk,
}).Debugf("Disk free space has reached its limit.")
return errDiskFull
}

@@ -175,9 +155,6 @@ func getAllUniqueVols(dirPath string) ([]VolInfo, error) {
}
entries, err := readDir(dirPath)
if err != nil {
log.WithFields(logrus.Fields{
"dirPath": dirPath,
}).Debugf("readDir failed with error %s", err)
return nil, errDiskNotFound
}
var volsInfo []VolInfo

@@ -189,9 +166,6 @@ func getAllUniqueVols(dirPath string) ([]VolInfo, error) {
var fi os.FileInfo
fi, err = os.Stat(pathJoin(dirPath, entry))
if err != nil {
log.WithFields(logrus.Fields{
"path": pathJoin(dirPath, entry),
}).Debugf("Stat failed with error %s", err)
// If the file does not exist, skip the entry.
if os.IsNotExist(err) {
continue

@@ -241,14 +215,8 @@ func (s fsStorage) getVolumeDir(volume string) (string, error) {
}
return volumeDir, errVolumeNotFound
} else if os.IsPermission(err) {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
}).Debugf("Stat failed with error %s", err)
return volumeDir, errVolumeAccessDenied
}
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
}).Debugf("Stat failed with error %s", err)
return volumeDir, err
}

@@ -271,11 +239,6 @@ func (s fsStorage) MakeVol(volume string) (err error) {
return os.Mkdir(volumeDir, 0700)
}
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("MakeVol failed with %s", err)
// For all other errors return here.
return err
}

@@ -286,16 +249,10 @@ func (s fsStorage) ListVols() (volsInfo []VolInfo, err error) {
var diskInfo disk.Info
diskInfo, err = disk.GetInfo(s.diskPath)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
}).Debugf("Failed to get disk info, %s", err)
return nil, err
}
volsInfo, err = getAllUniqueVols(s.diskPath)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
}).Debugf("getAllUniqueVols failed with %s", err)
return nil, err
}
for i, vol := range volsInfo {

@@ -320,20 +277,12 @@ func (s fsStorage) StatVol(volume string) (volInfo VolInfo, err error) {
// Verify if volume is valid and it exists.
volumeDir, err := s.getVolumeDir(volume)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("getVolumeDir failed with %s", err)
return VolInfo{}, err
}
// Stat a volume entry.
var st os.FileInfo
st, err = os.Stat(volumeDir)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("Stat on the volume failed with %s", err)
if os.IsNotExist(err) {
return VolInfo{}, errVolumeNotFound
}

@@ -343,10 +292,6 @@ func (s fsStorage) StatVol(volume string) (volInfo VolInfo, err error) {
var diskInfo disk.Info
diskInfo, err = disk.GetInfo(s.diskPath)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("Failed to get disk info, %s", err)
return VolInfo{}, err
}
// As os.Stat() doesn't carry other than ModTime(), use ModTime()

@@ -366,18 +311,10 @@ func (s fsStorage) DeleteVol(volume string) error {
// Verify if volume is valid and it exists.
volumeDir, err := s.getVolumeDir(volume)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("getVolumeDir failed with %s", err)
return err
}
err = os.Remove(volumeDir)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("Volume remove failed with %s", err)
if os.IsNotExist(err) {
return errVolumeNotFound
} else if strings.Contains(err.Error(), "directory is not empty") {

@@ -400,19 +337,11 @@ func (s fsStorage) ListDir(volume, dirPath string) ([]string, error) {
// Verify if volume is valid and it exists.
volumeDir, err := s.getVolumeDir(volume)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("getVolumeDir failed with %s", err)
return nil, err
}
// Stat a volume entry.
_, err = os.Stat(volumeDir)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("Stat on the volume failed with %s", err)
if os.IsNotExist(err) {
return nil, errVolumeNotFound
}

@@ -425,10 +354,6 @@ func (s fsStorage) ListDir(volume, dirPath string) ([]string, error) {
func (s fsStorage) ReadFile(volume string, path string, offset int64) (readCloser io.ReadCloser, err error) {
volumeDir, err := s.getVolumeDir(volume)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("getVolumeDir failed with %s", err)
return nil, err
}

@@ -443,36 +368,19 @@ func (s fsStorage) ReadFile(volume string, path string, offset int64) (readClose
} else if os.IsPermission(err) {
return nil, errFileAccessDenied
}
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"filePath": filePath,
}).Debugf("Opening a file failed with %s", err)
return nil, err
}
st, err := file.Stat()
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"filePath": filePath,
}).Debugf("Stat failed with %s", err)
return nil, err
}
// Verify if its not a regular file, since subsequent Seek is undefined.
if !st.Mode().IsRegular() {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"filePath": filePath,
}).Debugf("Unexpected type %s", errIsNotRegular)
return nil, errFileNotFound
}
// Seek to requested offset.
_, err = file.Seek(offset, os.SEEK_SET)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"filePath": filePath,
"offset": offset,
}).Debugf("Seek failed with %s", err)
return nil, err
}
return file, nil

@@ -482,10 +390,6 @@ func (s fsStorage) ReadFile(volume string, path string, offset int64) (readClose
func (s fsStorage) CreateFile(volume, path string) (writeCloser io.WriteCloser, err error) {
volumeDir, err := s.getVolumeDir(volume)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("getVolumeDir failed with %s", err)
return nil, err
}
if err = checkDiskFree(s.diskPath, s.minFreeDisk); err != nil {

@@ -499,10 +403,6 @@ func (s fsStorage) CreateFile(volume, path string) (writeCloser io.WriteCloser,
var st os.FileInfo
if st, err = os.Stat(filePath); err == nil {
if st.IsDir() {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"filePath": filePath,
}).Debugf("Unexpected type %s", errIsNotRegular)
return nil, errIsNotRegular
}
}

@@ -521,10 +421,6 @@ func (s fsStorage) CreateFile(volume, path string) (writeCloser io.WriteCloser,
func (s fsStorage) StatFile(volume, path string) (file FileInfo, err error) {
volumeDir, err := s.getVolumeDir(volume)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("getVolumeDir failed with %s", err)
return FileInfo{}, err
}

@@ -534,11 +430,6 @@ func (s fsStorage) StatFile(volume, path string) (file FileInfo, err error) {
}
st, err := os.Stat(filePath)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"filePath": filePath,
}).Debugf("Stat failed with %s", err)
// File is really not found.
if os.IsNotExist(err) {
return FileInfo{}, errFileNotFound

@@ -555,10 +446,6 @@ func (s fsStorage) StatFile(volume, path string) (file FileInfo, err error) {
// If its a directory its not a regular file.
if st.Mode().IsDir() {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"filePath": filePath,
}).Debugf("File is %s.", errIsNotRegular)
return FileInfo{}, errFileNotFound
}
return FileInfo{

@@ -578,9 +465,6 @@ func deleteFile(basePath, deletePath string) error {
// Verify if the path exists.
pathSt, err := os.Stat(deletePath)
if err != nil {
log.WithFields(logrus.Fields{
"deletePath": deletePath,
}).Debugf("Stat failed with %s", err)
if os.IsNotExist(err) {
return errFileNotFound
} else if os.IsPermission(err) {

@@ -594,17 +478,10 @@ func deleteFile(basePath, deletePath string) error {
}
// Attempt to remove path.
if err := os.Remove(deletePath); err != nil {
log.WithFields(logrus.Fields{
"deletePath": deletePath,
}).Debugf("Remove failed with %s", err)
return err
}
// Recursively go down the next path and delete again.
if err := deleteFile(basePath, slashpath.Dir(deletePath)); err != nil {
log.WithFields(logrus.Fields{
"basePath": basePath,
"deleteDir": slashpath.Dir(deletePath),
}).Debugf("deleteFile failed with %s", err)
return err
}
return nil

@@ -614,10 +491,6 @@ func deleteFile(basePath, deletePath string) error {
func (s fsStorage) DeleteFile(volume, path string) error {
volumeDir, err := s.getVolumeDir(volume)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": volume,
}).Debugf("getVolumeDir failed with %s", err)
return err
}

@@ -636,39 +509,25 @@ func (s fsStorage) DeleteFile(volume, path string) error {
func (s fsStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error {
srcVolumeDir, err := s.getVolumeDir(srcVolume)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": srcVolume,
}).Errorf("getVolumeDir failed with %s", err)
return err
}
dstVolumeDir, err := s.getVolumeDir(dstVolume)
if err != nil {
log.WithFields(logrus.Fields{
"diskPath": s.diskPath,
"volume": dstVolume,
}).Errorf("getVolumeDir failed with %s", err)
return err
}
srcIsDir := strings.HasSuffix(srcPath, slashSeparator)
dstIsDir := strings.HasSuffix(dstPath, slashSeparator)
// for XL src and dst are always directories.
// for FS src and dst are always files.
// Either src and dst have to be directories or files, else return error.
if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) {
// Either src and dst have to be directories or files, else return error.
log.Errorf("source and destination are not of same file type. source=%s, destination=%s", srcPath, dstPath)
return errFileAccessDenied
}
if srcIsDir {
// If source is a directory we expect the destination to be non-existent always.
_, err = os.Stat(slashpath.Join(dstVolumeDir, dstPath))
if err == nil {
log.Errorf("Source is a directory and destination exists. source=%s, destination=%s", srcPath, dstPath)
return errFileAccessDenied
}
if !os.IsNotExist(err) {
// Return error for any error other than ENOENT.
log.Errorf("Stat failed with %s", err)
return err
}
// Destination does not exist, hence proceed with the rename.

@@ -678,7 +537,6 @@ func (s fsStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) err
if strings.Contains(err.Error(), "not a directory") {
return errFileAccessDenied
}
log.Errorf("os.MkdirAll failed with %s", err)
return err
}
err = os.Rename(slashpath.Join(srcVolumeDir, srcPath), slashpath.Join(dstVolumeDir, dstPath))

@@ -686,7 +544,6 @@ func (s fsStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) err
if os.IsNotExist(err) {
return errFileNotFound
}
log.Errorf("os.Rename failed with %s", err)
return err
}
return nil
@ -42,11 +42,11 @@ func newObjectLayer(exportPaths ...string) (ObjectLayer, error) {
|
||||
// configureServer handler returns final handler for the http server.
|
||||
func configureServerHandler(srvCmdConfig serverCmdConfig) http.Handler {
|
||||
objAPI, err := newObjectLayer(srvCmdConfig.exportPaths...)
|
||||
fatalIf(err, "Initializing object layer failed.", nil)
|
||||
fatalIf(err, "Unable to intialize object layer.")
|
||||
|
||||
// Initialize storage rpc server.
|
||||
storageRPC, err := newRPCServer(srvCmdConfig.exportPaths[0]) // FIXME: should only have one path.
|
||||
fatalIf(err, "Initializing storage rpc server failed.", nil)
|
||||
fatalIf(err, "Unable to initialize storage RPC server.")
|
||||
|
||||
// Initialize API.
|
||||
apiHandlers := objectAPIHandlers{
|
||||
|
@ -27,8 +27,6 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
type networkStorage struct {
|
||||
@ -80,9 +78,6 @@ func toStorageErr(err error) error {
|
||||
func newRPCClient(networkPath string) (StorageAPI, error) {
|
||||
// Input validation.
|
||||
if networkPath == "" || strings.LastIndex(networkPath, ":") == -1 {
|
||||
log.WithFields(logrus.Fields{
|
||||
"networkPath": networkPath,
|
||||
}).Debugf("Network path is malformed, should be of form <ip>:<port>:<export_dir>")
|
||||
return nil, errInvalidArgument
|
||||
}
|
||||
|
||||
@ -92,10 +87,6 @@ func newRPCClient(networkPath string) (StorageAPI, error) {
|
||||
// Dial minio rpc storage http path.
|
||||
rpcClient, err := rpc.DialHTTPPath("tcp", netAddr, storageRPCPath)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"netAddr": netAddr,
|
||||
"storageRPCPath": storageRPCPath,
|
||||
}).Debugf("RPC HTTP dial failed with %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -125,9 +116,6 @@ func newRPCClient(networkPath string) (StorageAPI, error) {
|
||||
func (n networkStorage) MakeVol(volume string) error {
|
||||
reply := GenericReply{}
|
||||
if err := n.rpcClient.Call("Storage.MakeVolHandler", volume, &reply); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
}).Debugf("Storage.MakeVolHandler returned an error %s", err)
|
||||
return toStorageErr(err)
|
||||
}
|
||||
return nil
|
||||
@ -138,7 +126,6 @@ func (n networkStorage) ListVols() (vols []VolInfo, err error) {
|
||||
ListVols := ListVolsReply{}
|
||||
err = n.rpcClient.Call("Storage.ListVolsHandler", "", &ListVols)
|
||||
if err != nil {
|
||||
log.Debugf("Storage.ListVolsHandler returned an error %s", err)
|
||||
return nil, err
|
||||
}
|
||||
return ListVols.Vols, nil
|
||||
@ -147,9 +134,6 @@ func (n networkStorage) ListVols() (vols []VolInfo, err error) {
|
||||
// StatVol - get current Stat volume info.
|
||||
func (n networkStorage) StatVol(volume string) (volInfo VolInfo, err error) {
|
||||
if err = n.rpcClient.Call("Storage.StatVolHandler", volume, &volInfo); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
}).Debugf("Storage.StatVolHandler returned an error %s", err)
|
||||
return VolInfo{}, toStorageErr(err)
|
||||
}
|
||||
return volInfo, nil
|
||||
@ -159,9 +143,6 @@ func (n networkStorage) StatVol(volume string) (volInfo VolInfo, err error) {
|
||||
func (n networkStorage) DeleteVol(volume string) error {
|
||||
reply := GenericReply{}
|
||||
if err := n.rpcClient.Call("Storage.DeleteVolHandler", volume, &reply); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
}).Debugf("Storage.DeleteVolHandler returned an error %s", err)
|
||||
return toStorageErr(err)
|
||||
}
|
||||
return nil
|
||||
@ -181,10 +162,6 @@ func (n networkStorage) CreateFile(volume, path string) (writeCloser io.WriteClo
|
||||
go func() {
|
||||
resp, err := n.httpClient.Post(writeURL.String(), contentType, readCloser)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Debugf("CreateFile HTTP POST failed to upload data with error %s", err)
|
||||
readCloser.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
@ -210,10 +187,6 @@ func (n networkStorage) StatFile(volume, path string) (fileInfo FileInfo, err er
|
||||
Vol: volume,
|
||||
Path: path,
|
||||
}, &fileInfo); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Debugf("Storage.StatFileHandler failed with %s", err)
|
||||
return FileInfo{}, toStorageErr(err)
|
||||
}
|
||||
return fileInfo, nil
|
||||
@ -230,10 +203,6 @@ func (n networkStorage) ReadFile(volume string, path string, offset int64) (read
|
||||
readURL.RawQuery = readQuery.Encode()
|
||||
resp, err := n.httpClient.Get(readURL.String())
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Debugf("ReadFile http Get failed with error %s", err)
|
||||
return nil, err
|
||||
}
|
||||
if resp != nil {
|
||||
@ -253,10 +222,6 @@ func (n networkStorage) ListDir(volume, path string) (entries []string, err erro
|
||||
Vol: volume,
|
||||
Path: path,
|
||||
}, &entries); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Debugf("Storage.ListDirHandlers failed with %s", err)
|
||||
return nil, toStorageErr(err)
|
||||
}
|
||||
// Return successfully unmarshalled results.
|
||||
@ -270,10 +235,6 @@ func (n networkStorage) DeleteFile(volume, path string) (err error) {
|
||||
Vol: volume,
|
||||
Path: path,
|
||||
}, &reply); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Debugf("Storage.DeleteFileHandler failed with %s", err)
|
||||
return toStorageErr(err)
|
||||
}
|
||||
return nil
|
||||
@ -288,12 +249,6 @@ func (n networkStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string
|
||||
DstVol: dstVolume,
|
||||
DstPath: dstPath,
|
||||
}, &reply); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"srcVolume": srcVolume,
|
||||
"srcPath": srcPath,
|
||||
"dstVolume": dstVolume,
|
||||
"dstPath": dstPath,
|
||||
}).Errorf("Storage.RenameFileHandler failed with %s", err)
|
||||
return toStorageErr(err)
|
||||
}
|
||||
return nil
|
||||
|
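The client hunks above all funnel RPC failures through toStorageErr before returning them. Since net/rpc only carries the error text across the wire, a plausible shape for that helper is sketched below; the mapping set and the error messages are assumptions inferred from the typed errors that appear elsewhere in this diff, not the actual function body.

package main

import (
	"errors"
	"fmt"
)

// Typed storage errors referenced by the sketch (defined elsewhere in the
// real code base; the message strings here are placeholders).
var (
	errVolumeNotFound = errors.New("volume not found")
	errFileNotFound   = errors.New("file not found")
)

// toStorageErr (sketch): net/rpc only transports the error string, so map
// well-known messages back to typed errors and pass everything else through.
func toStorageErr(err error) error {
	if err == nil {
		return nil
	}
	switch err.Error() {
	case errVolumeNotFound.Error():
		return errVolumeNotFound
	case errFileNotFound.Error():
		return errFileNotFound
	}
	return err
}

func main() {
	// A string error coming off the wire is folded back into the typed error.
	err := errors.New("file not found")
	fmt.Println(toStorageErr(err) == errFileNotFound) // true
}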
@ -6,7 +6,6 @@ import (
|
||||
"net/rpc"
|
||||
"strconv"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
router "github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
@ -22,9 +21,6 @@ type storageServer struct {
|
||||
func (s *storageServer) MakeVolHandler(arg *string, reply *GenericReply) error {
|
||||
err := s.storage.MakeVol(*arg)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": *arg,
|
||||
}).Debugf("MakeVol failed with error %s", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@ -34,7 +30,6 @@ func (s *storageServer) MakeVolHandler(arg *string, reply *GenericReply) error {
|
||||
func (s *storageServer) ListVolsHandler(arg *string, reply *ListVolsReply) error {
|
||||
vols, err := s.storage.ListVols()
|
||||
if err != nil {
|
||||
log.Debugf("Listsvols failed with error %s", err)
|
||||
return err
|
||||
}
|
||||
reply.Vols = vols
|
||||
@ -45,9 +40,6 @@ func (s *storageServer) ListVolsHandler(arg *string, reply *ListVolsReply) error
|
||||
func (s *storageServer) StatVolHandler(arg *string, reply *VolInfo) error {
|
||||
volInfo, err := s.storage.StatVol(*arg)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": *arg,
|
||||
}).Debugf("StatVol failed with error %s", err)
|
||||
return err
|
||||
}
|
||||
*reply = volInfo
|
||||
@ -59,9 +51,6 @@ func (s *storageServer) StatVolHandler(arg *string, reply *VolInfo) error {
|
||||
func (s *storageServer) DeleteVolHandler(arg *string, reply *GenericReply) error {
|
||||
err := s.storage.DeleteVol(*arg)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": *arg,
|
||||
}).Debugf("DeleteVol failed with error %s", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@ -73,10 +62,6 @@ func (s *storageServer) DeleteVolHandler(arg *string, reply *GenericReply) error
|
||||
func (s *storageServer) StatFileHandler(arg *StatFileArgs, reply *FileInfo) error {
|
||||
fileInfo, err := s.storage.StatFile(arg.Vol, arg.Path)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": arg.Vol,
|
||||
"path": arg.Path,
|
||||
}).Debugf("StatFile failed with error %s", err)
|
||||
return err
|
||||
}
|
||||
*reply = fileInfo
|
||||
@ -87,10 +72,6 @@ func (s *storageServer) StatFileHandler(arg *StatFileArgs, reply *FileInfo) erro
|
||||
func (s *storageServer) ListDirHandler(arg *ListDirArgs, reply *[]string) error {
|
||||
entries, err := s.storage.ListDir(arg.Vol, arg.Path)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": arg.Vol,
|
||||
"path": arg.Path,
|
||||
}).Debugf("ListDir failed with error %s", err)
|
||||
return err
|
||||
}
|
||||
*reply = entries
|
||||
@ -101,10 +82,6 @@ func (s *storageServer) ListDirHandler(arg *ListDirArgs, reply *[]string) error
|
||||
func (s *storageServer) DeleteFileHandler(arg *DeleteFileArgs, reply *GenericReply) error {
|
||||
err := s.storage.DeleteFile(arg.Vol, arg.Path)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": arg.Vol,
|
||||
"path": arg.Path,
|
||||
}).Debugf("DeleteFile failed with error %s", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@ -114,12 +91,6 @@ func (s *storageServer) DeleteFileHandler(arg *DeleteFileArgs, reply *GenericRep
|
||||
func (s *storageServer) RenameFileHandler(arg *RenameFileArgs, reply *GenericReply) error {
|
||||
err := s.storage.RenameFile(arg.SrcVol, arg.SrcPath, arg.DstVol, arg.DstPath)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"srcVolume": arg.SrcVol,
|
||||
"srcPath": arg.SrcPath,
|
||||
"dstVolume": arg.DstVol,
|
||||
"dstPath": arg.DstPath,
|
||||
}).Errorf("RenameFile failed with error %s", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@ -151,10 +122,6 @@ func registerStorageRPCRouter(mux *router.Router, stServer *storageServer) {
|
||||
path := vars["path"]
|
||||
writeCloser, err := stServer.storage.CreateFile(volume, path)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Debugf("CreateFile failed with error %s", err)
|
||||
httpErr := http.StatusInternalServerError
|
||||
if err == errVolumeNotFound {
|
||||
httpErr = http.StatusNotFound
|
||||
@ -166,10 +133,6 @@ func registerStorageRPCRouter(mux *router.Router, stServer *storageServer) {
|
||||
}
|
||||
reader := r.Body
|
||||
if _, err = io.Copy(writeCloser, reader); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Debugf("Copying incoming reader to writer failed %s", err)
|
||||
safeCloseAndRemove(writeCloser)
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
@ -184,19 +147,11 @@ func registerStorageRPCRouter(mux *router.Router, stServer *storageServer) {
|
||||
path := vars["path"]
|
||||
offset, err := strconv.ParseInt(r.URL.Query().Get("offset"), 10, 64)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Debugf("Parse offset failure with error %s", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
readCloser, err := stServer.storage.ReadFile(volume, path, offset)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Debugf("ReadFile failed with error %s", err)
|
||||
httpErr := http.StatusBadRequest
|
||||
if err == errVolumeNotFound {
|
||||
httpErr = http.StatusNotFound
|
||||
|
@ -91,14 +91,14 @@ func configureServer(srvCmdConfig serverCmdConfig) *http.Server {
|
||||
// getListenIPs - gets all the ips to listen on.
|
||||
func getListenIPs(httpServerConf *http.Server) (hosts []string, port string) {
|
||||
host, port, err := net.SplitHostPort(httpServerConf.Addr)
|
||||
fatalIf(err, "Unable to split host port.", nil)
|
||||
fatalIf(err, "Unable to parse host port.")
|
||||
|
||||
switch {
|
||||
case host != "":
|
||||
hosts = append(hosts, host)
|
||||
default:
|
||||
addrs, err := net.InterfaceAddrs()
|
||||
fatalIf(err, "Unable to get interface address.", nil)
|
||||
fatalIf(err, "Unable to determine network interface address.")
|
||||
for _, addr := range addrs {
|
||||
if addr.Network() == "ip+net" {
|
||||
host := strings.Split(addr.String(), "/")[0]
|
||||
@ -126,7 +126,7 @@ func printListenIPs(tls bool, hosts []string, port string) {
|
||||
func initServerConfig(c *cli.Context) {
|
||||
// Save new config.
|
||||
err := serverConfig.Save()
|
||||
fatalIf(err, "Unable to save config.", nil)
|
||||
fatalIf(err, "Unable to save config.")
|
||||
|
||||
// Fetch access keys from environment variables if any and update the config.
|
||||
accessKey := os.Getenv("MINIO_ACCESS_KEY")
|
||||
@ -135,10 +135,10 @@ func initServerConfig(c *cli.Context) {
|
||||
// Validate if both keys are specified and they are valid save them.
|
||||
if accessKey != "" && secretKey != "" {
|
||||
if !isValidAccessKey.MatchString(accessKey) {
|
||||
fatalIf(errInvalidArgument, "Access key does not have required length", nil)
|
||||
fatalIf(errInvalidArgument, "Invalid access key.")
|
||||
}
|
||||
if !isValidSecretKey.MatchString(secretKey) {
|
||||
fatalIf(errInvalidArgument, "Secret key does not have required length", nil)
|
||||
fatalIf(errInvalidArgument, "Invalid secret key.")
|
||||
}
|
||||
serverConfig.SetCredential(credential{
|
||||
AccessKeyID: accessKey,
|
||||
@ -162,9 +162,9 @@ func checkServerSyntax(c *cli.Context) {
|
||||
// Extract port number from the address; it should be of the form host:port.
|
||||
func getPort(address string) int {
|
||||
_, portStr, err := net.SplitHostPort(address)
|
||||
fatalIf(err, "Unable to split host port.", nil)
|
||||
fatalIf(err, "Unable to parse host port.")
|
||||
portInt, err := strconv.Atoi(portStr)
|
||||
fatalIf(err, "Invalid port number.", nil)
|
||||
fatalIf(err, "Invalid port number.")
|
||||
return portInt
|
||||
}
|
||||
|
||||
@ -201,17 +201,17 @@ func checkPortAvailability(port int) {
|
||||
}
|
||||
ifcs, err := net.Interfaces()
|
||||
if err != nil {
|
||||
fatalIf(err, "Unable to list interfaces.", nil)
|
||||
fatalIf(err, "Unable to list interfaces.")
|
||||
}
|
||||
for _, ifc := range ifcs {
|
||||
addrs, err := ifc.Addrs()
|
||||
if err != nil {
|
||||
fatalIf(err, fmt.Sprintf("Unable to list addresses on interface %s.", ifc.Name), nil)
|
||||
fatalIf(err, "Unable to list addresses on interface %s.", ifc.Name)
|
||||
}
|
||||
for _, addr := range addrs {
|
||||
ipnet, ok := addr.(*net.IPNet)
|
||||
if !ok {
|
||||
errorIf(errors.New(""), "Interface type assertion to (*net.IPNet) failed.", nil)
|
||||
errorIf(errors.New(""), "Failed to assert type on (*net.IPNet) interface.")
|
||||
continue
|
||||
}
|
||||
ip := ipnet.IP
|
||||
@ -224,14 +224,14 @@ func checkPortAvailability(port int) {
|
||||
if err != nil {
|
||||
if isAddrInUse(err) {
|
||||
// Fail if port is already in use.
|
||||
fatalIf(err, fmt.Sprintf("Unable to listen on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil)
|
||||
fatalIf(err, "Unable to listen on %s:%.d.", tcpAddr.IP, tcpAddr.Port)
|
||||
} else {
|
||||
// Ignore other errors.
|
||||
continue
|
||||
}
|
||||
}
|
||||
if err = l.Close(); err != nil {
|
||||
fatalIf(err, fmt.Sprintf("Unable to close listener on IP %s, port %.d", tcpAddr.IP, tcpAddr.Port), nil)
|
||||
fatalIf(err, "Unable to close listener on %s:%.d.", tcpAddr.IP, tcpAddr.Port)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -316,5 +316,5 @@ func serverMain(c *cli.Context) {
|
||||
// Fallback to http.
|
||||
err = apiServer.ListenAndServe()
|
||||
}
|
||||
errorIf(err, "Failed to start the minio server.", nil)
|
||||
errorIf(err, "Failed to start minio server.")
|
||||
}
|
||||
|
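The server-main hunks above change every fatalIf and errorIf call from a trailing nil fields argument to a printf-style variadic form, e.g. fatalIf(err, "Unable to listen on %s:%.d.", tcpAddr.IP, tcpAddr.Port). A minimal sketch of what the reworked helpers might look like follows; only the call shape is taken from this diff, while the bodies and the "cause" field name are assumptions.

package main

import "github.com/Sirupsen/logrus"

var log = logrus.New()

// errorIf (sketch): report a non-nil error together with a printf-style message.
func errorIf(err error, msg string, data ...interface{}) {
	if err == nil {
		return
	}
	log.WithFields(logrus.Fields{"cause": err.Error()}).Errorf(msg, data...)
}

// fatalIf (sketch): like errorIf, but treats the error as unrecoverable and
// exits the process.
func fatalIf(err error, msg string, data ...interface{}) {
	if err == nil {
		return
	}
	log.WithFields(logrus.Fields{"cause": err.Error()}).Fatalf(msg, data...)
}

func main() {
	fatalIf(nil, "Unable to initialize object layer.") // no-op when err is nil
}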
tree-walk.go
@ -23,8 +23,6 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
// listParams - list object params used for list object map
|
||||
@ -258,18 +256,10 @@ func saveTreeWalk(layer ObjectLayer, params listParams, walker *treeWalker) {
|
||||
listObjectMapMutex.Lock()
|
||||
defer listObjectMapMutex.Unlock()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"bucket": params.bucket,
|
||||
"recursive": params.recursive,
|
||||
"marker": params.marker,
|
||||
"prefix": params.prefix,
|
||||
}).Debugf("saveTreeWalk has been invoked.")
|
||||
|
||||
walkers, _ := listObjectMap[params]
|
||||
walkers = append(walkers, walker)
|
||||
|
||||
listObjectMap[params] = walkers
|
||||
log.Debugf("Successfully saved in listObjectMap.")
|
||||
}
|
||||
|
||||
// Lookup the goroutine reference from map
|
||||
@ -287,12 +277,6 @@ func lookupTreeWalk(layer ObjectLayer, params listParams) *treeWalker {
|
||||
listObjectMapMutex.Lock()
|
||||
defer listObjectMapMutex.Unlock()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"bucket": params.bucket,
|
||||
"recursive": params.recursive,
|
||||
"marker": params.marker,
|
||||
"prefix": params.prefix,
|
||||
}).Debugf("lookupTreeWalk has been invoked.")
|
||||
if walkChs, ok := listObjectMap[params]; ok {
|
||||
for i, walkCh := range walkChs {
|
||||
if !walkCh.timedOut {
|
||||
@ -302,12 +286,6 @@ func lookupTreeWalk(layer ObjectLayer, params listParams) *treeWalker {
|
||||
} else {
|
||||
delete(listObjectMap, params)
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"bucket": params.bucket,
|
||||
"recursive": params.recursive,
|
||||
"marker": params.marker,
|
||||
"prefix": params.prefix,
|
||||
}).Debugf("Found the previous saved listsObjects params.")
|
||||
return walkCh
|
||||
}
|
||||
}
|
||||
|
@ -89,7 +89,7 @@ func (u updateMessage) String() string {
|
||||
return updateMessage("You are already running the most recent version of ‘minio’.")
|
||||
}
|
||||
msg, err := colorizeUpdateMessage(u.Download)
|
||||
fatalIf(err, "Unable to colorize experimental update notification string ‘"+msg+"’.", nil)
|
||||
fatalIf(err, "Unable to colorize update notice ‘"+msg+"’.")
|
||||
return msg
|
||||
}
|
||||
|
||||
@ -97,7 +97,7 @@ func (u updateMessage) String() string {
|
||||
func (u updateMessage) JSON() string {
|
||||
u.Status = "success"
|
||||
updateMessageJSONBytes, err := json.Marshal(u)
|
||||
fatalIf((err), "Unable to marshal into JSON.", nil)
|
||||
fatalIf((err), "Unable to marshal into JSON.")
|
||||
|
||||
return string(updateMessageJSONBytes)
|
||||
}
|
||||
@ -166,12 +166,12 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {
|
||||
if err != nil && noError {
|
||||
return updateMsg
|
||||
}
|
||||
fatalIf((err), "Unable to read from update URL ‘"+newUpdateURL+"’.", nil)
|
||||
fatalIf((err), "Unable to read from update URL ‘"+newUpdateURL+"’.")
|
||||
|
||||
// Error out if 'update' command is issued for development based builds.
|
||||
if minioVersion == "DEVELOPMENT.GOGET" && !noError {
|
||||
fatalIf((errors.New("")),
|
||||
"Update mechanism is not supported for ‘go get’ based binary builds. Please download official releases from https://minio.io/#minio", nil)
|
||||
"Update mechanism is not supported for ‘go get’ based binary builds. Please download official releases from https://minio.io/#minio")
|
||||
}
|
||||
|
||||
// Parse current minio version into RFC3339.
|
||||
@ -179,12 +179,12 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {
|
||||
if err != nil && noError {
|
||||
return updateMsg
|
||||
}
|
||||
fatalIf((err), "Unable to parse version string as time.", nil)
|
||||
fatalIf((err), "Unable to parse version string as time.")
|
||||
|
||||
// Verify if current minio version is zero.
|
||||
if current.IsZero() && !noError {
|
||||
fatalIf((errors.New("")),
|
||||
"Updates not supported for custom builds. Version field is empty. Please download official releases from https://minio.io/#minio", nil)
|
||||
"Updates mechanism is not supported for custom builds. Please download official releases from https://minio.io/#minio")
|
||||
}
|
||||
|
||||
// Verify if we have a valid http response i.e http.StatusOK.
|
||||
@ -194,7 +194,7 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {
|
||||
if noError {
|
||||
return updateMsg
|
||||
}
|
||||
fatalIf((errors.New("")), "Update server responsed with "+data.Status, nil)
|
||||
fatalIf((errors.New("")), "Failed to retrieve update notice. "+data.Status)
|
||||
}
|
||||
}
|
||||
|
||||
@ -203,19 +203,19 @@ func getReleaseUpdate(updateURL string, noError bool) updateMessage {
|
||||
if err != nil && noError {
|
||||
return updateMsg
|
||||
}
|
||||
fatalIf((err), "Fetching updates failed. Please try again.", nil)
|
||||
fatalIf((err), "Failed to retrieve update notice. Please try again later.")
|
||||
|
||||
// Parse the date if its valid.
|
||||
latest, err := parseReleaseData(string(updateBody))
|
||||
if err != nil && noError {
|
||||
return updateMsg
|
||||
}
|
||||
fatalIf(err, "Please report this issue at https://github.com/minio/minio/issues.", nil)
|
||||
errMsg := "Failed to retrieve update notice. Please try again later. Please report this issue at https://github.com/minio/minio/issues"
|
||||
fatalIf(err, errMsg)
|
||||
|
||||
// Verify if the date is not zero.
|
||||
if latest.IsZero() && !noError {
|
||||
fatalIf((errors.New("")),
|
||||
"Unable to validate any update available at this time. Please open an issue at https://github.com/minio/minio/issues", nil)
|
||||
fatalIf((errors.New("")), errMsg)
|
||||
}
|
||||
|
||||
// Is the update latest?.
|
||||
|
@ -20,8 +20,6 @@ import (
|
||||
"errors"
|
||||
slashpath "path"
|
||||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Get the highest integer from a given integer slice.
|
||||
@ -125,12 +123,6 @@ func (xl XL) listOnlineDisks(volume, path string) (onlineDisks []StorageAPI, mda
|
||||
// Verify if online disks count are lesser than readQuorum
|
||||
// threshold, return an error if yes.
|
||||
if onlineDiskCount < xl.readQuorum {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
"onlineDiskCount": onlineDiskCount,
|
||||
"readQuorumCount": xl.readQuorum,
|
||||
}).Errorf("%s", errReadQuorum)
|
||||
return nil, xlMetaV1{}, false, errReadQuorum
|
||||
}
|
||||
}
|
||||
|
@ -22,8 +22,6 @@ import (
|
||||
slashpath "path"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Erasure block size.
|
||||
@ -35,10 +33,7 @@ func (xl XL) cleanupCreateFileOps(volume, path string, writers ...io.WriteCloser
|
||||
closeAndRemoveWriters(writers...)
|
||||
for _, disk := range xl.storageDisks {
|
||||
if err := disk.DeleteFile(volume, path); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("DeleteFile failed with %s", err)
|
||||
errorIf(err, "Unable to delete file.")
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -47,7 +42,7 @@ func (xl XL) cleanupCreateFileOps(volume, path string, writers ...io.WriteCloser
|
||||
func closeAndRemoveWriters(writers ...io.WriteCloser) {
|
||||
for _, writer := range writers {
|
||||
if err := safeCloseAndRemove(writer); err != nil {
|
||||
log.Errorf("Closing writer failed with %s", err)
|
||||
errorIf(err, "Failed to close writer.")
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -67,10 +62,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
|
||||
// based on total number of errors and read quorum.
|
||||
err := xl.errsToStorageErr(errs)
|
||||
if err != nil && err != errFileNotFound {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("%s", err)
|
||||
reader.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
@ -96,12 +87,7 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
|
||||
var writer io.WriteCloser
|
||||
writer, err = disk.CreateFile(volume, erasurePart)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("CreateFile failed with %s", err)
|
||||
|
||||
// treat errFileNameTooLong specially
|
||||
// Treat errFileNameTooLong specially
|
||||
if err == errFileNameTooLong {
|
||||
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
|
||||
reader.CloseWithError(err)
|
||||
@ -122,14 +108,10 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
|
||||
return
|
||||
}
|
||||
|
||||
// create meta data file
|
||||
// Create meta data file.
|
||||
var metadataWriter io.WriteCloser
|
||||
metadataWriter, err = disk.CreateFile(volume, xlMetaV1FilePath)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("CreateFile failed with %s", err)
|
||||
createFileError++
|
||||
|
||||
// We can safely allow CreateFile errors up to
|
||||
@ -158,10 +140,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
|
||||
if err != nil {
|
||||
// Any unexpected errors, close the pipe reader with error.
|
||||
if err != io.ErrUnexpectedEOF && err != io.EOF {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("io.ReadFull failed with %s", err)
|
||||
// Remove all temp writers.
|
||||
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
|
||||
reader.CloseWithError(err)
|
||||
@ -177,10 +155,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
|
||||
var dataBlocks [][]byte
|
||||
dataBlocks, err = xl.ReedSolomon.Split(dataBuffer[0:n])
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("Splitting data buffer into erasure data blocks failed with %s", err)
|
||||
// Remove all temp writers.
|
||||
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
|
||||
reader.CloseWithError(err)
|
||||
@ -190,10 +164,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
|
||||
// Encode parity blocks using data blocks.
|
||||
err = xl.ReedSolomon.Encode(dataBlocks)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("Encoding erasure data blocks failed with %s", err)
|
||||
// Remove all temp writers upon error.
|
||||
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
|
||||
reader.CloseWithError(err)
|
||||
@ -220,10 +190,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
|
||||
if wErr == nil {
|
||||
continue
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("Writing encoded blocks failed with %s", wErr)
|
||||
// Remove all temp writers upon error.
|
||||
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
|
||||
reader.CloseWithError(wErr)
|
||||
@ -255,7 +221,7 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
|
||||
// Case: when storageDisks is 16 and write quorumDisks is 13,
|
||||
// meta data write failure up to 2 can be considered.
|
||||
// currently we fail for any meta data writes
|
||||
for index, metadataWriter := range metadataWriters {
|
||||
for _, metadataWriter := range metadataWriters {
|
||||
if metadataWriter == nil {
|
||||
continue
|
||||
}
|
||||
@ -263,11 +229,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
|
||||
// Write metadata.
|
||||
err = metadata.Write(metadataWriter)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
"diskIndex": index,
|
||||
}).Errorf("Writing metadata failed with %s", err)
|
||||
// Remove temporary files.
|
||||
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
|
||||
reader.CloseWithError(err)
|
||||
@ -286,11 +247,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
|
||||
}
|
||||
// Safely wrote, now rename to its actual location.
|
||||
if err = writer.Close(); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
"diskIndex": index,
|
||||
}).Errorf("Safely committing part failed with %s", err)
|
||||
// Remove all temp writers upon error.
|
||||
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
|
||||
reader.CloseWithError(err)
|
||||
@ -302,11 +258,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *w
|
||||
}
|
||||
// Safely wrote, now rename to its actual location.
|
||||
if err = metadataWriters[index].Close(); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
"diskIndex": index,
|
||||
}).Errorf("Safely committing metadata failed with %s", err)
|
||||
// Remove all temp writers upon error.
|
||||
xl.cleanupCreateFileOps(volume, path, append(writers, metadataWriters...)...)
|
||||
reader.CloseWithError(err)
|
||||
|
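The erasure createfile, healfile and readfile hunks drive github.com/klauspost/reedsolomon through Split, Encode, Verify and Reconstruct. The short, self-contained round trip below illustrates those calls in isolation; the 8+8 shard split and the simulated missing shard are arbitrary illustrative choices, not values taken from this diff.

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// 8 data and 8 parity shards; the counts here are illustrative only.
	enc, err := reedsolomon.New(8, 8)
	if err != nil {
		panic(err)
	}
	data := bytes.Repeat([]byte("minio"), 1024)

	shards, err := enc.Split(data) // split the buffer into data + parity shards
	if err != nil {
		panic(err)
	}
	if err = enc.Encode(shards); err != nil { // compute the parity shards
		panic(err)
	}

	shards[3] = nil // simulate one lost part, as the heal path must handle
	if err = enc.Reconstruct(shards); err != nil {
		panic(err)
	}
	ok, err := enc.Verify(shards) // verify reconstructed blocks again
	fmt.Println("verified:", ok, "err:", err)
}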
@ -21,8 +21,6 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
slashpath "path"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
// healHeal - heals the file at path.
|
||||
@ -39,10 +37,6 @@ func (xl XL) healFile(volume string, path string) error {
|
||||
// List all online disks to verify if we need to heal.
|
||||
onlineDisks, metadata, heal, err := xl.listOnlineDisks(volume, path)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("List online disks failed with %s", err)
|
||||
return err
|
||||
}
|
||||
if !heal {
|
||||
@ -87,10 +81,6 @@ func (xl XL) healFile(volume string, path string) error {
|
||||
writers[index], err = xl.storageDisks[index].CreateFile(volume, erasurePart)
|
||||
if err != nil {
|
||||
needsHeal[index] = false
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("CreateFile failed with error %s", err)
|
||||
safeCloseAndRemove(writers[index])
|
||||
continue
|
||||
}
|
||||
@ -129,20 +119,12 @@ func (xl XL) healFile(volume string, path string) error {
|
||||
|
||||
// Check blocks if they are all zero in length.
|
||||
if checkBlockSize(enBlocks) == 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("%s", errDataCorrupt)
|
||||
return errDataCorrupt
|
||||
}
|
||||
|
||||
// Verify the blocks.
|
||||
ok, err := xl.ReedSolomon.Verify(enBlocks)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("ReedSolomon verify failed with %s", err)
|
||||
closeAndRemoveWriters(writers...)
|
||||
return err
|
||||
}
|
||||
@ -157,30 +139,18 @@ func (xl XL) healFile(volume string, path string) error {
|
||||
}
|
||||
err = xl.ReedSolomon.Reconstruct(enBlocks)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("ReedSolomon reconstruct failed with %s", err)
|
||||
closeAndRemoveWriters(writers...)
|
||||
return err
|
||||
}
|
||||
// Verify reconstructed blocks again.
|
||||
ok, err = xl.ReedSolomon.Verify(enBlocks)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("ReedSolomon verify failed with %s", err)
|
||||
closeAndRemoveWriters(writers...)
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
// Blocks cannot be reconstructed, corrupted data.
|
||||
err = errors.New("Verification failed after reconstruction, data likely corrupted.")
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("%s", err)
|
||||
closeAndRemoveWriters(writers...)
|
||||
return err
|
||||
}
|
||||
@ -191,10 +161,6 @@ func (xl XL) healFile(volume string, path string) error {
|
||||
}
|
||||
_, err := writers[index].Write(enBlocks[index])
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("Write failed with %s", err)
|
||||
safeCloseAndRemove(writers[index])
|
||||
continue
|
||||
}
|
||||
|
@ -22,8 +22,6 @@ import (
|
||||
"io"
|
||||
slashpath "path"
|
||||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ReadFile - read file
|
||||
@ -41,10 +39,6 @@ func (xl XL) ReadFile(volume, path string, startOffset int64) (io.ReadCloser, er
|
||||
onlineDisks, metadata, heal, err := xl.listOnlineDisks(volume, path)
|
||||
nsMutex.RUnlock(volume, path)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("Get readable disks failed with %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -52,13 +46,8 @@ func (xl XL) ReadFile(volume, path string, startOffset int64) (io.ReadCloser, er
|
||||
// Heal in background safely, since we already have read
|
||||
// quorum disks. Let the reads continue.
|
||||
go func() {
|
||||
if hErr := xl.healFile(volume, path); hErr != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("healFile failed with %s", hErr)
|
||||
return
|
||||
}
|
||||
hErr := xl.healFile(volume, path)
|
||||
errorIf(hErr, "Unable to heal file "+volume+"/"+path+".")
|
||||
}()
|
||||
}
|
||||
|
||||
@ -120,10 +109,6 @@ func (xl XL) ReadFile(volume, path string, startOffset int64) (io.ReadCloser, er
|
||||
|
||||
// Check blocks if they are all zero in length.
|
||||
if checkBlockSize(enBlocks) == 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("%s", errDataCorrupt)
|
||||
pipeWriter.CloseWithError(errDataCorrupt)
|
||||
return
|
||||
}
|
||||
@ -132,10 +117,6 @@ func (xl XL) ReadFile(volume, path string, startOffset int64) (io.ReadCloser, er
|
||||
var ok bool
|
||||
ok, err = xl.ReedSolomon.Verify(enBlocks)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("ReedSolomon verify failed with %s", err)
|
||||
pipeWriter.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
@ -150,30 +131,18 @@ func (xl XL) ReadFile(volume, path string, startOffset int64) (io.ReadCloser, er
|
||||
}
|
||||
err = xl.ReedSolomon.Reconstruct(enBlocks)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("ReedSolomon reconstruct failed with %s", err)
|
||||
pipeWriter.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
// Verify reconstructed blocks again.
|
||||
ok, err = xl.ReedSolomon.Verify(enBlocks)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("ReedSolomon verify failed with %s", err)
|
||||
pipeWriter.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if !ok {
|
||||
// Blocks cannot be reconstructed, corrupted data.
|
||||
err = errors.New("Verification failed after reconstruction, data likely corrupted.")
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("%s", err)
|
||||
pipeWriter.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
@ -199,10 +168,6 @@ func (xl XL) ReadFile(volume, path string, startOffset int64) (io.ReadCloser, er
|
||||
// Write safely the necessary blocks.
|
||||
_, err = pipeWriter.Write(dataBlocks[int(startOffset):])
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("ReedSolomon joining decoded blocks failed with %s", err)
|
||||
pipeWriter.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
|
@ -26,7 +26,6 @@ import (
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/klauspost/reedsolomon"
|
||||
)
|
||||
|
||||
@ -141,9 +140,6 @@ func (xl XL) MakeVol(volume string) error {
|
||||
if err != nil && err != errVolumeNotFound {
|
||||
errCount++
|
||||
if errCount > xl.readQuorum {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
}).Errorf("%s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -182,9 +178,6 @@ func (xl XL) MakeVol(volume string) error {
|
||||
// Loop through all the concocted errors.
|
||||
for _, err := range dErrs {
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
}).Errorf("MakeVol failed with %s", err)
|
||||
// if volume already exists, count them.
|
||||
if err == errVolumeExists {
|
||||
volumeExistsErrCnt++
|
||||
@ -241,9 +234,6 @@ func (xl XL) DeleteVol(volume string) error {
|
||||
// Loop through concocted errors and return anything unusual.
|
||||
for _, err := range dErrs {
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
}).Errorf("DeleteVol failed with %s", err)
|
||||
// We ignore error if errVolumeNotFound or errDiskNotFound
|
||||
if err == errVolumeNotFound || err == errDiskNotFound {
|
||||
volumeNotFoundErrCnt++
|
||||
@ -388,11 +378,6 @@ func (xl XL) listAllVolumeInfo(volume string) ([]VolInfo, bool, error) {
|
||||
// Verify if online disks count are lesser than readQuorum
|
||||
// threshold, return an error if yes.
|
||||
if onlineDiskCount < xl.readQuorum {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"onlineDiskCount": onlineDiskCount,
|
||||
"readQuorumCount": xl.readQuorum,
|
||||
}).Errorf("%s", errReadQuorum)
|
||||
return nil, false, errReadQuorum
|
||||
}
|
||||
}
|
||||
@ -410,9 +395,6 @@ func (xl XL) healVolume(volume string) error {
|
||||
// Lists volume info for all online disks.
|
||||
volsInfo, heal, err := xl.listAllVolumeInfo(volume)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
}).Errorf("List online disks failed with %s", err)
|
||||
return err
|
||||
}
|
||||
if !heal {
|
||||
@ -425,11 +407,6 @@ func (xl XL) healVolume(volume string) error {
|
||||
}
|
||||
// Volinfo name would be an empty string, create it.
|
||||
if err = xl.storageDisks[index].MakeVol(volume); err != nil {
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
}).Errorf("MakeVol failed with error %s", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
@ -447,20 +424,13 @@ func (xl XL) StatVol(volume string) (volInfo VolInfo, err error) {
|
||||
volsInfo, heal, err := xl.listAllVolumeInfo(volume)
|
||||
nsMutex.RUnlock(volume, "")
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
}).Errorf("listOnlineVolsInfo failed with %s", err)
|
||||
return VolInfo{}, err
|
||||
}
|
||||
|
||||
if heal {
|
||||
go func() {
|
||||
if hErr := xl.healVolume(volume); hErr != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
}).Errorf("healVolume failed with %s", hErr)
|
||||
return
|
||||
}
|
||||
hErr := xl.healVolume(volume)
|
||||
errorIf(hErr, "Unable to heal volume "+volume+".")
|
||||
}()
|
||||
}
|
||||
|
||||
@ -534,23 +504,14 @@ func (xl XL) StatFile(volume, path string) (FileInfo, error) {
|
||||
_, metadata, heal, err := xl.listOnlineDisks(volume, path)
|
||||
nsMutex.RUnlock(volume, path)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("listOnlineDisks failed with %s", err)
|
||||
return FileInfo{}, err
|
||||
}
|
||||
|
||||
if heal {
|
||||
// Heal in background safely, since we already have read quorum disks.
|
||||
go func() {
|
||||
if hErr := xl.healFile(volume, path); hErr != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("healFile failed with %s", hErr)
|
||||
return
|
||||
}
|
||||
hErr := xl.healFile(volume, path)
|
||||
errorIf(hErr, "Unable to heal file "+volume+"/"+path+".")
|
||||
}()
|
||||
}
|
||||
|
||||
@ -582,11 +543,6 @@ func (xl XL) DeleteFile(volume, path string) error {
|
||||
erasureFilePart := slashpath.Join(path, fmt.Sprintf("file.%d", index))
|
||||
err := disk.DeleteFile(volume, erasureFilePart)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("DeleteFile failed with %s", err)
|
||||
|
||||
errCount++
|
||||
|
||||
// We can safely allow DeleteFile errors up to len(xl.storageDisks) - xl.writeQuorum
|
||||
@ -601,11 +557,6 @@ func (xl XL) DeleteFile(volume, path string) error {
|
||||
xlMetaV1FilePath := slashpath.Join(path, "file.json")
|
||||
err = disk.DeleteFile(volume, xlMetaV1FilePath)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"volume": volume,
|
||||
"path": path,
|
||||
}).Errorf("DeleteFile failed with %s", err)
|
||||
|
||||
errCount++
|
||||
|
||||
// We can safely allow DeleteFile errors up to len(xl.storageDisks) - xl.writeQuorum
|
||||
@ -653,13 +604,6 @@ func (xl XL) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error {
|
||||
// not rename the part and metadata files separately.
|
||||
err := disk.RenameFile(srcVolume, retainSlash(srcPath), dstVolume, retainSlash(dstPath))
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"srcVolume": srcVolume,
|
||||
"srcPath": srcPath,
|
||||
"dstVolume": dstVolume,
|
||||
"dstPath": dstPath,
|
||||
}).Errorf("RenameFile failed with %s", err)
|
||||
|
||||
errCount++
|
||||
// We can safely allow RenameFile errors up to len(xl.storageDisks) - xl.writeQuorum
|
||||
// otherwise return failure.
|
||||
|
@ -223,9 +223,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
|
||||
src := path.Join(mpartMetaPrefix, bucket, object, uploadID, partSuffix)
|
||||
dst := path.Join(mpartMetaPrefix, bucket, object, uploadID, partNumToPartFileName(part.PartNumber))
|
||||
errs[index] = xl.storage.RenameFile(minioMetaBucket, src, minioMetaBucket, dst)
|
||||
if errs[index] != nil {
|
||||
log.Errorf("Unable to rename file %s to %s, failed with %s", src, dst, errs[index])
|
||||
}
|
||||
errorIf(errs[index], "Unable to rename file %s to %s.", src, dst)
|
||||
}(index, part)
|
||||
}
|
||||
|
||||
@ -240,10 +238,10 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
|
||||
}
|
||||
|
||||
// Delete the incomplete file place holder.
|
||||
uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID, incompleteFile)
|
||||
err = xl.storage.DeleteFile(minioMetaBucket, uploadIDPath)
|
||||
uploadIDIncompletePath := path.Join(mpartMetaPrefix, bucket, object, uploadID, incompleteFile)
|
||||
err = xl.storage.DeleteFile(minioMetaBucket, uploadIDIncompletePath)
|
||||
if err != nil {
|
||||
return "", toObjectErr(err, minioMetaBucket, uploadIDPath)
|
||||
return "", toObjectErr(err, minioMetaBucket, uploadIDIncompletePath)
|
||||
}
|
||||
|
||||
// Delete if an object already exists.
|
||||
@ -255,7 +253,8 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
|
||||
return "", toObjectErr(err, bucket, object)
|
||||
}
|
||||
|
||||
if err = xl.storage.RenameFile(minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object, uploadID), bucket, object); err != nil {
|
||||
uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
|
||||
if err = xl.storage.RenameFile(minioMetaBucket, uploadIDPath, bucket, object); err != nil {
|
||||
return "", toObjectErr(err, bucket, object)
|
||||
}
|
||||
// Validate if there are other incomplete upload-id's present for
|
||||
|
@ -49,20 +49,17 @@ func isValidFormat(storage StorageAPI, exportPaths ...string) bool {
|
||||
// Load saved XL format.json and validate.
|
||||
xl, err := loadFormatXL(storage)
|
||||
if err != nil {
|
||||
log.Errorf("loadFormatXL failed with %s", err)
|
||||
errorIf(err, "Unable to load format file 'format.json'.")
|
||||
return false
|
||||
}
|
||||
if xl.Version != "1" {
|
||||
log.Errorf("Unsupported XL backend format found [%s]", xl.Version)
|
||||
return false
|
||||
}
|
||||
if len(exportPaths) != len(xl.Disks) {
|
||||
log.Errorf("Number of disks %d passed at the command-line did not match the backend format %d", len(exportPaths), len(xl.Disks))
|
||||
return false
|
||||
}
|
||||
for index, disk := range xl.Disks {
|
||||
if exportPaths[index] != disk {
|
||||
log.Errorf("Invalid order of disks detected %s. Required order is %s.", exportPaths, xl.Disks)
|
||||
return false
|
||||
}
|
||||
}
|
||||
@ -73,7 +70,6 @@ func isValidFormat(storage StorageAPI, exportPaths ...string) bool {
|
||||
func newXLObjects(exportPaths ...string) (ObjectLayer, error) {
|
||||
storage, err := newXL(exportPaths...)
|
||||
if err != nil {
|
||||
log.Errorf("newXL failed with %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -90,13 +86,11 @@ func newXLObjects(exportPaths ...string) (ObjectLayer, error) {
|
||||
Disks: exportPaths,
|
||||
})
|
||||
if errSave != nil {
|
||||
log.Errorf("saveFormatXL failed with %s", errSave)
|
||||
return nil, errSave
|
||||
}
|
||||
} else {
|
||||
log.Errorf("Unable to check backend format %s", err)
|
||||
if err == errReadQuorum {
|
||||
errMsg := fmt.Sprintf("Not all disks %s are available, did not meet read quroum.", exportPaths)
|
||||
errMsg := fmt.Sprintf("Disks %s are offline. Unable to establish quorum.", exportPaths)
|
||||
err = errors.New(errMsg)
|
||||
} else if err == errDiskNotFound {
|
||||
errMsg := fmt.Sprintf("Disks %s not found.", exportPaths)
|
||||
|