From 9df01035dabd985e0c7ce084fc2ced0a4250f99b Mon Sep 17 00:00:00 2001
From: Harshavardhana
Date: Thu, 9 Feb 2017 23:26:44 -0800
Subject: [PATCH] Remove XL references in public docs to Erasure. (#3725)

Ref #3722
---
 cmd/admin-handlers_test.go     |  8 ++++----
 cmd/admin-rpc-client.go        |  1 +
 cmd/auth-handler_test.go       | 16 ++++++++--------
 cmd/config-v13_test.go         |  8 ++++++++
 cmd/object-api-common_test.go  |  8 ++++----
 cmd/object-api-datatypes.go    |  8 ++++----
 cmd/server-main.go             |  6 +++---
 cmd/server-main_test.go        | 32 ++++++++++++++++----------------
 cmd/server-startup-msg.go      |  2 +-
 cmd/server-startup-msg_test.go |  2 +-
 cmd/xl-v1.go                   |  2 +-
 docs/backend/README.md         |  8 +++++---
 docs/minio-limitations.md      | 19 ++++++++++---------
 pkg/madmin/API.md              |  6 +++---
 pkg/madmin/service-commands.go |  8 ++++----
 15 files changed, 73 insertions(+), 61 deletions(-)

diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go
index e576a5304..e309e3f27 100644
--- a/cmd/admin-handlers_test.go
+++ b/cmd/admin-handlers_test.go
@@ -202,7 +202,7 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
     // Initialize admin peers to make admin RPC calls. Note: In a
     // single node setup, this degenerates to a simple function
     // call under the hood.
-    eps, err := parseStorageEndpoints([]string{"http://localhost"})
+    eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
     if err != nil {
         t.Fatalf("Failed to parse storage end point - %v", err)
     }
@@ -268,7 +268,7 @@ func TestServiceSetCreds(t *testing.T) {
     // Initialize admin peers to make admin RPC calls. Note: In a
     // single node setup, this degenerates to a simple function
     // call under the hood.
-    eps, err := parseStorageEndpoints([]string{"http://localhost"})
+    eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
     if err != nil {
         t.Fatalf("Failed to parse storage end point - %v", err)
     }
@@ -356,7 +356,7 @@ func TestListLocksHandler(t *testing.T) {
     defer adminTestBed.TearDown()
 
     // Initialize admin peers to make admin RPC calls.
-    eps, err := parseStorageEndpoints([]string{"http://localhost"})
+    eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
     if err != nil {
         t.Fatalf("Failed to parse storage end point - %v", err)
     }
@@ -431,7 +431,7 @@ func TestClearLocksHandler(t *testing.T) {
     defer adminTestBed.TearDown()
 
     // Initialize admin peers to make admin RPC calls.
-    eps, err := parseStorageEndpoints([]string{"http://localhost"})
+    eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
     if err != nil {
         t.Fatalf("Failed to parse storage end point - %v", err)
     }
diff --git a/cmd/admin-rpc-client.go b/cmd/admin-rpc-client.go
index a34809560..6cb0709be 100644
--- a/cmd/admin-rpc-client.go
+++ b/cmd/admin-rpc-client.go
@@ -317,6 +317,7 @@ func getPeerUptimes(peers adminPeers) (time.Duration, error) {
     latestUptime := time.Duration(0)
     for _, uptime := range uptimes {
         if uptime.err != nil {
+            errorIf(uptime.err, "Unable to fetch uptime")
             continue
         }
 
diff --git a/cmd/auth-handler_test.go b/cmd/auth-handler_test.go
index c0e85bae2..fbb78d4e2 100644
--- a/cmd/auth-handler_test.go
+++ b/cmd/auth-handler_test.go
@@ -36,7 +36,7 @@ func TestGetRequestAuthType(t *testing.T) {
         {
             req: &http.Request{
                 URL: &url.URL{
-                    Host:   "localhost:9000",
+                    Host:   "127.0.0.1:9000",
                     Scheme: httpScheme,
                     Path:   "/",
                 },
@@ -53,7 +53,7 @@ func TestGetRequestAuthType(t *testing.T) {
         {
             req: &http.Request{
                 URL: &url.URL{
-                    Host:   "localhost:9000",
+                    Host:   "127.0.0.1:9000",
                     Scheme: httpScheme,
                     Path:   "/",
                 },
@@ -68,7 +68,7 @@ func TestGetRequestAuthType(t *testing.T) {
         {
             req: &http.Request{
                 URL: &url.URL{
-                    Host:   "localhost:9000",
+                    Host:   "127.0.0.1:9000",
                     Scheme: httpScheme,
                     Path:   "/",
                 },
@@ -83,7 +83,7 @@ func TestGetRequestAuthType(t *testing.T) {
         {
             req: &http.Request{
                 URL: &url.URL{
-                    Host:     "localhost:9000",
+                    Host:     "127.0.0.1:9000",
                     Scheme:   httpScheme,
                     Path:     "/",
                     RawQuery: "X-Amz-Credential=EXAMPLEINVALIDEXAMPL%2Fs3%2F20160314%2Fus-east-1",
                 },
@@ -96,7 +96,7 @@ func TestGetRequestAuthType(t *testing.T) {
         {
             req: &http.Request{
                 URL: &url.URL{
-                    Host:   "localhost:9000",
+                    Host:   "127.0.0.1:9000",
                     Scheme: httpScheme,
                     Path:   "/",
                 },
@@ -326,11 +326,11 @@ func TestIsReqAuthenticated(t *testing.T) {
         // When request is nil, internal error is returned.
         {nil, ErrInternalError},
         // When request is unsigned, access denied is returned.
-        {mustNewRequest("GET", "http://localhost:9000", 0, nil, t), ErrAccessDenied},
+        {mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
         // When request is properly signed, but has bad Content-MD5 header.
-        {mustNewSignedRequest("PUT", "http://localhost:9000", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
+        {mustNewSignedRequest("PUT", "http://127.0.0.1:9000", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
         // When request is properly signed, error is none.
-        {mustNewSignedRequest("GET", "http://localhost:9000", 0, nil, t), ErrNone},
+        {mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
     }
 
     // Validates all testcases.
diff --git a/cmd/config-v13_test.go b/cmd/config-v13_test.go
index bd505c743..4b4e420f9 100644
--- a/cmd/config-v13_test.go
+++ b/cmd/config-v13_test.go
@@ -82,6 +82,10 @@ func TestServerConfig(t *testing.T) {
     if !reflect.DeepEqual(consoleCfg, consoleLogger{Enable: true}) {
         t.Errorf("Expecting console logger config %#v found %#v", consoleLogger{Enable: true}, consoleCfg)
     }
+    // Set new console logger.
+    serverConfig.Logger.SetConsole(consoleLogger{
+        Enable: false,
+    })
 
     // Set new file logger.
     serverConfig.Logger.SetFile(fileLogger{
@@ -91,6 +95,10 @@ func TestServerConfig(t *testing.T) {
     if !reflect.DeepEqual(fileCfg, fileLogger{Enable: true}) {
         t.Errorf("Expecting file logger config %#v found %#v", fileLogger{Enable: true}, consoleCfg)
     }
+    // Set new file logger.
+    serverConfig.Logger.SetFile(fileLogger{
+        Enable: false,
+    })
 
     // Match version.
     if serverConfig.GetVersion() != globalMinioConfigVersion {
diff --git a/cmd/object-api-common_test.go b/cmd/object-api-common_test.go
index d312901c5..784044088 100644
--- a/cmd/object-api-common_test.go
+++ b/cmd/object-api-common_test.go
@@ -116,8 +116,8 @@ func TestGetPath(t *testing.T) {
             {"D:\\", "d:\\"},
             {"D:", "d:"},
             {"\\", "\\"},
-            {"http://localhost/d:/export", "d:/export"},
-            {"https://localhost/d:/export", "d:/export"},
+            {"http://127.0.0.1/d:/export", "d:/export"},
+            {"https://127.0.0.1/d:/export", "d:/export"},
         }
     } else {
         testCases = []struct {
@@ -125,8 +125,8 @@ func TestGetPath(t *testing.T) {
             path string
         }{
             {"/export", "/export"},
-            {"http://localhost/export", "/export"},
-            {"https://localhost/export", "/export"},
+            {"http://127.0.0.1/export", "/export"},
+            {"https://127.0.0.1/export", "/export"},
         }
     }
     testCasesCommon := []struct {
diff --git a/cmd/object-api-datatypes.go b/cmd/object-api-datatypes.go
index cdb052f29..307220f4d 100644
--- a/cmd/object-api-datatypes.go
+++ b/cmd/object-api-datatypes.go
@@ -26,8 +26,8 @@ const (
     Unknown BackendType = iota
     // Filesystem backend.
     FS
-    // Multi disk XL (single, distributed) backend.
-    XL
+    // Multi disk Erasure (single, distributed) backend.
+    Erasure
 
     // Add your own backend.
 )
@@ -39,10 +39,10 @@ type StorageInfo struct {
     Free int64
     // Backend type.
     Backend struct {
-        // Represents various backend types, currently on FS and XL.
+        // Represents various backend types, currently on FS and Erasure.
         Type BackendType
 
-        // Following fields are only meaningful if BackendType is XL.
+        // Following fields are only meaningful if BackendType is Erasure.
         OnlineDisks  int // Online disks during server startup.
         OfflineDisks int // Offline disks during server startup.
         ReadQuorum   int // Minimum disks required for successful read operations.
diff --git a/cmd/server-main.go b/cmd/server-main.go
index 3d6602604..6b4deca7f 100644
--- a/cmd/server-main.go
+++ b/cmd/server-main.go
@@ -466,12 +466,12 @@ func serverMain(c *cli.Context) {
     globalObjectAPI = newObject
     globalObjLayerMutex.Unlock()
 
-    // Set startup time
-    globalBootTime = time.Now().UTC()
-
     // Prints the formatted startup message once object layer is initialized.
     printStartupMessage(apiEndPoints)
 
+    // Set uptime time after object layer has initialized.
+    globalBootTime = time.Now().UTC()
+
     // Waits on the server.
     <-globalServiceDoneCh
 }
diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go
index cf49c9086..84416bb62 100644
--- a/cmd/server-main_test.go
+++ b/cmd/server-main_test.go
@@ -33,7 +33,7 @@ func TestGetListenIPs(t *testing.T) {
         port       string
         shouldPass bool
     }{
-        {"localhost", "9000", true},
+        {"127.0.0.1", "9000", true},
         {"", "9000", true},
         {"", "", false},
     }
@@ -114,8 +114,8 @@ func TestFinalizeAPIEndpoints(t *testing.T) {
     }{
         {":80"},
         {":80"},
-        {"localhost:80"},
-        {"localhost:80"},
+        {"127.0.0.1:80"},
+        {"127.0.0.1:80"},
     }
 
     for i, test := range testCases {
@@ -283,18 +283,18 @@ func TestParseStorageEndpoints(t *testing.T) {
         host        string
         expectedErr error
     }{
-        {"", "http://localhost/export", nil},
+        {"", "http://127.0.0.1/export", nil},
         {
             "testhost",
-            "http://localhost/export",
-            errors.New("Invalid Argument localhost, port mandatory when --address : is used"),
+            "http://127.0.0.1/export",
+            errors.New("Invalid Argument 127.0.0.1, port mandatory when --address : is used"),
         },
         {
             "",
-            "http://localhost:9000/export",
-            errors.New("Invalid Argument localhost:9000, port configurable using --address :"),
+            "http://127.0.0.1:9000/export",
+            errors.New("Invalid Argument 127.0.0.1:9000, port configurable using --address :"),
         },
-        {"testhost", "http://localhost:9000/export", nil},
+        {"testhost", "http://127.0.0.1:9000/export", nil},
     }
     for i, test := range testCases {
         globalMinioHost = test.globalMinioHost
@@ -315,15 +315,15 @@ func TestCheckEndpointsSyntax(t *testing.T) {
     successCases := []string{
         "export",
         "/export",
-        "http://localhost/export",
-        "https://localhost/export",
+        "http://127.0.0.1/export",
+        "https://127.0.0.1/export",
     }
 
     failureCases := []string{
         "/",
-        "http://localhost",
-        "http://localhost/",
-        "ftp://localhost/export",
+        "http://127.0.0.1",
+        "http://127.0.0.1/",
+        "ftp://127.0.0.1/export",
         "server:/export",
     }
 
@@ -495,8 +495,8 @@ func TestIsAnyEndpointLocal(t *testing.T) {
             result: false,
         },
         {
-            disks: []string{"http://localhost/mnt/disk1",
-                "http://localhost/mnt/disk1"},
+            disks: []string{"http://127.0.0.1/mnt/disk1",
+                "http://127.0.0.1/mnt/disk1"},
             result: true,
         },
     }
diff --git a/cmd/server-startup-msg.go b/cmd/server-startup-msg.go
index 0cb7370de..55012fc7c 100644
--- a/cmd/server-startup-msg.go
+++ b/cmd/server-startup-msg.go
@@ -141,7 +141,7 @@ func getStorageInfoMsg(storageInfo StorageInfo) string {
     msg := fmt.Sprintf("%s %s Free, %s Total", colorBlue("Drive Capacity:"),
         humanize.IBytes(uint64(storageInfo.Free)),
         humanize.IBytes(uint64(storageInfo.Total)))
-    if storageInfo.Backend.Type == XL {
+    if storageInfo.Backend.Type == Erasure {
", storageInfo.Backend.OnlineDisks, storageInfo.Backend.OfflineDisks) if maxDiskFailures := storageInfo.Backend.ReadQuorum - storageInfo.Backend.OfflineDisks; maxDiskFailures >= 0 { diskInfo += fmt.Sprintf("We can withstand [%d] more drive failure(s).", maxDiskFailures) diff --git a/cmd/server-startup-msg_test.go b/cmd/server-startup-msg_test.go index bd15cd1e4..ef39b9e18 100644 --- a/cmd/server-startup-msg_test.go +++ b/cmd/server-startup-msg_test.go @@ -38,7 +38,7 @@ func TestStorageInfoMsg(t *testing.T) { OfflineDisks int ReadQuorum int WriteQuorum int - }{XL, 7, 1, 4, 5}, + }{Erasure, 7, 1, 4, 5}, } if msg := getStorageInfoMsg(infoStorage); !strings.Contains(msg, "2.0 GiB Free, 10 GiB Total") || !strings.Contains(msg, "7 Online, 1 Offline") { diff --git a/cmd/xl-v1.go b/cmd/xl-v1.go index a94cdd05a..4f6083d75 100644 --- a/cmd/xl-v1.go +++ b/cmd/xl-v1.go @@ -244,7 +244,7 @@ func getStorageInfo(disks []StorageAPI) StorageInfo { Free: validDisksInfo[0].Free * int64(onlineDisks) / 2, } - storageInfo.Backend.Type = XL + storageInfo.Backend.Type = Erasure storageInfo.Backend.OnlineDisks = onlineDisks storageInfo.Backend.OfflineDisks = offlineDisks return storageInfo diff --git a/docs/backend/README.md b/docs/backend/README.md index c0412b288..548234b37 100644 --- a/docs/backend/README.md +++ b/docs/backend/README.md @@ -1,6 +1,8 @@ ## Backends -Minio currently implements two types of backends namely. +Minio currently implements two types of backends. -- Filesystem layer (fs). -- ErasureCode layer (XL). +| Minio | FS | Erasure | Stability | +|:-----------:|:----:|:----:|:---:| +| Standalone | x | x | Stable | +| Distributed | x | x | Stable | diff --git a/docs/minio-limitations.md b/docs/minio-limitations.md index d7f49e660..9e59ee747 100644 --- a/docs/minio-limitations.md +++ b/docs/minio-limitations.md @@ -1,5 +1,4 @@ ## Minio Server Limits Per Tenant -We found the following APIs to be redundant or less useful outside of AWS. If you have a different view on any of the APIs we missed, please open a [github issue](https://github.com/minio/minio/issues). ### Erasure Code (Multiple Drives / Servers) @@ -31,19 +30,21 @@ We found the following APIs to be redundant or less useful outside of AWS. If yo |Maximum number of objects returned per list objects request| 1000| |Maximum number of multipart uploads returned per list multipart uploads request| 1000| +We found the following APIs to be redundant or less useful outside of AWS S3. If you have a different view on any of the APIs we missed, please open a [github issue](https://github.com/minio/minio/issues). + ### List of Amazon S3 Bucket API's not supported on Minio. 
-- BucketACL (Use bucket policies instead)
-- BucketCORS (CORS enabled by default)
-- BucketLifecycle (Not required for Minio's XL backend)
-- BucketReplication (Use `mc mirror` instead)
-- BucketVersions, BucketVersioning (Use `s3git`)
-- BucketWebsite (Use `caddy` or `nginx`)
-- BucketAnalytics, BucketMetrics, BucketLogging (Use bucket notification APIs)
+- BucketACL (Use [bucket policies](http://docs.minio.io/docs/minio-client-complete-guide#policy) instead)
+- BucketCORS (CORS enabled by default on all buckets for all HTTP verbs)
+- BucketLifecycle (Not required for Minio erasure coded backend)
+- BucketReplication (Use [`mc mirror`](http://docs.minio.io/docs/minio-client-complete-guide#mirror) instead)
+- BucketVersions, BucketVersioning (Use [`s3git`](https://github.com/s3git/s3git))
+- BucketWebsite (Use [`caddy`](https://github.com/mholt/caddy) or [`nginx`](https://www.nginx.com/resources/wiki/))
+- BucketAnalytics, BucketMetrics, BucketLogging (Use [bucket notification](http://docs.minio.io/docs/minio-client-complete-guide#events) APIs)
 - BucketRequestPayment
 - BucketTagging
 
 ### List of Amazon S3 Object API's not supported on Minio.
 
-- ObjectACL (Use bucket policies instead)
+- ObjectACL (Use [bucket policies](http://docs.minio.io/docs/minio-client-complete-guide#policy) instead)
 - ObjectTorrent
diff --git a/pkg/madmin/API.md b/pkg/madmin/API.md
index 36c79b1a2..506690783 100644
--- a/pkg/madmin/API.md
+++ b/pkg/madmin/API.md
@@ -82,9 +82,9 @@ Fetch service status, replies disk space used, backend type and total disks offl
 
 | Param | Type | Description |
 |---|---|---|
-|`backend.Type` | _BackendType_ | Type of backend used by the server currently only FS or XL. |
-|`backend.OnlineDisks`| _int_ | Total number of disks online (only applies to XL backend), is empty for FS. |
-|`backend.OfflineDisks` | _int_ | Total number of disks offline (only applies to XL backend), is empty for FS. |
+|`backend.Type` | _BackendType_ | Type of backend used by the server currently only FS or Erasure. |
+|`backend.OnlineDisks`| _int_ | Total number of disks online (only applies to Erasure backend), is empty for FS. |
+|`backend.OfflineDisks` | _int_ | Total number of disks offline (only applies to Erasure backend), is empty for FS. |
 |`backend.ReadQuorum` | _int_ | Current total read quorum threshold before reads will be unavailable, is empty for FS. |
 |`backend.WriteQuorum` | _int_ | Current total write quorum threshold before writes will be unavailable, is empty for FS. |
 
diff --git a/pkg/madmin/service-commands.go b/pkg/madmin/service-commands.go
index 2556a8960..292340ed7 100644
--- a/pkg/madmin/service-commands.go
+++ b/pkg/madmin/service-commands.go
@@ -36,8 +36,8 @@ const (
     Unknown BackendType = iota
     // Filesystem backend.
     FS
-    // Multi disk XL (single, distributed) backend.
-    XL
+    // Multi disk Erasure (single, distributed) backend.
+    Erasure
 
     // Add your own backend.
 )
@@ -50,10 +50,10 @@ type StorageInfo struct {
     Free int64
     // Backend type.
     Backend struct {
-        // Represents various backend types, currently on FS and XL.
+        // Represents various backend types, currently on FS and Erasure.
         Type BackendType
 
-        // Following fields are only meaningful if BackendType is XL.
+        // Following fields are only meaningful if BackendType is Erasure.
         OnlineDisks  int // Online disks during server startup.
         OfflineDisks int // Offline disks during server startup.
         ReadQuorum   int // Minimum disks required for successful read operations.
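
Illustration (not part of the patch): a minimal sketch of how a caller of `pkg/madmin` might branch on the renamed `Erasure` constant and the `StorageInfo.Backend` fields touched above. The helper name `describeBackend` and the literal disk counts are hypothetical; only the exported types and constants shown in the diff (`madmin.StorageInfo`, `madmin.FS`, `madmin.Erasure`) are assumed.

```go
package main

import (
	"fmt"

	"github.com/minio/minio/pkg/madmin"
)

// describeBackend is a hypothetical helper that summarizes the backend
// portion of a madmin.StorageInfo, mirroring what getStorageInfoMsg
// prints at server startup.
func describeBackend(info madmin.StorageInfo) string {
	if info.Backend.Type != madmin.Erasure {
		// FS backend: disk counts and quorum fields are not populated.
		return "FS backend"
	}
	return fmt.Sprintf("Erasure backend: %d Online, %d Offline, read quorum %d",
		info.Backend.OnlineDisks, info.Backend.OfflineDisks, info.Backend.ReadQuorum)
}

func main() {
	// Illustrative values only; in practice StorageInfo would come from an
	// admin API call such as the service status documented in pkg/madmin/API.md.
	var info madmin.StorageInfo
	info.Backend.Type = madmin.Erasure
	info.Backend.OnlineDisks = 7
	info.Backend.OfflineDisks = 1
	info.Backend.ReadQuorum = 4
	fmt.Println(describeBackend(info))
}
```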