Mirror of https://github.com/minio/minio.git
parent 1ad96ee09f
commit 9df01035da
@@ -202,7 +202,7 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
 	// Initialize admin peers to make admin RPC calls. Note: In a
 	// single node setup, this degenerates to a simple function
 	// call under the hood.
-	eps, err := parseStorageEndpoints([]string{"http://localhost"})
+	eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
 	if err != nil {
 		t.Fatalf("Failed to parse storage end point - %v", err)
 	}
@@ -268,7 +268,7 @@ func TestServiceSetCreds(t *testing.T) {
 	// Initialize admin peers to make admin RPC calls. Note: In a
 	// single node setup, this degenerates to a simple function
 	// call under the hood.
-	eps, err := parseStorageEndpoints([]string{"http://localhost"})
+	eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
 	if err != nil {
 		t.Fatalf("Failed to parse storage end point - %v", err)
 	}
@@ -356,7 +356,7 @@ func TestListLocksHandler(t *testing.T) {
 	defer adminTestBed.TearDown()

 	// Initialize admin peers to make admin RPC calls.
-	eps, err := parseStorageEndpoints([]string{"http://localhost"})
+	eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
 	if err != nil {
 		t.Fatalf("Failed to parse storage end point - %v", err)
 	}
@@ -431,7 +431,7 @@ func TestClearLocksHandler(t *testing.T) {
 	defer adminTestBed.TearDown()

 	// Initialize admin peers to make admin RPC calls.
-	eps, err := parseStorageEndpoints([]string{"http://localhost"})
+	eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
 	if err != nil {
 		t.Fatalf("Failed to parse storage end point - %v", err)
 	}
@@ -317,6 +317,7 @@ func getPeerUptimes(peers adminPeers) (time.Duration, error) {
 	latestUptime := time.Duration(0)
 	for _, uptime := range uptimes {
 		if uptime.err != nil {
+			errorIf(uptime.err, "Unable to fetch uptime")
 			continue
 		}

@@ -36,7 +36,7 @@ func TestGetRequestAuthType(t *testing.T) {
 		{
 			req: &http.Request{
 				URL: &url.URL{
-					Host:   "localhost:9000",
+					Host:   "127.0.0.1:9000",
 					Scheme: httpScheme,
 					Path:   "/",
 				},
@@ -53,7 +53,7 @@ func TestGetRequestAuthType(t *testing.T) {
 		{
 			req: &http.Request{
 				URL: &url.URL{
-					Host:   "localhost:9000",
+					Host:   "127.0.0.1:9000",
 					Scheme: httpScheme,
 					Path:   "/",
 				},
@@ -68,7 +68,7 @@ func TestGetRequestAuthType(t *testing.T) {
 		{
 			req: &http.Request{
 				URL: &url.URL{
-					Host:   "localhost:9000",
+					Host:   "127.0.0.1:9000",
 					Scheme: httpScheme,
 					Path:   "/",
 				},
@@ -83,7 +83,7 @@ func TestGetRequestAuthType(t *testing.T) {
 		{
 			req: &http.Request{
 				URL: &url.URL{
-					Host:     "localhost:9000",
+					Host:     "127.0.0.1:9000",
 					Scheme:   httpScheme,
 					Path:     "/",
 					RawQuery: "X-Amz-Credential=EXAMPLEINVALIDEXAMPL%2Fs3%2F20160314%2Fus-east-1",
@@ -96,7 +96,7 @@ func TestGetRequestAuthType(t *testing.T) {
 		{
 			req: &http.Request{
 				URL: &url.URL{
-					Host:   "localhost:9000",
+					Host:   "127.0.0.1:9000",
 					Scheme: httpScheme,
 					Path:   "/",
 				},
@@ -326,11 +326,11 @@ func TestIsReqAuthenticated(t *testing.T) {
 		// When request is nil, internal error is returned.
 		{nil, ErrInternalError},
 		// When request is unsigned, access denied is returned.
-		{mustNewRequest("GET", "http://localhost:9000", 0, nil, t), ErrAccessDenied},
+		{mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
 		// When request is properly signed, but has bad Content-MD5 header.
-		{mustNewSignedRequest("PUT", "http://localhost:9000", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
+		{mustNewSignedRequest("PUT", "http://127.0.0.1:9000", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
 		// When request is properly signed, error is none.
-		{mustNewSignedRequest("GET", "http://localhost:9000", 0, nil, t), ErrNone},
+		{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
 	}

 	// Validates all testcases.
@@ -82,6 +82,10 @@ func TestServerConfig(t *testing.T) {
 	if !reflect.DeepEqual(consoleCfg, consoleLogger{Enable: true}) {
 		t.Errorf("Expecting console logger config %#v found %#v", consoleLogger{Enable: true}, consoleCfg)
 	}
+	// Set new console logger.
+	serverConfig.Logger.SetConsole(consoleLogger{
+		Enable: false,
+	})

 	// Set new file logger.
 	serverConfig.Logger.SetFile(fileLogger{
@@ -91,6 +95,10 @@ func TestServerConfig(t *testing.T) {
 	if !reflect.DeepEqual(fileCfg, fileLogger{Enable: true}) {
 		t.Errorf("Expecting file logger config %#v found %#v", fileLogger{Enable: true}, consoleCfg)
 	}
+	// Set new file logger.
+	serverConfig.Logger.SetFile(fileLogger{
+		Enable: false,
+	})

 	// Match version.
 	if serverConfig.GetVersion() != globalMinioConfigVersion {
@@ -116,8 +116,8 @@ func TestGetPath(t *testing.T) {
 			{"D:\\", "d:\\"},
 			{"D:", "d:"},
 			{"\\", "\\"},
-			{"http://localhost/d:/export", "d:/export"},
-			{"https://localhost/d:/export", "d:/export"},
+			{"http://127.0.0.1/d:/export", "d:/export"},
+			{"https://127.0.0.1/d:/export", "d:/export"},
 		}
 	} else {
 		testCases = []struct {
@@ -125,8 +125,8 @@ func TestGetPath(t *testing.T) {
 			path string
 		}{
 			{"/export", "/export"},
-			{"http://localhost/export", "/export"},
-			{"https://localhost/export", "/export"},
+			{"http://127.0.0.1/export", "/export"},
+			{"https://127.0.0.1/export", "/export"},
 		}
 	}
 	testCasesCommon := []struct {
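The TestGetPath cases above expect a URL endpoint to reduce to just its export path, with a Windows drive-letter twist. Below is a self-contained sketch of that behaviour using net/url; it is a simplified stand-in, not minio's actual getPath.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// pathFromEndpoint mimics what the test table above implies: plain paths pass
// through unchanged, URL endpoints reduce to their path, and a leading "/"
// before a Windows drive letter is dropped.
func pathFromEndpoint(endpoint string) string {
	u, err := url.Parse(endpoint)
	if err != nil || u.Scheme == "" {
		return endpoint // e.g. "/export" stays "/export"
	}
	p := u.Path
	// Windows-style case from the test table: "/d:/export" -> "d:/export".
	if len(p) >= 3 && p[0] == '/' && p[2] == ':' {
		p = strings.TrimPrefix(p, "/")
	}
	return p
}

func main() {
	fmt.Println(pathFromEndpoint("http://127.0.0.1/export"))     // /export
	fmt.Println(pathFromEndpoint("https://127.0.0.1/d:/export")) // d:/export
}
```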
@@ -26,8 +26,8 @@ const (
 	Unknown BackendType = iota
 	// Filesystem backend.
 	FS
-	// Multi disk XL (single, distributed) backend.
-	XL
+	// Multi disk Erasure (single, distributed) backend.
+	Erasure
 	// Add your own backend.
 )

@@ -39,10 +39,10 @@ type StorageInfo struct {
 	Free int64
 	// Backend type.
 	Backend struct {
-		// Represents various backend types, currently on FS and XL.
+		// Represents various backend types, currently on FS and Erasure.
 		Type BackendType

-		// Following fields are only meaningful if BackendType is XL.
+		// Following fields are only meaningful if BackendType is Erasure.
 		OnlineDisks  int // Online disks during server startup.
 		OfflineDisks int // Offline disks during server startup.
 		ReadQuorum   int // Minimum disks required for successful read operations.
@@ -466,12 +466,12 @@ func serverMain(c *cli.Context) {
 	globalObjectAPI = newObject
 	globalObjLayerMutex.Unlock()

-	// Set startup time
-	globalBootTime = time.Now().UTC()
-
 	// Prints the formatted startup message once object layer is initialized.
 	printStartupMessage(apiEndPoints)

+	// Set uptime time after object layer has initialized.
+	globalBootTime = time.Now().UTC()
+
 	// Waits on the server.
 	<-globalServiceDoneCh
 }
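This hunk moves the globalBootTime assignment to after the object layer is initialized, so reported uptime only starts counting once the server can actually serve requests. A minimal standalone sketch of deriving an uptime figure from such a boot timestamp; bootTime here is a hypothetical local stand-in for globalBootTime.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Record boot time once initialization is done (the point this commit moves it to).
	bootTime := time.Now().UTC()

	time.Sleep(1500 * time.Millisecond) // simulate the server running for a while

	// Uptime is simply "now minus boot time"; setting bootTime late keeps
	// startup work out of the reported figure.
	uptime := time.Now().UTC().Sub(bootTime)
	fmt.Printf("uptime: %s\n", uptime.Round(time.Millisecond))
}
```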
@@ -33,7 +33,7 @@ func TestGetListenIPs(t *testing.T) {
 		port       string
 		shouldPass bool
 	}{
-		{"localhost", "9000", true},
+		{"127.0.0.1", "9000", true},
 		{"", "9000", true},
 		{"", "", false},
 	}
@@ -114,8 +114,8 @@ func TestFinalizeAPIEndpoints(t *testing.T) {
 	}{
 		{":80"},
 		{":80"},
-		{"localhost:80"},
-		{"localhost:80"},
+		{"127.0.0.1:80"},
+		{"127.0.0.1:80"},
 	}

 	for i, test := range testCases {
@@ -283,18 +283,18 @@ func TestParseStorageEndpoints(t *testing.T) {
 		host        string
 		expectedErr error
 	}{
-		{"", "http://localhost/export", nil},
+		{"", "http://127.0.0.1/export", nil},
 		{
 			"testhost",
-			"http://localhost/export",
-			errors.New("Invalid Argument localhost, port mandatory when --address <host>:<port> is used"),
+			"http://127.0.0.1/export",
+			errors.New("Invalid Argument 127.0.0.1, port mandatory when --address <host>:<port> is used"),
 		},
 		{
 			"",
-			"http://localhost:9000/export",
-			errors.New("Invalid Argument localhost:9000, port configurable using --address :<port>"),
+			"http://127.0.0.1:9000/export",
+			errors.New("Invalid Argument 127.0.0.1:9000, port configurable using --address :<port>"),
 		},
-		{"testhost", "http://localhost:9000/export", nil},
+		{"testhost", "http://127.0.0.1:9000/export", nil},
 	}
 	for i, test := range testCases {
 		globalMinioHost = test.globalMinioHost
@@ -315,15 +315,15 @@ func TestCheckEndpointsSyntax(t *testing.T) {
 	successCases := []string{
 		"export",
 		"/export",
-		"http://localhost/export",
-		"https://localhost/export",
+		"http://127.0.0.1/export",
+		"https://127.0.0.1/export",
 	}

 	failureCases := []string{
 		"/",
-		"http://localhost",
-		"http://localhost/",
-		"ftp://localhost/export",
+		"http://127.0.0.1",
+		"http://127.0.0.1/",
+		"ftp://127.0.0.1/export",
 		"server:/export",
 	}

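Read together, the success and failure cases in TestCheckEndpointsSyntax suggest a rule along these lines: bare paths are accepted unless they are just "/", http/https endpoints must carry a non-root export path, and any other scheme is rejected. The sketch below is an illustrative approximation of that rule, not minio's checkEndpointsSyntax.

```go
package main

import (
	"fmt"
	"net/url"
)

// okEndpointSyntax approximates the rule implied by the test cases above.
func okEndpointSyntax(ep string) bool {
	u, err := url.Parse(ep)
	if err != nil {
		return false
	}
	switch u.Scheme {
	case "": // local path: anything but the bare root
		return ep != "/"
	case "http", "https": // URL endpoint: must name a non-root export path
		return u.Path != "" && u.Path != "/"
	default: // ftp://..., server:/export, ...
		return false
	}
}

func main() {
	for _, ep := range []string{"/export", "http://127.0.0.1/export", "/", "http://127.0.0.1", "ftp://127.0.0.1/export", "server:/export"} {
		fmt.Printf("%-26s %v\n", ep, okEndpointSyntax(ep))
	}
}
```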
@@ -495,8 +495,8 @@ func TestIsAnyEndpointLocal(t *testing.T) {
 			result: false,
 		},
 		{
-			disks: []string{"http://localhost/mnt/disk1",
-				"http://localhost/mnt/disk1"},
+			disks: []string{"http://127.0.0.1/mnt/disk1",
+				"http://127.0.0.1/mnt/disk1"},
 			result: true,
 		},
 	}
@@ -141,7 +141,7 @@ func getStorageInfoMsg(storageInfo StorageInfo) string {
 	msg := fmt.Sprintf("%s %s Free, %s Total", colorBlue("Drive Capacity:"),
 		humanize.IBytes(uint64(storageInfo.Free)),
 		humanize.IBytes(uint64(storageInfo.Total)))
-	if storageInfo.Backend.Type == XL {
+	if storageInfo.Backend.Type == Erasure {
 		diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", storageInfo.Backend.OnlineDisks, storageInfo.Backend.OfflineDisks)
 		if maxDiskFailures := storageInfo.Backend.ReadQuorum - storageInfo.Backend.OfflineDisks; maxDiskFailures >= 0 {
 			diskInfo += fmt.Sprintf("We can withstand [%d] more drive failure(s).", maxDiskFailures)
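For intuition, plugging in the values used by TestStorageInfoMsg in the next hunk (read quorum 4, 1 disk already offline) gives 4 - 1 = 3 tolerable additional failures. A tiny standalone version of the arithmetic above:

```go
package main

import "fmt"

func main() {
	// Values taken from the TestStorageInfoMsg hunk below: Erasure, 7, 1, 4, 5.
	onlineDisks, offlineDisks := 7, 1
	readQuorum := 4

	// Same expression as in getStorageInfoMsg.
	if maxDiskFailures := readQuorum - offlineDisks; maxDiskFailures >= 0 {
		fmt.Printf("%d Online, %d Offline. We can withstand [%d] more drive failure(s).\n",
			onlineDisks, offlineDisks, maxDiskFailures)
	}
}
```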
@@ -38,7 +38,7 @@ func TestStorageInfoMsg(t *testing.T) {
 			OfflineDisks int
 			ReadQuorum   int
 			WriteQuorum  int
-		}{XL, 7, 1, 4, 5},
+		}{Erasure, 7, 1, 4, 5},
 	}

 	if msg := getStorageInfoMsg(infoStorage); !strings.Contains(msg, "2.0 GiB Free, 10 GiB Total") || !strings.Contains(msg, "7 Online, 1 Offline") {
@@ -244,7 +244,7 @@ func getStorageInfo(disks []StorageAPI) StorageInfo {
 		Free: validDisksInfo[0].Free * int64(onlineDisks) / 2,
 	}

-	storageInfo.Backend.Type = XL
+	storageInfo.Backend.Type = Erasure
 	storageInfo.Backend.OnlineDisks = onlineDisks
 	storageInfo.Backend.OfflineDisks = offlineDisks
 	return storageInfo
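The Free calculation in the getStorageInfo hunk above scales a single disk's free space by the number of online disks and then halves it, which matches the default erasure layout where half the drives hold parity. A small standalone illustration with made-up disk sizes:

```go
package main

import "fmt"

func main() {
	// Made-up numbers: 8 online disks, each reporting 100 GiB free.
	const gib = int64(1) << 30
	freePerDisk := 100 * gib
	onlineDisks := 8

	// Mirrors the expression in getStorageInfo: usable free space is half of
	// the raw free space, the other half being consumed by parity.
	usableFree := freePerDisk * int64(onlineDisks) / 2
	fmt.Printf("usable free: %d GiB\n", usableFree/gib) // 400 GiB
}
```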
@@ -1,6 +1,8 @@
 ## Backends

-Minio currently implements two types of backends namely.
+Minio currently implements two types of backends.

-- Filesystem layer (fs).
-- ErasureCode layer (XL).
+| Minio | FS | Erasure | Stability |
+|:-----------:|:----:|:----:|:---:|
+| Standalone | x | x | Stable |
+| Distributed | x | x | Stable |
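The table states that both backends are available standalone and distributed, but the diff does not show how a backend is selected. As a mental model only (the helper below is illustrative, not minio code), a single export path maps to FS while multiple paths map to Erasure:

```go
package main

import "fmt"

// chooseBackend is an illustrative helper, not part of minio: one export path
// suggests the filesystem backend, several suggest the erasure-coded one.
func chooseBackend(exportPaths []string) string {
	if len(exportPaths) <= 1 {
		return "FS"
	}
	return "Erasure"
}

func main() {
	fmt.Println(chooseBackend([]string{"/export"}))                                 // FS
	fmt.Println(chooseBackend([]string{"/mnt/disk1", "/mnt/disk2", "/mnt/disk3", "/mnt/disk4"})) // Erasure
}
```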
@@ -1,5 +1,4 @@
 ## Minio Server Limits Per Tenant
-We found the following APIs to be redundant or less useful outside of AWS. If you have a different view on any of the APIs we missed, please open a [github issue](https://github.com/minio/minio/issues).

 ### Erasure Code (Multiple Drives / Servers)

@@ -31,19 +30,21 @@ We found the following APIs to be redundant or less useful outside of AWS. If yo
 |Maximum number of objects returned per list objects request| 1000|
 |Maximum number of multipart uploads returned per list multipart uploads request| 1000|

+We found the following APIs to be redundant or less useful outside of AWS S3. If you have a different view on any of the APIs we missed, please open a [github issue](https://github.com/minio/minio/issues).
+
 ### List of Amazon S3 Bucket API's not supported on Minio.

-- BucketACL (Use bucket policies instead)
-- BucketCORS (CORS enabled by default)
-- BucketLifecycle (Not required for Minio's XL backend)
-- BucketReplication (Use `mc mirror` instead)
-- BucketVersions, BucketVersioning (Use `s3git`)
-- BucketWebsite (Use `caddy` or `nginx`)
-- BucketAnalytics, BucketMetrics, BucketLogging (Use bucket notification APIs)
+- BucketACL (Use [bucket policies](http://docs.minio.io/docs/minio-client-complete-guide#policy) instead)
+- BucketCORS (CORS enabled by default on all buckets for all HTTP verbs)
+- BucketLifecycle (Not required for Minio erasure coded backend)
+- BucketReplication (Use [`mc mirror`](http://docs.minio.io/docs/minio-client-complete-guide#mirror) instead)
+- BucketVersions, BucketVersioning (Use [`s3git`](https://github.com/s3git/s3git))
+- BucketWebsite (Use [`caddy`](https://github.com/mholt/caddy) or [`nginx`](https://www.nginx.com/resources/wiki/))
+- BucketAnalytics, BucketMetrics, BucketLogging (Use [bucket notification](http://docs.minio.io/docs/minio-client-complete-guide#events) APIs)
 - BucketRequestPayment
 - BucketTagging

 ### List of Amazon S3 Object API's not supported on Minio.

-- ObjectACL (Use bucket policies instead)
+- ObjectACL (Use [bucket policies](http://docs.minio.io/docs/minio-client-complete-guide#policy) instead)
 - ObjectTorrent
@@ -82,9 +82,9 @@ Fetch service status, replies disk space used, backend type and total disks offl

 | Param | Type | Description |
 |---|---|---|
-|`backend.Type` | _BackendType_ | Type of backend used by the server currently only FS or XL. |
-|`backend.OnlineDisks`| _int_ | Total number of disks online (only applies to XL backend), is empty for FS. |
-|`backend.OfflineDisks` | _int_ | Total number of disks offline (only applies to XL backend), is empty for FS. |
+|`backend.Type` | _BackendType_ | Type of backend used by the server currently only FS or Erasure. |
+|`backend.OnlineDisks`| _int_ | Total number of disks online (only applies to Erasure backend), is empty for FS. |
+|`backend.OfflineDisks` | _int_ | Total number of disks offline (only applies to Erasure backend), is empty for FS. |
 |`backend.ReadQuorum` | _int_ | Current total read quorum threshold before reads will be unavailable, is empty for FS. |
 |`backend.WriteQuorum` | _int_ | Current total write quorum threshold before writes will be unavailable, is empty for FS. |

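To make the table concrete, here is a minimal mock of the documented backend fields with invented values for a healthy erasure deployment; the struct is a local stand-in for illustration, not the real admin API type.

```go
package main

import "fmt"

// backendInfo mirrors the documented backend.* fields (illustrative only).
type backendInfo struct {
	Type         string
	OnlineDisks  int
	OfflineDisks int
	ReadQuorum   int
	WriteQuorum  int
}

func main() {
	b := backendInfo{
		Type:         "Erasure", // FS deployments leave the disk/quorum fields empty
		OnlineDisks:  7,
		OfflineDisks: 1,
		ReadQuorum:   4,
		WriteQuorum:  5,
	}
	fmt.Printf("%+v\n", b)
}
```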
@@ -36,8 +36,8 @@ const (
 	Unknown BackendType = iota
 	// Filesystem backend.
 	FS
-	// Multi disk XL (single, distributed) backend.
-	XL
+	// Multi disk Erasure (single, distributed) backend.
+	Erasure

 	// Add your own backend.
 )
@@ -50,10 +50,10 @@ type StorageInfo struct {
 	Free int64
 	// Backend type.
 	Backend struct {
-		// Represents various backend types, currently on FS and XL.
+		// Represents various backend types, currently on FS and Erasure.
 		Type BackendType

-		// Following fields are only meaningful if BackendType is XL.
+		// Following fields are only meaningful if BackendType is Erasure.
 		OnlineDisks  int // Online disks during server startup.
 		OfflineDisks int // Offline disks during server startup.
 		ReadQuorum   int // Minimum disks required for successful read operations.