Mirror of https://github.com/minio/minio.git (synced 2024-12-24 22:25:54 -05:00)
commit 9df01035da (parent 1ad96ee09f)
@@ -202,7 +202,7 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
 	// Initialize admin peers to make admin RPC calls. Note: In a
 	// single node setup, this degenerates to a simple function
 	// call under the hood.
-	eps, err := parseStorageEndpoints([]string{"http://localhost"})
+	eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
 	if err != nil {
 		t.Fatalf("Failed to parse storage end point - %v", err)
 	}
@@ -268,7 +268,7 @@ func TestServiceSetCreds(t *testing.T) {
 	// Initialize admin peers to make admin RPC calls. Note: In a
 	// single node setup, this degenerates to a simple function
 	// call under the hood.
-	eps, err := parseStorageEndpoints([]string{"http://localhost"})
+	eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
 	if err != nil {
 		t.Fatalf("Failed to parse storage end point - %v", err)
 	}
@@ -356,7 +356,7 @@ func TestListLocksHandler(t *testing.T) {
 	defer adminTestBed.TearDown()
 
 	// Initialize admin peers to make admin RPC calls.
-	eps, err := parseStorageEndpoints([]string{"http://localhost"})
+	eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
 	if err != nil {
 		t.Fatalf("Failed to parse storage end point - %v", err)
 	}
@@ -431,7 +431,7 @@ func TestClearLocksHandler(t *testing.T) {
 	defer adminTestBed.TearDown()
 
 	// Initialize admin peers to make admin RPC calls.
-	eps, err := parseStorageEndpoints([]string{"http://localhost"})
+	eps, err := parseStorageEndpoints([]string{"http://127.0.0.1"})
 	if err != nil {
 		t.Fatalf("Failed to parse storage end point - %v", err)
 	}

@@ -317,6 +317,7 @@ func getPeerUptimes(peers adminPeers) (time.Duration, error) {
 	latestUptime := time.Duration(0)
 	for _, uptime := range uptimes {
 		if uptime.err != nil {
+			errorIf(uptime.err, "Unable to fetch uptime")
 			continue
 		}
 
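The hunk above adds error logging for peers whose uptime query failed before skipping them. Below is a minimal standalone sketch of that skip-and-log pattern; the `uptimeResult` type, the `log.Printf` call standing in for minio's internal `errorIf` helper, and the max-style comparison are assumptions for illustration, not the actual `getPeerUptimes` implementation.

```go
// Sketch of the skip-and-log pattern from the hunk above (illustrative only).
package main

import (
	"errors"
	"log"
	"time"
)

// uptimeResult is a stand-in for minio's unexported per-peer uptime result.
type uptimeResult struct {
	uptime time.Duration
	err    error
}

// latestPeerUptime skips peers that failed, logs the failure, and keeps the
// largest uptime seen (the comparison direction is an assumption here).
func latestPeerUptime(uptimes []uptimeResult) (time.Duration, error) {
	latestUptime := time.Duration(0)
	for _, u := range uptimes {
		if u.err != nil {
			// Counterpart of errorIf(uptime.err, "Unable to fetch uptime").
			log.Printf("Unable to fetch uptime: %v", u.err)
			continue
		}
		if u.uptime > latestUptime {
			latestUptime = u.uptime
		}
	}
	if latestUptime == 0 {
		return 0, errors.New("all peers failed to report uptime")
	}
	return latestUptime, nil
}

func main() {
	results := []uptimeResult{
		{uptime: 90 * time.Second},
		{err: errors.New("peer unreachable")},
		{uptime: 120 * time.Second},
	}
	if up, err := latestPeerUptime(results); err == nil {
		log.Printf("cluster uptime: %s", up)
	}
}
```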
@@ -36,7 +36,7 @@ func TestGetRequestAuthType(t *testing.T) {
 		{
 			req: &http.Request{
 				URL: &url.URL{
-					Host: "localhost:9000",
+					Host: "127.0.0.1:9000",
 					Scheme: httpScheme,
 					Path: "/",
 				},
@@ -53,7 +53,7 @@ func TestGetRequestAuthType(t *testing.T) {
 		{
 			req: &http.Request{
 				URL: &url.URL{
-					Host: "localhost:9000",
+					Host: "127.0.0.1:9000",
 					Scheme: httpScheme,
 					Path: "/",
 				},
@@ -68,7 +68,7 @@ func TestGetRequestAuthType(t *testing.T) {
 		{
 			req: &http.Request{
 				URL: &url.URL{
-					Host: "localhost:9000",
+					Host: "127.0.0.1:9000",
 					Scheme: httpScheme,
 					Path: "/",
 				},
@@ -83,7 +83,7 @@ func TestGetRequestAuthType(t *testing.T) {
 		{
 			req: &http.Request{
 				URL: &url.URL{
-					Host: "localhost:9000",
+					Host: "127.0.0.1:9000",
 					Scheme: httpScheme,
 					Path: "/",
 					RawQuery: "X-Amz-Credential=EXAMPLEINVALIDEXAMPL%2Fs3%2F20160314%2Fus-east-1",
@@ -96,7 +96,7 @@ func TestGetRequestAuthType(t *testing.T) {
 		{
 			req: &http.Request{
 				URL: &url.URL{
-					Host: "localhost:9000",
+					Host: "127.0.0.1:9000",
 					Scheme: httpScheme,
 					Path: "/",
 				},
@@ -326,11 +326,11 @@ func TestIsReqAuthenticated(t *testing.T) {
 		// When request is nil, internal error is returned.
 		{nil, ErrInternalError},
 		// When request is unsigned, access denied is returned.
-		{mustNewRequest("GET", "http://localhost:9000", 0, nil, t), ErrAccessDenied},
+		{mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
 		// When request is properly signed, but has bad Content-MD5 header.
-		{mustNewSignedRequest("PUT", "http://localhost:9000", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
+		{mustNewSignedRequest("PUT", "http://127.0.0.1:9000", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
 		// When request is properly signed, error is none.
-		{mustNewSignedRequest("GET", "http://localhost:9000", 0, nil, t), ErrNone},
+		{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
 	}
 
 	// Validates all testcases.

@@ -82,6 +82,10 @@ func TestServerConfig(t *testing.T) {
 	if !reflect.DeepEqual(consoleCfg, consoleLogger{Enable: true}) {
 		t.Errorf("Expecting console logger config %#v found %#v", consoleLogger{Enable: true}, consoleCfg)
 	}
+	// Set new console logger.
+	serverConfig.Logger.SetConsole(consoleLogger{
+		Enable: false,
+	})
 
 	// Set new file logger.
 	serverConfig.Logger.SetFile(fileLogger{
@@ -91,6 +95,10 @@ func TestServerConfig(t *testing.T) {
 	if !reflect.DeepEqual(fileCfg, fileLogger{Enable: true}) {
 		t.Errorf("Expecting file logger config %#v found %#v", fileLogger{Enable: true}, consoleCfg)
 	}
+	// Set new file logger.
+	serverConfig.Logger.SetFile(fileLogger{
+		Enable: false,
+	})
 
 	// Match version.
 	if serverConfig.GetVersion() != globalMinioConfigVersion {

@@ -116,8 +116,8 @@ func TestGetPath(t *testing.T) {
 			{"D:\\", "d:\\"},
 			{"D:", "d:"},
 			{"\\", "\\"},
-			{"http://localhost/d:/export", "d:/export"},
-			{"https://localhost/d:/export", "d:/export"},
+			{"http://127.0.0.1/d:/export", "d:/export"},
+			{"https://127.0.0.1/d:/export", "d:/export"},
 		}
 	} else {
 		testCases = []struct {
@@ -125,8 +125,8 @@ func TestGetPath(t *testing.T) {
 			path string
 		}{
 			{"/export", "/export"},
-			{"http://localhost/export", "/export"},
-			{"https://localhost/export", "/export"},
+			{"http://127.0.0.1/export", "/export"},
+			{"https://127.0.0.1/export", "/export"},
 		}
 	}
 	testCasesCommon := []struct {

@@ -26,8 +26,8 @@ const (
 	Unknown BackendType = iota
 	// Filesystem backend.
 	FS
-	// Multi disk XL (single, distributed) backend.
-	XL
+	// Multi disk Erasure (single, distributed) backend.
+	Erasure
 	// Add your own backend.
 )
 
@@ -39,10 +39,10 @@ type StorageInfo struct {
 	Free int64
 	// Backend type.
 	Backend struct {
-		// Represents various backend types, currently on FS and XL.
+		// Represents various backend types, currently on FS and Erasure.
 		Type BackendType
 
-		// Following fields are only meaningful if BackendType is XL.
+		// Following fields are only meaningful if BackendType is Erasure.
 		OnlineDisks int // Online disks during server startup.
 		OfflineDisks int // Offline disks during server startup.
 		ReadQuorum int // Minimum disks required for successful read operations.

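The two hunks above rename the `XL` backend constant to `Erasure` in the `BackendType` enumeration and in the `StorageInfo` comments. The sketch below shows how a caller might branch on `Backend.Type` after the rename, reusing the read-quorum arithmetic that appears in `getStorageInfoMsg` later in this diff; the trimmed-down type declarations and the `describe` helper are illustrative stand-ins, not minio's code.

```go
// Illustrative use of the renamed Erasure backend type (not minio's code).
package main

import "fmt"

// Trimmed-down copies of the types from the hunk above.
type BackendType int

const (
	Unknown BackendType = iota
	FS
	Erasure
)

type StorageInfo struct {
	Total   int64
	Free    int64
	Backend struct {
		Type         BackendType
		OnlineDisks  int
		OfflineDisks int
		ReadQuorum   int
		WriteQuorum  int
	}
}

// describe branches on the backend type; disk counts only make sense for Erasure.
func describe(info StorageInfo) string {
	msg := fmt.Sprintf("%d bytes free of %d", info.Free, info.Total)
	if info.Backend.Type == Erasure {
		// Same arithmetic as getStorageInfoMsg later in this diff:
		// tolerable failures = read quorum minus disks already offline.
		if maxDiskFailures := info.Backend.ReadQuorum - info.Backend.OfflineDisks; maxDiskFailures >= 0 {
			msg += fmt.Sprintf(" (%d online, %d offline, can lose %d more drive(s))",
				info.Backend.OnlineDisks, info.Backend.OfflineDisks, maxDiskFailures)
		}
	}
	return msg
}

func main() {
	var info StorageInfo
	info.Total, info.Free = 10<<30, 2<<30
	info.Backend.Type = Erasure
	info.Backend.OnlineDisks, info.Backend.OfflineDisks = 7, 1
	info.Backend.ReadQuorum, info.Backend.WriteQuorum = 4, 5
	fmt.Println(describe(info))
}
```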
@@ -466,12 +466,12 @@ func serverMain(c *cli.Context) {
 	globalObjectAPI = newObject
 	globalObjLayerMutex.Unlock()
 
-	// Set startup time
-	globalBootTime = time.Now().UTC()
-
 	// Prints the formatted startup message once object layer is initialized.
 	printStartupMessage(apiEndPoints)
 
+	// Set uptime time after object layer has initialized.
+	globalBootTime = time.Now().UTC()
+
 	// Waits on the server.
 	<-globalServiceDoneCh
 }

@@ -33,7 +33,7 @@ func TestGetListenIPs(t *testing.T) {
 		port string
 		shouldPass bool
 	}{
-		{"localhost", "9000", true},
+		{"127.0.0.1", "9000", true},
 		{"", "9000", true},
 		{"", "", false},
 	}
@@ -114,8 +114,8 @@ func TestFinalizeAPIEndpoints(t *testing.T) {
 	}{
 		{":80"},
 		{":80"},
-		{"localhost:80"},
-		{"localhost:80"},
+		{"127.0.0.1:80"},
+		{"127.0.0.1:80"},
 	}
 
 	for i, test := range testCases {
@@ -283,18 +283,18 @@ func TestParseStorageEndpoints(t *testing.T) {
 		host string
 		expectedErr error
 	}{
-		{"", "http://localhost/export", nil},
+		{"", "http://127.0.0.1/export", nil},
 		{
 			"testhost",
-			"http://localhost/export",
-			errors.New("Invalid Argument localhost, port mandatory when --address <host>:<port> is used"),
+			"http://127.0.0.1/export",
+			errors.New("Invalid Argument 127.0.0.1, port mandatory when --address <host>:<port> is used"),
 		},
 		{
 			"",
-			"http://localhost:9000/export",
-			errors.New("Invalid Argument localhost:9000, port configurable using --address :<port>"),
+			"http://127.0.0.1:9000/export",
+			errors.New("Invalid Argument 127.0.0.1:9000, port configurable using --address :<port>"),
 		},
-		{"testhost", "http://localhost:9000/export", nil},
+		{"testhost", "http://127.0.0.1:9000/export", nil},
 	}
 	for i, test := range testCases {
 		globalMinioHost = test.globalMinioHost
@@ -315,15 +315,15 @@ func TestCheckEndpointsSyntax(t *testing.T) {
 	successCases := []string{
 		"export",
 		"/export",
-		"http://localhost/export",
-		"https://localhost/export",
+		"http://127.0.0.1/export",
+		"https://127.0.0.1/export",
 	}
 
 	failureCases := []string{
 		"/",
-		"http://localhost",
-		"http://localhost/",
-		"ftp://localhost/export",
+		"http://127.0.0.1",
+		"http://127.0.0.1/",
+		"ftp://127.0.0.1/export",
 		"server:/export",
 	}
 
@@ -495,8 +495,8 @@ func TestIsAnyEndpointLocal(t *testing.T) {
 			result: false,
 		},
 		{
-			disks: []string{"http://localhost/mnt/disk1",
-				"http://localhost/mnt/disk1"},
+			disks: []string{"http://127.0.0.1/mnt/disk1",
+				"http://127.0.0.1/mnt/disk1"},
 			result: true,
 		},
 	}

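`TestCheckEndpointsSyntax` above accepts plain paths and `http(s)` URLs that carry an export path, and rejects a bare `/`, URLs without an export path, and non-HTTP schemes such as `ftp`. The sketch below is a simplified re-implementation of that behaviour inferred from the test cases; it is not minio's actual `checkEndpointsSyntax`.

```go
// Simplified endpoint-syntax check mirroring the success/failure cases in
// TestCheckEndpointsSyntax above (illustrative only).
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func checkEndpointSyntax(endpoint string) error {
	u, err := url.Parse(endpoint)
	if err != nil {
		return fmt.Errorf("invalid endpoint %q: %v", endpoint, err)
	}
	switch u.Scheme {
	case "":
		// Plain path such as "export" or "/export"; the bare root "/" is rejected.
		if strings.TrimSuffix(endpoint, "/") == "" {
			return fmt.Errorf("invalid endpoint %q: empty path", endpoint)
		}
	case "http", "https":
		// URL endpoints need a non-root export path, e.g. http://127.0.0.1/export.
		if strings.TrimSuffix(u.Path, "/") == "" {
			return fmt.Errorf("invalid endpoint %q: missing export path", endpoint)
		}
	default:
		return fmt.Errorf("invalid endpoint %q: unsupported scheme %q", endpoint, u.Scheme)
	}
	return nil
}

func main() {
	endpoints := []string{"export", "/export", "http://127.0.0.1/export", "/", "http://127.0.0.1", "ftp://127.0.0.1/export"}
	for _, ep := range endpoints {
		fmt.Println(ep, "->", checkEndpointSyntax(ep))
	}
}
```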
@@ -141,7 +141,7 @@ func getStorageInfoMsg(storageInfo StorageInfo) string {
 	msg := fmt.Sprintf("%s %s Free, %s Total", colorBlue("Drive Capacity:"),
 		humanize.IBytes(uint64(storageInfo.Free)),
 		humanize.IBytes(uint64(storageInfo.Total)))
-	if storageInfo.Backend.Type == XL {
+	if storageInfo.Backend.Type == Erasure {
 		diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", storageInfo.Backend.OnlineDisks, storageInfo.Backend.OfflineDisks)
 		if maxDiskFailures := storageInfo.Backend.ReadQuorum - storageInfo.Backend.OfflineDisks; maxDiskFailures >= 0 {
 			diskInfo += fmt.Sprintf("We can withstand [%d] more drive failure(s).", maxDiskFailures)

@@ -38,7 +38,7 @@ func TestStorageInfoMsg(t *testing.T) {
 			OfflineDisks int
 			ReadQuorum int
 			WriteQuorum int
-		}{XL, 7, 1, 4, 5},
+		}{Erasure, 7, 1, 4, 5},
 	}
 
 	if msg := getStorageInfoMsg(infoStorage); !strings.Contains(msg, "2.0 GiB Free, 10 GiB Total") || !strings.Contains(msg, "7 Online, 1 Offline") {

@@ -244,7 +244,7 @@ func getStorageInfo(disks []StorageAPI) StorageInfo {
 		Free: validDisksInfo[0].Free * int64(onlineDisks) / 2,
 	}
 
-	storageInfo.Backend.Type = XL
+	storageInfo.Backend.Type = Erasure
 	storageInfo.Backend.OnlineDisks = onlineDisks
 	storageInfo.Backend.OfflineDisks = offlineDisks
 	return storageInfo

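In `getStorageInfo` above, usable free space is reported as `validDisksInfo[0].Free * int64(onlineDisks) / 2`: assuming the default erasure layout that dedicates half of the disks to parity, only half of the raw free space counts as usable. A small worked example of that arithmetic, with illustrative numbers:

```go
// Worked example of the Free calculation in getStorageInfo above, assuming
// an N/2 data + N/2 parity erasure layout. Numbers are illustrative.
package main

import "fmt"

func main() {
	perDiskFree := int64(100 << 30) // 100 GiB free on each disk
	onlineDisks := 8

	// Raw free space would be perDiskFree * onlineDisks, but with half of
	// the disks holding parity only half of that is usable capacity.
	usableFree := perDiskFree * int64(onlineDisks) / 2

	fmt.Printf("usable free space: %d GiB\n", usableFree>>30) // 400 GiB
}
```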
@@ -1,6 +1,8 @@
 ## Backends
 
-Minio currently implements two types of backends namely.
+Minio currently implements two types of backends.
 
-- Filesystem layer (fs).
-- ErasureCode layer (XL).
+| Minio | FS | Erasure | Stability |
+|:-----------:|:----:|:----:|:---:|
+| Standalone | x | x | Stable |
+| Distributed | x | x | Stable |

@@ -1,5 +1,4 @@
 ## Minio Server Limits Per Tenant
-We found the following APIs to be redundant or less useful outside of AWS. If you have a different view on any of the APIs we missed, please open a [github issue](https://github.com/minio/minio/issues).
 
 ### Erasure Code (Multiple Drives / Servers)
 
@@ -31,19 +30,21 @@ We found the following APIs to be redundant or less useful outside of AWS. If yo
 |Maximum number of objects returned per list objects request| 1000|
 |Maximum number of multipart uploads returned per list multipart uploads request| 1000|
 
+We found the following APIs to be redundant or less useful outside of AWS S3. If you have a different view on any of the APIs we missed, please open a [github issue](https://github.com/minio/minio/issues).
+
 ### List of Amazon S3 Bucket API's not supported on Minio.
 
-- BucketACL (Use bucket policies instead)
-- BucketCORS (CORS enabled by default)
-- BucketLifecycle (Not required for Minio's XL backend)
-- BucketReplication (Use `mc mirror` instead)
-- BucketVersions, BucketVersioning (Use `s3git`)
-- BucketWebsite (Use `caddy` or `nginx`)
-- BucketAnalytics, BucketMetrics, BucketLogging (Use bucket notification APIs)
+- BucketACL (Use [bucket policies](http://docs.minio.io/docs/minio-client-complete-guide#policy) instead)
+- BucketCORS (CORS enabled by default on all buckets for all HTTP verbs)
+- BucketLifecycle (Not required for Minio erasure coded backend)
+- BucketReplication (Use [`mc mirror`](http://docs.minio.io/docs/minio-client-complete-guide#mirror) instead)
+- BucketVersions, BucketVersioning (Use [`s3git`](https://github.com/s3git/s3git))
+- BucketWebsite (Use [`caddy`](https://github.com/mholt/caddy) or [`nginx`](https://www.nginx.com/resources/wiki/))
+- BucketAnalytics, BucketMetrics, BucketLogging (Use [bucket notification](http://docs.minio.io/docs/minio-client-complete-guide#events) APIs)
 - BucketRequestPayment
 - BucketTagging
 
 ### List of Amazon S3 Object API's not supported on Minio.
 
-- ObjectACL (Use bucket policies instead)
+- ObjectACL (Use [bucket policies](http://docs.minio.io/docs/minio-client-complete-guide#policy) instead)
 - ObjectTorrent

@@ -82,9 +82,9 @@ Fetch service status, replies disk space used, backend type and total disks offl
 
 | Param | Type | Description |
 |---|---|---|
-|`backend.Type` | _BackendType_ | Type of backend used by the server currently only FS or XL. |
-|`backend.OnlineDisks`| _int_ | Total number of disks online (only applies to XL backend), is empty for FS. |
-|`backend.OfflineDisks` | _int_ | Total number of disks offline (only applies to XL backend), is empty for FS. |
+|`backend.Type` | _BackendType_ | Type of backend used by the server currently only FS or Erasure. |
+|`backend.OnlineDisks`| _int_ | Total number of disks online (only applies to Erasure backend), is empty for FS. |
+|`backend.OfflineDisks` | _int_ | Total number of disks offline (only applies to Erasure backend), is empty for FS. |
 |`backend.ReadQuorum` | _int_ | Current total read quorum threshold before reads will be unavailable, is empty for FS. |
 |`backend.WriteQuorum` | _int_ | Current total write quorum threshold before writes will be unavailable, is empty for FS. |
 

@@ -36,8 +36,8 @@ const (
 	Unknown BackendType = iota
 	// Filesystem backend.
 	FS
-	// Multi disk XL (single, distributed) backend.
-	XL
+	// Multi disk Erasure (single, distributed) backend.
+	Erasure
 
 	// Add your own backend.
 )
@@ -50,10 +50,10 @@ type StorageInfo struct {
 	Free int64
 	// Backend type.
 	Backend struct {
-		// Represents various backend types, currently on FS and XL.
+		// Represents various backend types, currently on FS and Erasure.
 		Type BackendType
 
-		// Following fields are only meaningful if BackendType is XL.
+		// Following fields are only meaningful if BackendType is Erasure.
 		OnlineDisks int // Online disks during server startup.
 		OfflineDisks int // Offline disks during server startup.
 		ReadQuorum int // Minimum disks required for successful read operations.