mirror of https://github.com/minio/minio.git
update grafana dashboard with disk -> drive rename (#17857)
parent 21f0d6b549
commit 8a9b886011
@@ -283,7 +283,7 @@ KEY:
 heal manage object healing frequency and bitrot verification checks

 ARGS:
-bitrotscan (on|off) perform bitrot scan on disks when checking objects during scanner
+bitrotscan (on|off) perform bitrot scan on drives when checking objects during scanner
 max_sleep (duration) maximum sleep duration between objects to slow down heal operation. eg. 2s
 max_io (int) maximum IO requests allowed between objects to slow down heal operation. eg. 3
 ```
@@ -23,13 +23,13 @@ DIR:

 ## Common usage

-Standalone erasure coded configuration with 4 sets with 16 disks each.
+Standalone erasure coded configuration with 4 sets with 16 drives each.

 ```
 minio server dir{1...64}
 ```

-Distributed erasure coded configuration with 64 sets with 16 disks each.
+Distributed erasure coded configuration with 64 sets with 16 drives each.

 ```
 minio server http://host{1...16}/export{1...64}
@@ -41,17 +41,17 @@ Expansion of ellipses and choice of erasure sets based on this expansion is an a

 - Erasure coding used by MinIO is [Reed-Solomon](https://github.com/klauspost/reedsolomon) erasure coding scheme, which has a total shard maximum of 256 i.e 128 data and 128 parity. MinIO design goes beyond this limitation by doing some practical architecture choices.

-- Erasure set is a single erasure coding unit within a MinIO deployment. An object is sharded within an erasure set. Erasure set size is automatically calculated based on the number of disks. MinIO supports unlimited number of disks but each erasure set can be upto 16 disks and a minimum of 2 disks.
+- Erasure set is a single erasure coding unit within a MinIO deployment. An object is sharded within an erasure set. Erasure set size is automatically calculated based on the number of drives. MinIO supports unlimited number of drives but each erasure set can be upto 16 drives and a minimum of 2 drives.

-- We limited the number of drives to 16 for erasure set because, erasure code shards more than 16 can become chatty and do not have any performance advantages. Additionally since 16 drive erasure set gives you tolerance of 8 disks per object by default which is plenty in any practical scenario.
+- We limited the number of drives to 16 for erasure set because, erasure code shards more than 16 can become chatty and do not have any performance advantages. Additionally since 16 drive erasure set gives you tolerance of 8 drives per object by default which is plenty in any practical scenario.

-- Choice of erasure set size is automatic based on the number of disks available, let's say for example if there are 32 servers and 32 disks which is a total of 1024 disks. In this scenario 16 becomes the erasure set size. This is decided based on the greatest common divisor (GCD) of acceptable erasure set sizes ranging from *4 to 16*.
+- Choice of erasure set size is automatic based on the number of drives available, let's say for example if there are 32 servers and 32 drives which is a total of 1024 drives. In this scenario 16 becomes the erasure set size. This is decided based on the greatest common divisor (GCD) of acceptable erasure set sizes ranging from *4 to 16*.

-- *If total disks has many common divisors the algorithm chooses the minimum amounts of erasure sets possible for a erasure set size of any N*. In the example with 1024 disks - 4, 8, 16 are GCD factors. With 16 disks we get a total of 64 possible sets, with 8 disks we get a total of 128 possible sets, with 4 disks we get a total of 256 possible sets. So algorithm automatically chooses 64 sets, which is *16* 64 = 1024* disks in total.
+- *If total drives has many common divisors the algorithm chooses the minimum amounts of erasure sets possible for a erasure set size of any N*. In the example with 1024 drives - 4, 8, 16 are GCD factors. With 16 drives we get a total of 64 possible sets, with 8 drives we get a total of 128 possible sets, with 4 drives we get a total of 256 possible sets. So algorithm automatically chooses 64 sets, which is *16* 64 = 1024* drives in total.

-- *If total number of nodes are of odd number then GCD algorithm provides affinity towards odd number erasure sets to provide for uniform distribution across nodes*. This is to ensure that same number of disks are pariticipating in any erasure set. For example if you have 2 nodes with 180 drives then GCD is 15 but this would lead to uneven distribution, one of the nodes would participate more drives. To avoid this the affinity is given towards nodes which leads to next best GCD factor of 12 which provides uniform distribution.
+- *If total number of nodes are of odd number then GCD algorithm provides affinity towards odd number erasure sets to provide for uniform distribution across nodes*. This is to ensure that same number of drives are pariticipating in any erasure set. For example if you have 2 nodes with 180 drives then GCD is 15 but this would lead to uneven distribution, one of the nodes would participate more drives. To avoid this the affinity is given towards nodes which leads to next best GCD factor of 12 which provides uniform distribution.

-- In this algorithm, we also make sure that we spread the disks out evenly. MinIO server expands ellipses passed as arguments. Here is a sample expansion to demonstrate the process.
+- In this algorithm, we also make sure that we spread the drives out evenly. MinIO server expands ellipses passed as arguments. Here is a sample expansion to demonstrate the process.

 ```
 minio server http://host{1...2}/export{1...8}
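
The set-size selection described in the hunk above boils down to picking the largest candidate size between 4 and 16 that divides the total drive count, which also minimizes the number of erasure sets. A minimal Go sketch of that rule (illustrative only, not MinIO's actual implementation, and ignoring the odd-node affinity adjustment described above):

```go
package main

import "fmt"

// chooseSetSize mirrors the rule described above: pick the largest candidate
// erasure set size in the 4..16 range that divides the total drive count
// evenly, which also yields the smallest number of erasure sets.
func chooseSetSize(totalDrives int) (setSize, setCount int) {
	for candidate := 16; candidate >= 4; candidate-- {
		if totalDrives%candidate == 0 {
			return candidate, totalDrives / candidate
		}
	}
	return 0, 0 // no uniform layout possible in the 4..16 range
}

func main() {
	// 32 servers x 32 drives = 1024 drives -> set size 16, 64 sets,
	// matching the worked example in the text above.
	size, count := chooseSetSize(1024)
	fmt.Printf("set size %d, %d sets\n", size, count)
}
```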
@@ -141,25 +141,25 @@ func getAvailablePoolIdx(ctx context.Context) int {

 ### Advanced use cases with multiple ellipses

-Standalone erasure coded configuration with 4 sets with 16 disks each, which spawns disks across controllers.
+Standalone erasure coded configuration with 4 sets with 16 drives each, which spawns drives across controllers.

 ```
 minio server /mnt/controller{1...4}/data{1...16}
 ```

-Standalone erasure coded configuration with 16 sets, 16 disks per set, across mounts and controllers.
+Standalone erasure coded configuration with 16 sets, 16 drives per set, across mounts and controllers.

 ```
 minio server /mnt{1...4}/controller{1...4}/data{1...16}
 ```

-Distributed erasure coded configuration with 2 sets, 16 disks per set across hosts.
+Distributed erasure coded configuration with 2 sets, 16 drives per set across hosts.

 ```
 minio server http://host{1...32}/disk1
 ```

-Distributed erasure coded configuration with rack level redundancy 32 sets in total, 16 disks per set.
+Distributed erasure coded configuration with rack level redundancy 32 sets in total, 16 drives per set.

 ```
 minio server http://rack{1...4}-host{1...8}.example.net/export{1...16}
@@ -8,15 +8,15 @@ MinIO in distributed mode can help you setup a highly-available storage system w

 ### Data protection

-Distributed MinIO provides protection against multiple node/drive failures and [bit rot](https://github.com/minio/minio/blob/master/docs/erasure/README.md#what-is-bit-rot-protection) using [erasure code](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html). As the minimum disks required for distributed MinIO is 2 (same as minimum disks required for erasure coding), erasure code automatically kicks in as you launch distributed MinIO.
+Distributed MinIO provides protection against multiple node/drive failures and [bit rot](https://github.com/minio/minio/blob/master/docs/erasure/README.md#what-is-bit-rot-protection) using [erasure code](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html). As the minimum drives required for distributed MinIO is 2 (same as minimum drives required for erasure coding), erasure code automatically kicks in as you launch distributed MinIO.

-If one or more disks are offline at the start of a PutObject or NewMultipartUpload operation the object will have additional data protection bits added automatically to provide additional safety for these objects.
+If one or more drives are offline at the start of a PutObject or NewMultipartUpload operation the object will have additional data protection bits added automatically to provide additional safety for these objects.

 ### High availability

-A stand-alone MinIO server would go down if the server hosting the disks goes offline. In contrast, a distributed MinIO setup with _m_ servers and _n_ disks will have your data safe as long as _m/2_ servers or _m*n_/2 or more disks are online.
+A stand-alone MinIO server would go down if the server hosting the drives goes offline. In contrast, a distributed MinIO setup with _m_ servers and _n_ drives will have your data safe as long as _m/2_ servers or _m*n_/2 or more drives are online.

-For example, an 16-server distributed setup with 200 disks per node would continue serving files, up to 4 servers can be offline in default configuration i.e around 800 disks down MinIO would continue to read and write objects.
+For example, an 16-server distributed setup with 200 drives per node would continue serving files, up to 4 servers can be offline in default configuration i.e around 800 drives down MinIO would continue to read and write objects.

 Refer to sizing guide for more understanding on default values chosen depending on your erasure stripe size [here](https://github.com/minio/minio/blob/master/docs/distributed/SIZING.md). Parity settings can be changed using [storage classes](https://github.com/minio/minio/tree/master/docs/erasure/storage-class).

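
The _m*n_/2 rule mentioned above can be restated as a one-line check. This is only a rule of thumb sketched here for intuition; actual read/write quorum in MinIO is evaluated per erasure set and depends on the configured parity:

```go
package main

import "fmt"

// clusterReadable restates the m*n/2 rule of thumb: with m servers of n
// drives each, data stays available while at least half of the m*n drives
// are online. Illustration only, not MinIO's quorum logic.
func clusterReadable(servers, drivesPerServer, onlineDrives int) bool {
	total := servers * drivesPerServer
	return onlineDrives*2 >= total
}

func main() {
	// 16 servers x 200 drives = 3200 drives; with roughly 800 drives
	// offline, 2400 remain online, so the rule of thumb is satisfied.
	fmt.Println(clusterReadable(16, 200, 2400))
}
```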
@@ -54,7 +54,7 @@ To start a distributed MinIO instance, you just need to pass drive locations as
 - **MinIO creates erasure-coding sets of _2_ to _16_ drives per set. The number of drives you provide in total must be a multiple of one of those numbers.**
 - **MinIO chooses the largest EC set size which divides into the total number of drives or total number of nodes given - making sure to keep the uniform distribution i.e each node participates equal number of drives per set**.
 - **Each object is written to a single EC set, and therefore is spread over no more than 16 drives.**
-- **All the nodes running distributed MinIO setup are recommended to be homogeneous, i.e. same operating system, same number of disks and same network interconnects.**
+- **All the nodes running distributed MinIO setup are recommended to be homogeneous, i.e. same operating system, same number of drives and same network interconnects.**
 - MinIO distributed mode requires **fresh directories**. If required, the drives can be shared with other applications. You can do this by using a sub-directory exclusive to MinIO. For example, if you have mounted your volume under `/export`, pass `/export/data` as arguments to MinIO server.
 - The IP addresses and drive paths below are for demonstration purposes only, you need to replace these with the actual IP addresses and drive paths/folders.
 - Servers running distributed MinIO instances should be less than 15 minutes apart. You can enable [NTP](http://www.ntp.org/) service as a best practice to ensure same times across servers.
@@ -92,7 +92,7 @@ For example:
 minio server http://host{1...4}/export{1...16} http://host{5...12}/export{1...16}
 ```

-Now the server has expanded total storage by _(newly_added_servers\*m)_ more disks, taking the total count to _(existing_servers\*m)+(newly_added_servers\*m)_ disks. New object upload requests automatically start using the least used cluster. This expansion strategy works endlessly, so you can perpetually expand your clusters as needed. When you restart, it is immediate and non-disruptive to the applications. Each group of servers in the command-line is called a pool. There are 2 server pools in this example. New objects are placed in server pools in proportion to the amount of free space in each pool. Within each pool, the location of the erasure-set of drives is determined based on a deterministic hashing algorithm.
+Now the server has expanded total storage by _(newly_added_servers\*m)_ more drives, taking the total count to _(existing_servers\*m)+(newly_added_servers\*m)_ drives. New object upload requests automatically start using the least used cluster. This expansion strategy works endlessly, so you can perpetually expand your clusters as needed. When you restart, it is immediate and non-disruptive to the applications. Each group of servers in the command-line is called a pool. There are 2 server pools in this example. New objects are placed in server pools in proportion to the amount of free space in each pool. Within each pool, the location of the erasure-set of drives is determined based on a deterministic hashing algorithm.

 > **NOTE:** **Each pool you add must have the same erasure coding parity configuration as the original pool, so the same data redundancy SLA is maintained.**

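
The deterministic placement mentioned in the hunk above means an object's key alone determines which erasure set inside a pool holds it, so reads can locate an object without a central lookup. A toy illustration of the idea (FNV-1a is used purely for demonstration; MinIO's real hashing scheme differs):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// pickErasureSet illustrates deterministic placement: hash the object key
// and take it modulo the number of erasure sets in the pool. Not MinIO's
// actual algorithm.
func pickErasureSet(objectKey string, setCount int) int {
	h := fnv.New32a()
	h.Write([]byte(objectKey))
	return int(h.Sum32() % uint32(setCount))
}

func main() {
	// The same key always maps to the same set.
	fmt.Println(pickErasureSet("photos/2023/img001.jpg", 64))
}
```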
@@ -32,8 +32,8 @@ Capacity constrained environments, MinIO will work but not recommended for produ
 | 15 | 2 | 15 | 4 | 4 | 4 |
 | 16 | 2 | 16 | 4 | 4 | 4 |

-If one or more disks are offline at the start of a PutObject or NewMultipartUpload operation the object will have additional data
-protection bits added automatically to provide the regular safety for these objects up to 50% of the number of disks.
+If one or more drives are offline at the start of a PutObject or NewMultipartUpload operation the object will have additional data
+protection bits added automatically to provide the regular safety for these objects up to 50% of the number of drives.
 This will allow normal write operations to take place on systems that exceed the write tolerance.

 This means that in the examples above the system will always write 4 parity shards at the expense of slightly higher disk usage.
@@ -1,15 +1,15 @@
 # MinIO Storage Class Quickstart Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io)

-MinIO server supports storage class in erasure coding mode. This allows configurable data and parity disks per object.
+MinIO server supports storage class in erasure coding mode. This allows configurable data and parity drives per object.

 This page is intended as a summary of MinIO Erasure Coding. For a more complete explanation, see <https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html>.

 ## Overview

 MinIO supports two storage classes, Reduced Redundancy class and Standard class. These classes can be defined using environment variables
-set before starting MinIO server. After the data and parity disks for each storage class are defined using environment variables,
+set before starting MinIO server. After the data and parity drives for each storage class are defined using environment variables,
 you can set the storage class of an object via request metadata field `x-amz-storage-class`. MinIO server then honors the storage class by
-saving the object in specific number of data and parity disks.
+saving the object in specific number of data and parity drives.

 ## Storage usage

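
For intuition on the storage usage discussion that follows, the raw-to-logical capacity overhead of a parity setting can be approximated as N/(N-P) for an erasure set of N drives with P parity drives. A small illustrative helper, sketched here for clarity and not part of MinIO's code:

```go
package main

import "fmt"

// usageRatio approximates the raw-to-logical storage overhead for an erasure
// set of n drives with p parity drives: an object of size S consumes roughly
// S * n / (n - p) bytes of raw capacity.
func usageRatio(n, p int) float64 {
	return float64(n) / float64(n-p)
}

func main() {
	// A 16-drive set with 4 parity drives -> 16/12, about 1.33x raw usage.
	fmt.Printf("%.2f\n", usageRatio(16, 4))
}
```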
@@ -38,12 +38,12 @@ You can calculate _approximate_ storage usage ratio using the formula - total dr

 ### Allowed values for STANDARD storage class

-`STANDARD` storage class implies more parity than `REDUCED_REDUNDANCY` class. So, `STANDARD` parity disks should be
+`STANDARD` storage class implies more parity than `REDUCED_REDUNDANCY` class. So, `STANDARD` parity drives should be

 - Greater than or equal to 2, if `REDUCED_REDUNDANCY` parity is not set.
 - Greater than `REDUCED_REDUNDANCY` parity, if it is set.

-Parity blocks can not be higher than data blocks, so `STANDARD` storage class parity can not be higher than N/2. (N being total number of disks)
+Parity blocks can not be higher than data blocks, so `STANDARD` storage class parity can not be higher than N/2. (N being total number of drives)

 The default value for the `STANDARD` storage class depends on the number of volumes in the erasure set:

@@ -57,7 +57,7 @@ For more complete documentation on Erasure Set sizing, see the [MinIO Documentat

 ### Allowed values for REDUCED_REDUNDANCY storage class

-`REDUCED_REDUNDANCY` implies lesser parity than `STANDARD` class. So,`REDUCED_REDUNDANCY` parity disks should be
+`REDUCED_REDUNDANCY` implies lesser parity than `STANDARD` class. So,`REDUCED_REDUNDANCY` parity drives should be

 - Less than N/2, if `STANDARD` parity is not set.
 - Less than `STANDARD` Parity, if it is set.
@@ -86,14 +86,14 @@ more details.
 #### Note

 - If `STANDARD` storage class is set via environment variables or `mc admin config` get/set commands, and `x-amz-storage-class` is not present in request metadata, MinIO server will
-apply `STANDARD` storage class to the object. This means the data and parity disks will be used as set in `STANDARD` storage class.
+apply `STANDARD` storage class to the object. This means the data and parity drives will be used as set in `STANDARD` storage class.

 - If storage class is not defined before starting MinIO server, and subsequent PutObject metadata field has `x-amz-storage-class` present
 with values `REDUCED_REDUNDANCY` or `STANDARD`, MinIO server uses default parity values.

 ### Set metadata

-In below example `minio-go` is used to set the storage class to `REDUCED_REDUNDANCY`. This means this object will be split across 6 data disks and 2 parity disks (as per the storage class set in previous step).
+In below example `minio-go` is used to set the storage class to `REDUCED_REDUNDANCY`. This means this object will be split across 6 data drives and 2 parity drives (as per the storage class set in previous step).

 ```go
 s3Client, err := minio.New("localhost:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
@@ -77,7 +77,7 @@ NOTE:
 - Additionally in the case of the erasure coded setup `tags.objectErasureMap` provides per object details about
   - Pool number the object operation was performed on.
   - Set number the object operation was performed on.
-  - The list of disks participating in this operation belong to the set.
+  - The list of drives participating in this operation belong to the set.

 ```json
 {
@@ -218,7 +218,7 @@ NOTE:
 - Additionally in the case of the erasure coded setup `tags.objectErasureMap` provides per object details about
   - Pool number the object operation was performed on.
   - Set number the object operation was performed on.
-  - The list of disks participating in this operation belong to the set.
+  - The list of drives participating in this operation belong to the set.

 ## Explore Further

@@ -990,13 +990,13 @@
 "instant": true,
 "interval": "",
 "intervalFactor": 1,
-"legendFormat": "Total online disks in MinIO Cluster",
+"legendFormat": "Total online drives in MinIO Cluster",
 "metric": "process_start_time_seconds",
 "refId": "A",
 "step": 60
 }
 ],
-"title": "Total Online Disks",
+"title": "Total Online Drives",
 "type": "stat"
 },
 {
@@ -1430,7 +1430,7 @@
 "step": 60
 }
 ],
-"title": "Total Offline Disks",
+"title": "Total Offline Drives",
 "type": "stat"
 },
 {
@@ -2393,7 +2393,7 @@
 "format": "time_series",
 "instant": false,
 "interval": "",
-"legendFormat": "Used Capacity [{{server}}:{{disk}}]",
+"legendFormat": "Used Capacity [{{server}}:{{drive}}]",
 "refId": "A"
 }
 ],
@@ -2483,7 +2483,7 @@
 "format": "time_series",
 "instant": false,
 "interval": "",
-"legendFormat": "Free Inodes [{{server}}:{{disk}}]",
+"legendFormat": "Free Inodes [{{server}}:{{drive}}]",
 "refId": "A"
 }
 ],
@@ -2531,7 +2531,7 @@
 "type": "prometheus",
 "uid": "${DS_PROMETHEUS}"
 },
-"description": "Number of online disks per MinIO Server",
+"description": "Number of online drives per MinIO Server",
 "fieldConfig": {
 "defaults": {
 "links": []
@@ -172,8 +172,8 @@ var (
 	ErrStorageClassValue = newErrFn(
 		"Invalid storage class value",
 		"Please check the value",
-		`MINIO_STORAGE_CLASS_STANDARD: Format "EC:<Default_Parity_Standard_Class>" (e.g. "EC:3"). This sets the number of parity disks for MinIO server in Standard mode. Objects are stored in Standard mode, if storage class is not defined in Put request
-MINIO_STORAGE_CLASS_RRS: Format "EC:<Default_Parity_Reduced_Redundancy_Class>" (e.g. "EC:3"). This sets the number of parity disks for MinIO server in Reduced Redundancy mode. Objects are stored in Reduced Redundancy mode, if Put request specifies RRS storage class
+		`MINIO_STORAGE_CLASS_STANDARD: Format "EC:<Default_Parity_Standard_Class>" (e.g. "EC:3"). This sets the number of parity drives for MinIO server in Standard mode. Objects are stored in Standard mode, if storage class is not defined in Put request
+MINIO_STORAGE_CLASS_RRS: Format "EC:<Default_Parity_Reduced_Redundancy_Class>" (e.g. "EC:3"). This sets the number of parity drives for MinIO server in Reduced Redundancy mode. Objects are stored in Reduced Redundancy mode, if Put request specifies RRS storage class
 Refer to the link https://github.com/minio/minio/tree/master/docs/erasure/storage-class for more information`,
 	)

@@ -29,7 +29,7 @@ var (
 	Help = config.HelpKVS{
 		config.HelpKV{
 			Key: Bitrot,
-			Description: `perform bitrot scan on disks when checking objects during scanner` + defaultHelpPostfix(Bitrot),
+			Description: `perform bitrot scan on drives when checking objects during scanner` + defaultHelpPostfix(Bitrot),
 			Optional: true,
 			Type: "on|off",
 		},
@@ -49,8 +49,8 @@ const (
 	// Supported storage class scheme is EC
 	schemePrefix = "EC"

-	// Min parity disks
-	minParityDisks = 0
+	// Min parity drives
+	minParityDrives = 0

 	// Default RRS parity is always minimum parity.
 	defaultRRSParity = 1
@@ -150,26 +150,26 @@ func parseStorageClass(storageClassEnv string) (sc StorageClass, err error) {
 		return StorageClass{}, config.ErrStorageClassValue(nil).Msg("Unsupported scheme " + s[0] + ". Supported scheme is EC")
 	}

-	// Number of parity disks should be integer
-	parityDisks, err := strconv.Atoi(s[1])
+	// Number of parity drives should be integer
+	parityDrives, err := strconv.Atoi(s[1])
 	if err != nil {
 		return StorageClass{}, config.ErrStorageClassValue(err)
 	}
-	if parityDisks < 0 {
+	if parityDrives < 0 {
 		return StorageClass{}, config.ErrStorageClassValue(nil).Msg("Unsupported parity value " + s[1] + " provided")
 	}
 	return StorageClass{
-		Parity: parityDisks,
+		Parity: parityDrives,
 	}, nil
 }

 // ValidateParity validate standard storage class parity.
 func ValidateParity(ssParity, setDriveCount int) error {
-	// SS parity disks should be greater than or equal to minParityDisks.
-	// Parity below minParityDisks is not supported.
-	if ssParity > 0 && ssParity < minParityDisks {
+	// SS parity drives should be greater than or equal to minParityDrives.
+	// Parity below minParityDrives is not supported.
+	if ssParity > 0 && ssParity < minParityDrives {
 		return fmt.Errorf("parity %d should be greater than or equal to %d",
-			ssParity, minParityDisks)
+			ssParity, minParityDrives)
 	}

 	if ssParity > setDriveCount/2 {
@@ -179,19 +179,19 @@ func ValidateParity(ssParity, setDriveCount int) error {
 	return nil
 }

-// Validates the parity disks.
+// Validates the parity drives.
 func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
-	// SS parity disks should be greater than or equal to minParityDisks.
-	// Parity below minParityDisks is not supported.
-	if ssParity > 0 && ssParity < minParityDisks {
+	// SS parity drives should be greater than or equal to minParityDrives.
+	// Parity below minParityDrives is not supported.
+	if ssParity > 0 && ssParity < minParityDrives {
 		return fmt.Errorf("Standard storage class parity %d should be greater than or equal to %d",
-			ssParity, minParityDisks)
+			ssParity, minParityDrives)
 	}

-	// RRS parity disks should be greater than or equal to minParityDisks.
-	// Parity below minParityDisks is not supported.
-	if rrsParity > 0 && rrsParity < minParityDisks {
-		return fmt.Errorf("Reduced redundancy storage class parity %d should be greater than or equal to %d", rrsParity, minParityDisks)
+	// RRS parity drives should be greater than or equal to minParityDrives.
+	// Parity below minParityDrives is not supported.
+	if rrsParity > 0 && rrsParity < minParityDrives {
+		return fmt.Errorf("Reduced redundancy storage class parity %d should be greater than or equal to %d", rrsParity, minParityDrives)
 	}

 	if setDriveCount > 2 {
@@ -124,7 +124,7 @@ func TestValidateParity(t *testing.T) {
 func TestParityCount(t *testing.T) {
 	tests := []struct {
 		sc string
-		disksCount int
+		drivesCount int
 		expectedData int
 		expectedParity int
 	}{
@@ -158,8 +158,8 @@ func TestParityCount(t *testing.T) {
 			scfg.Standard.Parity = 7
 		}
 		parity := scfg.GetParityForSC(tt.sc)
-		if (tt.disksCount - parity) != tt.expectedData {
-			t.Errorf("Test %d, Expected data drives %d, got %d", i+1, tt.expectedData, tt.disksCount-parity)
+		if (tt.drivesCount - parity) != tt.expectedData {
+			t.Errorf("Test %d, Expected data drives %d, got %d", i+1, tt.expectedData, tt.drivesCount-parity)
 			continue
 		}
 		if parity != tt.expectedParity {