fix: do not fail upon faulty/non-writable drives
Gracefully start the server if there are other drives available; print enough information in the console for the administrator to notice the errors. Bonus: for really large streams, use a larger buffer for writes.
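As a standalone sketch of the graceful-start behavior described above (hypothetical names such as probeDrive, not MinIO's actual bootstrap code): probe each drive, log failures to the console, and keep going as long as at least one drive is usable.

package main

import (
	"fmt"
	"log"
	"os"
)

// probeDrive is a hypothetical writability check: ensure the drive's root
// exists and that a scratch file can be created and removed inside it.
func probeDrive(path string) error {
	if err := os.MkdirAll(path, 0o777); err != nil {
		return err
	}
	f, err := os.CreateTemp(path, ".writable-*")
	if err != nil {
		return err
	}
	f.Close()
	return os.Remove(f.Name())
}

func main() {
	drives := []string{"/mnt/drive1", "/mnt/drive2", "/mnt/drive3"}
	var usable []string
	for _, d := range drives {
		if err := probeDrive(d); err != nil {
			// Print enough information for the administrator, then move on.
			log.Printf("skipping faulty/non-writable drive %s: %v", d, err)
			continue
		}
		usable = append(usable, d)
	}
	if len(usable) == 0 {
		log.Fatal("no usable drives, refusing to start")
	}
	fmt.Printf("starting with %d of %d drives\n", len(usable), len(drives))
}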
@@ -52,9 +52,10 @@ import (
 )
 
 const (
-	nullVersionID  = "null"
-	blockSizeLarge = 2 * humanize.MiByte   // Default r/w block size for larger objects.
-	blockSizeSmall = 128 * humanize.KiByte // Default r/w block size for smaller objects.
+	nullVersionID        = "null"
+	blockSizeSmall       = 128 * humanize.KiByte // Default r/w block size for smaller objects.
+	blockSizeLarge       = 2 * humanize.MiByte   // Default r/w block size for larger objects.
+	blockSizeReallyLarge = 4 * humanize.MiByte   // Default write block size for objects per shard >= 64MiB
 
 	// On regular files bigger than this;
 	readAheadSize = 16 << 20
@@ -63,6 +64,9 @@ const (
 	// Size of each buffer.
 	readAheadBufSize = 1 << 20
 
+	// Really large streams threshold per shard.
+	reallyLargeFileThreshold = 64 * humanize.MiByte // Optimized for HDDs
+
 	// Small file threshold below which data accompanies metadata from storage layer.
 	smallFileThreshold = 128 * humanize.KiByte // Optimized for NVMe/SSDs
 	// For hardrives it is possible to set this to a lower value to avoid any
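The constants above tier write buffers by stream size. A hypothetical standalone sketch of the selection logic they imply (bufferSizeFor is not a MinIO function, just an illustration):

package main

import "fmt"

const (
	kib = 1 << 10
	mib = 1 << 20

	blockSizeSmall           = 128 * kib
	blockSizeLarge           = 2 * mib
	blockSizeReallyLarge     = 4 * mib
	smallFileThreshold       = 128 * kib
	reallyLargeFileThreshold = 64 * mib
)

// bufferSizeFor picks a write block size from the known stream size.
func bufferSizeFor(fileSize int64) int {
	switch {
	case fileSize > 0 && fileSize >= reallyLargeFileThreshold:
		return blockSizeReallyLarge // 4 MiB writes keep HDDs streaming sequentially
	case fileSize > 0 && fileSize <= smallFileThreshold:
		return blockSizeSmall
	default:
		return blockSizeLarge // includes unknown sizes (fileSize < 0)
	}
}

func main() {
	for _, sz := range []int64{64 * kib, 10 * mib, 100 * mib, -1} {
		fmt.Printf("%10d bytes -> %d byte buffer\n", sz, bufferSizeFor(sz))
	}
}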
@@ -101,8 +105,9 @@ type xlStorage struct {
 
 	globalSync bool
 
-	poolLarge sync.Pool
-	poolSmall sync.Pool
+	poolReallyLarge sync.Pool
+	poolLarge       sync.Pool
+	poolSmall       sync.Pool
 
 	rootDisk bool
 
@@ -179,7 +184,7 @@ func getValidPath(path string) (string, error) {
 	}
 	if osIsNotExist(err) {
 		// Disk not found create it.
-		if err = reliableMkdirAll(path, 0777); err != nil {
+		if err = mkdirAll(path, 0777); err != nil {
 			return path, err
 		}
 	}
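For context, reliableMkdirAll is, as best I can tell, a retry-once wrapper around os.MkdirAll that papers over a racing removal of a parent directory; the sketch below is from memory, not the verbatim MinIO helper. Dropping the retry lets a faulty drive fail fast, so the caller can log the error and carry on with the remaining drives.

package main

import (
	"errors"
	"io/fs"
	"os"
)

// reliableMkdirAll (sketch, assumed behavior): retry once if a parent
// vanishes between the walk and the mkdir; otherwise return the error.
func reliableMkdirAll(dirPath string, mode os.FileMode) (err error) {
	for i := 0; ; i++ {
		// os.MkdirAll creates all parents; mkdir honors the system umask.
		if err = os.MkdirAll(dirPath, mode); err != nil {
			if errors.Is(err, fs.ErrNotExist) && i == 0 {
				continue
			}
		}
		return err
	}
}

func main() {
	if err := reliableMkdirAll("/tmp/example/a/b", 0o777); err != nil {
		panic(err)
	}
}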
@@ -251,6 +256,12 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
 	p := &xlStorage{
 		diskPath: path,
 		endpoint: ep,
+		poolReallyLarge: sync.Pool{
+			New: func() interface{} {
+				b := disk.AlignedBlock(blockSizeReallyLarge)
+				return &b
+			},
+		},
 		poolLarge: sync.Pool{
 			New: func() interface{} {
 				b := disk.AlignedBlock(blockSizeLarge)
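A note on the pool shape above: the pools store *[]byte rather than []byte because boxing a bare slice into interface{} allocates a fresh header on every Put, which defeats the pool. A self-contained sketch of the pattern, with make standing in for disk.AlignedBlock (which returns O_DIRECT-friendly aligned memory):

package main

import (
	"fmt"
	"sync"
)

const blockSizeReallyLarge = 4 << 20 // 4 MiB

var poolReallyLarge = sync.Pool{
	New: func() interface{} {
		b := make([]byte, blockSizeReallyLarge) // stand-in for disk.AlignedBlock
		return &b
	},
}

func main() {
	// Get hands back a pointer to a reusable buffer; Put returns it
	// without re-boxing the slice header.
	bufp := poolReallyLarge.Get().(*[]byte)
	defer poolReallyLarge.Put(bufp)

	fmt.Println("buffer bytes:", len(*bufp))
}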
@@ -628,7 +639,7 @@ func (s *xlStorage) MakeVol(ctx context.Context, volume string) error {
 	// Volume does not exist we proceed to create.
 	if osIsNotExist(err) {
 		// Make a volume entry, with mode 0777 mkdir honors system umask.
-		err = reliableMkdirAll(volumeDir, 0777)
+		err = mkdirAll(volumeDir, 0777)
 	}
 	if osIsPermission(err) {
 		return errDiskAccessDenied
@@ -1540,8 +1551,15 @@ func (s *xlStorage) CreateFile(ctx context.Context, volume, path string, fileSiz
 		w.Close()
 	}()
 
-	bufp := s.poolLarge.Get().(*[]byte)
-	defer s.poolLarge.Put(bufp)
+	var bufp *[]byte
+	if fileSize > 0 && fileSize >= reallyLargeFileThreshold {
+		// use a larger 4MiB buffer for really large streams.
+		bufp = s.poolReallyLarge.Get().(*[]byte)
+		defer s.poolReallyLarge.Put(bufp)
+	} else {
+		bufp = s.poolLarge.Get().(*[]byte)
+		defer s.poolLarge.Put(bufp)
+	}
 
 	written, err := xioutil.CopyAligned(w, r, *bufp, fileSize)
 	if err != nil {
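To model why the 4 MiB buffer matters: xioutil.CopyAligned streams r into w through the pooled buffer while keeping writes aligned for O_DIRECT handles. The standalone sketch below uses io.CopyBuffer and only models the buffer-size effect (fewer, larger writes per syscall, which suits HDDs on really large streams); it is not the MinIO copy routine.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// countWriter implements only io.Writer (no ReadFrom), so io.CopyBuffer
// is forced to loop through our buffer instead of bypassing it.
type countWriter struct{ n int64 }

func (w *countWriter) Write(p []byte) (int, error) {
	w.n += int64(len(p))
	return len(p), nil
}

func main() {
	// LimitReader hides bytes.Reader's WriteTo for the same reason.
	src := io.LimitReader(bytes.NewReader(make([]byte, 10<<20)), 10<<20)
	dst := &countWriter{}

	buf := make([]byte, 4<<20) // the "really large" 4 MiB write buffer
	if _, err := io.CopyBuffer(dst, src, buf); err != nil {
		panic(err)
	}
	fmt.Printf("copied %d bytes in up to %d-byte writes\n", dst.n, len(buf))
}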
@@ -1984,7 +2002,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
 
 		legacyDataPath := pathJoin(dstVolumeDir, dstPath, legacyDataDir)
 		// legacy data dir means its old content, honor system umask.
-		if err = reliableMkdirAll(legacyDataPath, 0777); err != nil {
+		if err = mkdirAll(legacyDataPath, 0777); err != nil {
 			return osErrToFileErr(err)
 		}
 