/*
 * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bufio"
	"bytes"
	"context"
	"crypto/rand"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	slashpath "path"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

	humanize "github.com/dustin/go-humanize"
	jsoniter "github.com/json-iterator/go"
	"github.com/klauspost/readahead"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/disk"
	xioutil "github.com/minio/minio/pkg/ioutil"
	"github.com/minio/minio/pkg/mountinfo"
)

const (
	nullVersionID     = "null"
	diskMinFreeSpace  = 900 * humanize.MiByte // Min 900MiB free space.
	diskMinTotalSpace = diskMinFreeSpace      // Min 900MiB total space.
	readBlockSize     = 4 * humanize.MiByte   // Default read block size 4MiB.

	// On regular files bigger than this;
	readAheadSize = 16 << 20
	// Read this many buffers ahead.
	readAheadBuffers = 4
	// Size of each buffer.
	readAheadBufSize = 1 << 20

	// Wait interval to check if active IO count is low
	// to proceed crawling to compute data usage.
	// Wait up to lowActiveIOWaitMaxN times.
	lowActiveIOWaitTick = 100 * time.Millisecond
	lowActiveIOWaitMaxN = 10

	// XL metadata file carries per object metadata.
	xlStorageFormatFile = "xl.meta"
)
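
// Summary comment (added for clarity, derived from the constants above):
// read-ahead is only layered on top of regular files of at least
// readAheadSize (16MiB), and then keeps readAheadBuffers x readAheadBufSize
// = 4MiB of data in flight ahead of the reader; smaller objects are served
// through the plain pooled/bufio readers instead.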

// isValidVolname verifies a volume name in accordance with object
// layer requirements.
func isValidVolname(volname string) bool {
	if len(volname) < 3 {
		return false
	}

	if runtime.GOOS == "windows" {
		// Volname shouldn't have reserved characters in Windows.
		return !strings.ContainsAny(volname, `\:*?\"<>|`)
	}

	return true
}

// xlStorage - implements StorageAPI interface.
type xlStorage struct {
	// Disk usage metrics
	totalUsed uint64 // ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG

	maxActiveIOCount int32
	activeIOCount    int32

	diskPath string
	hostname string

	pool sync.Pool

	diskMount bool // indicates if the path is an actual mount.

	diskID string

	formatFileInfo  os.FileInfo
	formatLastCheck time.Time

	ctx context.Context
	sync.RWMutex
}

// checkPathLength - returns an error if the given path name exceeds the
// platform limits (255-character segments on Unix, 1016 characters on macOS,
// 1024 characters on Windows).
func checkPathLength(pathName string) error {
	// Apple OS X path length is limited to 1016.
	if runtime.GOOS == "darwin" && len(pathName) > 1016 {
		return errFileNameTooLong
	}

	// Disallow more than 1024 characters on windows; there
	// are no known name_max limits on Windows.
	if runtime.GOOS == "windows" && len(pathName) > 1024 {
		return errFileNameTooLong
	}

	// On Unix we reject paths if they are just '.', '..' or '/'.
	if pathName == "." || pathName == ".." || pathName == slashSeparator {
		return errFileAccessDenied
	}

	// Check that no path segment exceeds 255 characters on all Unix
	// platforms; this value appears as NAME_MAX in
	// /usr/include/linux/limits.h.
	var count int64
	for _, p := range pathName {
		switch p {
		case '/':
			count = 0 // Reset
		case '\\':
			if runtime.GOOS == globalWindowsOSName {
				count = 0
			}
		default:
			count++
			if count > 255 {
				return errFileNameTooLong
			}
		}
	}

	// Success.
	return nil
}
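
// Illustrative examples of the rules enforced above (assumed values, not
// taken from the upstream tests):
//
//	checkPathLength("bucket/object.txt")             // nil
//	checkPathLength(".")                             // errFileAccessDenied
//	checkPathLength(strings.Repeat("a", 256))        // errFileNameTooLong (single segment too long)
//	checkPathLength("a/" + strings.Repeat("b", 200)) // nil (each segment within limits)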

func getValidPath(path string, requireDirectIO bool) (string, error) {
	if path == "" {
		return path, errInvalidArgument
	}

	var err error
	// Disallow relative paths, figure out absolute paths.
	path, err = filepath.Abs(path)
	if err != nil {
		return path, err
	}

	fi, err := os.Stat(path)
	if err != nil && !os.IsNotExist(err) {
		return path, err
	}
	if os.IsNotExist(err) {
		// Disk not found, create it.
		if err = os.MkdirAll(path, 0777); err != nil {
			return path, err
		}
	}
	if fi != nil && !fi.IsDir() {
		return path, errDiskNotDir
	}

	di, err := getDiskInfo(path)
	if err != nil {
		return path, err
	}
	if err = checkDiskMinTotal(di); err != nil {
		return path, err
	}

	// Check if the backend is writable.
	var rnd [8]byte
	_, _ = rand.Read(rnd[:])

	fn := pathJoin(path, ".writable-check-"+hex.EncodeToString(rnd[:])+".tmp")
	defer os.Remove(fn)

	var file *os.File

	if requireDirectIO {
		file, err = disk.OpenFileDirectIO(fn, os.O_CREATE|os.O_EXCL, 0666)
	} else {
		file, err = os.OpenFile(fn, os.O_CREATE|os.O_EXCL, 0666)
	}

	// Open the file with direct I/O and the default umask; this also
	// verifies whether direct I/O is supported at all.
	if err != nil {
		if isSysErrInvalidArg(err) {
			return path, errUnsupportedDisk
		}
		return path, err
	}
	file.Close()

	return path, nil
}
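
// Note (added for clarity): the throw-away ".writable-check-*.tmp" open above
// doubles as the unsupported-filesystem probe. When requireDirectIO is set and
// the O_DIRECT open fails with EINVAL (isSysErrInvalidArg), the disk is
// reported as errUnsupportedDisk rather than merely unwritable.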

// isDirEmpty - returns whether given directory is empty or not.
func isDirEmpty(dirname string) bool {
	f, err := os.Open(dirname)
	if err != nil {
		if !os.IsNotExist(err) {
			logger.LogIf(GlobalContext, err)
		}

		return false
	}
	defer f.Close()
	// List one entry.
	if _, err = f.Readdirnames(1); err != io.EOF {
		if !os.IsNotExist(err) {
			logger.LogIf(GlobalContext, err)
		}
		return false
	}
	// Returns true if we have reached EOF, directory is indeed empty.
	return true
}

// Initialize a new storage disk.
func newXLStorage(path string, hostname string) (*xlStorage, error) {
	var err error
	if path, err = getValidPath(path, true); err != nil {
		return nil, err
	}

	p := &xlStorage{
		diskPath: path,
		hostname: hostname,
		pool: sync.Pool{
			New: func() interface{} {
				b := disk.AlignedBlock(readBlockSize)
				return &b
			},
		},
		diskMount: mountinfo.IsLikelyMountPoint(path),
		// Allow the disk usage crawler to run with up to 2 concurrent
		// I/O ops; if and when activeIOCount reaches this value, the
		// crawler is suspended and waits until activeIOCount drops
		// back below this threshold.
		maxActiveIOCount: 3,
		ctx:              GlobalContext,
	}

	// Success.
	return p, nil
}
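
// Note (added for clarity): the sync.Pool above hands out readBlockSize
// (4MiB) disk.AlignedBlock buffers so that O_DIRECT reads can reuse
// page-aligned memory instead of allocating a fresh buffer per request.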

// getDiskInfo returns given disk information.
func getDiskInfo(diskPath string) (di disk.Info, err error) {
	if err = checkPathLength(diskPath); err == nil {
		di, err = disk.GetInfo(diskPath)
	}

	switch {
	case os.IsNotExist(err):
		err = errDiskNotFound
	case isSysErrTooLong(err):
		err = errFileNameTooLong
	case isSysErrIO(err):
		err = errFaultyDisk
	}

	return di, err
}

// List of operating systems where we ignore disk space
// verification.
var ignoreDiskFreeOS = []string{
	globalWindowsOSName,
	globalNetBSDOSName,
}

// check if disk total has minimum required size.
func checkDiskMinTotal(di disk.Info) (err error) {
	// Remove 5% from total space for cumulative disk space
	// used for journalling, inodes etc.
	totalDiskSpace := float64(di.Total) * diskFillFraction
	if int64(totalDiskSpace) <= diskMinTotalSpace {
		return errMinDiskSize
	}
	return nil
}

// check if disk free has minimum required size.
func checkDiskMinFree(di disk.Info) error {
	// Remove 5% from free space for cumulative disk space used for journalling, inodes etc.
	availableDiskSpace := float64(di.Free) * diskFillFraction
	if int64(availableDiskSpace) <= diskMinFreeSpace {
		return errDiskFull
	}

	// Success.
	return nil
}

// checkDiskFree verifies if disk path has sufficient minimum free disk space and files.
func checkDiskFree(diskPath string, neededSpace int64) (err error) {
	// We don't validate disk space or inode utilization on windows.
	// Each windows call to 'GetVolumeInformationW' takes around
	// 3-5 seconds. And StatDISK is not supported by Go for solaris
	// and netbsd.
	if contains(ignoreDiskFreeOS, runtime.GOOS) {
		return nil
	}

	var di disk.Info
	di, err = getDiskInfo(diskPath)
	if err != nil {
		return err
	}

	if err = checkDiskMinFree(di); err != nil {
		return err
	}

	// Check if we have enough space to store data.
	if neededSpace > int64(float64(di.Free)*diskFillFraction) {
		return errDiskFull
	}

	return nil
}

// Implements stringer compatible interface.
func (s *xlStorage) String() string {
	return s.diskPath
}

func (s *xlStorage) Hostname() string {
	return s.hostname
}

func (*xlStorage) Close() error {
	return nil
}

func (s *xlStorage) IsOnline() bool {
	return true
}

func (s *xlStorage) IsLocal() bool {
	return true
}

func (s *xlStorage) waitForLowActiveIO() {
	max := lowActiveIOWaitMaxN
	for atomic.LoadInt32(&s.activeIOCount) >= s.maxActiveIOCount {
		time.Sleep(lowActiveIOWaitTick)
		max--
		if max == 0 {
			if intDataUpdateTracker.debug {
				logger.Info("waitForLowActiveIO: waited %d times, resuming", lowActiveIOWaitMaxN)
			}
			break
		}
	}
}
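
// Note (added for clarity): in the worst case the crawler therefore waits
// lowActiveIOWaitMaxN * lowActiveIOWaitTick (10 * 100ms = 1s) before resuming
// regardless of how busy the disk still is.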

func (s *xlStorage) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) (dataUsageCache, error) {
	// Check if the current bucket has a configured lifecycle policy.
	lc, err := globalLifecycleSys.Get(cache.Info.Name)
	if err == nil && lc.HasActiveRules("", true) {
		cache.Info.lifeCycle = lc
	}

	// Get object api.
	objAPI := newObjectLayerWithoutSafeModeFn()
	if objAPI == nil {
		return cache, errServerNotInitialized
	}

	dataUsageInfo, err := crawlDataFolder(ctx, s.diskPath, cache, s.waitForLowActiveIO, func(item crawlItem) (int64, error) {
		// Look for `xl.meta`/`xl.json` at the leaf.
		if !strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFile) &&
			!strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFileV1) {
			// If no xl.meta/xl.json is found, skip the file.
			return 0, errSkipFile
		}

		buf, err := ioutil.ReadFile(item.Path)
		if err != nil {
			return 0, errSkipFile
		}

		// Remove filename which is the meta file.
		item.transformMetaDir()

		fivs, err := getFileInfoVersions(buf, item.bucket, item.objectPath())
		if err != nil {
			return 0, errSkipFile
		}

		var totalSize int64
		for _, version := range fivs.Versions {
			size := item.applyActions(ctx, objAPI, actionMeta{numVersions: len(fivs.Versions), oi: version.ToObjectInfo(item.bucket, item.objectPath())})
			if !version.Deleted {
				totalSize += size
			}
		}

		return totalSize, nil
	})

	if err != nil {
		return dataUsageInfo, err
	}

	dataUsageInfo.Info.LastUpdate = time.Now()
	total := dataUsageInfo.sizeRecursive(dataUsageInfo.Info.Name)
	if total == nil {
		total = &dataUsageEntry{}
	}
	atomic.StoreUint64(&s.totalUsed, uint64(total.Size))

	return dataUsageInfo, nil
}
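
// Note (added for clarity): the crawl callback above only charges object
// versions that are not delete markers toward the usage total, and anything
// that is not an xl.meta/xl.json leaf (or that fails to parse) is skipped with
// errSkipFile so the tree walk keeps going.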

// DiskInfo is an extended type which returns current
// disk usage per path.
type DiskInfo struct {
	Total     uint64
	Free      uint64
	Used      uint64
	RootDisk  bool
	MountPath string
	Error     string // reports any error returned by underlying disk
}

// DiskInfo provides current information about disk space usage,
// total free inodes and underlying filesystem.
func (s *xlStorage) DiskInfo() (info DiskInfo, err error) {
	atomic.AddInt32(&s.activeIOCount, 1)
	defer func() {
		atomic.AddInt32(&s.activeIOCount, -1)
	}()

	di, err := getDiskInfo(s.diskPath)
	if err != nil {
		return info, err
	}

	used := di.Total - di.Free
	if !s.diskMount {
		used = atomic.LoadUint64(&s.totalUsed)
	}

	rootDisk, err := disk.IsRootDisk(s.diskPath)
	if err != nil {
		return info, err
	}

	return DiskInfo{
		Total:     di.Total,
		Free:      di.Free,
		Used:      used,
		RootDisk:  rootDisk,
		MountPath: s.diskPath,
	}, nil
}

// getVolDir - will convert incoming volume names to
// corresponding valid volume names on the backend in a platform
// compatible way for all operating systems. If volume is not found
// an error is generated.
func (s *xlStorage) getVolDir(volume string) (string, error) {
	if volume == "" || volume == "." || volume == ".." {
		return "", errVolumeNotFound
	}
	volumeDir := pathJoin(s.diskPath, volume)
	return volumeDir, nil
}

// GetDiskID - returns the cached disk uuid.
func (s *xlStorage) GetDiskID() (string, error) {
	s.RLock()
	diskID := s.diskID
	fileInfo := s.formatFileInfo
	lastCheck := s.formatLastCheck
	s.RUnlock()

	// Check if we have a valid disk ID that is less than 1 second old.
	if fileInfo != nil && diskID != "" && time.Now().Before(lastCheck.Add(time.Second)) {
		return diskID, nil
	}

	s.Lock()
	defer s.Unlock()

	// If somebody else updated the disk ID and changed the time, return what they got.
	if !lastCheck.IsZero() && !s.formatLastCheck.Equal(lastCheck) && diskID != "" {
		// Somebody else got the lock first.
		return diskID, nil
	}
	formatFile := pathJoin(s.diskPath, minioMetaBucket, formatConfigFile)
	fi, err := os.Stat(formatFile)
	if err != nil {
		// If the disk is still not initialized.
		if os.IsNotExist(err) {
			return "", errUnformattedDisk
		}
		return "", errCorruptedFormat
	}

	if xioutil.SameFile(fi, fileInfo) && diskID != "" {
		// If the file has not changed, just return the cached diskID information.
		s.formatLastCheck = time.Now()
		return diskID, nil
	}

	b, err := ioutil.ReadFile(formatFile)
	if err != nil {
		return "", errCorruptedFormat
	}
	format := &formatErasureV3{}
	var json = jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(b, &format); err != nil {
		return "", errCorruptedFormat
	}
	s.diskID = format.Erasure.This
	s.formatFileInfo = fi
	s.formatLastCheck = time.Now()
	return s.diskID, nil
}
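
// Explanatory note (added; names refer only to fields defined in this file):
// GetDiskID uses a read-lock fast path followed by a double-checked
// write-lock slow path. The RLock'ed snapshot serves cached IDs for up to one
// second, and after taking the write lock it first checks whether another
// goroutine already refreshed formatLastCheck before re-reading format.json.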

// SetDiskID - sets the disk ID for this storage.
func (s *xlStorage) SetDiskID(id string) {
	// NO-OP for xlStorage as it is handled either by xlStorageDiskIDCheck{} for local disks or
	// storage rest server for remote disks.
}

func (s *xlStorage) MakeVolBulk(volumes ...string) (err error) {
	for _, volume := range volumes {
		if err = s.MakeVol(volume); err != nil {
			if os.IsPermission(err) {
				return errVolumeAccessDenied
			}
		}
	}
	return nil
}

// Make a volume entry.
func (s *xlStorage) MakeVol(volume string) (err error) {
	if !isValidVolname(volume) {
		return errInvalidArgument
	}

	atomic.AddInt32(&s.activeIOCount, 1)
	defer func() {
		atomic.AddInt32(&s.activeIOCount, -1)
	}()

	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	if _, err := os.Stat(volumeDir); err != nil {
		// Volume does not exist, we proceed to create.
		if os.IsNotExist(err) {
			// Make a volume entry, with mode 0777; mkdir honors the system umask.
			err = os.MkdirAll(volumeDir, 0777)
		}
		if os.IsPermission(err) {
			return errDiskAccessDenied
		} else if isSysErrIO(err) {
			return errFaultyDisk
		}
		return err
	}

	// Stat succeeded, so the volume already exists.
	return errVolumeExists
}

// ListVols - list volumes.
func (s *xlStorage) ListVols() (volsInfo []VolInfo, err error) {
	atomic.AddInt32(&s.activeIOCount, 1)
	defer func() {
		atomic.AddInt32(&s.activeIOCount, -1)
	}()

	volsInfo, err = listVols(s.diskPath)
	if err != nil {
		if isSysErrIO(err) {
			return nil, errFaultyDisk
		}
		return nil, err
	}
	for i, vol := range volsInfo {
		volInfo := VolInfo{
			Name:    vol.Name,
			Created: vol.Created,
		}
		volsInfo[i] = volInfo
	}
	return volsInfo, nil
}

// List all the volumes from diskPath.
func listVols(dirPath string) ([]VolInfo, error) {
	if err := checkPathLength(dirPath); err != nil {
		return nil, err
	}
	entries, err := readDir(dirPath)
	if err != nil {
		return nil, errDiskNotFound
	}
	var volsInfo []VolInfo
	for _, entry := range entries {
		if !HasSuffix(entry, SlashSeparator) || !isValidVolname(slashpath.Clean(entry)) {
			// Skip if entry is neither a directory nor a valid volume name.
			continue
		}
		var fi os.FileInfo
		fi, err = os.Stat(pathJoin(dirPath, entry))
		if err != nil {
			// If the file does not exist, skip the entry.
			if os.IsNotExist(err) {
				continue
			} else if isSysErrIO(err) {
				return nil, errFaultyDisk
			}
			return nil, err
		}
		volsInfo = append(volsInfo, VolInfo{
			Name: fi.Name(),
			// As os.Stat() doesn't carry anything other than ModTime(),
			// use ModTime() as CreatedTime.
			Created: fi.ModTime(),
		})
	}
	return volsInfo, nil
}

// StatVol - get volume info.
func (s *xlStorage) StatVol(volume string) (volInfo VolInfo, err error) {
	atomic.AddInt32(&s.activeIOCount, 1)
	defer func() {
		atomic.AddInt32(&s.activeIOCount, -1)
	}()

	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return VolInfo{}, err
	}
	// Stat a volume entry.
	var st os.FileInfo
	st, err = os.Stat(volumeDir)
	if err != nil {
		if os.IsNotExist(err) {
			return VolInfo{}, errVolumeNotFound
		} else if isSysErrIO(err) {
			return VolInfo{}, errFaultyDisk
		}
		return VolInfo{}, err
	}
	// As os.Stat() doesn't carry anything other than ModTime(), use ModTime()
	// as CreatedTime.
	createdTime := st.ModTime()
	return VolInfo{
		Name:    volume,
		Created: createdTime,
	}, nil
}

// DeleteVol - delete a volume.
func (s *xlStorage) DeleteVol(volume string, forceDelete bool) (err error) {
	atomic.AddInt32(&s.activeIOCount, 1)
	defer func() {
		atomic.AddInt32(&s.activeIOCount, -1)
	}()

	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	if forceDelete {
		err = os.RemoveAll(volumeDir)
	} else {
		err = os.Remove(volumeDir)
	}

	if err != nil {
		switch {
		case os.IsNotExist(err):
			return errVolumeNotFound
		case isSysErrNotEmpty(err):
			return errVolumeNotEmpty
		case os.IsPermission(err):
			return errDiskAccessDenied
		case isSysErrIO(err):
			return errFaultyDisk
		default:
			return err
		}
	}
	return nil
}
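
// Note (added for clarity): forceDelete uses os.RemoveAll and therefore wipes
// the volume recursively, while the default path uses os.Remove and surfaces
// errVolumeNotEmpty when the directory still contains objects.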

const guidSplunk = "guidSplunk"

// ListDirSplunk - return all the entries at the given directory path.
// If an entry is a directory it will be returned with a trailing SlashSeparator.
func (s *xlStorage) ListDirSplunk(volume, dirPath string, count int) (entries []string, err error) {
	guidIndex := strings.Index(dirPath, guidSplunk)
	if guidIndex != -1 {
		return nil, nil
	}

	const receiptJSON = "receipt.json"

	atomic.AddInt32(&s.activeIOCount, 1)
	defer func() {
		atomic.AddInt32(&s.activeIOCount, -1)
	}()

	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return nil, err
	}

	if _, err = os.Stat(volumeDir); err != nil {
		if os.IsNotExist(err) {
			return nil, errVolumeNotFound
		} else if isSysErrIO(err) {
			return nil, errFaultyDisk
		}
		return nil, err
	}

	dirPathAbs := pathJoin(volumeDir, dirPath)
	if count > 0 {
		entries, err = readDirN(dirPathAbs, count)
	} else {
		entries, err = readDir(dirPathAbs)
	}
	if err != nil {
		return nil, err
	}

	for i, entry := range entries {
		if entry != receiptJSON {
			continue
		}
		_, err = os.Stat(pathJoin(dirPathAbs, entry, xlStorageFormatFile))
		if err == nil {
			entries[i] = strings.TrimSuffix(entry, SlashSeparator)
			continue
		}
		if os.IsNotExist(err) {
			if err = s.renameLegacyMetadata(volume, pathJoin(dirPath, entry)); err == nil {
				// The rename was successful, meaning we found an old `xl.json`.
				entries[i] = strings.TrimSuffix(entry, SlashSeparator)
			}
		}
	}

	return entries, nil
}

// WalkSplunk - is a sorted walker which returns file entries in lexically
// sorted order, additionally along with metadata about each of those entries.
// Implemented specifically for the Splunk backend structure and List calls with
// delimiter as "guidSplunk".
func (s *xlStorage) WalkSplunk(volume, dirPath, marker string, endWalkCh <-chan struct{}) (ch chan FileInfo, err error) {
	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return nil, err
	}

	// Stat a volume entry.
	_, err = os.Stat(volumeDir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, errVolumeNotFound
		} else if isSysErrIO(err) {
			return nil, errFaultyDisk
		}
		return nil, err
	}

	ch = make(chan FileInfo)
	go func() {
		defer close(ch)
		listDir := func(volume, dirPath, dirEntry string) (bool, []string) {
			entries, err := s.ListDirSplunk(volume, dirPath, -1)
			if err != nil {
				return false, nil
			}
			if len(entries) == 0 {
				return true, nil
			}
			sort.Strings(entries)
			return false, filterMatchingPrefix(entries, dirEntry)
		}

		walkResultCh := startTreeWalk(GlobalContext, volume, dirPath, marker, true, listDir, endWalkCh)
		for {
			walkResult, ok := <-walkResultCh
			if !ok {
				return
			}
			var fi FileInfo
			if HasSuffix(walkResult.entry, SlashSeparator) {
				fi = FileInfo{
					Volume: volume,
					Name:   walkResult.entry,
					Mode:   os.ModeDir,
				}
			} else {
				var err error
				var xlMetaBuf []byte
				xlMetaBuf, err = ioutil.ReadFile(pathJoin(volumeDir, walkResult.entry, xlStorageFormatFile))
				if err != nil {
					continue
				}
				fi, err = getFileInfo(xlMetaBuf, volume, walkResult.entry, "")
				if err != nil {
					continue
				}
				if fi.Deleted {
					// Ignore delete markers.
					continue
				}
			}
			select {
			case ch <- fi:
			case <-endWalkCh:
				return
			}
		}
	}()

	return ch, nil
}

// WalkVersions - is a sorted walker which returns file entries in lexically sorted order,
// additionally along with metadata version info about each of those entries.
func (s *xlStorage) WalkVersions(volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (ch chan FileInfoVersions, err error) {
	atomic.AddInt32(&s.activeIOCount, 1)
	defer func() {
		atomic.AddInt32(&s.activeIOCount, -1)
	}()

	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return nil, err
	}

	// Stat a volume entry.
	_, err = os.Stat(volumeDir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, errVolumeNotFound
		} else if isSysErrIO(err) {
			return nil, errFaultyDisk
		}
		return nil, err
	}

	// buffer channel matches the S3 ListObjects implementation
	ch = make(chan FileInfoVersions, maxObjectList)
	go func() {
		defer close(ch)
		listDir := func(volume, dirPath, dirEntry string) (emptyDir bool, entries []string) {
			entries, err := s.ListDir(volume, dirPath, -1)
			if err != nil {
				return false, nil
			}
			if len(entries) == 0 {
				return true, nil
			}
			sort.Strings(entries)
			return false, filterMatchingPrefix(entries, dirEntry)
		}

		walkResultCh := startTreeWalk(GlobalContext, volume, dirPath, marker, recursive, listDir, endWalkCh)
		for walkResult := range walkResultCh {
			var fiv FileInfoVersions
			if HasSuffix(walkResult.entry, SlashSeparator) {
				fiv = FileInfoVersions{
					Versions: []FileInfo{
						{
							Volume: volume,
							Name:   walkResult.entry,
							Mode:   os.ModeDir,
						},
					},
				}
			} else {
				xlMetaBuf, err := ioutil.ReadFile(pathJoin(volumeDir, walkResult.entry, xlStorageFormatFile))
				if err != nil {
					continue
				}

				fiv, err = getFileInfoVersions(xlMetaBuf, volume, walkResult.entry)
				if err != nil {
					continue
				}
			}
			select {
			case ch <- fiv:
			case <-endWalkCh:
				return
			}
		}
	}()

	return ch, nil
}

// Walk - is a sorted walker which returns file entries in lexically
// sorted order, additionally along with metadata about each of those entries.
func (s *xlStorage) Walk(volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (ch chan FileInfo, err error) {
	atomic.AddInt32(&s.activeIOCount, 1)
	defer func() {
		atomic.AddInt32(&s.activeIOCount, -1)
	}()

	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return nil, err
	}

	// Stat a volume entry.
	_, err = os.Stat(volumeDir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, errVolumeNotFound
		} else if isSysErrIO(err) {
			return nil, errFaultyDisk
		}
		return nil, err
	}

	// buffer channel matches the S3 ListObjects implementation
	ch = make(chan FileInfo, maxObjectList)
	go func() {
		defer close(ch)
		listDir := func(volume, dirPath, dirEntry string) (emptyDir bool, entries []string) {
			entries, err := s.ListDir(volume, dirPath, -1)
			if err != nil {
				return false, nil
			}
			if len(entries) == 0 {
				return true, nil
			}
			sort.Strings(entries)
			return false, filterMatchingPrefix(entries, dirEntry)
		}

		walkResultCh := startTreeWalk(GlobalContext, volume, dirPath, marker, recursive, listDir, endWalkCh)
		for walkResult := range walkResultCh {
			var fi FileInfo
			if HasSuffix(walkResult.entry, SlashSeparator) {
				fi = FileInfo{
					Volume: volume,
					Name:   walkResult.entry,
					Mode:   os.ModeDir,
				}
			} else {
				var err error
				var xlMetaBuf []byte
				xlMetaBuf, err = ioutil.ReadFile(pathJoin(volumeDir, walkResult.entry, xlStorageFormatFile))
				if err != nil {
					continue
				}
				fi, err = getFileInfo(xlMetaBuf, volume, walkResult.entry, "")
				if err != nil {
					continue
				}
				if fi.Deleted {
					// Ignore delete markers.
					continue
				}
			}
			select {
			case ch <- fi:
			case <-endWalkCh:
				return
			}
		}
	}()

	return ch, nil
}

// ListDir - return all the entries at the given directory path.
// If an entry is a directory it will be returned with a trailing SlashSeparator.
func (s *xlStorage) ListDir(volume, dirPath string, count int) (entries []string, err error) {
	atomic.AddInt32(&s.activeIOCount, 1)
	defer func() {
		atomic.AddInt32(&s.activeIOCount, -1)
	}()

	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return nil, err
	}

	if _, err = os.Stat(volumeDir); err != nil {
		if os.IsNotExist(err) {
			return nil, errVolumeNotFound
		} else if isSysErrIO(err) {
			return nil, errFaultyDisk
		}
		return nil, err
	}

	dirPathAbs := pathJoin(volumeDir, dirPath)
	if count > 0 {
		entries, err = readDirN(dirPathAbs, count)
	} else {
		entries, err = readDir(dirPathAbs)
	}
	if err != nil {
		return nil, err
	}

	for i, entry := range entries {
		_, err = os.Stat(pathJoin(dirPathAbs, entry, xlStorageFormatFile))
		if err == nil {
			entries[i] = strings.TrimSuffix(entry, SlashSeparator)
			continue
		}
		if os.IsNotExist(err) {
			if err = s.renameLegacyMetadata(volume, pathJoin(dirPath, entry)); err == nil {
				// If the rename was successful, it means we found an old `xl.json`.
				entries[i] = strings.TrimSuffix(entry, SlashSeparator)
			}
		}
	}

	return entries, nil
}

// DeleteVersions deletes a slice of versions; they can belong to the same
// object or to multiple objects.
func (s *xlStorage) DeleteVersions(volume string, versions []FileInfo) []error {
	errs := make([]error, len(versions))
	for i, version := range versions {
		if err := s.DeleteVersion(volume, version.Name, version); err != nil {
			errs[i] = err
		}
	}

	return errs
}

// DeleteVersion - deletes FileInfo metadata for path at `xl.meta`.
func (s *xlStorage) DeleteVersion(volume, path string, fi FileInfo) error {
	if HasSuffix(path, SlashSeparator) {
		return s.DeleteFile(volume, path)
	}

	buf, err := s.ReadAll(volume, pathJoin(path, xlStorageFormatFile))
	if err != nil {
		return err
	}

	if len(buf) == 0 {
		return errFileNotFound
	}

	atomic.AddInt32(&s.activeIOCount, 1)
	defer func() {
		atomic.AddInt32(&s.activeIOCount, -1)
	}()

	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	if !isXL2V1Format(buf) {
		// Delete the meta file; if there are no more versions the
		// top level parent is automatically removed.
		filePath := pathJoin(volumeDir, path, xlStorageFormatFile)
		if err = checkPathLength(filePath); err != nil {
			return err
		}

		return deleteFile(volumeDir, filePath, false)
	}

	var xlMeta xlMetaV2
	if err = xlMeta.Load(buf); err != nil {
		return err
	}

	if fi.Deleted {
		if err = xlMeta.AddVersion(fi); err != nil {
			return err
		}
		buf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...))
		if err != nil {
			return err
		}
		return s.WriteAll(volume, pathJoin(path, xlStorageFormatFile), bytes.NewReader(buf))
	}

	dataDir, lastVersion, err := xlMeta.DeleteVersion(fi)
	if err != nil {
		return err
	}

	buf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...))
	if err != nil {
		return err
	}

	// Remove the version's data directory when one is specified.
	if dataDir != "" {
		filePath := pathJoin(volumeDir, path, dataDir)
		if err = checkPathLength(filePath); err != nil {
			return err
		}

		if err = removeAll(filePath); err != nil {
			return err
		}
	}

	if !lastVersion {
		return s.WriteAll(volume, pathJoin(path, xlStorageFormatFile), bytes.NewReader(buf))
	}

	// Delete the meta file; if there are no more versions the
	// top level parent is automatically removed.
	filePath := pathJoin(volumeDir, path, xlStorageFormatFile)
	if err = checkPathLength(filePath); err != nil {
		return err
	}

	return deleteFile(volumeDir, filePath, false)
}
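
// Note (added for clarity): DeleteVersion takes one of three paths. Legacy
// (non-xl.meta v2) metadata is simply deleted; a delete-marker FileInfo is
// appended as a new version and the metadata rewritten; otherwise the version
// is dropped from xl.meta, its data directory removed, and the meta file
// itself deleted only once the last version is gone.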

// WriteMetadata - writes FileInfo metadata for path at `xl.meta`.
func (s *xlStorage) WriteMetadata(volume, path string, fi FileInfo) error {
	buf, err := s.ReadAll(volume, pathJoin(path, xlStorageFormatFile))
	if err != nil && err != errFileNotFound {
		return err
	}

	atomic.AddInt32(&s.activeIOCount, 1)
	defer func() {
		atomic.AddInt32(&s.activeIOCount, -1)
	}()

	var xlMeta xlMetaV2
	if !isXL2V1Format(buf) {
		xlMeta, err = newXLMetaV2(fi)
		if err != nil {
			return err
		}
		buf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...))
		if err != nil {
			return err
		}
	} else {
		if err = xlMeta.Load(buf); err != nil {
			return err
		}
		if err = xlMeta.AddVersion(fi); err != nil {
			return err
		}
		buf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...))
		if err != nil {
			return err
		}
	}

	return s.WriteAll(volume, pathJoin(path, xlStorageFormatFile), bytes.NewReader(buf))
}
|
|
|
|
|
|
|
|
func (s *xlStorage) renameLegacyMetadata(volume, path string) error {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, 1)
|
|
|
|
defer func() {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, -1)
|
|
|
|
}()
|
|
|
|
|
|
|
|
volumeDir, err := s.getVolDir(volume)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Stat a volume entry.
|
|
|
|
_, err = os.Stat(volumeDir)
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return errVolumeNotFound
|
|
|
|
} else if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
|
|
|
} else if isSysErrTooManyFiles(err) {
|
|
|
|
return errTooManyOpenFiles
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Validate file path length, before reading.
|
|
|
|
filePath := pathJoin(volumeDir, path)
|
|
|
|
if err = checkPathLength(filePath); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
srcFilePath := pathJoin(filePath, xlStorageFormatFileV1)
|
|
|
|
dstFilePath := pathJoin(filePath, xlStorageFormatFile)
|
2020-06-19 13:58:17 -04:00
|
|
|
|
|
|
|
// Renaming xl.json to xl.meta should be fully synced to disk.
|
2020-07-11 12:37:34 -04:00
|
|
|
defer func() {
|
|
|
|
if err == nil {
|
|
|
|
// Sync to disk only upon success.
|
|
|
|
globalSync()
|
|
|
|
}
|
|
|
|
}()
|
2020-06-19 13:58:17 -04:00
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
if err = os.Rename(srcFilePath, dstFilePath); err != nil {
|
|
|
|
switch {
|
|
|
|
case isSysErrNotDir(err):
|
|
|
|
return errFileNotFound
|
|
|
|
case isSysErrPathNotFound(err):
|
|
|
|
return errFileNotFound
|
|
|
|
case isSysErrCrossDevice(err):
|
|
|
|
return fmt.Errorf("%w (%s)->(%s)", errCrossDeviceLink, srcFilePath, dstFilePath)
|
|
|
|
case os.IsNotExist(err):
|
|
|
|
return errFileNotFound
|
|
|
|
case os.IsExist(err):
|
|
|
|
// This is returned only when destination is a directory and we
|
|
|
|
// are attempting a rename from file to directory.
|
|
|
|
return errIsNotRegular
|
|
|
|
default:
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadVersion - reads metadata and returns FileInfo at path `xl.meta`
|
|
|
|
func (s *xlStorage) ReadVersion(volume, path, versionID string) (fi FileInfo, err error) {
|
|
|
|
buf, err := s.ReadAll(volume, pathJoin(path, xlStorageFormatFile))
|
|
|
|
if err != nil {
|
|
|
|
if err == errFileNotFound {
|
|
|
|
if err = s.renameLegacyMetadata(volume, path); err != nil {
|
|
|
|
return fi, err
|
2019-04-23 17:54:28 -04:00
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
buf, err = s.ReadAll(volume, pathJoin(path, xlStorageFormatFile))
|
|
|
|
if err != nil {
|
|
|
|
return fi, err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return fi, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(buf) == 0 {
|
|
|
|
if versionID != "" {
|
|
|
|
return fi, errFileVersionNotFound
|
2019-04-23 17:54:28 -04:00
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
return fi, errFileNotFound
|
2019-04-23 17:54:28 -04:00
|
|
|
}
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
return getFileInfo(buf, volume, path, versionID)
|
2016-04-08 13:37:38 -04:00
|
|
|
}
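
// Illustrative sketch; names other than ReadVersion and the error values are
// hypothetical. An empty versionID requests the latest version recorded in
// `xl.meta`.
func exampleReadVersion(s *xlStorage, versionID string) {
	fi, err := s.ReadVersion("mybucket", "photos/object.jpg", versionID)
	switch {
	case err == errFileVersionNotFound:
		// The object exists, but this particular versionID does not.
	case err == errFileNotFound:
		// Neither `xl.meta` nor a legacy `xl.json` was found for the object.
	case err == nil:
		fmt.Println("data dir of the requested version:", fi.DataDir)
	}
}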
|
|
|
|
|
2016-06-25 17:51:06 -04:00
|
|
|
// ReadAll reads from the file at path until an error or EOF and returns the data it read.
|
|
|
|
// A successful call returns err == nil, not err == EOF. Because ReadAll is
|
|
|
|
// defined to read the source until EOF, it does not treat an EOF from Read
|
|
|
|
// as an error to be reported.
|
|
|
|
// This API is meant to be used on small files that fit comfortably in memory; do
|
|
|
|
// not use it on large files as it could cause the server to run out of memory and crash.
|
2020-06-12 23:04:01 -04:00
|
|
|
func (s *xlStorage) ReadAll(volume, path string) (buf []byte, err error) {
|
2019-12-12 09:02:37 -05:00
|
|
|
atomic.AddInt32(&s.activeIOCount, 1)
|
|
|
|
defer func() {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, -1)
|
|
|
|
}()
|
|
|
|
|
2016-06-25 17:51:06 -04:00
|
|
|
volumeDir, err := s.getVolDir(volume)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
|
2016-06-25 17:51:06 -04:00
|
|
|
// Stat a volume entry.
|
2020-06-12 23:04:01 -04:00
|
|
|
_, err = os.Stat(volumeDir)
|
2016-06-25 17:51:06 -04:00
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return nil, errVolumeNotFound
|
2018-07-27 18:32:19 -04:00
|
|
|
} else if isSysErrIO(err) {
|
|
|
|
return nil, errFaultyDisk
|
2019-05-02 10:09:57 -04:00
|
|
|
} else if isSysErrTooManyFiles(err) {
|
|
|
|
return nil, errTooManyOpenFiles
|
2016-06-25 17:51:06 -04:00
|
|
|
}
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Validate file path length, before reading.
|
|
|
|
filePath := pathJoin(volumeDir, path)
|
2020-06-12 23:04:01 -04:00
|
|
|
if err = checkPathLength(filePath); err != nil {
|
2016-06-25 17:51:06 -04:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Open the file for reading.
|
2020-06-12 23:04:01 -04:00
|
|
|
buf, err = ioutil.ReadFile(filePath)
|
2016-06-25 17:51:06 -04:00
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return nil, errFileNotFound
|
|
|
|
} else if os.IsPermission(err) {
|
|
|
|
return nil, errFileAccessDenied
|
2019-09-11 13:21:43 -04:00
|
|
|
} else if errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.EISDIR) {
|
|
|
|
return nil, errFileNotFound
|
|
|
|
} else if isSysErrHandleInvalid(err) {
|
|
|
|
// This case is special and needs to be handled for windows.
|
|
|
|
return nil, errFileNotFound
|
2018-07-27 18:32:19 -04:00
|
|
|
} else if isSysErrIO(err) {
|
|
|
|
return nil, errFaultyDisk
|
2016-06-25 17:51:06 -04:00
|
|
|
}
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return buf, nil
|
|
|
|
}
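
// Illustrative sketch; the bucket/object names are hypothetical. This is the
// typical way this file loads `xl.meta` into memory, so keep ReadAll for
// small metadata blobs only.
func exampleReadMetadata(s *xlStorage) ([]byte, error) {
	return s.ReadAll("mybucket", pathJoin("photos/object.jpg", xlStorageFormatFile))
}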
|
|
|
|
|
2016-05-28 18:13:15 -04:00
|
|
|
// ReadFile reads exactly len(buf) bytes into buf. It returns the
|
|
|
|
// number of bytes copied. The error is EOF only if no bytes were
|
|
|
|
// read. On return, n == len(buf) if and only if err == nil. n == 0
|
2016-11-21 02:42:53 -05:00
|
|
|
// for io.EOF.
|
2017-05-16 17:21:52 -04:00
|
|
|
//
|
2016-11-21 02:42:53 -05:00
|
|
|
// If an EOF happens after reading some but not all the bytes,
|
2017-09-25 14:32:56 -04:00
|
|
|
// ReadFile returns ErrUnexpectedEOF.
|
|
|
|
//
|
|
|
|
// If the BitrotVerifier is not nil and not already verified, ReadFile
|
|
|
|
// tries to verify whether the data on disk has bitrot.
|
2017-05-16 17:21:52 -04:00
|
|
|
//
|
|
|
|
// Additionally ReadFile also starts reading from an offset. ReadFile
|
|
|
|
// semantics are the same as io.ReadFull.
|
2020-06-12 23:04:01 -04:00
|
|
|
func (s *xlStorage) ReadFile(volume, path string, offset int64, buffer []byte, verifier *BitrotVerifier) (int64, error) {
|
2018-08-06 18:14:08 -04:00
|
|
|
if offset < 0 {
|
|
|
|
return 0, errInvalidArgument
|
|
|
|
}
|
|
|
|
|
2019-12-12 09:02:37 -05:00
|
|
|
atomic.AddInt32(&s.activeIOCount, 1)
|
|
|
|
defer func() {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, -1)
|
|
|
|
}()
|
|
|
|
|
2016-05-18 00:22:27 -04:00
|
|
|
volumeDir, err := s.getVolDir(volume)
|
|
|
|
if err != nil {
|
2016-05-28 18:13:15 -04:00
|
|
|
return 0, err
|
2016-05-18 00:22:27 -04:00
|
|
|
}
|
2020-01-17 16:34:43 -05:00
|
|
|
|
|
|
|
var n int
|
|
|
|
|
2016-05-18 00:22:27 -04:00
|
|
|
// Stat a volume entry.
|
2020-06-12 23:04:01 -04:00
|
|
|
_, err = os.Stat(volumeDir)
|
2016-04-13 14:32:47 -04:00
|
|
|
if err != nil {
|
2016-05-18 00:22:27 -04:00
|
|
|
if os.IsNotExist(err) {
|
2016-05-28 18:13:15 -04:00
|
|
|
return 0, errVolumeNotFound
|
2018-07-27 18:32:19 -04:00
|
|
|
} else if isSysErrIO(err) {
|
|
|
|
return 0, errFaultyDisk
|
2016-05-18 00:22:27 -04:00
|
|
|
}
|
2016-05-28 18:13:15 -04:00
|
|
|
return 0, err
|
2016-04-08 13:37:38 -04:00
|
|
|
}
|
2016-04-13 14:32:47 -04:00
|
|
|
|
2016-06-25 17:51:06 -04:00
|
|
|
// Validate effective path length before reading.
|
2016-05-05 04:39:26 -04:00
|
|
|
filePath := pathJoin(volumeDir, path)
|
2020-06-12 23:04:01 -04:00
|
|
|
if err = checkPathLength(filePath); err != nil {
|
2016-05-28 18:13:15 -04:00
|
|
|
return 0, err
|
2016-05-11 15:55:02 -04:00
|
|
|
}
|
2016-06-25 17:51:06 -04:00
|
|
|
|
|
|
|
// Open the file for reading.
|
2020-06-12 23:04:01 -04:00
|
|
|
file, err := os.Open(filePath)
|
2016-04-08 13:37:38 -04:00
|
|
|
if err != nil {
|
2018-08-06 13:26:40 -04:00
|
|
|
switch {
|
|
|
|
case os.IsNotExist(err):
|
2016-05-28 18:13:15 -04:00
|
|
|
return 0, errFileNotFound
|
2018-08-06 13:26:40 -04:00
|
|
|
case os.IsPermission(err):
|
2016-05-28 18:13:15 -04:00
|
|
|
return 0, errFileAccessDenied
|
2018-08-06 13:26:40 -04:00
|
|
|
case isSysErrNotDir(err):
|
2016-07-29 00:57:11 -04:00
|
|
|
return 0, errFileAccessDenied
|
2018-08-06 13:26:40 -04:00
|
|
|
case isSysErrIO(err):
|
2018-07-27 18:32:19 -04:00
|
|
|
return 0, errFaultyDisk
|
2019-05-02 10:09:57 -04:00
|
|
|
case isSysErrTooManyFiles(err):
|
|
|
|
return 0, errTooManyOpenFiles
|
2018-08-06 13:26:40 -04:00
|
|
|
default:
|
|
|
|
return 0, err
|
2016-04-08 13:37:38 -04:00
|
|
|
}
|
|
|
|
}
|
2016-07-27 22:22:32 -04:00
|
|
|
|
|
|
|
// Close the file descriptor.
|
|
|
|
defer file.Close()
|
|
|
|
|
2016-04-08 13:37:38 -04:00
|
|
|
st, err := file.Stat()
|
|
|
|
if err != nil {
|
2016-05-28 18:13:15 -04:00
|
|
|
return 0, err
|
2016-04-08 13:37:38 -04:00
|
|
|
}
|
2016-07-29 00:57:11 -04:00
|
|
|
|
2017-05-16 17:21:52 -04:00
|
|
|
// Verify it is a regular file, otherwise subsequent Seek is
|
|
|
|
// undefined.
|
2016-04-08 13:37:38 -04:00
|
|
|
if !st.Mode().IsRegular() {
|
2016-07-29 00:57:11 -04:00
|
|
|
return 0, errIsNotRegular
|
2016-04-08 13:37:38 -04:00
|
|
|
}
|
2016-07-29 00:57:11 -04:00
|
|
|
|
2018-08-06 18:14:08 -04:00
|
|
|
if verifier == nil {
|
|
|
|
n, err = file.ReadAt(buffer, offset)
|
|
|
|
return int64(n), err
|
|
|
|
}
|
2017-05-16 17:21:52 -04:00
|
|
|
|
2018-08-06 18:14:08 -04:00
|
|
|
bufp := s.pool.Get().(*[]byte)
|
|
|
|
defer s.pool.Put(bufp)
|
|
|
|
|
|
|
|
h := verifier.algorithm.New()
|
|
|
|
if _, err = io.CopyBuffer(h, io.LimitReader(file, offset), *bufp); err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if n, err = io.ReadFull(file, buffer); err != nil {
|
|
|
|
return int64(n), err
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, err = h.Write(buffer); err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, err = io.CopyBuffer(h, file, *bufp); err != nil {
|
|
|
|
return 0, err
|
2017-05-16 17:21:52 -04:00
|
|
|
}
|
2016-05-28 18:13:15 -04:00
|
|
|
|
2019-02-13 07:59:36 -05:00
|
|
|
if !bytes.Equal(h.Sum(nil), verifier.sum) {
|
2019-10-01 16:12:15 -04:00
|
|
|
return 0, errFileCorrupt
|
2017-08-14 21:08:42 -04:00
|
|
|
}
|
2018-08-06 18:14:08 -04:00
|
|
|
|
|
|
|
return int64(len(buffer)), nil
|
2016-04-08 20:13:16 -04:00
|
|
|
}
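
// Illustrative sketch; the bucket/object names and buffer size are
// hypothetical. A nil verifier skips bitrot verification, and ReadFile
// follows io.ReadFull semantics for the provided buffer.
func exampleReadAt(s *xlStorage, offset int64) ([]byte, error) {
	buf := make([]byte, 1*humanize.MiByte)
	n, err := s.ReadFile("mybucket", "photos/object.jpg", offset, buf, nil)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}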
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
func (s *xlStorage) openFile(volume, path string, mode int) (f *os.File, err error) {
|
2016-05-18 00:22:27 -04:00
|
|
|
volumeDir, err := s.getVolDir(volume)
|
2016-04-13 14:32:47 -04:00
|
|
|
if err != nil {
|
2016-10-29 15:44:44 -04:00
|
|
|
return nil, err
|
2016-04-08 13:37:38 -04:00
|
|
|
}
|
2016-05-18 00:22:27 -04:00
|
|
|
// Stat a volume entry.
|
2020-06-12 23:04:01 -04:00
|
|
|
_, err = os.Stat(volumeDir)
|
2016-05-18 00:22:27 -04:00
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
2016-10-29 15:44:44 -04:00
|
|
|
return nil, errVolumeNotFound
|
2018-07-27 18:32:19 -04:00
|
|
|
} else if isSysErrIO(err) {
|
|
|
|
return nil, errFaultyDisk
|
2016-05-18 00:22:27 -04:00
|
|
|
}
|
2016-10-29 15:44:44 -04:00
|
|
|
return nil, err
|
2016-05-18 00:22:27 -04:00
|
|
|
}
|
2016-10-29 15:44:44 -04:00
|
|
|
|
2016-05-05 04:39:26 -04:00
|
|
|
filePath := pathJoin(volumeDir, path)
|
2020-06-12 23:04:01 -04:00
|
|
|
if err = checkPathLength(filePath); err != nil {
|
2016-10-29 15:44:44 -04:00
|
|
|
return nil, err
|
2016-05-11 15:55:02 -04:00
|
|
|
}
|
2016-10-29 15:44:44 -04:00
|
|
|
|
2016-04-08 13:37:38 -04:00
|
|
|
// Verify if the file already exists and is not of regular type.
|
2016-05-04 15:18:40 -04:00
|
|
|
var st os.FileInfo
|
2018-04-09 23:56:09 -04:00
|
|
|
if st, err = os.Stat(filePath); err == nil {
|
2016-07-29 00:57:11 -04:00
|
|
|
if !st.Mode().IsRegular() {
|
2016-10-29 15:44:44 -04:00
|
|
|
return nil, errIsNotRegular
|
2016-04-08 13:37:38 -04:00
|
|
|
}
|
2016-10-29 15:44:44 -04:00
|
|
|
} else {
|
|
|
|
// Create top level directories if they don't exist.
|
|
|
|
// With mode 0777, mkdir honors the system umask.
|
2018-01-13 12:13:02 -05:00
|
|
|
if err = mkdirAll(slashpath.Dir(filePath), 0777); err != nil {
|
2016-10-29 15:44:44 -04:00
|
|
|
return nil, err
|
2016-07-03 14:17:08 -04:00
|
|
|
}
|
2016-05-28 18:13:15 -04:00
|
|
|
}
|
2016-06-23 23:19:27 -04:00
|
|
|
|
2018-11-14 09:18:35 -05:00
|
|
|
w, err := os.OpenFile(filePath, mode, 0666)
|
2016-05-04 15:18:40 -04:00
|
|
|
if err != nil {
|
|
|
|
// File path cannot be verified since one of the parents is a file.
|
2018-08-06 13:26:40 -04:00
|
|
|
switch {
|
|
|
|
case isSysErrNotDir(err):
|
2016-10-29 15:44:44 -04:00
|
|
|
return nil, errFileAccessDenied
|
2018-08-06 13:26:40 -04:00
|
|
|
case os.IsPermission(err):
|
2018-04-09 23:56:09 -04:00
|
|
|
return nil, errFileAccessDenied
|
2018-08-06 13:26:40 -04:00
|
|
|
case isSysErrIO(err):
|
2018-07-27 18:32:19 -04:00
|
|
|
return nil, errFaultyDisk
|
2019-05-02 10:09:57 -04:00
|
|
|
case isSysErrTooManyFiles(err):
|
|
|
|
return nil, errTooManyOpenFiles
|
2018-08-06 13:26:40 -04:00
|
|
|
default:
|
|
|
|
return nil, err
|
2016-05-04 15:18:40 -04:00
|
|
|
}
|
2016-10-29 15:44:44 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
return w, nil
|
|
|
|
}
|
|
|
|
|
2019-01-17 07:58:18 -05:00
|
|
|
// ReadFileStream - Returns the read stream of the file.
|
2020-06-12 23:04:01 -04:00
|
|
|
func (s *xlStorage) ReadFileStream(volume, path string, offset, length int64) (io.ReadCloser, error) {
|
2019-01-17 07:58:18 -05:00
|
|
|
if offset < 0 {
|
|
|
|
return nil, errInvalidArgument
|
|
|
|
}
|
|
|
|
|
|
|
|
volumeDir, err := s.getVolDir(volume)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
// Stat a volume entry.
|
2020-06-12 23:04:01 -04:00
|
|
|
_, err = os.Stat(volumeDir)
|
2019-01-17 07:58:18 -05:00
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return nil, errVolumeNotFound
|
|
|
|
} else if isSysErrIO(err) {
|
|
|
|
return nil, errFaultyDisk
|
|
|
|
}
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Validate effective path length before reading.
|
|
|
|
filePath := pathJoin(volumeDir, path)
|
2020-06-12 23:04:01 -04:00
|
|
|
if err = checkPathLength(filePath); err != nil {
|
2019-01-17 07:58:18 -05:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Open the file for reading.
|
2020-06-12 23:04:01 -04:00
|
|
|
file, err := os.Open(filePath)
|
2019-01-17 07:58:18 -05:00
|
|
|
if err != nil {
|
|
|
|
switch {
|
|
|
|
case os.IsNotExist(err):
|
|
|
|
return nil, errFileNotFound
|
|
|
|
case os.IsPermission(err):
|
|
|
|
return nil, errFileAccessDenied
|
|
|
|
case isSysErrNotDir(err):
|
|
|
|
return nil, errFileAccessDenied
|
|
|
|
case isSysErrIO(err):
|
|
|
|
return nil, errFaultyDisk
|
2019-05-02 10:09:57 -04:00
|
|
|
case isSysErrTooManyFiles(err):
|
|
|
|
return nil, errTooManyOpenFiles
|
2019-01-17 07:58:18 -05:00
|
|
|
default:
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
st, err := file.Stat()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Verify it is a regular file, otherwise subsequent Seek is
|
|
|
|
// undefined.
|
|
|
|
if !st.Mode().IsRegular() {
|
|
|
|
return nil, errIsNotRegular
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, err = file.Seek(offset, io.SeekStart); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-05-22 16:47:15 -04:00
|
|
|
|
2020-02-06 23:13:55 -05:00
|
|
|
atomic.AddInt32(&s.activeIOCount, 1)
|
2019-05-22 16:47:15 -04:00
|
|
|
r := struct {
|
2019-03-18 01:20:26 -04:00
|
|
|
io.Reader
|
|
|
|
io.Closer
|
2020-02-06 23:13:55 -05:00
|
|
|
}{Reader: io.LimitReader(file, length), Closer: closeWrapper(func() error {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, -1)
|
|
|
|
return file.Close()
|
|
|
|
})}
|
|
|
|
|
|
|
|
// Add readahead to big reads
|
2019-11-14 15:58:41 -05:00
|
|
|
if length >= readAheadSize {
|
2020-02-06 23:13:55 -05:00
|
|
|
rc, err := readahead.NewReadCloserSize(r, readAheadBuffers, readAheadBufSize)
|
|
|
|
if err != nil {
|
|
|
|
r.Close()
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return rc, nil
|
2019-11-14 15:58:41 -05:00
|
|
|
}
|
2019-05-22 16:47:15 -04:00
|
|
|
|
2019-11-14 15:58:41 -05:00
|
|
|
// Just add a small 64k buffer.
|
|
|
|
r.Reader = bufio.NewReaderSize(r.Reader, 64<<10)
|
|
|
|
return r, nil
|
2019-01-17 07:58:18 -05:00
|
|
|
}
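
// Illustrative sketch; the bucket/object names are hypothetical. Ranges of at
// least readAheadSize are wrapped in a read-ahead reader, smaller ranges in a
// 64KiB bufio reader; closing the stream also releases the active I/O count.
func exampleStreamRange(s *xlStorage, w io.Writer, offset, length int64) error {
	rc, err := s.ReadFileStream("mybucket", "photos/object.jpg", offset, length)
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(w, rc)
	return err
}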
|
|
|
|
|
2020-02-06 23:13:55 -05:00
|
|
|
// closeWrapper converts a function to an io.Closer
|
|
|
|
type closeWrapper func() error
|
|
|
|
|
|
|
|
// Close calls the wrapped function.
|
|
|
|
func (c closeWrapper) Close() error {
|
|
|
|
return c()
|
|
|
|
}
|
|
|
|
|
2019-01-17 07:58:18 -05:00
|
|
|
// CreateFile - creates the file.
|
2020-06-12 23:04:01 -04:00
|
|
|
func (s *xlStorage) CreateFile(volume, path string, fileSize int64, r io.Reader) (err error) {
|
2019-04-30 19:27:31 -04:00
|
|
|
if fileSize < -1 {
|
2016-10-29 15:44:44 -04:00
|
|
|
return errInvalidArgument
|
|
|
|
}
|
|
|
|
|
2019-12-12 09:02:37 -05:00
|
|
|
atomic.AddInt32(&s.activeIOCount, 1)
|
|
|
|
defer func() {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, -1)
|
|
|
|
}()
|
|
|
|
|
2016-10-31 12:34:44 -04:00
|
|
|
// Validate that the disk indeed has enough free space.
|
2017-03-07 15:25:40 -05:00
|
|
|
if err = checkDiskFree(s.diskPath, fileSize); err != nil {
|
2018-07-27 18:32:19 -04:00
|
|
|
if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
|
|
|
}
|
2016-10-31 12:34:44 -04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-04-24 00:25:06 -04:00
|
|
|
volumeDir, err := s.getVolDir(volume)
|
2016-10-29 15:44:44 -04:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-04-24 00:25:06 -04:00
|
|
|
// Stat a volume entry.
|
2020-06-12 23:04:01 -04:00
|
|
|
_, err = os.Stat(volumeDir)
|
2019-04-24 00:25:06 -04:00
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return errVolumeNotFound
|
|
|
|
} else if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
filePath := pathJoin(volumeDir, path)
|
2020-06-12 23:04:01 -04:00
|
|
|
if err = checkPathLength(filePath); err != nil {
|
2019-04-24 00:25:06 -04:00
|
|
|
return err
|
|
|
|
}
|
2016-10-29 15:44:44 -04:00
|
|
|
|
2019-04-24 00:25:06 -04:00
|
|
|
// Create top level directories if they don't exist.
|
|
|
|
// With mode 0777, mkdir honors the system umask.
|
|
|
|
if err = mkdirAll(slashpath.Dir(filePath), 0777); err != nil {
|
2020-06-10 11:14:22 -04:00
|
|
|
if os.IsPermission(err) {
|
|
|
|
return errFileAccessDenied
|
|
|
|
}
|
2019-04-24 00:25:06 -04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-05-11 18:03:10 -04:00
|
|
|
w, err := disk.OpenFileDirectIO(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666)
|
2019-04-24 00:25:06 -04:00
|
|
|
if err != nil {
|
|
|
|
switch {
|
|
|
|
case os.IsPermission(err):
|
|
|
|
return errFileAccessDenied
|
|
|
|
case os.IsExist(err):
|
|
|
|
return errFileAccessDenied
|
|
|
|
case isSysErrIO(err):
|
|
|
|
return errFaultyDisk
|
2020-05-07 19:12:16 -04:00
|
|
|
case isSysErrInvalidArg(err):
|
|
|
|
return errUnsupportedDisk
|
2019-04-24 00:25:06 -04:00
|
|
|
default:
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2016-10-29 15:44:44 -04:00
|
|
|
|
2018-09-27 23:36:17 -04:00
|
|
|
var e error
|
|
|
|
if fileSize > 0 {
|
|
|
|
// Allocate needed disk space to append data
|
|
|
|
e = Fallocate(int(w.Fd()), 0, fileSize)
|
|
|
|
}
|
2016-10-29 15:44:44 -04:00
|
|
|
|
|
|
|
// Ignore errors when Fallocate is not supported in the current system
|
|
|
|
if e != nil && !isSysErrNoSys(e) && !isSysErrOpNotSupported(e) {
|
|
|
|
switch {
|
|
|
|
case isSysErrNoSpace(e):
|
|
|
|
err = errDiskFull
|
|
|
|
case isSysErrIO(e):
|
2018-07-27 18:32:19 -04:00
|
|
|
err = errFaultyDisk
|
2016-10-29 15:44:44 -04:00
|
|
|
default:
|
|
|
|
// For errors: EBADF, EINTR, EINVAL, ENODEV, EPERM, ESPIPE and ETXTBSY
|
|
|
|
// The append would have failed anyway, so return an unexpected error
|
|
|
|
err = errUnexpected
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
2019-01-17 07:58:18 -05:00
|
|
|
|
2020-07-01 13:57:23 -04:00
|
|
|
defer func() {
|
|
|
|
disk.Fdatasync(w) // Only interested in flushing the file size, not mtime/atime
|
|
|
|
w.Close()
|
|
|
|
}()
|
2019-05-22 16:47:15 -04:00
|
|
|
|
2019-01-17 07:58:18 -05:00
|
|
|
bufp := s.pool.Get().(*[]byte)
|
|
|
|
defer s.pool.Put(bufp)
|
|
|
|
|
2019-08-23 18:36:46 -04:00
|
|
|
written, err := xioutil.CopyAligned(w, r, *bufp, fileSize)
|
2019-05-22 16:47:15 -04:00
|
|
|
if err != nil {
|
|
|
|
return err
|
2019-01-17 07:58:18 -05:00
|
|
|
}
|
2019-04-24 00:25:06 -04:00
|
|
|
|
2019-05-22 16:47:15 -04:00
|
|
|
if written < fileSize {
|
|
|
|
return errLessData
|
|
|
|
} else if written > fileSize {
|
|
|
|
return errMoreData
|
2019-01-17 07:58:18 -05:00
|
|
|
}
|
2019-05-22 16:47:15 -04:00
|
|
|
|
|
|
|
return nil
|
2016-10-29 15:44:44 -04:00
|
|
|
}
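
// Illustrative sketch; the object name is hypothetical. CreateFile expects the
// reader to deliver exactly fileSize bytes: fewer fails with errLessData,
// more fails with errMoreData.
func exampleCreateFile(s *xlStorage, data []byte) error {
	return s.CreateFile("mybucket", "photos/object.jpg/part.1", int64(len(data)), bytes.NewReader(data))
}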
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
func (s *xlStorage) WriteAll(volume, path string, reader io.Reader) (err error) {
|
2019-12-12 09:02:37 -05:00
|
|
|
atomic.AddInt32(&s.activeIOCount, 1)
|
|
|
|
defer func() {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, -1)
|
|
|
|
}()
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
w, err := s.openFile(volume, path, os.O_CREATE|os.O_SYNC|os.O_WRONLY)
|
2018-11-14 09:18:35 -05:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-05-22 16:47:15 -04:00
|
|
|
defer w.Close()
|
|
|
|
|
|
|
|
bufp := s.pool.Get().(*[]byte)
|
|
|
|
defer s.pool.Put(bufp)
|
2018-11-14 09:18:35 -05:00
|
|
|
|
2019-05-22 16:47:15 -04:00
|
|
|
_, err = io.CopyBuffer(w, reader, *bufp)
|
|
|
|
return err
|
2018-11-14 09:18:35 -05:00
|
|
|
}
|
|
|
|
|
2016-10-29 15:44:44 -04:00
|
|
|
// AppendFile - append a byte array at path, if file doesn't exist at
|
|
|
|
// path this call explicitly creates it.
|
2020-06-12 23:04:01 -04:00
|
|
|
func (s *xlStorage) AppendFile(volume, path string, buf []byte) (err error) {
|
2019-12-12 09:02:37 -05:00
|
|
|
atomic.AddInt32(&s.activeIOCount, 1)
|
|
|
|
defer func() {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, -1)
|
|
|
|
}()
|
|
|
|
|
2018-12-11 19:22:56 -05:00
|
|
|
var w *os.File
|
2019-04-24 00:25:06 -04:00
|
|
|
// Create file if not found. Not doing O_DIRECT here to avoid the code that does buffer aligned writes.
|
|
|
|
// AppendFile() is only used by healing code to heal objects written in old format.
|
|
|
|
w, err = s.openFile(volume, path, os.O_CREATE|os.O_SYNC|os.O_APPEND|os.O_WRONLY)
|
2016-10-29 15:44:44 -04:00
|
|
|
if err != nil {
|
2016-06-19 18:31:13 -04:00
|
|
|
return err
|
2016-05-04 15:18:40 -04:00
|
|
|
}
|
2018-11-14 09:18:35 -05:00
|
|
|
|
|
|
|
if _, err = w.Write(buf); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return w.Close()
|
2016-04-08 13:37:38 -04:00
|
|
|
}
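
// Illustrative sketch; the object name is hypothetical. AppendFile creates the
// file if needed and appends each buffer with O_SYNC, which is how the healing
// path rebuilds legacy-format objects chunk by chunk.
func exampleAppend(s *xlStorage, chunks [][]byte) error {
	for _, chunk := range chunks {
		if err := s.AppendFile("mybucket", "photos/object.jpg/part.1", chunk); err != nil {
			return err
		}
	}
	return nil
}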
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
// CheckParts checks if the path has the necessary parts available.
|
|
|
|
func (s *xlStorage) CheckParts(volume, path string, fi FileInfo) error {
|
2019-12-12 09:02:37 -05:00
|
|
|
atomic.AddInt32(&s.activeIOCount, 1)
|
|
|
|
defer func() {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, -1)
|
|
|
|
}()
|
|
|
|
|
2016-05-18 00:22:27 -04:00
|
|
|
volumeDir, err := s.getVolDir(volume)
|
|
|
|
if err != nil {
|
2020-06-12 23:04:01 -04:00
|
|
|
return err
|
2016-05-18 00:22:27 -04:00
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
|
2016-05-18 00:22:27 -04:00
|
|
|
// Stat a volume entry.
|
2020-06-12 23:04:01 -04:00
|
|
|
if _, err = os.Stat(volumeDir); err != nil {
|
2016-05-18 00:22:27 -04:00
|
|
|
if os.IsNotExist(err) {
|
2020-06-12 23:04:01 -04:00
|
|
|
return errVolumeNotFound
|
2016-05-18 00:22:27 -04:00
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
return err
|
2016-04-08 13:37:38 -04:00
|
|
|
}
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
for _, part := range fi.Parts {
|
|
|
|
partPath := pathJoin(path, fi.DataDir, fmt.Sprintf("part.%d", part.Number))
|
|
|
|
filePath := pathJoin(volumeDir, partPath)
|
|
|
|
if err = checkPathLength(filePath); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
st, err := os.Stat(filePath)
|
|
|
|
if err != nil {
|
|
|
|
return osErrToFileErr(err)
|
|
|
|
}
|
|
|
|
if st.Mode().IsDir() {
|
|
|
|
return errFileNotFound
|
|
|
|
}
|
2016-05-11 15:55:02 -04:00
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
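
// Illustrative sketch; the object name and helper are hypothetical. It shows
// the on-disk layout CheckParts expects: each part lives under the version's
// data directory as `part.N`.
func examplePartPath(fi FileInfo, partNumber int) string {
	return pathJoin("photos/object.jpg", fi.DataDir, fmt.Sprintf("part.%d", partNumber))
}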
|
|
|
|
|
|
|
|
// CheckFile checks if the path has the necessary metadata.
|
|
|
|
func (s *xlStorage) CheckFile(volume, path string) error {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, 1)
|
|
|
|
defer func() {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, -1)
|
|
|
|
}()
|
|
|
|
|
|
|
|
volumeDir, err := s.getVolDir(volume)
|
2016-04-08 13:37:38 -04:00
|
|
|
if err != nil {
|
2020-06-12 23:04:01 -04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Stat a volume entry.
|
|
|
|
_, err = os.Stat(volumeDir)
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return errVolumeNotFound
|
2016-04-12 15:45:15 -04:00
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
return err
|
2016-04-08 13:37:38 -04:00
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
|
|
|
|
filePath := pathJoin(volumeDir, path, xlStorageFormatFile)
|
|
|
|
if err = checkPathLength(filePath); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
filePathOld := pathJoin(volumeDir, path, xlStorageFormatFileV1)
|
|
|
|
if err = checkPathLength(filePathOld); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
st, err := os.Stat(filePath)
|
|
|
|
if err != nil && !os.IsNotExist(err) {
|
|
|
|
return osErrToFileErr(err)
|
|
|
|
}
|
|
|
|
if st == nil {
|
|
|
|
st, err = os.Stat(filePathOld)
|
|
|
|
if err != nil {
|
|
|
|
return osErrToFileErr(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-13 14:32:47 -04:00
|
|
|
// If it's a directory, it's not a regular file.
|
2016-04-08 13:37:38 -04:00
|
|
|
if st.Mode().IsDir() {
|
2020-06-12 23:04:01 -04:00
|
|
|
return errFileNotFound
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
2016-04-08 13:37:38 -04:00
|
|
|
}
|
|
|
|
|
2020-03-11 11:56:36 -04:00
|
|
|
// deleteFile deletes a file, or a directory if it's empty, unless recursive
|
|
|
|
// is set to true. If the target is successfully deleted, it will recursively
|
|
|
|
// move up the tree, deleting empty parent directories until it finds one
|
|
|
|
// with files in it. Returns nil for a non-empty directory even when
|
|
|
|
// recursive is set to false.
|
|
|
|
func deleteFile(basePath, deletePath string, recursive bool) error {
|
|
|
|
if basePath == "" || deletePath == "" {
|
|
|
|
return nil
|
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
isObjectDir := HasSuffix(deletePath, SlashSeparator)
|
2020-03-11 11:56:36 -04:00
|
|
|
basePath = filepath.Clean(basePath)
|
|
|
|
deletePath = filepath.Clean(deletePath)
|
|
|
|
if !strings.HasPrefix(deletePath, basePath) || deletePath == basePath {
|
2016-04-08 13:37:38 -04:00
|
|
|
return nil
|
|
|
|
}
|
2017-08-03 23:04:28 -04:00
|
|
|
|
2020-03-11 11:56:36 -04:00
|
|
|
var err error
|
|
|
|
if recursive {
|
|
|
|
err = os.RemoveAll(deletePath)
|
|
|
|
} else {
|
|
|
|
err = os.Remove(deletePath)
|
|
|
|
}
|
|
|
|
if err != nil {
|
2019-03-18 10:46:20 -04:00
|
|
|
switch {
|
|
|
|
case isSysErrNotEmpty(err):
|
2020-06-12 23:04:01 -04:00
|
|
|
// If the object is a directory but it is not empty,
|
|
|
|
// return errFileNotFound to indicate the empty-prefix object does not exist.
|
|
|
|
if isObjectDir {
|
|
|
|
return errFileNotFound
|
|
|
|
}
|
2019-03-18 10:46:20 -04:00
|
|
|
// Ignore errors if the directory is not empty. The server relies on
|
|
|
|
// this functionality, and sometimes uses recursion that should not
|
|
|
|
// error on parent directories.
|
2017-08-03 23:04:28 -04:00
|
|
|
return nil
|
2019-03-18 10:46:20 -04:00
|
|
|
case os.IsNotExist(err):
|
2016-10-17 19:38:46 -04:00
|
|
|
return errFileNotFound
|
2019-03-18 10:46:20 -04:00
|
|
|
case os.IsPermission(err):
|
2016-10-17 19:38:46 -04:00
|
|
|
return errFileAccessDenied
|
2019-03-18 10:46:20 -04:00
|
|
|
case isSysErrIO(err):
|
2018-07-27 18:32:19 -04:00
|
|
|
return errFaultyDisk
|
2019-03-18 10:46:20 -04:00
|
|
|
default:
|
|
|
|
return err
|
2016-10-17 19:38:46 -04:00
|
|
|
}
|
2016-04-08 13:37:38 -04:00
|
|
|
}
|
2017-08-03 23:04:28 -04:00
|
|
|
|
2020-04-08 14:32:58 -04:00
|
|
|
deletePath = filepath.Dir(deletePath)
|
2018-02-20 18:33:26 -05:00
|
|
|
|
2020-03-11 11:56:36 -04:00
|
|
|
// Delete the parent directory, but not recursively. Errors from
|
|
|
|
// parent directories should not be propagated to the caller.
|
|
|
|
deleteFile(basePath, deletePath, false)
|
2017-08-04 19:51:20 -04:00
|
|
|
|
|
|
|
return nil
|
2016-04-08 20:13:16 -04:00
|
|
|
}
|
|
|
|
|
2016-04-08 13:37:38 -04:00
|
|
|
// DeleteFile - delete a file at path.
|
2020-06-12 23:04:01 -04:00
|
|
|
func (s *xlStorage) DeleteFile(volume, path string) (err error) {
|
2019-12-12 09:02:37 -05:00
|
|
|
atomic.AddInt32(&s.activeIOCount, 1)
|
|
|
|
defer func() {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, -1)
|
|
|
|
}()
|
|
|
|
|
2016-05-18 00:22:27 -04:00
|
|
|
volumeDir, err := s.getVolDir(volume)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-10-01 16:12:15 -04:00
|
|
|
|
2016-05-18 00:22:27 -04:00
|
|
|
// Stat a volume entry.
|
2020-03-11 11:56:36 -04:00
|
|
|
_, err = os.Stat(volumeDir)
|
2016-04-13 14:32:47 -04:00
|
|
|
if err != nil {
|
2016-05-18 00:22:27 -04:00
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return errVolumeNotFound
|
2019-10-01 16:12:15 -04:00
|
|
|
} else if os.IsPermission(err) {
|
|
|
|
return errVolumeAccessDenied
|
2018-07-27 18:32:19 -04:00
|
|
|
} else if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
2016-05-18 00:22:27 -04:00
|
|
|
}
|
2016-04-13 14:32:47 -04:00
|
|
|
return err
|
2016-04-08 13:37:38 -04:00
|
|
|
}
|
|
|
|
|
2019-08-06 15:08:58 -04:00
|
|
|
// The following code is needed so that we retain the SlashSeparator suffix, if any, in the
|
2016-04-13 14:32:47 -04:00
|
|
|
// path argument.
|
2016-05-05 04:39:26 -04:00
|
|
|
filePath := pathJoin(volumeDir, path)
|
2020-03-11 11:56:36 -04:00
|
|
|
if err = checkPathLength(filePath); err != nil {
|
2016-05-11 15:55:02 -04:00
|
|
|
return err
|
|
|
|
}
|
2016-04-08 13:37:38 -04:00
|
|
|
|
|
|
|
// Delete the file, and also delete the parent directory if it's empty.
|
2020-03-11 11:56:36 -04:00
|
|
|
return deleteFile(volumeDir, filePath, false)
|
2016-02-12 18:27:10 -05:00
|
|
|
}
|
2016-04-29 15:17:48 -04:00
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
func (s *xlStorage) DeleteFileBulk(volume string, paths []string) (errs []error, err error) {
|
2020-03-06 16:44:24 -05:00
|
|
|
atomic.AddInt32(&s.activeIOCount, 1)
|
|
|
|
defer func() {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, -1)
|
|
|
|
}()
|
|
|
|
|
|
|
|
volumeDir, err := s.getVolDir(volume)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Stat a volume entry.
|
|
|
|
_, err = os.Stat(volumeDir)
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return nil, errVolumeNotFound
|
|
|
|
} else if os.IsPermission(err) {
|
|
|
|
return nil, errVolumeAccessDenied
|
|
|
|
} else if isSysErrIO(err) {
|
|
|
|
return nil, errFaultyDisk
|
|
|
|
}
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-05-13 15:25:49 -04:00
|
|
|
errs = make([]error, len(paths))
|
2020-03-06 16:44:24 -05:00
|
|
|
// The following code is needed so that we retain the SlashSeparator
|
|
|
|
// suffix, if any, in the path argument.
|
2019-05-13 15:25:49 -04:00
|
|
|
for idx, path := range paths {
|
2020-03-06 16:44:24 -05:00
|
|
|
filePath := pathJoin(volumeDir, path)
|
|
|
|
errs[idx] = checkPathLength(filePath)
|
|
|
|
if errs[idx] != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
// Delete the file, and also delete the parent directory if it's empty.
|
2020-03-11 11:56:36 -04:00
|
|
|
errs[idx] = deleteFile(volumeDir, filePath, false)
|
2019-05-13 15:25:49 -04:00
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
// RenameData - renames source path to destination path atomically, both metadata and data directory.
|
|
|
|
func (s *xlStorage) RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath string) (err error) {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, 1)
|
|
|
|
defer func() {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, -1)
|
|
|
|
}()
|
|
|
|
|
|
|
|
srcVolumeDir, err := s.getVolDir(srcVolume)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
dstVolumeDir, err := s.getVolDir(dstVolume)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Stat a volume entry.
|
|
|
|
_, err = os.Stat(srcVolumeDir)
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return errVolumeNotFound
|
|
|
|
} else if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
_, err = os.Stat(dstVolumeDir)
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return errVolumeNotFound
|
|
|
|
} else if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
srcFilePath := slashpath.Join(srcVolumeDir, pathJoin(srcPath, xlStorageFormatFile))
|
|
|
|
dstFilePath := slashpath.Join(dstVolumeDir, pathJoin(dstPath, xlStorageFormatFile))
|
|
|
|
|
|
|
|
var srcDataPath string
|
|
|
|
var dstDataPath string
|
|
|
|
if dataDir != "" {
|
|
|
|
srcDataPath = retainSlash(pathJoin(srcVolumeDir, srcPath, dataDir))
|
|
|
|
// Make sure to always use path.Join here; do not use pathJoin as
|
|
|
|
// it would additionally add `/` at the end and get in the
|
|
|
|
// way of renameAll() and parent directory creation.
|
|
|
|
dstDataPath = slashpath.Join(dstVolumeDir, dstPath, dataDir)
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = checkPathLength(srcFilePath); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = checkPathLength(dstFilePath); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
srcBuf, err := ioutil.ReadFile(srcFilePath)
|
|
|
|
if err != nil {
|
|
|
|
return osErrToFileErr(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
fi, err := getFileInfo(srcBuf, dstVolume, dstPath, "")
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
dstBuf, err := ioutil.ReadFile(dstFilePath)
|
2020-06-19 13:58:17 -04:00
|
|
|
if err != nil {
|
|
|
|
if !os.IsNotExist(err) {
|
|
|
|
return osErrToFileErr(err)
|
|
|
|
}
|
|
|
|
err = s.renameLegacyMetadata(dstVolume, dstPath)
|
|
|
|
if err != nil && err != errFileNotFound {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err == nil {
|
|
|
|
dstBuf, err = ioutil.ReadFile(dstFilePath)
|
|
|
|
if err != nil && !os.IsNotExist(err) {
|
|
|
|
return osErrToFileErr(err)
|
|
|
|
}
|
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
var xlMeta xlMetaV2
|
|
|
|
var legacyPreserved bool
|
|
|
|
if len(dstBuf) > 0 {
|
|
|
|
if isXL2V1Format(dstBuf) {
|
|
|
|
if err = xlMeta.Load(dstBuf); err != nil {
|
|
|
|
logger.LogIf(s.ctx, err)
|
|
|
|
return errFileCorrupt
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// This code-path is to preserve the legacy data.
|
|
|
|
xlMetaLegacy := &xlMetaV1Object{}
|
|
|
|
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
|
|
|
if err := json.Unmarshal(dstBuf, xlMetaLegacy); err != nil {
|
|
|
|
logger.LogIf(s.ctx, err)
|
|
|
|
return errFileCorrupt
|
|
|
|
}
|
|
|
|
if err = xlMeta.AddLegacy(xlMetaLegacy); err != nil {
|
|
|
|
logger.LogIf(s.ctx, err)
|
|
|
|
return errFileCorrupt
|
|
|
|
}
|
|
|
|
legacyPreserved = true
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// It is possible that some drives may not have an `xl.meta` file;
|
|
|
|
// in such scenarios verify if at least `part.1` files exist
|
|
|
|
// to check for a legacy version.
|
|
|
|
currentDataPath := pathJoin(dstVolumeDir, dstPath)
|
|
|
|
entries, err := readDirN(currentDataPath, 1)
|
|
|
|
if err != nil && err != errFileNotFound {
|
|
|
|
return osErrToFileErr(err)
|
|
|
|
}
|
|
|
|
for _, entry := range entries {
|
|
|
|
if entry == xlStorageFormatFile {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if strings.HasSuffix(entry, slashSeparator) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if strings.HasPrefix(entry, "part.") {
|
|
|
|
legacyPreserved = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if legacyPreserved {
|
|
|
|
// Preserve all the legacy data; this could be slow, but there can be at most 10,000 parts.
|
|
|
|
currentDataPath := pathJoin(dstVolumeDir, dstPath)
|
|
|
|
entries, err := readDir(currentDataPath)
|
|
|
|
if err != nil {
|
|
|
|
return osErrToFileErr(err)
|
|
|
|
}
|
|
|
|
legacyDataPath := pathJoin(dstVolumeDir, dstPath, legacyDataDir)
|
|
|
|
// The legacy data dir holds old content; honor the system umask.
|
|
|
|
if err = os.Mkdir(legacyDataPath, 0777); err != nil {
|
|
|
|
if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
|
|
|
}
|
|
|
|
return osErrToFileErr(err)
|
|
|
|
}
|
|
|
|
|
2020-07-11 12:37:34 -04:00
|
|
|
// Sync all the previous directory operations.
|
2020-06-19 13:58:17 -04:00
|
|
|
globalSync()
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
for _, entry := range entries {
|
|
|
|
if entry == xlStorageFormatFile {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = os.Rename(pathJoin(currentDataPath, entry), pathJoin(legacyDataPath, entry)); err != nil {
|
|
|
|
if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
|
|
|
}
|
|
|
|
return osErrToFileErr(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Sync all the metadata operations once renames are done.
|
|
|
|
globalSync()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
var oldDstDataPath string
|
|
|
|
if fi.VersionID == "" {
|
|
|
|
// return the latest "null" versionId info
|
|
|
|
ofi, err := xlMeta.ToFileInfo(dstVolume, dstPath, nullVersionID)
|
|
|
|
if err == nil {
|
|
|
|
// Purge the destination path as we are not preserving anything;
|
|
|
|
// a versioned object was not requested.
|
|
|
|
oldDstDataPath = pathJoin(dstVolumeDir, dstPath, ofi.DataDir)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = xlMeta.AddVersion(fi); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
dstBuf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...))
|
|
|
|
if err != nil {
|
|
|
|
return errFileCorrupt
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = s.WriteAll(srcVolume, pathJoin(srcPath, xlStorageFormatFile), bytes.NewReader(dstBuf)); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = renameAll(srcFilePath, dstFilePath); err != nil {
|
|
|
|
if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if srcDataPath != "" {
|
|
|
|
removeAll(oldDstDataPath)
|
|
|
|
removeAll(dstDataPath)
|
|
|
|
if err = renameAll(srcDataPath, dstDataPath); err != nil {
|
|
|
|
if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove parent dir of the source file if empty
|
|
|
|
if parentDir := slashpath.Dir(srcFilePath); isDirEmpty(parentDir) {
|
|
|
|
deleteFile(srcVolumeDir, parentDir, false)
|
|
|
|
}
|
|
|
|
|
|
|
|
if srcDataPath != "" {
|
|
|
|
if parentDir := slashpath.Dir(srcDataPath); isDirEmpty(parentDir) {
|
|
|
|
deleteFile(srcVolumeDir, parentDir, false)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
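
// Illustrative sketch; the staging volume and object names are hypothetical.
// RenameData moves both `xl.meta` and the version's data directory from a
// staging location into their final place in one call.
func exampleCommitObject(s *xlStorage, fi FileInfo) error {
	return s.RenameData("staging", "uploads/tmp-object", fi.DataDir,
		"mybucket", "photos/object.jpg")
}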
|
|
|
|
|
2016-05-28 18:13:15 -04:00
|
|
|
// RenameFile - rename source path to destination path atomically.
|
2020-06-12 23:04:01 -04:00
|
|
|
func (s *xlStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err error) {
|
2019-12-12 09:02:37 -05:00
|
|
|
atomic.AddInt32(&s.activeIOCount, 1)
|
|
|
|
defer func() {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, -1)
|
|
|
|
}()
|
|
|
|
|
2016-05-18 00:22:27 -04:00
|
|
|
srcVolumeDir, err := s.getVolDir(srcVolume)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
dstVolumeDir, err := s.getVolDir(dstVolume)
|
2016-04-29 15:17:48 -04:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-05-18 00:22:27 -04:00
|
|
|
// Stat a volume entry.
|
2018-02-20 15:20:18 -05:00
|
|
|
_, err = os.Stat(srcVolumeDir)
|
2016-04-29 15:17:48 -04:00
|
|
|
if err != nil {
|
2016-05-18 00:22:27 -04:00
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return errVolumeNotFound
|
2018-07-27 18:32:19 -04:00
|
|
|
} else if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
2016-05-18 00:22:27 -04:00
|
|
|
}
|
2016-04-29 15:17:48 -04:00
|
|
|
return err
|
|
|
|
}
|
2018-02-20 15:20:18 -05:00
|
|
|
_, err = os.Stat(dstVolumeDir)
|
2016-05-18 00:22:27 -04:00
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return errVolumeNotFound
|
2018-07-27 18:32:19 -04:00
|
|
|
} else if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
2016-05-18 00:22:27 -04:00
|
|
|
}
|
2020-04-11 14:15:30 -04:00
|
|
|
return err
|
2016-05-18 00:22:27 -04:00
|
|
|
}
|
|
|
|
|
2019-12-06 02:16:06 -05:00
|
|
|
srcIsDir := HasSuffix(srcPath, SlashSeparator)
|
|
|
|
dstIsDir := HasSuffix(dstPath, SlashSeparator)
|
2016-05-16 17:31:28 -04:00
|
|
|
// Both src and dst must be directories, or both must be files; otherwise return an error.
|
2016-05-13 14:52:36 -04:00
|
|
|
if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) {
|
|
|
|
return errFileAccessDenied
|
|
|
|
}
|
2016-06-17 14:57:51 -04:00
|
|
|
srcFilePath := slashpath.Join(srcVolumeDir, srcPath)
|
2018-02-20 15:20:18 -05:00
|
|
|
if err = checkPathLength(srcFilePath); err != nil {
|
2016-06-17 14:57:51 -04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
dstFilePath := slashpath.Join(dstVolumeDir, dstPath)
|
2018-02-20 15:20:18 -05:00
|
|
|
if err = checkPathLength(dstFilePath); err != nil {
|
2016-06-17 14:57:51 -04:00
|
|
|
return err
|
|
|
|
}
|
2016-05-13 14:52:36 -04:00
|
|
|
if srcIsDir {
|
2018-02-20 15:20:18 -05:00
|
|
|
// If source is a directory, we expect the destination to be non-existent, but
|
|
|
|
// we still need to allow overwriting an empty directory since it represents
|
|
|
|
// an empty object directory.
|
|
|
|
_, err = os.Stat(dstFilePath)
|
2018-07-27 18:32:19 -04:00
|
|
|
if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
|
|
|
}
|
2018-02-20 15:20:18 -05:00
|
|
|
if err == nil && !isDirEmpty(dstFilePath) {
|
2016-05-13 14:52:36 -04:00
|
|
|
return errFileAccessDenied
|
|
|
|
}
|
2019-03-26 17:57:44 -04:00
|
|
|
if err != nil && !os.IsNotExist(err) {
|
2016-05-13 14:52:36 -04:00
|
|
|
return err
|
|
|
|
}
|
2019-03-26 17:57:44 -04:00
|
|
|
// If the destination is empty, remove it before the rename.
|
|
|
|
if isDirEmpty(dstFilePath) {
|
|
|
|
if err = os.Remove(dstFilePath); err != nil {
|
|
|
|
if isSysErrNotEmpty(err) {
|
|
|
|
return errFileAccessDenied
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2016-05-13 14:52:36 -04:00
|
|
|
}
|
2018-01-13 12:13:02 -05:00
|
|
|
|
|
|
|
if err = renameAll(srcFilePath, dstFilePath); err != nil {
|
2018-07-27 18:32:19 -04:00
|
|
|
if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
|
|
|
}
|
2016-05-03 19:10:24 -04:00
|
|
|
return err
|
|
|
|
}
|
2016-11-21 19:34:57 -05:00
|
|
|
|
|
|
|
// Remove parent dir of the source file if empty
|
2016-11-30 23:56:16 -05:00
|
|
|
if parentDir := slashpath.Dir(srcFilePath); isDirEmpty(parentDir) {
|
2020-03-11 11:56:36 -04:00
|
|
|
deleteFile(srcVolumeDir, parentDir, false)
|
2016-11-21 19:34:57 -05:00
|
|
|
}
|
|
|
|
|
2016-05-03 19:10:24 -04:00
|
|
|
return nil
|
2016-04-29 15:17:48 -04:00
|
|
|
}
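
// Illustrative sketch; the volume and paths are hypothetical. RenameFile
// requires the source and destination to both be files or both be directories
// (trailing SlashSeparator), otherwise it fails with errFileAccessDenied.
func exampleRenameFile(s *xlStorage) error {
	return s.RenameFile("mybucket", "tmp/object.part", "mybucket", "photos/object.part")
}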
|
2019-07-08 16:51:18 -04:00
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
func (s *xlStorage) bitrotVerify(partPath string, partSize int64, algo BitrotAlgorithm, sum []byte, shardSize int64) error {
|
2019-07-08 16:51:18 -04:00
|
|
|
// Open the file for reading.
|
2020-06-12 23:04:01 -04:00
|
|
|
file, err := os.Open(partPath)
|
2019-07-08 16:51:18 -04:00
|
|
|
if err != nil {
|
2020-06-12 23:04:01 -04:00
|
|
|
return osErrToFileErr(err)
|
2019-07-08 16:51:18 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Close the file descriptor.
|
|
|
|
defer file.Close()
|
|
|
|
|
|
|
|
if algo != HighwayHash256S {
|
|
|
|
bufp := s.pool.Get().(*[]byte)
|
|
|
|
defer s.pool.Put(bufp)
|
|
|
|
|
|
|
|
h := algo.New()
|
|
|
|
if _, err = io.CopyBuffer(h, file, *bufp); err != nil {
|
2019-10-01 16:12:15 -04:00
|
|
|
// Premature failure in reading the object; file is corrupt.
|
|
|
|
return errFileCorrupt
|
2019-07-08 16:51:18 -04:00
|
|
|
}
|
|
|
|
if !bytes.Equal(h.Sum(nil), sum) {
|
2019-10-01 16:12:15 -04:00
|
|
|
return errFileCorrupt
|
2019-07-08 16:51:18 -04:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
buf := make([]byte, shardSize)
|
|
|
|
h := algo.New()
|
|
|
|
hashBuf := make([]byte, h.Size())
|
|
|
|
fi, err := file.Stat()
|
|
|
|
if err != nil {
|
2019-10-01 16:12:15 -04:00
|
|
|
// Unable to stat the file, return an expected error
|
|
|
|
// for healing code to fix this file.
|
2019-07-08 16:51:18 -04:00
|
|
|
return err
|
|
|
|
}
|
2019-07-12 19:29:44 -04:00
|
|
|
|
2019-10-01 16:12:15 -04:00
|
|
|
size := fi.Size()
|
|
|
|
|
2019-09-11 16:49:53 -04:00
|
|
|
// Calculate the size of the bitrot file and compare
|
|
|
|
// it with the actual file size.
|
2020-06-12 23:04:01 -04:00
|
|
|
if size != bitrotShardFileSize(partSize, shardSize, algo) {
|
2019-10-01 16:12:15 -04:00
|
|
|
return errFileCorrupt
|
2019-07-12 19:29:44 -04:00
|
|
|
}
|
|
|
|
|
2019-10-01 16:12:15 -04:00
|
|
|
var n int
|
2019-07-08 16:51:18 -04:00
|
|
|
for {
|
|
|
|
if size == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
h.Reset()
|
2019-10-01 16:12:15 -04:00
|
|
|
n, err = file.Read(hashBuf)
|
2019-07-08 16:51:18 -04:00
|
|
|
if err != nil {
|
2019-10-01 16:12:15 -04:00
|
|
|
// Reads failed for an object with the right size; the file is corrupt.
|
2019-07-08 16:51:18 -04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
size -= int64(n)
|
|
|
|
if size < int64(len(buf)) {
|
|
|
|
buf = buf[:size]
|
|
|
|
}
|
|
|
|
n, err = file.Read(buf)
|
|
|
|
if err != nil {
|
2019-10-01 16:12:15 -04:00
|
|
|
// Reads failed for an object with the right size, at a later offset.
|
2019-07-08 16:51:18 -04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
size -= int64(n)
|
|
|
|
h.Write(buf)
|
|
|
|
if !bytes.Equal(h.Sum(nil), hashBuf) {
|
2019-10-01 16:12:15 -04:00
|
|
|
return errFileCorrupt
|
2019-07-08 16:51:18 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
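
// Illustrative sketch; the helper name is hypothetical. For streaming bitrot
// (HighwayHash256S) the loop above walks a file laid out as repeated
// <hash><shard> records, so the expected on-disk size can be recomputed as
// below, assuming bitrotShardFileSize follows the same layout.
func exampleBitrotFileSize(dataSize, shardSize, hashSize int64) int64 {
	shards := (dataSize + shardSize - 1) / shardSize // number of full or partial shards
	return dataSize + shards*hashSize
}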
|
2020-06-12 23:04:01 -04:00
|
|
|
|
|
|
|
func (s *xlStorage) VerifyFile(volume, path string, fi FileInfo) (err error) {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, 1)
|
|
|
|
defer func() {
|
|
|
|
atomic.AddInt32(&s.activeIOCount, -1)
|
|
|
|
}()
|
|
|
|
|
|
|
|
volumeDir, err := s.getVolDir(volume)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Stat a volume entry.
|
|
|
|
_, err = os.Stat(volumeDir)
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return errVolumeNotFound
|
|
|
|
} else if isSysErrIO(err) {
|
|
|
|
return errFaultyDisk
|
|
|
|
} else if os.IsPermission(err) {
|
|
|
|
return errVolumeAccessDenied
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
erasure := fi.Erasure
|
|
|
|
for _, part := range fi.Parts {
|
|
|
|
checksumInfo := erasure.GetChecksumInfo(part.Number)
|
|
|
|
partPath := pathJoin(volumeDir, path, fi.DataDir, fmt.Sprintf("part.%d", part.Number))
|
|
|
|
if err := s.bitrotVerify(partPath,
|
|
|
|
erasure.ShardFileSize(part.Size),
|
|
|
|
checksumInfo.Algorithm,
|
|
|
|
checksumInfo.Hash, erasure.ShardSize()); err != nil {
|
|
|
|
if !IsErr(err, []error{
|
|
|
|
errFileNotFound,
|
|
|
|
errVolumeNotFound,
|
|
|
|
errFileCorrupt,
|
|
|
|
}...) {
|
|
|
|
logger.GetReqInfo(s.ctx).AppendTags("disk", s.String())
|
|
|
|
logger.LogIf(s.ctx, err)
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|