// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"os"
	pathutil "path"
	"path/filepath"
	"runtime"
	"slices"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

	"github.com/dustin/go-humanize"
	"github.com/google/uuid"
	jsoniter "github.com/json-iterator/go"
	"github.com/klauspost/filepathx"
	"github.com/minio/madmin-go/v3"
	"github.com/minio/minio/internal/bucket/lifecycle"
	"github.com/minio/minio/internal/cachevalue"
	"github.com/minio/minio/internal/config/storageclass"
	"github.com/minio/minio/internal/disk"
	xioutil "github.com/minio/minio/internal/ioutil"
	"github.com/minio/minio/internal/logger"
	"github.com/pkg/xattr"
)

const (
	nullVersionID = "null"

	// Small file threshold below which data accompanies metadata from storage layer.
	smallFileThreshold = 128 * humanize.KiByte // Optimized for NVMe/SSDs

	// For hard drives it is possible to set this to a lower value to avoid any
	// spike in latency. But currently we are simply keeping it optimal for SSDs.

	// bigFileThreshold is the point where we add readahead to put operations.
	bigFileThreshold = 128 * humanize.MiByte

	// XL metadata file carries per object metadata.
	xlStorageFormatFile = "xl.meta"

	// XL metadata file backup file carries previous per object metadata.
	xlStorageFormatFileBackup = "xl.meta.bkp"
)

var alignedBuf []byte

func init() {
	alignedBuf = disk.AlignedBlock(xioutil.DirectioAlignSize)
	_, _ = rand.Read(alignedBuf)
}

// isValidVolname verifies a volume name in accordance with object
// layer requirements.
func isValidVolname(volname string) bool {
	if len(volname) < 3 {
		return false
	}

	if runtime.GOOS == "windows" {
		// Volname shouldn't have reserved characters in Windows.
		return !strings.ContainsAny(volname, `\:*?\"<>|`)
	}

	return true
}

// xlStorage - implements StorageAPI interface.
type xlStorage struct {
	// Indicates if NSScanner is in progress on this disk
	scanning int32

	drivePath string
	endpoint  Endpoint

	globalSync bool
	oDirect    bool // indicates if this disk supports ODirect

	diskID string

	formatFileInfo  os.FileInfo
	formatFile      string
	formatLegacy    bool
	formatLastCheck time.Time

	diskInfoCache *cachevalue.Cache[DiskInfo]
	sync.RWMutex
	formatData []byte

	nrRequests   uint64
	major, minor uint32
	fsType       string

	immediatePurge chan string

	// mutex to prevent concurrent read operations overloading walks.
	rotational bool
	walkMu     *sync.Mutex
	walkReadMu *sync.Mutex
}

// checkPathLength - returns an error if the given path name exceeds
// the platform limits.
func checkPathLength(pathName string) error {
	// Apple OS X path length is limited to 1016
	if runtime.GOOS == "darwin" && len(pathName) > 1016 {
		return errFileNameTooLong
	}

	// Disallow more than 1024 characters on windows, there
	// are no known name_max limits on Windows.
	if runtime.GOOS == "windows" && len(pathName) > 1024 {
		return errFileNameTooLong
	}

	// On Unix we reject paths if they are just '.', '..' or '/'
	if pathName == "." || pathName == ".." || pathName == slashSeparator {
		return errFileAccessDenied
	}

	// Check that no path segment exceeds 255 characters on all Unix
	// platforms, look for this value as NAME_MAX in
	// /usr/include/linux/limits.h
	var count int64
	for _, p := range pathName {
		switch p {
		case '/':
			count = 0 // Reset
		case '\\':
			if runtime.GOOS == globalWindowsOSName {
				count = 0
			}
		default:
			count++
			if count > 255 {
				return errFileNameTooLong
			}
		}
	} // Success.
	return nil
}
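
// getValidPath returns the absolute form of the given path, creating the
// directory if it does not exist yet and verifying that it is a directory.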
func getValidPath(path string) (string, error) {
	if path == "" {
		return path, errInvalidArgument
	}

	var err error
	// Disallow relative paths, figure out absolute paths.
	path, err = filepath.Abs(path)
	if err != nil {
		return path, err
	}

	fi, err := Lstat(path)
	if err != nil && !osIsNotExist(err) {
		return path, err
	}
	if osIsNotExist(err) {
		// Disk not found, create it.
		if err = mkdirAll(path, 0o777, ""); err != nil {
			return path, err
		}
	}
	if fi != nil && !fi.IsDir() {
		return path, errDiskNotDir
	}

	return path, nil
}

// Make Erasure backend meta volumes.
func makeFormatErasureMetaVolumes(disk StorageAPI) error {
	if disk == nil {
		return errDiskNotFound
	}
	volumes := []string{
		minioMetaTmpDeletedBucket, // creates .minio.sys/tmp as well as .minio.sys/tmp/.trash
		minioMetaMultipartBucket,  // creates .minio.sys/multipart
		dataUsageBucket,           // creates .minio.sys/buckets
		minioConfigBucket,         // creates .minio.sys/config
	}
	// Attempt to create MinIO internal buckets.
	return disk.MakeVolBulk(context.TODO(), volumes...)
}

// Initialize a new storage disk.
func newXLStorage(ep Endpoint, cleanUp bool) (s *xlStorage, err error) {
	immediatePurgeQueue := 100000
	if globalIsTesting || globalIsCICD {
		immediatePurgeQueue = 1
	}
	s = &xlStorage{
		drivePath:      ep.Path,
		endpoint:       ep,
		globalSync:     globalFSOSync,
		diskInfoCache:  cachevalue.New[DiskInfo](),
		immediatePurge: make(chan string, immediatePurgeQueue),
	}

	defer func() {
		if err == nil {
			go s.cleanupTrashImmediateCallers(GlobalContext)
		}
	}()

	s.drivePath, err = getValidPath(ep.Path)
	if err != nil {
		s.drivePath = ep.Path
		return s, err
	}

	info, err := disk.GetInfo(s.drivePath, true)
	if err != nil {
		return s, err
	}
	s.major = info.Major
	s.minor = info.Minor
	s.fsType = info.FSType

	if !globalIsCICD && !globalIsErasureSD {
		var rootDrive bool
		if globalRootDiskThreshold > 0 {
			// Use MINIO_ROOTDISK_THRESHOLD_SIZE to figure out if
			// this disk is a root disk. treat those disks with
			// size less than or equal to the threshold as rootDrives.
			rootDrive = info.Total <= globalRootDiskThreshold
		} else {
			rootDrive, err = disk.IsRootDisk(s.drivePath, SlashSeparator)
			if err != nil {
				return nil, err
			}
		}
		if rootDrive {
			return s, errDriveIsRoot
		}
	}

	// Sanitize before setting it
	if info.NRRequests > 0 {
		s.nrRequests = info.NRRequests
	}

	// We stagger listings only on HDDs.
	if info.Rotational == nil || *info.Rotational {
		s.rotational = true
		s.walkMu = &sync.Mutex{}
		s.walkReadMu = &sync.Mutex{}
	}

	if cleanUp {
		bgFormatErasureCleanupTmp(s.drivePath) // cleanup any old data.
	}

	formatData, formatFi, err := formatErasureMigrate(s.drivePath)
	if err != nil && !errors.Is(err, os.ErrNotExist) {
		if os.IsPermission(err) {
			return s, errDiskAccessDenied
		} else if isSysErrIO(err) {
			return s, errFaultyDisk
		}
		return s, err
	}
	s.formatData = formatData
	s.formatFileInfo = formatFi
	s.formatFile = pathJoin(s.drivePath, minioMetaBucket, formatConfigFile)

	// Create all necessary bucket folders if possible.
	if err = makeFormatErasureMetaVolumes(s); err != nil {
		return s, err
	}

	if len(s.formatData) > 0 {
		format := &formatErasureV3{}
		json := jsoniter.ConfigCompatibleWithStandardLibrary
		if err = json.Unmarshal(s.formatData, &format); err != nil {
			return s, errCorruptedFormat
		}
		m, n, err := findDiskIndexByDiskID(format, format.Erasure.This)
		if err != nil {
			return s, err
		}
		diskID := format.Erasure.This
		if m != ep.SetIdx || n != ep.DiskIdx {
			storageLogOnceIf(context.Background(),
				fmt.Errorf("unexpected drive ordering on pool: %s: found drive at (set=%s, drive=%s), expected at (set=%s, drive=%s): %s(%s): %w",
					humanize.Ordinal(ep.PoolIdx+1), humanize.Ordinal(m+1), humanize.Ordinal(n+1), humanize.Ordinal(ep.SetIdx+1), humanize.Ordinal(ep.DiskIdx+1),
					s, s.diskID, errInconsistentDisk), "drive-order-format-json")
			return s, errInconsistentDisk
		}
		s.diskID = diskID
		s.formatLastCheck = time.Now()
		s.formatLegacy = format.Erasure.DistributionAlgo == formatErasureVersionV2DistributionAlgoV1
	}

	// Return an error if ODirect is not supported. Single disk will have
	// oDirect off.
	if globalIsErasureSD || !disk.ODirectPlatform {
		s.oDirect = false
	} else if err := s.checkODirectDiskSupport(info.FSType); err == nil {
		s.oDirect = true
	} else {
		return s, err
	}

	// Initialize DiskInfo cache
	s.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{},
		func(ctx context.Context) (DiskInfo, error) {
			dcinfo := DiskInfo{}
			di, err := getDiskInfo(s.drivePath)
			if err != nil {
				return dcinfo, err
			}
			dcinfo.Major = di.Major
			dcinfo.Minor = di.Minor
			dcinfo.Total = di.Total
			dcinfo.Free = di.Free
			dcinfo.Used = di.Used
			dcinfo.UsedInodes = di.Files - di.Ffree
			dcinfo.FreeInodes = di.Ffree
			dcinfo.FSType = di.FSType
			diskID, err := s.GetDiskID()
			// Healing is 'true' when
			// - if we found an unformatted disk (no 'format.json')
			// - if we found healing tracker 'healing.bin'
			dcinfo.Healing = errors.Is(err, errUnformattedDisk) || (s.Healing() != nil)
			dcinfo.ID = diskID
			return dcinfo, err
		},
	)

	// Success.
	return s, nil
}

// getDiskInfo returns given disk information.
func getDiskInfo(drivePath string) (di disk.Info, err error) {
	if err = checkPathLength(drivePath); err == nil {
		di, err = disk.GetInfo(drivePath, false)
	}
	switch {
	case osIsNotExist(err):
		err = errDiskNotFound
	case isSysErrTooLong(err):
		err = errFileNameTooLong
	case isSysErrIO(err):
		err = errFaultyDisk
	}

	return di, err
}

// Implements stringer compatible interface.
func (s *xlStorage) String() string {
	return s.drivePath
}
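
// Hostname returns the host of the endpoint this drive belongs to.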
func (s *xlStorage) Hostname() string {
	return s.endpoint.Host
}
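
// Endpoint returns the endpoint backing this drive.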
func (s *xlStorage) Endpoint() Endpoint {
	return s.endpoint
}
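
// Close is a no-op for local storage, there are no resources to release.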
func (*xlStorage) Close() error {
	return nil
}
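
// IsOnline - local storage is always online.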
func (s *xlStorage) IsOnline() bool {
	return true
}
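
// LastConn returns the zero time; last-connection tracking only applies
// to remote storage.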
func (s *xlStorage) LastConn() time.Time {
	return time.Time{}
}
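
// IsLocal - returns true as this is a local storage implementation.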
func (s *xlStorage) IsLocal() bool {
	return true
}

// Retrieve location indexes.
func (s *xlStorage) GetDiskLoc() (poolIdx, setIdx, diskIdx int) {
	return s.endpoint.PoolIdx, s.endpoint.SetIdx, s.endpoint.DiskIdx
}
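
// Healing returns the healing tracker persisted on this drive, or nil if
// the drive is not being healed or the tracker cannot be read.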
func (s *xlStorage) Healing() *healingTracker {
	healingFile := pathJoin(s.drivePath, minioMetaBucket,
		bucketMetaPrefix, healingTrackerFilename)
	b, err := os.ReadFile(healingFile)
	if err != nil {
		return nil
	}
	if len(b) == 0 {
		// 'healing.bin' might be truncated
		return nil
	}
	h := newHealingTracker()
	_, err = h.UnmarshalMsg(b)
	bugLogIf(GlobalContext, err)
	return h
}

// checkODirectDiskSupport asks the disk to write some data
// with O_DIRECT support, returning any error encountered and
// errUnsupportedDisk if there is no O_DIRECT support.
func (s *xlStorage) checkODirectDiskSupport(fsType string) error {
	if !disk.ODirectPlatform {
		return errUnsupportedDisk
	}

	// We know XFS already supports O_DIRECT no need to check.
	if fsType == "XFS" {
		return nil
	}

	// For all other FS pay the price of not using our recommended filesystem.

	// Check if backend is writable and supports O_DIRECT
	uuid := mustGetUUID()
	filePath := pathJoin(s.drivePath, minioMetaTmpDeletedBucket, ".writable-check-"+uuid+".tmp")

	// Create top level directories if they don't exist.
	// with mode 0o777 mkdir honors system umask.
	mkdirAll(pathutil.Dir(filePath), 0o777, s.drivePath) // don't need to fail here

	w, err := s.openFileDirect(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL)
	if err != nil {
		return err
	}
	_, err = w.Write(alignedBuf)
	w.Close()
	if err != nil {
		if isSysErrInvalidArg(err) {
			err = errUnsupportedDisk
		}
	}
	return err
}

// readMetadataWithDMTime reads the metadata file and returns its contents
// along with the on-disk modification time of xl.meta.
func (s *xlStorage) readMetadataWithDMTime(ctx context.Context, itemPath string) ([]byte, time.Time, error) {
	if contextCanceled(ctx) {
		return nil, time.Time{}, ctx.Err()
	}

	if err := checkPathLength(itemPath); err != nil {
		return nil, time.Time{}, err
	}

	f, err := OpenFile(itemPath, readMode, 0o666)
	if err != nil {
		return nil, time.Time{}, err
	}
	defer f.Close()
	stat, err := f.Stat()
	if err != nil {
		return nil, time.Time{}, err
	}
	if stat.IsDir() {
		return nil, time.Time{}, &os.PathError{
			Op:   "open",
			Path: itemPath,
			Err:  syscall.EISDIR,
		}
	}
	buf, err := readXLMetaNoData(f, stat.Size())
	if err != nil {
		return nil, stat.ModTime().UTC(), fmt.Errorf("%w -> %s", err, itemPath)
	}
	return buf, stat.ModTime().UTC(), err
}
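
// readMetadata reads 'xl.meta' at the given path, enforcing the configured
// maximum drive timeout.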
func (s *xlStorage) readMetadata(ctx context.Context, itemPath string) ([]byte, error) {
	return xioutil.WithDeadline[[]byte](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) ([]byte, error) {
		buf, _, err := s.readMetadataWithDMTime(ctx, itemPath)
		return buf, err
	})
}
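
// NSScanner scans the namespace on this drive, streaming data usage entries
// over the updates channel while applying lifecycle, replication and tiering
// accounting to each object version encountered; the refreshed usage cache
// is returned.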
func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates chan<- dataUsageEntry, scanMode madmin.HealScanMode, weSleep func() bool) (dataUsageCache, error) {
	atomic.AddInt32(&s.scanning, 1)
	defer atomic.AddInt32(&s.scanning, -1)

	var err error
	stopFn := globalScannerMetrics.log(scannerMetricScanBucketDrive, s.drivePath, cache.Info.Name)
	defer func() {
		res := make(map[string]string)
		if err != nil {
			res["err"] = err.Error()
		}
		stopFn(res)
	}()

	// Updates must be closed before we return.
	defer xioutil.SafeClose(updates)
	var lc *lifecycle.Lifecycle

	// Check if the current bucket has a configured lifecycle policy
	if globalLifecycleSys != nil {
		lc, err = globalLifecycleSys.Get(cache.Info.Name)
		if err == nil && lc.HasActiveRules("") {
			cache.Info.lifeCycle = lc
		}
	}

	// Check if the current bucket has replication configuration
	if rcfg, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, cache.Info.Name); err == nil {
		if rcfg.HasActiveRules("", true) {
			tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, cache.Info.Name)
			if err == nil {
				cache.Info.replication = replicationConfig{
					Config:  rcfg,
					remotes: tgts,
				}
			}
		}
	}

	vcfg, _ := globalBucketVersioningSys.Get(cache.Info.Name)

	// return initialized object layer
	objAPI := newObjectLayerFn()
	// object layer not initialized, return.
	if objAPI == nil {
		return cache, errServerNotInitialized
	}

	poolIdx, setIdx, _ := s.GetDiskLoc()

	disks, err := objAPI.GetDisks(poolIdx, setIdx)
	if err != nil {
		return cache, err
	}

	cache.Info.updates = updates

	dataUsageInfo, err := scanDataFolder(ctx, disks, s.drivePath, cache, func(item scannerItem) (sizeSummary, error) {
		// Look for `xl.meta/xl.json` at the leaf.
		if !strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFile) &&
			!strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFileV1) {
			// if no xl.meta/xl.json found, skip the file.
			return sizeSummary{}, errSkipFile
		}
		stopFn := globalScannerMetrics.log(scannerMetricScanObject, s.drivePath, pathJoin(item.bucket, item.objectPath()))
		res := make(map[string]string, 8)
		defer func() {
			stopFn(res)
		}()

		doneSz := globalScannerMetrics.timeSize(scannerMetricReadMetadata)
		buf, err := s.readMetadata(ctx, item.Path)
		doneSz(len(buf))
		res["metasize"] = strconv.Itoa(len(buf))
		if err != nil {
			res["err"] = err.Error()
			return sizeSummary{}, errSkipFile
		}

		// Remove filename which is the meta file.
		item.transformMetaDir()

		fivs, err := getFileInfoVersions(buf, item.bucket, item.objectPath(), false)
		metaDataPoolPut(buf)
		if err != nil {
			res["err"] = err.Error()
			return sizeSummary{}, errSkipFile
		}

		sizeS := sizeSummary{}
		for _, tier := range globalTierConfigMgr.ListTiers() {
			if sizeS.tiers == nil {
				sizeS.tiers = make(map[string]tierStats)
			}
			sizeS.tiers[tier.Name] = tierStats{}
		}
		if sizeS.tiers != nil {
			sizeS.tiers[storageclass.STANDARD] = tierStats{}
			sizeS.tiers[storageclass.RRS] = tierStats{}
		}

		done := globalScannerMetrics.time(scannerMetricApplyAll)
		objInfos, err := item.applyVersionActions(ctx, objAPI, fivs.Versions, globalExpiryState)
		done()

		if err != nil {
			res["err"] = err.Error()
			return sizeSummary{}, errSkipFile
		}

		versioned := vcfg != nil && vcfg.Versioned(item.objectPath())

		var objDeleted bool
		for _, oi := range objInfos {
			done = globalScannerMetrics.time(scannerMetricApplyVersion)
			var sz int64
			objDeleted, sz = item.applyActions(ctx, objAPI, oi, &sizeS)
			done()

			// DeleteAllVersionsAction: The object and all its
			// versions are expired and no longer contribute
			// toward data usage.
			if objDeleted {
				break
			}
			actualSz, err := oi.GetActualSize()
			if err != nil {
				continue
			}

			if oi.DeleteMarker {
				sizeS.deleteMarkers++
			}
			if oi.VersionID != "" && sz == actualSz {
				sizeS.versions++
			}
			sizeS.totalSize += sz

			// Skip tier accounting if object version is a delete-marker or a free-version
			// tracking deleted transitioned objects
			switch {
			case oi.DeleteMarker, oi.TransitionedObject.FreeVersion:
				continue
			}
			tier := oi.StorageClass
			if tier == "" {
				tier = storageclass.STANDARD // no SC means "STANDARD"
			}
			if oi.TransitionedObject.Status == lifecycle.TransitionComplete {
				tier = oi.TransitionedObject.Tier
			}
			if sizeS.tiers != nil {
				if st, ok := sizeS.tiers[tier]; ok {
					sizeS.tiers[tier] = st.add(oi.tierStats())
				}
			}
		}

		// apply tier sweep action on free versions
		for _, freeVersion := range fivs.FreeVersions {
			oi := freeVersion.ToObjectInfo(item.bucket, item.objectPath(), versioned)
			done = globalScannerMetrics.time(scannerMetricTierObjSweep)
			globalExpiryState.enqueueFreeVersion(oi)
			done()
		}

		// These are rather expensive. Skip if nobody listens.
		if globalTrace.NumSubscribers(madmin.TraceScanner) > 0 {
			if len(fivs.FreeVersions) > 0 {
				res["free-versions"] = strconv.Itoa(len(fivs.FreeVersions))
			}

			if sizeS.versions > 0 {
				res["versions"] = strconv.FormatUint(sizeS.versions, 10)
			}
			res["size"] = strconv.FormatInt(sizeS.totalSize, 10)
			for name, tier := range sizeS.tiers {
				res["tier-size-"+name] = strconv.FormatUint(tier.TotalSize, 10)
				res["tier-versions-"+name] = strconv.Itoa(tier.NumVersions)
			}
			if sizeS.failedCount > 0 {
				res["repl-failed"] = fmt.Sprintf("%d versions, %d bytes", sizeS.failedCount, sizeS.failedSize)
			}
			if sizeS.pendingCount > 0 {
				res["repl-pending"] = fmt.Sprintf("%d versions, %d bytes", sizeS.pendingCount, sizeS.pendingSize)
			}
			for tgt, st := range sizeS.replTargetStats {
				res["repl-size-"+tgt] = strconv.FormatInt(st.replicatedSize, 10)
				res["repl-count-"+tgt] = strconv.FormatInt(st.replicatedCount, 10)
				if st.failedCount > 0 {
					res["repl-failed-"+tgt] = fmt.Sprintf("%d versions, %d bytes", st.failedCount, st.failedSize)
				}
				if st.pendingCount > 0 {
					res["repl-pending-"+tgt] = fmt.Sprintf("%d versions, %d bytes", st.pendingCount, st.pendingSize)
				}
			}
		}
		if objDeleted {
			// we return errIgnoreFileContrib to signal this function's
			// callers to skip this object's contribution towards
			// usage.
			return sizeSummary{}, errIgnoreFileContrib
		}
		return sizeS, nil
	}, scanMode, weSleep)
	if err != nil {
		return dataUsageInfo, err
	}

	dataUsageInfo.Info.LastUpdate = time.Now()
	return dataUsageInfo, nil
}
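
// getDeleteAttribute returns the cumulative delete count persisted as a
// little-endian uint64 in the 'user.total_deletes' extended attribute of
// the format file.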
func (s *xlStorage) getDeleteAttribute() uint64 {
	attr := "user.total_deletes"
	buf, err := xattr.LGet(s.formatFile, attr)
	if err != nil {
		// We start off with '0' if we can't read the attributes
		return 0
	}
	return binary.LittleEndian.Uint64(buf[:8])
}
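
// getWriteAttribute returns the cumulative write count persisted as a
// little-endian uint64 in the 'user.total_writes' extended attribute of
// the format file.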
func (s *xlStorage) getWriteAttribute() uint64 {
	attr := "user.total_writes"
	buf, err := xattr.LGet(s.formatFile, attr)
	if err != nil {
		// We start off with '0' if we can't read the attributes
		return 0
	}

	return binary.LittleEndian.Uint64(buf[:8])
}
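
// setDeleteAttribute persists the cumulative delete count in the
// 'user.total_deletes' extended attribute of the format file.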
func (s *xlStorage) setDeleteAttribute(deleteCount uint64) error {
	attr := "user.total_deletes"

	data := make([]byte, 8)
	binary.LittleEndian.PutUint64(data, deleteCount)
	return xattr.LSet(s.formatFile, attr, data)
}
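
// setWriteAttribute persists the cumulative write count in the
// 'user.total_writes' extended attribute of the format file.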
func (s *xlStorage) setWriteAttribute(writeCount uint64) error {
	attr := "user.total_writes"

	data := make([]byte, 8)
	binary.LittleEndian.PutUint64(data, writeCount)
	return xattr.LSet(s.formatFile, attr, data)
}

// DiskInfo provides current information about disk space usage,
// total free inodes and underlying filesystem.
func (s *xlStorage) DiskInfo(ctx context.Context, _ DiskInfoOptions) (info DiskInfo, err error) {
	info, err = s.diskInfoCache.GetWithCtx(ctx)
	info.NRRequests = s.nrRequests
	info.Rotational = s.rotational
	info.MountPath = s.drivePath
	info.Endpoint = s.endpoint.String()
	info.Scanning = atomic.LoadInt32(&s.scanning) == 1
	return info, err
}

// getVolDir - will convert incoming volume names to
// corresponding valid volume names on the backend in a platform
// compatible way for all operating systems. If volume is not found
// an error is generated.
func (s *xlStorage) getVolDir(volume string) (string, error) {
	if volume == "" || volume == "." || volume == ".." {
		return "", errVolumeNotFound
	}
	volumeDir := pathJoin(s.drivePath, volume)
	return volumeDir, nil
}
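
// checkFormatJSON stats 'format.json' on this drive and maps failures to
// typed disk errors such as errUnformattedDisk, errDiskNotFound,
// errDiskAccessDenied and errCorruptedBackend.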
func (s *xlStorage) checkFormatJSON() (os.FileInfo, error) {
	fi, err := Lstat(s.formatFile)
	if err != nil {
		// If the disk is still not initialized.
		if osIsNotExist(err) {
			if err = Access(s.drivePath); err == nil {
				// Disk is present but missing `format.json`
				return nil, errUnformattedDisk
			}
			if osIsNotExist(err) {
				return nil, errDiskNotFound
			} else if osIsPermission(err) {
				return nil, errDiskAccessDenied
			}
			storageLogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors
			return nil, errCorruptedBackend
		} else if osIsPermission(err) {
			return nil, errDiskAccessDenied
		}
		storageLogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors
		return nil, errCorruptedBackend
	}
	return fi, nil
}

// GetDiskID - returns the cached disk uuid
func (s *xlStorage) GetDiskID() (string, error) {
	s.RLock()
	diskID := s.diskID
	fileInfo := s.formatFileInfo
	lastCheck := s.formatLastCheck

	// check if we have a valid disk ID that is less than 1 second old.
	if fileInfo != nil && diskID != "" && time.Since(lastCheck) <= 1*time.Second {
		s.RUnlock()
		return diskID, nil
	}
	s.RUnlock()

	fi, err := s.checkFormatJSON()
	if err != nil {
		return "", err
	}

	if xioutil.SameFile(fi, fileInfo) && diskID != "" {
		s.Lock()
		// If the file has not changed, just return the cached diskID information.
		s.formatLastCheck = time.Now()
		s.Unlock()
		return diskID, nil
	}

	b, err := os.ReadFile(s.formatFile)
	if err != nil {
		// If the disk is still not initialized.
		if osIsNotExist(err) {
			if err = Access(s.drivePath); err == nil {
				// Disk is present but missing `format.json`
				return "", errUnformattedDisk
			}
			if osIsNotExist(err) {
				return "", errDiskNotFound
			} else if osIsPermission(err) {
				return "", errDiskAccessDenied
			}
			storageLogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors
			return "", errCorruptedBackend
		} else if osIsPermission(err) {
			return "", errDiskAccessDenied
		}
		storageLogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors
		return "", errCorruptedBackend
	}

	format := &formatErasureV3{}
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(b, &format); err != nil {
		bugLogIf(GlobalContext, err) // log unexpected errors
		return "", errCorruptedFormat
	}

	m, n, err := findDiskIndexByDiskID(format, format.Erasure.This)
	if err != nil {
		return "", err
	}

	diskID = format.Erasure.This
	ep := s.endpoint
	if m != ep.SetIdx || n != ep.DiskIdx {
		storageLogOnceIf(GlobalContext,
			fmt.Errorf("unexpected drive ordering on pool: %s: found drive at (set=%s, drive=%s), expected at (set=%s, drive=%s): %s(%s): %w",
				humanize.Ordinal(ep.PoolIdx+1), humanize.Ordinal(m+1), humanize.Ordinal(n+1), humanize.Ordinal(ep.SetIdx+1), humanize.Ordinal(ep.DiskIdx+1),
				s, s.diskID, errInconsistentDisk), "drive-order-format-json")
		return "", errInconsistentDisk
	}
	s.Lock()
	s.diskID = diskID
	s.formatLegacy = format.Erasure.DistributionAlgo == formatErasureVersionV2DistributionAlgoV1
	s.formatFileInfo = fi
	s.formatData = b
	s.formatLastCheck = time.Now()
	s.Unlock()
	return diskID, nil
}

// SetDiskID - no-op.
func (s *xlStorage) SetDiskID(id string) {
	// NO-OP for xlStorage as it is handled either by xlStorageDiskIDCheck{} for local disks or
	// storage rest server for remote disks.
}
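
// MakeVolBulk creates each of the given volumes, tolerating volumes that
// already exist.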
func (s *xlStorage) MakeVolBulk(ctx context.Context, volumes ...string) error {
	for _, volume := range volumes {
		err := s.MakeVol(ctx, volume)
		if err != nil && !errors.Is(err, errVolumeExists) {
			return err
		}
		diskHealthCheckOK(ctx, err)
	}
	return nil
}

// Make a volume entry.
func (s *xlStorage) MakeVol(ctx context.Context, volume string) error {
	if !isValidVolname(volume) {
		return errInvalidArgument
	}

	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	if err = Access(volumeDir); err != nil {
		// Volume does not exist we proceed to create.
		if osIsNotExist(err) {
			// Make a volume entry, with mode 0777 mkdir honors system umask.
			err = mkdirAll(volumeDir, 0o777, s.drivePath)
		}
		if osIsPermission(err) {
			return errDiskAccessDenied
		} else if isSysErrIO(err) {
			return errFaultyDisk
		}
		return err
	}

	// Stat succeeds we return errVolumeExists.
	return errVolumeExists
}

// ListVols - list volumes.
func (s *xlStorage) ListVols(ctx context.Context) (volsInfo []VolInfo, err error) {
	return listVols(ctx, s.drivePath)
}

// List all the volumes from drivePath.
func listVols(ctx context.Context, dirPath string) ([]VolInfo, error) {
	if err := checkPathLength(dirPath); err != nil {
		return nil, err
	}
	entries, err := readDir(dirPath)
	if err != nil {
		if errors.Is(err, errFileAccessDenied) {
			return nil, errDiskAccessDenied
		} else if errors.Is(err, errFileNotFound) {
			return nil, errDiskNotFound
		}
		return nil, err
	}
	volsInfo := make([]VolInfo, 0, len(entries))
	for _, entry := range entries {
		if !HasSuffix(entry, SlashSeparator) || !isValidVolname(pathutil.Clean(entry)) {
			// Skip if entry is neither a directory nor a valid volume name.
			continue
		}
		volsInfo = append(volsInfo, VolInfo{
			Name: pathutil.Clean(entry),
		})
	}
	return volsInfo, nil
}

// StatVol - get volume info.
func (s *xlStorage) StatVol(ctx context.Context, volume string) (vol VolInfo, err error) {
	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return VolInfo{}, err
	}

	// Stat a volume entry.
	var st os.FileInfo
	st, err = Lstat(volumeDir)
	if err != nil {
		switch {
		case osIsNotExist(err):
			return VolInfo{}, errVolumeNotFound
		case osIsPermission(err):
			return VolInfo{}, errDiskAccessDenied
		case isSysErrIO(err):
			return VolInfo{}, errFaultyDisk
		default:
			return VolInfo{}, err
		}
	}
	// As os.Lstat() doesn't carry anything other than ModTime(), use ModTime()
	// as CreatedTime.
	createdTime := st.ModTime()
	return VolInfo{
		Name:    volume,
		Created: createdTime,
	}, nil
}
|
|
|
|
|
|
|
|
// DeleteVol - delete a volume.
func (s *xlStorage) DeleteVol(ctx context.Context, volume string, forceDelete bool) (err error) {
	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	if forceDelete {
		err = s.moveToTrash(volumeDir, true, true)
	} else {
		err = Remove(volumeDir)
	}

	if err != nil {
		switch {
		case errors.Is(err, errFileNotFound):
			return errVolumeNotFound
		case osIsNotExist(err):
			return errVolumeNotFound
		case isSysErrNotEmpty(err):
			return errVolumeNotEmpty
		case osIsPermission(err):
			return errDiskAccessDenied
		case isSysErrIO(err):
			return errFaultyDisk
		default:
			return err
		}
	}
	return nil
}

// ListDir - return all the entries at the given directory path.
// If an entry is a directory it will be returned with a trailing SlashSeparator.
func (s *xlStorage) ListDir(ctx context.Context, origvolume, volume, dirPath string, count int) (entries []string, err error) {
	if contextCanceled(ctx) {
		return nil, ctx.Err()
	}

	if origvolume != "" {
		if !skipAccessChecks(origvolume) {
			origvolumeDir, err := s.getVolDir(origvolume)
			if err != nil {
				return nil, err
			}
			if err = Access(origvolumeDir); err != nil {
				return nil, convertAccessError(err, errVolumeAccessDenied)
			}
		}
	}

	// Verify if volume is valid and it exists.
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return nil, err
	}

	dirPathAbs := pathJoin(volumeDir, dirPath)
	if count > 0 {
		entries, err = readDirN(dirPathAbs, count)
	} else {
		entries, err = readDir(dirPathAbs)
	}
	if err != nil {
		if errors.Is(err, errFileNotFound) && !skipAccessChecks(volume) {
			if ierr := Access(volumeDir); ierr != nil {
				return nil, convertAccessError(ierr, errVolumeAccessDenied)
			}
		}
		return nil, err
	}

	return entries, nil
}

func (s *xlStorage) deleteVersions(ctx context.Context, volume, path string, fis ...FileInfo) error {
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	discard := true

	var legacyJSON bool
	buf, _, err := s.readAllData(ctx, volume, volumeDir, pathJoin(volumeDir, path, xlStorageFormatFile), discard)
	if err != nil {
		if !errors.Is(err, errFileNotFound) {
			return err
		}

		s.RLock()
		legacy := s.formatLegacy
		s.RUnlock()
		if legacy {
			buf, _, err = s.readAllData(ctx, volume, volumeDir, pathJoin(volumeDir, path, xlStorageFormatFileV1), discard)
			if err != nil {
				return err
			}
			legacyJSON = true
		}
	}

	if len(buf) == 0 {
		if errors.Is(err, errFileNotFound) && !skipAccessChecks(volume) {
			if aerr := Access(volumeDir); aerr != nil && osIsNotExist(aerr) {
				return errVolumeNotFound
			}
		}
		return errFileNotFound
	}

	if legacyJSON {
		// Delete the meta file, if there are no more versions the
		// top level parent is automatically removed.
		return s.deleteFile(volumeDir, pathJoin(volumeDir, path), true, false)
	}

	var xlMeta xlMetaV2
	if err = xlMeta.LoadOrConvert(buf); err != nil {
		return err
	}

	for _, fi := range fis {
		dataDir, err := xlMeta.DeleteVersion(fi)
		if err != nil {
			if !fi.Deleted && (err == errFileNotFound || err == errFileVersionNotFound) {
				// Ignore these since they do not exist
				continue
			}
			return err
		}
		if dataDir != "" {
			versionID := fi.VersionID
			if versionID == "" {
				versionID = nullVersionID
			}

			// PR #11758 used DataDir, preserve it
			// for users who might have used master
			// branch
			xlMeta.data.remove(versionID, dataDir)

			// We need to attempt delete "dataDir" on the disk
			// due to a CopyObject() bug where it might have
			// inlined the data incorrectly, to avoid a situation
			// where we potentially leave "DataDir"
			filePath := pathJoin(volumeDir, path, dataDir)
			if err = checkPathLength(filePath); err != nil {
				return err
			}
			if err = s.moveToTrash(filePath, true, false); err != nil {
				if err != errFileNotFound {
					return err
				}
			}
		}
	}

	lastVersion := len(xlMeta.versions) == 0
	if !lastVersion {
		buf, err = xlMeta.AppendTo(metaDataPoolGet())
		defer metaDataPoolPut(buf)
		if err != nil {
			return err
		}

		return s.WriteAll(ctx, volume, pathJoin(path, xlStorageFormatFile), buf)
	}

	return s.deleteFile(volumeDir, pathJoin(volumeDir, path, xlStorageFormatFile), true, false)
}

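// Note: deleteVersions works in two phases. It first loads the current
// xl.meta (falling back to legacy xl.json when the backend is old-format),
// then drops the requested versions and their data directories, and finally
// either rewrites xl.meta when versions remain or deletes it when none do.
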
// DeleteVersions deletes a slice of versions; they can belong to the same
// object or to multiple objects.
func (s *xlStorage) DeleteVersions(ctx context.Context, volume string, versions []FileInfoVersions, opts DeleteOptions) []error {
	errs := make([]error, len(versions))

	for i, fiv := range versions {
		if contextCanceled(ctx) {
			errs[i] = ctx.Err()
			continue
		}
		w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
		if err := w.Run(func() error { return s.deleteVersions(ctx, volume, fiv.Name, fiv.Versions...) }); err != nil {
			errs[i] = err
		}
		diskHealthCheckOK(ctx, errs[i])
	}

	return errs
}

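// Note: each per-object deletion above runs inside a deadline worker bound by
// globalDriveConfig.GetMaxTimeout(), so a slow or unresponsive drive cannot
// stall the whole batch; diskHealthCheckOK records the per-entry outcome.
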
func (s *xlStorage) cleanupTrashImmediateCallers(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case entry := <-s.immediatePurge:
			removeAll(entry)
		}
	}
}

const almostFilledPercent = 0.05

func (s *xlStorage) diskAlmostFilled() bool {
	info, err := s.diskInfoCache.Get()
	if err != nil {
		return false
	}
	if info.Used == 0 || info.UsedInodes == 0 {
		return false
	}
	return (float64(info.Free)/float64(info.Used)) < almostFilledPercent || (float64(info.FreeInodes)/float64(info.UsedInodes)) < almostFilledPercent
}

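// Note: diskAlmostFilled reports true once free capacity drops below 5% of
// used capacity, or free inodes drop below 5% of used inodes, based on the
// cached DiskInfo; moveToTrash uses this to switch to immediate purging.
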
func (s *xlStorage) moveToTrash(filePath string, recursive, immediatePurge bool) (err error) {
	pathUUID := mustGetUUID()
	targetPath := pathutil.Join(s.drivePath, minioMetaTmpDeletedBucket, pathUUID)

	if recursive {
		err = renameAll(filePath, targetPath, pathutil.Join(s.drivePath, minioMetaBucket))
	} else {
		err = Rename(filePath, targetPath)
	}

	var targetPath2 string
	if immediatePurge && HasSuffix(filePath, SlashSeparator) {
		// With immediate purge also attempt deleting for `__XL_DIR__` folder/directory objects.
		targetPath2 = pathutil.Join(s.drivePath, minioMetaTmpDeletedBucket, mustGetUUID())
		renameAll(encodeDirObject(filePath), targetPath2, pathutil.Join(s.drivePath, minioMetaBucket))
	}

	// ENOSPC is a valid error from rename(); remove instead of rename in that case
	if errors.Is(err, errDiskFull) || isSysErrNoSpace(err) {
		if recursive {
			err = removeAll(filePath)
		} else {
			err = Remove(filePath)
		}
		return err // Avoid the immediate purge since not needed
	}

	if err != nil {
		return err
	}

	if !immediatePurge && s.diskAlmostFilled() {
		immediatePurge = true
	}

	// immediately purge the target
	if immediatePurge {
		for _, target := range []string{
			targetPath,
			targetPath2,
		} {
			if target == "" {
				continue
			}
			select {
			case s.immediatePurge <- target:
			default:
				// Too much back pressure; perform the delete
				// blocking at this point to serialize operations.
				removeAll(target)
			}
		}
	}

	return nil
}

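// Note: moving to trash keeps deletes cheap on the caller's path. The entry
// is renamed into minioMetaTmpDeletedBucket and the actual removal is handed
// to cleanupTrashImmediateCallers via the s.immediatePurge channel, falling
// back to a blocking removeAll only when the channel is full.
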
// DeleteVersion - deletes FileInfo metadata for path at `xl.meta`. forceDelMarker
// will force creating a new `xl.meta` to create a new delete marker
func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool, opts DeleteOptions) (err error) {
	if HasSuffix(path, SlashSeparator) {
		return s.Delete(ctx, volume, path, DeleteOptions{
			Recursive: false,
			Immediate: false,
		})
	}

	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	// Validate file path length, before reading.
	filePath := pathJoin(volumeDir, path)
	if err = checkPathLength(filePath); err != nil {
		return err
	}

	var legacyJSON bool
	buf, _, err := s.readAllData(ctx, volume, volumeDir, pathJoin(filePath, xlStorageFormatFile), true)
	if err != nil {
		if !errors.Is(err, errFileNotFound) {
			return err
		}
		metaDataPoolPut(buf) // Never used, return it
		if fi.Deleted && forceDelMarker {
			// Create a new xl.meta with a delete marker in it
			return s.WriteMetadata(ctx, "", volume, path, fi)
		}

		s.RLock()
		legacy := s.formatLegacy
		s.RUnlock()
		if legacy {
			buf, err = s.ReadAll(ctx, volume, pathJoin(path, xlStorageFormatFileV1))
			if err != nil {
				if errors.Is(err, errFileNotFound) && fi.VersionID != "" {
					return errFileVersionNotFound
				}
				return err
			}
			legacyJSON = true
		}
	}

	if len(buf) == 0 {
		if fi.VersionID != "" {
			return errFileVersionNotFound
		}
		return errFileNotFound
	}

	if legacyJSON {
		// Delete the meta file, if there are no more versions the
		// top level parent is automatically removed.
		return s.deleteFile(volumeDir, pathJoin(volumeDir, path), true, false)
	}

	var xlMeta xlMetaV2
	if err = xlMeta.LoadOrConvert(buf); err != nil {
		return err
	}

	dataDir, err := xlMeta.DeleteVersion(fi)
	if err != nil {
		return err
	}
	if dataDir != "" {
		versionID := fi.VersionID
		if versionID == "" {
			versionID = nullVersionID
		}
		// PR #11758 used DataDir, preserve it
		// for users who might have used master
		// branch
		xlMeta.data.remove(versionID, dataDir)

		// We need to attempt delete "dataDir" on the disk
		// due to a CopyObject() bug where it might have
		// inlined the data incorrectly, to avoid a situation
		// where we potentially leave "DataDir"
		filePath := pathJoin(volumeDir, path, dataDir)
		if err = checkPathLength(filePath); err != nil {
			return err
		}
		if err = s.moveToTrash(filePath, true, false); err != nil {
			if err != errFileNotFound {
				return err
			}
		}
	}

	if len(xlMeta.versions) != 0 {
		// xl.meta must still exist for other versions, dataDir is purged.
		buf, err = xlMeta.AppendTo(metaDataPoolGet())
		defer metaDataPoolPut(buf)
		if err != nil {
			return err
		}

		return s.WriteAll(ctx, volume, pathJoin(path, xlStorageFormatFile), buf)
	}

	if opts.UndoWrite && opts.OldDataDir != "" {
		return renameAll(pathJoin(filePath, opts.OldDataDir, xlStorageFormatFileBackup), pathJoin(filePath, xlStorageFormatFile), filePath)
	}

	return s.deleteFile(volumeDir, pathJoin(volumeDir, path, xlStorageFormatFile), true, false)
}

// Updates only metadata for a given version.
func (s *xlStorage) UpdateMetadata(ctx context.Context, volume, path string, fi FileInfo, opts UpdateMetadataOpts) (err error) {
	if len(fi.Metadata) == 0 {
		return errInvalidArgument
	}

	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	// Validate file path length, before reading.
	filePath := pathJoin(volumeDir, path)
	if err = checkPathLength(filePath); err != nil {
		return err
	}

	buf, err := s.ReadAll(ctx, volume, pathJoin(path, xlStorageFormatFile))
	if err != nil {
		if err == errFileNotFound && fi.VersionID != "" {
			return errFileVersionNotFound
		}
		return err
	}
	defer metaDataPoolPut(buf)

	if !isXL2V1Format(buf) {
		return errFileVersionNotFound
	}

	var xlMeta xlMetaV2
	if err = xlMeta.Load(buf); err != nil {
		return err
	}

	if err = xlMeta.UpdateObjectVersion(fi); err != nil {
		return err
	}

	wbuf, err := xlMeta.AppendTo(metaDataPoolGet())
	if err != nil {
		return err
	}
	defer metaDataPoolPut(wbuf)

	return s.writeAll(ctx, volume, pathJoin(path, xlStorageFormatFile), wbuf, !opts.NoPersistence, volumeDir)
}

// WriteMetadata - writes FileInfo metadata for path at `xl.meta`
func (s *xlStorage) WriteMetadata(ctx context.Context, origvolume, volume, path string, fi FileInfo) (err error) {
	if fi.Fresh {
		if origvolume != "" {
			origvolumeDir, err := s.getVolDir(origvolume)
			if err != nil {
				return err
			}

			if !skipAccessChecks(origvolume) {
				// Stat a volume entry.
				if err = Access(origvolumeDir); err != nil {
					return convertAccessError(err, errVolumeAccessDenied)
				}
			}
		}

		var xlMeta xlMetaV2
		if err := xlMeta.AddVersion(fi); err != nil {
			return err
		}
		buf, err := xlMeta.AppendTo(metaDataPoolGet())
		defer metaDataPoolPut(buf)
		if err != nil {
			return err
		}
		// First writes for special situations do not write to stable storage.
		// this is currently used by
		// - ephemeral objects such as objects created during listObjects() calls
		// - newMultipartUpload() call.
		return s.writeAll(ctx, volume, pathJoin(path, xlStorageFormatFile), buf, false, "")
	}

	buf, err := s.ReadAll(ctx, volume, pathJoin(path, xlStorageFormatFile))
	if err != nil && err != errFileNotFound {
		return err
	}
	defer metaDataPoolPut(buf)

	var xlMeta xlMetaV2
	if !isXL2V1Format(buf) {
		// This is both legacy and without proper version.
		if err = xlMeta.AddVersion(fi); err != nil {
			return err
		}

		buf, err = xlMeta.AppendTo(metaDataPoolGet())
		defer metaDataPoolPut(buf)
		if err != nil {
			return err
		}
	} else {
		if err = xlMeta.Load(buf); err != nil {
			// Corrupted data, reset and write.
			xlMeta = xlMetaV2{}
		}

		if err = xlMeta.AddVersion(fi); err != nil {
			return err
		}

		buf, err = xlMeta.AppendTo(metaDataPoolGet())
		defer metaDataPoolPut(buf)
		if err != nil {
			return err
		}
	}

	return s.WriteAll(ctx, volume, pathJoin(path, xlStorageFormatFile), buf)
}

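// Note: the fi.Fresh path above intentionally skips the read-modify-write
// cycle and persists with sync disabled, since such first writes (listing
// transients, newMultipartUpload) do not need stable storage guarantees.
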
func (s *xlStorage) renameLegacyMetadata(volumeDir, path string) (err error) {
	s.RLock()
	legacy := s.formatLegacy
	s.RUnlock()
	if !legacy {
		// if it's not a legacy backend then this function is
		// a no-op and always returns errFileNotFound
		return errFileNotFound
	}

	// Validate file path length, before reading.
	filePath := pathJoin(volumeDir, path)
	if err = checkPathLength(filePath); err != nil {
		return err
	}

	srcFilePath := pathJoin(filePath, xlStorageFormatFileV1)
	dstFilePath := pathJoin(filePath, xlStorageFormatFile)

	// Renaming xl.json to xl.meta should be fully synced to disk.
	defer func() {
		if err == nil && s.globalSync {
			// Sync to disk only upon success.
			globalSync()
		}
	}()

	if err = Rename(srcFilePath, dstFilePath); err != nil {
		switch {
		case isSysErrNotDir(err):
			return errFileNotFound
		case isSysErrPathNotFound(err):
			return errFileNotFound
		case isSysErrCrossDevice(err):
			return fmt.Errorf("%w (%s)->(%s)", errCrossDeviceLink, srcFilePath, dstFilePath)
		case osIsNotExist(err):
			return errFileNotFound
		case osIsExist(err):
			// This is returned only when destination is a directory and we
			// are attempting a rename from file to directory.
			return errIsNotRegular
		default:
			return err
		}
	}
	return nil
}

func (s *xlStorage) readRaw(ctx context.Context, volume, volumeDir, filePath string, readData bool) (buf []byte, dmTime time.Time, err error) {
	if filePath == "" {
		return nil, dmTime, errFileNotFound
	}

	xlPath := pathJoin(filePath, xlStorageFormatFile)
	if readData {
		buf, dmTime, err = s.readAllData(ctx, volume, volumeDir, xlPath, false)
	} else {
		buf, dmTime, err = s.readMetadataWithDMTime(ctx, xlPath)
		if err != nil {
			if osIsNotExist(err) {
				if !skipAccessChecks(volume) {
					if aerr := Access(volumeDir); aerr != nil && osIsNotExist(aerr) {
						return nil, time.Time{}, errVolumeNotFound
					}
				}
			}
			err = osErrToFileErr(err)
		}
	}

	s.RLock()
	legacy := s.formatLegacy
	s.RUnlock()

	if err != nil && errors.Is(err, errFileNotFound) && legacy {
		buf, dmTime, err = s.readAllData(ctx, volume, volumeDir, pathJoin(filePath, xlStorageFormatFileV1), false)
		if err != nil {
			return nil, time.Time{}, err
		}
	}

	if len(buf) == 0 {
		if err != nil {
			return nil, time.Time{}, err
		}
		return nil, time.Time{}, errFileNotFound
	}

	return buf, dmTime, nil
}

// ReadXL reads from path/xl.meta, does not interpret the data it reads. This
// is a raw call equivalent of ReadVersion().
func (s *xlStorage) ReadXL(ctx context.Context, volume, path string, readData bool) (RawFileInfo, error) {
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return RawFileInfo{}, err
	}

	// Validate file path length, before reading.
	filePath := pathJoin(volumeDir, path)
	if err = checkPathLength(filePath); err != nil {
		return RawFileInfo{}, err
	}

	buf, _, err := s.readRaw(ctx, volume, volumeDir, filePath, readData)
	return RawFileInfo{
		Buf: buf,
	}, err
}

// ReadOptions optional inputs for ReadVersion
type ReadOptions struct {
	ReadData bool
	Healing  bool
}

// ReadVersion - reads metadata and returns FileInfo at path `xl.meta`
// for all objects less than `128KiB` this call returns data as well
// along with metadata.
func (s *xlStorage) ReadVersion(ctx context.Context, origvolume, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) {
	if origvolume != "" {
		origvolumeDir, err := s.getVolDir(origvolume)
		if err != nil {
			return fi, err
		}

		if !skipAccessChecks(origvolume) {
			// Stat a volume entry.
			if err = Access(origvolumeDir); err != nil {
				return fi, convertAccessError(err, errVolumeAccessDenied)
			}
		}
	}

	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return fi, err
	}

	// Validate file path length, before reading.
	filePath := pathJoin(volumeDir, path)
	if err = checkPathLength(filePath); err != nil {
		return fi, err
	}

	readData := opts.ReadData

	buf, _, err := s.readRaw(ctx, volume, volumeDir, filePath, readData)
	if err != nil {
		if err == errFileNotFound {
			if versionID != "" {
				return fi, errFileVersionNotFound
			}
		}
		return fi, err
	}

	fi, err = getFileInfo(buf, volume, path, versionID, readData, true)
	if err != nil {
		return fi, err
	}

	if len(fi.Data) == 0 {
		// We did not read inline data, so we have no references.
		defer metaDataPoolPut(buf)
	}

	if readData {
		if len(fi.Data) > 0 || fi.Size == 0 {
			if fi.InlineData() {
				// If written with header we are fine.
				return fi, nil
			}
			if fi.Size == 0 || !(fi.VersionID != "" && fi.VersionID != nullVersionID) {
				// If versioned we have no conflicts.
				fi.SetInlineData()
				return fi, nil
			}

			// For overwritten objects without header we might have a
			// conflict with data written later. Check the data path
			// if there is a part with data.
			partPath := fmt.Sprintf("part.%d", fi.Parts[0].Number)
			dataPath := pathJoin(path, fi.DataDir, partPath)
			_, lerr := Lstat(pathJoin(volumeDir, dataPath))
			if lerr != nil {
				// Set the inline header, our inlined data is fine.
				fi.SetInlineData()
				return fi, nil
			}
			// Data exists on disk, remove the version from metadata.
			fi.Data = nil
		}

		// Reading data for small objects when
		// - object has not yet transitioned
		// - object size less than 128KiB
		// - object has a maximum of 1 part
		if fi.TransitionStatus == "" &&
			fi.DataDir != "" && fi.Size <= smallFileThreshold &&
			len(fi.Parts) == 1 {
			partPath := fmt.Sprintf("part.%d", fi.Parts[0].Number)
			dataPath := pathJoin(volumeDir, path, fi.DataDir, partPath)
			fi.Data, _, err = s.readAllData(ctx, volume, volumeDir, dataPath, false)
			if err != nil {
				return FileInfo{}, err
			}
		}
	}

	if !skipAccessChecks(volume) && !opts.Healing && fi.TransitionStatus == "" && !fi.InlineData() && len(fi.Data) == 0 && fi.DataDir != "" && fi.DataDir != emptyUUID && fi.VersionPurgeStatus().Empty() {
		// Verify if the dataDir is present or not when the data
		// is not inlined to make sure we return correct errors
		// during HeadObject().

		// Healing must not come here and return error, since healing
		// deals with dataDirs directly, let healing fix things automatically.
		if lerr := Access(pathJoin(volumeDir, path, fi.DataDir)); lerr != nil {
			if os.IsNotExist(lerr) {
				// Data dir is missing, we must return errFileCorrupt
				return FileInfo{}, errFileCorrupt
			}
			return FileInfo{}, osErrToFileErr(lerr)
		}
	}

	return fi, nil
}

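// Note: ReadVersion resolves inline data in three steps: metadata written
// with the inline header is trusted as-is, unversioned or zero-sized objects
// are marked inline, and for ambiguous overwrites the on-disk part path is
// probed, preferring the on-disk copy when it exists.
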
func (s *xlStorage) readAllData(ctx context.Context, volume, volumeDir string, filePath string, discard bool) (buf []byte, dmTime time.Time, err error) {
	if filePath == "" {
		return nil, dmTime, errFileNotFound
	}

	if contextCanceled(ctx) {
		return nil, time.Time{}, ctx.Err()
	}

	f, err := OpenFile(filePath, readMode, 0o666)
	if err != nil {
		switch {
		case osIsNotExist(err):
			// Check if the object doesn't exist because its bucket
			// is missing in order to return the correct error.
			if !skipAccessChecks(volume) {
				if err = Access(volumeDir); err != nil && osIsNotExist(err) {
					return nil, dmTime, errVolumeNotFound
				}
			}
			return nil, dmTime, errFileNotFound
		case osIsPermission(err):
			return nil, dmTime, errFileAccessDenied
		case isSysErrNotDir(err) || isSysErrIsDir(err):
			return nil, dmTime, errFileNotFound
		case isSysErrHandleInvalid(err):
			// This case is special and needs to be handled for windows.
			return nil, dmTime, errFileNotFound
		case isSysErrIO(err):
			return nil, dmTime, errFaultyDisk
		case isSysErrTooManyFiles(err):
			return nil, dmTime, errTooManyOpenFiles
		case isSysErrInvalidArg(err):
			st, _ := Lstat(filePath)
			if st != nil && st.IsDir() {
				// Linux returns InvalidArg for directory O_DIRECT
				// we need to keep this fallback code to return correct
				// errors upwards.
				return nil, dmTime, errFileNotFound
			}
			return nil, dmTime, errUnsupportedDisk
		}
		return nil, dmTime, err
	}

	if discard {
		// This discard is mostly true for DELETEs
		// so we need to make sure we do not keep
		// page-cache references after.
		defer disk.Fdatasync(f)
	}

	defer f.Close()

	// Get size for precise allocation.
	stat, err := f.Stat()
	if err != nil {
		buf, err = io.ReadAll(f)
		return buf, dmTime, osErrToFileErr(err)
	}
	if stat.IsDir() {
		return nil, dmTime, errFileNotFound
	}

	// Read into appropriate buffer.
	sz := stat.Size()
	if sz <= metaDataReadDefault {
		buf = metaDataPoolGet()
		buf = buf[:sz]
	} else {
		buf = make([]byte, sz)
	}

	// Read file...
	_, err = io.ReadFull(f, buf)

	return buf, stat.ModTime().UTC(), osErrToFileErr(err)
}

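// Note: readAllData sizes its allocation from Stat(): reads up to
// metaDataReadDefault bytes reuse a pooled buffer from metaDataPoolGet(),
// while larger reads allocate exactly once, avoiding io.ReadAll-style
// incremental buffer growth.
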
// ReadAll is a raw call, reads content at any path and returns the buffer.
func (s *xlStorage) ReadAll(ctx context.Context, volume string, path string) (buf []byte, err error) {
	// Specific optimization to avoid re-read from the drives for `format.json`
	// in-case the caller is a network operation.
	if volume == minioMetaBucket && path == formatConfigFile {
		s.RLock()
		formatData := make([]byte, len(s.formatData))
		copy(formatData, s.formatData)
		s.RUnlock()
		if len(formatData) > 0 {
			return formatData, nil
		}
	}
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return nil, err
	}

	// Validate file path length, before reading.
	filePath := pathJoin(volumeDir, path)
	if err = checkPathLength(filePath); err != nil {
		return nil, err
	}

	buf, _, err = s.readAllData(ctx, volume, volumeDir, filePath, false)
	return buf, err
}

// ReadFile reads exactly len(buf) bytes into buf. It returns the
// number of bytes copied. The error is EOF only if no bytes were
// read. On return, n == len(buf) if and only if err == nil. n == 0
// for io.EOF.
//
// If an EOF happens after reading some but not all the bytes,
// ReadFile returns ErrUnexpectedEOF.
//
// If the BitrotVerifier is not nil, ReadFile tries to verify
// whether the disk has bitrot.
//
// Additionally ReadFile also starts reading from an offset. ReadFile
// semantics are same as io.ReadFull.
func (s *xlStorage) ReadFile(ctx context.Context, volume string, path string, offset int64, buffer []byte, verifier *BitrotVerifier) (int64, error) {
	if offset < 0 {
		return 0, errInvalidArgument
	}

	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return 0, err
	}

	var n int

	if !skipAccessChecks(volume) {
		// Stat a volume entry.
		if err = Access(volumeDir); err != nil {
			return 0, convertAccessError(err, errFileAccessDenied)
		}
	}

	// Validate effective path length before reading.
	filePath := pathJoin(volumeDir, path)
	if err = checkPathLength(filePath); err != nil {
		return 0, err
	}

	// Open the file for reading.
	file, err := OpenFile(filePath, readMode, 0o666)
	if err != nil {
		switch {
		case osIsNotExist(err):
			return 0, errFileNotFound
		case osIsPermission(err):
			return 0, errFileAccessDenied
		case isSysErrNotDir(err):
			return 0, errFileAccessDenied
		case isSysErrIO(err):
			return 0, errFaultyDisk
		case isSysErrTooManyFiles(err):
			return 0, errTooManyOpenFiles
		default:
			return 0, err
		}
	}

	// Close the file descriptor.
	defer file.Close()

	st, err := file.Stat()
	if err != nil {
		return 0, err
	}

	// Verify it is a regular file, otherwise subsequent Seek is
	// undefined.
	if !st.Mode().IsRegular() {
		return 0, errIsNotRegular
	}

	if verifier == nil {
		n, err = file.ReadAt(buffer, offset)
		return int64(n), err
	}

	h := verifier.algorithm.New()
	if _, err = io.Copy(h, io.LimitReader(file, offset)); err != nil {
		return 0, err
	}

	if n, err = io.ReadFull(file, buffer); err != nil {
		return int64(n), err
	}

	if _, err = h.Write(buffer); err != nil {
		return 0, err
	}

	if _, err = io.Copy(h, file); err != nil {
		return 0, err
	}

	if !bytes.Equal(h.Sum(nil), verifier.sum) {
		return 0, errFileCorrupt
	}

	return int64(len(buffer)), nil
}

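// Note: when a BitrotVerifier is supplied, ReadFile hashes the entire file in
// stream order: the prefix before offset, the requested buffer, then the
// remainder, and compares the digest against verifier.sum, so even a partial
// read validates whole-file integrity.
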
func (s *xlStorage) openFileDirect(path string, mode int) (f *os.File, err error) {
	w, err := OpenFileDirectIO(path, mode, 0o666)
	if err != nil {
		switch {
		case isSysErrInvalidArg(err):
			return nil, errUnsupportedDisk
		case osIsPermission(err):
			return nil, errDiskAccessDenied
		case isSysErrIO(err):
			return nil, errFaultyDisk
		case isSysErrNotDir(err):
			return nil, errDiskNotDir
		case os.IsNotExist(err):
			return nil, errDiskNotFound
		}
	}

	return w, nil
}

func (s *xlStorage) openFileSync(filePath string, mode int, skipParent string) (f *os.File, err error) {
	return s.openFile(filePath, mode|writeMode, skipParent)
}

func (s *xlStorage) openFile(filePath string, mode int, skipParent string) (f *os.File, err error) {
	if skipParent == "" {
		skipParent = s.drivePath
	}
	// Create top level directories if they don't exist;
	// with mode 0777 mkdir honors system umask.
	if err = mkdirAll(pathutil.Dir(filePath), 0o777, skipParent); err != nil {
		return nil, osErrToFileErr(err)
	}

	w, err := OpenFile(filePath, mode, 0o666)
	if err != nil {
		// File path cannot be verified since one of the parents is a file.
		switch {
		case isSysErrIsDir(err):
			return nil, errIsNotRegular
		case osIsPermission(err):
			return nil, errFileAccessDenied
		case isSysErrNotDir(err):
			return nil, errFileAccessDenied
		case isSysErrIO(err):
			return nil, errFaultyDisk
		case isSysErrTooManyFiles(err):
			return nil, errTooManyOpenFiles
		default:
			return nil, err
		}
	}

	return w, nil
}

type sendFileReader struct {
	io.Reader
	io.Closer
}

// ReadFileStream - Returns the read stream of the file.
func (s *xlStorage) ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error) {
	if offset < 0 {
		return nil, errInvalidArgument
	}

	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return nil, err
	}

	// Validate effective path length before reading.
	filePath := pathJoin(volumeDir, path)
	if err = checkPathLength(filePath); err != nil {
		return nil, err
	}

	file, err := OpenFile(filePath, readMode, 0o666)
	if err != nil {
		switch {
		case osIsNotExist(err):
			if !skipAccessChecks(volume) {
				if err = Access(volumeDir); err != nil && osIsNotExist(err) {
					return nil, errVolumeNotFound
				}
			}
			return nil, errFileNotFound
		case osIsPermission(err):
			return nil, errFileAccessDenied
		case isSysErrNotDir(err):
			return nil, errFileAccessDenied
		case isSysErrIO(err):
			return nil, errFaultyDisk
		case isSysErrTooManyFiles(err):
			return nil, errTooManyOpenFiles
		case isSysErrInvalidArg(err):
			return nil, errUnsupportedDisk
		default:
			return nil, err
		}
	}

	if length < 0 {
		return file, nil
	}

	st, err := file.Stat()
	if err != nil {
		file.Close()
		return nil, err
	}

	// Verify it is a regular file, otherwise subsequent Seek is
	// undefined.
	if !st.Mode().IsRegular() {
		file.Close()
		return nil, errIsNotRegular
	}

	if st.Size() < offset+length {
		// Expected size cannot be satisfied for
		// requested offset and length
		file.Close()
		return nil, errFileCorrupt
	}

	if offset > 0 {
		if _, err = file.Seek(offset, io.SeekStart); err != nil {
			file.Close()
			return nil, err
		}
	}

	return &sendFileReader{Reader: io.LimitReader(file, length), Closer: file}, nil
}

// closeWrapper converts a function to an io.Closer
type closeWrapper func() error

// Close calls the wrapped function.
func (c closeWrapper) Close() error {
	return c()
}

// CreateFile - creates the file.
func (s *xlStorage) CreateFile(ctx context.Context, origvolume, volume, path string, fileSize int64, r io.Reader) (err error) {
	if origvolume != "" {
		origvolumeDir, err := s.getVolDir(origvolume)
		if err != nil {
			return err
		}

		if !skipAccessChecks(origvolume) {
			// Stat a volume entry.
			if err = Access(origvolumeDir); err != nil {
				return convertAccessError(err, errVolumeAccessDenied)
			}
		}
	}

	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	filePath := pathJoin(volumeDir, path)
	if err = checkPathLength(filePath); err != nil {
		return err
	}

	parentFilePath := pathutil.Dir(filePath)
	defer func() {
		if err != nil {
			if volume == minioMetaTmpBucket {
				// only clean up the parent path if the
				// parent volume name is minioMetaTmpBucket
				removeAll(parentFilePath)
			}
		}
	}()

	return s.writeAllDirect(ctx, filePath, fileSize, r, os.O_CREATE|os.O_WRONLY|os.O_EXCL, volumeDir)
}

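// Note: CreateFile opens with os.O_CREATE|os.O_WRONLY|os.O_EXCL so a
// concurrent creator fails instead of clobbering data, and the deferred
// cleanup of the parent directory on error is restricted to
// minioMetaTmpBucket.
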
func (s *xlStorage) writeAllDirect(ctx context.Context, filePath string, fileSize int64, r io.Reader, flags int, skipParent string) (err error) {
|
2023-07-28 18:37:53 -04:00
|
|
|
if contextCanceled(ctx) {
|
|
|
|
return ctx.Err()
|
|
|
|
}
|
|
|
|
|
2024-04-23 13:15:52 -04:00
|
|
|
if skipParent == "" {
|
|
|
|
skipParent = s.drivePath
|
|
|
|
}
|
|
|
|
|
2019-04-24 00:25:06 -04:00
|
|
|
// Create top level directories if they don't exist.
|
|
|
|
// with mode 0777 mkdir honors system umask.
|
2022-08-29 14:19:29 -04:00
|
|
|
parentFilePath := pathutil.Dir(filePath)
|
2024-04-23 13:15:52 -04:00
|
|
|
if err = mkdirAll(parentFilePath, 0o777, skipParent); err != nil {
|
2021-03-17 12:38:38 -04:00
|
|
|
return osErrToFileErr(err)
|
2019-04-24 00:25:06 -04:00
|
|
|
}
|
|
|
|
|
2023-09-11 23:59:11 -04:00
|
|
|
odirectEnabled := globalAPIConfig.odirectEnabled() && s.oDirect && fileSize > 0
|
2023-07-29 18:17:56 -04:00
|
|
|
|
2022-06-07 09:44:01 -04:00
|
|
|
var w *os.File
|
|
|
|
if odirectEnabled {
|
2022-08-29 14:19:29 -04:00
|
|
|
w, err = OpenFileDirectIO(filePath, flags, 0o666)
|
2022-06-07 09:44:01 -04:00
|
|
|
} else {
|
2022-08-29 14:19:29 -04:00
|
|
|
w, err = OpenFile(filePath, flags, 0o666)
|
2022-06-07 09:44:01 -04:00
|
|
|
}
|
2019-04-24 00:25:06 -04:00
|
|
|
if err != nil {
|
2021-03-17 12:38:38 -04:00
|
|
|
return osErrToFileErr(err)
|
2016-10-29 15:44:44 -04:00
|
|
|
}
|
2019-05-22 16:47:15 -04:00
|
|
|
|
2021-05-15 15:56:58 -04:00
|
|
|
var bufp *[]byte
|
2023-03-06 11:56:10 -05:00
|
|
|
switch {
|
2024-04-08 05:22:27 -04:00
|
|
|
case fileSize <= xioutil.SmallBlock:
|
2022-08-29 14:19:29 -04:00
|
|
|
bufp = xioutil.ODirectPoolSmall.Get().(*[]byte)
|
|
|
|
defer xioutil.ODirectPoolSmall.Put(bufp)
|
2023-03-06 11:56:10 -05:00
|
|
|
default:
|
2021-09-29 19:40:28 -04:00
|
|
|
bufp = xioutil.ODirectPoolLarge.Get().(*[]byte)
|
|
|
|
defer xioutil.ODirectPoolLarge.Put(bufp)
|
2021-05-15 15:56:58 -04:00
|
|
|
}
|
2019-01-17 07:58:18 -05:00
|
|
|
|
2022-06-07 09:44:01 -04:00
|
|
|
var written int64
|
|
|
|
if odirectEnabled {
|
|
|
|
written, err = xioutil.CopyAligned(diskHealthWriter(ctx, w), r, *bufp, fileSize, w)
|
|
|
|
} else {
|
|
|
|
written, err = io.CopyBuffer(diskHealthWriter(ctx, w), r, *bufp)
|
|
|
|
}
|
2019-05-22 16:47:15 -04:00
|
|
|
if err != nil {
|
2023-09-26 14:04:00 -04:00
|
|
|
w.Close()
|
2019-05-22 16:47:15 -04:00
|
|
|
return err
|
2019-01-17 07:58:18 -05:00
|
|
|
}
|
2019-04-24 00:25:06 -04:00
|
|
|
|
2021-03-18 17:09:55 -04:00
|
|
|
if written < fileSize && fileSize >= 0 {
|
2023-09-26 14:04:00 -04:00
|
|
|
w.Close()
|
2019-05-22 16:47:15 -04:00
|
|
|
return errLessData
|
2021-03-18 17:09:55 -04:00
|
|
|
} else if written > fileSize && fileSize >= 0 {
|
2023-09-26 14:04:00 -04:00
|
|
|
w.Close()
|
2019-05-22 16:47:15 -04:00
|
|
|
return errMoreData
|
2019-01-17 07:58:18 -05:00
|
|
|
}
|
2019-05-22 16:47:15 -04:00
|
|
|
|
2022-08-29 14:19:29 -04:00
|
|
|
// Only interested in flushing the size_t not mtime/atime
|
2023-09-26 14:04:00 -04:00
|
|
|
if err = Fdatasync(w); err != nil {
|
|
|
|
w.Close()
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Dealing with error returns from close() - 'man 2 close'
|
|
|
|
//
|
|
|
|
// A careful programmer will check the return value of close(), since it is quite possible that
|
|
|
|
// errors on a previous write(2) operation are reported only on the final close() that releases
|
|
|
|
// the open file descriptor.
|
|
|
|
//
|
|
|
|
// Failing to check the return value when closing a file may lead to silent loss of data.
|
|
|
|
// This can especially be observed with NFS and with disk quota.
|
|
|
|
return w.Close()
|
2016-10-29 15:44:44 -04:00
|
|
|
}
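
// An illustrative sketch (hypothetical caller, not part of this file's API)
// of how writeAllDirect is driven: the caller supplies the exact payload size
// so the right O_DIRECT buffer pool is chosen and short/long writes are caught.
//
//	func stageBlob(ctx context.Context, s *xlStorage, volDir string, data []byte) error {
//		// O_EXCL ensures we never clobber a file a concurrent writer created.
//		return s.writeAllDirect(ctx, pathJoin(volDir, "staging", "blob"),
//			int64(len(data)), bytes.NewReader(data),
//			os.O_CREATE|os.O_WRONLY|os.O_EXCL, volDir)
//	}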

func (s *xlStorage) writeAll(ctx context.Context, volume string, path string, b []byte, sync bool, skipParent string) (err error) {
	if contextCanceled(ctx) {
		return ctx.Err()
	}

	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	filePath := pathJoin(volumeDir, path)
	if err = checkPathLength(filePath); err != nil {
		return err
	}

	flags := os.O_CREATE | os.O_WRONLY | os.O_TRUNC

	var w *os.File
	if sync {
		// For a larger xl.meta, mostly when it carries "inlined data", we
		// prefer writing with O_DIRECT and issuing fdatasync() at the end
		// instead of opening the file with O_DSYNC.
		//
		// This is an optimization mainly to ensure faster I/O.
		if len(b) > xioutil.DirectioAlignSize {
			r := bytes.NewReader(b)
			return s.writeAllDirect(ctx, filePath, r.Size(), r, flags, skipParent)
		}
		w, err = s.openFileSync(filePath, flags, skipParent)
	} else {
		w, err = s.openFile(filePath, flags, skipParent)
	}
	if err != nil {
		return err
	}

	n, err := w.Write(b)
	if err != nil {
		w.Close()
		return err
	}

	if n != len(b) {
		w.Close()
		return io.ErrShortWrite
	}

	// Dealing with error returns from close() - 'man 2 close'
	//
	// A careful programmer will check the return value of close(), since it is quite possible that
	// errors on a previous write(2) operation are reported only on the final close() that releases
	// the open file descriptor.
	//
	// Failing to check the return value when closing a file may lead to silent loss of data.
	// This can especially be observed with NFS and with disk quota.
	return w.Close()
}
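
// A minimal sketch of how the sync flag selects the write path; the volume
// and path values below are hypothetical:
//
//	// sync=true: payloads larger than DirectioAlignSize take the
//	// O_DIRECT+fdatasync route above, smaller ones use openFileSync.
//	err := s.writeAll(ctx, "mybucket", "tmp/xl.meta", buf, true, "")
//
//	// sync=false: plain buffered write, durability left to the kernel.
//	err = s.writeAll(ctx, "mybucket", "tmp/scratch", buf, false, "")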

func (s *xlStorage) WriteAll(ctx context.Context, volume string, path string, b []byte) (err error) {
	// Specific optimization to avoid re-reading from the drives for `format.json`
	// in case the caller is a network operation.
	if volume == minioMetaBucket && path == formatConfigFile {
		s.Lock()
		s.formatData = b
		s.Unlock()
	}

	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	return s.writeAll(ctx, volume, path, b, true, volumeDir)
}
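
// WriteAll is the exported, always-durable variant; passing volumeDir as
// skipParent tells writeAll that the volume directory already exists, so
// mkdirAll can stop walking there. Illustrative call (path hypothetical):
//
//	if err := s.WriteAll(ctx, minioMetaBucket, "config/current.json", data); err != nil {
//		return err
//	}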

// AppendFile - append a byte array at path; if the file doesn't exist at
// path this call explicitly creates it.
func (s *xlStorage) AppendFile(ctx context.Context, volume string, path string, buf []byte) (err error) {
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	if !skipAccessChecks(volume) {
		// Stat a volume entry.
		if err = Access(volumeDir); err != nil {
			return convertAccessError(err, errVolumeAccessDenied)
		}
	}

	filePath := pathJoin(volumeDir, path)
	if err = checkPathLength(filePath); err != nil {
		return err
	}

	var w *os.File
	// Create file if not found. Not doing O_DIRECT here to avoid the code that does buffer aligned writes.
	// AppendFile() is only used by healing code to heal objects written in old format.
	w, err = s.openFileSync(filePath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, volumeDir)
	if err != nil {
		return err
	}
	defer w.Close()

	n, err := w.Write(buf)
	if err != nil {
		return err
	}

	if n != len(buf) {
		return io.ErrShortWrite
	}

	return nil
}
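
// AppendFile deliberately avoids O_DIRECT: healing replays variable-sized
// chunks, and aligned writes would add complexity for no benefit here. A
// hedged sketch of the healing-style call pattern (chunks hypothetical):
//
//	for _, chunk := range chunks {
//		if err := s.AppendFile(ctx, volume, "obj/part.1", chunk); err != nil {
//			return err
//		}
//	}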

// CheckParts checks if the path has the necessary parts available.
func (s *xlStorage) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) error {
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	for _, part := range fi.Parts {
		partPath := pathJoin(path, fi.DataDir, fmt.Sprintf("part.%d", part.Number))
		filePath := pathJoin(volumeDir, partPath)
		if err = checkPathLength(filePath); err != nil {
			return err
		}
		st, err := Lstat(filePath)
		if err != nil {
			if osIsNotExist(err) {
				if !skipAccessChecks(volume) {
					// Stat a volume entry.
					if verr := Access(volumeDir); verr != nil {
						if osIsNotExist(verr) {
							return errVolumeNotFound
						}
						return verr
					}
				}
			}
			return osErrToFileErr(err)
		}
		if st.Mode().IsDir() {
			return errFileNotFound
		}
		// Check if the shard is truncated.
		if st.Size() < fi.Erasure.ShardFileSize(part.Size) {
			return errFileCorrupt
		}
	}

	return nil
}
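
// Note that the truncation check above compares the on-disk size against the
// erasure shard size, not the logical part size: each drive holds only its
// shard of a part plus bitrot checksum overhead, which is what
// fi.Erasure.ShardFileSize(part.Size) computes. Sketch (fi hypothetical):
//
//	if err := s.CheckParts(ctx, "mybucket", "obj", fi); err == errFileCorrupt {
//		// at least one shard is shorter than expected; schedule a heal
//	}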

// deleteFile deletes a file or a directory if it's empty unless recursive
// is set to true. If the target is successfully deleted, it will recursively
// move up the tree, deleting empty parent directories until it finds one
// with files in it. Returns nil for a non-empty directory even when
// recursive is set to false.
func (s *xlStorage) deleteFile(basePath, deletePath string, recursive, immediate bool) error {
	if basePath == "" || deletePath == "" {
		return nil
	}

	bp := pathutil.Clean(basePath) // do not override basepath / or deletePath /
	dp := pathutil.Clean(deletePath)
	if !strings.HasPrefix(dp, bp) || dp == bp {
		return nil
	}

	var err error
	if recursive {
		err = s.moveToTrash(deletePath, true, immediate)
	} else {
		err = Remove(deletePath)
	}
	if err != nil {
		switch {
		case isSysErrNotEmpty(err):
			// if the object is a directory, but it is not empty,
			// return FileNotFound to indicate it is an empty prefix.
			if HasSuffix(deletePath, SlashSeparator) {
				return errFileNotFound
			}
			// if we have .DS_Store only on macOS
			if runtime.GOOS == globalMacOSName {
				storeFilePath := pathJoin(deletePath, ".DS_Store")
				_, err := Stat(storeFilePath)
				// .DS_Store exists
				if err == nil {
					// delete first
					Remove(storeFilePath)
					// try again
					Remove(deletePath)
				}
			}
			// Ignore errors if the directory is not empty. The server relies on
			// this functionality, and sometimes uses recursion that should not
			// error on parent directories.
			return nil
		case osIsNotExist(err):
			return nil
		case errors.Is(err, errFileNotFound):
			return nil
		case osIsPermission(err):
			return errFileAccessDenied
		case isSysErrIO(err):
			return errFaultyDisk
		default:
			return err
		}
	}

	// Delete the parent directory, non-recursively. Errors from parent
	// directories shouldn't trickle up; multiple concurrent deleteFile()
	// calls may race to prune the same parent.
	s.deleteFile(basePath, pathutil.Dir(pathutil.Clean(deletePath)), false, false)

	return nil
}
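
// deleteFile's upward walk is what keeps empty prefix directories from
// accumulating. Sketch of the effect (paths hypothetical): deleting
// "a/b/c/obj" also prunes "a/b/c" and "a/b" if they became empty, stops at
// the first non-empty parent, and never crosses basePath:
//
//	_ = s.deleteFile(volumeDir, pathJoin(volumeDir, "a/b/c/obj"), false, false)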

// Delete - delete a file at path.
func (s *xlStorage) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) (err error) {
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	if !skipAccessChecks(volume) {
		// Stat a volume entry.
		if err = Access(volumeDir); err != nil {
			return convertAccessError(err, errVolumeAccessDenied)
		}
	}

	// The following code is needed so that we retain the SlashSeparator
	// suffix, if any, in the path argument.
	filePath := pathJoin(volumeDir, path)
	if err = checkPathLength(filePath); err != nil {
		return err
	}

	// Delete file and delete parent directory as well if it's empty.
	return s.deleteFile(volumeDir, filePath, deleteOpts.Recursive, deleteOpts.Immediate)
}

func skipAccessChecks(volume string) (ok bool) {
	for _, prefix := range []string{
		minioMetaTmpDeletedBucket,
		minioMetaTmpBucket,
		minioMetaMultipartBucket,
		minioMetaBucket,
	} {
		if strings.HasPrefix(volume, prefix) {
			return true
		}
	}
	return ok
}
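
// skipAccessChecks exists because the internal buckets are created eagerly at
// startup; re-stat'ing them on every call would only add syscalls. Example
// (assuming the usual ".minio.sys" internal prefix):
//
//	skipAccessChecks(".minio.sys/tmp") // true, internal volume
//	skipAccessChecks("user-bucket")    // false, volume must be Access()ed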

// RenameData - rename source path to destination path atomically, metadata and data directory.
func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (res RenameDataResp, err error) {
	defer func() {
		ignoredErrs := []error{
			errFileNotFound,
			errVolumeNotFound,
			errFileVersionNotFound,
			errDiskNotFound,
			errUnformattedDisk,
			errMaxVersionsExceeded,
			errFileAccessDenied,
		}
		if err != nil && !IsErr(err, ignoredErrs...) && !contextCanceled(ctx) {
			// Only log these errors if context is not yet canceled.
			storageLogOnceIf(ctx, fmt.Errorf("drive:%s, srcVolume: %s, srcPath: %s, dstVolume: %s, dstPath: %s - error %v",
				s.drivePath,
				srcVolume, srcPath,
				dstVolume, dstPath,
				err), "xl-storage-rename-data-"+dstVolume)
		}
		if s.globalSync {
			globalSync()
		}
	}()

	srcVolumeDir, err := s.getVolDir(srcVolume)
	if err != nil {
		return res, err
	}

	dstVolumeDir, err := s.getVolDir(dstVolume)
	if err != nil {
		return res, err
	}

	if !skipAccessChecks(srcVolume) {
		// Stat a volume entry.
		if err = Access(srcVolumeDir); err != nil {
			return res, convertAccessError(err, errVolumeAccessDenied)
		}
	}

	if !skipAccessChecks(dstVolume) {
		if err = Access(dstVolumeDir); err != nil {
			return res, convertAccessError(err, errVolumeAccessDenied)
		}
	}

	srcFilePath := pathutil.Join(srcVolumeDir, pathJoin(srcPath, xlStorageFormatFile))
	dstFilePath := pathutil.Join(dstVolumeDir, pathJoin(dstPath, xlStorageFormatFile))

	var srcDataPath string
	var dstDataPath string
	var dataDir string
	if !fi.IsRemote() {
		dataDir = retainSlash(fi.DataDir)
	}
	if dataDir != "" {
		srcDataPath = retainSlash(pathJoin(srcVolumeDir, srcPath, dataDir))
		// make sure to always use path.Join here, do not use pathJoin as
		// it would additionally add `/` at the end and it comes in the
		// way of renameAll(), parentDir creation.
		dstDataPath = pathutil.Join(dstVolumeDir, dstPath, dataDir)
	}

	if err = checkPathLength(srcFilePath); err != nil {
		return res, err
	}

	if err = checkPathLength(dstFilePath); err != nil {
		return res, err
	}

	s.RLock()
	formatLegacy := s.formatLegacy
	s.RUnlock()

	dstBuf, err := xioutil.ReadFile(dstFilePath)
	if err != nil {
		// handle situations when dstFilePath is 'file',
		// for example when someone is trying to upload
		// an object such as `prefix/object/xl.meta` where
		// `prefix/object` is already an object
		if isSysErrNotDir(err) && runtime.GOOS != globalWindowsOSName {
			// NOTE: On windows the error happens at
			// the next line and returns the appropriate error.
			return res, errFileAccessDenied
		}
		if !osIsNotExist(err) {
			return res, osErrToFileErr(err)
		}
		if formatLegacy {
			// errFileNotFound comes here.
			err = s.renameLegacyMetadata(dstVolumeDir, dstPath)
			if err != nil && err != errFileNotFound {
				return res, err
			}
			if err == nil {
				dstBuf, err = xioutil.ReadFile(dstFilePath)
				if err != nil && !osIsNotExist(err) {
					return res, osErrToFileErr(err)
				}
			}
		}
	}

	// Preserve all the legacy data, could be slow, but at max there can be 10,000 parts.
	currentDataPath := pathJoin(dstVolumeDir, dstPath)

	var xlMeta xlMetaV2
	var legacyPreserved bool
	var legacyEntries []string
	if len(dstBuf) > 0 {
		if isXL2V1Format(dstBuf) {
			if err = xlMeta.Load(dstBuf); err != nil {
				// Data appears corrupt. Drop data.
				xlMeta = xlMetaV2{}
			}
		} else {
			// This code-path is to preserve the legacy data.
			xlMetaLegacy := &xlMetaV1Object{}
			json := jsoniter.ConfigCompatibleWithStandardLibrary
			if err := json.Unmarshal(dstBuf, xlMetaLegacy); err != nil {
				storageLogOnceIf(ctx, err, "read-data-unmarshal-"+dstFilePath)
				// Data appears corrupt. Drop data.
			} else {
				xlMetaLegacy.DataDir = legacyDataDir
				if err = xlMeta.AddLegacy(xlMetaLegacy); err != nil {
					storageLogOnceIf(ctx, err, "read-data-add-legacy-"+dstFilePath)
				}
				legacyPreserved = true
			}
		}
	} else {
		// It is possible that some drives may not have the `xl.meta` file;
		// in such scenarios verify if at least `part.1` files exist
		// to detect a legacy version.
		if formatLegacy {
			// We only need this code if we are moving
			// from `xl.json` to `xl.meta`, we can avoid
			// one extra readdir operation here for all
			// new deployments.
			entries, err := readDir(currentDataPath)
			if err != nil && err != errFileNotFound {
				return res, osErrToFileErr(err)
			}
			for _, entry := range entries {
				if entry == xlStorageFormatFile || strings.HasSuffix(entry, slashSeparator) {
					continue
				}
				if strings.HasPrefix(entry, "part.") {
					legacyPreserved = true
					legacyEntries = entries
					break
				}
			}
		}
	}

	var legacyDataPath string
	if formatLegacy {
		legacyDataPath = pathJoin(dstVolumeDir, dstPath, legacyDataDir)
		if legacyPreserved {
			if contextCanceled(ctx) {
				return res, ctx.Err()
			}

			if len(legacyEntries) > 0 {
				// legacy data dir means its old content, honor system umask.
				if err = mkdirAll(legacyDataPath, 0o777, dstVolumeDir); err != nil {
					// any failed mkdir-calls delete them.
					s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
					return res, osErrToFileErr(err)
				}
				for _, entry := range legacyEntries {
					// Skip further xl.meta renames, also ignore any directories such as `legacyDataDir`.
					if entry == xlStorageFormatFile || strings.HasSuffix(entry, slashSeparator) {
						continue
					}

					if err = Rename(pathJoin(currentDataPath, entry), pathJoin(legacyDataPath, entry)); err != nil {
						// Any failed rename calls un-roll previous transaction.
						s.deleteFile(dstVolumeDir, legacyDataPath, true, false)

						return res, osErrToFileErr(err)
					}
				}
			}
		}
	}

	// Set skipParent to skip mkdirAll() calls for deeply nested objects
	// - if it's an overwrite
	// - if it's a versioned object
	//
	// This can potentially save as many syscalls as there are path
	// components in the object name, i.e. len(strings.Split(path, "/")).
	skipParent := dstVolumeDir
	if len(dstBuf) > 0 {
		skipParent = pathutil.Dir(dstFilePath)
	}

	var reqVID string
	if fi.VersionID == "" {
		reqVID = nullVersionID
	} else {
		reqVID = fi.VersionID
	}

	// Empty fi.VersionID indicates that versioning is either
	// suspended or disabled on this bucket. RenameData will replace
	// the 'null' version. We add a free-version to track its tiered
	// content for asynchronous deletion.
	//
	// Note: RestoreObject and HealObject requests don't end up replacing the
	// null version and therefore don't require the free-version to track
	// anything
	if fi.VersionID == "" && !fi.IsRestoreObjReq() && !fi.Healing() {
		// Note: Restore object request reuses PutObject/Multipart
		// upload to copy back its data from the remote tier. This
		// doesn't replace the existing version, so we don't need to add
		// a free-version.
		xlMeta.AddFreeVersion(fi)
	}

	// indicates if RenameData() is called by healing.
	healing := fi.Healing()

	// Replace the data of the null version or any other existing version-id
	_, ver, err := xlMeta.findVersionStr(reqVID)
	if err == nil {
		dataDir := ver.getDataDir()
		if dataDir != "" && (xlMeta.SharedDataDirCountStr(reqVID, dataDir) == 0) {
			// Purge the destination path as we are not preserving anything;
			// a versioned object was not requested.
			res.OldDataDir = dataDir
			if healing {
				// if the old destination path is the same as the new
				// destination path there is nothing to purge; this is true
				// in case of healing, so avoid setting OldDataDir then.
				res.OldDataDir = ""
			} else {
				xlMeta.data.remove(reqVID, dataDir)
			}
		}
	}

	if err = xlMeta.AddVersion(fi); err != nil {
		if legacyPreserved {
			// Any failed rename calls un-roll previous transaction.
			s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
		}
		return res, err
	}

	if len(xlMeta.versions) <= 10 {
		// any number of versions beyond this is excessive
		// avoid healing such objects in this manner, let
		// it heal during the regular scanner cycle.
		dst := make([]byte, 0, len(xlMeta.versions)*16)
		for _, ver := range xlMeta.versions {
			// append each version ID so the caller can sign the result.
			dst = append(dst, ver.header.VersionID[:]...)
		}
		res.Sign = dst
	}

	newDstBuf, err := xlMeta.AppendTo(metaDataPoolGet())
	defer metaDataPoolPut(newDstBuf)
	if err != nil {
		if legacyPreserved {
			s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
		}
		return res, errFileCorrupt
	}

	if contextCanceled(ctx) {
		return res, ctx.Err()
	}

	if err = s.WriteAll(ctx, srcVolume, pathJoin(srcPath, xlStorageFormatFile), newDstBuf); err != nil {
		if legacyPreserved {
			s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
		}
		return res, osErrToFileErr(err)
	}
	diskHealthCheckOK(ctx, err)

	notInline := srcDataPath != "" && len(fi.Data) == 0 && fi.Size > 0
	if notInline {
		if healing {
			// renameAll only for objects that have xl.meta not saved inline.
			// This must be done in healing only; otherwise it is expected
			// that for a fresh PutObject() call dstDataPath can never exist.
			// If it's an overwrite then the caller deletes the DataDir
			// in a separate RPC call.
			s.moveToTrash(dstDataPath, true, false)

			// If we are healing we should purge any legacyDataPath content
			// that was previously preserved during a PutObject() call
			// on a versioned bucket.
			s.moveToTrash(legacyDataPath, true, false)
		}
		if contextCanceled(ctx) {
			return res, ctx.Err()
		}
		if err = renameAll(srcDataPath, dstDataPath, skipParent); err != nil {
			if legacyPreserved {
				// Any failed rename calls un-roll previous transaction.
				s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
			}
			// if it's a partial rename() do not attempt to delete recursively.
			s.deleteFile(dstVolumeDir, dstDataPath, false, false)
			return res, osErrToFileErr(err)
		}
		diskHealthCheckOK(ctx, err)
	}

	// If we have an oldDataDir then we must preserve the current xl.meta
	// as a backup, in case renames need to be rolled back.
	if res.OldDataDir != "" {
		if contextCanceled(ctx) {
			return res, ctx.Err()
		}

		// preserve current xl.meta inside the oldDataDir.
		if err = s.writeAll(ctx, dstVolume, pathJoin(dstPath, res.OldDataDir, xlStorageFormatFileBackup), dstBuf, true, skipParent); err != nil {
			if legacyPreserved {
				s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
			}
			return res, osErrToFileErr(err)
		}
		diskHealthCheckOK(ctx, err)
	}

	if contextCanceled(ctx) {
		return res, ctx.Err()
	}

	// Commit meta-file
	if err = renameAll(srcFilePath, dstFilePath, skipParent); err != nil {
		if legacyPreserved {
			// Any failed rename calls un-roll previous transaction.
			s.deleteFile(dstVolumeDir, legacyDataPath, true, false)
		}
		// if it's a partial rename() do not attempt to delete recursively;
		// this can be healed since all parts are available.
		s.deleteFile(dstVolumeDir, dstDataPath, false, false)
		return res, osErrToFileErr(err)
	}

	if srcVolume != minioMetaMultipartBucket {
		// srcFilePath is sometimes under minioMetaTmpBucket; an attempt to
		// remove the temporary folder is enough since at this point
		// ideally all transactions should be complete.
		Remove(pathutil.Dir(srcFilePath))
	} else {
		s.deleteFile(srcVolumeDir, pathutil.Dir(srcFilePath), true, false)
	}
	return res, nil
}
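
// RenameData behaves like a two-phase commit: data is moved into place first
// and the xl.meta rename is the commit point, with every failure branch
// rolling back any preserved legacy content. A condensed sketch of the happy
// path (error handling elided):
//
//	// 1. read + merge destination xl.meta (legacy data, free versions)
//	// 2. write the merged xl.meta into the source (tmp) location
//	// 3. renameAll(srcDataPath, dstDataPath)  // data first
//	// 4. renameAll(srcFilePath, dstFilePath)  // commit: new xl.meta appears
//	// 5. remove the now-empty source directory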

// RenameFile - rename source path to destination path atomically.
func (s *xlStorage) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) (err error) {
	srcVolumeDir, err := s.getVolDir(srcVolume)
	if err != nil {
		return err
	}
	dstVolumeDir, err := s.getVolDir(dstVolume)
	if err != nil {
		return err
	}
	if !skipAccessChecks(srcVolume) {
		// Stat a volume entry.
		if err = Access(srcVolumeDir); err != nil {
			if osIsNotExist(err) {
				return errVolumeNotFound
			} else if isSysErrIO(err) {
				return errFaultyDisk
			}
			return err
		}
	}
	if !skipAccessChecks(dstVolume) {
		if err = Access(dstVolumeDir); err != nil {
			if osIsNotExist(err) {
				return errVolumeNotFound
			} else if isSysErrIO(err) {
				return errFaultyDisk
			}
			return err
		}
	}
	srcIsDir := HasSuffix(srcPath, SlashSeparator)
	dstIsDir := HasSuffix(dstPath, SlashSeparator)
	// Either src and dst have to be directories or files, else return error.
	if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) {
		return errFileAccessDenied
	}
	srcFilePath := pathutil.Join(srcVolumeDir, srcPath)
	if err = checkPathLength(srcFilePath); err != nil {
		return err
	}
	dstFilePath := pathutil.Join(dstVolumeDir, dstPath)
	if err = checkPathLength(dstFilePath); err != nil {
		return err
	}
	if srcIsDir {
		// If the source is a directory, we expect the destination to be
		// non-existent, but we still need to allow overwriting an empty
		// directory since it represents an object's empty directory.
		dirInfo, err := Lstat(dstFilePath)
		if isSysErrIO(err) {
			return errFaultyDisk
		}
		if err != nil {
			if !osIsNotExist(err) {
				return err
			}
		} else {
			if !dirInfo.IsDir() {
				return errFileAccessDenied
			}
			if err = Remove(dstFilePath); err != nil {
				if isSysErrNotEmpty(err) || isSysErrNotDir(err) {
					return errFileAccessDenied
				} else if isSysErrIO(err) {
					return errFaultyDisk
				}
				return err
			}
		}
	}

	if err = renameAll(srcFilePath, dstFilePath, dstVolumeDir); err != nil {
		if isSysErrNotEmpty(err) || isSysErrNotDir(err) {
			return errFileAccessDenied
		}
		return osErrToFileErr(err)
	}

	// Remove the parent dir of the source file if empty.
	parentDir := pathutil.Dir(srcFilePath)
	s.deleteFile(srcVolumeDir, parentDir, false, false)

	return nil
}

func (s *xlStorage) bitrotVerify(ctx context.Context, partPath string, partSize int64, algo BitrotAlgorithm, sum []byte, shardSize int64) error {
	// Open the file for reading.
	file, err := OpenFile(partPath, readMode, 0o666)
	if err != nil {
		return osErrToFileErr(err)
	}

	// Close the file descriptor.
	defer file.Close()

	fi, err := file.Stat()
	if err != nil {
		// Unable to stat the file, return an expected error
		// for the healing code to fix this file.
		return err
	}
	return bitrotVerify(diskHealthReader(ctx, file), fi.Size(), partSize, algo, sum, shardSize)
}

func (s *xlStorage) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) (err error) {
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}

	if !skipAccessChecks(volume) {
		// Stat a volume entry.
		if err = Access(volumeDir); err != nil {
			return convertAccessError(err, errVolumeAccessDenied)
		}
	}

	erasure := fi.Erasure
	for _, part := range fi.Parts {
		checksumInfo := erasure.GetChecksumInfo(part.Number)
		partPath := pathJoin(volumeDir, path, fi.DataDir, fmt.Sprintf("part.%d", part.Number))
		if err := s.bitrotVerify(ctx, partPath,
			erasure.ShardFileSize(part.Size),
			checksumInfo.Algorithm,
			checksumInfo.Hash, erasure.ShardSize()); err != nil {
			if !IsErr(err, []error{
				errFileNotFound,
				errVolumeNotFound,
				errFileCorrupt,
				errFileAccessDenied,
				errFileVersionNotFound,
			}...) {
				logger.GetReqInfo(ctx).AppendTags("disk", s.String())
				storageLogOnceIf(ctx, err, partPath)
			}
			return err
		}
	}

	return nil
}
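
// VerifyFile streams every erasure shard through its bitrot hash, so its cost
// is a full read of the object's local data; callers reserve it for healing
// and admin-triggered checks. Sketch (fi hypothetically from a prior read):
//
//	if err := s.VerifyFile(ctx, "mybucket", "obj", fi); err != nil {
//		// errFileCorrupt here means a shard failed its bitrot checksum
//	}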

// ReadMultiple will read multiple files and send each back as response.
// Files are read and returned in the given order.
// The resp channel is closed before the call returns.
// Only a canceled context will return an error.
func (s *xlStorage) ReadMultiple(ctx context.Context, req ReadMultipleReq, resp chan<- ReadMultipleResp) error {
	defer xioutil.SafeClose(resp)

	volumeDir := pathJoin(s.drivePath, req.Bucket)
	found := 0
	for _, f := range req.Files {
		if contextCanceled(ctx) {
			return ctx.Err()
		}
		r := ReadMultipleResp{
			Bucket: req.Bucket,
			Prefix: req.Prefix,
			File:   f,
		}
		var data []byte
		var mt time.Time
		fullPath := pathJoin(volumeDir, req.Prefix, f)
		w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
		if err := w.Run(func() (err error) {
			if req.MetadataOnly {
				data, mt, err = s.readMetadataWithDMTime(ctx, fullPath)
			} else {
				data, mt, err = s.readAllData(ctx, req.Bucket, volumeDir, fullPath, true)
			}
			return err
		}); err != nil {
			if !IsErr(err, errFileNotFound, errVolumeNotFound) {
				r.Exists = true
				r.Error = err.Error()
			}
			select {
			case <-ctx.Done():
				return ctx.Err()
			case resp <- r:
			}
			if req.AbortOn404 && !r.Exists {
				// We stop at the first file not found.
				// We have already reported the error, return nil.
				return nil
			}
			continue
		}
		diskHealthCheckOK(ctx, nil)
		if req.MaxSize > 0 && int64(len(data)) > req.MaxSize {
			r.Exists = true
			r.Error = fmt.Sprintf("max size (%d) exceeded: %d", req.MaxSize, len(data))
			select {
			case <-ctx.Done():
				return ctx.Err()
			case resp <- r:
				continue
			}
		}
		found++
		r.Exists = true
		r.Data = data
		r.Modtime = mt
		select {
		case <-ctx.Done():
			return ctx.Err()
		case resp <- r:
		}
		if req.MaxResults > 0 && found >= req.MaxResults {
			return nil
		}
	}
	return nil
}
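
// Because ReadMultiple always closes resp before returning, consumers may
// simply range over the channel. A minimal consumer sketch (bucket and file
// names hypothetical; only a canceled context makes ReadMultiple error):
//
//	resp := make(chan ReadMultipleResp, 4)
//	go func() {
//		_ = s.ReadMultiple(ctx, ReadMultipleReq{
//			Bucket: "mybucket",
//			Prefix: "obj",
//			Files:  []string{"xl.meta"},
//		}, resp)
//	}()
//	for r := range resp {
//		if r.Exists && r.Error == "" {
//			_ = r.Data // use the payload
//		}
//	}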

func (s *xlStorage) StatInfoFile(ctx context.Context, volume, path string, glob bool) (stat []StatInfo, err error) {
	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return stat, err
	}

	files := []string{pathJoin(volumeDir, path)}
	if glob {
		files, err = filepathx.Glob(filepath.Join(volumeDir, path))
		if err != nil {
			return nil, err
		}
	}
	for _, filePath := range files {
		if err := checkPathLength(filePath); err != nil {
			return stat, err
		}
		st, _ := Lstat(filePath)
		if st == nil {
			if !skipAccessChecks(volume) {
				// Stat a volume entry.
				if verr := Access(volumeDir); verr != nil {
					return stat, convertAccessError(verr, errVolumeAccessDenied)
				}
			}
			return stat, errPathNotFound
		}
		name, err := filepath.Rel(volumeDir, filePath)
		if err != nil {
			name = filePath
		}
		stat = append(stat, StatInfo{
			Name:    filepath.ToSlash(name),
			Size:    st.Size(),
			Dir:     st.IsDir(),
			Mode:    uint32(st.Mode()),
			ModTime: st.ModTime(),
		})
	}
	return stat, nil
}
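
// With glob=true the path may contain wildcards, letting a caller stat many
// entries in one request. Illustrative invocation (pattern hypothetical):
//
//	infos, err := s.StatInfoFile(ctx, "mybucket", "prefix/*/xl.meta", true)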

// CleanAbandonedData will read metadata of the object on disk
// and delete any data directories and inline data that isn't referenced in metadata.
// Metadata itself is not modified, only inline data.
func (s *xlStorage) CleanAbandonedData(ctx context.Context, volume string, path string) error {
	if volume == "" || path == "" {
		return nil // Ignore
	}

	volumeDir, err := s.getVolDir(volume)
	if err != nil {
		return err
	}
	baseDir := pathJoin(volumeDir, path+slashSeparator)
	metaPath := pathutil.Join(baseDir, xlStorageFormatFile)
	buf, _, err := s.readAllData(ctx, volume, volumeDir, metaPath, true)
	if err != nil {
		return err
	}
	defer metaDataPoolPut(buf)

	if !isXL2V1Format(buf) {
		return nil
	}
	var xl xlMetaV2
	err = xl.LoadOrConvert(buf)
	if err != nil {
		return err
	}
	foundDirs := make(map[string]struct{}, len(xl.versions))
	err = readDirFn(baseDir, func(name string, typ os.FileMode) error {
		if !typ.IsDir() {
			return nil
		}
		// See if the directory has a UUID name.
		base := filepath.Base(name)
		_, err := uuid.Parse(base)
		if err == nil {
			foundDirs[base] = struct{}{}
		}
		return nil
	})
	if err != nil {
		return err
	}
	wantDirs, err := xl.getDataDirs()
	if err != nil {
		return err
	}

	// Delete all directories we expect to be there.
	for _, dir := range wantDirs {
		delete(foundDirs, dir)
	}

	// Delete excessive directories.
	// Do not abort on context errors.
	for dir := range foundDirs {
		toRemove := pathJoin(volumeDir, path, dir+SlashSeparator)
		err = s.deleteFile(volumeDir, toRemove, true, true)
		diskHealthCheckOK(ctx, err)
		if err != nil {
			return err
		}
	}

	// Do the same for inline data
	dirs, err := xl.data.list()
	if err != nil {
		return err
	}

	// Clear and repopulate
	for k := range foundDirs {
		delete(foundDirs, k)
	}

	// Populate into map
	for _, k := range dirs {
		foundDirs[k] = struct{}{}
	}

	// Delete all directories we expect to be there.
	for _, dir := range wantDirs {
		delete(foundDirs, dir)
	}

	// Nothing to delete
	if len(foundDirs) == 0 {
		return nil
	}

	// Delete excessive inline entries.
	// Convert to slice.
	dirs = dirs[:0]
	for dir := range foundDirs {
		dirs = append(dirs, dir)
	}
	if xl.data.remove(dirs...) {
		newBuf, err := xl.AppendTo(metaDataPoolGet())
		if err == nil {
			defer metaDataPoolPut(newBuf)
			// Write back the updated metadata (newBuf), so the removed
			// inline entries are actually dropped on disk.
			return s.WriteAll(ctx, volume, pathJoin(path, xlStorageFormatFile), newBuf)
		}
	}
	return nil
}
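
// CleanAbandonedData works in the opposite direction from most consistency
// checks: it lists what is actually on disk and removes whatever xl.meta no
// longer references. Typical use after an aborted transaction (arguments
// hypothetical):
//
//	_ = s.CleanAbandonedData(ctx, "mybucket", "obj")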

func convertAccessError(err, permErr error) error {
	switch {
	case osIsNotExist(err):
		return errVolumeNotFound
	case isSysErrIO(err):
		return errFaultyDisk
	case osIsPermission(err):
		return permErr
	default:
		return err
	}
}