re-implement pickValidFileInfo dataDir, move to quorum calculation (#13681)

Picking dataDir loosely based on a maxima count is incorrect and does not
work in all situations, for example when disks are in the following states:

- During the xl.json to xl.meta migration there may be partial xl.json files
  left over if some disks were not yet connected while the migration ran.
  Since the xl.json and xl.meta mtimes are the same, the dataDir maxima does
  not resolve correctly, leading to quorum issues.

- It is also possible that XLV1 is true on some of the available disks; make
  sure to pick the FileInfo based on a common quorum and skip unexpected
  disks that still carry the older data format (see the sketch after this
  list).
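The core of the new approach, visible in the findFileInfoInQuorum hunk further down, is to reduce each candidate FileInfo to a fingerprint that includes its legacy-format flag and dataDir, and to return a candidate only when one fingerprint reaches quorum. Below is a minimal, self-contained sketch of that idea; fileInfo and pickByQuorum are illustrative names rather than actual MinIO identifiers, and the real implementation also folds the parts list and erasure distribution into the hash.

// Minimal sketch of quorum-based FileInfo selection (illustrative only; the
// names below are not MinIO identifiers). Each candidate at the common
// modTime is reduced to a fingerprint of the fields that must agree across
// disks (here the legacy-format flag and the dataDir); a candidate is
// returned only if its fingerprint is shared by at least `quorum` disks.
package sketch

import (
	"crypto/sha256"
	"errors"
	"fmt"
	"time"
)

type fileInfo struct {
	ModTime time.Time
	DataDir string
	XLV1    bool // true when the metadata still uses the legacy xl.json format
}

func pickByQuorum(metaArr []fileInfo, modTime time.Time, quorum int) (fileInfo, error) {
	counts := make(map[string]int)
	byHash := make(map[string]fileInfo)
	for _, meta := range metaArr {
		if !meta.ModTime.Equal(modTime) {
			continue // only metadata at the common modTime participates
		}
		// Fingerprint the fields that must agree across disks.
		h := sha256.New()
		fmt.Fprintf(h, "%v/%s", meta.XLV1, meta.DataDir)
		key := fmt.Sprintf("%x", h.Sum(nil))
		counts[key]++
		byHash[key] = meta
	}
	for key, count := range counts {
		if count >= quorum {
			return byHash[key], nil // a consistent FileInfo reached read quorum
		}
	}
	return fileInfo{}, errors.New("no FileInfo in read quorum")
}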

Also, this PR adds a test that upgrades from an older release to a newer one
and verifies that the data is still readable and matches its checksum.

NOTE: this is just initial work; we can build on top of it to add further tests.
Harshavardhana committed 2021-11-21 10:41:30 -08:00 (via GitHub)
parent 36b5426f6e
commit c791de0e1e
20 changed files with 378 additions and 155 deletions


@@ -17,7 +17,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.16.x, 1.17.x]
+        go-version: [1.17.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v2


@@ -17,7 +17,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.16.x, 1.17.x]
+        go-version: [1.17.x]
         os: [ubuntu-latest, windows-latest]
     steps:
       - uses: actions/checkout@v2

.github/workflows/upgrade-ci-cd.yaml (new file, 31 lines)

@@ -0,0 +1,31 @@
name: Upgrade old version tests

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  build:
    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.17.x]
        os: [ubuntu-latest]
    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Start upgrade tests
        run: |
          make test-upgrade


@@ -42,6 +42,10 @@ test: verifiers build ## builds minio, runs linters, tests
 	@echo "Running unit tests"
 	@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null

+test-upgrade: build
+	@echo "Running minio upgrade tests"
+	@(env bash $(PWD)/buildscripts/minio-upgrade.sh)
+
 test-race: verifiers build ## builds minio, runs linters, tests (race)
 	@echo "Running unit tests under -race"
 	@(env bash $(PWD)/buildscripts/race.sh)


@@ -0,0 +1,77 @@
#!/bin/bash

trap 'cleanup $LINENO' ERR

# shellcheck disable=SC2120
cleanup() {
    MINIO_VERSION=dev docker-compose \
        -f "buildscripts/upgrade-tests/compose.yml" \
        rm -s -f
    docker volume prune -f
}

__init__() {
    sudo apt install curl -y
    export GOPATH=/tmp/gopath
    export PATH=${PATH}:${GOPATH}/bin

    go install github.com/minio/mc@latest

    TAG=minio/minio:dev make docker

    MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
        -f "buildscripts/upgrade-tests/compose.yml" \
        up -d --build

    until (mc alias set minio http://127.0.0.1:9000 minioadmin minioadmin); do
        echo "...waiting..." && sleep 5;
    done

    mc mb minio/minio-test/
    mc cp ./minio minio/minio-test/to-read/
    mc cp /etc/hosts minio/minio-test/to-read/hosts
    mc policy set download minio/minio-test

    mc cat minio/minio-test/to-read/minio | sha256sum
    mc cat ./minio | sha256sum

    curl -s http://127.0.0.1:9000/minio-test/to-read/hosts | sha256sum

    MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
}

verify_checksum_after_heal() {
    sum1=$(curl -s "$2" | sha256sum);
    mc admin heal --json -r "$1" >/dev/null; # test after healing
    sum1_heal=$(curl -s "$2" | sha256sum);

    if [ "${sum1_heal}" != "${sum1}" ]; then
        echo "mismatch expected ${sum1_heal}, got ${sum1}"
        exit 1;
    fi
}

verify_checksum_mc() {
    expected=$(mc cat "$1" | sha256sum)
    got=$(mc cat "$2" | sha256sum)

    if [ "${expected}" != "${got}" ]; then
        echo "mismatch expected ${expected}, got ${got}"
        exit 1;
    fi
}

main() {
    MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build

    until (mc alias set minio http://127.0.0.1:9000 minioadmin minioadmin); do
        echo "...waiting..." && sleep 5
    done

    verify_checksum_after_heal minio/minio-test http://127.0.0.1:9000/minio-test/to-read/hosts

    verify_checksum_mc ./minio minio/minio-test/to-read/minio
    verify_checksum_mc /etc/hosts minio/minio-test/to-read/hosts

    cleanup
}

( __init__ "$@" && main "$@" )


@@ -0,0 +1,81 @@
version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
  image: minio/minio:${MINIO_VERSION}
  command: server http://minio{1...4}/data{1...3}
  env_file:
    - ./minio.env
  expose:
    - "9000"
    - "9001"
  healthcheck:
    test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
    interval: 30s
    timeout: 20s
    retries: 3

# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
  minio1:
    <<: *minio-common
    hostname: minio1
    volumes:
      - data1-1:/data1
      - data1-2:/data2
      - data1-3:/data3

  minio2:
    <<: *minio-common
    hostname: minio2
    volumes:
      - data2-1:/data1
      - data2-2:/data2
      - data2-3:/data3

  minio3:
    <<: *minio-common
    hostname: minio3
    volumes:
      - data3-1:/data1
      - data3-2:/data2
      - data3-3:/data3

  minio4:
    <<: *minio-common
    hostname: minio4
    volumes:
      - data4-1:/data1
      - data4-2:/data2
      - data4-3:/data3

  nginx:
    image: nginx:1.19.2-alpine
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
    ports:
      - "9000:9000"
      - "9001:9001"
    depends_on:
      - minio1
      - minio2
      - minio3
      - minio4

## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
  data1-1:
  data1-2:
  data1-3:
  data2-1:
  data2-2:
  data2-3:
  data3-1:
  data3-2:
  data3-3:
  data4-1:
  data4-2:
  data4-3:


@@ -0,0 +1,3 @@
MINIO_ACCESS_KEY=minioadmin
MINIO_SECRET_KEY=minioadmin
MINIO_BROWSER=off


@@ -0,0 +1,68 @@
user nginx;
worker_processes auto;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    #tcp_nopush on;

    keepalive_timeout 65;

    #gzip on;

    # include /etc/nginx/conf.d/*.conf;

    upstream minio {
        server minio1:9000;
        server minio2:9000;
        server minio3:9000;
        server minio4:9000;
    }

    # main minio
    server {
        listen 9000;
        listen [::]:9000;
        server_name localhost;

        # To allow special characters in headers
        ignore_invalid_headers off;
        # Allow any size file to be uploaded.
        # Set to a value such as 1000m; to restrict file size to a specific value
        client_max_body_size 0;
        # To disable buffering
        proxy_buffering off;

        location / {
            proxy_set_header Host $http_host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;

            proxy_connect_timeout 300;
            # Default is HTTP/1, keepalive is only enabled in HTTP/1.1
            proxy_http_version 1.1;
            proxy_set_header Connection "";
            chunked_transfer_encoding off;

            proxy_pass http://minio;
        }
    }
}


@@ -26,33 +26,30 @@ import (
 )

 // commonTime returns a maximally occurring time from a list of time.
-func commonTime(modTimes []time.Time, dataDirs []string) (modTime time.Time, dataDir string) {
-    var maxima int // Counter for remembering max occurrence of elements.
+func commonTime(modTimes []time.Time) (modTime time.Time) {
     timeOccurenceMap := make(map[int64]int, len(modTimes))
-    dataDirMap := make(map[int64]string, len(modTimes))

     // Ignore the uuid sentinel and count the rest.
-    for i, t := range modTimes {
+    for _, t := range modTimes {
         if t.Equal(timeSentinel) {
             continue
         }
-        dataDirMap[t.UnixNano()] = dataDirs[i]
         timeOccurenceMap[t.UnixNano()]++
     }

+    var maxima int // Counter for remembering max occurrence of elements.
+
     // Find the common cardinality from previously collected
     // occurrences of elements.
     for nano, count := range timeOccurenceMap {
         t := time.Unix(0, nano).UTC()
         if count > maxima || (count == maxima && t.After(modTime)) {
             maxima = count
-            dataDir = dataDirMap[nano]
             modTime = t
         }
     }

-    // Return the collected common uuid.
-    return modTime, dataDir
+    // Return the collected common modTime.
+    return modTime
 }

 // Beginning of unix time is treated as sentinel value here.
@@ -82,10 +79,14 @@ func listObjectModtimes(partsMetadata []FileInfo, errs []error) (modTimes []time
     return modTimes
 }

-const (
-    errorDir     = "error-dir"
-    delMarkerDir = ""
-)
+func filterOnlineDisksInplace(fi FileInfo, partsMetadata []FileInfo, onlineDisks []StorageAPI) {
+    for i, meta := range partsMetadata {
+        if fi.XLV1 == meta.XLV1 {
+            continue
+        }
+        onlineDisks[i] = nil
+    }
+}

 // Notes:
 // There are 5 possible states a disk could be in,
@@ -111,43 +112,25 @@
 // - a slice of disks where disk having 'older' xl.meta (or nothing)
 // are set to nil.
 // - latest (in time) of the maximally occurring modTime(s).
-func listOnlineDisks(disks []StorageAPI, partsMetadata []FileInfo, errs []error) (onlineDisks []StorageAPI, modTime time.Time, dataDir string) {
+func listOnlineDisks(disks []StorageAPI, partsMetadata []FileInfo, errs []error) (onlineDisks []StorageAPI, modTime time.Time) {
     onlineDisks = make([]StorageAPI, len(disks))

     // List all the file commit ids from parts metadata.
     modTimes := listObjectModtimes(partsMetadata, errs)

-    dataDirs := getDataDirs(partsMetadata, errs)
-
     // Reduce list of UUIDs to a single common value.
-    modTime, dataDir = commonTime(modTimes, dataDirs)
+    modTime = commonTime(modTimes)

     // Create a new online disks slice, which have common uuid.
     for index, t := range modTimes {
-        if partsMetadata[index].IsValid() && t.Equal(modTime) && partsMetadata[index].DataDir == dataDir {
+        if partsMetadata[index].IsValid() && t.Equal(modTime) {
             onlineDisks[index] = disks[index]
         } else {
             onlineDisks[index] = nil
         }
     }

-    return onlineDisks, modTime, dataDir
-}
-
-func getDataDirs(partsMetadata []FileInfo, errs []error) []string {
-    dataDirs := make([]string, len(partsMetadata))
-    for idx, fi := range partsMetadata {
-        if errs[idx] != nil {
-            dataDirs[idx] = errorDir
-            continue
-        }
-        if fi.Deleted {
-            dataDirs[idx] = delMarkerDir
-        } else {
-            dataDirs[idx] = fi.DataDir
-        }
-    }
-    return dataDirs
+    return onlineDisks, modTime
 }

 // Returns the latest updated FileInfo files and error in case of failure.
@@ -161,18 +144,16 @@ func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, errs []err
     // List all the file commit ids from parts metadata.
     modTimes := listObjectModtimes(partsMetadata, errs)

-    dataDirs := getDataDirs(partsMetadata, errs)
-
     // Count all latest updated FileInfo values
     var count int
     var latestFileInfo FileInfo

     // Reduce list of UUIDs to a single common value - i.e. the last updated Time
-    modTime, dataDir := commonTime(modTimes, dataDirs)
+    modTime := commonTime(modTimes)

     // Interate through all the modTimes and count the FileInfo(s) with latest time.
     for index, t := range modTimes {
-        if partsMetadata[index].IsValid() && t.Equal(modTime) && dataDir == partsMetadata[index].DataDir {
+        if partsMetadata[index].IsValid() && t.Equal(modTime) {
             latestFileInfo = partsMetadata[index]
             count++
         }
@@ -195,8 +176,8 @@ func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, errs []err
 func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []FileInfo,
     errs []error, bucket, object string, scanMode madmin.HealScanMode) ([]StorageAPI, []error) {

-    // List of disks having latest version of the object er.meta (by modtime)
-    _, modTime, dataDir := listOnlineDisks(onlineDisks, partsMetadata, errs)
+    // List of disks having latest version of the object xl.meta (by modtime)
+    _, modTime := listOnlineDisks(onlineDisks, partsMetadata, errs)

     availableDisks := make([]StorageAPI, len(onlineDisks))
     dataErrs := make([]error, len(onlineDisks))

@@ -239,7 +220,7 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad
         }

         meta := partsMetadata[i]
-        if !meta.ModTime.Equal(modTime) || meta.DataDir != dataDir {
+        if !meta.ModTime.Equal(modTime) {
             dataErrs[i] = errFileCorrupt
             partsMetadata[i] = FileInfo{}
             continue


@@ -34,10 +34,8 @@ import (
 func TestCommonTime(t *testing.T) {
     // List of test cases for common modTime.
     testCases := []struct {
-        times    []time.Time
-        dataDirs []string
-        time     time.Time
-        dataDir  string
+        times []time.Time
+        time  time.Time
     }{
         {
             // 1. Tests common times when slice has varying time elements.
@@ -49,17 +47,8 @@ func TestCommonTime(t *testing.T) {
                 time.Unix(0, 2).UTC(),
                 time.Unix(0, 3).UTC(),
                 time.Unix(0, 1).UTC(),
-            }, []string{
-                errorDir,
-                delMarkerDir,
-                "cd3b36c0-49e6-11ec-8087-73a2b2fd4016",
-                "cd3b36c0-49e6-11ec-8087-73a2b2fd4016",
-                "cd3b36c0-49e6-11ec-8087-73a2b2fd4016",
-                "cd3b36c0-49e6-11ec-8087-73a2b2fd4016",
-                "cd3b36c0-49e6-11ec-8087-73a2b2fd4016",
             },
             time.Unix(0, 3).UTC(),
-            "cd3b36c0-49e6-11ec-8087-73a2b2fd4016",
         },
         {
             // 2. Tests common time obtained when all elements are equal.
@@ -71,17 +60,8 @@ func TestCommonTime(t *testing.T) {
                 time.Unix(0, 3).UTC(),
                 time.Unix(0, 3).UTC(),
                 time.Unix(0, 3).UTC(),
-            }, []string{
-                errorDir,
-                delMarkerDir,
-                delMarkerDir,
-                delMarkerDir,
-                delMarkerDir,
-                delMarkerDir,
-                delMarkerDir,
             },
             time.Unix(0, 3).UTC(),
-            delMarkerDir,
         },
         {
             // 3. Tests common time obtained when elements have a mixture
@@ -97,17 +77,8 @@ func TestCommonTime(t *testing.T) {
                 timeSentinel,
                 timeSentinel,
                 timeSentinel,
-            }, []string{
-                errorDir,
-                delMarkerDir,
-                delMarkerDir,
-                delMarkerDir,
-                delMarkerDir,
-                delMarkerDir,
-                delMarkerDir,
             },
             time.Unix(0, 3).UTC(),
-            delMarkerDir,
         },
     }
@@ -115,13 +86,9 @@ func TestCommonTime(t *testing.T) {
     // common modtime. Tests fail if modtime does not match.
     for i, testCase := range testCases {
         // Obtain a common mod time from modTimes slice.
-        ctime, dataDir := commonTime(testCase.times, testCase.dataDirs)
+        ctime := commonTime(testCase.times)
         if !testCase.time.Equal(ctime) {
             t.Errorf("Test case %d, expect to pass but failed. Wanted modTime: %s, got modTime: %s\n", i+1, testCase.time, ctime)
-            continue
-        }
-        if dataDir != testCase.dataDir {
-            t.Errorf("Test case %d, expect to pass but failed. Wanted dataDir: %s, got dataDir: %s\n", i+1, testCase.dataDir, dataDir)
         }
     }
 }
@@ -278,15 +245,11 @@ func TestListOnlineDisks(t *testing.T) {
         }

-        onlineDisks, modTime, dataDir := listOnlineDisks(erasureDisks, partsMetadata, test.errs)
+        onlineDisks, modTime := listOnlineDisks(erasureDisks, partsMetadata, test.errs)
         if !modTime.Equal(test.expectedTime) {
             t.Fatalf("Expected modTime to be equal to %v but was found to be %v",
                 test.expectedTime, modTime)
         }
-        if fi.DataDir != dataDir {
-            t.Fatalf("Expected dataDir to be equal to %v but was found to be %v",
-                fi.DataDir, dataDir)
-        }

         availableDisks, newErrs := disksWithAllParts(ctx, onlineDisks, partsMetadata, test.errs, bucket, object, madmin.HealDeepScan)
         test.errs = newErrs
@@ -395,13 +358,14 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
     for i, test := range testCases {
         test := test
         t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
-            _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
+            _, err := obj.PutObject(ctx, bucket, object,
+                mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
             if err != nil {
                 t.Fatalf("Failed to putObject %v", err)
             }

             partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", true)
-            fi, err := getLatestFileInfo(ctx, partsMetadata, errs, getReadQuorum(len(disks)))
+            _, err = getLatestFileInfo(ctx, partsMetadata, errs, getReadQuorum(len(disks)))
             if err != nil {
                 t.Fatalf("Failed to getLatestFileInfo %v", err)
             }
@@ -462,17 +426,12 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
             t.Fatalf("Failed to getLatestFileInfo %v", err)
         }

-        onlineDisks, modTime, dataDir := listOnlineDisks(erasureDisks, partsMetadata, test.errs)
+        onlineDisks, modTime := listOnlineDisks(erasureDisks, partsMetadata, test.errs)
         if !modTime.Equal(test.expectedTime) {
             t.Fatalf("Expected modTime to be equal to %v but was found to be %v",
                 test.expectedTime, modTime)
         }
-        if fi.DataDir != dataDir {
-            t.Fatalf("Expected dataDir to be equal to %v but was found to be %v",
-                fi.DataDir, dataDir)
-        }

         availableDisks, newErrs := disksWithAllParts(ctx, onlineDisks, partsMetadata, test.errs, bucket, object, madmin.HealDeepScan)
         test.errs = newErrs
@@ -527,7 +486,7 @@ func TestDisksWithAllParts(t *testing.T) {
         t.Fatalf("Failed to read xl meta data %v", err)
     }

-    erasureDisks, _, _ = listOnlineDisks(erasureDisks, partsMetadata, errs)
+    erasureDisks, _ = listOnlineDisks(erasureDisks, partsMetadata, errs)

     filteredDisks, errs := disksWithAllParts(ctx, erasureDisks, partsMetadata, errs, bucket, object, madmin.HealDeepScan)


@@ -294,15 +294,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
     // List of disks having latest version of the object er.meta
     // (by modtime).
-    _, modTime, dataDir := listOnlineDisks(storageDisks, partsMetadata, errs)
-
-    // make sure all parts metadata dataDir is same as returned by listOnlineDisks()
-    // the reason is its possible that some of the disks might have stale data, for those
-    // we simply override them with maximally occurring 'dataDir' - this ensures that
-    // disksWithAllParts() verifies same dataDir across all drives.
-    for i := range partsMetadata {
-        partsMetadata[i].DataDir = dataDir
-    }
+    _, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

     // List of disks having all parts as per latest metadata.
     // NOTE: do not pass in latestDisks to diskWithAllParts since

@@ -318,7 +310,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
     // Latest FileInfo for reference. If a valid metadata is not
     // present, it is as good as object not found.
-    latestMeta, err := pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, result.DataBlocks)
+    latestMeta, err := pickValidFileInfo(ctx, partsMetadata, modTime, result.DataBlocks)
     if err != nil {
         return result, toObjectErr(err, bucket, object, versionID)
     }

@@ -551,6 +543,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
         if disk == OfflineDisk {
             continue
         }
+        // record the index of the updated disks
         partsMetadata[i].Erasure.Index = i + 1


@@ -179,6 +179,10 @@ func shuffleDisksAndPartsMetadataByIndex(disks []StorageAPI, metaArr []FileInfo,
             inconsistent++
             continue
         }
+        if meta.XLV1 != fi.XLV1 {
+            inconsistent++
+            continue
+        }

         // check if erasure distribution order matches the index
         // position if this is not correct we discard the disk
         // and move to collect others

@@ -228,6 +232,9 @@ func shuffleDisksAndPartsMetadata(disks []StorageAPI, partsMetadata []FileInfo,
             // if object was ever written previously.
             continue
         }
+        if !init && fi.XLV1 != partsMetadata[index].XLV1 {
+            continue
+        }
         blockIndex := distribution[index]
         shuffledPartsMetadata[blockIndex-1] = partsMetadata[index]
         shuffledDisks[blockIndex-1] = disks[index]


@@ -282,7 +282,7 @@ func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIn
     return 0, 0, InvalidRange{}
 }

-func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (FileInfo, error) {
+func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, quorum int) (FileInfo, error) {
     // with less quorum return error.
     if quorum < 2 {
         return FileInfo{}, errErasureReadQuorum

@@ -290,7 +290,9 @@ func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.
     metaHashes := make([]string, len(metaArr))
     h := sha256.New()
     for i, meta := range metaArr {
-        if meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir {
+        if meta.IsValid() && meta.ModTime.Equal(modTime) {
+            fmt.Fprintf(h, "%v", meta.XLV1)
+            fmt.Fprintf(h, "%v", meta.GetDataDir())
             for _, part := range meta.Parts {
                 fmt.Fprintf(h, "part.%d", part.Number)
             }

@@ -351,8 +353,8 @@ func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.
 // pickValidFileInfo - picks one valid FileInfo content and returns from a
 // slice of FileInfo.
-func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (FileInfo, error) {
-    return findFileInfoInQuorum(ctx, metaArr, modTime, dataDir, quorum)
+func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, quorum int) (FileInfo, error) {
+    return findFileInfoInQuorum(ctx, metaArr, modTime, quorum)
 }

 // writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.


@@ -178,28 +178,24 @@ func TestFindFileInfoInQuorum(t *testing.T) {
     tests := []struct {
         fis            []FileInfo
         modTime        time.Time
-        dataDir        string
         expectedErr    error
         expectedQuorum int
     }{
         {
             fis:            getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21"),
             modTime:        time.Unix(1603863445, 0),
-            dataDir:        "36a21454-a2ca-11eb-bbaa-93a81c686f21",
             expectedErr:    nil,
             expectedQuorum: 8,
         },
         {
             fis:            getNFInfo(16, 7, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21"),
             modTime:        time.Unix(1603863445, 0),
-            dataDir:        "36a21454-a2ca-11eb-bbaa-93a81c686f21",
             expectedErr:    errErasureReadQuorum,
             expectedQuorum: 8,
         },
         {
             fis:            getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21"),
             modTime:        time.Unix(1603863445, 0),
-            dataDir:        "36a21454-a2ca-11eb-bbaa-93a81c686f21",
             expectedErr:    errErasureReadQuorum,
             expectedQuorum: 0,
         },

@@ -208,7 +204,7 @@ func TestFindFileInfoInQuorum(t *testing.T) {
     for _, test := range tests {
         test := test
         t.Run("", func(t *testing.T) {
-            _, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.dataDir, test.expectedQuorum)
+            _, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, test.expectedQuorum)
             if err != test.expectedErr {
                 t.Errorf("Expected %s, got %s", test.expectedErr, err)
             }


@@ -67,10 +67,10 @@ func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object
     }

     // List all online disks.
-    _, modTime, dataDir := listOnlineDisks(disks, metaArr, errs)
+    _, modTime := listOnlineDisks(disks, metaArr, errs)

     // Pick latest valid metadata.
-    _, err = pickValidFileInfo(ctx, metaArr, modTime, dataDir, readQuorum)
+    _, err = pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
     return err
 }

@@ -460,10 +460,10 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
     }

     // List all online disks.
-    onlineDisks, modTime, dataDir := listOnlineDisks(storageDisks, partsMetadata, errs)
+    onlineDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

     // Pick one from the first valid metadata.
-    fi, err := pickValidFileInfo(pctx, partsMetadata, modTime, dataDir, writeQuorum)
+    fi, err := pickValidFileInfo(pctx, partsMetadata, modTime, writeQuorum)
     if err != nil {
         return pi, err
     }

@@ -568,10 +568,10 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
     }

     // Get current highest version based on re-read partsMetadata.
-    onlineDisks, modTime, dataDir = listOnlineDisks(onlineDisks, partsMetadata, errs)
+    onlineDisks, modTime = listOnlineDisks(onlineDisks, partsMetadata, errs)

     // Pick one from the first valid metadata.
-    fi, err = pickValidFileInfo(wctx, partsMetadata, modTime, dataDir, writeQuorum)
+    fi, err = pickValidFileInfo(wctx, partsMetadata, modTime, writeQuorum)
     if err != nil {
         return pi, err
     }

@@ -656,10 +656,10 @@ func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, u
         return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath)
     }

-    _, modTime, dataDir := listOnlineDisks(storageDisks, partsMetadata, errs)
+    _, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

     // Pick one from the first valid metadata.
-    fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, readQuorum)
+    fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, readQuorum)
     if err != nil {
         return result, err
     }

@@ -706,10 +706,10 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up
         return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath)
     }

-    _, modTime, dataDir := listOnlineDisks(storageDisks, partsMetadata, errs)
+    _, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

     // Pick one from the first valid metadata.
-    fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, writeQuorum)
+    fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
     if err != nil {
         return result, err
     }

@@ -801,10 +801,10 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
         return oi, toObjectErr(reducedErr, bucket, object)
     }

-    onlineDisks, modTime, dataDir := listOnlineDisks(storageDisks, partsMetadata, errs)
+    onlineDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

     // Pick one from the first valid metadata.
-    fi, err := pickValidFileInfo(rctx, partsMetadata, modTime, dataDir, writeQuorum)
+    fi, err := pickValidFileInfo(rctx, partsMetadata, modTime, writeQuorum)
     if err != nil {
         return oi, err
     }


@@ -88,10 +88,10 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
     }

     // List all online disks.
-    onlineDisks, modTime, dataDir := listOnlineDisks(storageDisks, metaArr, errs)
+    onlineDisks, modTime := listOnlineDisks(storageDisks, metaArr, errs)

     // Pick latest valid metadata.
-    fi, err := pickValidFileInfo(ctx, metaArr, modTime, dataDir, readQuorum)
+    fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
     if err != nil {
         return oi, toObjectErr(err, srcBucket, srcObject)
     }

@@ -102,6 +102,8 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
         return fi.ToObjectInfo(srcBucket, srcObject), toObjectErr(errMethodNotAllowed, srcBucket, srcObject)
     }

+    filterOnlineDisksInplace(fi, metaArr, onlineDisks)
+
     versionID := srcInfo.VersionID
     if srcInfo.versionOnly {
         versionID = dstOpts.VersionID

@@ -404,14 +406,16 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
     }

     // List all online disks.
-    onlineDisks, modTime, dataDir := listOnlineDisks(disks, metaArr, errs)
+    onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)

     // Pick latest valid metadata.
-    fi, err = pickValidFileInfo(ctx, metaArr, modTime, dataDir, readQuorum)
+    fi, err = pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
     if err != nil {
         return fi, nil, nil, err
     }

+    filterOnlineDisksInplace(fi, metaArr, onlineDisks)
+
     // if one of the disk is offline, return right here no need
     // to attempt a heal on the object.
     if countErrs(errs, errDiskNotFound) > 0 {

@@ -424,7 +428,7 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
             missingBlocks++
             continue
         }
-        if metaArr[i].IsValid() && metaArr[i].ModTime.Equal(fi.ModTime) && metaArr[i].DataDir == fi.DataDir {
+        if metaArr[i].IsValid() && metaArr[i].ModTime.Equal(fi.ModTime) {
             continue
         }
         missingBlocks++

@@ -1455,17 +1459,20 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object s
     }

     // List all online disks.
-    _, modTime, dataDir := listOnlineDisks(disks, metaArr, errs)
+    onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)

     // Pick latest valid metadata.
-    fi, err := pickValidFileInfo(ctx, metaArr, modTime, dataDir, readQuorum)
+    fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
     if err != nil {
         return ObjectInfo{}, toObjectErr(err, bucket, object)
     }

     if fi.Deleted {
         return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
     }

+    filterOnlineDisksInplace(fi, metaArr, onlineDisks)
+
     // if version-id is not specified retention is supposed to be set on the latest object.
     if opts.VersionID == "" {
         opts.VersionID = fi.VersionID

@@ -1483,7 +1490,7 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object s
     fi.ModTime = opts.MTime
     fi.VersionID = opts.VersionID

-    if err = er.updateObjectMeta(ctx, bucket, object, fi); err != nil {
+    if err = er.updateObjectMeta(ctx, bucket, object, fi, onlineDisks); err != nil {
         return ObjectInfo{}, toObjectErr(err, bucket, object)
     }

@@ -1512,10 +1519,10 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
     }

     // List all online disks.
-    _, modTime, dataDir := listOnlineDisks(disks, metaArr, errs)
+    onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)

     // Pick latest valid metadata.
-    fi, err := pickValidFileInfo(ctx, metaArr, modTime, dataDir, readQuorum)
+    fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
     if err != nil {
         return ObjectInfo{}, toObjectErr(err, bucket, object)
     }

@@ -1526,12 +1533,14 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
         return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
     }

+    filterOnlineDisksInplace(fi, metaArr, onlineDisks)
+
     fi.Metadata[xhttp.AmzObjectTagging] = tags
     for k, v := range opts.UserDefined {
         fi.Metadata[k] = v
     }

-    if err = er.updateObjectMeta(ctx, bucket, object, fi); err != nil {
+    if err = er.updateObjectMeta(ctx, bucket, object, fi, onlineDisks); err != nil {
         return ObjectInfo{}, toObjectErr(err, bucket, object)
     }

@@ -1539,30 +1548,28 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
 }

 // updateObjectMeta will update the metadata of a file.
-func (er erasureObjects) updateObjectMeta(ctx context.Context, bucket, object string, fi FileInfo) error {
+func (er erasureObjects) updateObjectMeta(ctx context.Context, bucket, object string, fi FileInfo, onlineDisks []StorageAPI) error {
     if len(fi.Metadata) == 0 {
         return nil
     }

-    disks := er.getDisks()
-
-    g := errgroup.WithNErrs(len(disks))
+    g := errgroup.WithNErrs(len(onlineDisks))

     // Start writing `xl.meta` to all disks in parallel.
-    for index := range disks {
+    for index := range onlineDisks {
         index := index
         g.Go(func() error {
-            if disks[index] == nil {
+            if onlineDisks[index] == nil {
                 return errDiskNotFound
             }
-            return disks[index].UpdateMetadata(ctx, bucket, object, fi)
+            return onlineDisks[index].UpdateMetadata(ctx, bucket, object, fi)
         }, index)
     }

     // Wait for all the routines.
     mErrs := g.Wait()

-    return reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, getWriteQuorum(len(disks)))
+    return reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, getWriteQuorum(len(onlineDisks)))
 }

 // DeleteObjectTags - delete object tags from an existing object


@@ -700,7 +700,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache
             for k, v := range meta {
                 fi.Metadata[k] = v
             }
-            err := er.updateObjectMeta(ctx, minioMetaBucket, o.objectPath(0), fi)
+            err := er.updateObjectMeta(ctx, minioMetaBucket, o.objectPath(0), fi, er.getDisks())
             if err == nil {
                 break
             }


@@ -136,7 +136,7 @@ func renameAll(srcFilePath, dstFilePath string) (err error) {
     switch {
     case isSysErrNotDir(err) && !osIsNotExist(err):
         // Windows can have both isSysErrNotDir(err) and osIsNotExist(err) returning
-        // true if the source file path contains an inexistant directory. In that case,
+        // true if the source file path contains an non-existent directory. In that case,
         // we want to return errFileNotFound instead, which will honored in subsequent
         // switch cases
         return errFileAccessDenied


@@ -186,6 +186,21 @@ type FileInfo struct {
     Idx int `msg:"i"`
 }

+// GetDataDir returns an expected dataDir given FileInfo
+// - deleteMarker returns "delete-marker"
+// - returns "legacy" if FileInfo is XLV1 and DataDir is
+//   empty, returns DataDir otherwise
+// - returns "dataDir"
+func (fi FileInfo) GetDataDir() string {
+    if fi.Deleted {
+        return "delete-marker"
+    }
+    if fi.XLV1 && fi.DataDir == "" {
+        return "legacy"
+    }
+    return fi.DataDir
+}
+
 // InlineData returns true if object contents are inlined alongside its metadata.
 func (fi FileInfo) InlineData() bool {
     _, ok := fi.Metadata[ReservedMetadataPrefixLower+"inline-data"]


@@ -105,8 +105,6 @@ type xlStorage struct {
     formatLastCheck time.Time

     diskInfoCache timedValue

-    ctx context.Context
-
     sync.RWMutex

     // mutex to prevent concurrent read operations overloading walks.

@@ -255,7 +253,6 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
         diskPath:   path,
         endpoint:   ep,
         globalSync: env.Get(config.EnvFSOSync, config.EnableOff) == config.EnableOn,
-        ctx:        GlobalContext,
         rootDisk:   rootDisk,
         poolIndex:  -1,
         setIndex:   -1,

@@ -1952,6 +1949,9 @@ func (s *xlStorage) Delete(ctx context.Context, volume string, path string, recu
 // RenameData - rename source path to destination path atomically, metadata and data directory.
 func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string) (err error) {
     defer func() {
+        if err != nil {
+            logger.LogIf(ctx, err)
+        }
         if err == nil {
             if s.globalSync {
                 globalSync()

@@ -2045,7 +2045,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
     if len(dstBuf) > 0 {
         if isXL2V1Format(dstBuf) {
             if err = xlMeta.Load(dstBuf); err != nil {
-                logger.LogIf(s.ctx, err)
+                logger.LogIf(ctx, err)
                 // Data appears corrupt. Drop data.
                 xlMeta = xlMetaV2{}
             }

@@ -2054,11 +2054,11 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
             xlMetaLegacy := &xlMetaV1Object{}
             var json = jsoniter.ConfigCompatibleWithStandardLibrary
             if err := json.Unmarshal(dstBuf, xlMetaLegacy); err != nil {
-                logger.LogIf(s.ctx, err)
+                logger.LogIf(ctx, err)
                 // Data appears corrupt. Drop data.
             } else {
                 if err = xlMeta.AddLegacy(xlMetaLegacy); err != nil {
-                    logger.LogIf(s.ctx, err)
+                    logger.LogIf(ctx, err)
                 }
                 legacyPreserved = true
             }

@@ -2195,7 +2195,6 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
             s.deleteFile(dstVolumeDir, legacyDataPath, true)
         }
         s.deleteFile(dstVolumeDir, dstDataPath, false)
-
         if err != errFileNotFound {
             logger.LogIf(ctx, err)
         }

@@ -2373,8 +2372,8 @@ func (s *xlStorage) VerifyFile(ctx context.Context, volume, path string, fi File
         errVolumeNotFound,
         errFileCorrupt,
     }...) {
-        logger.GetReqInfo(s.ctx).AppendTags("disk", s.String())
-        logger.LogIf(s.ctx, err)
+        logger.GetReqInfo(ctx).AppendTags("disk", s.String())
+        logger.LogIf(ctx, err)
     }
     return err
 }