mirror of https://github.com/minio/minio.git
Do not use inline data size in xl.meta quorum calculation (#14831)
* Do not use inline data size in xl.meta quorum calculation

Data shards of one object can have different inline/not-inline decisions across disks. This happens with outdated disks when the inline decision changes; for example, enabling the bucket versioning configuration changes the small-file threshold. When the parity of an object becomes low, GET object can return 503 because the xl.meta quorum cannot be calculated, simply because some xl.meta copies have inline data and others do not. This commit therefore stops taking the size of the inline data into consideration when calculating the xl.meta quorum.

* Add tests for simultaneous inline/not-inline object

Co-authored-by: Anis Elleuch <anis@min.io>
This commit is contained in:
parent 5041bfcb5c
commit 77dc99e71d
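To make the failure mode concrete, here is a minimal, self-contained Go sketch of the quorum-hashing idea; fileInfo and metaHash are simplified stand-ins invented for illustration, not MinIO's actual FileInfo or findFileInfoInQuorum. Before this change the per-disk metadata hash mixed in len(Data), so an inline copy and a spilled copy of the same object version landed in different quorum buckets; with the length dropped, they agree.

package main

import (
	"crypto/sha256"
	"fmt"
)

// fileInfo is a simplified stand-in for a per-disk xl.meta entry:
// ModTime and Distribution identify a version, Data holds inlined bytes.
type fileInfo struct {
	ModTime      string
	Distribution []int
	Data         []byte
}

// metaHash builds the quorum key from version-identifying fields.
// includeDataLen toggles the old behavior of also hashing len(Data).
func metaHash(fi fileInfo, includeDataLen bool) string {
	h := sha256.New()
	fmt.Fprint(h, fi.ModTime)
	fmt.Fprintf(h, "%v", fi.Distribution)
	if includeDataLen {
		fmt.Fprintf(h, "%v", len(fi.Data))
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	dist := []int{3, 1, 4, 2}
	inline := fileInfo{ModTime: "2022-04-25T12:00:00Z", Distribution: dist, Data: []byte("small object")}
	spilled := fileInfo{ModTime: "2022-04-25T12:00:00Z", Distribution: dist} // data spilled to part files

	// Old behavior: one version splits into two quorum buckets -> prints false.
	fmt.Println(metaHash(inline, true) == metaHash(spilled, true))

	// New behavior: both copies count toward the same bucket -> prints true.
	fmt.Println(metaHash(inline, false) == metaHash(spilled, false))
}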
@@ -178,10 +178,6 @@ func shuffleDisksAndPartsMetadataByIndex(disks []StorageAPI, metaArr []FileInfo,
 			inconsistent++
 			continue
 		}
-		if len(fi.Data) != len(meta.Data) {
-			inconsistent++
-			continue
-		}
 		if meta.XLV1 != fi.XLV1 {
 			inconsistent++
 			continue
@@ -229,12 +225,6 @@ func shuffleDisksAndPartsMetadata(disks []StorageAPI, partsMetadata []FileInfo,
 			// if object was ever written previously.
 			continue
 		}
-		if !init && len(fi.Data) != len(partsMetadata[index].Data) {
-			// Check for length of data parts only when
-			// fi.ModTime is not empty - ModTime is always set,
-			// if object was ever written previously.
-			continue
-		}
 		if !init && fi.XLV1 != partsMetadata[index].XLV1 {
 			continue
 		}
@@ -297,8 +297,6 @@ func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.
 			fmt.Fprintf(h, "part.%d", part.Number)
 		}
 		fmt.Fprintf(h, "%v", meta.Erasure.Distribution)
-		// make sure that length of Data is same
-		fmt.Fprintf(h, "%v", len(meta.Data))
 
 		// ILM transition fields
 		fmt.Fprint(h, meta.TransitionStatus)
@@ -20,11 +20,14 @@ package cmd
 import (
 	"bytes"
 	"context"
+	"crypto/md5"
 	crand "crypto/rand"
 	"errors"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"strconv"
 	"testing"
@@ -1048,3 +1051,63 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
 		})
 	}
 }
+
+// In some deployments, one object has data inlined in one disk and not inlined in other disks.
+func TestGetObjectInlineNotInline(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// Create a backend with 4 disks named disk{1...4}, this name convention
+	// because we will unzip some object data from a sample archive.
+	const numDisks = 4
+	path, err := ioutil.TempDir(globalTestTmpDir, "minio-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var fsDirs []string
+	for i := 1; i <= numDisks; i++ {
+		fsDirs = append(fsDirs, filepath.Join(path, fmt.Sprintf("disk%d", i)))
+	}
+
+	objLayer, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(fsDirs...))
+	if err != nil {
+		removeRoots(fsDirs)
+		t.Fatal(err)
+	}
+
+	// cleaning up of temporary test directories
+	defer objLayer.Shutdown(context.Background())
+	defer removeRoots(fsDirs)
+
+	// Create a testbucket
+	err = objLayer.MakeBucketWithLocation(ctx, "testbucket", BucketOptions{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Unzip sample object data to the existing disks
+	err = unzipArchive("testdata/xl-meta-inline-notinline.zip", path)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Try to read the object and check its md5sum
+	gr, err := objLayer.GetObjectNInfo(ctx, "testbucket", "file", nil, nil, readLock, ObjectOptions{})
+	if err != nil {
+		t.Fatalf("Expected GetObject to succeed, but failed with %v", err)
+	}
+
+	h := md5.New()
+	_, err = io.Copy(h, gr)
+	if err != nil {
+		t.Fatalf("Expected GetObject reading data to succeed, but failed with %v", err)
+	}
+	gr.Close()
+
+	const expectedHash = "fffb6377948ebea75ad2b8058e849ef5"
+	foundHash := fmt.Sprintf("%x", h.Sum(nil))
+	if foundHash != expectedHash {
+		t.Fatalf("Expected data to have md5sum = `%s`, found `%s`", expectedHash, foundHash)
+	}
+}
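The new test can be run on its own with standard Go tooling from the repository root (nothing project-specific is assumed here):

	go test -v -run TestGetObjectInlineNotInline ./cmd/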
@@ -18,6 +18,7 @@
 package cmd
 
 import (
+	"archive/zip"
 	"bufio"
 	"bytes"
 	"context"
@@ -46,6 +47,7 @@ import (
 	"net/url"
 	"os"
 	"path"
+	"path/filepath"
 	"reflect"
 	"sort"
 	"strconv"
@@ -2323,3 +2325,35 @@ func uploadTestObject(t *testing.T, apiRouter http.Handler, creds auth.Credentia
 		checkRespErr(rec, http.StatusOK)
 	}
 }
+
+// unzip a file into a specific target dir - used to unzip sample data in cmd/testdata/
+func unzipArchive(zipFilePath, targetDir string) error {
+	zipReader, err := zip.OpenReader(zipFilePath)
+	if err != nil {
+		return err
+	}
+	for _, file := range zipReader.Reader.File {
+		zippedFile, err := file.Open()
+		if err != nil {
+			return err
+		}
+		err = func() (err error) {
+			defer zippedFile.Close()
+			extractedFilePath := filepath.Join(targetDir, file.Name)
+			if file.FileInfo().IsDir() {
+				return os.MkdirAll(extractedFilePath, file.Mode())
+			}
+			outputFile, err := os.OpenFile(extractedFilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode())
+			if err != nil {
+				return err
+			}
+			defer outputFile.Close()
+			_, err = io.Copy(outputFile, zippedFile)
+			return err
+		}()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
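One design note on the helper: unzipArchive trusts the archive's entry names, which is acceptable for a checked-in fixture under cmd/testdata/ but would be unsafe on untrusted archives (the classic Zip Slip traversal). If the helper were ever reused outside tests, a guard along these lines is the usual fix; this is a hypothetical hardening sketch, not part of the commit (assumes "strings" is imported):

	// Hypothetical hardening: reject entries whose joined, cleaned path
	// escapes targetDir (filepath.Join already applies filepath.Clean).
	extractedFilePath := filepath.Join(targetDir, file.Name)
	if !strings.HasPrefix(extractedFilePath, filepath.Clean(targetDir)+string(os.PathSeparator)) {
		return fmt.Errorf("illegal file path in archive: %s", file.Name)
	}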
Binary file not shown (the new test fixture cmd/testdata/xl-meta-inline-notinline.zip).