Merge pull request #2149 from harshavardhana/hash-order

XL/metadata: use new hashOrder algorithm for newXLMeta. (#2147)

Commit cb415ef12e
@@ -4,9 +4,9 @@ language: go

 os:
 - linux
-## Turning off for now.
-# - osx
-# osx_image: xcode7.2
+- osx
+
+osx_image: xcode7.2

 env:
 - ARCH=x86_64
@@ -16,7 +16,10 @@

 package main

-import "path"
+import (
+	"path"
+	"time"
+)

 // getLoadBalancedQuorumDisks - fetches load balanced sufficiently
 // randomized quorum disk slice.
@@ -29,7 +32,7 @@ func (xl xlObjects) getLoadBalancedQuorumDisks() (disks []StorageAPI) {
 // randomized) disk slice.
 func (xl xlObjects) getLoadBalancedDisks() (disks []StorageAPI) {
 	// Based on the random shuffling return back randomized disks.
-	for _, i := range randInts(len(xl.storageDisks)) {
+	for _, i := range hashOrder(time.Now().UTC().String(), len(xl.storageDisks)) {
 		disks = append(disks, xl.storageDisks[i-1])
 	}
 	return disks
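A note on this call site: hashOrder is deterministic for a fixed token, so the load-balancing path passes the current UTC time string as the token. The resulting disk order is a rotation whose starting point still varies from call to call, while newXLMetaV1 below keys the order on the object name so the erasure distribution becomes reproducible. The returned indices are 1-based, hence the xl.storageDisks[i-1] lookup.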
@@ -107,9 +107,8 @@ type xlMetaV1 struct {
 	Parts []objectPartInfo `json:"parts,omitempty"`
 }

-// newXLMetaV1 - initializes new xlMetaV1, adds version, allocates a
-// fresh erasure info.
-func newXLMetaV1(dataBlocks, parityBlocks int) (xlMeta xlMetaV1) {
+// newXLMetaV1 - initializes new xlMetaV1, adds version, allocates a fresh erasure info.
+func newXLMetaV1(object string, dataBlocks, parityBlocks int) (xlMeta xlMetaV1) {
 	xlMeta = xlMetaV1{}
 	xlMeta.Version = "1.0.0"
 	xlMeta.Format = "xl"

@@ -119,7 +118,7 @@ func newXLMetaV1(dataBlocks, parityBlocks int) (xlMeta xlMetaV1) {
 		DataBlocks:   dataBlocks,
 		ParityBlocks: parityBlocks,
 		BlockSize:    blockSizeV1,
-		Distribution: randInts(dataBlocks + parityBlocks),
+		Distribution: hashOrder(object, dataBlocks+parityBlocks),
 	}
 	return xlMeta
 }
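The practical effect of threading the object name through newXLMetaV1 is that the erasure Distribution becomes a pure function of the object name rather than a fresh random shuffle on every call. The standalone sketch below is illustrative only (it is not minio code; the object name and function name are invented) and mirrors the hashOrder rotation to show that two independent computations for the same object now agree:

package main

import (
	"fmt"
	"hash/crc32"
	"reflect"
)

// distributionSketch mirrors hashOrder(object, dataBlocks+parityBlocks) as
// introduced in this change (the negative-count guard is omitted for brevity).
func distributionSketch(object string, dataBlocks, parityBlocks int) []int {
	count := dataBlocks + parityBlocks
	nums := make([]int, count)
	crc := crc32.Checksum([]byte(object), crc32.IEEETable)
	start := int(crc%uint32(count)) | 1
	for i := 1; i <= count; i++ {
		nums[i-1] = 1 + ((start + i) % count)
	}
	return nums
}

func main() {
	a := distributionSketch("bucket/photos/2016.jpg", 8, 8) // hypothetical object name
	b := distributionSketch("bucket/photos/2016.jpg", 8, 8)
	fmt.Println(a)                       // same 1-based layout on every run
	fmt.Println(reflect.DeepEqual(a, b)) // true; with randInts these would almost always differ
}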
@@ -39,7 +39,7 @@ func TestXLMetaV1(t *testing.T) {
 	}

 	// Create a XLMetaV1 structure to test on.
-	meta := newXLMetaV1(8, 8)
+	meta := newXLMetaV1("minio", 8, 8)

 	// Add 5 parts.
 	for _, test := range testCases {
@@ -252,7 +252,7 @@ func (xl xlObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
 // all the disks. `uploads.json` carries metadata regarding on going
 // multipart operation on the object.
 func (xl xlObjects) newMultipartUpload(bucket string, object string, meta map[string]string) (uploadID string, err error) {
-	xlMeta := newXLMetaV1(xl.dataBlocks, xl.parityBlocks)
+	xlMeta := newXLMetaV1(object, xl.dataBlocks, xl.parityBlocks)
 	// If not set default to "application/octet-stream"
 	if meta["content-type"] == "" {
 		contentType := "application/octet-stream"
@@ -368,7 +368,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 	tempObj := uniqueID

 	// Initialize xl meta.
-	xlMeta := newXLMetaV1(xl.dataBlocks, xl.parityBlocks)
+	xlMeta := newXLMetaV1(object, xl.dataBlocks, xl.parityBlocks)

 	// Read metadata associated with the object from all disks.
 	partsMetadata, errs := xl.readAllXLMetadata(bucket, object)
@@ -18,9 +18,9 @@ package main

 import (
 	"encoding/json"
-	"math/rand"
+	"errors"
+	"hash/crc32"
 	"path"
-	"time"
 )

 // Validates if we have quorum based on the errors with errDiskNotFound.
@@ -48,19 +48,19 @@ func diskCount(disks []StorageAPI) int {
 	return diskCount
 }

-// randInts - uses Knuth Fisher-Yates shuffle algorithm for generating uniform shuffling.
-func randInts(count int) []int {
-	rand.Seed(time.Now().UTC().UnixNano()) // Seed with current time.
-	ints := make([]int, count)
-	for i := 0; i < count; i++ {
-		ints[i] = i + 1
+// hashOrder - returns consistent hashed integers of count slice, based on the input token.
+func hashOrder(token string, count int) []int {
+	if count < 0 {
+		panic(errors.New("hashOrder count cannot be negative"))
 	}
-	for i := 0; i < count; i++ {
-		// Choose index uniformly in [i, count-1]
-		r := i + rand.Intn(count-i)
-		ints[r], ints[i] = ints[i], ints[r]
+	nums := make([]int, count)
+	tokenCrc := crc32.Checksum([]byte(token), crc32.IEEETable)
+
+	start := int(uint32(tokenCrc)%uint32(count)) | 1
+	for i := 1; i <= count; i++ {
+		nums[i-1] = 1 + ((start + i) % count)
 	}
-	return ints
+	return nums
 }

 // readXLMeta reads `xl.json` and returns back XL metadata structure.
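For a concrete trace of the new ordering: the token is CRC32-hashed, reduced modulo count, and forced odd by the trailing | 1, and the loop then emits the 1-based rotation that this start offset selects. Working backwards from the TestHashOrder vectors below, "object" with count 16 gives start = 13 and therefore the order 15, 16, 1, 2, …, 14. A minimal standalone copy (standard library only, with the negative-count panic dropped) that reproduces that vector:

package main

import (
	"fmt"
	"hash/crc32"
)

// hashOrderSketch copies the rotation from the new hashOrder above,
// minus the negative-count guard.
func hashOrderSketch(token string, count int) []int {
	nums := make([]int, count)
	tokenCrc := crc32.Checksum([]byte(token), crc32.IEEETable)
	// For token "object" and count 16 this works out to 13 (odd, because of "| 1").
	start := int(uint32(tokenCrc)%uint32(count)) | 1
	for i := 1; i <= count; i++ {
		nums[i-1] = 1 + ((start + i) % count)
	}
	return nums
}

func main() {
	// Expected, per the test vector: [15 16 1 2 3 4 5 6 7 8 9 10 11 12 13 14]
	fmt.Println(hashOrderSketch("object", 16))
}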
@@ -19,6 +19,7 @@ package main
 import (
 	"os"
 	"path/filepath"
+	"reflect"
 	"testing"
 )

@@ -136,3 +137,30 @@ func TestNewXL(t *testing.T) {
 		t.Fatalf("Unable to initialize erasure, %s", err)
 	}
 }
+
+// TestHashOrder - test order of ints in array
+func TestHashOrder(t *testing.T) {
+	testCases := []struct {
+		objectName  string
+		hashedOrder []int
+	}{
+		// cases which should pass the test.
+		// passing in valid object name.
+		{"object", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
+		{"The Shining Script <v1>.pdf", []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}},
+		{"Cost Benefit Analysis (2009-2010).pptx", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
+		{"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}},
+		{"SHØRT", []int{11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
+		{"There are far too many object names, and far too few bucket names!", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
+		{"a/b/c/", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}},
+		{"/a/b/c", []int{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6}},
+		{string([]byte{0xff, 0xfe, 0xfd}), []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
+	}
+
+	for i, testCase := range testCases {
+		hashedOrder := hashOrder(testCase.objectName, 16)
+		if !reflect.DeepEqual(testCase.hashedOrder, hashedOrder) {
+			t.Errorf("Test case %d: Expected \"%#v\" but failed \"%#v\"", i+1, testCase.hashedOrder, hashedOrder)
+		}
+	}
+}
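To exercise just the new test against a checkout, something along these lines should work from the repository root (illustrative invocation; it assumes these files live in the root package main, as the hunks above suggest):

	go test -v -run TestHashOrder .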