Mirror of https://github.com/minio/minio.git
XL: Remove deadcode unionChecksumInfo. (#2261)
commit 5d118141cd
parent 646ff2c64d
@@ -34,7 +34,6 @@ const (
     globalMinioCertFile = "public.crt"
     globalMinioKeyFile = "private.key"
     globalMinioConfigFile = "config.json"
-    globalMinioProfilePath = "profile"
     // Add new global values here.
 )
@@ -55,8 +54,6 @@ var (

 // global colors.
 var (
-    colorWhite = color.New(color.FgWhite).SprintfFunc()
-    colorWhiteBold = color.New(color.FgWhite, color.Bold).SprintfFunc()
-    colorBlue = color.New(color.FgBlue).SprintfFunc()
-    colorBold = color.New(color.Bold).SprintFunc()
+    colorBlue = color.New(color.FgBlue).SprintfFunc()
+    colorBold = color.New(color.Bold).SprintFunc()
 )
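For context only, and not part of the diff: the two retained helpers come from the github.com/fatih/color package used by minio, where SprintfFunc and SprintFunc return Sprintf- and Sprint-style functions that wrap their output in ANSI color codes. A minimal standalone sketch of that usage follows; the printed messages are made up for illustration.

    package main

    import (
        "fmt"

        "github.com/fatih/color"
    )

    func main() {
        // SprintfFunc gives a Sprintf-style formatter whose output is colored blue;
        // SprintFunc gives a Sprint-style formatter whose output is rendered bold.
        colorBlue := color.New(color.FgBlue).SprintfFunc()
        colorBold := color.New(color.Bold).SprintFunc()

        // Hypothetical startup-style messages, purely illustrative.
        fmt.Println(colorBold("Endpoint:"), colorBlue("http://%s:%d", "localhost", 9000))
    }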
@@ -40,8 +40,3 @@ func getIPsFromHosts(hosts []string) (ips []net.IP) {
     sort.Sort(sort.Reverse(byLastOctet(ips)))
     return ips
 }
-
-// getHostToIP - parses a host string into net.IP value.
-func getHostToIP(host string) net.IP {
-    return net.ParseIP(host)
-}
@@ -18,7 +18,6 @@ package main

 import (
     "encoding/json"
-    "errors"
     "hash/crc32"
     "path"
 )
@@ -76,17 +75,22 @@ func diskCount(disks []StorageAPI) int {
     return diskCount
 }

-// hashOrder - returns consistent hashed integers of count slice, based on the input token.
-func hashOrder(token string, count int) []int {
-    if count < 0 {
-        panic(errors.New("hashOrder count cannot be negative"))
+// hashOrder - hashes input key to return returns consistent
+// hashed integer slice. Returned integer order is salted
+// with an input key. This results in consistent order.
+// NOTE: collisions are fine, we are not looking for uniqueness
+// in the slices returned.
+func hashOrder(key string, cardinality int) []int {
+    if cardinality < 0 {
+        // Returns an empty int slice for negative cardinality.
+        return nil
     }
-    nums := make([]int, count)
-    tokenCrc := crc32.Checksum([]byte(token), crc32.IEEETable)
+    nums := make([]int, cardinality)
+    keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable)

-    start := int(uint32(tokenCrc)%uint32(count)) | 1
-    for i := 1; i <= count; i++ {
-        nums[i-1] = 1 + ((start + i) % count)
+    start := int(uint32(keyCrc)%uint32(cardinality)) | 1
+    for i := 1; i <= cardinality; i++ {
+        nums[i-1] = 1 + ((start + i) % cardinality)
     }
     return nums
 }
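As a side note, not part of the commit itself: the renamed hashOrder is easy to exercise on its own. The sketch below simply restates the function from the new side of the hunk above in a standalone program and prints the order for the key "object" with a cardinality of 16, which per the TestHashOrder case in this commit should come out as [15 16 1 2 ... 14].

    package main

    import (
        "fmt"
        "hash/crc32"
    )

    // hashOrder, restated from the new side of the hunk above: it maps a key to a
    // consistent, key-salted ordering of the integers 1..cardinality.
    func hashOrder(key string, cardinality int) []int {
        if cardinality < 0 {
            // Negative cardinality yields nil instead of the old panic.
            return nil
        }
        nums := make([]int, cardinality)
        keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable)

        start := int(uint32(keyCrc)%uint32(cardinality)) | 1
        for i := 1; i <= cardinality; i++ {
            nums[i-1] = 1 + ((start + i) % cardinality)
        }
        return nums
    }

    func main() {
        // The same key always yields the same order; for "object" and 16 disks the
        // updated test expects [15 16 1 2 3 4 5 6 7 8 9 10 11 12 13 14].
        fmt.Println(hashOrder("object", 16))
        // Negative cardinality now returns nil rather than panicking.
        fmt.Println(hashOrder("object", -1))
    }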
@@ -108,44 +112,6 @@ func readXLMeta(disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1,
     return xlMeta, nil
 }

-// Uses a map to find union of checksums of parts that were concurrently written
-// but committed before this part. N B For a different, concurrent upload of
-// the same part, the ongoing request's data/metadata prevails.
-// cur - corresponds to parts written to disk before the ongoing putObjectPart request
-// updated - corresponds to parts written to disk while the ongoing putObjectPart is in progress
-// curPartName - name of the part that is being written
-// returns []checkSumInfo containing the set union of checksums of parts that
-// have been written so far incl. the part being written.
-func unionChecksumInfos(cur []checkSumInfo, updated []checkSumInfo, curPartName string) []checkSumInfo {
-    checksumSet := make(map[string]checkSumInfo)
-    var checksums []checkSumInfo
-
-    checksums = cur
-    for _, cksum := range checksums {
-        checksumSet[cksum.Name] = cksum
-    }
-
-    checksums = updated
-    for _, cksum := range checksums {
-        // skip updating checksum of the part that is
-        // written in this request because the checksum
-        // from cur, corresponding to this part,
-        // should remain.
-        if cksum.Name == curPartName {
-            continue
-        }
-        checksumSet[cksum.Name] = cksum
-    }
-
-    // Form the checksumInfo to be committed in xl.json
-    // from the map.
-    var finalChecksums []checkSumInfo
-    for _, cksum := range checksumSet {
-        finalChecksums = append(finalChecksums, cksum)
-    }
-    return finalChecksums
-}
-
 // Return ordered partsMetadata depeinding on distribution.
 func getOrderedPartsMetadata(distribution []int, partsMetadata []xlMetaV1) (orderedPartsMetadata []xlMetaV1) {
     orderedPartsMetadata = make([]xlMetaV1, len(partsMetadata))
@@ -16,9 +16,13 @@

 package main

-import "testing"
+import (
+    "reflect"
+    "testing"
+)

-// Test for reduceErrs.
+// Test for reduceErrs, reduceErr reduces collection
+// of errors into a single maximal error with in the list.
 func TestReduceErrs(t *testing.T) {
     // List all of all test cases to validate various cases of reduce errors.
     testCases := []struct {
@@ -57,48 +61,35 @@ func TestReduceErrs(t *testing.T) {
     }
 }

-// Test for unionChecksums
-func TestUnionChecksumInfos(t *testing.T) {
-    cur := []checkSumInfo{
-        {"part.1", "dummy", "cur-hash.1"},
-        {"part.2", "dummy", "cur-hash.2"},
-        {"part.3", "dummy", "cur-hash.3"},
-        {"part.4", "dummy", "cur-hash.4"},
-        {"part.5", "dummy", "cur-hash.5"},
+// TestHashOrder - test order of ints in array
+func TestHashOrder(t *testing.T) {
+    testCases := []struct {
+        objectName string
+        hashedOrder []int
+    }{
+        // cases which should pass the test.
+        // passing in valid object name.
+        {"object", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
+        {"The Shining Script <v1>.pdf", []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}},
+        {"Cost Benefit Analysis (2009-2010).pptx", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
+        {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}},
+        {"SHØRT", []int{11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
+        {"There are far too many object names, and far too few bucket names!", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
+        {"a/b/c/", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}},
+        {"/a/b/c", []int{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6}},
+        {string([]byte{0xff, 0xfe, 0xfd}), []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
     }
-    updated := []checkSumInfo{
-        {"part.1", "dummy", "updated-hash.1"},
-        {"part.2", "dummy", "updated-hash.2"},
-        {"part.3", "dummy", "updated-hash.3"},
-    }
-    curPartcksum := cur[0] // part.1 is the current part being written

-    // Verify that hash of current part being written must be from cur []checkSumInfo
-    finalChecksums := unionChecksumInfos(cur, updated, curPartcksum.Name)
-    for _, cksum := range finalChecksums {
-        if cksum.Name == curPartcksum.Name && cksum.Hash != curPartcksum.Hash {
-            t.Errorf("expected Hash = %s but received Hash = %s\n", curPartcksum.Hash, cksum.Hash)
+    // Tests hashing order to be consistent.
+    for i, testCase := range testCases {
+        hashedOrder := hashOrder(testCase.objectName, 16)
+        if !reflect.DeepEqual(testCase.hashedOrder, hashedOrder) {
+            t.Errorf("Test case %d: Expected \"%#v\" but failed \"%#v\"", i+1, testCase.hashedOrder, hashedOrder)
         }
     }

-    // Verify that all part checksums are present in the union and nothing more.
-    // Map to store all unique part names
-    allPartNames := make(map[string]struct{})
-    // Insert part names from cur and updated []checkSumInfo
-    for _, cksum := range cur {
-        allPartNames[cksum.Name] = struct{}{}
-    }
-    for _, cksum := range updated {
-        allPartNames[cksum.Name] = struct{}{}
-    }
-    // All parts must have an entry in the []checkSumInfo returned from unionChecksums
-    for _, finalcksum := range finalChecksums {
-        if _, ok := allPartNames[finalcksum.Name]; !ok {
-            t.Errorf("expected to find %s but not present in the union, where current part is %s\n",
-                finalcksum.Name, curPartcksum.Name)
-        }
-    }
-    if len(finalChecksums) != len(allPartNames) {
-        t.Error("Union of Checksums doesn't have same number of elements as unique parts in total")
+    // Tests hashing order to fail for when order is '-1'.
+    if hashedOrder := hashOrder("This will fail", -1); hashedOrder != nil {
+        t.Errorf("Test: Expect \"nil\" but failed \"%#v\"", hashedOrder)
     }
 }
@@ -19,7 +19,6 @@ package main
 import (
     "os"
     "path/filepath"
-    "reflect"
     "testing"
 )
@@ -137,30 +136,3 @@ func TestNewXL(t *testing.T) {
         t.Fatalf("Unable to initialize erasure, %s", err)
     }
 }
-
-// TestHashOrder - test order of ints in array
-func TestHashOrder(t *testing.T) {
-    testCases := []struct {
-        objectName string
-        hashedOrder []int
-    }{
-        // cases which should pass the test.
-        // passing in valid object name.
-        {"object", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
-        {"The Shining Script <v1>.pdf", []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}},
-        {"Cost Benefit Analysis (2009-2010).pptx", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
-        {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}},
-        {"SHØRT", []int{11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
-        {"There are far too many object names, and far too few bucket names!", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
-        {"a/b/c/", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}},
-        {"/a/b/c", []int{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6}},
-        {string([]byte{0xff, 0xfe, 0xfd}), []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
-    }
-
-    for i, testCase := range testCases {
-        hashedOrder := hashOrder(testCase.objectName, 16)
-        if !reflect.DeepEqual(testCase.hashedOrder, hashedOrder) {
-            t.Errorf("Test case %d: Expected \"%#v\" but failed \"%#v\"", i+1, testCase.hashedOrder, hashedOrder)
-        }
-    }
-}