Mirror of https://github.com/minio/minio.git (synced 2024-12-24 06:05:55 -05:00)
Fixing ineffassign errors (#2608)
This commit is contained in:
parent a1f922315b
commit 1ce339abeb
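For context, ineffassign is a Go linter that reports assignments whose values are never read before being overwritten or going out of scope. The hunks below apply three recurring fixes: check an error that was previously ignored, discard intentionally unused results with the blank identifier, or replace an initialization that is always overwritten with a plain declaration. The sketch that follows is a minimal, self-contained illustration of those patterns; it is not code from this repository, and doWork is a made-up helper.

// ineffassign_sketch.go: a standalone illustration of the warning class this
// commit silences. doWork is a placeholder, not a minio function.
package main

import (
	"errors"
	"fmt"
)

func doWork() (string, error) { return "ok", errors.New("disk offline") }

func main() {
	// Flagged pattern:
	//	res, err := doWork()
	//	res, err = doWork() // the first res and err are never read

	// Fix 1 (the test hunks): check the error instead of silently overwriting it.
	res, err := doWork()
	if err != nil {
		fmt.Println("doWork failed:", err)
	}
	fmt.Println(res)

	// Fix 2 (PutObjectPart, TestByteErrors): discard values that are
	// intentionally unused by assigning them to the blank identifier.
	_, err = doWork()
	if err != nil {
		fmt.Println("second call failed:", err)
	}

	// Fix 3 (DamerauLevenshteinDistance): replace `cost := 0` that is always
	// overwritten with a declaration that every branch then assigns.
	var cost int
	if len(res) > 0 {
		cost = 0
	} else {
		cost = 1
	}
	fmt.Println("cost:", cost)
}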
@@ -44,9 +44,9 @@ func DamerauLevenshteinDistance(a string, b string) int {
 	for j := 0; j <= len(b); j++ {
 		d[0][j] = j
 	}
+	var cost int
 	for i := 1; i <= len(a); i++ {
 		for j := 1; j <= len(b); j++ {
-			cost := 0
 			if a[i-1] == b[j-1] {
 				cost = 0
 			} else {
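The hunk above touches only the cost declaration, so for orientation here is a compact, hypothetical sketch of the optimal-string-alignment variant of Damerau-Levenshtein distance that the surrounding function computes; it is an assumption-based reconstruction of the textbook algorithm, not the repository's actual implementation.

// dlDistance is a self-contained sketch of optimal-string-alignment
// Damerau-Levenshtein distance, shown only to put the `var cost int`
// change in context; it is not the minio function.
package main

import "fmt"

func dlDistance(a, b string) int {
	d := make([][]int, len(a)+1)
	for i := range d {
		d[i] = make([]int, len(b)+1)
		d[i][0] = i // deleting i characters from a
	}
	for j := 0; j <= len(b); j++ {
		d[0][j] = j // inserting j characters of b
	}
	var cost int
	for i := 1; i <= len(a); i++ {
		for j := 1; j <= len(b); j++ {
			if a[i-1] == b[j-1] {
				cost = 0
			} else {
				cost = 1
			}
			// deletion, insertion, substitution
			d[i][j] = minInt(d[i-1][j]+1, minInt(d[i][j-1]+1, d[i-1][j-1]+cost))
			// transposition of adjacent characters
			if i > 1 && j > 1 && a[i-1] == b[j-2] && a[i-2] == b[j-1] {
				d[i][j] = minInt(d[i][j], d[i-2][j-2]+cost)
			}
		}
	}
	return d[len(a)][len(b)]
}

func minInt(x, y int) int {
	if x < y {
		return x
	}
	return y
}

func main() {
	fmt.Println(dlDistance("bucket", "buckte")) // 1: one transposition
}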
@@ -66,7 +66,11 @@ func TestErasureHealFile(t *testing.T) {
 	copy(latest, disks)
 	latest[0] = nil
 	outDated[0] = disks[0]
+
 	healCheckSums, err := erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
+	if err != nil {
+		t.Fatal(err)
+	}
 	// Checksum of the healed file should match.
 	if checkSums[0] != healCheckSums[0] {
 		t.Error("Healing failed, data does not match.")
@@ -116,7 +120,7 @@ func TestErasureHealFile(t *testing.T) {
 		latest[index] = nil
 		outDated[index] = disks[index]
 	}
-	healCheckSums, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
+	_, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
 	if err == nil {
 		t.Error("Expected erasureHealFile() to fail when the number of available disks <= parityBlocks")
 	}
@@ -721,7 +721,7 @@ func TestNsLockMapDeleteLockInfoEntryForVolumePath(t *testing.T) {
 		t.Fatalf("Entry for <volume> %s, <path> %s should have existed.", param.volume, param.path)
 	}
 	// first delete the entry for the operation ID.
-	err = nsMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
+	_ = nsMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
 	actualErr = nsMutex.deleteLockInfoEntryForVolumePath(param)
 	if actualErr != nil {
 		t.Fatalf("Expected the error to be <nil>, but got <ERROR> %s", actualErr)
@@ -38,7 +38,7 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
 
 	errMsg := "Bucket not found: minio-bucket"
 	// opearation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist.
-	uploadID, err := obj.NewMultipartUpload(bucket, object, nil)
+	_, err := obj.NewMultipartUpload(bucket, object, nil)
 	if err == nil {
 		t.Fatalf("%s: Expected to fail since the NewMultipartUpload is intialized on a non-existent bucket.", instanceType)
 	}
@@ -53,7 +53,7 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
 		t.Fatalf("%s : %s", instanceType, err.Error())
 	}
 
-	uploadID, err = obj.NewMultipartUpload(bucket, object, nil)
+	uploadID, err := obj.NewMultipartUpload(bucket, object, nil)
 	if err != nil {
 		t.Fatalf("%s : %s", instanceType, err.Error())
 	}
@@ -349,10 +349,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	// Size of object.
 	size := objInfo.Size
 
 	// Save metadata.
-	metadata := make(map[string]string)
-	// Save other metadata if available.
-	metadata = objInfo.UserDefined
+	metadata := objInfo.UserDefined
 
 	// Do not set `md5sum` as CopyObject will not keep the
 	// same md5sum as the source.
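One note on the CopyObjectHandler hunk above: the `make(map[string]string)` result was dead because the very next statement rebinds the variable to `objInfo.UserDefined`. In Go, assigning one map to another rebinds the variable to the same underlying map rather than copying entries, so collapsing the three lines into `metadata := objInfo.UserDefined` preserves behavior while dropping the wasted allocation. A tiny hypothetical sketch follows; the names src, dst, and dst2 are not from the handler.

package main

import "fmt"

func main() {
	src := map[string]string{"content-type": "text/plain"}

	// Pre-commit shape: the fresh allocation is never used, because the next
	// assignment rebinds dst to src's underlying map.
	dst := make(map[string]string)
	dst = src

	// Post-commit shape: one step, same result.
	dst2 := src

	// All three names now alias the same map.
	dst["x-amz-meta-note"] = "aliased"
	fmt.Println(dst2["x-amz-meta-note"]) // prints "aliased"
}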
@@ -625,6 +625,9 @@ func testListBuckets(obj ObjectLayer, instanceType string, c TestErrHandler) {
 
 	// add three and test exists + prefix.
 	err = obj.MakeBucket("bucket22")
+	if err != nil {
+		c.Fatalf("%s: <ERROR> %s", instanceType, err)
+	}
 
 	buckets, err = obj.ListBuckets()
 	if err != nil {
@@ -89,7 +89,6 @@ func parseDirents(dirPath string, buf []byte) (entries []string, err error) {
 		// Could happen if it was deleted in the middle while
 		// this list was being performed.
 		if os.IsNotExist(err) {
-			err = nil
 			continue
 		}
 		return nil, err
@@ -41,7 +41,9 @@ func TestUNCPaths(t *testing.T) {
 	// Instantiate posix object to manage a disk
 	var err error
 	err = os.Mkdir("c:\\testdisk", 0700)
-
+	if err != nil {
+		t.Fatal(err)
+	}
 	// Cleanup on exit of test
 	defer os.RemoveAll("c:\\testdisk")
 
@@ -74,7 +76,9 @@ func TestUNCPathENOTDIR(t *testing.T) {
 	var err error
 	// Instantiate posix object to manage a disk
 	err = os.Mkdir("c:\\testdisk", 0700)
-
+	if err != nil {
+		t.Fatal(err)
+	}
 	// Cleanup on exit of test
 	defer os.RemoveAll("c:\\testdisk")
 	var fs StorageAPI
@@ -1364,6 +1364,7 @@ func (s *TestSuiteCommon) TestListObjectsHandler(c *C) {
 	c.Assert(response.StatusCode, Equals, http.StatusOK)
 
 	getContent, err := ioutil.ReadAll(response.Body)
+	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(string(getContent), "<Key>bar</Key>"), Equals, true)
 
 	// create listObjectsV2 request with valid parameters
@@ -1377,6 +1378,7 @@ func (s *TestSuiteCommon) TestListObjectsHandler(c *C) {
 	c.Assert(response.StatusCode, Equals, http.StatusOK)
 
 	getContent, err = ioutil.ReadAll(response.Body)
+	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(string(getContent), "<Key>bar</Key>"), Equals, true)
 	c.Assert(strings.Contains(string(getContent), "<Owner><ID></ID><DisplayName></DisplayName></Owner>"), Equals, true)
 
@@ -1960,6 +1962,7 @@ func (s *TestSuiteCommon) TestObjectMultipartAbort(c *C) {
 
 	// execute the HTTP request initiating the new multipart upload.
 	response, err = client.Do(request)
+	c.Assert(err, IsNil)
 	c.Assert(response.StatusCode, Equals, http.StatusOK)
 
 	// parse the response body and obtain the new upload ID.
@@ -1977,6 +1980,7 @@ func (s *TestSuiteCommon) TestObjectMultipartAbort(c *C) {
 
 	// execute the HTTP request initiating the new multipart upload.
 	response, err = client.Do(request)
+	c.Assert(err, IsNil)
 	c.Assert(response.StatusCode, Equals, http.StatusOK)
 
 	// parse the response body and obtain the new upload ID.
@@ -2193,6 +2197,7 @@ func (s *TestSuiteCommon) TestObjectMultipartListError(c *C) {
 	c.Assert(err, IsNil)
 	// execute the HTTP request initiating the new multipart upload.
 	response, err = client.Do(request)
+	c.Assert(err, IsNil)
 	c.Assert(response.StatusCode, Equals, http.StatusOK)
 	// parse the response body and obtain the new upload ID.
 	decoder := xml.NewDecoder(response.Body)
@@ -42,9 +42,6 @@ func newJWT(expiry time.Duration) (*JWT, error) {
 	if serverConfig == nil {
 		return nil, errors.New("Server not initialzed")
 	}
-	if expiry == 0 {
-		expiry = defaultTokenExpiry
-	}
 
 	// Save access, secret keys.
 	cred := serverConfig.GetCredential()
@@ -91,7 +91,7 @@ func TestByteErrors(t *testing.T) {
 		t.Errorf("Expected error, got %v", got)
 	}
 	// Empty string.
-	got, err = strconvBytes("")
+	_, err = strconvBytes("")
 	if err == nil {
 		t.Errorf("Expected error parsing nothing")
 	}
@@ -468,6 +468,9 @@ func newTestWebRPCRequest(rpcMethod string, authorization string, data interface
 	}
 	encapsulatedData := genericJSON{JSONRPC: "2.0", ID: "1", Method: rpcMethod, Params: data}
 	jsonData, err := json.Marshal(encapsulatedData)
+	if err != nil {
+		return nil, err
+	}
 	req, err := newWebRPCRequest(rpcMethod, authorization, bytes.NewReader(jsonData))
 	if err != nil {
 		return nil, err
@@ -379,7 +379,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 	xlMeta := pickValidXLMeta(partsMetadata, modTime)
 
 	onlineDisks = getOrderedDisks(xlMeta.Erasure.Distribution, onlineDisks)
-	partsMetadata = getOrderedPartsMetadata(xlMeta.Erasure.Distribution, partsMetadata)
+	_ = getOrderedPartsMetadata(xlMeta.Erasure.Distribution, partsMetadata)
 
 	// Need a unique name for the part being written in minioMetaBucket to
 	// accommodate concurrent PutObjectPart requests
@@ -126,7 +126,9 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
 	object := "object"
 	// Create object "obj" under bucket "bucket".
 	_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil)
-
+	if err != nil {
+		t.Fatal(err)
+	}
 	// for a 16 disk setup, quorum is 9. To simulate disks not found yet
 	// quorum is available, we remove disks leaving quorum disks behind.
 	for i := range xl.storageDisks[:7] {