Fixing ineffassign errors (#2608)

This commit is contained in:
Karthic Rao 2016-09-02 11:40:50 +05:30 committed by Harshavardhana
parent a1f922315b
commit 1ce339abeb
14 changed files with 32 additions and 17 deletions

View File

@ -44,9 +44,9 @@ func DamerauLevenshteinDistance(a string, b string) int {
for j := 0; j <= len(b); j++ { for j := 0; j <= len(b); j++ {
d[0][j] = j d[0][j] = j
} }
var cost int
for i := 1; i <= len(a); i++ { for i := 1; i <= len(a); i++ {
for j := 1; j <= len(b); j++ { for j := 1; j <= len(b); j++ {
cost := 0
if a[i-1] == b[j-1] { if a[i-1] == b[j-1] {
cost = 0 cost = 0
} else { } else {

View File

@ -66,7 +66,11 @@ func TestErasureHealFile(t *testing.T) {
copy(latest, disks) copy(latest, disks)
latest[0] = nil latest[0] = nil
outDated[0] = disks[0] outDated[0] = disks[0]
healCheckSums, err := erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo) healCheckSums, err := erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
if err != nil {
t.Fatal(err)
}
// Checksum of the healed file should match. // Checksum of the healed file should match.
if checkSums[0] != healCheckSums[0] { if checkSums[0] != healCheckSums[0] {
t.Error("Healing failed, data does not match.") t.Error("Healing failed, data does not match.")
@ -116,7 +120,7 @@ func TestErasureHealFile(t *testing.T) {
latest[index] = nil latest[index] = nil
outDated[index] = disks[index] outDated[index] = disks[index]
} }
healCheckSums, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo) _, err = erasureHealFile(latest, outDated, "testbucket", "testobject1", "testbucket", "testobject1", 1*1024*1024, blockSize, dataBlocks, parityBlocks, bitRotAlgo)
if err == nil { if err == nil {
t.Error("Expected erasureHealFile() to fail when the number of available disks <= parityBlocks") t.Error("Expected erasureHealFile() to fail when the number of available disks <= parityBlocks")
} }

View File

@ -721,7 +721,7 @@ func TestNsLockMapDeleteLockInfoEntryForVolumePath(t *testing.T) {
t.Fatalf("Entry for <volume> %s, <path> %s should have existed.", param.volume, param.path) t.Fatalf("Entry for <volume> %s, <path> %s should have existed.", param.volume, param.path)
} }
// first delete the entry for the operation ID. // first delete the entry for the operation ID.
err = nsMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID) _ = nsMutex.deleteLockInfoEntryForOps(param, testCases[0].opsID)
actualErr = nsMutex.deleteLockInfoEntryForVolumePath(param) actualErr = nsMutex.deleteLockInfoEntryForVolumePath(param)
if actualErr != nil { if actualErr != nil {
t.Fatalf("Expected the error to be <nil>, but got <ERROR> %s", actualErr) t.Fatalf("Expected the error to be <nil>, but got <ERROR> %s", actualErr)

View File

@ -38,7 +38,7 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
errMsg := "Bucket not found: minio-bucket" errMsg := "Bucket not found: minio-bucket"
// opearation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist. // opearation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist.
uploadID, err := obj.NewMultipartUpload(bucket, object, nil) _, err := obj.NewMultipartUpload(bucket, object, nil)
if err == nil { if err == nil {
t.Fatalf("%s: Expected to fail since the NewMultipartUpload is intialized on a non-existent bucket.", instanceType) t.Fatalf("%s: Expected to fail since the NewMultipartUpload is intialized on a non-existent bucket.", instanceType)
} }
@ -53,7 +53,7 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
uploadID, err = obj.NewMultipartUpload(bucket, object, nil) uploadID, err := obj.NewMultipartUpload(bucket, object, nil)
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }

View File

@ -349,10 +349,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// Size of object. // Size of object.
size := objInfo.Size size := objInfo.Size
// Save metadata.
metadata := make(map[string]string)
// Save other metadata if available. // Save other metadata if available.
metadata = objInfo.UserDefined metadata := objInfo.UserDefined
// Do not set `md5sum` as CopyObject will not keep the // Do not set `md5sum` as CopyObject will not keep the
// same md5sum as the source. // same md5sum as the source.

View File

@ -625,6 +625,9 @@ func testListBuckets(obj ObjectLayer, instanceType string, c TestErrHandler) {
// add three and test exists + prefix. // add three and test exists + prefix.
err = obj.MakeBucket("bucket22") err = obj.MakeBucket("bucket22")
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
buckets, err = obj.ListBuckets() buckets, err = obj.ListBuckets()
if err != nil { if err != nil {

View File

@ -89,7 +89,6 @@ func parseDirents(dirPath string, buf []byte) (entries []string, err error) {
// Could happen if it was deleted in the middle while // Could happen if it was deleted in the middle while
// this list was being performed. // this list was being performed.
if os.IsNotExist(err) { if os.IsNotExist(err) {
err = nil
continue continue
} }
return nil, err return nil, err

View File

@ -41,7 +41,9 @@ func TestUNCPaths(t *testing.T) {
// Instantiate posix object to manage a disk // Instantiate posix object to manage a disk
var err error var err error
err = os.Mkdir("c:\\testdisk", 0700) err = os.Mkdir("c:\\testdisk", 0700)
if err != nil {
t.Fatal(err)
}
// Cleanup on exit of test // Cleanup on exit of test
defer os.RemoveAll("c:\\testdisk") defer os.RemoveAll("c:\\testdisk")
@ -74,7 +76,9 @@ func TestUNCPathENOTDIR(t *testing.T) {
var err error var err error
// Instantiate posix object to manage a disk // Instantiate posix object to manage a disk
err = os.Mkdir("c:\\testdisk", 0700) err = os.Mkdir("c:\\testdisk", 0700)
if err != nil {
t.Fatal(err)
}
// Cleanup on exit of test // Cleanup on exit of test
defer os.RemoveAll("c:\\testdisk") defer os.RemoveAll("c:\\testdisk")
var fs StorageAPI var fs StorageAPI

View File

@ -1364,6 +1364,7 @@ func (s *TestSuiteCommon) TestListObjectsHandler(c *C) {
c.Assert(response.StatusCode, Equals, http.StatusOK) c.Assert(response.StatusCode, Equals, http.StatusOK)
getContent, err := ioutil.ReadAll(response.Body) getContent, err := ioutil.ReadAll(response.Body)
c.Assert(err, IsNil)
c.Assert(strings.Contains(string(getContent), "<Key>bar</Key>"), Equals, true) c.Assert(strings.Contains(string(getContent), "<Key>bar</Key>"), Equals, true)
// create listObjectsV2 request with valid parameters // create listObjectsV2 request with valid parameters
@ -1377,6 +1378,7 @@ func (s *TestSuiteCommon) TestListObjectsHandler(c *C) {
c.Assert(response.StatusCode, Equals, http.StatusOK) c.Assert(response.StatusCode, Equals, http.StatusOK)
getContent, err = ioutil.ReadAll(response.Body) getContent, err = ioutil.ReadAll(response.Body)
c.Assert(err, IsNil)
c.Assert(strings.Contains(string(getContent), "<Key>bar</Key>"), Equals, true) c.Assert(strings.Contains(string(getContent), "<Key>bar</Key>"), Equals, true)
c.Assert(strings.Contains(string(getContent), "<Owner><ID></ID><DisplayName></DisplayName></Owner>"), Equals, true) c.Assert(strings.Contains(string(getContent), "<Owner><ID></ID><DisplayName></DisplayName></Owner>"), Equals, true)
@ -1960,6 +1962,7 @@ func (s *TestSuiteCommon) TestObjectMultipartAbort(c *C) {
// execute the HTTP request initiating the new multipart upload. // execute the HTTP request initiating the new multipart upload.
response, err = client.Do(request) response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK) c.Assert(response.StatusCode, Equals, http.StatusOK)
// parse the response body and obtain the new upload ID. // parse the response body and obtain the new upload ID.
@ -1977,6 +1980,7 @@ func (s *TestSuiteCommon) TestObjectMultipartAbort(c *C) {
// execute the HTTP request initiating the new multipart upload. // execute the HTTP request initiating the new multipart upload.
response, err = client.Do(request) response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK) c.Assert(response.StatusCode, Equals, http.StatusOK)
// parse the response body and obtain the new upload ID. // parse the response body and obtain the new upload ID.
@ -2193,6 +2197,7 @@ func (s *TestSuiteCommon) TestObjectMultipartListError(c *C) {
c.Assert(err, IsNil) c.Assert(err, IsNil)
// execute the HTTP request initiating the new multipart upload. // execute the HTTP request initiating the new multipart upload.
response, err = client.Do(request) response, err = client.Do(request)
c.Assert(err, IsNil)
c.Assert(response.StatusCode, Equals, http.StatusOK) c.Assert(response.StatusCode, Equals, http.StatusOK)
// parse the response body and obtain the new upload ID. // parse the response body and obtain the new upload ID.
decoder := xml.NewDecoder(response.Body) decoder := xml.NewDecoder(response.Body)

View File

@ -42,9 +42,6 @@ func newJWT(expiry time.Duration) (*JWT, error) {
if serverConfig == nil { if serverConfig == nil {
return nil, errors.New("Server not initialzed") return nil, errors.New("Server not initialzed")
} }
if expiry == 0 {
expiry = defaultTokenExpiry
}
// Save access, secret keys. // Save access, secret keys.
cred := serverConfig.GetCredential() cred := serverConfig.GetCredential()

View File

@ -91,7 +91,7 @@ func TestByteErrors(t *testing.T) {
t.Errorf("Expected error, got %v", got) t.Errorf("Expected error, got %v", got)
} }
// Empty string. // Empty string.
got, err = strconvBytes("") _, err = strconvBytes("")
if err == nil { if err == nil {
t.Errorf("Expected error parsing nothing") t.Errorf("Expected error parsing nothing")
} }

View File

@ -468,6 +468,9 @@ func newTestWebRPCRequest(rpcMethod string, authorization string, data interface
} }
encapsulatedData := genericJSON{JSONRPC: "2.0", ID: "1", Method: rpcMethod, Params: data} encapsulatedData := genericJSON{JSONRPC: "2.0", ID: "1", Method: rpcMethod, Params: data}
jsonData, err := json.Marshal(encapsulatedData) jsonData, err := json.Marshal(encapsulatedData)
if err != nil {
return nil, err
}
req, err := newWebRPCRequest(rpcMethod, authorization, bytes.NewReader(jsonData)) req, err := newWebRPCRequest(rpcMethod, authorization, bytes.NewReader(jsonData))
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -379,7 +379,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
xlMeta := pickValidXLMeta(partsMetadata, modTime) xlMeta := pickValidXLMeta(partsMetadata, modTime)
onlineDisks = getOrderedDisks(xlMeta.Erasure.Distribution, onlineDisks) onlineDisks = getOrderedDisks(xlMeta.Erasure.Distribution, onlineDisks)
partsMetadata = getOrderedPartsMetadata(xlMeta.Erasure.Distribution, partsMetadata) _ = getOrderedPartsMetadata(xlMeta.Erasure.Distribution, partsMetadata)
// Need a unique name for the part being written in minioMetaBucket to // Need a unique name for the part being written in minioMetaBucket to
// accommodate concurrent PutObjectPart requests // accommodate concurrent PutObjectPart requests

View File

@ -126,7 +126,9 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
object := "object" object := "object"
// Create object "obj" under bucket "bucket". // Create object "obj" under bucket "bucket".
_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil) _, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil)
if err != nil {
t.Fatal(err)
}
// for a 16 disk setup, quorum is 9. To simulate disks not found yet // for a 16 disk setup, quorum is 9. To simulate disks not found yet
// quorum is available, we remove disks leaving quorum disks behind. // quorum is available, we remove disks leaving quorum disks behind.
for i := range xl.storageDisks[:7] { for i := range xl.storageDisks[:7] {