clean up: ineffassign fixes (#2411)

karthic rao 2016-08-12 12:56:30 +05:30 committed by Harshavardhana
parent ef0a108dde
commit 70fd38818e
7 changed files with 19 additions and 11 deletions
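Note: ineffassign reports assignments whose results are never read. The hunks below apply its two usual fixes: return values that were captured into variables (size, entries, extractedSignedHeaders) but never used again are discarded with the blank identifier, and a few errors that were assigned but never inspected now get explicit checks. A minimal sketch of the blank-identifier fix follows; fetch is only an illustrative stand-in for a multi-return call such as erasureReadFile, not part of minio:

package main

import "fmt"

// fetch stands in for a call that returns a value plus an error;
// the name and signature are illustrative only.
func fetch() (int64, error) { return 42, nil }

func main() {
	// ineffassign would flag `size, err := fetch()` if size were never
	// read afterwards; the blank identifier keeps only the error.
	_, err := fetch()
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	fmt.Println("fetch succeeded")
}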

View File

@@ -92,7 +92,7 @@ func TestErasureCreateFile(t *testing.T) {
 	// 1 more disk down. 7 disk down in total. Should return quorum error.
 	disks[10] = AppendDiskDown{disks[10].(*posix)}
-	size, _, err = erasureCreateFile(disks, "testbucket", "testobject4", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
+	_, _, err = erasureCreateFile(disks, "testbucket", "testobject4", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
 	if err != errXLWriteQuorum {
 		t.Errorf("erasureCreateFile returned expected errXLWriteQuorum error, got %s", err)
 	}

View File

@@ -257,7 +257,7 @@ func TestErasureReadFileDiskFail(t *testing.T) {
 	pool := bpool.NewBytePool(chunkSize, len(disks))
 	buf := &bytes.Buffer{}
-	size, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
+	_, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
 	if err != nil {
 		t.Error(err)
 	}
@@ -270,7 +270,7 @@ func TestErasureReadFileDiskFail(t *testing.T) {
 	disks[5] = ReadDiskDown{disks[5].(*posix)}
 	buf.Reset()
-	size, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
+	_, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
 	if err != nil {
 		t.Error(err)
 	}
@@ -285,7 +285,7 @@ func TestErasureReadFileDiskFail(t *testing.T) {
 	disks[11] = ReadDiskDown{disks[11].(*posix)}
 	buf.Reset()
-	size, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
+	_, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
 	if err != nil {
 		t.Error(err)
 	}
@@ -296,7 +296,7 @@ func TestErasureReadFileDiskFail(t *testing.T) {
 	// 1 more disk down. 7 disks down in total. Read should fail.
 	disks[12] = ReadDiskDown{disks[12].(*posix)}
 	buf.Reset()
-	size, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
+	_, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
 	if err != errXLReadQuorum {
 		t.Fatal("expected errXLReadQuorum error")
 	}
@@ -361,7 +361,7 @@ func TestErasureReadFileOffsetLength(t *testing.T) {
 	for i, testCase := range testCases {
 		expected := data[testCase.offset:(testCase.offset + testCase.length)]
 		buf := &bytes.Buffer{}
-		size, err = erasureReadFile(buf, disks, "testbucket", "testobject", testCase.offset, testCase.length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
+		_, err = erasureReadFile(buf, disks, "testbucket", "testobject", testCase.offset, testCase.length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
 		if err != nil {
 			t.Error(err)
 			continue
@@ -429,7 +429,7 @@ func TestErasureReadFileRandomOffsetLength(t *testing.T) {
 		expected := data[offset : offset+readLen]
-		size, err = erasureReadFile(buf, disks, "testbucket", "testobject", offset, readLen, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
+		_, err = erasureReadFile(buf, disks, "testbucket", "testobject", offset, readLen, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
 		if err != nil {
			t.Fatal(err, offset, readLen)
 		}

View File

@@ -92,6 +92,9 @@ func TestCopyBuffer(t *testing.T) {
 	testFile := "testFile"
 	testContent := []byte("hello, world")
 	err = disk.AppendFile(volume, testFile, testContent)
+	if err != nil {
+		t.Fatalf("AppendFile failed: <ERROR> %s", err)
+	}
 	testCases := []struct {
 		writer io.Writer

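The AppendFile hunk above shows the commit's other pattern: an error that was previously assigned and ignored now fails the test immediately. A hedged sketch of that shape, with appendData as an illustrative stand-in for the setup call rather than minio's actual disk API:

package example

import "testing"

// appendData is an illustrative stand-in for a setup helper such as
// disk.AppendFile; it is not minio's API.
func appendData(volume, path string, b []byte) error { return nil }

func TestSetup(t *testing.T) {
	// Before, the error was assigned but never inspected, so a failed
	// setup step let the rest of the test run against bad state.
	if err := appendData("testvolume", "testfile", []byte("hello, world")); err != nil {
		t.Fatalf("appendData failed: <ERROR> %s", err)
	}
}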
View File

@@ -726,6 +726,9 @@ func (s *TestSuiteCommon) TestPutBucket(c *C) {
 			client := http.Client{}
 			response, err := client.Do(request)
+			if err != nil {
+				c.Fatalf("Put bucket Failed: <ERROR> %s", err)
+			}
 			defer response.Body.Close()
 		}()
 	}

View File

@@ -178,7 +178,7 @@ func TestExtractSignedHeaders(t *testing.T) {
 	// case where the headers doesn't contain the one of the signed header in the signed headers list.
 	signedHeaders = append(signedHeaders, " X-Amz-Credential")
 	// expected to fail with `ErrUnsignedHeaders`.
-	extractedSignedHeaders, errCode = extractSignedHeaders(signedHeaders, inputHeader)
+	_, errCode = extractSignedHeaders(signedHeaders, inputHeader)
 	if errCode != ErrUnsignedHeaders {
 		t.Fatalf("Expected the APIErrorCode to %d, but got %d", ErrUnsignedHeaders, errCode)
 	}
@@ -186,7 +186,7 @@ func TestExtractSignedHeaders(t *testing.T) {
 	// case where the list of signed headers doesn't contain the host field.
 	signedHeaders = signedHeaders[1:]
 	// expected to fail with `ErrUnsignedHeaders`.
-	extractedSignedHeaders, errCode = extractSignedHeaders(signedHeaders, inputHeader)
+	_, errCode = extractSignedHeaders(signedHeaders, inputHeader)
 	if errCode != ErrUnsignedHeaders {
 		t.Fatalf("Expected the APIErrorCode to %d, but got %d", ErrUnsignedHeaders, errCode)
 	}

View File

@@ -336,7 +336,7 @@ func TestListDir(t *testing.T) {
 		t.Error(err)
 	}
 	// None of the disks are available, should get errDiskNotFound.
-	entries, _, err = listDir(volume, "", "")
+	_, _, err = listDir(volume, "", "")
 	if err != errDiskNotFound {
 		t.Error("expected errDiskNotFound error.")
 	}

View File

@@ -91,7 +91,9 @@ func TestXLDeleteObjectBasic(t *testing.T) {
 	// Create object "obj" under bucket "bucket" for Test 7 to pass
 	_, err = xl.PutObject("bucket", "obj", int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil)
+	if err != nil {
+		t.Fatalf("XL Object upload failed: <ERROR> %s", err)
+	}
 	for i, test := range testCases {
 		actualErr := xl.DeleteObject(test.bucket, test.object)
 		if test.expectedErr != nil && actualErr != test.expectedErr {