move to go1.24 (#21114)

Harshavardhana 2025-04-09 07:28:39 -07:00 committed by GitHub
parent a6258668a6
commit 2b34e5b9ae
74 changed files with 434 additions and 458 deletions
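
The change is mechanical but touches three areas: CI workflows and release Dockerfiles move from Go 1.23 to Go 1.24, the README's minimum toolchain requirement rises from go1.21 to go1.24, and the test suite adopts the `t.Context()` / `b.Context()` helpers that the `testing` package gained in Go 1.24. A minimal sketch of the new helper (the test name is illustrative, not from this commit):

```go
package cmd_test

import "testing"

// t.Context() returns a per-test context that the framework cancels just
// before functions registered with t.Cleanup run, so tests no longer need
// to derive a context from context.Background() and cancel it by hand.
func TestContextLifetime(t *testing.T) {
	ctx := t.Context()
	if err := ctx.Err(); err != nil {
		t.Fatalf("context should still be live while the test runs: %v", err)
	}
}
```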

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.23.x]
go-version: [1.24.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v4

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.23.x]
go-version: [1.24.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v4

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.23.x]
go-version: [1.24.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v4

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.23.x]
go-version: [1.24.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v4

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.23.x]
go-version: [1.24.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v4

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.23.x]
go-version: [1.24.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v4

View File

@@ -61,7 +61,7 @@ jobs:
# are turned off - i.e. if ldap="", then ldap server is not enabled for
# the tests.
matrix:
go-version: [1.23.x]
go-version: [1.24.x]
ldap: ["", "localhost:389"]
etcd: ["", "http://localhost:2379"]
openid: ["", "http://127.0.0.1:5556/dex"]

View File

@@ -29,7 +29,7 @@ jobs:
- name: setup-go-step
uses: actions/setup-go@v5
with:
go-version: 1.23.x
go-version: 1.24.x
- name: github sha short
id: vars

View File

@@ -21,7 +21,7 @@ jobs:
strategy:
matrix:
go-version: [1.23.x]
go-version: [1.24.x]
steps:
- uses: actions/checkout@v4

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.23.x]
go-version: [1.24.x]
os: [ubuntu-latest]
steps:

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.23.x]
go-version: [1.24.x]
os: [ubuntu-latest]
steps:

View File

@@ -21,7 +21,8 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.23.5
go-version: 1.24.0
cached: false
- name: Get official govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
shell: bash

View File

@@ -1,4 +1,4 @@
FROM golang:1.23-alpine as build
FROM golang:1.24-alpine as build
ARG TARGETARCH
ARG RELEASE

View File

@@ -1,4 +1,4 @@
FROM golang:1.23-alpine AS build
FROM golang:1.24-alpine AS build
ARG TARGETARCH
ARG RELEASE

View File

@@ -1,4 +1,4 @@
FROM golang:1.23-alpine AS build
FROM golang:1.24-alpine AS build
ARG TARGETARCH
ARG RELEASE

View File

@@ -122,7 +122,7 @@ You can also connect using any S3-compatible tool, such as the MinIO Client `mc`
## Install from Source
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.21](https://golang.org/dl/#stable)
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.24](https://golang.org/dl/#stable)
```sh
go install github.com/minio/minio@latest

View File

@@ -263,7 +263,7 @@ func buildAdminRequest(queryVal url.Values, method, path string,
}
func TestAdminServerInfo(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
adminTestBed, err := prepareAdminErasureTestBed(ctx)
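
This is the commit's recurring pattern: a test that previously derived a cancellable context from `context.Background()` now derives it from `t.Context()`. A sketch of the resulting shape (illustrative, assuming the Go 1.24 semantics described above):

```go
package cmd_test

import (
	"context"
	"testing"
)

func TestWithCancelPattern(t *testing.T) {
	// t.Context() is already canceled when the test finishes, so the
	// WithCancel wrapper survives mainly to keep the old code shape and
	// to allow cancelling earlier than test teardown.
	ctx, cancel := context.WithCancel(t.Context())
	defer cancel()
	_ = ctx
}
```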

View File

@@ -18,7 +18,6 @@
package cmd
import (
"context"
"errors"
"testing"
@@ -64,7 +63,7 @@ var toAPIErrorTests = []struct {
}
func TestAPIErrCode(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
for i, testCase := range toAPIErrorTests {
errCode := toAPIErrorCode(ctx, testCase.err)
if errCode != testCase.errCode {
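
Where every call site in a file migrates, the `"context"` import becomes unused and is dropped, which is what the `@@ -18,7 +18,6 @@` import hunks in this commit record. Illustrative sketch (hypothetical test name):

```go
package cmd_test

import (
	// "context" // no longer needed once call sites use t.Context()
	"testing"
)

func TestAPIErrCodePattern(t *testing.T) {
	ctx := t.Context() // replaces ctx := context.Background()
	_ = ctx
}
```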

View File

@@ -413,7 +413,7 @@ func TestIsReqAuthenticated(t *testing.T) {
}
func TestCheckAdminRequestAuthType(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
objLayer, fsDir, err := prepareFS(ctx)
@@ -450,7 +450,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
}
func TestValidateAdminSignature(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
objLayer, fsDir, err := prepareFS(ctx)

View File

@@ -35,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
err = obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -54,7 +54,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
@@ -76,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
object := getRandomObjectName()
// create bucket.
err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
err = obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -90,7 +90,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to written is required as input for NewMultipartUpload.
res, err := obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
res, err := obj.NewMultipartUpload(b.Context(), bucket, object, ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -113,7 +113,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
}
md5hex := getMD5Hash(textPartData)
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, res.UploadID, j,
partInfo, err = obj.PutObjectPart(b.Context(), bucket, object, res.UploadID, j,
mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
@@ -130,7 +130,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
@@ -146,7 +146,7 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
@@ -162,7 +162,7 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
@@ -196,7 +196,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
err := obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -218,7 +218,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
i := 0
for pb.Next() {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
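
Benchmarks get the same treatment via `(*testing.B).Context`. A minimal sketch (hypothetical benchmark, not from this commit):

```go
package cmd_test

import "testing"

// b.Context() is canceled when the benchmark completes, so every
// iteration can share one context without the benchmark managing its
// lifetime by hand.
func BenchmarkContextPattern(b *testing.B) {
	ctx := b.Context()
	for i := 0; i < b.N; i++ {
		_ = ctx // stand-in for obj.PutObject(ctx, ...)
	}
}
```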

View File

@@ -18,7 +18,6 @@
package cmd
import (
"context"
"io"
"testing"
)
@@ -34,7 +33,7 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
t.Fatal(err)
}
disk.MakeVol(context.Background(), volume)
disk.MakeVol(t.Context(), volume)
writer := newBitrotWriter(disk, "", volume, filePath, 35, bitrotAlgo, 10)

View File

@@ -18,7 +18,6 @@
package cmd
import (
"context"
"testing"
"github.com/minio/minio/internal/bucket/replication"
@@ -184,7 +183,7 @@ var parseReplicationDecisionTest = []struct {
func TestParseReplicateDecision(t *testing.T) {
for i, test := range parseReplicationDecisionTest {
dsc, err := parseReplicateDecision(context.Background(), "bucket", test.expDsc.String())
dsc, err := parseReplicateDecision(t.Context(), "bucket", test.expDsc.String())
if err != nil {
if test.expErr != err {
t.Errorf("Test%d (%s): Expected parse error got %t , want %t", i+1, test.name, err, test.expErr)

View File

@@ -18,7 +18,6 @@
package cmd
import (
"context"
"fmt"
"net/http"
"testing"
@@ -86,7 +85,7 @@ var replicationConfigTests = []struct {
}
func TestReplicationResync(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
for i, test := range replicationConfigTests {
if sync := test.rcfg.Resync(ctx, test.info, test.dsc, test.tgtStatuses); sync.mustResync() != test.expectedSync {
t.Errorf("Test%d (%s): Resync got %t , want %t", i+1, test.name, sync.mustResync(), test.expectedSync)

View File

@@ -26,7 +26,7 @@ import (
)
func TestServerConfig(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
objLayer, fsDir, err := prepareFS(ctx)
@@ -56,7 +56,7 @@ func TestServerConfig(t *testing.T) {
t.Errorf("Expecting region `us-west-1` found %s", globalSite.Region())
}
if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil {
if err := saveServerConfig(t.Context(), objLayer, globalServerConfig); err != nil {
t.Fatalf("Unable to save updated config file %s", err)
}

View File

@@ -18,7 +18,6 @@
package cmd
import (
"context"
"encoding/xml"
"fmt"
"slices"
@@ -38,7 +37,7 @@ import (
func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
// Prepare object layer
objAPI, disks, err := prepareErasure(context.Background(), 8)
objAPI, disks, err := prepareErasure(t.Context(), 8)
if err != nil {
t.Fatalf("Failed to initialize object layer: %v", err)
}
@@ -102,7 +101,7 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
}
globalBucketMetadataSys.Set(bucket, meta)
// Prepare lifecycle expiration workers
es := newExpiryState(context.Background(), objAPI, 0)
es := newExpiryState(t.Context(), objAPI, 0)
globalExpiryState = es
// Prepare object versions
@@ -276,7 +275,7 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
sizeS sizeSummary
gots []ObjectInfo
)
item.applyActions(context.TODO(), objAPI, test.objInfos, test.lr, &sizeS, func(oi ObjectInfo, sz, _ int64, _ *sizeSummary) {
item.applyActions(t.Context(), objAPI, test.objInfos, test.lr, &sizeS, func(oi ObjectInfo, sz, _ int64, _ *sizeSummary) {
if sz != 0 {
gots = append(gots, oi)
}
@@ -399,7 +398,7 @@ func TestEvalActionFromLifecycle(t *testing.T) {
for i, test := range tests {
t.Run(fmt.Sprintf("TestEvalAction-%d", i), func(t *testing.T) {
gotEvent := evalActionFromLifecycle(context.Background(), test.ilm, *test.retention, nil, test.obj)
gotEvent := evalActionFromLifecycle(t.Context(), test.ilm, *test.retention, nil, test.obj)
if gotEvent.Action != test.want {
t.Fatalf("Expected %v but got %v", test.want, gotEvent.Action)
}

View File

@@ -70,7 +70,7 @@ func TestDataUsageUpdate(t *testing.T) {
})
weSleep := func() bool { return false }
got, err := scanDataFolder(context.Background(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep)
got, err := scanDataFolder(t.Context(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep)
if err != nil {
t.Fatal(err)
}
@@ -180,7 +180,7 @@
}
// Changed dir must be picked up in this many cycles.
for i := 0; i < dataUsageUpdateDirCycles; i++ {
got, err = scanDataFolder(context.Background(), nil, &xls, got, getSize, 0, weSleep)
got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
got.Info.NextCycle++
if err != nil {
t.Fatal(err)
@@ -294,7 +294,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
return DiskInfo{Total: 1 << 40, Free: 1 << 40}, nil
})
got, err := scanDataFolder(context.Background(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize, 0, weSleep)
got, err := scanDataFolder(t.Context(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize, 0, weSleep)
if err != nil {
t.Fatal(err)
}
@@ -429,7 +429,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
}
// Changed dir must be picked up in this many cycles.
for i := 0; i < dataUsageUpdateDirCycles; i++ {
got, err = scanDataFolder(context.Background(), nil, &xls, got, getSize, 0, weSleep)
got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
got.Info.NextCycle++
if err != nil {
t.Fatal(err)
@@ -582,7 +582,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
return DiskInfo{Total: 1 << 40, Free: 1 << 40}, nil
})
weSleep := func() bool { return false }
want, err := scanDataFolder(context.Background(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep)
want, err := scanDataFolder(t.Context(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep)
if err != nil {
t.Fatal(err)
}

View File

@@ -89,7 +89,7 @@ func TestErasureDecode(t *testing.T) {
if err != nil {
t.Fatalf("Test %d: failed to create test setup: %v", i, err)
}
erasure, err := NewErasure(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
erasure, err := NewErasure(t.Context(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
if err != nil {
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
}
@@ -108,7 +108,7 @@ func TestErasureDecode(t *testing.T) {
for i, disk := range disks {
writers[i] = newBitrotWriter(disk, "", "testbucket", "object", erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize())
}
n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
n, err := erasure.Encode(t.Context(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil {
t.Fatalf("Test %d: failed to create erasure test file: %v", i, err)
@@ -134,7 +134,7 @@ func TestErasureDecode(t *testing.T) {
}
writer := bytes.NewBuffer(nil)
_, err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data, nil)
_, err = erasure.Decode(t.Context(), writer, bitrotReaders, test.offset, test.length, test.data, nil)
closeBitrotReaders(bitrotReaders)
if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
@@ -177,7 +177,7 @@ func TestErasureDecode(t *testing.T) {
bitrotReaders[0] = nil
}
writer.Reset()
_, err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data, nil)
_, err = erasure.Decode(t.Context(), writer, bitrotReaders, test.offset, test.length, test.data, nil)
closeBitrotReaders(bitrotReaders)
if err != nil && !test.shouldFailQuorum {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
@@ -211,7 +211,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
return
}
disks := setup.disks
erasure, err := NewErasure(context.Background(), dataBlocks, parityBlocks, blockSize)
erasure, err := NewErasure(t.Context(), dataBlocks, parityBlocks, blockSize)
if err != nil {
t.Fatalf("failed to create ErasureStorage: %v", err)
}
@@ -236,7 +236,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
// Create a test file to read from.
buffer := make([]byte, blockSize, 2*blockSize)
n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
n, err := erasure.Encode(t.Context(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil {
t.Fatal(err)
@@ -266,7 +266,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
tillOffset := erasure.ShardFileOffset(offset, readLen, length)
bitrotReaders[index] = newStreamingBitrotReader(disk, nil, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
}
_, err = erasure.Decode(context.Background(), buf, bitrotReaders, offset, readLen, length, nil)
_, err = erasure.Decode(t.Context(), buf, bitrotReaders, offset, readLen, length, nil)
closeBitrotReaders(bitrotReaders)
if err != nil {
t.Fatal(err, offset, readLen)

View File

@@ -88,7 +88,7 @@ func TestErasureEncode(t *testing.T) {
t.Fatalf("Test %d: failed to create test setup: %v", i, err)
}
disks := setup.disks
erasure, err := NewErasure(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
erasure, err := NewErasure(t.Context(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
if err != nil {
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
}
@@ -105,7 +105,7 @@ func TestErasureEncode(t *testing.T) {
}
writers[i] = newBitrotWriter(disk, "", "testbucket", "object", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
}
n, err := erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
n, err := erasure.Encode(t.Context(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
@@ -140,7 +140,7 @@ func TestErasureEncode(t *testing.T) {
if test.offDisks > 0 {
writers[0] = nil
}
n, err = erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
n, err = erasure.Encode(t.Context(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil && !test.shouldFailQuorum {
t.Errorf("Test %d: should pass but failed with: %v", i, err)

View File

@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"crypto/rand"
"io"
"os"
@@ -75,7 +74,7 @@ func TestErasureHeal(t *testing.T) {
t.Fatalf("Test %d: failed to setup Erasure environment: %v", i, err)
}
disks := setup.disks
erasure, err := NewErasure(context.Background(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
erasure, err := NewErasure(t.Context(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
if err != nil {
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
}
@@ -88,7 +87,7 @@ func TestErasureHeal(t *testing.T) {
for i, disk := range disks {
writers[i] = newBitrotWriter(disk, "", "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize())
}
_, err = erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
_, err = erasure.Encode(t.Context(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil {
t.Fatalf("Test %d: failed to create random test data: %v", i, err)
@@ -132,7 +131,7 @@ func TestErasureHeal(t *testing.T) {
}
// test case setup is complete - now call Heal()
err = erasure.Heal(context.Background(), staleWriters, readers, test.size, nil)
err = erasure.Heal(t.Context(), staleWriters, readers, test.size, nil)
closeBitrotReaders(readers)
closeBitrotWriters(staleWriters)
if err != nil && !test.shouldFail {

View File

@@ -152,7 +152,7 @@ func TestListOnlineDisks(t *testing.T) {
t.Skip()
}
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, disks, err := prepareErasure16(ctx)
@@ -160,7 +160,7 @@ func TestListOnlineDisks(t *testing.T) {
t.Fatalf("Prepare Erasure backend failed - %v", err)
}
setObjectLayer(obj)
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(disks)
type tamperKind int
@@ -276,7 +276,7 @@ func TestListOnlineDisks(t *testing.T) {
// and check if that disk
// appears in outDatedDisks.
tamperedIndex = index
dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
dErr := erasureDisks[index].Delete(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -328,7 +328,7 @@ func TestListOnlineDisks(t *testing.T) {
// TestListOnlineDisksSmallObjects - checks if listOnlineDisks and outDatedDisks
// are consistent with each other.
func TestListOnlineDisksSmallObjects(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, disks, err := prepareErasure16(ctx)
@@ -336,7 +336,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
t.Fatalf("Prepare Erasure backend failed - %v", err)
}
setObjectLayer(obj)
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(disks)
type tamperKind int
@@ -456,7 +456,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
// and check if that disk
// appears in outDatedDisks.
tamperedIndex = index
dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
dErr := erasureDisks[index].Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -507,14 +507,14 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
}
func TestDisksWithAllParts(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, disks, err := prepareErasure16(ctx)
if err != nil {
t.Fatalf("Prepare Erasure backend failed - %v", err)
}
setObjectLayer(obj)
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(disks)
bucket := "bucket"
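
One subtlety behind the `defer obj.Shutdown(t.Context())` rewrites above: deferred calls execute while the test function is returning, and (per the Go 1.24 testing docs) the framework cancels `t.Context()` only after that, just before `t.Cleanup` callbacks run, so the deferred shutdown still sees a live context. A sketch of the assumed ordering, with a hypothetical `shutdown` helper:

```go
package cmd_test

import (
	"context"
	"testing"
)

func shutdown(ctx context.Context) error { return ctx.Err() } // hypothetical

func TestDeferredShutdown(t *testing.T) {
	defer func() {
		// Runs as the test function returns, before t.Context() is
		// canceled, so ctx.Err() is still nil here.
		if err := shutdown(t.Context()); err != nil {
			t.Errorf("context canceled too early: %v", err)
		}
	}()
}
```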

View File

@@ -311,14 +311,14 @@ func TestIsObjectDangling(t *testing.T) {
// Tests both object and bucket healing.
func TestHealing(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDirs, err := prepareErasure16(ctx)
if err != nil {
t.Fatal(err)
}
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
// initialize the server and obtain the credentials and root.
// credentials are necessary to sign the HTTP request.
@@ -353,7 +353,7 @@ func TestHealing(t *testing.T) {
}
disk := er.getDisks()[0]
fileInfoPreHeal, err := disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPreHeal, err := disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -376,7 +376,7 @@ func TestHealing(t *testing.T) {
t.Fatal(err)
}
fileInfoPostHeal, err := disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal, err := disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -395,7 +395,7 @@ func TestHealing(t *testing.T) {
// gone down when an object was replaced by a new object.
fileInfoOutDated := fileInfoPreHeal
fileInfoOutDated.ModTime = time.Now()
err = disk.WriteMetadata(context.Background(), "", bucket, object, fileInfoOutDated)
err = disk.WriteMetadata(t.Context(), "", bucket, object, fileInfoOutDated)
if err != nil {
t.Fatal(err)
}
@@ -405,7 +405,7 @@ func TestHealing(t *testing.T) {
t.Fatal(err)
}
fileInfoPostHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -457,7 +457,7 @@ func TestHealing(t *testing.T) {
t.Fatal(err)
}
// Stat the bucket to make sure that it was created.
_, err = er.getDisks()[0].StatVol(context.Background(), bucket)
_, err = er.getDisks()[0].StatVol(t.Context(), bucket)
if err != nil {
t.Fatal(err)
}
@@ -465,14 +465,14 @@ func TestHealing(t *testing.T) {
// Tests both object and bucket healing.
func TestHealingVersioned(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDirs, err := prepareErasure16(ctx)
if err != nil {
t.Fatal(err)
}
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
// initialize the server and obtain the credentials and root.
// credentials are necessary to sign the HTTP request.
@@ -513,11 +513,11 @@ func TestHealingVersioned(t *testing.T) {
}
disk := er.getDisks()[0]
fileInfoPreHeal1, err := disk.ReadVersion(context.Background(), "", bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true})
fileInfoPreHeal1, err := disk.ReadVersion(t.Context(), "", bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
fileInfoPreHeal2, err := disk.ReadVersion(context.Background(), "", bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true})
fileInfoPreHeal2, err := disk.ReadVersion(t.Context(), "", bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -540,11 +540,11 @@ func TestHealingVersioned(t *testing.T) {
t.Fatal(err)
}
fileInfoPostHeal1, err := disk.ReadVersion(context.Background(), "", bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal1, err := disk.ReadVersion(t.Context(), "", bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
fileInfoPostHeal2, err := disk.ReadVersion(context.Background(), "", bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal2, err := disk.ReadVersion(t.Context(), "", bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -566,7 +566,7 @@ func TestHealingVersioned(t *testing.T) {
// gone down when an object was replaced by a new object.
fileInfoOutDated := fileInfoPreHeal1
fileInfoOutDated.ModTime = time.Now()
err = disk.WriteMetadata(context.Background(), "", bucket, object, fileInfoOutDated)
err = disk.WriteMetadata(t.Context(), "", bucket, object, fileInfoOutDated)
if err != nil {
t.Fatal(err)
}
@@ -576,7 +576,7 @@ func TestHealingVersioned(t *testing.T) {
t.Fatal(err)
}
fileInfoPostHeal1, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal1, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -586,7 +586,7 @@ func TestHealingVersioned(t *testing.T) {
t.Fatal("HealObject failed")
}
fileInfoPostHeal2, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal2, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -638,14 +638,14 @@ func TestHealingVersioned(t *testing.T) {
t.Fatal(err)
}
// Stat the bucket to make sure that it was created.
_, err = er.getDisks()[0].StatVol(context.Background(), bucket)
_, err = er.getDisks()[0].StatVol(t.Context(), bucket)
if err != nil {
t.Fatal(err)
}
}
func TestHealingDanglingObject(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
resetGlobalHealState()
@@ -724,7 +724,7 @@ func TestHealingDanglingObject(t *testing.T) {
// Restore...
setDisks(orgDisks[:4]...)
fileInfoPreHeal, err := disks[0].ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPreHeal, err := disks[0].ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -741,7 +741,7 @@ func TestHealingDanglingObject(t *testing.T) {
t.Fatal(err)
}
fileInfoPostHeal, err := disks[0].ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal, err := disks[0].ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -771,7 +771,7 @@ func TestHealingDanglingObject(t *testing.T) {
setDisks(orgDisks[:4]...)
disk := getDisk(0)
fileInfoPreHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPreHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -789,7 +789,7 @@ func TestHealingDanglingObject(t *testing.T) {
}
disk = getDisk(0)
fileInfoPostHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -820,7 +820,7 @@ func TestHealingDanglingObject(t *testing.T) {
setDisks(orgDisks[:4]...)
disk = getDisk(0)
fileInfoPreHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPreHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -838,7 +838,7 @@ func TestHealingDanglingObject(t *testing.T) {
}
disk = getDisk(0)
fileInfoPostHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -849,7 +849,7 @@ func TestHealingDanglingObject(t *testing.T) {
}
func TestHealCorrectQuorum(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
resetGlobalHealState()
@@ -933,7 +933,7 @@ func TestHealCorrectQuorum(t *testing.T) {
}
for i := 0; i < nfi.Erasure.ParityBlocks; i++ {
erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
erasureDisks[i].Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -960,7 +960,7 @@ func TestHealCorrectQuorum(t *testing.T) {
}
for i := 0; i < nfi.Erasure.ParityBlocks; i++ {
erasureDisks[i].Delete(context.Background(), minioMetaBucket, pathJoin(cfgFile, xlStorageFormatFile), DeleteOptions{
erasureDisks[i].Delete(t.Context(), minioMetaBucket, pathJoin(cfgFile, xlStorageFormatFile), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -980,7 +980,7 @@ func TestHealCorrectQuorum(t *testing.T) {
}
func TestHealObjectCorruptedPools(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
resetGlobalHealState()
@@ -1044,7 +1044,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
er := set.sets[0]
erasureDisks := er.getDisks()
firstDisk := erasureDisks[0]
err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -1063,11 +1063,11 @@ func TestHealObjectCorruptedPools(t *testing.T) {
t.Fatalf("Failed to getLatestFileInfo - %v", err)
}
if _, err = firstDisk.StatInfoFile(context.Background(), bucket, object+"/"+xlStorageFormatFile, false); err != nil {
if _, err = firstDisk.StatInfoFile(t.Context(), bucket, object+"/"+xlStorageFormatFile, false); err != nil {
t.Errorf("Expected xl.meta file to be present but stat failed - %v", err)
}
err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -1075,7 +1075,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
t.Errorf("Failure during deleting part.1 - %v", err)
}
err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte{})
err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte{})
if err != nil {
t.Errorf("Failure during creating part.1 - %v", err)
}
@@ -1095,7 +1095,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
t.Fatalf("FileInfo not equal after healing: %v != %v", fi, nfi)
}
err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -1104,7 +1104,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
}
bdata := bytes.Repeat([]byte("b"), int(nfi.Size))
err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), bdata)
err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), bdata)
if err != nil {
t.Errorf("Failure during creating part.1 - %v", err)
}
@@ -1127,7 +1127,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
// Test 4: checks if HealObject returns an error when xl.meta is not found
// in more than read quorum number of disks, to create a corrupted situation.
for i := 0; i <= nfi.Erasure.DataBlocks; i++ {
erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
erasureDisks[i].Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -1148,7 +1148,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
}
for i := 0; i < (nfi.Erasure.DataBlocks + nfi.Erasure.ParityBlocks); i++ {
stats, _ := erasureDisks[i].StatInfoFile(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
stats, _ := erasureDisks[i].StatInfoFile(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), false)
if len(stats) != 0 {
t.Errorf("Expected xl.meta file to be not present, but succeeded")
}
@@ -1156,7 +1156,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
}
func TestHealObjectCorruptedXLMeta(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
resetGlobalHealState()
@@ -1222,7 +1222,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
t.Fatalf("Failed to getLatestFileInfo - %v", err)
}
err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -1235,7 +1235,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
t.Fatalf("Failed to heal object - %v", err)
}
if _, err = firstDisk.StatInfoFile(context.Background(), bucket, object+"/"+xlStorageFormatFile, false); err != nil {
if _, err = firstDisk.StatInfoFile(t.Context(), bucket, object+"/"+xlStorageFormatFile, false); err != nil {
t.Errorf("Expected xl.meta file to be present but stat failed - %v", err)
}
@@ -1250,7 +1250,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
}
// Test 2: Test with a corrupted xl.meta
err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), []byte("abcd"))
err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), []byte("abcd"))
if err != nil {
t.Errorf("Failure during creating part.1 - %v", err)
}
@@ -1273,7 +1273,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
// Test 3: checks if HealObject returns an error when xl.meta is not found
// in more than read quorum number of disks, to create a corrupted situation.
for i := 0; i <= nfi2.Erasure.DataBlocks; i++ {
erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
erasureDisks[i].Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -1295,7 +1295,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
}
func TestHealObjectCorruptedParts(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
resetGlobalHealState()
@@ -1362,18 +1362,18 @@ func TestHealObjectCorruptedParts(t *testing.T) {
t.Fatalf("Failed to getLatestFileInfo - %v", err)
}
part1Disk1Origin, err := firstDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"))
part1Disk1Origin, err := firstDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"))
if err != nil {
t.Fatalf("Failed to read a file - %v", err)
}
part1Disk2Origin, err := secondDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"))
part1Disk2Origin, err := secondDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"))
if err != nil {
t.Fatalf("Failed to read a file - %v", err)
}
// Test 1, remove part.1
err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -1386,7 +1386,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
t.Fatalf("Failed to heal object - %v", err)
}
part1Replaced, err := firstDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"))
part1Replaced, err := firstDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"))
if err != nil {
t.Fatalf("Failed to read a file - %v", err)
}
@@ -1396,7 +1396,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
}
// Test 2, Corrupt part.1
err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte("foobytes"))
err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte("foobytes"))
if err != nil {
t.Fatalf("Failed to write a file - %v", err)
}
@@ -1406,7 +1406,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
t.Fatalf("Failed to heal object - %v", err)
}
part1Replaced, err = firstDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"))
part1Replaced, err = firstDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"))
if err != nil {
t.Fatalf("Failed to read a file - %v", err)
}
@@ -1416,12 +1416,12 @@ func TestHealObjectCorruptedParts(t *testing.T) {
}
// Test 3, Corrupt one part and remove data in another disk
err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte("foobytes"))
err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte("foobytes"))
if err != nil {
t.Fatalf("Failed to write a file - %v", err)
}
err = secondDisk.Delete(context.Background(), bucket, object, DeleteOptions{
err = secondDisk.Delete(t.Context(), bucket, object, DeleteOptions{
Recursive: true,
Immediate: false,
})
@@ -1434,7 +1434,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
t.Fatalf("Failed to heal object - %v", err)
}
partReconstructed, err := firstDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"))
partReconstructed, err := firstDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"))
if err != nil {
t.Fatalf("Failed to read a file - %v", err)
}
@@ -1443,7 +1443,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
t.Fatalf("part.1 not healed correctly")
}
partReconstructed, err = secondDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"))
partReconstructed, err = secondDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"))
if err != nil {
t.Fatalf("Failed to read a file - %v", err)
}
@@ -1455,7 +1455,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
// Tests healing of object.
func TestHealObjectErasure(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
nDisks := 16
@@ -1512,7 +1512,7 @@ func TestHealObjectErasure(t *testing.T) {
}
// Delete the whole object folder
err = firstDisk.Delete(context.Background(), bucket, object, DeleteOptions{
err = firstDisk.Delete(t.Context(), bucket, object, DeleteOptions{
Recursive: true,
Immediate: false,
})
@@ -1525,7 +1525,7 @@ func TestHealObjectErasure(t *testing.T) {
t.Fatalf("Failed to heal object - %v", err)
}
if _, err = firstDisk.StatInfoFile(context.Background(), bucket, object+"/"+xlStorageFormatFile, false); err != nil {
if _, err = firstDisk.StatInfoFile(t.Context(), bucket, object+"/"+xlStorageFormatFile, false); err != nil {
t.Errorf("Expected xl.meta file to be present but stat failed - %v", err)
}
@@ -1534,7 +1534,7 @@ func TestHealObjectErasure(t *testing.T) {
er.getDisks = func() []StorageAPI {
// Nil more than half the disks, to remove write quorum.
for i := 0; i <= len(erasureDisks)/2; i++ {
err := erasureDisks[i].Delete(context.Background(), bucket, object, DeleteOptions{
err := erasureDisks[i].Delete(t.Context(), bucket, object, DeleteOptions{
Recursive: true,
Immediate: false,
})
@@ -1560,7 +1560,7 @@ func TestHealObjectErasure(t *testing.T) {
// Tests healing of empty directories
func TestHealEmptyDirectoryErasure(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
nDisks := 16
@@ -1596,7 +1596,7 @@ func TestHealEmptyDirectoryErasure(t *testing.T) {
z := obj.(*erasureServerPools)
er := z.serverPools[0].sets[0]
firstDisk := er.getDisks()[0]
err = firstDisk.DeleteVol(context.Background(), pathJoin(bucket, encodeDirObject(object)), true)
err = firstDisk.DeleteVol(t.Context(), pathJoin(bucket, encodeDirObject(object)), true)
if err != nil {
t.Fatalf("Failed to delete a file - %v", err)
}
@@ -1608,7 +1608,7 @@ func TestHealEmptyDirectoryErasure(t *testing.T) {
}
// Check if the empty directory is restored in the first disk
_, err = firstDisk.StatVol(context.Background(), pathJoin(bucket, encodeDirObject(object)))
_, err = firstDisk.StatVol(t.Context(), pathJoin(bucket, encodeDirObject(object)))
if err != nil {
t.Fatalf("Expected object to be present but stat failed - %v", err)
}
@@ -1656,7 +1656,7 @@ func TestHealLastDataShard(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
nDisks := 16

View File

@@ -99,11 +99,11 @@ func TestReduceErrs(t *testing.T) {
}
// Validates list of all the testcases for returning valid errors.
for i, testCase := range testCases {
gotErr := reduceReadQuorumErrs(context.Background(), testCase.errs, testCase.ignoredErrs, 5)
gotErr := reduceReadQuorumErrs(t.Context(), testCase.errs, testCase.ignoredErrs, 5)
if gotErr != testCase.err {
t.Errorf("Test %d : expected %s, got %s", i+1, testCase.err, gotErr)
}
gotNewErr := reduceWriteQuorumErrs(context.Background(), testCase.errs, testCase.ignoredErrs, 6)
gotNewErr := reduceWriteQuorumErrs(t.Context(), testCase.errs, testCase.ignoredErrs, 6)
if gotNewErr != errErasureWriteQuorum {
t.Errorf("Test %d : expected %s, got %s", i+1, errErasureWriteQuorum, gotErr)
}
@@ -148,7 +148,7 @@ func TestHashOrder(t *testing.T) {
}
func TestShuffleDisks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
nDisks := 16
@@ -196,7 +196,7 @@ func testShuffleDisks(t *testing.T, z *erasureServerPools) {
// TestEvalDisks tests the behavior of evalDisks
func TestEvalDisks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
nDisks := 16

View File

@@ -18,7 +18,6 @@
package cmd
import (
"context"
"fmt"
"slices"
"strconv"
@@ -146,7 +145,7 @@ func TestObjectToPartOffset(t *testing.T) {
// Test them.
for _, testCase := range testCases {
index, offset, err := fi.ObjectToPartOffset(context.Background(), testCase.offset)
index, offset, err := fi.ObjectToPartOffset(t.Context(), testCase.offset)
if err != testCase.expectedErr {
t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err)
}
@@ -272,7 +271,7 @@ func TestFindFileInfoInQuorum(t *testing.T) {
for _, test := range tests {
test := test
t.Run("", func(t *testing.T) {
fi, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, "", test.expectedQuorum)
fi, err := findFileInfoInQuorum(t.Context(), test.fis, test.modTime, "", test.expectedQuorum)
_, ok1 := err.(InsufficientReadQuorum)
_, ok2 := test.expectedErr.(InsufficientReadQuorum)
if ok1 != ok2 {

View File

@@ -36,7 +36,7 @@ import (
)
func TestRepeatPutObjectPart(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
var objLayer ObjectLayer
@@ -50,7 +50,7 @@ func TestRepeatPutObjectPart(t *testing.T) {
}
// cleaning up of temporary test directories
defer objLayer.Shutdown(context.Background())
defer objLayer.Shutdown(t.Context())
defer removeRoots(disks)
err = objLayer.MakeBucket(ctx, "bucket1", MakeBucketOptions{})
@@ -91,7 +91,7 @@ func TestErasureDeleteObjectBasic(t *testing.T) {
{"bucket", "dir/obj", nil},
}
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend
@@ -99,7 +99,7 @@ func TestErasureDeleteObjectBasic(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer xl.Shutdown(context.Background())
defer xl.Shutdown(t.Context())
err = xl.MakeBucket(ctx, "bucket", MakeBucketOptions{})
if err != nil {
@@ -132,7 +132,7 @@ func TestErasureDeleteObjectBasic(t *testing.T) {
}
func TestDeleteObjectsVersionedTwoPools(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDirs, err := prepareErasurePools()
@@ -201,7 +201,7 @@ func TestDeleteObjectsVersionedTwoPools(t *testing.T) {
}
func TestDeleteObjectsVersioned(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDirs, err := prepareErasure(ctx, 16)
@@ -280,7 +280,7 @@ func TestDeleteObjectsVersioned(t *testing.T) {
}
func TestErasureDeleteObjectsErasureSet(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDirs, err := prepareErasureSets32(ctx)
@@ -353,7 +353,7 @@ func TestErasureDeleteObjectsErasureSet(t *testing.T) {
}
func TestErasureDeleteObjectDiskNotFound(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -362,7 +362,7 @@ func TestErasureDeleteObjectDiskNotFound(t *testing.T) {
t.Fatal(err)
}
// Cleanup backend directories
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
@@ -422,7 +422,7 @@ func TestErasureDeleteObjectDiskNotFound(t *testing.T) {
}
func TestErasureDeleteObjectDiskNotFoundErasure4(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -431,7 +431,7 @@ func TestErasureDeleteObjectDiskNotFoundErasure4(t *testing.T) {
t.Fatal(err)
}
// Cleanup backend directories
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
@@ -482,7 +482,7 @@ func TestErasureDeleteObjectDiskNotFoundErasure4(t *testing.T) {
}
func TestErasureDeleteObjectDiskNotFoundErr(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -491,7 +491,7 @@ func TestErasureDeleteObjectDiskNotFoundErr(t *testing.T) {
t.Fatal(err)
}
// Cleanup backend directories
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
@@ -553,7 +553,7 @@ func TestErasureDeleteObjectDiskNotFoundErr(t *testing.T) {
}
func TestGetObjectNoQuorum(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -562,7 +562,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
t.Fatal(err)
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
@@ -662,7 +662,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
}
func TestHeadObjectNoQuorum(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -671,7 +671,7 @@ func TestHeadObjectNoQuorum(t *testing.T) {
t.Fatal(err)
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
@@ -739,7 +739,7 @@ func TestHeadObjectNoQuorum(t *testing.T) {
}
func TestPutObjectNoQuorum(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -749,7 +749,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
@@ -802,7 +802,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
}
func TestPutObjectNoQuorumSmall(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -812,7 +812,7 @@ func TestPutObjectNoQuorumSmall(t *testing.T) {
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
@@ -869,7 +869,7 @@ func TestPutObjectNoQuorumSmall(t *testing.T) {
// Test PutObject twice, one small and another bigger
// than small data threshold and checks reading them again
func TestPutObjectSmallInlineData(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
const numberOfDisks = 4
@@ -881,7 +881,7 @@ func TestPutObjectSmallInlineData(t *testing.T) {
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
bucket := "bucket"
@@ -1131,7 +1131,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
// In some deployments, one object has data inlined in one disk and not inlined in other disks.
func TestGetObjectInlineNotInline(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create a backend with 4 disks named disk{1...4}, this name convention
@@ -1151,7 +1151,7 @@ func TestGetObjectInlineNotInline(t *testing.T) {
}
// cleaning up of temporary test directories
defer objLayer.Shutdown(context.Background())
defer objLayer.Shutdown(t.Context())
defer removeRoots(fsDirs)
// Create a testbucket
@@ -1192,7 +1192,7 @@ func TestGetObjectWithOutdatedDisks(t *testing.T) {
t.Skip()
}
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -1202,7 +1202,7 @@ func TestGetObjectWithOutdatedDisks(t *testing.T) {
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)

View File

@@ -159,7 +159,7 @@ func TestCrcHashMod(t *testing.T) {
// TestNewErasure - tests initialization of all input disks
// and constructs a valid `Erasure` object
func TestNewErasureSets(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
nDisks := 16 // Maximum disks.

View File

@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"crypto/rand"
"io"
"testing"
@@ -52,11 +51,11 @@ func TestErasureEncodeDecode(t *testing.T) {
buffer := make([]byte, len(data), 2*len(data))
copy(buffer, data)
erasure, err := NewErasure(context.Background(), test.dataBlocks, test.parityBlocks, blockSizeV2)
erasure, err := NewErasure(t.Context(), test.dataBlocks, test.parityBlocks, blockSizeV2)
if err != nil {
t.Fatalf("Test %d: failed to create erasure: %v", i, err)
}
encoded, err := erasure.EncodeData(context.Background(), buffer)
encoded, err := erasure.EncodeData(t.Context(), buffer)
if err != nil {
t.Fatalf("Test %d: failed to encode data: %v", i, err)
}
@@ -69,7 +68,7 @@ func TestErasureEncodeDecode(t *testing.T) {
}
if test.reconstructParity {
err = erasure.DecodeDataAndParityBlocks(context.Background(), encoded)
err = erasure.DecodeDataAndParityBlocks(t.Context(), encoded)
} else {
err = erasure.DecodeDataBlocks(encoded)
}
@@ -98,7 +97,7 @@ func TestErasureEncodeDecode(t *testing.T) {
}
decodedData := new(bytes.Buffer)
if _, err = writeDataBlocks(context.Background(), decodedData, decoded, test.dataBlocks, 0, int64(len(data))); err != nil {
if _, err = writeDataBlocks(t.Context(), decodedData, decoded, test.dataBlocks, 0, int64(len(data))); err != nil {
t.Errorf("Test %d: failed to write data blocks: %v", i, err)
}
if !bytes.Equal(decodedData.Bytes(), data) {
@@ -127,7 +126,7 @@ func newErasureTestSetup(tb testing.TB, dataBlocks int, parityBlocks int, blockS
if err != nil {
return nil, err
}
err = disks[i].MakeVol(context.Background(), "testbucket")
err = disks[i].MakeVol(tb.Context(), "testbucket")
if err != nil {
return nil, err
}
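
Test helpers that take `testing.TB` can use the same API, as `newErasureTestSetup` does above with `tb.Context()`: in Go 1.24 the `testing.TB` interface also carries a `Context` method, so one helper serves both tests and benchmarks. Illustrative sketch (the helper name is hypothetical):

```go
package cmd_test

import (
	"context"
	"testing"
)

// newTestContext mirrors how a testing.TB helper can hand out a context
// scoped to whichever test or benchmark called it.
func newTestContext(tb testing.TB) context.Context {
	tb.Helper()
	return tb.Context()
}

func TestHelper(t *testing.T)      { _ = newTestContext(t) }
func BenchmarkHelper(b *testing.B) { _ = newTestContext(b) }
```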

View File

@@ -33,7 +33,7 @@ import (
// Tests validate bucket LocationConstraint.
func TestIsValidLocationConstraint(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDir, err := prepareFS(ctx)
@@ -163,7 +163,7 @@ func TestExtractMetadataHeaders(t *testing.T) {
// Validate if the extracting headers.
for i, testCase := range testCases {
metadata := make(map[string]string)
err := extractMetadataFromMime(context.Background(), textproto.MIMEHeader(testCase.header), metadata)
err := extractMetadataFromMime(t.Context(), textproto.MIMEHeader(testCase.header), metadata)
if err != nil && !testCase.shouldFail {
t.Fatalf("Test %d failed to extract metadata: %v", i+1, err)
}

View File

@@ -37,7 +37,7 @@ func getTokenString(accessKey, secretKey string) (string, error) {
// Tests web request authenticator.
func TestWebRequestAuthenticate(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDir, err := prepareFS(ctx)
@ -94,7 +94,7 @@ func TestWebRequestAuthenticate(t *testing.T) {
}
func BenchmarkParseJWTStandardClaims(b *testing.B) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
obj, fsDir, err := prepareFS(ctx)
@ -125,7 +125,7 @@ func BenchmarkParseJWTStandardClaims(b *testing.B) {
}
func BenchmarkParseJWTMapClaims(b *testing.B) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
obj, fsDir, err := prepareFS(ctx)
@ -158,7 +158,7 @@ func BenchmarkParseJWTMapClaims(b *testing.B) {
}
func BenchmarkAuthenticateNode(b *testing.B) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
obj, fsDir, err := prepareFS(ctx)

View File

@ -18,7 +18,6 @@
package cmd
import (
"context"
"encoding/json"
"fmt"
"net/http"
@ -769,7 +768,7 @@ func TestKMSHandlerNotConfiguredOrInvalidCreds(t *testing.T) {
}
func setupKMSTest(t *testing.T, enableKMS bool) (*adminErasureTestBed, func()) {
adminTestBed, err := prepareAdminErasureTestBed(context.Background())
adminTestBed, err := prepareAdminErasureTestBed(t.Context())
if err != nil {
t.Fatal(err)
}
@ -810,7 +809,7 @@ func buildKMSRequest(t *testing.T, method, path, accessKey, secretKey string, qu
// setupKMSUser is a test helper that creates a new user with the provided access key and secret key
// and applies the given policy to the user.
func setupKMSUser(t *testing.T, accessKey, secretKey, p string) {
ctx := context.Background()
ctx := t.Context()
createUserParams := madmin.AddOrUpdateUserReq{
SecretKey: secretKey,
Status: madmin.AccountEnabled,

View File

@ -18,7 +18,6 @@
package cmd
import (
"context"
"encoding/hex"
"fmt"
"math/rand"
@ -34,7 +33,7 @@ func TestLocalLockerExpire(t *testing.T) {
rResources := make([]string, 1000)
quorum := 0
l := newLocker()
ctx := context.Background()
ctx := t.Context()
for i := range wResources {
arg := dsync.LockArgs{
UID: mustGetUUID(),
@ -112,7 +111,7 @@ func TestLocalLockerUnlock(t *testing.T) {
wUIDs := make([]string, n)
rUIDs := make([]string, 0, n*2)
l := newLocker()
ctx := context.Background()
ctx := t.Context()
quorum := 0
for i := range wResources {
names := [m]string{}
@ -287,7 +286,7 @@ func Test_localLocker_expireOldLocksExpire(t *testing.T) {
for i := 0; i < readers; i++ {
rng.Read(tmp[:])
ok, err := l.RLock(context.Background(), dsync.LockArgs{
ok, err := l.RLock(t.Context(), dsync.LockArgs{
UID: uuid.NewString(),
Resources: res,
Source: hex.EncodeToString(tmp[:8]),
@ -374,7 +373,7 @@ func Test_localLocker_RUnlock(t *testing.T) {
for i := 0; i < readers; i++ {
rng.Read(tmp[:])
ok, err := l.RLock(context.Background(), dsync.LockArgs{
ok, err := l.RLock(t.Context(), dsync.LockArgs{
UID: uuid.NewString(),
Resources: res,
Source: hex.EncodeToString(tmp[:8]),
@ -398,7 +397,7 @@ func Test_localLocker_RUnlock(t *testing.T) {
}
start := time.Now()
for _, lock := range toUnLock {
ok, err := l.ForceUnlock(context.Background(), lock)
ok, err := l.ForceUnlock(t.Context(), lock)
if err != nil || !ok {
t.Fatal(err)
}
@ -423,7 +422,7 @@ func Test_localLocker_RUnlock(t *testing.T) {
}
start = time.Now()
for _, lock := range toUnLock {
ok, err := l.RUnlock(context.TODO(), lock)
ok, err := l.RUnlock(t.Context(), lock)
if err != nil || !ok {
t.Fatal(err)
}

View File

@ -37,7 +37,7 @@ func TestLockRESTlient(t *testing.T) {
}
endpointLocal.IsLocal = true
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
err = initGlobalLockGrid(ctx, []PoolEndpoints{{Endpoints: Endpoints{endpoint, endpointLocal}}})
if err != nil {
@ -50,22 +50,22 @@ func TestLockRESTlient(t *testing.T) {
}
// Attempt all calls.
_, err = lkClient.RLock(context.Background(), dsync.LockArgs{})
_, err = lkClient.RLock(t.Context(), dsync.LockArgs{})
if err == nil {
t.Fatal("Expected for Rlock to fail")
}
_, err = lkClient.Lock(context.Background(), dsync.LockArgs{})
_, err = lkClient.Lock(t.Context(), dsync.LockArgs{})
if err == nil {
t.Fatal("Expected for Lock to fail")
}
_, err = lkClient.RUnlock(context.Background(), dsync.LockArgs{})
_, err = lkClient.RUnlock(t.Context(), dsync.LockArgs{})
if err == nil {
t.Fatal("Expected for RUnlock to fail")
}
_, err = lkClient.Unlock(context.Background(), dsync.LockArgs{})
_, err = lkClient.Unlock(t.Context(), dsync.LockArgs{})
if err == nil {
t.Fatal("Expected for Unlock to fail")
}

View File

@ -53,7 +53,7 @@ func createLockTestServer(ctx context.Context, t *testing.T) (string, *lockRESTS
// Test function to remove lock entries from map based on name & uid combination
func TestLockRpcServerRemoveEntry(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
testPath, locker, _ := createLockTestServer(ctx, t)

View File

@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"io"
"os"
"reflect"
@ -278,7 +277,7 @@ func Test_metacacheReader_readAll(t *testing.T) {
var wg sync.WaitGroup
wg.Add(1)
go func() {
readErr = r.readAll(context.Background(), objs)
readErr = r.readAll(t.Context(), objs)
wg.Done()
}()
want := loadMetacacheSampleNames

View File

@ -18,7 +18,6 @@
package cmd
import (
"context"
"runtime"
"testing"
"time"
@ -33,8 +32,8 @@ import (
func TestGetSource(t *testing.T) {
currentSource := func() string { return getSource(2) }
gotSource := currentSource()
// Hard coded line number, 35, in the "expectedSource" value
expectedSource := "[namespace-lock_test.go:35:TestGetSource()]"
// Hard coded line number, 34, in the "expectedSource" value
expectedSource := "[namespace-lock_test.go:34:TestGetSource()]"
if gotSource != expectedSource {
t.Errorf("expected : %s, got : %s", expectedSource, gotSource)
}
@ -44,7 +43,7 @@ func TestGetSource(t *testing.T) {
func TestNSLockRace(t *testing.T) {
t.Skip("long test skip it")
ctx := context.Background()
ctx := t.Context()
for i := 0; i < 10000; i++ {
nsLk := newNSLock(false)
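
One edit in this file is not a pure context swap: TestGetSource asserts an exact source line, and deleting the now-unused "context" import shifts the whole file up by one line, hence 35 becoming 34 in the expected value. A sketch of how such a helper is commonly built, assuming runtime.Caller underneath (the real getSource may differ):

    package cmd

    import (
        "fmt"
        "path/filepath"
        "runtime"
    )

    // getSourceSketch is a hypothetical stand-in for getSource: it reports
    // file, line, and function n frames up the stack, so any edit that
    // shifts lines above the call site invalidates a hard-coded expectation.
    func getSourceSketch(n int) string {
        pc, file, line, ok := runtime.Caller(n)
        if !ok {
            return "[unknown]"
        }
        fn := runtime.FuncForPC(pc)
        if fn == nil {
            return "[unknown]"
        }
        return fmt.Sprintf("[%s:%d:%s()]", filepath.Base(file), line, filepath.Base(fn.Name()))
    }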

View File

@ -163,14 +163,14 @@ func testListObjectsVersionedFolders(obj ObjectLayer, instanceType string, t1 Te
testCase.prefix, "marker:", testCase.marker, "delimiter:",
testCase.delimiter, "maxkeys:", testCase.maxKeys)
resultV, err = obj.ListObjectVersions(context.Background(), testCase.bucketName,
resultV, err = obj.ListObjectVersions(t.Context(), testCase.bucketName,
testCase.prefix, testCase.marker, "", testCase.delimiter, testCase.maxKeys)
} else {
t.Log("ListObjects, bucket:", testCase.bucketName, "prefix:",
testCase.prefix, "marker:", testCase.marker, "delimiter:",
testCase.delimiter, "maxkeys:", testCase.maxKeys)
resultL, err = obj.ListObjects(context.Background(), testCase.bucketName,
resultL, err = obj.ListObjects(t.Context(), testCase.bucketName,
testCase.prefix, testCase.marker, testCase.delimiter, testCase.maxKeys)
}
if err != nil && testCase.shouldPass {
@ -944,7 +944,7 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v
testCase := testCase
t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) {
t.Log("ListObjects, bucket:", testCase.bucketName, "prefix:", testCase.prefix, "marker:", testCase.marker, "delimiter:", testCase.delimiter, "maxkeys:", testCase.maxKeys)
result, err := obj.ListObjects(context.Background(), testCase.bucketName,
result, err := obj.ListObjects(t.Context(), testCase.bucketName,
testCase.prefix, testCase.marker, testCase.delimiter, int(testCase.maxKeys))
if err != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
@ -1675,7 +1675,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
for i, testCase := range testCases {
testCase := testCase
t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) {
result, err := obj.ListObjectVersions(context.Background(), testCase.bucketName,
result, err := obj.ListObjectVersions(t.Context(), testCase.bucketName,
testCase.prefix, testCase.marker, "", testCase.delimiter, int(testCase.maxKeys))
if err != nil && testCase.shouldPass {
t.Errorf("%s: Expected to pass, but failed with: <ERROR> %s", instanceType, err.Error())
@ -1830,7 +1830,7 @@ func testListObjectsContinuation(obj ObjectLayer, instanceType string, t1 TestEr
var foundPrefixes []string
marker := ""
for {
result, err := obj.ListObjects(context.Background(), testCase.bucketName,
result, err := obj.ListObjects(t.Context(), testCase.bucketName,
testCase.prefix, marker, testCase.delimiter, testCase.page)
if err != nil {
t.Fatalf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
@ -1905,7 +1905,7 @@ func BenchmarkListObjects(b *testing.B) {
bucket := "ls-benchmark-bucket"
// Create a bucket.
err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
err := obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@ -1913,7 +1913,7 @@ func BenchmarkListObjects(b *testing.B) {
// Insert objects to be listed and benchmarked later.
for i := 0; i < 20000; i++ {
key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject(context.Background(), bucket, key, mustGetPutObjReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), ObjectOptions{})
_, err = obj.PutObject(b.Context(), bucket, key, mustGetPutObjReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@ -1923,7 +1923,7 @@ func BenchmarkListObjects(b *testing.B) {
// List the buckets over and over and over.
for i := 0; i < b.N; i++ {
_, err = obj.ListObjects(context.Background(), bucket, "", "obj9000", "", -1)
_, err = obj.ListObjects(b.Context(), bucket, "", "obj9000", "", -1)
if err != nil {
b.Fatal(err)
}
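
Benchmarks get the same treatment via (*testing.B).Context, as in BenchmarkListObjects above. One small point worth a sketch (the benchmark below is illustrative only): fetching the context once before the timed loop keeps the accessor call out of the measurement.

    package cmd

    import "testing"

    // BenchmarkContextSketch is illustrative. Hoisting b.Context() above
    // the loop avoids charging the accessor to the benchmarked work.
    func BenchmarkContextSketch(b *testing.B) {
        ctx := b.Context()
        b.ResetTimer()
        for i := 0; i < b.N; i++ {
            _ = ctx.Err() // stand-in for work that takes a context
        }
    }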

View File

@ -2170,7 +2170,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
testCase := testCase
t.(*testing.T).Run("", func(t *testing.T) {
opts = ObjectOptions{}
actualResult, actualErr := obj.CompleteMultipartUpload(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.parts, ObjectOptions{})
actualResult, actualErr := obj.CompleteMultipartUpload(t.Context(), testCase.bucket, testCase.object, testCase.uploadID, testCase.parts, ObjectOptions{})
if actualErr != nil && testCase.shouldPass {
t.Errorf("%s: Expected to pass, but failed with: <ERROR> %s", instanceType, actualErr)
}

View File

@ -18,7 +18,6 @@
package cmd
import (
"context"
"net/http"
"net/http/httptest"
"reflect"
@ -32,7 +31,7 @@ import (
func TestGetAndValidateAttributesOpts(t *testing.T) {
globalBucketVersioningSys = &BucketVersioningSys{}
bucket := minioMetaBucket
ctx := context.Background()
ctx := t.Context()
testCases := []struct {
name string
headers http.Header

View File

@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"net/http"
"net/http/httptest"
"testing"
@ -108,7 +107,7 @@ func TestCheckPreconditions(t *testing.T) {
request.Header.Set(xhttp.IfModifiedSince, tc.ifModifiedSince)
request.Header.Set(xhttp.IfMatch, tc.ifMatch)
request.Header.Set(xhttp.IfUnmodifiedSince, tc.ifUnmodifiedSince)
actualFlag := checkPreconditions(context.Background(), recorder, request, tc.objInfo, ObjectOptions{})
actualFlag := checkPreconditions(t.Context(), recorder, request, tc.objInfo, ObjectOptions{})
if tc.expectedFlag != actualFlag {
t.Errorf("test: %s, got flag: %v, want: %v", tc.name, actualFlag, tc.expectedFlag)
}
@ -170,7 +169,7 @@ func TestCheckPreconditions(t *testing.T) {
request.Header.Set(xhttp.IfModifiedSince, tc.ifModifiedSince)
request.Header.Set(xhttp.IfMatch, tc.ifMatch)
request.Header.Set(xhttp.IfUnmodifiedSince, tc.ifUnmodifiedSince)
actualFlag := checkPreconditions(context.Background(), recorder, request, tc.objInfo, ObjectOptions{})
actualFlag := checkPreconditions(t.Context(), recorder, request, tc.objInfo, ObjectOptions{})
if tc.expectedFlag != actualFlag {
t.Errorf("test: %s, got flag: %v, want: %v", tc.name, actualFlag, tc.expectedFlag)
}

View File

@ -76,7 +76,7 @@ func TestServerConfigFile(t *testing.T) {
// Tests initializing new object layer.
func TestNewObjectLayer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Tests for ErasureSD object layer.
nDisks := 1

View File

@ -49,7 +49,7 @@ func TestStripStandardPorts(t *testing.T) {
// Test printing server common message.
func TestPrintServerCommonMessage(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDir, err := prepareFS(ctx)
@ -67,7 +67,7 @@ func TestPrintServerCommonMessage(t *testing.T) {
// Tests print cli access message.
func TestPrintCLIAccessMsg(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDir, err := prepareFS(ctx)
@ -85,7 +85,7 @@ func TestPrintCLIAccessMsg(t *testing.T) {
// Test print startup message.
func TestPrintStartupMessage(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDir, err := prepareFS(ctx)

View File

@ -42,7 +42,7 @@ func TestResourceListSorting(t *testing.T) {
// Tests presigned v2 signature.
func TestDoesPresignedV2SignatureMatch(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDir, err := prepareFS(ctx)
@ -164,7 +164,7 @@ func TestDoesPresignedV2SignatureMatch(t *testing.T) {
// TestValidateV2AuthHeader - Tests validate the logic of V2 Authorization header validator.
func TestValidateV2AuthHeader(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDir, err := prepareFS(ctx)
@ -238,7 +238,7 @@ func TestValidateV2AuthHeader(t *testing.T) {
}
func TestDoesPolicySignatureV2Match(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDir, err := prepareFS(ctx)

View File

@ -30,7 +30,7 @@ import (
)
func TestCheckValid(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
objLayer, fsDir, err := prepareFS(ctx)

View File

@ -37,7 +37,7 @@ func niceError(code APIErrorCode) string {
}
func TestDoesPolicySignatureMatch(t *testing.T) {
_, fsDir, err := prepareFS(context.Background())
_, fsDir, err := prepareFS(t.Context())
if err != nil {
t.Fatal(err)
}
@ -100,7 +100,7 @@ func TestDoesPolicySignatureMatch(t *testing.T) {
}
func TestDoesPresignedSignatureMatch(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDir, err := prepareFS(ctx)

View File

@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"errors"
"math/rand"
"reflect"
@ -41,7 +40,7 @@ func testStorageAPIDiskInfo(t *testing.T, storage StorageAPI) {
}
for i, testCase := range testCases {
_, err := storage.DiskInfo(context.Background(), DiskInfoOptions{Metrics: true})
_, err := storage.DiskInfo(t.Context(), DiskInfoOptions{Metrics: true})
expectErr := (err != nil)
if expectErr != testCase.expectErr {
@ -54,7 +53,7 @@ func testStorageAPIDiskInfo(t *testing.T, storage StorageAPI) {
}
func testStorageAPIStatInfoFile(t *testing.T, storage StorageAPI) {
err := storage.AppendFile(context.Background(), "foo", pathJoin("myobject", xlStorageFormatFile), []byte("foo"))
err := storage.AppendFile(t.Context(), "foo", pathJoin("myobject", xlStorageFormatFile), []byte("foo"))
if err != nil {
t.Fatalf("unexpected error %v", err)
}
@ -70,7 +69,7 @@ func testStorageAPIStatInfoFile(t *testing.T, storage StorageAPI) {
}
for i, testCase := range testCases {
_, err := storage.StatInfoFile(context.Background(), testCase.volumeName, testCase.objectName+"/"+xlStorageFormatFile, false)
_, err := storage.StatInfoFile(t.Context(), testCase.volumeName, testCase.objectName+"/"+xlStorageFormatFile, false)
expectErr := (err != nil)
if expectErr != testCase.expectErr {
@ -80,7 +79,7 @@ func testStorageAPIStatInfoFile(t *testing.T, storage StorageAPI) {
}
func testStorageAPIListDir(t *testing.T, storage StorageAPI) {
err := storage.AppendFile(context.Background(), "foo", "path/to/myobject", []byte("foo"))
err := storage.AppendFile(t.Context(), "foo", "path/to/myobject", []byte("foo"))
if err != nil {
t.Fatalf("unexpected error %v", err)
}
@ -97,7 +96,7 @@ func testStorageAPIListDir(t *testing.T, storage StorageAPI) {
}
for i, testCase := range testCases {
result, err := storage.ListDir(context.Background(), "", testCase.volumeName, testCase.prefix, -1)
result, err := storage.ListDir(t.Context(), "", testCase.volumeName, testCase.prefix, -1)
expectErr := (err != nil)
if expectErr != testCase.expectErr {
@ -113,7 +112,7 @@ func testStorageAPIListDir(t *testing.T, storage StorageAPI) {
}
func testStorageAPIReadAll(t *testing.T, storage StorageAPI) {
err := storage.AppendFile(context.Background(), "foo", "myobject", []byte("foo"))
err := storage.AppendFile(t.Context(), "foo", "myobject", []byte("foo"))
if err != nil {
t.Fatalf("unexpected error %v", err)
}
@ -130,7 +129,7 @@ func testStorageAPIReadAll(t *testing.T, storage StorageAPI) {
}
for i, testCase := range testCases {
result, err := storage.ReadAll(context.Background(), testCase.volumeName, testCase.objectName)
result, err := storage.ReadAll(t.Context(), testCase.volumeName, testCase.objectName)
expectErr := (err != nil)
if expectErr != testCase.expectErr {
@ -146,7 +145,7 @@ func testStorageAPIReadAll(t *testing.T, storage StorageAPI) {
}
func testStorageAPIReadFile(t *testing.T, storage StorageAPI) {
err := storage.AppendFile(context.Background(), "foo", "myobject", []byte("foo"))
err := storage.AppendFile(t.Context(), "foo", "myobject", []byte("foo"))
if err != nil {
t.Fatalf("unexpected error %v", err)
}
@ -167,7 +166,7 @@ func testStorageAPIReadFile(t *testing.T, storage StorageAPI) {
result := make([]byte, 100)
for i, testCase := range testCases {
result = result[testCase.offset:3]
_, err := storage.ReadFile(context.Background(), testCase.volumeName, testCase.objectName, testCase.offset, result, nil)
_, err := storage.ReadFile(t.Context(), testCase.volumeName, testCase.objectName, testCase.offset, result, nil)
expectErr := (err != nil)
if expectErr != testCase.expectErr {
@ -209,7 +208,7 @@ func testStorageAPIAppendFile(t *testing.T, storage StorageAPI) {
if testCase.ignoreIfWindows && runtime.GOOS == "windows" {
continue
}
err := storage.AppendFile(context.Background(), testCase.volumeName, testCase.objectName, testCase.data)
err := storage.AppendFile(t.Context(), testCase.volumeName, testCase.objectName, testCase.data)
expectErr := (err != nil)
if expectErr != testCase.expectErr {
@ -217,7 +216,7 @@ func testStorageAPIAppendFile(t *testing.T, storage StorageAPI) {
}
if !testCase.expectErr {
data, err := storage.ReadAll(context.Background(), testCase.volumeName, testCase.objectName)
data, err := storage.ReadAll(t.Context(), testCase.volumeName, testCase.objectName)
if err != nil {
t.Fatal(err)
}
@ -230,7 +229,7 @@ func testStorageAPIAppendFile(t *testing.T, storage StorageAPI) {
}
func testStorageAPIDeleteFile(t *testing.T, storage StorageAPI) {
err := storage.AppendFile(context.Background(), "foo", "myobject", []byte("foo"))
err := storage.AppendFile(t.Context(), "foo", "myobject", []byte("foo"))
if err != nil {
t.Fatalf("unexpected error %v", err)
}
@ -248,7 +247,7 @@ func testStorageAPIDeleteFile(t *testing.T, storage StorageAPI) {
}
for i, testCase := range testCases {
err := storage.Delete(context.Background(), testCase.volumeName, testCase.objectName, DeleteOptions{
err := storage.Delete(t.Context(), testCase.volumeName, testCase.objectName, DeleteOptions{
Recursive: false,
Immediate: false,
})
@ -261,12 +260,12 @@ func testStorageAPIDeleteFile(t *testing.T, storage StorageAPI) {
}
func testStorageAPIRenameFile(t *testing.T, storage StorageAPI) {
err := storage.AppendFile(context.Background(), "foo", "myobject", []byte("foo"))
err := storage.AppendFile(t.Context(), "foo", "myobject", []byte("foo"))
if err != nil {
t.Fatalf("unexpected error %v", err)
}
err = storage.AppendFile(context.Background(), "foo", "otherobject", []byte("foo"))
err = storage.AppendFile(t.Context(), "foo", "otherobject", []byte("foo"))
if err != nil {
t.Fatalf("unexpected error %v", err)
}
@ -285,7 +284,7 @@ func testStorageAPIRenameFile(t *testing.T, storage StorageAPI) {
}
for i, testCase := range testCases {
err := storage.RenameFile(context.Background(), testCase.volumeName, testCase.objectName, testCase.destVolumeName, testCase.destObjectName)
err := storage.RenameFile(t.Context(), testCase.volumeName, testCase.objectName, testCase.destVolumeName, testCase.destObjectName)
expectErr := (err != nil)
if expectErr != testCase.expectErr {
@ -341,11 +340,11 @@ func newStorageRESTHTTPServerClient(t testing.TB) *storageRESTClient {
registerStorageRESTHandlers(tg.Mux[1], poolEps, tg.Managers[1])
storage := globalLocalSetDrives[0][0][0]
if err = storage.MakeVol(context.Background(), "foo"); err != nil {
if err = storage.MakeVol(t.Context(), "foo"); err != nil {
t.Fatalf("unexpected error %v", err)
}
if err = storage.MakeVol(context.Background(), "bar"); err != nil {
if err = storage.MakeVol(t.Context(), "bar"); err != nil {
t.Fatalf("unexpected error %v", err)
}
@ -355,7 +354,7 @@ func newStorageRESTHTTPServerClient(t testing.TB) *storageRESTClient {
}
for {
_, err := restClient.DiskInfo(context.Background(), DiskInfoOptions{})
_, err := restClient.DiskInfo(t.Context(), DiskInfoOptions{})
if err == nil || errors.Is(err, errUnformattedDisk) {
break
}

View File

@ -947,7 +947,7 @@ func TestIAMExportImportWithLDAP(t *testing.T) {
}
func TestIAMImportAssetWithLDAP(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
ctx, cancel := context.WithTimeout(t.Context(), testDefaultTimeout)
defer cancel()
exportContentStrings := map[string]string{

View File

@ -1932,7 +1932,7 @@ func ExecObjectLayerTestWithDirs(t TestErrHandler, objTest objTestTypeWithDirs)
// ExecObjectLayerDiskAlteredTest - executes object layer tests while altering
// disks in between tests. Creates Erasure ObjectLayer instance and runs test for Erasure layer.
func ExecObjectLayerDiskAlteredTest(t *testing.T, objTest objTestDiskNotFoundType) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
objLayer, fsDirs, err := prepareErasure16(ctx)
@ -1956,7 +1956,7 @@ type objTestStaleFilesType func(obj ObjectLayer, instanceType string, dirs []str
// ExecObjectLayerStaleFilesTest - executes object layer tests those leaves stale
// files/directories under .minio/tmp. Creates Erasure ObjectLayer instance and runs test for Erasure layer.
func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
nDisks := 16
@ -2274,7 +2274,7 @@ func TestToErrIsNil(t *testing.T) {
if toStorageErr(nil) != nil {
t.Errorf("Test expected to return nil, failed instead got a non-nil value %s", toStorageErr(nil))
}
ctx := context.Background()
ctx := t.Context()
if toAPIError(ctx, nil) != noError {
t.Errorf("Test expected error code to be ErrNone, failed instead provided %s", toAPIError(ctx, nil).Code)
}

View File

@ -21,7 +21,6 @@ import (
"bufio"
"bytes"
"compress/gzip"
"context"
"encoding/base64"
"encoding/binary"
"encoding/json"
@ -1082,7 +1081,7 @@ func Test_mergeEntryChannels(t *testing.T) {
entries = append(entries, ch)
}
out := make(chan metaCacheEntry, 1)
err := mergeEntryChannels(context.Background(), entries, out, 1)
err := mergeEntryChannels(t.Context(), entries, out, 1)
if err != nil {
t.Fatal(err)
}

View File

@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"crypto/rand"
"io"
"net/url"
@ -142,7 +141,7 @@ func newXLStorageTestSetup(tb testing.TB) (*xlStorageDiskIDCheck, string, error)
}
// Create a sample format.json file
if err = storage.WriteAll(context.Background(), minioMetaBucket, formatConfigFile, []byte(`{"version":"1","format":"xl","id":"592a41c2-b7cc-4130-b883-c4b5cb15965b","xl":{"version":"3","this":"da017d62-70e3-45f1-8a1a-587707e69ad1","sets":[["e07285a6-8c73-4962-89c6-047fb939f803","33b8d431-482d-4376-b63c-626d229f0a29","cff6513a-4439-4dc1-bcaa-56c9e880c352","da017d62-70e3-45f1-8a1a-587707e69ad1","9c9f21d5-1f15-4737-bce6-835faa0d9626","0a59b346-1424-4fc2-9fa2-a2e80541d0c1","7924a3dc-b69a-4971-9a2e-014966d6aebb","4d2b8dd9-4e48-444b-bdca-c89194b26042"]],"distributionAlgo":"CRCMOD"}}`)); err != nil {
if err = storage.WriteAll(tb.Context(), minioMetaBucket, formatConfigFile, []byte(`{"version":"1","format":"xl","id":"592a41c2-b7cc-4130-b883-c4b5cb15965b","xl":{"version":"3","this":"da017d62-70e3-45f1-8a1a-587707e69ad1","sets":[["e07285a6-8c73-4962-89c6-047fb939f803","33b8d431-482d-4376-b63c-626d229f0a29","cff6513a-4439-4dc1-bcaa-56c9e880c352","da017d62-70e3-45f1-8a1a-587707e69ad1","9c9f21d5-1f15-4737-bce6-835faa0d9626","0a59b346-1424-4fc2-9fa2-a2e80541d0c1","7924a3dc-b69a-4971-9a2e-014966d6aebb","4d2b8dd9-4e48-444b-bdca-c89194b26042"]],"distributionAlgo":"CRCMOD"}}`)); err != nil {
return nil, "", err
}
@ -243,15 +242,15 @@ func TestXLStorageReadVersionLegacy(t *testing.T) {
}
// Create files for the test cases.
if err = xlStorage.MakeVol(context.Background(), "exists-legacy"); err != nil {
if err = xlStorage.MakeVol(t.Context(), "exists-legacy"); err != nil {
t.Fatalf("Unable to create a volume \"exists-legacy\", %s", err)
}
if err = xlStorage.AppendFile(context.Background(), "exists-legacy", "as-file/xl.json", []byte(legacyJSON)); err != nil {
if err = xlStorage.AppendFile(t.Context(), "exists-legacy", "as-file/xl.json", []byte(legacyJSON)); err != nil {
t.Fatalf("Unable to create a file \"as-file\", %s", err)
}
fi, err := xlStorage.ReadVersion(context.Background(), "", "exists-legacy", "as-file", "", ReadOptions{})
fi, err := xlStorage.ReadVersion(t.Context(), "", "exists-legacy", "as-file", "", ReadOptions{})
if err != nil {
t.Fatalf("Unable to read older 'xl.json' content: %s", err)
}
@ -273,19 +272,19 @@ func TestXLStorageReadVersion(t *testing.T) {
fi, _ := getFileInfo(xlMeta, "exists", "as-file", "", fileInfoOpts{Data: false})
// Create files for the test cases.
if err = xlStorage.MakeVol(context.Background(), "exists"); err != nil {
if err = xlStorage.MakeVol(t.Context(), "exists"); err != nil {
t.Fatalf("Unable to create a volume \"exists\", %s", err)
}
if err = xlStorage.AppendFile(context.Background(), "exists", "as-directory/as-file/xl.meta", xlMeta); err != nil {
if err = xlStorage.AppendFile(t.Context(), "exists", "as-directory/as-file/xl.meta", xlMeta); err != nil {
t.Fatalf("Unable to create a file \"as-directory/as-file\", %s", err)
}
if err = xlStorage.AppendFile(context.Background(), "exists", "as-file/xl.meta", xlMeta); err != nil {
if err = xlStorage.AppendFile(t.Context(), "exists", "as-file/xl.meta", xlMeta); err != nil {
t.Fatalf("Unable to create a file \"as-file\", %s", err)
}
if err = xlStorage.AppendFile(context.Background(), "exists", "as-file-parent/xl.meta", xlMeta); err != nil {
if err = xlStorage.AppendFile(t.Context(), "exists", "as-file-parent/xl.meta", xlMeta); err != nil {
t.Fatalf("Unable to create a file \"as-file-parent\", %s", err)
}
if err = xlStorage.MakeVol(context.Background(), "exists/as-file/"+fi.DataDir); err != nil {
if err = xlStorage.MakeVol(t.Context(), "exists/as-file/"+fi.DataDir); err != nil {
t.Fatalf("Unable to create a dataDir %s, %s", fi.DataDir, err)
}
@ -341,7 +340,7 @@ func TestXLStorageReadVersion(t *testing.T) {
// Run through all the test cases and validate for ReadVersion.
for i, testCase := range testCases {
_, err = xlStorage.ReadVersion(context.Background(), "", testCase.volume, testCase.path, "", ReadOptions{})
_, err = xlStorage.ReadVersion(t.Context(), "", testCase.volume, testCase.path, "", ReadOptions{})
if err != testCase.err {
t.Fatalf("TestXLStorage %d: Expected err \"%s\", got err \"%s\"", i+1, testCase.err, err)
}
@ -357,16 +356,16 @@ func TestXLStorageReadAll(t *testing.T) {
}
// Create files for the test cases.
if err = xlStorage.MakeVol(context.Background(), "exists"); err != nil {
if err = xlStorage.MakeVol(t.Context(), "exists"); err != nil {
t.Fatalf("Unable to create a volume \"exists\", %s", err)
}
if err = xlStorage.AppendFile(context.Background(), "exists", "as-directory/as-file", []byte("Hello, World")); err != nil {
if err = xlStorage.AppendFile(t.Context(), "exists", "as-directory/as-file", []byte("Hello, World")); err != nil {
t.Fatalf("Unable to create a file \"as-directory/as-file\", %s", err)
}
if err = xlStorage.AppendFile(context.Background(), "exists", "as-file", []byte("Hello, World")); err != nil {
if err = xlStorage.AppendFile(t.Context(), "exists", "as-file", []byte("Hello, World")); err != nil {
t.Fatalf("Unable to create a file \"as-file\", %s", err)
}
if err = xlStorage.AppendFile(context.Background(), "exists", "as-file-parent", []byte("Hello, World")); err != nil {
if err = xlStorage.AppendFile(t.Context(), "exists", "as-file-parent", []byte("Hello, World")); err != nil {
t.Fatalf("Unable to create a file \"as-file-parent\", %s", err)
}
@ -423,7 +422,7 @@ func TestXLStorageReadAll(t *testing.T) {
var dataRead []byte
// Run through all the test cases and validate for ReadAll.
for i, testCase := range testCases {
dataRead, err = xlStorage.ReadAll(context.Background(), testCase.volume, testCase.path)
dataRead, err = xlStorage.ReadAll(t.Context(), testCase.volume, testCase.path)
if err != testCase.err {
t.Errorf("TestXLStorage %d: Expected err \"%v\", got err \"%v\"", i+1, testCase.err, err)
continue
@ -529,7 +528,7 @@ func TestXLStorageMakeVol(t *testing.T) {
}
for i, testCase := range testCases {
if err := xlStorage.MakeVol(context.Background(), testCase.volName); err != testCase.expectedErr {
if err := xlStorage.MakeVol(t.Context(), testCase.volName); err != testCase.expectedErr {
t.Fatalf("TestXLStorage %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
}
@ -561,7 +560,7 @@ func TestXLStorageMakeVol(t *testing.T) {
t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err)
}
if err := xlStorageNew.MakeVol(context.Background(), "test-vol"); err != errDiskAccessDenied {
if err := xlStorageNew.MakeVol(t.Context(), "test-vol"); err != errDiskAccessDenied {
t.Fatalf("expected: %s, got: %s", errDiskAccessDenied, err)
}
}
@ -576,7 +575,7 @@ func TestXLStorageDeleteVol(t *testing.T) {
}
// Setup test environment.
if err = xlStorage.MakeVol(context.Background(), "success-vol"); err != nil {
if err = xlStorage.MakeVol(t.Context(), "success-vol"); err != nil {
t.Fatalf("Unable to create volume, %s", err)
}
@ -620,7 +619,7 @@ func TestXLStorageDeleteVol(t *testing.T) {
}
for i, testCase := range testCases {
if err = xlStorage.DeleteVol(context.Background(), testCase.volName, false); err != testCase.expectedErr {
if err = xlStorage.DeleteVol(t.Context(), testCase.volName, false); err != testCase.expectedErr {
t.Fatalf("TestXLStorage: %d, expected: %s, got: %s", i+1, testCase.expectedErr, err)
}
}
@ -662,7 +661,7 @@ func TestXLStorageDeleteVol(t *testing.T) {
t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err)
}
if err = xlStorageNew.DeleteVol(context.Background(), "mybucket", false); err != errDiskAccessDenied {
if err = xlStorageNew.DeleteVol(t.Context(), "mybucket", false); err != errDiskAccessDenied {
t.Fatalf("expected: Permission error, got: %s", err)
}
}
@ -676,7 +675,7 @@ func TestXLStorageDeleteVol(t *testing.T) {
// TestXLStorage for delete on an removed disk.
// should fail with disk not found.
err = xlStorageDeletedStorage.DeleteVol(context.Background(), "Del-Vol", false)
err = xlStorageDeletedStorage.DeleteVol(t.Context(), "Del-Vol", false)
if err != errDiskNotFound {
t.Errorf("Expected: \"Drive not found\", got \"%s\"", err)
}
@ -691,7 +690,7 @@ func TestXLStorageStatVol(t *testing.T) {
}
// Setup test environment.
if err = xlStorage.MakeVol(context.Background(), "success-vol"); err != nil {
if err = xlStorage.MakeVol(t.Context(), "success-vol"); err != nil {
t.Fatalf("Unable to create volume, %s", err)
}
@ -718,7 +717,7 @@ func TestXLStorageStatVol(t *testing.T) {
for i, testCase := range testCases {
var volInfo VolInfo
volInfo, err = xlStorage.StatVol(context.Background(), testCase.volName)
volInfo, err = xlStorage.StatVol(t.Context(), testCase.volName)
if err != testCase.expectedErr {
t.Fatalf("TestXLStorage case : %d, Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
@ -740,7 +739,7 @@ func TestXLStorageStatVol(t *testing.T) {
// TestXLStorage for delete on an removed disk.
// should fail with disk not found.
_, err = xlStorageDeletedStorage.StatVol(context.Background(), "Stat vol")
_, err = xlStorageDeletedStorage.StatVol(t.Context(), "Stat vol")
if err != errDiskNotFound {
t.Errorf("Expected: \"Drive not found\", got \"%s\"", err)
}
@ -756,18 +755,18 @@ func TestXLStorageListVols(t *testing.T) {
var volInfos []VolInfo
// TestXLStorage empty list vols.
if volInfos, err = xlStorage.ListVols(context.Background()); err != nil {
if volInfos, err = xlStorage.ListVols(t.Context()); err != nil {
t.Fatalf("expected: <nil>, got: %s", err)
} else if len(volInfos) != 1 {
t.Fatalf("expected: one entry, got: %v", volInfos)
}
// TestXLStorage non-empty list vols.
if err = xlStorage.MakeVol(context.Background(), "success-vol"); err != nil {
if err = xlStorage.MakeVol(t.Context(), "success-vol"); err != nil {
t.Fatalf("Unable to create volume, %s", err)
}
volInfos, err = xlStorage.ListVols(context.Background())
volInfos, err = xlStorage.ListVols(t.Context())
if err != nil {
t.Fatalf("expected: <nil>, got: %s", err)
}
@ -788,7 +787,7 @@ func TestXLStorageListVols(t *testing.T) {
// removing the path and simulating disk failure
os.RemoveAll(path)
// should fail with errDiskNotFound.
if _, err = xlStorage.ListVols(context.Background()); err != errDiskNotFound {
if _, err = xlStorage.ListVols(t.Context()); err != errDiskNotFound {
t.Errorf("Expected to fail with \"%s\", but instead failed with \"%s\"", errDiskNotFound, err)
}
}
@ -809,13 +808,13 @@ func TestXLStorageListDir(t *testing.T) {
// removing the disk, used to recreate disk not found error.
os.RemoveAll(diskPath)
// Setup test environment.
if err = xlStorage.MakeVol(context.Background(), "success-vol"); err != nil {
if err = xlStorage.MakeVol(t.Context(), "success-vol"); err != nil {
t.Fatalf("Unable to create volume, %s", err)
}
if err = xlStorage.AppendFile(context.Background(), "success-vol", "abc/def/ghi/success-file", []byte("Hello, world")); err != nil {
if err = xlStorage.AppendFile(t.Context(), "success-vol", "abc/def/ghi/success-file", []byte("Hello, world")); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
if err = xlStorage.AppendFile(context.Background(), "success-vol", "abc/xyz/ghi/success-file", []byte("Hello, world")); err != nil {
if err = xlStorage.AppendFile(t.Context(), "success-vol", "abc/xyz/ghi/success-file", []byte("Hello, world")); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
@ -874,7 +873,7 @@ func TestXLStorageListDir(t *testing.T) {
for i, testCase := range testCases {
var dirList []string
dirList, err = xlStorage.ListDir(context.Background(), "", testCase.srcVol, testCase.srcPath, -1)
dirList, err = xlStorage.ListDir(t.Context(), "", testCase.srcVol, testCase.srcPath, -1)
if err != testCase.expectedErr {
t.Errorf("TestXLStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
@ -906,7 +905,7 @@ func TestXLStorageListDir(t *testing.T) {
t.Fatalf("Unable to initialize xlStorage, %s", err)
}
if err = xlStorageNew.Delete(context.Background(), "mybucket", "myobject", DeleteOptions{
if err = xlStorageNew.Delete(t.Context(), "mybucket", "myobject", DeleteOptions{
Recursive: false,
Immediate: false,
}); err != errFileAccessDenied {
@ -916,7 +915,7 @@ func TestXLStorageListDir(t *testing.T) {
// TestXLStorage for delete on an removed disk.
// should fail with disk not found.
err = xlStorageDeletedStorage.Delete(context.Background(), "del-vol", "my-file", DeleteOptions{
err = xlStorageDeletedStorage.Delete(t.Context(), "del-vol", "my-file", DeleteOptions{
Recursive: false,
Immediate: false,
})
@ -938,17 +937,17 @@ func TestXLStorageDeleteFile(t *testing.T) {
}
// Setup test environment.
if err = xlStorage.MakeVol(context.Background(), "success-vol"); err != nil {
if err = xlStorage.MakeVol(t.Context(), "success-vol"); err != nil {
t.Fatalf("Unable to create volume, %s", err)
}
if err = xlStorage.AppendFile(context.Background(), "success-vol", "success-file", []byte("Hello, world")); err != nil {
if err = xlStorage.AppendFile(t.Context(), "success-vol", "success-file", []byte("Hello, world")); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
if err = xlStorage.MakeVol(context.Background(), "no-permissions"); err != nil {
if err = xlStorage.MakeVol(t.Context(), "no-permissions"); err != nil {
t.Fatalf("Unable to create volume, %s", err.Error())
}
if err = xlStorage.AppendFile(context.Background(), "no-permissions", "dir/file", []byte("Hello, world")); err != nil {
if err = xlStorage.AppendFile(t.Context(), "no-permissions", "dir/file", []byte("Hello, world")); err != nil {
t.Fatalf("Unable to create file, %s", err.Error())
}
// Parent directory must have write permissions, this is read + execute.
@ -1002,7 +1001,7 @@ func TestXLStorageDeleteFile(t *testing.T) {
}
for i, testCase := range testCases {
if err = xlStorage.Delete(context.Background(), testCase.srcVol, testCase.srcPath, DeleteOptions{
if err = xlStorage.Delete(t.Context(), testCase.srcVol, testCase.srcPath, DeleteOptions{
Recursive: false,
Immediate: false,
}); err != testCase.expectedErr {
@ -1029,7 +1028,7 @@ func TestXLStorageDeleteFile(t *testing.T) {
t.Fatalf("Unable to initialize xlStorage, %s", err)
}
if err = xlStorageNew.Delete(context.Background(), "mybucket", "myobject", DeleteOptions{
if err = xlStorageNew.Delete(t.Context(), "mybucket", "myobject", DeleteOptions{
Recursive: false,
Immediate: false,
}); err != errFileAccessDenied {
@ -1050,7 +1049,7 @@ func TestXLStorageDeleteFile(t *testing.T) {
// TestXLStorage for delete on an removed disk.
// should fail with disk not found.
err = xlStorageDeletedStorage.Delete(context.Background(), "del-vol", "my-file", DeleteOptions{
err = xlStorageDeletedStorage.Delete(t.Context(), "del-vol", "my-file", DeleteOptions{
Recursive: false,
Immediate: false,
})
@ -1069,7 +1068,7 @@ func TestXLStorageReadFile(t *testing.T) {
volume := "success-vol"
// Setup test environment.
if err = xlStorage.MakeVol(context.Background(), volume); err != nil {
if err = xlStorage.MakeVol(t.Context(), volume); err != nil {
t.Fatalf("Unable to create volume, %s", err)
}
@ -1156,7 +1155,7 @@ func TestXLStorageReadFile(t *testing.T) {
v := NewBitrotVerifier(SHA256, getSHA256Sum([]byte("hello, world")))
// Create test files for further reading.
for i, appendFile := range appendFiles {
err = xlStorage.AppendFile(context.Background(), volume, appendFile.fileName, []byte("hello, world"))
err = xlStorage.AppendFile(t.Context(), volume, appendFile.fileName, []byte("hello, world"))
if err != appendFile.expectedErr {
t.Fatalf("Creating file failed: %d %#v, expected: %s, got: %s", i+1, appendFile, appendFile.expectedErr, err)
}
@ -1165,7 +1164,7 @@ func TestXLStorageReadFile(t *testing.T) {
{
buf := make([]byte, 5)
// Test for negative offset.
if _, err = xlStorage.ReadFile(context.Background(), volume, "myobject", -1, buf, v); err == nil {
if _, err = xlStorage.ReadFile(t.Context(), volume, "myobject", -1, buf, v); err == nil {
t.Fatalf("expected: error, got: <nil>")
}
}
@ -1176,7 +1175,7 @@ func TestXLStorageReadFile(t *testing.T) {
var n int64
// Common read buffer.
buf := make([]byte, testCase.bufSize)
n, err = xlStorage.ReadFile(context.Background(), testCase.volume, testCase.fileName, testCase.offset, buf, v)
n, err = xlStorage.ReadFile(t.Context(), testCase.volume, testCase.fileName, testCase.offset, buf, v)
if err != nil && testCase.expectedErr != nil {
// Validate if the type string of the errors are an exact match.
if err.Error() != testCase.expectedErr.Error() {
@ -1252,7 +1251,7 @@ func TestXLStorageReadFile(t *testing.T) {
// Common read buffer.
buf := make([]byte, 10)
if _, err = xlStoragePermStorage.ReadFile(context.Background(), "mybucket", "myobject", 0, buf, v); err != errFileAccessDenied {
if _, err = xlStoragePermStorage.ReadFile(t.Context(), "mybucket", "myobject", 0, buf, v); err != errFileAccessDenied {
t.Errorf("expected: %s, got: %s", errFileAccessDenied, err)
}
}
@ -1293,14 +1292,14 @@ func TestXLStorageReadFileWithVerify(t *testing.T) {
if err != nil {
t.Fatalf("Unable to create xlStorage test setup, %s", err)
}
if err = xlStorage.MakeVol(context.Background(), volume); err != nil {
if err = xlStorage.MakeVol(t.Context(), volume); err != nil {
t.Fatalf("Unable to create volume %s: %v", volume, err)
}
data := make([]byte, 8*1024)
if _, err = io.ReadFull(rand.Reader, data); err != nil {
t.Fatalf("Unable to create generate random data: %v", err)
}
if err = xlStorage.AppendFile(context.Background(), volume, object, data); err != nil {
if err = xlStorage.AppendFile(t.Context(), volume, object, data); err != nil {
t.Fatalf("Unable to create object: %v", err)
}
@ -1312,7 +1311,7 @@ func TestXLStorageReadFileWithVerify(t *testing.T) {
}
buffer := make([]byte, test.length)
n, err := xlStorage.ReadFile(context.Background(), volume, test.file, int64(test.offset), buffer, NewBitrotVerifier(test.algorithm, h.Sum(nil)))
n, err := xlStorage.ReadFile(t.Context(), volume, test.file, int64(test.offset), buffer, NewBitrotVerifier(test.algorithm, h.Sum(nil)))
switch {
case err == nil && test.expError != nil:
@ -1335,7 +1334,7 @@ func TestXLStorageFormatFileChange(t *testing.T) {
t.Fatalf("Unable to create xlStorage test setup, %s", err)
}
if err = xlStorage.MakeVol(context.Background(), volume); err != nil {
if err = xlStorage.MakeVol(t.Context(), volume); err != nil {
t.Fatalf("MakeVol failed with %s", err)
}
@ -1344,7 +1343,7 @@ func TestXLStorageFormatFileChange(t *testing.T) {
t.Fatalf("ioutil.WriteFile failed with %s", err)
}
err = xlStorage.MakeVol(context.Background(), volume)
err = xlStorage.MakeVol(t.Context(), volume)
if err != errVolumeExists {
t.Fatalf("MakeVol expected to fail with errDiskNotFound but failed with %s", err)
}
@ -1359,7 +1358,7 @@ func TestXLStorageAppendFile(t *testing.T) {
}
// Setup test environment.
if err = xlStorage.MakeVol(context.Background(), "success-vol"); err != nil {
if err = xlStorage.MakeVol(t.Context(), "success-vol"); err != nil {
t.Fatalf("Unable to create volume, %s", err)
}
@ -1389,7 +1388,7 @@ func TestXLStorageAppendFile(t *testing.T) {
}
for i, testCase := range testCases {
if err = xlStorage.AppendFile(context.Background(), "success-vol", testCase.fileName, []byte("hello, world")); err != testCase.expectedErr {
if err = xlStorage.AppendFile(t.Context(), "success-vol", testCase.fileName, []byte("hello, world")); err != testCase.expectedErr {
t.Errorf("Case: %d, expected: %s, got: %s", i+1, testCase.expectedErr, err)
}
}
@ -1414,14 +1413,14 @@ func TestXLStorageAppendFile(t *testing.T) {
t.Fatalf("Unable to initialize xlStorage, %s", err)
}
if err = xlStoragePermStorage.AppendFile(context.Background(), "mybucket", "myobject", []byte("hello, world")); err != errFileAccessDenied {
if err = xlStoragePermStorage.AppendFile(t.Context(), "mybucket", "myobject", []byte("hello, world")); err != errFileAccessDenied {
t.Fatalf("expected: errFileAccessDenied error, got: %s", err)
}
}
// TestXLStorage case with invalid volume name.
// A valid volume name should be at least of size 3.
err = xlStorage.AppendFile(context.Background(), "bn", "yes", []byte("hello, world"))
err = xlStorage.AppendFile(t.Context(), "bn", "yes", []byte("hello, world"))
if err != errVolumeNotFound {
t.Fatalf("expected: \"Invalid argument error\", got: \"%s\"", err)
}
@ -1436,32 +1435,32 @@ func TestXLStorageRenameFile(t *testing.T) {
}
// Setup test environment.
if err := xlStorage.MakeVol(context.Background(), "src-vol"); err != nil {
if err := xlStorage.MakeVol(t.Context(), "src-vol"); err != nil {
t.Fatalf("Unable to create volume, %s", err)
}
if err := xlStorage.MakeVol(context.Background(), "dest-vol"); err != nil {
if err := xlStorage.MakeVol(t.Context(), "dest-vol"); err != nil {
t.Fatalf("Unable to create volume, %s", err)
}
if err := xlStorage.AppendFile(context.Background(), "src-vol", "file1", []byte("Hello, world")); err != nil {
if err := xlStorage.AppendFile(t.Context(), "src-vol", "file1", []byte("Hello, world")); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
if err := xlStorage.AppendFile(context.Background(), "src-vol", "file2", []byte("Hello, world")); err != nil {
if err := xlStorage.AppendFile(t.Context(), "src-vol", "file2", []byte("Hello, world")); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
if err := xlStorage.AppendFile(context.Background(), "src-vol", "file3", []byte("Hello, world")); err != nil {
if err := xlStorage.AppendFile(t.Context(), "src-vol", "file3", []byte("Hello, world")); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
if err := xlStorage.AppendFile(context.Background(), "src-vol", "file4", []byte("Hello, world")); err != nil {
if err := xlStorage.AppendFile(t.Context(), "src-vol", "file4", []byte("Hello, world")); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
if err := xlStorage.AppendFile(context.Background(), "src-vol", "file5", []byte("Hello, world")); err != nil {
if err := xlStorage.AppendFile(t.Context(), "src-vol", "file5", []byte("Hello, world")); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
if err := xlStorage.AppendFile(context.Background(), "src-vol", "path/to/file1", []byte("Hello, world")); err != nil {
if err := xlStorage.AppendFile(t.Context(), "src-vol", "path/to/file1", []byte("Hello, world")); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
@ -1638,7 +1637,7 @@ func TestXLStorageRenameFile(t *testing.T) {
}
for i, testCase := range testCases {
if err := xlStorage.RenameFile(context.Background(), testCase.srcVol, testCase.srcPath, testCase.destVol, testCase.destPath); err != testCase.expectedErr {
if err := xlStorage.RenameFile(t.Context(), testCase.srcVol, testCase.srcPath, testCase.destVol, testCase.destPath); err != testCase.expectedErr {
t.Fatalf("TestXLStorage %d: Expected the error to be : \"%v\", got: \"%v\".", i+1, testCase.expectedErr, err)
}
}
@ -1651,7 +1650,7 @@ func TestXLStorageDeleteVersion(t *testing.T) {
if err != nil {
t.Fatalf("Unable to create xlStorage test setup, %s", err)
}
ctx := context.Background()
ctx := t.Context()
volume := "myvol-vol"
object := "my-object"
@ -1744,19 +1743,19 @@ func TestXLStorageStatInfoFile(t *testing.T) {
}
// Setup test environment.
if err := xlStorage.MakeVol(context.Background(), "success-vol"); err != nil {
if err := xlStorage.MakeVol(t.Context(), "success-vol"); err != nil {
t.Fatalf("Unable to create volume, %s", err)
}
if err := xlStorage.AppendFile(context.Background(), "success-vol", pathJoin("success-file", xlStorageFormatFile), []byte("Hello, world")); err != nil {
if err := xlStorage.AppendFile(t.Context(), "success-vol", pathJoin("success-file", xlStorageFormatFile), []byte("Hello, world")); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
if err := xlStorage.AppendFile(context.Background(), "success-vol", pathJoin("path/to/success-file", xlStorageFormatFile), []byte("Hello, world")); err != nil {
if err := xlStorage.AppendFile(t.Context(), "success-vol", pathJoin("path/to/success-file", xlStorageFormatFile), []byte("Hello, world")); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
if err := xlStorage.MakeVol(context.Background(), "success-vol/path/to/"+xlStorageFormatFile); err != nil {
if err := xlStorage.MakeVol(t.Context(), "success-vol/path/to/"+xlStorageFormatFile); err != nil {
t.Fatalf("Unable to create path, %s", err)
}
@ -1817,7 +1816,7 @@ func TestXLStorageStatInfoFile(t *testing.T) {
}
for i, testCase := range testCases {
_, err := xlStorage.StatInfoFile(context.Background(), testCase.srcVol, testCase.srcPath+"/"+xlStorageFormatFile, false)
_, err := xlStorage.StatInfoFile(t.Context(), testCase.srcVol, testCase.srcPath+"/"+xlStorageFormatFile, false)
if err != testCase.expectedErr {
t.Errorf("TestXLStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
}
@ -1840,7 +1839,7 @@ func TestXLStorageVerifyFile(t *testing.T) {
volName := "testvol"
fileName := "testfile"
if err := storage.MakeVol(context.Background(), volName); err != nil {
if err := storage.MakeVol(t.Context(), volName); err != nil {
t.Fatal(err)
}
@ -1854,29 +1853,29 @@ func TestXLStorageVerifyFile(t *testing.T) {
h := algo.New()
h.Write(data)
hashBytes := h.Sum(nil)
if err := storage.WriteAll(context.Background(), volName, fileName, data); err != nil {
if err := storage.WriteAll(t.Context(), volName, fileName, data); err != nil {
t.Fatal(err)
}
if err := storage.storage.bitrotVerify(context.Background(), pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err != nil {
if err := storage.storage.bitrotVerify(t.Context(), pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err != nil {
t.Fatal(err)
}
// 2) Whole-file bitrot check on corrupted file
if err := storage.AppendFile(context.Background(), volName, fileName, []byte("a")); err != nil {
if err := storage.AppendFile(t.Context(), volName, fileName, []byte("a")); err != nil {
t.Fatal(err)
}
// Check if VerifyFile reports the incorrect file length (the correct length is `size+1`)
if err := storage.storage.bitrotVerify(context.Background(), pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err == nil {
if err := storage.storage.bitrotVerify(t.Context(), pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err == nil {
t.Fatal("expected to fail bitrot check")
}
// Check if bitrot fails
if err := storage.storage.bitrotVerify(context.Background(), pathJoin(path, volName, fileName), size+1, algo, hashBytes, 0); err == nil {
if err := storage.storage.bitrotVerify(t.Context(), pathJoin(path, volName, fileName), size+1, algo, hashBytes, 0); err == nil {
t.Fatal("expected to fail bitrot check")
}
if err := storage.Delete(context.Background(), volName, fileName, DeleteOptions{
if err := storage.Delete(t.Context(), volName, fileName, DeleteOptions{
Recursive: false,
Immediate: false,
}); err != nil {
@ -1904,7 +1903,7 @@ func TestXLStorageVerifyFile(t *testing.T) {
t.Fatal(err)
}
w.(io.Closer).Close()
if err := storage.storage.bitrotVerify(context.Background(), pathJoin(path, volName, fileName), size, algo, nil, shardSize); err != nil {
if err := storage.storage.bitrotVerify(t.Context(), pathJoin(path, volName, fileName), size, algo, nil, shardSize); err != nil {
t.Fatal(err)
}
@ -1919,10 +1918,10 @@ func TestXLStorageVerifyFile(t *testing.T) {
t.Fatal(err)
}
f.Close()
if err := storage.storage.bitrotVerify(context.Background(), pathJoin(path, volName, fileName), size, algo, nil, shardSize); err == nil {
if err := storage.storage.bitrotVerify(t.Context(), pathJoin(path, volName, fileName), size, algo, nil, shardSize); err == nil {
t.Fatal("expected to fail bitrot check")
}
if err := storage.storage.bitrotVerify(context.Background(), pathJoin(path, volName, fileName), size+1, algo, nil, shardSize); err == nil {
if err := storage.storage.bitrotVerify(t.Context(), pathJoin(path, volName, fileName), size+1, algo, nil, shardSize); err == nil {
t.Fatal("expected to fail bitrot check")
}
}
@ -1937,8 +1936,8 @@ func TestXLStorageReadMetadata(t *testing.T) {
t.Fatal(err)
}
disk.MakeVol(context.Background(), volume)
if _, err := disk.readMetadata(context.Background(), pathJoin(tmpDir, volume, object)); err != errFileNameTooLong {
disk.MakeVol(t.Context(), volume)
if _, err := disk.readMetadata(t.Context(), pathJoin(tmpDir, volume, object)); err != errFileNameTooLong {
t.Fatalf("Unexpected error from readMetadata - expect %v: got %v", errFileNameTooLong, err)
}
}

View File

@ -21,7 +21,6 @@
package cmd
import (
"context"
"os"
"path"
"syscall"
@ -55,7 +54,7 @@ func TestIsValidUmaskVol(t *testing.T) {
// Attempt to create a volume to verify the permissions later.
// MakeVol creates 0777.
if err = disk.MakeVol(context.Background(), testCase.volName); err != nil {
if err = disk.MakeVol(t.Context(), testCase.volName); err != nil {
t.Fatalf("Creating a volume failed with %s expected to pass.", err)
}
@ -93,18 +92,18 @@ func TestIsValidUmaskFile(t *testing.T) {
// Attempt to create a volume to verify the permissions later.
// MakeVol creates directory with 0777 perms.
if err = disk.MakeVol(context.Background(), testCase.volName); err != nil {
if err = disk.MakeVol(t.Context(), testCase.volName); err != nil {
t.Fatalf("Creating a volume failed with %s expected to pass.", err)
}
// Attempt to create a file to verify the permissions later.
// AppendFile creates file with 0666 perms.
if err = disk.AppendFile(context.Background(), testCase.volName, pathJoin("hello-world.txt", xlStorageFormatFile), []byte("Hello World")); err != nil {
if err = disk.AppendFile(t.Context(), testCase.volName, pathJoin("hello-world.txt", xlStorageFormatFile), []byte("Hello World")); err != nil {
t.Fatalf("Create a file `test` failed with %s expected to pass.", err)
}
// CheckFile - stat the file.
if _, err := disk.StatInfoFile(context.Background(), testCase.volName, "hello-world.txt/"+xlStorageFormatFile, false); err != nil {
if _, err := disk.StatInfoFile(t.Context(), testCase.volName, "hello-world.txt/"+xlStorageFormatFile, false); err != nil {
t.Fatalf("Stat failed with %s expected to pass.", err)
}
}

go.mod
View File

@ -1,8 +1,8 @@
module github.com/minio/minio
go 1.23.0
go 1.24.0
toolchain go1.23.6
toolchain go1.24.2
require (
cloud.google.com/go/storage v1.46.0
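
For context on the two go.mod directives changed above: go 1.24.0 declares the minimum language version the module requires, which is what actually gates t.Context() and friends, while toolchain go1.24.2 records the exact release to prefer, letting an older installed go command fetch and switch to it under the default GOTOOLCHAIN=auto. A minimal pair mirroring the change (the module path is hypothetical):

    module example.com/sketch

    go 1.24.0          // minimum language version; enables testing.TB.Context
    toolchain go1.24.2 // exact toolchain to use when the host go is older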

View File

@ -46,7 +46,7 @@ func TestCacheCtx(t *testing.T) {
},
)
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
cancel() // cancel context to test.
_, err := cache.GetWithCtx(ctx)
@ -54,7 +54,7 @@ func TestCacheCtx(t *testing.T) {
t.Fatalf("expected context.Canceled err, got %v", err)
}
ctx, cancel = context.WithCancel(context.Background())
ctx, cancel = context.WithCancel(t.Context())
defer cancel()
t1, err := cache.GetWithCtx(ctx)

View File

@ -19,7 +19,6 @@ package openid
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
@ -148,7 +147,7 @@ func TestJWTHMACType(t *testing.T) {
}
var claims jwtgo.MapClaims
if err = cfg.Validate(context.Background(), DummyRoleARN, token, "", "", claims); err != nil {
if err = cfg.Validate(t.Context(), DummyRoleARN, token, "", "", claims); err != nil {
t.Fatal(err)
}
}
@ -200,7 +199,7 @@ func TestJWT(t *testing.T) {
}
var claims jwtgo.MapClaims
if err = cfg.Validate(context.Background(), DummyRoleARN, u.Query().Get("Token"), "", "", claims); err == nil {
if err = cfg.Validate(t.Context(), DummyRoleARN, u.Query().Get("Token"), "", "", claims); err == nil {
t.Fatal(err)
}
}

View File

@ -33,14 +33,14 @@ const (
func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) {
drwm1 := NewDRWMutex(ds, "simplelock")
ctx1, cancel1 := context.WithCancel(context.Background())
ctx1, cancel1 := context.WithCancel(t.Context())
if !drwm1.GetRLock(ctx1, cancel1, id, source, Options{Timeout: time.Second}) {
panic("Failed to acquire read lock")
}
// fmt.Println("1st read lock acquired, waiting...")
drwm2 := NewDRWMutex(ds, "simplelock")
ctx2, cancel2 := context.WithCancel(context.Background())
ctx2, cancel2 := context.WithCancel(t.Context())
if !drwm2.GetRLock(ctx2, cancel2, id, source, Options{Timeout: time.Second}) {
panic("Failed to acquire read lock")
}
@ -48,25 +48,25 @@ func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) {
go func() {
time.Sleep(2 * testDrwMutexAcquireTimeout)
drwm1.RUnlock(context.Background())
drwm1.RUnlock(t.Context())
// fmt.Println("1st read lock released, waiting...")
}()
go func() {
time.Sleep(3 * testDrwMutexAcquireTimeout)
drwm2.RUnlock(context.Background())
drwm2.RUnlock(t.Context())
// fmt.Println("2nd read lock released, waiting...")
}()
drwm3 := NewDRWMutex(ds, "simplelock")
// fmt.Println("Trying to acquire write lock, waiting...")
ctx3, cancel3 := context.WithCancel(context.Background())
ctx3, cancel3 := context.WithCancel(t.Context())
locked = drwm3.GetLock(ctx3, cancel3, id, source, Options{Timeout: duration})
if locked {
// fmt.Println("Write lock acquired, waiting...")
time.Sleep(testDrwMutexAcquireTimeout)
drwm3.Unlock(context.Background())
drwm3.Unlock(t.Context())
}
// fmt.Println("Write lock failed due to timeout")
return
@ -94,26 +94,26 @@ func testDualWriteLock(t *testing.T, duration time.Duration) (locked bool) {
drwm1 := NewDRWMutex(ds, "duallock")
// fmt.Println("Getting initial write lock")
ctx1, cancel1 := context.WithCancel(context.Background())
ctx1, cancel1 := context.WithCancel(t.Context())
if !drwm1.GetLock(ctx1, cancel1, id, source, Options{Timeout: time.Second}) {
panic("Failed to acquire initial write lock")
}
go func() {
time.Sleep(3 * testDrwMutexAcquireTimeout)
drwm1.Unlock(context.Background())
drwm1.Unlock(t.Context())
// fmt.Println("Initial write lock released, waiting...")
}()
// fmt.Println("Trying to acquire 2nd write lock, waiting...")
drwm2 := NewDRWMutex(ds, "duallock")
ctx2, cancel2 := context.WithCancel(context.Background())
ctx2, cancel2 := context.WithCancel(t.Context())
locked = drwm2.GetLock(ctx2, cancel2, id, source, Options{Timeout: duration})
if locked {
// fmt.Println("2nd write lock acquired, waiting...")
time.Sleep(testDrwMutexAcquireTimeout)
drwm2.Unlock(context.Background())
drwm2.Unlock(t.Context())
}
// fmt.Println("2nd write lock failed due to timeout")
return
@ -268,7 +268,7 @@ func TestUnlockPanic(t *testing.T) {
}
}()
mu := NewDRWMutex(ds, "test")
mu.Unlock(context.Background())
mu.Unlock(t.Context())
}
// Borrowed from rwmutex_test.go
@ -278,10 +278,10 @@ func TestUnlockPanic2(t *testing.T) {
if recover() == nil {
t.Fatalf("unlock of unlocked RWMutex did not panic")
}
mu.RUnlock(context.Background()) // Unlock, so -test.count > 1 works
mu.RUnlock(t.Context()) // Unlock, so -test.count > 1 works
}()
mu.RLock(id, source)
mu.Unlock(context.Background())
mu.Unlock(t.Context())
}
// Borrowed from rwmutex_test.go
@ -292,7 +292,7 @@ func TestRUnlockPanic(t *testing.T) {
}
}()
mu := NewDRWMutex(ds, "test")
mu.RUnlock(context.Background())
mu.RUnlock(t.Context())
}
// Borrowed from rwmutex_test.go
@ -302,10 +302,10 @@ func TestRUnlockPanic2(t *testing.T) {
if recover() == nil {
t.Fatalf("read unlock of unlocked RWMutex did not panic")
}
mu.Unlock(context.Background()) // Unlock, so -test.count > 1 works
mu.Unlock(t.Context()) // Unlock, so -test.count > 1 works
}()
mu.Lock(id, source)
mu.RUnlock(context.Background())
mu.RUnlock(t.Context())
}
// Borrowed from rwmutex_test.go
@ -320,14 +320,14 @@ func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
foo++
if foo%writeRatio == 0 {
rwm.Lock(id, source)
rwm.Unlock(context.Background())
rwm.Unlock(b.Context())
} else {
rwm.RLock(id, source)
for i := 0; i != localWork; i++ {
foo *= 2
foo /= 2
}
rwm.RUnlock(context.Background())
rwm.RUnlock(b.Context())
}
}
_ = foo
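
Benchmarks get the same treatment via (*testing.B).Context, also new in Go 1.24, which is canceled just before the benchmark function returns. A hedged sketch, where work is a hypothetical helper and b.Loop is likewise a Go 1.24 addition (a classic b.N loop behaves the same way):

package example

import (
	"context"
	"testing"
)

// work is a hypothetical stand-in for a context-aware operation.
func work(ctx context.Context) {
	select {
	case <-ctx.Done():
	default:
	}
}

func BenchmarkWithContext(b *testing.B) {
	// b.Context() is canceled just before the benchmark returns.
	for b.Loop() {
		work(b.Context())
	}
}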

View File

@ -69,7 +69,7 @@ func TestSimpleLock(t *testing.T) {
// fmt.Println("Lock acquired, waiting...")
time.Sleep(testDrwMutexRefreshCallTimeout)
dm.Unlock(context.Background())
dm.Unlock(t.Context())
}
func TestSimpleLockUnlockMultipleTimes(t *testing.T) {
@ -77,23 +77,23 @@ func TestSimpleLockUnlockMultipleTimes(t *testing.T) {
dm.Lock(id, source)
time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond)
dm.Unlock(context.Background())
dm.Unlock(t.Context())
dm.Lock(id, source)
time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond)
dm.Unlock(context.Background())
dm.Unlock(t.Context())
dm.Lock(id, source)
time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond)
dm.Unlock(context.Background())
dm.Unlock(t.Context())
dm.Lock(id, source)
time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond)
dm.Unlock(context.Background())
dm.Unlock(t.Context())
dm.Lock(id, source)
time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond)
dm.Unlock(context.Background())
dm.Unlock(t.Context())
}
// Test two locks for same resource, one succeeds, one fails (after timeout)
@ -108,7 +108,7 @@ func TestTwoSimultaneousLocksForSameResource(t *testing.T) {
time.Sleep(5 * testDrwMutexAcquireTimeout)
// fmt.Println("Unlocking dm1")
dm1st.Unlock(context.Background())
dm1st.Unlock(t.Context())
}()
dm2nd.Lock(id, source)
@ -116,7 +116,7 @@ func TestTwoSimultaneousLocksForSameResource(t *testing.T) {
// fmt.Printf("2nd lock obtained after 1st lock is released\n")
time.Sleep(testDrwMutexRefreshCallTimeout * 2)
dm2nd.Unlock(context.Background())
dm2nd.Unlock(t.Context())
}
// Test three locks for same resource, one succeeds, one fails (after timeout)
@ -134,7 +134,7 @@ func TestThreeSimultaneousLocksForSameResource(t *testing.T) {
time.Sleep(2 * testDrwMutexAcquireTimeout)
// fmt.Println("Unlocking dm1")
dm1st.Unlock(context.Background())
dm1st.Unlock(t.Context())
}()
expect += 2 * testDrwMutexAcquireTimeout
@ -151,7 +151,7 @@ func TestThreeSimultaneousLocksForSameResource(t *testing.T) {
time.Sleep(2 * testDrwMutexAcquireTimeout)
// fmt.Println("Unlocking dm2")
dm2nd.Unlock(context.Background())
dm2nd.Unlock(t.Context())
}()
dm3rd.Lock(id, source)
@ -159,7 +159,7 @@ func TestThreeSimultaneousLocksForSameResource(t *testing.T) {
// fmt.Printf("3rd lock obtained after 1st & 2nd locks are released\n")
time.Sleep(testDrwMutexRefreshCallTimeout)
dm3rd.Unlock(context.Background())
dm3rd.Unlock(t.Context())
}()
expect += 2*testDrwMutexAcquireTimeout + testDrwMutexRefreshCallTimeout
@ -173,7 +173,7 @@ func TestThreeSimultaneousLocksForSameResource(t *testing.T) {
time.Sleep(2 * testDrwMutexAcquireTimeout)
// fmt.Println("Unlocking dm3")
dm3rd.Unlock(context.Background())
dm3rd.Unlock(t.Context())
}()
dm2nd.Lock(id, source)
@ -181,7 +181,7 @@ func TestThreeSimultaneousLocksForSameResource(t *testing.T) {
// fmt.Printf("2nd lock obtained after 1st & 3rd locks are released\n")
time.Sleep(testDrwMutexRefreshCallTimeout)
dm2nd.Unlock(context.Background())
dm2nd.Unlock(t.Context())
}()
expect += 2*testDrwMutexAcquireTimeout + testDrwMutexRefreshCallTimeout
@ -201,8 +201,8 @@ func TestTwoSimultaneousLocksForDifferentResources(t *testing.T) {
dm1.Lock(id, source)
dm2.Lock(id, source)
dm1.Unlock(context.Background())
dm2.Unlock(context.Background())
dm1.Unlock(t.Context())
dm2.Unlock(t.Context())
}
// Test refreshing lock - refresh should always return true
@ -214,7 +214,7 @@ func TestSuccessfulLockRefresh(t *testing.T) {
dm := NewDRWMutex(ds, "aap")
dm.refreshInterval = testDrwMutexRefreshInterval
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
if !dm.GetLock(ctx, cancel, id, source, Options{Timeout: 5 * time.Minute}) {
t.Fatal("GetLock() should be successful")
@ -230,7 +230,7 @@ func TestSuccessfulLockRefresh(t *testing.T) {
}
// Should be safe operation in all cases
dm.Unlock(context.Background())
dm.Unlock(t.Context())
}
// Test canceling context while quorum servers report lock not found
@ -250,7 +250,7 @@ func TestFailedRefreshLock(t *testing.T) {
var wg sync.WaitGroup
wg.Add(1)
ctx, cl := context.WithCancel(context.Background())
ctx, cl := context.WithCancel(t.Context())
cancel := func() {
cl()
wg.Done()
@ -267,7 +267,7 @@ func TestFailedRefreshLock(t *testing.T) {
}
// Should be safe operation in all cases
dm.Unlock(context.Background())
dm.Unlock(t.Context())
}
// Test Unlock should not timeout
@ -278,7 +278,7 @@ func TestUnlockShouldNotTimeout(t *testing.T) {
dm := NewDRWMutex(ds, "aap")
dm.refreshInterval = testDrwMutexUnlockCallTimeout
if !dm.GetLock(context.Background(), nil, id, source, Options{Timeout: 5 * time.Minute}) {
if !dm.GetLock(t.Context(), nil, id, source, Options{Timeout: 5 * time.Minute}) {
t.Fatal("GetLock() should be successful")
}
@ -290,7 +290,7 @@ func TestUnlockShouldNotTimeout(t *testing.T) {
unlockReturned := make(chan struct{}, 1)
go func() {
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
ctx, cancel := context.WithTimeout(t.Context(), 500*time.Millisecond)
defer cancel()
dm.Unlock(ctx)
// Unlock is not blocking. Try to get a new lock.
@ -344,7 +344,7 @@ func BenchmarkMutexUncontended(b *testing.B) {
mu := PaddedMutex{NewDRWMutex(ds, "")}
for pb.Next() {
mu.Lock(id, source)
mu.Unlock(context.Background())
mu.Unlock(b.Context())
}
})
}
@ -361,7 +361,7 @@ func benchmarkMutex(b *testing.B, slack, work bool) {
foo := 0
for pb.Next() {
mu.Lock(id, source)
mu.Unlock(context.Background())
mu.Unlock(b.Context())
if work {
for i := 0; i < 100; i++ {
foo *= 2
@ -410,7 +410,7 @@ func BenchmarkMutexNoSpin(b *testing.B) {
m.Lock(id, source)
acc0 -= 100
acc1 += 100
m.Unlock(context.Background())
m.Unlock(b.Context())
} else {
for i := 0; i < len(data); i += 4 {
data[i]++
@ -442,7 +442,7 @@ func BenchmarkMutexSpin(b *testing.B) {
m.Lock(id, source)
acc0 -= 100
acc1 += 100
m.Unlock(context.Background())
m.Unlock(b.Context())
for i := 0; i < len(data); i += 4 {
data[i]++
}

View File

@ -18,7 +18,6 @@
package etag
import (
"context"
"io"
"net/http"
"strings"
@ -138,7 +137,7 @@ var readerTests = []struct { // Reference values computed by: echo <content> | m
func TestReader(t *testing.T) {
for i, test := range readerTests {
reader := NewReader(context.Background(), strings.NewReader(test.Content), test.ETag, nil)
reader := NewReader(t.Context(), strings.NewReader(test.Content), test.ETag, nil)
if _, err := io.Copy(io.Discard, reader); err != nil {
t.Fatalf("Test %d: read failed: %v", i, err)
}

View File

@ -18,7 +18,6 @@
package event
import (
"context"
"encoding/xml"
"reflect"
"strings"
@ -252,9 +251,9 @@ func TestQueueValidate(t *testing.T) {
panic(err)
}
targetList1 := NewTargetList(context.Background())
targetList1 := NewTargetList(t.Context())
targetList2 := NewTargetList(context.Background())
targetList2 := NewTargetList(t.Context())
if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil {
panic(err)
}
@ -596,9 +595,9 @@ func TestConfigValidate(t *testing.T) {
panic(err)
}
targetList1 := NewTargetList(context.Background())
targetList1 := NewTargetList(t.Context())
targetList2 := NewTargetList(context.Background())
targetList2 := NewTargetList(t.Context())
if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil {
panic(err)
}
@ -928,9 +927,9 @@ func TestParseConfig(t *testing.T) {
</NotificationConfiguration>
`)
targetList1 := NewTargetList(context.Background())
targetList1 := NewTargetList(t.Context())
targetList2 := NewTargetList(context.Background())
targetList2 := NewTargetList(t.Context())
if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil {
panic(err)
}

View File

@ -18,7 +18,6 @@
package event
import (
"context"
"crypto/rand"
"errors"
"reflect"
@ -86,14 +85,14 @@ func (target ExampleTarget) FlushQueueStore() error {
}
func TestTargetListAdd(t *testing.T) {
targetListCase1 := NewTargetList(context.Background())
targetListCase1 := NewTargetList(t.Context())
targetListCase2 := NewTargetList(context.Background())
targetListCase2 := NewTargetList(t.Context())
if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil {
panic(err)
}
targetListCase3 := NewTargetList(context.Background())
targetListCase3 := NewTargetList(t.Context())
if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil {
panic(err)
}
@ -141,14 +140,14 @@ func TestTargetListAdd(t *testing.T) {
}
func TestTargetListExists(t *testing.T) {
targetListCase1 := NewTargetList(context.Background())
targetListCase1 := NewTargetList(t.Context())
targetListCase2 := NewTargetList(context.Background())
targetListCase2 := NewTargetList(t.Context())
if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil {
panic(err)
}
targetListCase3 := NewTargetList(context.Background())
targetListCase3 := NewTargetList(t.Context())
if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil {
panic(err)
}
@ -173,14 +172,14 @@ func TestTargetListExists(t *testing.T) {
}
func TestTargetListList(t *testing.T) {
targetListCase1 := NewTargetList(context.Background())
targetListCase1 := NewTargetList(t.Context())
targetListCase2 := NewTargetList(context.Background())
targetListCase2 := NewTargetList(t.Context())
if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil {
panic(err)
}
targetListCase3 := NewTargetList(context.Background())
targetListCase3 := NewTargetList(t.Context())
if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil {
panic(err)
}
@ -220,7 +219,7 @@ func TestTargetListList(t *testing.T) {
}
func TestNewTargetList(t *testing.T) {
if result := NewTargetList(context.Background()); result == nil {
if result := NewTargetList(t.Context()); result == nil {
t.Fatalf("test: result: expected: <non-nil>, got: <nil>")
}
}

View File

@ -78,7 +78,7 @@ func benchmarkGridRequests(b *testing.B, n int) {
for par := 1; par <= 32; par *= 2 {
b.Run("par="+strconv.Itoa(par*runtime.GOMAXPROCS(0)), func(b *testing.B) {
defer timeout(60 * time.Second)()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
ctx, cancel := context.WithTimeout(b.Context(), 30*time.Second)
defer cancel()
b.ReportAllocs()
b.SetBytes(int64(len(payload) * 2))
@ -135,7 +135,7 @@ func benchmarkGridRequests(b *testing.B, n int) {
for par := 1; par <= 32; par *= 2 {
b.Run("par="+strconv.Itoa(par*runtime.GOMAXPROCS(0)), func(b *testing.B) {
defer timeout(60 * time.Second)()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
ctx, cancel := context.WithTimeout(b.Context(), 30*time.Second)
defer cancel()
b.ReportAllocs()
b.ResetTimer()
@ -285,7 +285,7 @@ func benchmarkGridStreamRespOnly(b *testing.B, n int) {
if conn == nil {
b.Fatal("No connection")
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
ctx, cancel := context.WithTimeout(b.Context(), 30*time.Second)
// Send the payload.
t := time.Now()
st, err := conn.NewStream(ctx, handlerTest, payload)
@ -396,7 +396,7 @@ func benchmarkGridStreamReqOnly(b *testing.B, n int) {
if conn == nil {
b.Fatal("No connection")
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
ctx, cancel := context.WithTimeout(b.Context(), 30*time.Second)
// Send the payload.
t := time.Now()
st, err := conn.NewStream(ctx, handlerTest, payload)
@ -512,7 +512,7 @@ func benchmarkGridStreamTwoway(b *testing.B, n int) {
if conn == nil {
b.Fatal("No connection")
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
ctx, cancel := context.WithTimeout(b.Context(), 30*time.Second)
// Send the payload.
t := time.Now()
st, err := conn.NewStream(ctx, handlerTest, payload)

View File

@ -51,7 +51,7 @@ func TestDisconnect(t *testing.T) {
// We fake a local and remote server.
localHost := hosts[0]
remoteHost := hosts[1]
local, err := NewManager(context.Background(), ManagerOptions{
local, err := NewManager(t.Context(), ManagerOptions{
Dialer: ConnectWS(dialer.DialContext,
dummyNewToken,
nil),
@ -75,7 +75,7 @@ func TestDisconnect(t *testing.T) {
return nil, &err
}))
remote, err := NewManager(context.Background(), ManagerOptions{
remote, err := NewManager(t.Context(), ManagerOptions{
Dialer: ConnectWS(dialer.DialContext,
dummyNewToken,
nil),
@ -131,14 +131,14 @@ func TestDisconnect(t *testing.T) {
// local to remote
remoteConn := local.Connection(remoteHost)
errFatal(remoteConn.WaitForConnect(context.Background()))
errFatal(remoteConn.WaitForConnect(t.Context()))
const testPayload = "Hello Grid World!"
gotResp := make(chan struct{})
go func() {
start := time.Now()
t.Log("Roundtrip: sending request")
resp, err := remoteConn.Request(context.Background(), handlerTest, []byte(testPayload))
resp, err := remoteConn.Request(t.Context(), handlerTest, []byte(testPayload))
t.Log("Roundtrip:", time.Since(start), resp, err)
gotResp <- struct{}{}
}()
@ -148,9 +148,9 @@ func TestDisconnect(t *testing.T) {
<-gotResp
// Must reconnect
errFatal(remoteConn.WaitForConnect(context.Background()))
errFatal(remoteConn.WaitForConnect(t.Context()))
stream, err := remoteConn.NewStream(context.Background(), handlerTest2, []byte(testPayload))
stream, err := remoteConn.NewStream(t.Context(), handlerTest2, []byte(testPayload))
errFatal(err)
go func() {
for resp := range stream.responses {
@ -162,7 +162,7 @@ func TestDisconnect(t *testing.T) {
<-gotCall
remote.debugMsg(debugKillOutbound)
local.debugMsg(debugKillOutbound)
errFatal(remoteConn.WaitForConnect(context.Background()))
errFatal(remoteConn.WaitForConnect(t.Context()))
<-gotResp
// Killing should cancel the context on the request.

View File

@ -74,14 +74,14 @@ func TestSingleRoundtrip(t *testing.T) {
// local to remote
remoteConn := local.Connection(remoteHost)
remoteConn.WaitForConnect(context.Background())
remoteConn.WaitForConnect(t.Context())
defer testlogger.T.SetErrorTB(t)()
t.Run("localToRemote", func(t *testing.T) {
const testPayload = "Hello Grid World!"
start := time.Now()
resp, err := remoteConn.Request(context.Background(), handlerTest, []byte(testPayload))
resp, err := remoteConn.Request(t.Context(), handlerTest, []byte(testPayload))
errFatal(err)
if string(resp) != testPayload {
t.Errorf("want %q, got %q", testPayload, string(resp))
@ -92,7 +92,7 @@ func TestSingleRoundtrip(t *testing.T) {
t.Run("localToRemoteErr", func(t *testing.T) {
const testPayload = "Hello Grid World!"
start := time.Now()
resp, err := remoteConn.Request(context.Background(), handlerTest2, []byte(testPayload))
resp, err := remoteConn.Request(t.Context(), handlerTest2, []byte(testPayload))
t.Log("Roundtrip:", time.Since(start))
if len(resp) != 0 {
t.Errorf("want nil, got %q", string(resp))
@ -107,7 +107,7 @@ func TestSingleRoundtrip(t *testing.T) {
testPayload := bytes.Repeat([]byte("?"), 1<<20)
start := time.Now()
resp, err := remoteConn.Request(context.Background(), handlerTest, testPayload)
resp, err := remoteConn.Request(t.Context(), handlerTest, testPayload)
errFatal(err)
if string(resp) != string(testPayload) {
t.Errorf("want %q, got %q", testPayload, string(resp))
@ -119,7 +119,7 @@ func TestSingleRoundtrip(t *testing.T) {
testPayload := bytes.Repeat([]byte("!"), 1<<10)
start := time.Now()
resp, err := remoteConn.Request(context.Background(), handlerTest2, testPayload)
resp, err := remoteConn.Request(t.Context(), handlerTest2, testPayload)
if len(resp) != 0 {
t.Errorf("want nil, got %q", string(resp))
}
@ -159,19 +159,19 @@ func TestSingleRoundtripNotReady(t *testing.T) {
// local to remote
remoteConn := local.Connection(remoteHost)
remoteConn.WaitForConnect(context.Background())
remoteConn.WaitForConnect(t.Context())
defer testlogger.T.SetErrorTB(t)()
t.Run("localToRemote", func(t *testing.T) {
const testPayload = "Hello Grid World!"
// Single requests should have remote errors.
_, err := remoteConn.Request(context.Background(), handlerTest, []byte(testPayload))
_, err := remoteConn.Request(t.Context(), handlerTest, []byte(testPayload))
if _, ok := err.(*RemoteErr); !ok {
t.Fatalf("Unexpected error: %v, %T", err, err)
}
// Streams should not be able to set up until registered.
// Thus, the error is a local error.
_, err = remoteConn.NewStream(context.Background(), handlerTest, []byte(testPayload))
_, err = remoteConn.NewStream(t.Context(), handlerTest, []byte(testPayload))
if !errors.Is(err, ErrUnknownHandler) {
t.Fatalf("Unexpected error: %v, %T", err, err)
}
@ -226,7 +226,7 @@ func TestSingleRoundtripGenerics(t *testing.T) {
start := time.Now()
req := testRequest{Num: 1, String: testPayload}
resp, err := h1.Call(context.Background(), remoteConn, &req)
resp, err := h1.Call(t.Context(), remoteConn, &req)
errFatal(err)
if resp.OrgString != testPayload {
t.Errorf("want %q, got %q", testPayload, resp.OrgString)
@ -235,7 +235,7 @@ func TestSingleRoundtripGenerics(t *testing.T) {
h1.PutResponse(resp)
start = time.Now()
resp, err = h2.Call(context.Background(), remoteConn, &testRequest{Num: 1, String: testPayload})
resp, err = h2.Call(t.Context(), remoteConn, &testRequest{Num: 1, String: testPayload})
t.Log("Roundtrip:", time.Since(start))
if err != RemoteErr(testPayload) {
t.Errorf("want error %v(%T), got %v(%T)", RemoteErr(testPayload), RemoteErr(testPayload), err, err)
@ -290,7 +290,7 @@ func TestSingleRoundtripGenericsRecycle(t *testing.T) {
start := time.Now()
req := NewMSSWith(map[string]string{"test": testPayload})
resp, err := h1.Call(context.Background(), remoteConn, req)
resp, err := h1.Call(t.Context(), remoteConn, req)
errFatal(err)
if resp.Get("test") != testPayload {
t.Errorf("want %q, got %q", testPayload, resp.Get("test"))
@ -299,7 +299,7 @@ func TestSingleRoundtripGenericsRecycle(t *testing.T) {
h1.PutResponse(resp)
start = time.Now()
resp, err = h2.Call(context.Background(), remoteConn, NewMSSWith(map[string]string{"err": testPayload}))
resp, err = h2.Call(t.Context(), remoteConn, NewMSSWith(map[string]string{"err": testPayload}))
t.Log("Roundtrip:", time.Since(start))
if err != RemoteErr(testPayload) {
t.Errorf("want error %v(%T), got %v(%T)", RemoteErr(testPayload), RemoteErr(testPayload), err, err)
@ -479,7 +479,7 @@ func testStreamRoundtrip(t *testing.T, local, remote *Manager) {
const testPayload = "Hello Grid World!"
start := time.Now()
stream, err := remoteConn.NewStream(context.Background(), handlerTest, []byte(testPayload))
stream, err := remoteConn.NewStream(t.Context(), handlerTest, []byte(testPayload))
errFatal(err)
var n int
stream.Requests <- []byte(strconv.Itoa(n))
@ -544,7 +544,7 @@ func testStreamCancel(t *testing.T, local, remote *Manager) {
remoteConn := local.Connection(remoteHost)
const testPayload = "Hello Grid World!"
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
st, err := remoteConn.NewStream(ctx, handler, []byte(testPayload))
errFatal(err)
clientCanceled := make(chan time.Time, 1)
@ -659,7 +659,7 @@ func testStreamDeadline(t *testing.T, local, remote *Manager) {
remoteConn := local.Connection(remoteHost)
const testPayload = "Hello Grid World!"
ctx, cancel := context.WithTimeout(context.Background(), wantDL)
ctx, cancel := context.WithTimeout(t.Context(), wantDL)
defer cancel()
st, err := remoteConn.NewStream(ctx, handler, []byte(testPayload))
errFatal(err)
@ -735,7 +735,7 @@ func testServerOutCongestion(t *testing.T, local, remote *Manager) {
remoteConn := local.Connection(remoteHost)
const testPayload = "Hello Grid World!"
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
ctx, cancel := context.WithTimeout(t.Context(), time.Minute)
defer cancel()
st, err := remoteConn.NewStream(ctx, handlerTest, []byte(testPayload))
errFatal(err)
@ -813,7 +813,7 @@ func testServerInCongestion(t *testing.T, local, remote *Manager) {
remoteConn := local.Connection(remoteHost)
const testPayload = "Hello Grid World!"
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
ctx, cancel := context.WithTimeout(t.Context(), time.Minute)
defer cancel()
st, err := remoteConn.NewStream(ctx, handlerTest, []byte(testPayload))
errFatal(err)
@ -893,7 +893,7 @@ func testGenericsStreamRoundtrip(t *testing.T, local, remote *Manager) {
const testPayload = "Hello Grid World!"
start := time.Now()
stream, err := handler.Call(context.Background(), remoteConn, &testRequest{Num: 1, String: testPayload})
stream, err := handler.Call(t.Context(), remoteConn, &testRequest{Num: 1, String: testPayload})
errFatal(err)
go func() {
defer close(stream.Requests)
@ -970,7 +970,7 @@ func testGenericsStreamRoundtripSubroute(t *testing.T, local, remote *Manager) {
remoteSub := remoteConn.Subroute(strings.Join([]string{"subroute", "1"}, "/"))
start := time.Now()
stream, err := handler.Call(context.Background(), remoteSub, &testRequest{Num: 1, String: testPayload})
stream, err := handler.Call(t.Context(), remoteSub, &testRequest{Num: 1, String: testPayload})
errFatal(err)
go func() {
defer close(stream.Requests)
@ -1043,7 +1043,7 @@ func testServerStreamResponseBlocked(t *testing.T, local, remote *Manager) {
remoteConn := local.Connection(remoteHost)
const testPayload = "Hello Grid World!"
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
st, err := remoteConn.NewStream(ctx, handlerTest, []byte(testPayload))
errFatal(err)
@ -1125,7 +1125,7 @@ func testServerStreamNoPing(t *testing.T, local, remote *Manager, inCap int) {
remoteConn.debugMsg(debugSetClientPingDuration, 100*time.Millisecond)
defer remoteConn.debugMsg(debugSetClientPingDuration, clientPingInterval)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
ctx, cancel := context.WithTimeout(t.Context(), time.Minute)
defer cancel()
st, err := remoteConn.NewStream(ctx, handlerTest, []byte(testPayload))
errFatal(err)
@ -1198,7 +1198,7 @@ func testServerStreamPingRunning(t *testing.T, local, remote *Manager, inCap int
remoteConn.debugMsg(debugSetClientPingDuration, 100*time.Millisecond)
defer remoteConn.debugMsg(debugSetClientPingDuration, clientPingInterval)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
ctx, cancel := context.WithTimeout(t.Context(), time.Minute)
defer cancel()
st, err := remoteConn.NewStream(ctx, handlerTest, []byte(testPayload))
errFatal(err)
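
One subtlety worth noting in the grid tests above: inside t.Run, the subtest's *testing.T carries its own context, canceled when that subtest finishes, independently of the parent's. A small sketch:

package example

import "testing"

func TestParentChild(t *testing.T) {
	parentCtx := t.Context()

	t.Run("child", func(t *testing.T) {
		// This t is the subtest; its context ends with the subtest.
		if t.Context().Err() != nil {
			t.Fatal("child context should be live inside the subtest")
		}
	})

	// The parent context is still live after the subtest completes.
	if parentCtx.Err() != nil {
		t.Fatal("parent context should not be canceled yet")
	}
}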

View File

@ -19,7 +19,6 @@ package hash
import (
"bytes"
"context"
"encoding/base64"
"encoding/hex"
"fmt"
@ -31,7 +30,7 @@ import (
// Tests functions like Size(), MD5*(), SHA256*()
func TestHashReaderHelperMethods(t *testing.T) {
r, err := NewReader(context.Background(), bytes.NewReader([]byte("abcd")), 4, "e2fc714c4727ee9395f324cd2e7f331f", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", 4)
r, err := NewReader(t.Context(), bytes.NewReader([]byte("abcd")), 4, "e2fc714c4727ee9395f324cd2e7f331f", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", 4)
if err != nil {
t.Fatal(err)
}
@ -195,7 +194,7 @@ func TestHashReaderVerification(t *testing.T) {
}
for i, testCase := range testCases {
t.Run(fmt.Sprintf("case-%d", i+1), func(t *testing.T) {
r, err := NewReader(context.Background(), testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize)
r, err := NewReader(t.Context(), testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize)
if err != nil {
t.Fatalf("Test %q: Initializing reader failed %s", testCase.desc, err)
}
@ -214,7 +213,7 @@ func TestHashReaderVerification(t *testing.T) {
}
func mustReader(t *testing.T, src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) *Reader {
r, err := NewReader(context.Background(), src, size, md5Hex, sha256Hex, actualSize)
r, err := NewReader(t.Context(), src, size, md5Hex, sha256Hex, actualSize)
if err != nil {
t.Fatal(err)
}
@ -304,7 +303,7 @@ func TestHashReaderInvalidArguments(t *testing.T) {
for i, testCase := range testCases {
t.Run(fmt.Sprintf("case-%d", i+1), func(t *testing.T) {
_, err := NewReader(context.Background(), testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize)
_, err := NewReader(t.Context(), testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize)
if err != nil && testCase.success {
t.Errorf("Test %q: Expected success, but got error %s instead", testCase.desc, err)
}

View File

@ -18,7 +18,6 @@
package http
import (
"context"
"crypto/tls"
"net"
"runtime"
@ -153,7 +152,7 @@ func TestNewHTTPListener(t *testing.T) {
}
for testIdx, testCase := range testCases {
listener, listenErrs := newHTTPListener(context.Background(),
listener, listenErrs := newHTTPListener(t.Context(),
testCase.serverAddrs,
TCPOptions{},
)
@ -192,7 +191,7 @@ func TestHTTPListenerStartClose(t *testing.T) {
nextTest:
for i, testCase := range testCases {
listener, errs := newHTTPListener(context.Background(),
listener, errs := newHTTPListener(t.Context(),
testCase.serverAddrs,
TCPOptions{},
)
@ -246,7 +245,7 @@ func TestHTTPListenerAddr(t *testing.T) {
nextTest:
for i, testCase := range testCases {
listener, errs := newHTTPListener(context.Background(),
listener, errs := newHTTPListener(t.Context(),
testCase.serverAddrs,
TCPOptions{},
)
@ -297,7 +296,7 @@ func TestHTTPListenerAddrs(t *testing.T) {
nextTest:
for i, testCase := range testCases {
listener, errs := newHTTPListener(context.Background(),
listener, errs := newHTTPListener(t.Context(),
testCase.serverAddrs,
TCPOptions{},
)

View File

@ -19,7 +19,6 @@ package kms
import (
"bytes"
"context"
"encoding/base64"
"testing"
)
@ -30,11 +29,11 @@ func TestSingleKeyRoundtrip(t *testing.T) {
t.Fatalf("Failed to initialize KMS: %v", err)
}
key, err := KMS.GenerateKey(context.Background(), &GenerateKeyRequest{Name: "my-key"})
key, err := KMS.GenerateKey(t.Context(), &GenerateKeyRequest{Name: "my-key"})
if err != nil {
t.Fatalf("Failed to generate key: %v", err)
}
plaintext, err := KMS.Decrypt(context.TODO(), &DecryptRequest{
plaintext, err := KMS.Decrypt(t.Context(), &DecryptRequest{
Name: key.KeyID,
Ciphertext: key.Ciphertext,
})
@ -57,7 +56,7 @@ func TestDecryptKey(t *testing.T) {
if err != nil {
t.Fatalf("Test %d: failed to decode plaintext key: %v", i, err)
}
plaintext, err := KMS.Decrypt(context.TODO(), &DecryptRequest{
plaintext, err := KMS.Decrypt(t.Context(), &DecryptRequest{
Name: test.KeyID,
Ciphertext: []byte(test.Ciphertext),
AssociatedData: test.Context,
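
The sweep covers context.TODO() as well as context.Background(), as in the KMS tests above; both were placeholder contexts, and t.Context() is the better default in tests because it is actually canceled. A sketch with a hypothetical decrypt helper standing in for the KMS.Decrypt calls:

package example

import (
	"context"
	"testing"
)

// decrypt is a hypothetical stand-in for the KMS.Decrypt calls above.
func decrypt(ctx context.Context) error { return ctx.Err() }

func TestDecrypt(t *testing.T) {
	// Before: context.TODO() and context.Background() were interchangeable
	// placeholders here. After: the call is tied to the test's lifetime.
	if err := decrypt(t.Context()); err != nil {
		t.Fatal(err)
	}
}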

View File

@ -30,7 +30,7 @@ import (
)
func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) {
ctx := context.Background()
ctx := t.Context()
lrwm := NewLRWMutex()
if !lrwm.GetRLock(ctx, "", "object1", time.Second) {
@ -87,7 +87,7 @@ func TestSimpleWriteLockTimedOut(t *testing.T) {
}
func testDualWriteLock(t *testing.T, duration time.Duration) (locked bool) {
ctx := context.Background()
ctx := t.Context()
lrwm := NewLRWMutex()
// fmt.Println("Getting initial write lock")