Remove deprecated io/ioutil (#15707)

Klaus Post
2022-09-19 20:05:16 +02:00
committed by GitHub
parent 0b6175b742
commit ff12080ff5
89 changed files with 315 additions and 370 deletions
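
Editorial note, not part of the commit: every change below follows the same pattern. The helpers from the deprecated io/ioutil package are swapped for their standard-library equivalents, which have provided the same functionality since Go 1.16 (io.ReadAll, io.NopCloser, io.Discard, os.ReadFile, os.WriteFile, os.ReadDir, os.CreateTemp, os.MkdirTemp). The standalone sketch below illustrates the mapping; the file names and paths in it are made up for the example and are not taken from the diff.

package main

import (
	"io"
	"log"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	// ioutil.ReadAll -> io.ReadAll
	data, err := io.ReadAll(strings.NewReader("payload"))
	if err != nil {
		log.Fatal(err)
	}

	// ioutil.NopCloser -> io.NopCloser, ioutil.Discard -> io.Discard
	rc := io.NopCloser(strings.NewReader(string(data)))
	if _, err := io.Copy(io.Discard, rc); err != nil {
		log.Fatal(err)
	}

	// ioutil.TempDir -> os.MkdirTemp
	dir, err := os.MkdirTemp("", "ioutil-migration")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// ioutil.WriteFile -> os.WriteFile, ioutil.ReadFile -> os.ReadFile
	path := filepath.Join(dir, "example.txt")
	if err := os.WriteFile(path, data, 0o644); err != nil {
		log.Fatal(err)
	}
	if _, err := os.ReadFile(path); err != nil {
		log.Fatal(err)
	}

	// ioutil.TempFile -> os.CreateTemp
	f, err := os.CreateTemp(dir, "example-*")
	if err != nil {
		log.Fatal(err)
	}
	f.Close()

	// ioutil.ReadDir -> os.ReadDir (returns []fs.DirEntry, not []fs.FileInfo)
	if _, err := os.ReadDir(dir); err != nil {
		log.Fatal(err)
	}
}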

View File

@@ -25,7 +25,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"time"
@@ -74,7 +73,7 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
return
}
data, err := ioutil.ReadAll(r.Body)
data, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@@ -671,7 +670,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
if objectAPI == nil {
return
}
data, err := ioutil.ReadAll(r.Body)
data, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@@ -871,7 +870,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
bucketPolicyBytes, err := ioutil.ReadAll(io.LimitReader(reader, sz))
bucketPolicyBytes, err := io.ReadAll(io.LimitReader(reader, sz))
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
@@ -1032,7 +1031,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
case bucketQuotaConfigFile:
data, err := ioutil.ReadAll(reader)
data, err := io.ReadAll(reader)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue

View File

@@ -22,7 +22,6 @@ import (
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"strings"
"time"
@@ -319,7 +318,7 @@ func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Re
}
func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
data, err := ioutil.ReadAll(body)
data, err := io.ReadAll(body)
if err != nil {
return SRError{
Cause: err,

View File

@@ -22,7 +22,6 @@ import (
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"os"
"sort"
@@ -229,7 +228,7 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
return
}
data, err := ioutil.ReadAll(r.Body)
data, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@@ -1464,7 +1463,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
return
}
iamPolicyBytes, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength))
iamPolicyBytes, err := io.ReadAll(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -1848,7 +1847,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
data, err := ioutil.ReadAll(r.Body)
data, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@@ -1871,7 +1870,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var allPolicies map[string]iampolicy.Policy
data, err = ioutil.ReadAll(f)
data, err = io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, allPoliciesFile, ""), r.URL)
return
@@ -1906,7 +1905,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var userAccts map[string]madmin.AddOrUpdateUserReq
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, allUsersFile, ""), r.URL)
return
@@ -1983,7 +1982,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var grpInfos map[string]GroupInfo
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, allGroupsFile, ""), r.URL)
return
@@ -2021,7 +2020,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var serviceAcctReqs map[string]madmin.SRSvcAccCreate
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, allSvcAcctsFile, ""), r.URL)
return
@@ -2117,7 +2116,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var userPolicyMap map[string]MappedPolicy
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, userPolicyMappingsFile, ""), r.URL)
return
@@ -2156,7 +2155,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var grpPolicyMap map[string]MappedPolicy
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, groupPolicyMappingsFile, ""), r.URL)
return
@@ -2185,7 +2184,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var userPolicyMap map[string]MappedPolicy
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, stsUserPolicyMappingsFile, ""), r.URL)
return
@@ -2224,7 +2223,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var grpPolicyMap map[string]MappedPolicy
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, stsGroupPolicyMappingsFile, ""), r.URL)
return

View File

@@ -24,7 +24,7 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/url"
"os"
@@ -388,7 +388,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
req.ContentLength = int64(len(buf))
sum := sha256.Sum256(buf)
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum[:]))
req.Body = ioutil.NopCloser(bytes.NewReader(buf))
req.Body = io.NopCloser(bytes.NewReader(buf))
req = signer.SignV4(*req, accessKey, secretKey, "", "")
// 3.1 Execute the request.

View File

@@ -27,7 +27,6 @@ import (
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"math"
"math/rand"
"net/http"
@@ -758,7 +757,7 @@ func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request)
}
}
// read request body
io.CopyN(ioutil.Discard, r.Body, 1)
io.CopyN(io.Discard, r.Body, 1)
globalProfilerMu.Lock()

View File

@@ -22,7 +22,6 @@ import (
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
@@ -220,7 +219,7 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
adminTestBed.router.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
resp, _ := ioutil.ReadAll(rec.Body)
resp, _ := io.ReadAll(rec.Body)
t.Errorf("Expected to receive %d status code but received %d. Body (%s)",
http.StatusOK, rec.Code, string(resp))
}

View File

@@ -25,7 +25,6 @@ import (
"encoding/hex"
"errors"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
@@ -333,14 +332,14 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
var locationConstraint string
if action == policy.CreateBucketAction {
// To extract region from XML in request body, get copy of request body.
payload, err := ioutil.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
payload, err := io.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
return cred, owner, ErrMalformedXML
}
// Populate payload to extract location constraint.
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
r.Body = io.NopCloser(bytes.NewReader(payload))
var s3Error APIErrorCode
locationConstraint, s3Error = parseLocationConstraint(r)
@@ -349,7 +348,7 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
}
// Populate payload again to handle it in HTTP handler.
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
r.Body = io.NopCloser(bytes.NewReader(payload))
}
if cred.AccessKey != "" {
logger.GetReqInfo(ctx).AccessKey = cred.AccessKey

View File

@@ -21,7 +21,6 @@ import (
"bytes"
"context"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
@@ -44,7 +43,7 @@ func TestGetRequestAuthType(t *testing.T) {
req *http.Request
authT authType
}
nopCloser := ioutil.NopCloser(io.LimitReader(&nullReader{}, 1024))
nopCloser := io.NopCloser(io.LimitReader(&nullReader{}, 1024))
testCases := []testCase{
// Test case - 1
// Check for generic signature v4 header.
@@ -406,7 +405,7 @@ func TestIsReqAuthenticated(t *testing.T) {
for i, testCase := range testCases {
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3)
if s3Error != testCase.s3Error {
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
if _, err := io.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
}
}

View File

@@ -21,7 +21,7 @@ import (
"bytes"
"encoding/xml"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"strconv"
@@ -885,7 +885,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
}
// read the response body.
actualContent, err = ioutil.ReadAll(rec.Body)
actualContent, err = io.ReadAll(rec.Body)
if err != nil {
t.Fatalf("Test %d : MinIO %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}

View File

@@ -21,7 +21,6 @@ import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"net/http"
humanize "github.com/dustin/go-humanize"
@@ -79,7 +78,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
return
}
bucketPolicyBytes, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength))
bucketPolicyBytes, err := io.ReadAll(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return

View File

@@ -21,7 +21,6 @@ import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"reflect"
@@ -487,7 +486,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, recV4.Code)
}
// read the response body.
bucketPolicyReadBuf, err := ioutil.ReadAll(recV4.Body)
bucketPolicyReadBuf, err := io.ReadAll(recV4.Body)
if err != nil {
t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}
@@ -525,7 +524,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, recV2.Code)
}
// read the response body.
bucketPolicyReadBuf, err = ioutil.ReadAll(recV2.Body)
bucketPolicyReadBuf, err = io.ReadAll(recV2.Body)
if err != nil {
t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}

View File

@@ -27,7 +27,6 @@ import (
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
@@ -564,7 +563,7 @@ func readFromSecret(sp string) (string, error) {
if isFile(pathJoin("/run/secrets/", sp)) {
sp = pathJoin("/run/secrets/", sp)
}
credBuf, err := ioutil.ReadFile(sp)
credBuf, err := os.ReadFile(sp)
if err != nil {
if os.IsNotExist(err) { // ignore if file doesn't exist.
return "", nil

View File

@@ -19,7 +19,7 @@ package cmd
import (
"errors"
"io/ioutil"
"os"
"reflect"
"testing"
)
@@ -45,7 +45,7 @@ func Test_readFromSecret(t *testing.T) {
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "testfile")
tmpfile, err := os.CreateTemp("", "testfile")
if err != nil {
t.Error(err)
}
@@ -157,7 +157,7 @@ MINIO_ROOT_PASSWORD=minio123`,
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "testfile")
tmpfile, err := os.CreateTemp("", "testfile")
if err != nil {
t.Error(err)
}

View File

@@ -21,7 +21,7 @@ import (
"bytes"
"context"
"errors"
"io/ioutil"
"io"
"net/http"
"github.com/minio/minio/internal/hash"
@@ -41,7 +41,7 @@ func readConfigWithMetadata(ctx context.Context, store objectIO, configFile stri
}
defer r.Close()
buf, err := ioutil.ReadAll(r)
buf, err := io.ReadAll(r)
if err != nil {
return nil, ObjectInfo{}, err
}

View File

@@ -19,7 +19,6 @@ package cmd
import (
"fmt"
"io/ioutil"
"os"
"testing"
@@ -47,7 +46,7 @@ func TestServerConfigMigrateV1(t *testing.T) {
// Create a V1 config json file and store it
configJSON := "{ \"version\":\"1\", \"accessKeyId\":\"abcde\", \"secretAccessKey\":\"abcdefgh\"}"
configPath := rootPath + "/fsUsers.json"
if err := ioutil.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
if err := os.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}
@@ -167,7 +166,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
configPath := rootPath + SlashSeparator + minioConfigFile
// Create a corrupted config file
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0o644); err != nil {
if err := os.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Fire a migrateConfig()
@@ -180,7 +179,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
// Create a V2 config json file and store it
configJSON := "{ \"version\":\"2\", \"credentials\": {\"accessKeyId\":\"" + accessKey + "\", \"secretAccessKey\":\"" + secretKey + "\", \"region\":\"us-east-1\"}, \"mongoLogger\":{\"addr\":\"127.0.0.1:3543\", \"db\":\"foodb\", \"collection\":\"foo\"}, \"syslogLogger\":{\"network\":\"127.0.0.1:543\", \"addr\":\"addr\"}, \"fileLogger\":{\"filename\":\"log.out\"}}"
if err := ioutil.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
if err := os.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}
@@ -226,7 +225,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
configPath := rootPath + SlashSeparator + minioConfigFile
// Create a corrupted config file
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0o644); err != nil {
if err := os.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}
@@ -320,7 +319,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
for i := 3; i <= 17; i++ {
// Create a corrupted config file
if err := ioutil.WriteFile(configPath, []byte(fmt.Sprintf("{ \"version\":\"%d\", \"credential\": { \"accessKey\": 1 } }", i)),
if err := os.WriteFile(configPath, []byte(fmt.Sprintf("{ \"version\":\"%d\", \"credential\": { \"accessKey\": 1 } }", i)),
0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}
@@ -332,7 +331,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
}
// Create a corrupted config file for version '2'.
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0o644); err != nil {
if err := os.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}

View File

@@ -24,7 +24,6 @@ import (
"encoding/binary"
"errors"
"io"
"io/ioutil"
"os"
"path"
"sort"
@@ -296,7 +295,7 @@ func (d *dataUpdateTracker) startSaver(ctx context.Context, interval time.Durati
}
for _, drive := range drives {
cacheFormatPath := pathJoin(drive, dataUpdateTrackerFilename)
err := ioutil.WriteFile(cacheFormatPath, buf.Bytes(), os.ModePerm)
err := os.WriteFile(cacheFormatPath, buf.Bytes(), os.ModePerm)
if err != nil {
if osIsNotExist(err) {
continue

View File

@@ -22,7 +22,6 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -502,7 +501,7 @@ func createUsageTestFiles(t *testing.T, base, bucket string, files []usageTestFi
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(base, bucket, f.name), make([]byte, f.size), os.ModePerm)
err = os.WriteFile(filepath.Join(base, bucket, f.name), make([]byte, f.size), os.ModePerm)
if err != nil {
t.Fatal(err)
}
@@ -520,7 +519,7 @@ func generateUsageTestFiles(t *testing.T, base, bucket string, nFolders, nFiles,
}
for j := 0; j < nFiles; j++ {
name := filepath.Join(base, bucket, fmt.Sprint(i), fmt.Sprint(j)+".txt")
err = ioutil.WriteFile(name, pl, os.ModePerm)
err = os.WriteFile(name, pl, os.ModePerm)
if err != nil {
t.Fatal(err)
}

View File

@@ -27,7 +27,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
@@ -851,7 +850,7 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read
// Caches the object to disk
func (c *diskCache) put(ctx context.Context, bucket, object string, data io.Reader, size int64, rs *HTTPRangeSpec, opts ObjectOptions, incHitsOnly, writeback bool) (oi ObjectInfo, err error) {
if !c.diskSpaceAvailable(size) {
io.Copy(ioutil.Discard, data)
io.Copy(io.Discard, data)
return oi, errDiskFull
}
cachePath := getCacheSHADir(c.dir, bucket, object)
@@ -1311,7 +1310,7 @@ func (c *diskCache) NewMultipartUpload(ctx context.Context, bucket, object, uID
func (c *diskCache) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, opts ObjectOptions) (partInfo PartInfo, err error) {
oi := PartInfo{}
if !c.diskSpaceAvailable(size) {
io.Copy(ioutil.Discard, data)
io.Copy(io.Discard, data)
return oi, errDiskFull
}
cachePath := getMultipartCacheSHADir(c.dir, bucket, object)

View File

@@ -21,7 +21,6 @@ package cmd
import (
"errors"
"io/ioutil"
"os"
"github.com/djherbis/atime"
@@ -30,7 +29,7 @@ import (
// Return error if Atime is disabled on the O/S
func checkAtimeSupport(dir string) (err error) {
file, err := ioutil.TempFile(dir, "prefix")
file, err := os.CreateTemp(dir, "prefix")
if err != nil {
return
}

View File

@@ -23,7 +23,6 @@ package cmd
import (
"errors"
"io"
"io/ioutil"
"os"
"time"
@@ -32,7 +31,7 @@ import (
// Return error if Atime is disabled on the O/S
func checkAtimeSupport(dir string) (err error) {
file, err := ioutil.TempFile(dir, "prefix")
file, err := os.CreateTemp(dir, "prefix")
if err != nil {
return
}
@@ -45,7 +44,7 @@ func checkAtimeSupport(dir string) (err error) {
// add a sleep to ensure atime change is detected
time.Sleep(10 * time.Millisecond)
if _, err = io.Copy(ioutil.Discard, file); err != nil {
if _, err = io.Copy(io.Discard, file); err != nil {
return
}

View File

@@ -22,7 +22,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"testing"
)
@@ -46,7 +45,7 @@ type DummyDataGen struct {
// Given the function:
//
// f := func(r io.Reader) string {
// b, _ := ioutil.ReadAll(r)
// b, _ := io.ReadAll(r)
// return string(b)
// }
//
@@ -115,7 +114,7 @@ func (d *DummyDataGen) Seek(offset int64, whence int) (int64, error) {
func TestDummyDataGenerator(t *testing.T) {
readAll := func(r io.Reader) string {
b, _ := ioutil.ReadAll(r)
b, _ := io.ReadAll(r)
return string(b)
}
checkEq := func(a, b string) {

View File

@@ -25,7 +25,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -206,7 +205,7 @@ func TestDeleteObjectsVersioned(t *testing.T) {
}
}
if _, err = ioutil.ReadFile(pathJoin(fsDirs[0], bucketName, "dir/obj1", "xl.meta")); err == nil {
if _, err = os.ReadFile(pathJoin(fsDirs[0], bucketName, "dir/obj1", "xl.meta")); err == nil {
t.Fatalf("xl.meta still present after removal")
}
}
@@ -543,7 +542,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
}
}
if gr != nil {
_, err = io.Copy(ioutil.Discard, gr)
_, err = io.Copy(io.Discard, gr)
if err != toObjectErr(errErasureReadQuorum, bucket, object) {
t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
}
@@ -588,7 +587,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
}
}
if gr != nil {
_, err = io.Copy(ioutil.Discard, gr)
_, err = io.Copy(io.Discard, gr)
if err != toObjectErr(errErasureReadQuorum, bucket, object) {
t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
}

View File

@@ -22,7 +22,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -421,7 +420,7 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
// get old cached metadata
oldMetaPath := pathJoin(oldCacheBucketsPath, bucket, object, cacheMetaJSONFile)
metaPath := pathJoin(destdir, cacheMetaJSONFile)
metaBytes, err := ioutil.ReadFile(oldMetaPath)
metaBytes, err := os.ReadFile(oldMetaPath)
if err != nil {
return err
}
@@ -459,7 +458,7 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
return err
}
if err = ioutil.WriteFile(metaPath, jsonData, 0o644); err != nil {
if err = os.WriteFile(metaPath, jsonData, 0o644); err != nil {
return err
}
}

View File

@@ -23,11 +23,11 @@ import (
"errors"
"fmt"
"io/fs"
"io/ioutil"
"os"
"reflect"
"sync"
humanize "github.com/dustin/go-humanize"
"github.com/dustin/go-humanize"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/config/storageclass"
@@ -203,7 +203,7 @@ func formatErasureMigrate(export string) ([]byte, fs.FileInfo, error) {
}
migrate := func(formatPath string, formatData []byte) ([]byte, fs.FileInfo, error) {
if err = ioutil.WriteFile(formatPath, formatData, 0o666); err != nil {
if err = os.WriteFile(formatPath, formatData, 0o666); err != nil {
return nil, nil, err
}
formatFi, err := Lstat(formatPath)

View File

@@ -22,7 +22,6 @@ import (
"encoding/hex"
"encoding/json"
"errors"
"io/ioutil"
"os"
"reflect"
"testing"
@@ -122,7 +121,7 @@ func TestFormatErasureMigrate(t *testing.T) {
t.Fatal(err)
}
if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
if err = os.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
t.Fatal(err)
}
@@ -140,7 +139,7 @@ func TestFormatErasureMigrate(t *testing.T) {
t.Fatalf("expected version: %s, got: %s", formatErasureVersionV3, migratedVersion)
}
b, err = ioutil.ReadFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile))
b, err = os.ReadFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile))
if err != nil {
t.Fatal(err)
}
@@ -170,7 +169,7 @@ func TestFormatErasureMigrate(t *testing.T) {
t.Fatal(err)
}
if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
if err = os.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
t.Fatal(err)
}
@@ -190,7 +189,7 @@ func TestFormatErasureMigrate(t *testing.T) {
t.Fatal(err)
}
if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
if err = os.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
t.Fatal(err)
}

View File

@@ -20,7 +20,6 @@ package cmd
import (
"bytes"
"io"
"io/ioutil"
"os"
"path"
"testing"
@@ -267,7 +266,7 @@ func TestFSDeletes(t *testing.T) {
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(pathJoin(path, "success-vol", "not-empty", "file"), []byte("data"), 0o777)
err = os.WriteFile(pathJoin(path, "success-vol", "not-empty", "file"), []byte("data"), 0o777)
if err != nil {
t.Fatal(err)
}
@@ -363,7 +362,7 @@ func BenchmarkFSDeleteFile(b *testing.B) {
// We need to create and delete the file sequentially inside the benchmark.
for i := 0; i < b.N; i++ {
b.StopTimer()
err = ioutil.WriteFile(filename, []byte("data"), 0o777)
err = os.WriteFile(filename, []byte("data"), 0o777)
if err != nil {
b.Fatal(err)
}
@@ -540,7 +539,7 @@ func TestFSRemoveMeta(t *testing.T) {
func TestFSIsFile(t *testing.T) {
filePath := pathJoin(t.TempDir(), "tmpfile")
if err := ioutil.WriteFile(filePath, nil, 0o777); err != nil {
if err := os.WriteFile(filePath, nil, 0o777); err != nil {
t.Fatalf("Unable to create file %s", filePath)
}

View File

@@ -22,7 +22,6 @@ import (
"encoding/hex"
"encoding/json"
"io"
"io/ioutil"
"os"
pathutil "path"
@@ -216,7 +215,7 @@ func (m *fsMetaV1) ReadFrom(ctx context.Context, lk *lock.LockedFile) (n int64,
return 0, err
}
fsMetaBuf, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
fsMetaBuf, err = io.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
if err != nil {
logger.LogIf(ctx, err)
return 0, err

View File

@@ -22,7 +22,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"io"
"os"
"path/filepath"
"sort"
@@ -246,7 +246,7 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
return nil, err
}
if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0o666); err != nil {
if err = os.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0o666); err != nil {
logger.LogIf(ctx, err)
return nil, err
}
@@ -525,7 +525,7 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
}
defer rc.Close()
fsMetaBytes, err := ioutil.ReadAll(rc)
fsMetaBytes, err := io.ReadAll(rc)
if err != nil {
return result, toObjectErr(err, bucket, object)
}

View File

@@ -22,7 +22,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/user"
@@ -868,7 +867,7 @@ func (fs *FSObjects) getObjectInfoNoFSLock(ctx context.Context, bucket, object s
rc, _, err := fsOpenFile(ctx, fsMetaPath, 0)
if err == nil {
fsMetaBuf, rerr := ioutil.ReadAll(rc)
fsMetaBuf, rerr := io.ReadAll(rc)
rc.Close()
if rerr == nil {
json := jsoniter.ConfigCompatibleWithStandardLibrary

View File

@@ -21,7 +21,7 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"io"
"log"
"net/url"
"os"
@@ -279,7 +279,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
UseTLSConfig(newTLSConfig(getCert)).
UseShutdownTimeout(ctx.Duration("shutdown-timeout")).
UseBaseContext(GlobalContext).
UseCustomLogger(log.New(ioutil.Discard, "", 0)) // Turn-off random logging by Go stdlib
UseCustomLogger(log.New(io.Discard, "", 0)) // Turn-off random logging by Go stdlib
go func() {
globalHTTPServerErrorCh <- httpServer.Start(GlobalContext)

View File

@@ -18,8 +18,8 @@
package cmd
import (
"io/ioutil"
"net/http"
"os"
"runtime"
"strconv"
"sync"
@@ -56,7 +56,7 @@ type apiConfig struct {
const cgroupLimitFile = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
func cgroupLimit(limitFile string) (limit uint64) {
buf, err := ioutil.ReadFile(limitFile)
buf, err := os.ReadFile(limitFile)
if err != nil {
return 9223372036854771712
}

View File

@@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/textproto"
@@ -305,7 +304,7 @@ func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (fil
b.WriteString(v)
}
fileSize = int64(b.Len())
filePart = ioutil.NopCloser(b)
filePart = io.NopCloser(b)
return filePart, fileName, fileSize, formValues, nil
}

View File

@@ -21,7 +21,7 @@ import (
"bytes"
"context"
"encoding/xml"
"io/ioutil"
"io"
"net/http"
"net/textproto"
"os"
@@ -45,13 +45,13 @@ func TestIsValidLocationContraint(t *testing.T) {
// Corrupted XML
malformedReq := &http.Request{
Body: ioutil.NopCloser(bytes.NewReader([]byte("<>"))),
Body: io.NopCloser(bytes.NewReader([]byte("<>"))),
ContentLength: int64(len("<>")),
}
// Not an XML
badRequest := &http.Request{
Body: ioutil.NopCloser(bytes.NewReader([]byte("garbage"))),
Body: io.NopCloser(bytes.NewReader([]byte("garbage"))),
ContentLength: int64(len("garbage")),
}
@@ -61,7 +61,7 @@ func TestIsValidLocationContraint(t *testing.T) {
createBucketConfig.Location = location
createBucketConfigBytes, _ := xml.Marshal(createBucketConfig)
createBucketConfigBuffer := bytes.NewReader(createBucketConfigBytes)
req.Body = ioutil.NopCloser(createBucketConfigBuffer)
req.Body = io.NopCloser(createBucketConfigBuffer)
req.ContentLength = int64(createBucketConfigBuffer.Len())
return req
}

View File

@@ -20,7 +20,7 @@ package cmd
import (
"bufio"
"bytes"
"io/ioutil"
"io"
"net/http"
"net/url"
"sort"
@@ -49,7 +49,7 @@ func BenchmarkLockArgs(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
req.Body = ioutil.NopCloser(bytes.NewReader(argBytes))
req.Body = io.NopCloser(bytes.NewReader(argBytes))
getLockArgs(req)
}
}
@@ -68,7 +68,7 @@ func BenchmarkLockArgsOld(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
req.Body = ioutil.NopCloser(bytes.NewReader([]byte(`obj.txt`)))
req.Body = io.NopCloser(bytes.NewReader([]byte(`obj.txt`)))
getLockArgsOld(req)
}
}

File diff suppressed because one or more lines are too long

View File

@@ -23,7 +23,6 @@ import (
"crypto/md5"
"encoding/hex"
"errors"
"io/ioutil"
"os"
"path"
"testing"
@@ -352,7 +351,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk
for _, disk := range disks {
tmpMetaDir := path.Join(disk, minioMetaTmpBucket)
files, err := ioutil.ReadDir(tmpMetaDir)
files, err := os.ReadDir(tmpMetaDir)
if err != nil {
t.Fatal(err)
}
@@ -431,9 +430,9 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
for _, disk := range disks {
tmpMetaDir := path.Join(disk, minioMetaTmpBucket)
files, err := ioutil.ReadDir(tmpMetaDir)
files, err := os.ReadDir(tmpMetaDir)
if err != nil {
// Its OK to have non-existen tmpMetaDir.
// It's OK to have non-existing tmpMetaDir.
if osIsNotExist(err) {
continue
}
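
Editorial note, not part of the diff: unlike the other substitutions in this commit, ioutil.ReadDir and os.ReadDir are not signature-compatible. ioutil.ReadDir returned a sorted []fs.FileInfo, while os.ReadDir returns a sorted []fs.DirEntry, so any caller that needs file sizes or modification times has to call Info() on each entry. A minimal standalone sketch of that extra step (the directory used here is arbitrary):

package main

import (
	"log"
	"os"
)

func main() {
	// os.ReadDir returns []fs.DirEntry; ioutil.ReadDir used to return []fs.FileInfo.
	entries, err := os.ReadDir(os.TempDir())
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		info, err := entry.Info() // recovers the fs.FileInfo when size/mod time is needed
		if err != nil {
			continue // entry may have been removed since ReadDir
		}
		log.Println(entry.Name(), info.Size(), info.ModTime())
	}
}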

View File

@@ -29,7 +29,6 @@ import (
"hash"
"hash/crc32"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
@@ -540,7 +539,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, rec.Code)
}
// read the response body.
actualContent, err := ioutil.ReadAll(rec.Body)
actualContent, err := io.ReadAll(rec.Body)
if err != nil {
t.Fatalf("Test %d: %s: Failed reading response body: <ERROR> %v", i+1, instanceType, err)
}
@@ -584,7 +583,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
}
// read the response body.
actualContent, err = ioutil.ReadAll(recV2.Body)
actualContent, err = io.ReadAll(recV2.Body)
if err != nil {
t.Fatalf("Test %d: %s: Failed to read response body: <ERROR> %v", i+1, instanceType, err)
}
@@ -739,7 +738,7 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str
// Check response code (we make only valid requests in
// this test)
if rec.Code != http.StatusPartialContent && rec.Code != http.StatusOK {
bd, err1 := ioutil.ReadAll(rec.Body)
bd, err1 := io.ReadAll(rec.Body)
t.Fatalf("%s Object: %s Case %d ByteRange: %s: Got response status `%d` and body: %s,%v",
instanceType, object, i+1, byteRange, rec.Code, string(bd), err1)
}
@@ -922,7 +921,7 @@ func testAPIGetObjectWithPartNumberHandler(obj ObjectLayer, instanceType, bucket
// Check response code (we make only valid requests in this test)
if rec.Code != http.StatusPartialContent && rec.Code != http.StatusOK {
bd, err1 := ioutil.ReadAll(rec.Body)
bd, err1 := io.ReadAll(rec.Body)
t.Fatalf("%s Object: %s ObjectIndex %d PartNumber: %d: Got response status `%d` and body: %s,%v",
instanceType, object, oindex, partNumber, rec.Code, string(bd), err1)
}
@@ -1245,7 +1244,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
i+1, instanceType, testCase.expectedRespStatus, rec.Code, testCase.fault)
}
// read the response body.
actualContent, err := ioutil.ReadAll(rec.Body)
actualContent, err := io.ReadAll(rec.Body)
if err != nil {
t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}
@@ -3078,7 +3077,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
}
// read the response body.
actualContent, err = ioutil.ReadAll(rec.Body)
actualContent, err = io.ReadAll(rec.Body)
if err != nil {
t.Fatalf("Test %d : MinIO %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}
@@ -3490,7 +3489,7 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN
// Get uploadID of the mulitpart upload initiated.
var mpartResp InitiateMultipartUploadResponse
mpartRespBytes, err := ioutil.ReadAll(rec.Result().Body)
mpartRespBytes, err := io.ReadAll(rec.Result().Body)
if err != nil {
t.Fatalf("[%s] Failed to read NewMultipartUpload response <ERROR> %v", instanceType, err)
}
@@ -3531,7 +3530,7 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN
apiRouter.ServeHTTP(rec, req)
if test.expectedErr != noAPIErr {
errBytes, err := ioutil.ReadAll(rec.Result().Body)
errBytes, err := io.ReadAll(rec.Result().Body)
if err != nil {
t.Fatalf("Test %d %s Failed to read error response from upload part request %s/%s: <ERROR> %v",
i+1, instanceType, bucketName, testObject, err)
@@ -3796,7 +3795,7 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
if rec.Code != http.StatusOK {
var errBytes []byte
// read the response body.
errBytes, err = ioutil.ReadAll(rec.Result().Body)
errBytes, err = io.ReadAll(rec.Result().Body)
if err != nil {
t.Fatalf("%s, Failed to read error response from upload part request \"%s\"/\"%s\": <ERROR> %v.",
reqType, bucketName, test.objectName, err)
@@ -3878,7 +3877,7 @@ func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketN
// Get uploadID of the mulitpart upload initiated.
var mpartResp InitiateMultipartUploadResponse
mpartRespBytes, err := ioutil.ReadAll(rec.Result().Body)
mpartRespBytes, err := io.ReadAll(rec.Result().Body)
if err != nil {
t.Fatalf("[%s] Failed to read NewMultipartUpload response <ERROR> %v", instanceType, err)
}
@@ -4103,7 +4102,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
var errBytes []byte
// read the response body.
errBytes, err = ioutil.ReadAll(rec.Result().Body)
errBytes, err = io.ReadAll(rec.Result().Body)
if err != nil {
t.Fatalf("%s,Failed to read error response list object parts request %s/%s: <ERROR> %v", reqType, bucketName, testObject, err)
}

View File

@@ -19,7 +19,6 @@ package cmd
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -36,7 +35,7 @@ func TestReadDirFail(t *testing.T) {
}
file := path.Join(os.TempDir(), "issue")
if err := ioutil.WriteFile(file, []byte(""), 0o644); err != nil {
if err := os.WriteFile(file, []byte(""), 0o644); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(file)
@@ -80,7 +79,7 @@ func setupTestReadDirFiles(t *testing.T) (testResults []result) {
entries := []string{}
for i := 0; i < 10; i++ {
name := fmt.Sprintf("file-%d", i)
if err := ioutil.WriteFile(filepath.Join(dir, name), []byte{}, os.ModePerm); err != nil {
if err := os.WriteFile(filepath.Join(dir, name), []byte{}, os.ModePerm); err != nil {
// For cleanup, its required to add these entries into test results.
testResults = append(testResults, result{dir, entries})
t.Fatalf("Unable to create file, %s", err)
@@ -105,7 +104,7 @@ func setupTestReadDirGeneric(t *testing.T) (testResults []result) {
entries := []string{"mydir/"}
for i := 0; i < 10; i++ {
name := fmt.Sprintf("file-%d", i)
if err := ioutil.WriteFile(filepath.Join(dir, "mydir", name), []byte{}, os.ModePerm); err != nil {
if err := os.WriteFile(filepath.Join(dir, "mydir", name), []byte{}, os.ModePerm); err != nil {
// For cleanup, its required to add these entries into test results.
testResults = append(testResults, result{dir, entries})
t.Fatalf("Unable to write file, %s", err)
@@ -130,7 +129,7 @@ func setupTestReadDirSymlink(t *testing.T) (testResults []result) {
for i := 0; i < 10; i++ {
name1 := fmt.Sprintf("file-%d", i)
name2 := fmt.Sprintf("file-%d", i+10)
if err := ioutil.WriteFile(filepath.Join(dir, name1), []byte{}, os.ModePerm); err != nil {
if err := os.WriteFile(filepath.Join(dir, name1), []byte{}, os.ModePerm); err != nil {
// For cleanup, its required to add these entries into test results.
testResults = append(testResults, result{dir, entries})
t.Fatalf("Unable to create a file, %s", err)
@@ -235,7 +234,7 @@ func TestReadDirN(t *testing.T) {
dir := t.TempDir()
for c := 1; c <= testCase.numFiles; c++ {
err := ioutil.WriteFile(filepath.Join(dir, fmt.Sprintf("%d", c)), []byte{}, os.ModePerm)
err := os.WriteFile(filepath.Join(dir, fmt.Sprintf("%d", c)), []byte{}, os.ModePerm)
if err != nil {
os.RemoveAll(dir)
t.Fatalf("Unable to create a file, %s", err)

View File

@@ -22,7 +22,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"sync"
@@ -185,7 +184,7 @@ func selfSpeedTest(ctx context.Context, opts speedTestOpts) (SpeedTestResult, er
fbr := firstByteRecorder{
r: r,
}
n, err := io.Copy(ioutil.Discard, &fbr)
n, err := io.Copy(io.Discard, &fbr)
r.Close()
if err == nil {
response := time.Since(t)

View File

@@ -22,7 +22,7 @@ import (
"context"
"encoding/base64"
"fmt"
"io/ioutil"
"io"
"mime/multipart"
"net/http"
"net/http/httptest"
@@ -222,7 +222,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
}
if testCase.malformedBody {
// Change the request body.
req.Body = ioutil.NopCloser(bytes.NewReader([]byte("Hello,")))
req.Body = io.NopCloser(bytes.NewReader([]byte("Hello,")))
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
// Call the ServeHTTP to execute the handler.

View File

@@ -24,7 +24,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"sort"
"strings"
@@ -184,7 +183,7 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context,
return
}
} else {
rc = ioutil.NopCloser(bytes.NewReader([]byte{}))
rc = io.NopCloser(bytes.NewReader([]byte{}))
}
defer rc.Close()
@@ -317,7 +316,7 @@ func getFilesListFromZIPObject(ctx context.Context, objectAPI ObjectLayer, bucke
if err != nil {
return nil, ObjectInfo{}, err
}
b, err := ioutil.ReadAll(gr)
b, err := io.ReadAll(gr)
gr.Close()
if err != nil {
return nil, ObjectInfo{}, err

View File

@@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
@@ -35,7 +34,7 @@ import (
"time"
"github.com/minio/cli"
minio "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/bucket/bandwidth"
@@ -516,7 +515,7 @@ func serverMain(ctx *cli.Context) {
UseIdleTimeout(ctx.Duration("idle-timeout")).
UseReadHeaderTimeout(ctx.Duration("read-header-timeout")).
UseBaseContext(GlobalContext).
UseCustomLogger(log.New(ioutil.Discard, "", 0)) // Turn-off random logging by Go stdlib
UseCustomLogger(log.New(io.Discard, "", 0)) // Turn-off random logging by Go stdlib
go func() {
globalHTTPServerErrorCh <- httpServer.Start(GlobalContext)

View File

@@ -23,7 +23,6 @@ import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
@@ -34,7 +33,7 @@ import (
"testing"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/dustin/go-humanize"
"github.com/minio/minio-go/v7/pkg/set"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/pkg/bucket/policy"
@@ -65,7 +64,7 @@ func (c *check) Assert(gotValue interface{}, expectedValue interface{}) {
}
func verifyError(c *check, response *http.Response, code, description string, statusCode int) {
data, err := ioutil.ReadAll(response.Body)
data, err := io.ReadAll(response.Body)
c.Assert(err, nil)
errorResponse := APIErrorResponse{}
err = xml.Unmarshal(data, &errorResponse)
@@ -384,7 +383,7 @@ func (s *TestSuiteCommon) TestBucketPolicy(c *check) {
c.Assert(err, nil)
c.Assert(response.StatusCode, http.StatusOK)
bucketPolicyReadBuf, err := ioutil.ReadAll(response.Body)
bucketPolicyReadBuf, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// Verify if downloaded policy matches with previously uploaded.
expectedPolicy, err := policy.ParseConfig(strings.NewReader(bucketPolicyStr), bucketName)
@@ -593,7 +592,7 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) {
c.Assert(response.StatusCode, http.StatusOK)
deleteResp := DeleteObjectsResponse{}
delRespBytes, err := ioutil.ReadAll(response.Body)
delRespBytes, err := io.ReadAll(response.Body)
c.Assert(err, nil)
err = xml.Unmarshal(delRespBytes, &deleteResp)
c.Assert(err, nil)
@@ -616,7 +615,7 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) {
c.Assert(response.StatusCode, http.StatusOK)
deleteResp = DeleteObjectsResponse{}
delRespBytes, err = ioutil.ReadAll(response.Body)
delRespBytes, err = io.ReadAll(response.Body)
c.Assert(err, nil)
err = xml.Unmarshal(delRespBytes, &deleteResp)
c.Assert(err, nil)
@@ -756,7 +755,7 @@ func (s *TestSuiteCommon) TestEmptyObject(c *check) {
var buffer bytes.Buffer
// extract the body of the response.
responseBody, err := ioutil.ReadAll(response.Body)
responseBody, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// assert the http response body content.
c.Assert(true, bytes.Equal(responseBody, buffer.Bytes()))
@@ -877,7 +876,7 @@ func (s *TestSuiteCommon) TestMultipleObjects(c *check) {
c.Assert(response.StatusCode, http.StatusOK)
// extract the response body.
responseBody, err := ioutil.ReadAll(response.Body)
responseBody, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// assert the content body for the expected object data.
c.Assert(true, bytes.Equal(responseBody, []byte("hello one")))
@@ -906,7 +905,7 @@ func (s *TestSuiteCommon) TestMultipleObjects(c *check) {
c.Assert(response.StatusCode, http.StatusOK)
// verify response data
responseBody, err = ioutil.ReadAll(response.Body)
responseBody, err = io.ReadAll(response.Body)
c.Assert(err, nil)
c.Assert(true, bytes.Equal(responseBody, []byte("hello two")))
@@ -933,7 +932,7 @@ func (s *TestSuiteCommon) TestMultipleObjects(c *check) {
c.Assert(response.StatusCode, http.StatusOK)
// verify object.
responseBody, err = ioutil.ReadAll(response.Body)
responseBody, err = io.ReadAll(response.Body)
c.Assert(err, nil)
c.Assert(true, bytes.Equal(responseBody, []byte("hello three")))
}
@@ -1059,7 +1058,7 @@ func (s *TestSuiteCommon) TestCopyObject(c *check) {
c.Assert(response.StatusCode, http.StatusOK)
// reading the response body.
// response body is expected to have the copied content of the first uploaded object.
object, err := ioutil.ReadAll(response.Body)
object, err := io.ReadAll(response.Body)
c.Assert(err, nil)
c.Assert(string(object), "hello world")
}
@@ -1233,7 +1232,7 @@ func (s *TestSuiteCommon) TestSHA256Mismatch(c *check) {
// Set the body to generate signature mismatch.
helloReader := bytes.NewReader([]byte("Hello, World"))
request.ContentLength = helloReader.Size()
request.Body = ioutil.NopCloser(helloReader)
request.Body = io.NopCloser(helloReader)
c.Assert(err, nil)
// execute the HTTP request.
@@ -1562,7 +1561,7 @@ func (s *TestSuiteCommon) TestPartialContent(c *check) {
response, err = s.client.Do(request)
c.Assert(err, nil)
c.Assert(response.StatusCode, http.StatusPartialContent)
partialObject, err := ioutil.ReadAll(response.Body)
partialObject, err := io.ReadAll(response.Body)
c.Assert(err, nil)
c.Assert(string(partialObject), "Wo")
@@ -1628,7 +1627,7 @@ func (s *TestSuiteCommon) TestListObjectsHandler(c *check) {
c.Assert(err, nil)
c.Assert(response.StatusCode, http.StatusOK)
getContent, err := ioutil.ReadAll(response.Body)
getContent, err := io.ReadAll(response.Body)
c.Assert(err, nil)
for _, expectedStr := range testCase.expectedStrings {
@@ -1703,7 +1702,7 @@ func (s *TestSuiteCommon) TestListObjectsSpecialCharactersHandler(c *check) {
c.Assert(err, nil)
c.Assert(response.StatusCode, http.StatusOK)
getContent, err := ioutil.ReadAll(response.Body)
getContent, err := io.ReadAll(response.Body)
c.Assert(err, nil)
for _, expectedStr := range testCase.expectedStrings {
@@ -1837,7 +1836,7 @@ func (s *TestSuiteCommon) TestGetObjectLarge10MiB(c *check) {
c.Assert(err, nil)
c.Assert(response.StatusCode, http.StatusOK)
// extract the content from response body.
getContent, err := ioutil.ReadAll(response.Body)
getContent, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// Compare putContent and getContent.
@@ -1898,7 +1897,7 @@ func (s *TestSuiteCommon) TestGetObjectLarge11MiB(c *check) {
c.Assert(err, nil)
c.Assert(response.StatusCode, http.StatusOK)
// fetch the content from response body.
getContent, err := ioutil.ReadAll(response.Body)
getContent, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// Get etag of the response content.
@@ -1986,7 +1985,7 @@ func (s *TestSuiteCommon) TestGetPartialObjectMisAligned(c *check) {
// Since only part of the object is requested, expecting response status to be http.StatusPartialContent .
c.Assert(response.StatusCode, http.StatusPartialContent)
// parse the HTTP response body.
getContent, err := ioutil.ReadAll(response.Body)
getContent, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// Compare putContent and getContent.
@@ -2052,7 +2051,7 @@ func (s *TestSuiteCommon) TestGetPartialObjectLarge11MiB(c *check) {
// Since only part of the object is requested, expecting response status to be http.StatusPartialContent .
c.Assert(response.StatusCode, http.StatusPartialContent)
// read the downloaded content from the response body.
getContent, err := ioutil.ReadAll(response.Body)
getContent, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// Compare putContent and getContent.
@@ -2119,7 +2118,7 @@ func (s *TestSuiteCommon) TestGetPartialObjectLarge10MiB(c *check) {
// Since only part of the object is requested, expecting response status to be http.StatusPartialContent .
c.Assert(response.StatusCode, http.StatusPartialContent)
// read the downloaded content from the response body.
getContent, err := ioutil.ReadAll(response.Body)
getContent, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// Compare putContent and getContent.

View File

@@ -22,7 +22,6 @@ import (
"crypto/hmac"
"encoding/hex"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
@@ -83,12 +82,12 @@ func skipContentSha256Cksum(r *http.Request) bool {
// Returns SHA256 for calculating canonical-request.
func getContentSha256Cksum(r *http.Request, stype serviceType) string {
if stype == serviceSTS {
payload, err := ioutil.ReadAll(io.LimitReader(r.Body, stsRequestBodyLimit))
payload, err := io.ReadAll(io.LimitReader(r.Body, stsRequestBodyLimit))
if err != nil {
logger.CriticalIf(GlobalContext, err)
}
sum256 := sha256.Sum256(payload)
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
r.Body = io.NopCloser(bytes.NewReader(payload))
return hex.EncodeToString(sum256[:])
}

View File

@@ -20,7 +20,7 @@ package cmd
import (
"bytes"
"encoding/gob"
"io/ioutil"
"io"
"testing"
"time"
@@ -125,7 +125,7 @@ func BenchmarkEncodeDiskInfoMsgp(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := msgp.Encode(ioutil.Discard, &v)
err := msgp.Encode(io.Discard, &v)
if err != nil {
b.Fatal(err)
}
@@ -146,7 +146,7 @@ func BenchmarkEncodeDiskInfoGOB(b *testing.B) {
Error: "",
}
enc := gob.NewEncoder(ioutil.Discard)
enc := gob.NewEncoder(io.Discard)
b.SetBytes(1)
b.ReportAllocs()
b.ResetTimer()
@@ -200,7 +200,7 @@ func BenchmarkEncodeFileInfoMsgp(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := msgp.Encode(ioutil.Discard, &v)
err := msgp.Encode(io.Discard, &v)
if err != nil {
b.Fatal(err)
}
@@ -209,7 +209,7 @@ func BenchmarkEncodeFileInfoMsgp(b *testing.B) {
func BenchmarkEncodeFileInfoGOB(b *testing.B) {
v := FileInfo{Volume: "testbucket", Name: "src/compress/zlib/reader_test.go", VersionID: "", IsLatest: true, Deleted: false, DataDir: "5e0153cc-621a-4267-8cb6-4919140d53b3", XLV1: false, ModTime: UTCNow(), Size: 3430, Mode: 0x0, Metadata: map[string]string{"X-Minio-Internal-Server-Side-Encryption-Iv": "jIJPsrkkVYYMvc7edBrNl+7zcM7+ZwXqMb/YAjBO/ck=", "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "my-minio-key", "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "IAAfAP2p7ZLv3UpLwBnsKkF2mtWba0qoY42tymK0szRgGvAxBNcXyHXYooe9dQpeeEJWgKUa/8R61oCy1mFwIg==", "X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "IAAfAPFYRDkHVirJBJxBixNj3PLWt78dFuUTyTLIdLG820J7XqLPBO4gpEEEWw/DoTsJIb+apnaem+rKtQ1h3Q==", "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256", "content-type": "application/octet-stream", "etag": "20000f00e2c3709dc94905c6ce31e1cadbd1c064e14acdcd44cf0ac2db777eeedd88d639fcd64de16851ade8b21a9a1a"}, Parts: []ObjectPartInfo{{ETag: "", Number: 1, Size: 3430, ActualSize: 3398}}, Erasure: ErasureInfo{Algorithm: "reedsolomon", DataBlocks: 2, ParityBlocks: 2, BlockSize: 10485760, Index: 3, Distribution: []int{3, 4, 1, 2}, Checksums: []ChecksumInfo{{PartNumber: 1, Algorithm: 0x3, Hash: []uint8{}}}}}
enc := gob.NewEncoder(ioutil.Discard)
enc := gob.NewEncoder(io.Discard)
b.SetBytes(1)
b.ReportAllocs()
b.ResetTimer()

View File

@@ -25,7 +25,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/url"
"path"
"strconv"
@@ -363,7 +362,7 @@ func (client *storageRESTClient) CreateFile(ctx context.Context, volume, path st
values.Set(storageRESTVolume, volume)
values.Set(storageRESTFilePath, path)
values.Set(storageRESTLength, strconv.Itoa(int(size)))
respBody, err := client.call(ctx, storageRESTMethodCreateFile, values, ioutil.NopCloser(reader), size)
respBody, err := client.call(ctx, storageRESTMethodCreateFile, values, io.NopCloser(reader), size)
defer xhttp.DrainBody(respBody)
if err != nil {
return err
@@ -541,7 +540,7 @@ func (client *storageRESTClient) ReadAll(ctx context.Context, volume string, pat
return nil, err
}
defer xhttp.DrainBody(respBody)
return ioutil.ReadAll(respBody)
return io.ReadAll(respBody)
}
// ReadFileStream - returns a reader for the requested file.

View File

@@ -26,7 +26,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os/user"
"path"
@@ -943,7 +942,7 @@ func waitForHTTPResponse(respBody io.Reader) (io.Reader, error) {
case 0:
return reader, nil
case 1:
errorText, err := ioutil.ReadAll(reader)
errorText, err := io.ReadAll(reader)
if err != nil {
return nil, err
}
@@ -1077,7 +1076,7 @@ func waitForHTTPStream(respBody io.ReadCloser, w io.Writer) error {
}
return err
case 1:
errorText, err := ioutil.ReadAll(respBody)
errorText, err := io.ReadAll(respBody)
if err != nil {
return err
}

View File

@@ -38,7 +38,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"math/big"
"math/rand"
"net"
@@ -100,7 +99,7 @@ func TestMain(m *testing.M) {
globalIsDistErasure = false
// Disable printing console messages during tests.
color.Output = ioutil.Discard
color.Output = io.Discard
// Minimum is error logs for testing
logger.MinimumLogLevel = logger.ErrorLvl
@@ -530,12 +529,12 @@ func truncateChunkByHalfSigv4(req *http.Request) (*http.Request, error) {
newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n",
hexChunkSize, chunkSignature))
newChunk, err := ioutil.ReadAll(bufReader)
newChunk, err := io.ReadAll(bufReader)
if err != nil {
return nil, err
}
newReq := req
newReq.Body = ioutil.NopCloser(
newReq.Body = io.NopCloser(
bytes.NewReader(bytes.Join([][]byte{newChunkHdr, newChunk[:len(newChunk)/2]},
[]byte(""))),
)
@@ -552,14 +551,14 @@ func malformDataSigV4(req *http.Request, newByte byte) (*http.Request, error) {
newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n",
hexChunkSize, chunkSignature))
newChunk, err := ioutil.ReadAll(bufReader)
newChunk, err := io.ReadAll(bufReader)
if err != nil {
return nil, err
}
newChunk[0] = newByte
newReq := req
newReq.Body = ioutil.NopCloser(
newReq.Body = io.NopCloser(
bytes.NewReader(bytes.Join([][]byte{newChunkHdr, newChunk},
[]byte(""))),
)
@@ -579,13 +578,13 @@ func malformChunkSizeSigV4(req *http.Request, badSize int64) (*http.Request, err
newHexChunkSize := []byte(fmt.Sprintf("%x", n))
newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n",
newHexChunkSize, chunkSignature))
newChunk, err := ioutil.ReadAll(bufReader)
newChunk, err := io.ReadAll(bufReader)
if err != nil {
return nil, err
}
newReq := req
newReq.Body = ioutil.NopCloser(
newReq.Body = io.NopCloser(
bytes.NewReader(bytes.Join([][]byte{newChunkHdr, newChunk},
[]byte(""))),
)
@@ -711,10 +710,10 @@ func newTestStreamingRequest(method, urlStr string, dataLength, chunkSize int64,
}
if body == nil {
// this is added to avoid panic during ioutil.ReadAll(req.Body).
// this is added to avoid panic during io.ReadAll(req.Body).
// th stack trace can be found here https://github.com/minio/minio/pull/2074 .
// This is very similar to https://github.com/golang/go/issues/7527.
req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
req.Body = io.NopCloser(bytes.NewReader([]byte("")))
}
contentLength := calculateStreamContentLength(dataLength, chunkSize)
@@ -728,7 +727,7 @@ func newTestStreamingRequest(method, urlStr string, dataLength, chunkSize int64,
body.Seek(0, 0)
// Add body
req.Body = ioutil.NopCloser(body)
req.Body = io.NopCloser(body)
req.ContentLength = contentLength
return req, nil
@@ -779,7 +778,7 @@ func assembleStreamingChunks(req *http.Request, body io.ReadSeeker, chunkSize in
}
}
req.Body = ioutil.NopCloser(bytes.NewReader(stream))
req.Body = io.NopCloser(bytes.NewReader(stream))
return req, nil
}
@@ -1077,7 +1076,7 @@ func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeek
case body == nil:
hashedPayload = getSHA256Hash([]byte{})
default:
payloadBytes, err := ioutil.ReadAll(body)
payloadBytes, err := io.ReadAll(body)
if err != nil {
return nil, err
}
@@ -1455,7 +1454,7 @@ func getListenNotificationURL(endPoint, bucketName string, prefixes, suffixes, e
func getRandomDisks(N int) ([]string, error) {
var erasureDisks []string
for i := 0; i < N; i++ {
path, err := ioutil.TempDir(globalTestTmpDir, "minio-")
path, err := os.MkdirTemp(globalTestTmpDir, "minio-")
if err != nil {
// Remove directories created so far.
removeRoots(erasureDisks)
@@ -1586,14 +1585,14 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN
rec := httptest.NewRecorder()
// reading the body to preserve it so that it can be used again for second attempt of sending unsigned HTTP request.
// If the body is read in the handler the same request cannot be made use of.
buf, err := ioutil.ReadAll(anonReq.Body)
buf, err := io.ReadAll(anonReq.Body)
if err != nil {
t.Fatal(failTestStr(anonTestStr, err.Error()))
}
// creating 2 read closer (to set as request body) from the body content.
readerOne := ioutil.NopCloser(bytes.NewBuffer(buf))
readerTwo := ioutil.NopCloser(bytes.NewBuffer(buf))
readerOne := io.NopCloser(bytes.NewBuffer(buf))
readerTwo := io.NopCloser(bytes.NewBuffer(buf))
anonReq.Body = readerOne
@@ -1610,7 +1609,7 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN
if anonReq.Method != http.MethodHead {
// read the response body.
var actualContent []byte
actualContent, err = ioutil.ReadAll(rec.Body)
actualContent, err = io.ReadAll(rec.Body)
if err != nil {
t.Fatal(failTestStr(anonTestStr, fmt.Sprintf("Failed parsing response body: <ERROR> %v", err)))
}
@@ -1640,7 +1639,7 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN
// verify the response body for `ErrAccessDenied` message =.
if anonReq.Method != http.MethodHead {
// read the response body.
actualContent, err := ioutil.ReadAll(rec.Body)
actualContent, err := io.ReadAll(rec.Body)
if err != nil {
t.Fatal(failTestStr(unknownSignTestStr, fmt.Sprintf("Failed parsing response body: <ERROR> %v", err)))
}
@@ -1692,7 +1691,7 @@ func ExecObjectLayerAPINilTest(t TestErrHandler, bucketName, objectName, instanc
// for other type of HTTP requests compare the response body content with the expected one.
if req.Method != http.MethodHead {
// read the response body.
actualContent, err := ioutil.ReadAll(rec.Body)
actualContent, err := io.ReadAll(rec.Body)
if err != nil {
t.Fatalf("MinIO %s: Failed parsing response body: <ERROR> %v", instanceType, err)
}
@@ -2234,7 +2233,7 @@ func uploadTestObject(t *testing.T, apiRouter http.Handler, creds auth.Credentia
checkRespErr := func(rec *httptest.ResponseRecorder, exp int) {
if rec.Code != exp {
b, err := ioutil.ReadAll(rec.Body)
b, err := io.ReadAll(rec.Body)
t.Fatalf("Expected: %v, Got: %v, Body: %s, err: %v", exp, rec.Code, string(b), err)
}
}

View File

@@ -26,7 +26,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
@@ -334,7 +333,7 @@ func downloadReleaseURL(u *url.URL, timeout time.Duration, mode string) (content
}
}
contentBytes, err := ioutil.ReadAll(resp.Body)
contentBytes, err := io.ReadAll(resp.Body)
if err != nil {
return content, AdminError{
Code: AdminUpdateUnexpectedFailure,
@@ -522,7 +521,7 @@ func downloadBinary(u *url.URL, mode string) (readerReturn []byte, err error) {
}
// convert a Reader to bytes
binaryFile, err := ioutil.ReadAll(reader)
binaryFile, err := io.ReadAll(reader)
if err != nil {
return nil, err
}
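
Since Go 1.16, ioutil.ReadAll is only a thin wrapper over io.ReadAll, so hunks like the two above in downloadReleaseURL and downloadBinary are pure renames with identical behavior. A hedged sketch of the same idiom on an HTTP response body (fetchBody and the URL are illustrative, not part of the diff):

package main

import (
	"fmt"
	"io"
	"net/http"
)

// fetchBody downloads a URL and returns the full response body.
func fetchBody(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body) // drop-in replacement for ioutil.ReadAll
}

func main() {
	b, err := fetchBody("https://example.com")
	fmt.Println(len(b), err)
}
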

View File

@@ -20,7 +20,6 @@ package cmd
import (
"encoding/hex"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
@@ -233,7 +232,7 @@ func TestIsKubernetes(t *testing.T) {
// Tests if the environment we are running is Helm chart.
func TestGetHelmVersion(t *testing.T) {
createTempFile := func(content string) string {
tmpfile, err := ioutil.TempFile("", "helm-testfile-")
tmpfile, err := os.CreateTemp("", "helm-testfile-")
if err != nil {
t.Fatalf("Unable to create temporary file. %s", err)
}
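
ioutil.TempFile maps one-to-one onto os.CreateTemp: same arguments, same semantics, and an empty dir argument still means the system temp directory. A small illustrative example, not taken from the test above; in new test code, t.TempDir is often a simpler alternative because it cleans up automatically, but this commit sticks to the direct equivalents:

package main

import (
	"fmt"
	"os"
)

func main() {
	// was: ioutil.TempFile("", "helm-testfile-")
	tmpfile, err := os.CreateTemp("", "helm-testfile-")
	if err != nil {
		fmt.Println("create temp:", err)
		return
	}
	defer os.Remove(tmpfile.Name()) // caller still owns cleanup
	defer tmpfile.Close()

	fmt.Println("created", tmpfile.Name())
}
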

View File

@@ -26,7 +26,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
@@ -301,7 +300,7 @@ func startProfiler(profilerType string) (minioProfiler, error) {
// library creates to store profiling data.
switch madmin.ProfilerType(profilerType) {
case madmin.ProfilerCPU:
dirPath, err := ioutil.TempDir("", "profile")
dirPath, err := os.MkdirTemp("", "profile")
if err != nil {
return nil, err
}
@@ -331,7 +330,7 @@ func startProfiler(profilerType string) (minioProfiler, error) {
if n := runtime.NumGoroutine(); n > 10000 && !globalIsCICD {
return nil, fmt.Errorf("unable to perform CPU IO profile with %d goroutines", n)
}
dirPath, err := ioutil.TempDir("", "profile")
dirPath, err := os.MkdirTemp("", "profile")
if err != nil {
return nil, err
}
@@ -396,7 +395,7 @@ func startProfiler(profilerType string) (minioProfiler, error) {
return buf.Bytes(), err
}
case madmin.ProfilerTrace:
dirPath, err := ioutil.TempDir("", "profile")
dirPath, err := os.MkdirTemp("", "profile")
if err != nil {
return nil, err
}
@@ -1184,7 +1183,7 @@ func MockOpenIDTestUserInteraction(ctx context.Context, pro OpenIDClientAppParam
return "", fmt.Errorf("request err: %v", err)
}
// {
// bodyBuf, err := ioutil.ReadAll(resp.Body)
// bodyBuf, err := io.ReadAll(resp.Body)
// if err != nil {
// return "", fmt.Errorf("Error reading body: %v", err)
// }
@@ -1206,7 +1205,7 @@ func MockOpenIDTestUserInteraction(ctx context.Context, pro OpenIDClientAppParam
return "", fmt.Errorf("post form err: %v", err)
}
// fmt.Printf("resp: %#v %#v\n", resp.StatusCode, resp.Header)
// bodyBuf, err := ioutil.ReadAll(resp.Body)
// bodyBuf, err := io.ReadAll(resp.Body)
// if err != nil {
// return "", fmt.Errorf("Error reading body: %v", err)
// }
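
ioutil.TempDir is likewise a straight rename to os.MkdirTemp. The profiler hunks create a scratch directory that the profiling code then writes its data under; a self-contained sketch of that shape with the cleanup made explicit (the prefix and file name are illustrative only):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// was: ioutil.TempDir("", "profile")
	dirPath, err := os.MkdirTemp("", "profile")
	if err != nil {
		fmt.Println("mkdir temp:", err)
		return
	}
	defer os.RemoveAll(dirPath) // remove the scratch directory when done

	fmt.Println("profiling data would land under", filepath.Join(dirPath, "cpu.out"))
}
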

View File

@@ -22,7 +22,6 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
@@ -343,7 +342,7 @@ func TestJSONLoad(t *testing.T) {
// Test jsonSave.
func TestJSONSave(t *testing.T) {
f, err := ioutil.TempFile("", "")
f, err := os.CreateTemp("", "")
if err != nil {
t.Fatal(err)
}

View File

@@ -37,7 +37,6 @@ import (
"github.com/klauspost/compress/zstd"
"github.com/minio/minio/internal/bucket/lifecycle"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
)
func TestReadXLMetaNoData(t *testing.T) {
@@ -405,7 +404,7 @@ func TestDeleteVersionWithSharedDataDir(t *testing.T) {
}
func Benchmark_mergeXLV2Versions(b *testing.B) {
data, err := ioutil.ReadFile("testdata/xl.meta-v1.2.zst")
data, err := os.ReadFile("testdata/xl.meta-v1.2.zst")
if err != nil {
b.Fatal(err)
}
@@ -454,7 +453,7 @@ func Benchmark_mergeXLV2Versions(b *testing.B) {
}
func Benchmark_xlMetaV2Shallow_Load(b *testing.B) {
data, err := ioutil.ReadFile("testdata/xl.meta-v1.2.zst")
data, err := os.ReadFile("testdata/xl.meta-v1.2.zst")
if err != nil {
b.Fatal(err)
}
@@ -501,7 +500,7 @@ func Benchmark_xlMetaV2Shallow_Load(b *testing.B) {
func Test_xlMetaV2Shallow_Load(t *testing.T) {
// Load Legacy
data, err := ioutil.ReadFile("testdata/xl.meta-v1.2.zst")
data, err := os.ReadFile("testdata/xl.meta-v1.2.zst")
if err != nil {
t.Fatal(err)
}
@@ -561,7 +560,7 @@ func Test_xlMetaV2Shallow_Load(t *testing.T) {
}
func Test_mergeXLV2Versions(t *testing.T) {
dataZ, err := ioutil.ReadFile("testdata/xl-meta-consist.zip")
dataZ, err := os.ReadFile("testdata/xl-meta-consist.zip")
if err != nil {
t.Fatal(err)
}
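
These fixture loads switch from ioutil.ReadFile to os.ReadFile, and the now-unused import of MinIO's internal ioutil package goes away with them. A hedged sketch of loading one of these zstd-compressed testdata files in memory, reusing the klauspost/compress dependency already imported above (loadFixture is a made-up helper):

package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

// loadFixture reads a .zst testdata file and returns the decompressed bytes.
func loadFixture(path string) ([]byte, error) {
	data, err := os.ReadFile(path) // was ioutil.ReadFile
	if err != nil {
		return nil, err
	}
	dec, err := zstd.NewReader(nil) // nil reader is fine when only DecodeAll is used
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return dec.DecodeAll(data, nil)
}

func main() {
	b, err := loadFixture("testdata/xl.meta-v1.2.zst")
	fmt.Println(len(b), err)
}
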

View File

@@ -24,7 +24,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
pathutil "path"
@@ -379,7 +378,7 @@ func (s *xlStorage) SetDiskLoc(poolIdx, setIdx, diskIdx int) {
func (s *xlStorage) Healing() *healingTracker {
healingFile := pathJoin(s.diskPath, minioMetaBucket,
bucketMetaPrefix, healingTrackerFilename)
b, err := ioutil.ReadFile(healingFile)
b, err := os.ReadFile(healingFile)
if err != nil {
return nil
}
@@ -676,7 +675,7 @@ func (s *xlStorage) GetDiskID() (string, error) {
}
formatFile := pathJoin(s.diskPath, minioMetaBucket, formatConfigFile)
b, err := ioutil.ReadFile(formatFile)
b, err := os.ReadFile(formatFile)
if err != nil {
// If the disk is still not initialized.
if osIsNotExist(err) {
@@ -1471,7 +1470,7 @@ func (s *xlStorage) readAllData(ctx context.Context, volumeDir string, filePath
// Get size for precise allocation.
stat, err := f.Stat()
if err != nil {
buf, err = ioutil.ReadAll(r)
buf, err = io.ReadAll(r)
return buf, dmTime, osErrToFileErr(err)
}
if stat.IsDir() {
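
The first two hunks read small metadata files (the healing tracker and format.json) whole, so os.ReadFile is a direct fit; the readAllData hunk instead keeps a streaming io.ReadAll fallback for the case where Stat on the already-open file fails and a precisely sized buffer cannot be allocated up front. A rough sketch of that two-path shape, with names simplified (readSmallFile is not a function in the diff):

package main

import (
	"fmt"
	"io"
	"os"
)

// readSmallFile prefers the stat-sized allocation of os.ReadFile and falls
// back to streaming the open file with io.ReadAll if that path fails.
func readSmallFile(path string) ([]byte, error) {
	if b, err := os.ReadFile(path); err == nil { // was ioutil.ReadFile
		return b, nil
	}
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return io.ReadAll(f) // was ioutil.ReadAll
}

func main() {
	b, err := readSmallFile("format.json")
	fmt.Println(len(b), err)
}
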

View File

@@ -23,7 +23,6 @@ import (
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"os"
slashpath "path"
"runtime"
@@ -145,7 +144,7 @@ func createPermDeniedFile(t *testing.T) (permDeniedDir string) {
t.Fatalf(fmt.Sprintf("Unable to create temporary directory %v. %v", slashpath.Join(permDeniedDir, "mybucket"), err))
}
if err = ioutil.WriteFile(slashpath.Join(permDeniedDir, "mybucket", "myobject"), []byte(""), 0o400); err != nil {
if err = os.WriteFile(slashpath.Join(permDeniedDir, "mybucket", "myobject"), []byte(""), 0o400); err != nil {
t.Fatalf(fmt.Sprintf("Unable to create file %v. %v", slashpath.Join(permDeniedDir, "mybucket", "myobject"), err))
}
@@ -197,7 +196,7 @@ func TestXLStorageIsDirEmpty(t *testing.T) {
// Should give false for not-a-directory.
dir2 := slashpath.Join(tmp, "file")
err := ioutil.WriteFile(dir2, []byte("hello"), 0o777)
err := os.WriteFile(dir2, []byte("hello"), 0o777)
if err != nil {
t.Fatal(err)
}
@@ -254,7 +253,7 @@ func TestXLStorageReadVersion(t *testing.T) {
t.Fatalf("Unable to cfgreate xlStorage test setup, %s", err)
}
xlMeta, _ := ioutil.ReadFile("testdata/xl.meta")
xlMeta, _ := os.ReadFile("testdata/xl.meta")
// Create files for the test cases.
if err = xlStorage.MakeVol(context.Background(), "exists"); err != nil {
@@ -472,7 +471,7 @@ func TestXLStorageMakeVol(t *testing.T) {
// Setup test environment.
// Create a file.
if err := ioutil.WriteFile(slashpath.Join(path, "vol-as-file"), []byte{}, os.ModePerm); err != nil {
if err := os.WriteFile(slashpath.Join(path, "vol-as-file"), []byte{}, os.ModePerm); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
// Create a directory.
@@ -566,7 +565,7 @@ func TestXLStorageDeleteVol(t *testing.T) {
if err = os.Mkdir(vol, 0o777); err != nil {
t.Fatalf("Unable to create directory, %s", err)
}
if err = ioutil.WriteFile(slashpath.Join(vol, "test-file"), []byte{}, os.ModePerm); err != nil {
if err = os.WriteFile(slashpath.Join(vol, "test-file"), []byte{}, os.ModePerm); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
@@ -1328,7 +1327,7 @@ func TestXLStorageFormatFileChange(t *testing.T) {
}
// Change the format.json such that "this" is changed to "randomid".
if err = ioutil.WriteFile(pathJoin(xlStorage.String(), minioMetaBucket, formatConfigFile), []byte(`{"version":"1","format":"xl","id":"592a41c2-b7cc-4130-b883-c4b5cb15965b","xl":{"version":"3","this":"randomid","sets":[["e07285a6-8c73-4962-89c6-047fb939f803","33b8d431-482d-4376-b63c-626d229f0a29","cff6513a-4439-4dc1-bcaa-56c9e880c352","randomid","9c9f21d5-1f15-4737-bce6-835faa0d9626","0a59b346-1424-4fc2-9fa2-a2e80541d0c1","7924a3dc-b69a-4971-9a2e-014966d6aebb","4d2b8dd9-4e48-444b-bdca-c89194b26042"]],"distributionAlgo":"CRCMOD"}}`), 0o644); err != nil {
if err = os.WriteFile(pathJoin(xlStorage.String(), minioMetaBucket, formatConfigFile), []byte(`{"version":"1","format":"xl","id":"592a41c2-b7cc-4130-b883-c4b5cb15965b","xl":{"version":"3","this":"randomid","sets":[["e07285a6-8c73-4962-89c6-047fb939f803","33b8d431-482d-4376-b63c-626d229f0a29","cff6513a-4439-4dc1-bcaa-56c9e880c352","randomid","9c9f21d5-1f15-4737-bce6-835faa0d9626","0a59b346-1424-4fc2-9fa2-a2e80541d0c1","7924a3dc-b69a-4971-9a2e-014966d6aebb","4d2b8dd9-4e48-444b-bdca-c89194b26042"]],"distributionAlgo":"CRCMOD"}}`), 0o644); err != nil {
t.Fatalf("ioutil.WriteFile failed with %s", err)
}
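
ioutil.WriteFile keeps its (name, data, perm) signature as os.WriteFile, so these fixtures only change the package prefix; the permission bits (0o400 for the permission-denied fixture, 0o644 for the rewritten format.json) carry over untouched. A small self-contained example of the new call, assuming a scratch directory (path and content are made up):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "xl-test-")
	if err != nil {
		fmt.Println("mkdir temp:", err)
		return
	}
	defer os.RemoveAll(dir)

	// was: ioutil.WriteFile(path, data, 0o644)
	path := filepath.Join(dir, "format.json")
	if err := os.WriteFile(path, []byte(`{"version":"1"}`), 0o644); err != nil {
		fmt.Println("write file:", err)
		return
	}
	fmt.Println("wrote", path)
}
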