mirror of https://github.com/minio/minio.git (synced 2024-12-23 21:55:53 -05:00)

run gofumpt cleanup across code-base (#14015)

commit f527c708f2 (parent 6f474982ed)

Makefile (14)
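The changes are mechanical: the cleanup only rewrites how existing code is written and does not change behavior. As a minimal sketch of the rewrites that repeat throughout this diff (hypothetical file and identifiers, not lines from the commit itself):

package example

import "fmt"

// Before this kind of cleanup the code below would read:
//   var (
//       defaultMode = 0600
//   )
//   func open(name string,
//       mode int) (string, error) {
//       var msg = fmt.Sprintf("%s %d", name, mode)
//       return msg, nil
//   }

var defaultMode = 0o600 // single-spec var blocks are unwrapped, octal literals use the 0o prefix

func open(name string,
	mode int, // the closing parenthesis of a wrapped parameter list moves to its own line
) (string, error) {
	msg := fmt.Sprintf("%s %d", name, mode) // var x = y inside functions becomes x := y
	return msg, nil
}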
@@ -20,6 +20,7 @@ help: ## print this help
getdeps: ## fetch necessary dependencies
@mkdir -p ${GOPATH}/bin
@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.43.0
@echo "Installing gofumpt" && go install mvdan.cc/gofumpt@latest
@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.7-0.20211026165309-e818a1881b0e
@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest

@@ -34,13 +35,14 @@ check-gen: ## check for updated autogenerated files
lint: ## runs golangci-lint suite of linters
@echo "Running $@ check"
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
@${GOPATH}/bin/golangci-lint cache clean
@${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
@${GOPATH}/bin/gofumpt -s -l .

check: test
test: verifiers build ## builds minio, runs linters, tests
@echo "Running unit tests"
@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./...
@CGO_ENABLED=0 go test -tags kqueue ./...

test-upgrade: build
@echo "Running minio upgrade tests"

@@ -66,18 +68,18 @@ test-site-replication: install ## verify automatic site replication
verify: ## verify minio various setups
@echo "Verifying build with race"
@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-build.sh)

verify-healing: ## verify healing and replacing disks with minio binary
@echo "Verify healing build with race"
@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-healing.sh)
@(env bash $(PWD)/buildscripts/unaligned-healing.sh)

build: checks ## builds minio to $(PWD)
@echo "Building minio binary to './minio'"
@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null

hotfix-vars:
$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
@@ -155,7 +155,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
return
}
var target madmin.BucketTarget
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(reqBytes, &target); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return

@@ -170,7 +170,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
cfg := globalServerConfig.Clone()
vars := mux.Vars(r)
var buf = &bytes.Buffer{}
buf := &bytes.Buffer{}
cw := config.NewConfigWriteTo(cfg, vars["key"])
if _, err := cw.WriteTo(buf); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

@@ -134,7 +134,6 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}

// SRPeerReplicateIAMItem - PUT /minio/admin/v3/site-replication/iam-item
@@ -613,7 +613,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
}
}
var createResp = madmin.AddServiceAccountResp{
createResp := madmin.AddServiceAccountResp{
Credentials: madmin.Credentials{
AccessKey: newCred.AccessKey,
SecretKey: newCred.SecretKey,

@@ -814,7 +814,7 @@ func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Requ
return
}
var infoResp = madmin.InfoServiceAccountResp{
infoResp := madmin.InfoServiceAccountResp{
ParentUser: svcAccount.ParentUser,
AccountStatus: svcAccount.Status,
ImpliedPolicy: impliedPolicy,

@@ -891,7 +891,7 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
serviceAccountsNames = append(serviceAccountsNames, svc.AccessKey)
}
var listResp = madmin.ListServiceAccountsResp{
listResp := madmin.ListServiceAccountsResp{
Accounts: serviceAccountsNames,
}

@@ -1251,7 +1251,7 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ
return
}
var newPolicies = make(map[string]iampolicy.Policy)
newPolicies := make(map[string]iampolicy.Policy)
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {

@@ -1283,7 +1283,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
return
}
var newPolicies = make(map[string]iampolicy.Policy)
newPolicies := make(map[string]iampolicy.Policy)
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
@@ -653,7 +653,6 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
if !strings.Contains(infoStr, `"s3:PutObject"`) || !strings.Contains(infoStr, ":"+bucket+"/") {
c.Fatalf("policy contains unexpected content!")
}
}

func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {

@@ -333,7 +333,6 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
// Reply with storage information (across nodes in a
// distributed setup) as json.
writeSuccessResponseJSON(w, jsonBytes)
}

// DataUsageInfoHandler - GET /minio/admin/v3/datausage

@@ -1332,7 +1331,7 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
if keyID == "" {
keyID = stat.DefaultKey
}
var response = madmin.KMSKeyStatus{
response := madmin.KMSKeyStatus{
KeyID: keyID,
}

@@ -1816,7 +1815,6 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
anonNetwork[anonEndpoint] = status
}
return anonNetwork
}

anonymizeDrives := func(drives []madmin.Disk) []madmin.Disk {

@@ -1916,7 +1914,6 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
return
}
}
}

func getTLSInfo() madmin.TLSInfo {

@@ -2042,7 +2039,6 @@ func assignPoolNumbers(servers []madmin.ServerProperties) {
}

func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
lambdaMap := make(map[string][]madmin.TargetIDStatus)
for _, tgt := range globalConfigTargetList.Targets() {

@@ -2284,7 +2280,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
}
if si.Mode == 0 {
// Not, set it to default.
si.Mode = 0600
si.Mode = 0o600
}
header, zerr := zip.FileInfoHeader(dummyFileInfo{
name: filename,
@@ -236,8 +236,8 @@ func TestServiceRestartHandler(t *testing.T) {
// buildAdminRequest - helper function to build an admin API request.
func buildAdminRequest(queryVal url.Values, method, path string,
contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {
contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error,
) {
req, err := newTestRequest(method,
adminPathPrefix+adminAPIVersionPrefix+path+"?"+queryVal.Encode(),
contentLength, bodySeeker)

@@ -380,5 +380,4 @@ func TestExtractHealInitParams(t *testing.T) {
}
}
}
}
@@ -278,8 +278,8 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
// background routine to clean up heal results after the
// aforementioned duration.
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLayer) (
respBytes []byte, apiErr APIError, errMsg string) {
respBytes []byte, apiErr APIError, errMsg string,
) {
if h.forceStarted {
_, apiErr = ahs.stopHealSequence(pathJoin(h.bucket, h.object))
if apiErr.Code != "" {

@@ -338,8 +338,8 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
// representation. The clientToken helps ensure there aren't
// conflicting clients fetching status.
func (ahs *allHealState) PopHealStatusJSON(hpath string,
clientToken string) ([]byte, APIErrorCode) {
clientToken string) ([]byte, APIErrorCode,
) {
// fetch heal state for given path
h, exists := ahs.getHealSequence(hpath)
if !exists {

@@ -453,8 +453,8 @@ type healSequence struct {
// NewHealSequence - creates healSettings, assumes bucket and
// objPrefix are already validated.
func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
hs madmin.HealOpts, forceStart bool) *healSequence {
hs madmin.HealOpts, forceStart bool,
) *healSequence {
reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
reqInfo.AppendTags("prefix", objPrefix)
ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))
@@ -38,7 +38,6 @@ type adminAPIHandlers struct{}
// registerAdminRouter - Add handler functions for each service REST API routes.
func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminAPI := adminAPIHandlers{}
// Admin router
adminRouter := router.PathPrefix(adminPathPrefix).Subrouter()

@@ -2125,7 +2125,7 @@ func toAPIError(ctx context.Context, err error) APIError {
return noError
}
var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
apiErr := errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
e, ok := err.(dns.ErrInvalidBucketName)
if ok {
code := toAPIErrorCode(ctx, e)

@@ -2238,7 +2238,6 @@ func toAPIError(ctx context.Context, err error) APIError {
// since S3 only sends one Error XML response.
if len(e.Errors) >= 1 {
apiErr.Code = e.Errors[0].Reason
}
case azblob.StorageError:
apiErr = APIError{
@@ -23,7 +23,7 @@ import (
func TestNewRequestID(t *testing.T) {
// Ensure that it returns an alphanumeric result of length 16.
var id = mustGetRequestID(UTCNow())
id := mustGetRequestID(UTCNow())
if len(id) != 16 {
t.Fail()
@@ -268,7 +268,6 @@ type StringMap map[string]string
// MarshalXML - StringMap marshals into XML.
func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
tokens := []xml.Token{start}
for key, value := range s {

@@ -417,8 +416,8 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
// serialized to match XML and JSON API spec output.
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
listbuckets := make([]Bucket, 0, len(buckets))
var data = ListBucketsResponse{}
var owner = Owner{
data := ListBucketsResponse{}
owner := Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}

@@ -439,14 +438,14 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
versions := make([]ObjectVersion, 0, len(resp.Objects))
var owner = Owner{
owner := Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var data = ListVersionsResponse{}
data := ListVersionsResponse{}
for _, object := range resp.Objects {
var content = ObjectVersion{}
content := ObjectVersion{}
if object.Name == "" {
continue
}

@@ -486,7 +485,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem := CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
prefixes = append(prefixes, prefixItem)
}

@@ -497,14 +496,14 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
contents := make([]Object, 0, len(resp.Objects))
var owner = Owner{
owner := Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var data = ListObjectsResponse{}
data := ListObjectsResponse{}
for _, object := range resp.Objects {
var content = Object{}
content := Object{}
if object.Name == "" {
continue
}

@@ -535,7 +534,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem := CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
prefixes = append(prefixes, prefixItem)
}

@@ -546,14 +545,14 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
contents := make([]Object, 0, len(objects))
var owner = Owner{
owner := Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var data = ListObjectsV2Response{}
data := ListObjectsV2Response{}
for _, object := range objects {
var content = Object{}
content := Object{}
if object.Name == "" {
continue
}

@@ -608,7 +607,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
for _, prefix := range prefixes {
var prefixItem = CommonPrefix{}
prefixItem := CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
commonPrefixes = append(commonPrefixes, prefixItem)
}

@@ -821,8 +820,8 @@ func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIE
// but accepts the error message directly (this allows messages to be
// dynamically generated.)
func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError,
errBody string, reqURL *url.URL) {
errBody string, reqURL *url.URL,
) {
reqInfo := logger.GetReqInfo(ctx)
errorResponse := APIErrorResponse{
Code: err.Code,
@@ -484,7 +484,6 @@ func registerAPIRouter(router *mux.Router) {
// If none of the routes match add default error handler routes
apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(methodNotAllowedHandler("S3")))
}

// corsHandler handler for CORS (Cross Origin Resource Sharing)

@@ -44,7 +44,6 @@ func TestS3EncodeName(t *testing.T) {
if testCase.expectedOutput != outputText {
t.Errorf("Expected `%s`, got `%s`", testCase.expectedOutput, outputText)
}
})
}
}
@@ -115,7 +115,6 @@ func newHealRoutine() *healRoutine {
tasks: make(chan healTask),
workers: workers,
}
}

// healDiskFormat - heals format.json, return value indicates if a

@@ -305,7 +305,6 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {
}
}
return disksToHeal
}

// monitorLocalDisksAndHeal - ensures that detected new disks are healed

@@ -212,7 +212,7 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
// bitrotSelfTest tries to catch any issue in the bitrot implementation
// early instead of silently corrupting data.
func bitrotSelfTest() {
var checksums = map[BitrotAlgorithm]string{
checksums := map[BitrotAlgorithm]string{
SHA256: "a7677ff19e0182e4d52e3a3db727804abc82a5818749336369552e54b838b004",
BLAKE2b512: "e519b7d84b1c3c917985f544773a35cf265dcab10948be3550320d156bab612124a5ae2ae5a8c73c0eea360f68b0e28136f26e858756dbfe7375a7389f26c669",
HighwayHash256: "39c0407ed3f01b18d22c85db4aeff11e060ca5f43131b0126731ca197cd42313",
@@ -449,7 +449,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
return
}
var objectsToDelete = map[ObjectToDelete]int{}
objectsToDelete := map[ObjectToDelete]int{}
getObjectInfoFn := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfoFn = api.CacheAPI().GetObjectInfo

@@ -606,8 +606,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}

// Generate response
var deleteErrors = make([]DeleteError, 0, len(deleteObjectsReq.Objects))
var deletedObjects = make([]DeletedObject, 0, len(deleteObjectsReq.Objects))
deleteErrors := make([]DeleteError, 0, len(deleteObjectsReq.Objects))
deletedObjects := make([]DeletedObject, 0, len(deleteObjectsReq.Objects))
for _, deleteResult := range deleteResults {
if deleteResult.errInfo.Code != "" {
deleteErrors = append(deleteErrors, deleteResult.errInfo)

@@ -1806,7 +1806,8 @@ func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseW
tgtArns := config.FilterTargetArns(
replication.ObjectOpts{
OpType: replication.ResyncReplicationType,
TargetArn: arn})
TargetArn: arn,
})
if len(tgtArns) == 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
@@ -81,8 +81,8 @@ func TestGetBucketLocationHandler(t *testing.T) {
}

func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
credentials auth.Credentials, t *testing.T,
) {
// test cases with sample input and expected output.
testCases := []struct {
bucketName string

@@ -163,7 +163,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2(http.MethodGet, getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}

@@ -210,7 +209,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
nilBucket := "dummy-bucket"
nilReq, err := newTestRequest(http.MethodGet, getBucketLocationURL("", nilBucket), 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}

@@ -225,8 +223,8 @@ func TestHeadBucketHandler(t *testing.T) {
}

func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
credentials auth.Credentials, t *testing.T,
) {
// test cases with sample input and expected output.
testCases := []struct {
bucketName string

@@ -282,7 +280,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2(http.MethodHead, getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}

@@ -297,7 +294,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
// Test for Anonymous/unsigned http request.
anonReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", bucketName), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
instanceType, bucketName, err)

@@ -315,7 +311,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
nilBucket := "dummy-bucket"
nilReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", nilBucket), 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}

@@ -331,8 +326,8 @@ func TestListMultipartUploadsHandler(t *testing.T) {
// testListMultipartUploadsHandler - Tests validate listing of multipart uploads.
func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
credentials auth.Credentials, t *testing.T,
) {
// Collection of non-exhaustive ListMultipartUploads test cases, valid errors
// and success responses.
testCases := []struct {

@@ -552,7 +547,6 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
testCases[6].uploadIDMarker, testCases[6].delimiter, testCases[6].maxUploads)
nilReq, err := newTestRequest(http.MethodGet, url, 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}

@@ -568,8 +562,8 @@ func TestListBucketsHandler(t *testing.T) {
// testListBucketsHandler - Tests validate listing of buckets.
func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
credentials auth.Credentials, t *testing.T,
) {
testCases := []struct {
bucketName string
accessKey string

@@ -615,7 +609,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
// verify response for V2 signed HTTP request.
reqV2, err := newTestSignedRequestV2(http.MethodGet, getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}

@@ -630,7 +623,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
// Test for Anonymous/unsigned http request.
// ListBucketsHandler doesn't support bucket policies, setting the policies shouldn't make a difference.
anonReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request.", instanceType)
}

@@ -646,7 +638,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
// The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.
nilReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}

@@ -661,8 +652,8 @@ func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
}

func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
credentials auth.Credentials, t *testing.T,
) {
var err error
contentBytes := []byte("hello")
@@ -150,8 +150,8 @@ func TestBucketLifecycle(t *testing.T) {
// Simple tests of bucket lifecycle: PUT, GET, DELETE.
// Tests are related and the order is important.
func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
creds auth.Credentials, t *testing.T) {
creds auth.Credentials, t *testing.T,
) {
// test cases with sample input and expected output.
testCases := []struct {
method string

@@ -266,8 +266,8 @@ func testBucketLifecycle(obj ObjectLayer, instanceType, bucketName string, apiRo
lifecycleResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}) {
},
) {
for i, testCase := range testCases {
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
@@ -178,9 +178,7 @@ func (t *transitionState) queueTransitionTask(oi ObjectInfo) {
}
}

var (
globalTransitionState *transitionState
)
var globalTransitionState *transitionState

func newTransitionState(ctx context.Context, objAPI ObjectLayer) *transitionState {
return &transitionState{

@@ -466,9 +464,7 @@ func (sp *SelectParameters) IsEmpty() bool {
return sp == nil
}

var (
selectParamsXMLName = "SelectParameters"
)
var selectParamsXMLName = "SelectParameters"

// UnmarshalXML - decodes XML data.
func (sp *SelectParameters) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
@@ -105,7 +105,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute
bucketName1 := fmt.Sprintf("%s-1", bucketName)

const n = 100
var start = make(chan struct{})
start := make(chan struct{})
var ok, errs int
var wg sync.WaitGroup
var mu sync.Mutex
@@ -147,8 +147,8 @@ func TestPutBucketPolicyHandler(t *testing.T) {
// testPutBucketPolicyHandler - Test for Bucket policy end point.
func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
credentials auth.Credentials, t *testing.T,
) {
bucketName1 := fmt.Sprintf("%s-1", bucketName)
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil {
t.Fatal(err)

@@ -333,7 +333,6 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// create unsigned HTTP request for PutBucketPolicyHandler.
anonReq, err := newTestRequest(http.MethodPut, getPutPolicyURL("", bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)))
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
instanceType, bucketName, err)

@@ -352,14 +351,12 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
nilReq, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", nilBucket),
0, nil, "", "", nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
// execute the object layer set to `nil` test.
// `ExecObjectLayerAPINilTest` manages the operation.
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
}

// Wrapper for calling Get Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.

@@ -465,7 +462,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
}

@@ -540,7 +536,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// Bucket policy related functions doesn't support anonymous requests, setting policies shouldn't make a difference.
// create unsigned HTTP request for PutBucketPolicyHandler.
anonReq, err := newTestRequest(http.MethodGet, getPutPolicyURL("", bucketName), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
instanceType, bucketName, err)

@@ -559,7 +554,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
nilReq, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", nilBucket),
0, nil, "", "", nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}

@@ -575,8 +569,8 @@ func TestDeleteBucketPolicyHandler(t *testing.T) {
// testDeleteBucketPolicyHandler - Test for Delete bucket policy end point.
func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
credentials auth.Credentials, t *testing.T,
) {
// template for constructing HTTP request body for PUT bucket policy.
bucketPolicyTemplate := `{
"Version": "2012-10-17",

@@ -743,7 +737,6 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
// Bucket policy related functions doesn't support anonymous requests, setting policies shouldn't make a difference.
// create unsigned HTTP request for PutBucketPolicyHandler.
anonReq, err := newTestRequest(http.MethodDelete, getPutPolicyURL("", bucketName), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
instanceType, bucketName, err)

@@ -762,7 +755,6 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
nilReq, err := newTestSignedRequestV4(http.MethodDelete, getDeletePolicyURL("", nilBucket),
0, nil, "", "", nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
@@ -200,7 +200,7 @@ func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.Buc
}

var policyInfo miniogopolicy.BucketAccessPolicy
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(data, &policyInfo); err != nil {
// This should not happen because data is valid to JSON data.
return nil, err

@@ -218,7 +218,7 @@ func BucketAccessPolicyToPolicy(policyInfo *miniogopolicy.BucketAccessPolicy) (*
}

var bucketPolicy policy.Policy
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(data, &bucketPolicy); err != nil {
// This should not happen because data is valid to JSON data.
return nil, err
@@ -55,7 +55,6 @@ func (r *ReplicationStats) Delete(bucket string) {
r.ulock.Lock()
defer r.ulock.Unlock()
delete(r.UsageCache, bucket)
}

// UpdateReplicaStat updates in-memory replica statistics with new values.
@@ -77,7 +77,8 @@ var replicatedInfosTests = []struct {
ReplicationStatus: replication.Failed,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
}},
},
},
expectedCompletedSize: 249,
expectedReplicationStatusInternal: "arn1=COMPLETED;arn2=FAILED;",
expectedReplicationStatus: replication.Failed,

@@ -102,7 +103,8 @@ var replicatedInfosTests = []struct {
ReplicationStatus: replication.Failed,
OpType: replication.ObjectReplicationType,
ReplicationAction: replicateAll,
}},
},
},
expectedCompletedSize: 0,
expectedReplicationStatusInternal: "arn1=PENDING;arn2=FAILED;",
expectedReplicationStatus: replication.Failed,

@@ -182,7 +184,6 @@ var parseReplicationDecisionTest = []struct {
func TestParseReplicateDecision(t *testing.T) {
for i, test := range parseReplicationDecisionTest {
dsc, err := parseReplicateDecision(test.expDsc.String())
if err != nil {
if test.expErr != err {
t.Errorf("Test%d (%s): Expected parse error got %t , want %t", i+1, test.name, err, test.expErr)
@@ -139,6 +139,7 @@ func (o mustReplicateOptions) ReplicationStatus() (s replication.StatusType) {
}
return s
}

func (o mustReplicateOptions) isExistingObjectReplication() bool {
return o.opType == replication.ExistingObjectReplicationType
}

@@ -146,6 +147,7 @@ func (o mustReplicateOptions) isExistingObjectReplication() bool {
func (o mustReplicateOptions) isMetadataReplication() bool {
return o.opType == replication.MetadataReplicationType
}

func getMustReplicateOptions(o ObjectInfo, op replication.Type, opts ObjectOptions) mustReplicateOptions {
if !op.Valid() {
op = replication.ObjectReplicationType

@@ -441,7 +443,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
}
}

var eventName = event.ObjectReplicationComplete
eventName := event.ObjectReplicationComplete
if replicationStatus == replication.Failed {
eventName = event.ObjectReplicationFailed
}

@@ -523,7 +525,8 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
VersionID: versionID,
Internal: miniogo.AdvancedGetOptions{
ReplicationProxyRequest: "false",
}}); isErrMethodNotAllowed(ErrorRespToObjectError(err, dobj.Bucket, dobj.ObjectName)) {
},
}); isErrMethodNotAllowed(ErrorRespToObjectError(err, dobj.Bucket, dobj.ObjectName)) {
if dobj.VersionID == "" {
rinfo.ReplicationStatus = replication.Completed
return

@@ -902,7 +905,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
// FIXME: add support for missing replication events
// - event.ObjectReplicationMissedThreshold
// - event.ObjectReplicationReplicatedAfterThreshold
var eventName = event.ObjectReplicationComplete
eventName := event.ObjectReplicationComplete
if rinfos.ReplicationStatus() == replication.Failed {
eventName = event.ObjectReplicationFailed
}

@@ -1058,7 +1061,8 @@ func replicateObjectToTarget(ctx context.Context, ri ReplicateObjectInfo, object
VersionID: objInfo.VersionID,
Internal: miniogo.AdvancedGetOptions{
ReplicationProxyRequest: "false",
}})
},
})
if cerr == nil {
rAction = getReplicationAction(objInfo, oi, ri.OpType)
rinfo.ReplicationStatus = replication.Completed

@@ -1117,7 +1121,8 @@ func replicateObjectToTarget(ctx context.Context, ri ReplicateObjectInfo, object
Internal: miniogo.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
}}
},
}
if _, err = c.CopyObject(ctx, tgt.Bucket, object, tgt.Bucket, object, getCopyObjMetadata(objInfo, tgt.StorageClass), srcOpts, dstOpts); err != nil {
rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("Unable to replicate metadata for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))

@@ -1213,7 +1218,8 @@ func replicateObjectWithMultipart(ctx context.Context, c *miniogo.Core, bucket,
SourceMTime: objInfo.ModTime,
// always set this to distinguish between `mc mirror` replication and serverside
ReplicationRequest: true,
}})
},
})
return err
}

@@ -1357,7 +1363,6 @@ func (p *ReplicationPool) AddWorker() {
return
}
}
}

// AddExistingObjectReplicateWorker adds a worker to queue existing objects that need to be sync'd

@@ -1671,6 +1676,7 @@ type replicationConfig struct {
func (c replicationConfig) Empty() bool {
return c.Config == nil
}

func (c replicationConfig) Replicate(opts replication.ObjectOpts) bool {
return c.Config.Replicate(opts)
}

@@ -1694,7 +1700,8 @@ func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc *Repli
DeleteMarker: oi.DeleteMarker,
VersionID: oi.VersionID,
OpType: replication.DeleteReplicationType,
ExistingObject: true}
ExistingObject: true,
}

tgtArns := c.Config.FilterTargetArns(opts)
// indicates no matching target with Existing object replication enabled.
@@ -75,7 +75,8 @@ var replicationConfigTests = []struct {
},
{ // 4. existing object replication enabled, versioning enabled; no reset in progress
name: "existing object replication enabled, versioning enabled; no reset in progress",
info: ObjectInfo{Size: 100,
info: ObjectInfo{
Size: 100,
ReplicationStatus: replication.Completed,
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
},
@ -93,174 +94,192 @@ func TestReplicationResync(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
var start = UTCNow().AddDate(0, 0, -1)
|
||||
var replicationConfigTests2 = []struct {
|
||||
info ObjectInfo
|
||||
name string
|
||||
rcfg replicationConfig
|
||||
dsc ReplicateDecision
|
||||
tgtStatuses map[string]replication.StatusType
|
||||
expectedSync bool
|
||||
}{
|
||||
{ // Cases 1-4: existing object replication enabled, versioning enabled, no reset - replication status varies
|
||||
// 1: Pending replication
|
||||
name: "existing object replication on object in Pending replication status",
|
||||
info: ObjectInfo{Size: 100,
|
||||
ReplicationStatusInternal: "arn1:PENDING;",
|
||||
ReplicationStatus: replication.Pending,
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
|
||||
},
|
||||
rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
}}}},
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
|
||||
expectedSync: true,
|
||||
},
|
||||
|
||||
{ // 2. replication status Failed
|
||||
name: "existing object replication on object in Failed replication status",
|
||||
info: ObjectInfo{Size: 100,
|
||||
ReplicationStatusInternal: "arn1:FAILED",
|
||||
ReplicationStatus: replication.Failed,
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
|
||||
},
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
|
||||
rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
}}}},
|
||||
expectedSync: true,
|
||||
},
|
||||
{ // 3. replication status unset
|
||||
name: "existing object replication on pre-existing unreplicated object",
|
||||
info: ObjectInfo{Size: 100,
|
||||
ReplicationStatus: replication.StatusType(""),
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
|
||||
},
|
||||
rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
}}}},
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
|
||||
expectedSync: true,
|
||||
},
|
||||
{ // 4. replication status Complete
|
||||
name: "existing object replication on object in Completed replication status",
|
||||
info: ObjectInfo{Size: 100,
|
||||
ReplicationStatusInternal: "arn1:COMPLETED",
|
||||
ReplicationStatus: replication.Completed,
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
|
||||
},
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", false, false)}},
|
||||
rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
}}}},
|
||||
expectedSync: false,
|
||||
},
|
||||
{ // 5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present
|
||||
name: "existing object replication with reset in progress and object in Pending status",
|
||||
info: ObjectInfo{Size: 100,
|
||||
ReplicationStatusInternal: "arn1:PENDING;",
|
||||
ReplicationStatus: replication.Pending,
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
|
||||
UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
|
||||
},
|
||||
expectedSync: true,
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
|
||||
rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
ResetID: "xyz",
|
||||
ResetBeforeDate: UTCNow(),
|
||||
}}},
|
||||
},
|
||||
},
|
||||
{ // 6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present
|
||||
name: "existing object replication with reset in progress and object in Failed status",
|
||||
info: ObjectInfo{Size: 100,
|
||||
ReplicationStatusInternal: "arn1:FAILED;",
|
||||
ReplicationStatus: replication.Failed,
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
|
||||
UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
|
||||
},
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
|
||||
rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
ResetID: "xyz",
|
||||
ResetBeforeDate: UTCNow(),
|
||||
}}},
|
||||
},
|
||||
expectedSync: true,
|
||||
},
|
||||
{ // 7. existing object replication enabled, versioning enabled, replication status unset & reset ID present
|
||||
name: "existing object replication with reset in progress and object never replicated before",
|
||||
info: ObjectInfo{Size: 100,
|
||||
ReplicationStatus: replication.StatusType(""),
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
|
||||
UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
|
||||
},
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
|
||||
rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
ResetID: "xyz",
|
||||
ResetBeforeDate: UTCNow(),
|
||||
}}},
|
||||
var (
|
||||
start = UTCNow().AddDate(0, 0, -1)
|
||||
replicationConfigTests2 = []struct {
|
||||
info ObjectInfo
|
||||
name string
|
||||
rcfg replicationConfig
|
||||
dsc ReplicateDecision
|
||||
tgtStatuses map[string]replication.StatusType
|
||||
expectedSync bool
|
||||
}{
|
||||
{ // Cases 1-4: existing object replication enabled, versioning enabled, no reset - replication status varies
|
||||
// 1: Pending replication
|
||||
name: "existing object replication on object in Pending replication status",
|
||||
info: ObjectInfo{
|
||||
Size: 100,
|
||||
ReplicationStatusInternal: "arn1:PENDING;",
|
||||
ReplicationStatus: replication.Pending,
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
|
||||
},
|
||||
rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
}}}},
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
|
||||
expectedSync: true,
|
||||
},
|
||||
|
||||
expectedSync: true,
|
||||
},
|
||||
{ // 2. replication status Failed
|
||||
name: "existing object replication on object in Failed replication status",
|
||||
info: ObjectInfo{
|
||||
Size: 100,
|
||||
ReplicationStatusInternal: "arn1:FAILED",
|
||||
ReplicationStatus: replication.Failed,
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
|
||||
},
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
|
||||
rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
}}}},
|
||||
expectedSync: true,
|
||||
},
|
||||
{ // 3. replication status unset
|
||||
name: "existing object replication on pre-existing unreplicated object",
|
||||
info: ObjectInfo{
|
||||
Size: 100,
|
||||
ReplicationStatus: replication.StatusType(""),
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
|
||||
},
|
||||
rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
}}}},
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
|
||||
expectedSync: true,
|
||||
},
|
||||
{ // 4. replication status Complete
|
||||
name: "existing object replication on object in Completed replication status",
|
||||
info: ObjectInfo{
|
||||
Size: 100,
|
||||
ReplicationStatusInternal: "arn1:COMPLETED",
|
||||
ReplicationStatus: replication.Completed,
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
|
||||
},
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", false, false)}},
|
||||
rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
}}}},
|
||||
expectedSync: false,
|
||||
},
|
||||
{ // 5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present
|
||||
name: "existing object replication with reset in progress and object in Pending status",
|
||||
info: ObjectInfo{
|
||||
Size: 100,
|
||||
ReplicationStatusInternal: "arn1:PENDING;",
|
||||
ReplicationStatus: replication.Pending,
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
|
||||
UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
|
||||
},
|
||||
expectedSync: true,
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
|
||||
rcfg: replicationConfig{
|
||||
remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
ResetID: "xyz",
|
||||
ResetBeforeDate: UTCNow(),
|
||||
}}},
|
||||
},
|
||||
},
|
||||
{ // 6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present
|
||||
name: "existing object replication with reset in progress and object in Failed status",
|
||||
info: ObjectInfo{
|
||||
Size: 100,
|
||||
ReplicationStatusInternal: "arn1:FAILED;",
|
||||
ReplicationStatus: replication.Failed,
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
|
||||
UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
|
||||
},
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
|
||||
rcfg: replicationConfig{
|
||||
remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
ResetID: "xyz",
|
||||
ResetBeforeDate: UTCNow(),
|
||||
}}},
|
||||
},
|
||||
expectedSync: true,
|
||||
},
|
||||
{ // 7. existing object replication enabled, versioning enabled, replication status unset & reset ID present
|
||||
name: "existing object replication with reset in progress and object never replicated before",
|
||||
info: ObjectInfo{
|
||||
Size: 100,
|
||||
ReplicationStatus: replication.StatusType(""),
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
|
||||
UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
|
||||
},
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
|
||||
rcfg: replicationConfig{
|
||||
remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
ResetID: "xyz",
|
||||
ResetBeforeDate: UTCNow(),
|
||||
}}},
|
||||
},
|
||||
|
||||
{ // 8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present
|
||||
name: "existing object replication enabled - reset in progress for an object in Completed status",
|
||||
info: ObjectInfo{Size: 100,
|
||||
ReplicationStatusInternal: "arn1:COMPLETED;",
|
||||
ReplicationStatus: replication.Completed,
|
||||
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df8",
|
||||
UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
|
||||
expectedSync: true,
|
||||
},
|
||||
expectedSync: true,
|
||||
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
|
||||
rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
|
||||
Arn: "arn1",
|
||||
ResetID: "xyz",
|
||||
ResetBeforeDate: UTCNow(),
|
||||
}}},
|
||||
},
|
||||
},
|
||||
{ // 9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different
|
||||
name: "existing object replication enabled, newer reset in progress on object in Pending replication status",
|
||||
info: ObjectInfo{Size: 100,
|
||||
ReplicationStatusInternal: "arn1:PENDING;",
|
||||
|
||||
ReplicationStatus: replication.Pending,
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", UTCNow().AddDate(0, 0, -1).Format(http.TimeFormat), "abc")},
ModTime: UTCNow().AddDate(0, 0, -2),
{ // 8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present
name: "existing object replication enabled - reset in progress for an object in Completed status",
info: ObjectInfo{
Size: 100,
ReplicationStatusInternal: "arn1:COMPLETED;",
ReplicationStatus: replication.Completed,
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df8",
UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
},
expectedSync: true,
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
rcfg: replicationConfig{
remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
Arn: "arn1",
ResetID: "xyz",
ResetBeforeDate: UTCNow(),
}}},
},
},
expectedSync: true,
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
Arn: "arn1",
ResetID: "xyz",
ResetBeforeDate: UTCNow(),
}}},
{ // 9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different
name: "existing object replication enabled, newer reset in progress on object in Pending replication status",
info: ObjectInfo{
Size: 100,
ReplicationStatusInternal: "arn1:PENDING;",

ReplicationStatus: replication.Pending,
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", UTCNow().AddDate(0, 0, -1).Format(http.TimeFormat), "abc")},
ModTime: UTCNow().AddDate(0, 0, -2),
},
expectedSync: true,
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
rcfg: replicationConfig{
remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
Arn: "arn1",
ResetID: "xyz",
ResetBeforeDate: UTCNow(),
}}},
},
},
},
{ // 10. existing object replication enabled, versioning enabled, replication status Complete & reset done
name: "reset done on object in Completed Status - ineligbile for re-replication",
info: ObjectInfo{Size: 100,
ReplicationStatusInternal: "arn1:COMPLETED;",
ReplicationStatus: replication.Completed,
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", start.Format(http.TimeFormat), "xyz")},
{ // 10. existing object replication enabled, versioning enabled, replication status Complete & reset done
name: "reset done on object in Completed Status - ineligbile for re-replication",
info: ObjectInfo{
Size: 100,
ReplicationStatusInternal: "arn1:COMPLETED;",
ReplicationStatus: replication.Completed,
VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", start.Format(http.TimeFormat), "xyz")},
},
expectedSync: false,
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
rcfg: replicationConfig{
remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
Arn: "arn1",
ResetID: "xyz",
ResetBeforeDate: start,
}}},
},
},
expectedSync: false,
dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
Arn: "arn1",
ResetID: "xyz",
ResetBeforeDate: start,
}}},
},
},
}
}
)

func TestReplicationResyncwrapper(t *testing.T) {
for i, test := range replicationConfigTests2 {

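Aside (illustrative, not part of the patch): the test-table hunks above show the composite-literal layout the cleanup enforces; either the whole literal stays on one line, or every field gets its own line with a trailing comma. A tiny runnable sketch with a stand-in type:

// Illustrative only; bucketTarget is a stand-in for the madmin.BucketTarget used above.
package main

import "fmt"

type bucketTarget struct {
	Arn     string
	ResetID string
}

func main() {
	// Before: fields sharing a line with the opening brace.
	//   t := bucketTarget{Arn: "arn1",
	//       ResetID: "xyz"}
	// After: one field per line, trailing comma before the closing brace.
	t := bucketTarget{
		Arn:     "arn1",
		ResetID: "xyz",
	}
	fmt.Println(t)
}
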
@ -414,7 +414,7 @@ func parseBucketTargetConfig(bucket string, cdata, cmetadata []byte) (*madmin.Bu
return nil, nil
}
data = cdata
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
if len(cmetadata) != 0 {
if err := json.Unmarshal(cmetadata, &meta); err != nil {
return nil, err

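Aside (illustrative, not part of the patch): the recurring change in hunks like the one above rewrites "var x = expr" inside a function body to the short form "x := expr". A minimal, self-contained sketch of the before/after shape; it uses encoding/json rather than the jsoniter alias from the real file:

// Hypothetical sketch of the short-declaration rewrite applied across this commit.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Before the cleanup the declaration read:
	//   var payload = map[string]string{"arn": "arn1"}
	// After gofumpt the same declaration uses a short assignment:
	payload := map[string]string{"arn": "arn1"}

	data, err := json.Marshal(payload)
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	fmt.Println(string(data))
}
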
@ -144,5 +144,4 @@ func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r

// Write bucket versioning configuration to client
writeSuccessResponseXML(w, configData)

}

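Aside (illustrative, not from the patch): this handler hunk drops a blank line that sat just before the closing brace; the cleanup removes empty lines at the end of a block. A minimal stand-in:

// Hypothetical helper; only the blank-line placement is the point here.
package main

import "fmt"

func writeConfig(out *[]string, payload string) {
	*out = append(*out, payload)
	// Before the cleanup an empty line followed this statement, right before '}'.
}

func main() {
	var lines []string
	writeConfig(&lines, "<VersioningConfiguration/>")
	fmt.Println(lines)
}
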
@ -68,8 +68,11 @@ import (

// serverDebugLog will enable debug printing
var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
var shardDiskTimeDelta time.Duration
var defaultAWSCredProvider []credentials.Provider

var (
shardDiskTimeDelta time.Duration
defaultAWSCredProvider []credentials.Provider
)

func init() {
if runtime.GOOS == "windows" {
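Aside (illustrative, not part of the patch): the hunk above folds two adjacent top-level var declarations into one grouped block. A stand-alone sketch; []string stands in for []credentials.Provider:

package main

import (
	"fmt"
	"time"
)

// Before:
//   var shardDiskTimeDelta time.Duration
//   var defaultAWSCredProvider []string
//
// After: adjacent declarations grouped into a single var block.
var (
	shardDiskTimeDelta     time.Duration
	defaultAWSCredProvider []string
)

func main() {
	fmt.Println(shardDiskTimeDelta, len(defaultAWSCredProvider))
}
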
@ -362,7 +365,6 @@ func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() s
}

func handleCommonCmdArgs(ctx *cli.Context) {

// Get "json" flag from command line argument and
// enable json and quite modes if json flag is turned on.
globalCLIContext.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json")
@ -669,7 +671,7 @@ func handleCommonEnvVars() {
publicIPs := env.Get(config.EnvPublicIPs, "")
if len(publicIPs) != 0 {
minioEndpoints := strings.Split(publicIPs, config.ValueSeparator)
var domainIPs = set.NewStringSet()
domainIPs := set.NewStringSet()
for _, endpoint := range minioEndpoints {
if net.ParseIP(endpoint) == nil {
// Checking if the IP is a DNS entry.
@ -786,7 +788,7 @@ func handleCommonEnvVars() {
logger.Fatal(err, fmt.Sprintf("Unable to load X.509 root CAs for KES from %q", env.Get(config.EnvKESServerCA, globalCertsCADir.Get())))
}

var defaultKeyID = env.Get(config.EnvKESKeyName, "")
defaultKeyID := env.Get(config.EnvKESKeyName, "")
KMS, err := kms.NewWithConfig(kms.Config{
Endpoints: endpoints,
DefaultKeyID: defaultKeyID,

@ -73,7 +73,8 @@ func Test_minioEnvironFromFile(t *testing.T) {
|
||||
expectedErr bool
|
||||
expectedEkvs []envKV
|
||||
}{
|
||||
{`
|
||||
{
|
||||
`
|
||||
export MINIO_ROOT_USER=minio
|
||||
export MINIO_ROOT_PASSWORD=minio123`,
|
||||
false,
|
||||
@ -89,7 +90,8 @@ export MINIO_ROOT_PASSWORD=minio123`,
|
||||
},
|
||||
},
|
||||
// Value with double quotes
|
||||
{`export MINIO_ROOT_USER="minio"`,
|
||||
{
|
||||
`export MINIO_ROOT_USER="minio"`,
|
||||
false,
|
||||
[]envKV{
|
||||
{
|
||||
@ -99,7 +101,8 @@ export MINIO_ROOT_PASSWORD=minio123`,
|
||||
},
|
||||
},
|
||||
// Value with single quotes
|
||||
{`export MINIO_ROOT_USER='minio'`,
|
||||
{
|
||||
`export MINIO_ROOT_USER='minio'`,
|
||||
false,
|
||||
[]envKV{
|
||||
{
|
||||
@ -108,7 +111,8 @@ export MINIO_ROOT_PASSWORD=minio123`,
|
||||
},
|
||||
},
|
||||
},
|
||||
{`
|
||||
{
|
||||
`
|
||||
MINIO_ROOT_USER=minio
|
||||
MINIO_ROOT_PASSWORD=minio123`,
|
||||
false,
|
||||
@ -123,7 +127,8 @@ MINIO_ROOT_PASSWORD=minio123`,
|
||||
},
|
||||
},
|
||||
},
|
||||
{`
|
||||
{
|
||||
`
|
||||
export MINIO_ROOT_USERminio
|
||||
export MINIO_ROOT_PASSWORD=minio123`,
|
||||
true,
|
||||
|
@ -50,7 +50,7 @@ import (
)

func initHelp() {
var kvs = map[string]config.KVS{
kvs := map[string]config.KVS{
config.EtcdSubSys: etcd.DefaultKVS,
config.CacheSubSys: cache.DefaultKVS,
config.CompressionSubSys: compress.DefaultKVS,
@ -78,7 +78,7 @@ func initHelp() {
config.RegisterDefaultKVS(kvs)

// Captures help for each sub-system
var helpSubSys = config.HelpKVS{
helpSubSys := config.HelpKVS{
config.HelpKV{
Key: config.SiteSubSys,
Description: "label the server and its location",
@ -205,7 +205,7 @@ func initHelp() {
}
}

var helpMap = map[string]config.HelpKVS{
helpMap := map[string]config.HelpKVS{
"": helpSubSys, // Help for all sub-systems.
config.SiteSubSys: config.SiteHelp,
config.RegionSubSys: config.RegionHelp,

@ -84,7 +84,7 @@ func (dir *ConfigDir) Get() string {

// Attempts to create all directories, ignores any permission denied errors.
func mkdirAllIgnorePerm(path string) error {
err := os.MkdirAll(path, 0700)
err := os.MkdirAll(path, 0o700)
if err != nil {
// It is possible in kubernetes like deployments this directory
// is already mounted and is not writable, ignore any write errors.

@ -2445,12 +2445,12 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
return err
} // if errConfigNotFound proceed to migrate..

var configFiles = []string{
configFiles := []string{
getConfigFile(),
getConfigFile() + ".deprecated",
configFile,
}
var config = &serverConfigV27{}
config := &serverConfigV27{}
for _, cfgFile := range configFiles {
if _, err = Load(cfgFile, config); err != nil {
if !osIsNotExist(err) && !osIsPermission(err) {

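Aside (illustrative, not part of the patch): the 0700 -> 0o700 and 0644 -> 0o644 rewrites seen in these hunks change only the spelling of the octal literal, not its value, so file permissions behave exactly as before. A quick check:

package main

import (
	"fmt"
	"os"
)

func main() {
	fmt.Println(0o644 == 0644, 0o700 == 0700) // true true: same values, new octal prefix
	fmt.Println(os.FileMode(0o644).String())  // -rw-r--r--
}
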
@ -51,7 +51,7 @@ func TestServerConfigMigrateV1(t *testing.T) {
|
||||
// Create a V1 config json file and store it
|
||||
configJSON := "{ \"version\":\"1\", \"accessKeyId\":\"abcde\", \"secretAccessKey\":\"abcdefgh\"}"
|
||||
configPath := rootPath + "/fsUsers.json"
|
||||
if err := ioutil.WriteFile(configPath, []byte(configJSON), 0644); err != nil {
|
||||
if err := ioutil.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
|
||||
t.Fatal("Unexpected error: ", err)
|
||||
}
|
||||
|
||||
@ -181,7 +181,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
|
||||
configPath := rootPath + SlashSeparator + minioConfigFile
|
||||
|
||||
// Create a corrupted config file
|
||||
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0644); err != nil {
|
||||
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0o644); err != nil {
|
||||
t.Fatal("Unexpected error: ", err)
|
||||
}
|
||||
// Fire a migrateConfig()
|
||||
@ -194,7 +194,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
|
||||
|
||||
// Create a V2 config json file and store it
|
||||
configJSON := "{ \"version\":\"2\", \"credentials\": {\"accessKeyId\":\"" + accessKey + "\", \"secretAccessKey\":\"" + secretKey + "\", \"region\":\"us-east-1\"}, \"mongoLogger\":{\"addr\":\"127.0.0.1:3543\", \"db\":\"foodb\", \"collection\":\"foo\"}, \"syslogLogger\":{\"network\":\"127.0.0.1:543\", \"addr\":\"addr\"}, \"fileLogger\":{\"filename\":\"log.out\"}}"
|
||||
if err := ioutil.WriteFile(configPath, []byte(configJSON), 0644); err != nil {
|
||||
if err := ioutil.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
|
||||
t.Fatal("Unexpected error: ", err)
|
||||
}
|
||||
|
||||
@ -244,7 +244,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
|
||||
configPath := rootPath + SlashSeparator + minioConfigFile
|
||||
|
||||
// Create a corrupted config file
|
||||
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0644); err != nil {
|
||||
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0o644); err != nil {
|
||||
t.Fatal("Unexpected error: ", err)
|
||||
}
|
||||
|
||||
@ -343,7 +343,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
|
||||
for i := 3; i <= 17; i++ {
|
||||
// Create a corrupted config file
|
||||
if err = ioutil.WriteFile(configPath, []byte(fmt.Sprintf("{ \"version\":\"%d\", \"credential\": { \"accessKey\": 1 } }", i)),
|
||||
0644); err != nil {
|
||||
0o644); err != nil {
|
||||
t.Fatal("Unexpected error: ", err)
|
||||
}
|
||||
|
||||
@ -354,7 +354,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create a corrupted config file for version '2'.
|
||||
if err = ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0644); err != nil {
|
||||
if err = ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0o644); err != nil {
|
||||
t.Fatal("Unexpected error: ", err)
|
||||
}
|
||||
|
||||
|
@ -46,8 +46,8 @@ const (
)

func listServerConfigHistory(ctx context.Context, objAPI ObjectLayer, withData bool, count int) (
[]madmin.ConfigHistoryEntry, error) {

[]madmin.ConfigHistoryEntry, error,
) {
var configHistory []madmin.ConfigHistoryEntry

// List all kvs
@ -140,7 +140,7 @@ func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg interface{})
return err
}

var configFile = path.Join(minioConfigPrefix, minioConfigFile)
configFile := path.Join(minioConfigPrefix, minioConfigFile)
if GlobalKMS != nil {
data, err = config.EncryptBytes(GlobalKMS, data, kms.Context{
minioMetaBucket: path.Join(minioMetaBucket, configFile),
@ -153,7 +153,7 @@ func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg interface{})
}

func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, error) {
var srvCfg = config.New()
srvCfg := config.New()
configFile := path.Join(minioConfigPrefix, minioConfigFile)
data, err := readConfig(ctx, objAPI, configFile)
if err != nil {
@ -174,7 +174,7 @@ func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, e
}
}

var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(data, &srvCfg); err != nil {
return nil, err
}

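Aside (illustrative): when a function's wrapped result list spills onto its own line, the cleanup also moves the closing parenthesis of the signature to its own line, as in listServerConfigHistory above. A stand-alone sketch with a hypothetical function:

package main

import "fmt"

// listEntries is a hypothetical stand-in showing the wrapped-signature layout.
func listEntries(prefix string, withData bool, count int) (
	[]string, error,
) {
	return []string{prefix}, nil
}

func main() {
	entries, err := listEntries("config/", false, 10)
	fmt.Println(entries, err)
}
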
@ -277,7 +277,6 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
|
||||
defer func() {
|
||||
console.Debugf(logPrefix+" Scanner time: %v %s\n", time.Since(t), logSuffix)
|
||||
}()
|
||||
|
||||
}
|
||||
|
||||
switch cache.Info.Name {
|
||||
@ -875,8 +874,10 @@ func (i *scannerItem) transformMetaDir() {
|
||||
i.objectName = split[len(split)-1]
|
||||
}
|
||||
|
||||
var applyActionsLogPrefix = color.Green("applyActions:")
|
||||
var applyVersionActionsLogPrefix = color.Green("applyVersionActions:")
|
||||
var (
|
||||
applyActionsLogPrefix = color.Green("applyActions:")
|
||||
applyVersionActionsLogPrefix = color.Green("applyVersionActions:")
|
||||
)
|
||||
|
||||
func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi ObjectInfo) (size int64) {
|
||||
if i.debug {
|
||||
@ -979,7 +980,6 @@ func (i *scannerItem) applyTierObjSweep(ctx context.Context, o ObjectLayer, oi O
|
||||
if ignoreNotFoundErr(err) != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// applyNewerNoncurrentVersionLimit removes noncurrent versions older than the most recent NewerNoncurrentVersions configured.
|
||||
@ -1100,7 +1100,6 @@ func applyTransitionRule(obj ObjectInfo) bool {
|
||||
}
|
||||
globalTransitionState.queueTransitionTask(obj)
|
||||
return true
|
||||
|
||||
}
|
||||
|
||||
func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, restoredObject bool) bool {
|
||||
|
@ -105,7 +105,7 @@ func TestDataUpdateTracker(t *testing.T) {
|
||||
defer cancel()
|
||||
dut.start(ctx, tmpDir)
|
||||
|
||||
var tests = []struct {
|
||||
tests := []struct {
|
||||
in string
|
||||
check []string // if not empty, check against these instead.
|
||||
exist bool
|
||||
|
@ -624,7 +624,7 @@ func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compact
|
||||
// Appears to be printed with _MINIO_SERVER_DEBUG=off
|
||||
// console.Debugf(" %d children found, compacting %v\n", total, path)
|
||||
|
||||
var leaves = make([]struct {
|
||||
leaves := make([]struct {
|
||||
objects uint64
|
||||
path dataUsageHash
|
||||
}, total)
|
||||
@ -774,7 +774,7 @@ func (d *dataUsageCache) tiersUsageInfo(buckets []BucketInfo) *allTierStats {
|
||||
// bucketsUsageInfo returns the buckets usage info as a map, with
|
||||
// key as bucket name
|
||||
func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]BucketUsageInfo {
|
||||
var dst = make(map[string]BucketUsageInfo, len(buckets))
|
||||
dst := make(map[string]BucketUsageInfo, len(buckets))
|
||||
for _, bucket := range buckets {
|
||||
e := d.find(bucket.Name)
|
||||
if e == nil {
|
||||
@ -797,7 +797,6 @@ func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]Bucke
|
||||
ReplicationPendingCount: stat.PendingCount,
|
||||
ReplicationFailedCount: stat.FailedCount,
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
dst[bucket.Name] = bui
|
||||
|
@ -40,7 +40,7 @@ const (
// storeDataUsageInBackend will store all objects sent on the gui channel until closed.
func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan DataUsageInfo) {
for dataUsageInfo := range dui {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
dataUsageJSON, err := json.Marshal(dataUsageInfo)
if err != nil {
logger.LogIf(ctx, err)
@ -105,7 +105,7 @@ func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsag
defer r.Close()

var dataUsageInfo DataUsageInfo
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.NewDecoder(r).Decode(&dataUsageInfo); err != nil {
return DataUsageInfo{}, err
}

@ -41,7 +41,7 @@ func TestDataUsageUpdate(t *testing.T) {
|
||||
}
|
||||
const bucket = "bucket"
|
||||
defer os.RemoveAll(base)
|
||||
var files = []usageTestFile{
|
||||
files := []usageTestFile{
|
||||
{name: "rootfile", size: 10000},
|
||||
{name: "rootfile2", size: 10000},
|
||||
{name: "dir1/d1file", size: 2000},
|
||||
@ -73,7 +73,7 @@ func TestDataUsageUpdate(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test dirs
|
||||
var want = []struct {
|
||||
want := []struct {
|
||||
path string
|
||||
isNil bool
|
||||
size, objs int
|
||||
@ -257,7 +257,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
|
||||
}
|
||||
scannerSleeper.Update(0, 0)
|
||||
defer os.RemoveAll(base)
|
||||
var files = []usageTestFile{
|
||||
files := []usageTestFile{
|
||||
{name: "bucket/rootfile", size: 10000},
|
||||
{name: "bucket/rootfile2", size: 10000},
|
||||
{name: "bucket/dir1/d1file", size: 2000},
|
||||
@ -302,7 +302,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test dirs
|
||||
var want = []struct {
|
||||
want := []struct {
|
||||
path string
|
||||
isNil bool
|
||||
size, objs int
|
||||
@ -543,7 +543,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
|
||||
}
|
||||
const bucket = "abucket"
|
||||
defer os.RemoveAll(base)
|
||||
var files = []usageTestFile{
|
||||
files := []usageTestFile{
|
||||
{name: "rootfile", size: 10000},
|
||||
{name: "rootfile2", size: 10000},
|
||||
{name: "dir1/d1file", size: 2000},
|
||||
|
@ -190,7 +190,7 @@ func newDiskCache(ctx context.Context, dir string, config cache.Config) (*diskCa
|
||||
quotaPct = config.Quota
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(dir, 0777); err != nil {
|
||||
if err := os.MkdirAll(dir, 0o777); err != nil {
|
||||
return nil, fmt.Errorf("Unable to initialize '%s' dir, %w", dir, err)
|
||||
}
|
||||
cache := diskCache{
|
||||
@ -619,10 +619,10 @@ func (c *diskCache) saveMetadata(ctx context.Context, bucket, object string, met
|
||||
cachedPath := getCacheSHADir(c.dir, bucket, object)
|
||||
metaPath := pathJoin(cachedPath, cacheMetaJSONFile)
|
||||
// Create cache directory if needed
|
||||
if err := os.MkdirAll(cachedPath, 0777); err != nil {
|
||||
if err := os.MkdirAll(cachedPath, 0o777); err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0666)
|
||||
f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0o666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -682,10 +682,10 @@ func (c *diskCache) updateMetadata(ctx context.Context, bucket, object, etag str
|
||||
cachedPath := getCacheSHADir(c.dir, bucket, object)
|
||||
metaPath := pathJoin(cachedPath, cacheMetaJSONFile)
|
||||
// Create cache directory if needed
|
||||
if err := os.MkdirAll(cachedPath, 0777); err != nil {
|
||||
if err := os.MkdirAll(cachedPath, 0o777); err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.OpenFile(metaPath, os.O_RDWR, 0666)
|
||||
f, err := os.OpenFile(metaPath, os.O_RDWR, 0o666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -732,7 +732,7 @@ func getCacheWriteBackSHADir(dir, bucket, object string) string {
|
||||
|
||||
// Cache data to disk with bitrot checksum added for each block of 1MB
|
||||
func (c *diskCache) bitrotWriteToCache(cachePath, fileName string, reader io.Reader, size uint64) (int64, string, error) {
|
||||
if err := os.MkdirAll(cachePath, 0777); err != nil {
|
||||
if err := os.MkdirAll(cachePath, 0o777); err != nil {
|
||||
return 0, "", err
|
||||
}
|
||||
filePath := pathJoin(cachePath, fileName)
|
||||
@ -807,6 +807,7 @@ func newCacheEncryptReader(content io.Reader, bucket, object string, metadata ma
|
||||
}
|
||||
return reader, nil
|
||||
}
|
||||
|
||||
func newCacheEncryptMetadata(bucket, object string, metadata map[string]string) ([]byte, error) {
|
||||
var sealedKey crypto.SealedKey
|
||||
if globalCacheKMS == nil {
|
||||
@ -827,6 +828,7 @@ func newCacheEncryptMetadata(bucket, object string, metadata map[string]string)
|
||||
metadata[SSECacheEncrypted] = ""
|
||||
return objectKey[:], nil
|
||||
}
|
||||
|
||||
func (c *diskCache) GetLockContext(ctx context.Context, bucket, object string) (RWLocker, LockContext, error) {
|
||||
cachePath := getCacheSHADir(c.dir, bucket, object)
|
||||
cLock := c.NewNSLockFn(cachePath)
|
||||
@ -879,12 +881,12 @@ func (c *diskCache) put(ctx context.Context, bucket, object string, data io.Read
|
||||
cachePath = getCacheWriteBackSHADir(c.dir, bucket, object)
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(cachePath, 0777); err != nil {
|
||||
if err := os.MkdirAll(cachePath, 0o777); err != nil {
|
||||
return oi, err
|
||||
}
|
||||
var metadata = cloneMSS(opts.UserDefined)
|
||||
var reader = data
|
||||
var actualSize = uint64(size)
|
||||
metadata := cloneMSS(opts.UserDefined)
|
||||
reader := data
|
||||
actualSize := uint64(size)
|
||||
if globalCacheKMS != nil {
|
||||
reader, err = newCacheEncryptReader(data, bucket, object, metadata)
|
||||
if err != nil {
|
||||
@ -933,14 +935,14 @@ func (c *diskCache) putRange(ctx context.Context, bucket, object string, data io
|
||||
return errDiskFull
|
||||
}
|
||||
cachePath := getCacheSHADir(c.dir, bucket, object)
|
||||
if err := os.MkdirAll(cachePath, 0777); err != nil {
|
||||
if err := os.MkdirAll(cachePath, 0o777); err != nil {
|
||||
return err
|
||||
}
|
||||
var metadata = cloneMSS(opts.UserDefined)
|
||||
var reader = data
|
||||
var actualSize = uint64(rlen)
|
||||
metadata := cloneMSS(opts.UserDefined)
|
||||
reader := data
|
||||
actualSize := uint64(rlen)
|
||||
// objSize is the actual size of object (with encryption overhead if any)
|
||||
var objSize = uint64(size)
|
||||
objSize := uint64(size)
|
||||
if globalCacheKMS != nil {
|
||||
reader, err = newCacheEncryptReader(data, bucket, object, metadata)
|
||||
if err != nil {
|
||||
@ -1269,12 +1271,12 @@ func (c *diskCache) NewMultipartUpload(ctx context.Context, bucket, object, uID
|
||||
|
||||
cachePath := getMultipartCacheSHADir(c.dir, bucket, object)
|
||||
uploadIDDir := path.Join(cachePath, uploadID)
|
||||
if err := os.MkdirAll(uploadIDDir, 0777); err != nil {
|
||||
if err := os.MkdirAll(uploadIDDir, 0o777); err != nil {
|
||||
return uploadID, err
|
||||
}
|
||||
metaPath := pathJoin(uploadIDDir, cacheMetaJSONFile)
|
||||
|
||||
f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0666)
|
||||
f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0o666)
|
||||
if err != nil {
|
||||
return uploadID, err
|
||||
}
|
||||
@ -1331,7 +1333,7 @@ func (c *diskCache) PutObjectPart(ctx context.Context, bucket, object, uploadID
|
||||
return oi, errDiskFull
|
||||
}
|
||||
reader := data
|
||||
var actualSize = uint64(size)
|
||||
actualSize := uint64(size)
|
||||
if globalCacheKMS != nil {
|
||||
reader, err = newCachePartEncryptReader(ctx, bucket, object, partID, data, size, meta.Meta)
|
||||
if err != nil {
|
||||
@ -1380,7 +1382,7 @@ func (c *diskCache) SavePartMetadata(ctx context.Context, bucket, object, upload
|
||||
defer uploadLock.Unlock(ulkctx.Cancel)
|
||||
|
||||
metaPath := pathJoin(uploadDir, cacheMetaJSONFile)
|
||||
f, err := os.OpenFile(metaPath, os.O_RDWR, 0666)
|
||||
f, err := os.OpenFile(metaPath, os.O_RDWR, 0o666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1558,7 +1560,7 @@ func (c *diskCache) CompleteMultipartUpload(ctx context.Context, bucket, object,
|
||||
uploadMeta.Hits++
|
||||
metaPath := pathJoin(uploadIDDir, cacheMetaJSONFile)
|
||||
|
||||
f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0666)
|
||||
f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0o666)
|
||||
if err != nil {
|
||||
return oi, err
|
||||
}
|
||||
|
@ -113,7 +113,6 @@ func cacheControlOpts(o ObjectInfo) *cacheControl {
|
||||
if strings.EqualFold(k, "cache-control") {
|
||||
headerVal = v
|
||||
}
|
||||
|
||||
}
|
||||
if headerVal == "" {
|
||||
return nil
|
||||
@ -581,6 +580,7 @@ func (t *multiWriter) Write(p []byte) (n int, err error) {
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func cacheMultiWriter(w1 io.Writer, w2 *io.PipeWriter) io.Writer {
|
||||
return &multiWriter{backendWriter: w1, cacheWriter: w2}
|
||||
}
|
||||
|
@ -38,7 +38,7 @@ func TestGetCacheControlOpts(t *testing.T) {
|
||||
{"max-age=2592000, no-store", timeSentinel, &cacheControl{maxAge: 2592000, sMaxAge: 0, noStore: true, minFresh: 0, expiry: time.Time{}}, false},
|
||||
{"must-revalidate, max-age=600", timeSentinel, &cacheControl{maxAge: 600, sMaxAge: 0, minFresh: 0, expiry: time.Time{}}, false},
|
||||
{"s-maxAge=2500, max-age=600", timeSentinel, &cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Time{}}, false},
|
||||
{"s-maxAge=2500, max-age=600", expiry, &cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Date(2015, time.October, 21, 07, 28, 00, 00, time.UTC)}, false},
|
||||
{"s-maxAge=2500, max-age=600", expiry, &cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Date(2015, time.October, 21, 0o7, 28, 0o0, 0o0, time.UTC)}, false},
|
||||
{"s-maxAge=2500, max-age=600s", timeSentinel, &cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Time{}}, true},
|
||||
}
|
||||
|
||||
@ -61,7 +61,6 @@ func TestGetCacheControlOpts(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIsMetadataSame(t *testing.T) {
|
||||
|
||||
testCases := []struct {
|
||||
m1 map[string]string
|
||||
m2 map[string]string
|
||||
@ -148,6 +147,7 @@ func TestNewFileScorer(t *testing.T) {
|
||||
t.Fatal("unexpected file list", fs.queueString())
|
||||
}
|
||||
}
|
||||
|
||||
func TestBytesToClear(t *testing.T) {
|
||||
testCases := []struct {
|
||||
total int64
|
||||
|
@ -137,7 +137,6 @@ func (c *cacheObjects) incHitsToMeta(ctx context.Context, dcache *diskCache, buc
|
||||
|
||||
// Backend metadata could have changed through server side copy - reset cache metadata if that is the case
|
||||
func (c *cacheObjects) updateMetadataIfChanged(ctx context.Context, dcache *diskCache, bucket, object string, bkObjectInfo, cacheObjInfo ObjectInfo, rs *HTTPRangeSpec) error {
|
||||
|
||||
bkMeta := make(map[string]string, len(bkObjectInfo.UserDefined))
|
||||
cacheMeta := make(map[string]string, len(cacheObjInfo.UserDefined))
|
||||
for k, v := range bkObjectInfo.UserDefined {
|
||||
|
@ -26,7 +26,6 @@ import (
|
||||
)
|
||||
|
||||
func TestDynamicTimeoutSingleIncrease(t *testing.T) {
|
||||
|
||||
timeout := newDynamicTimeout(time.Minute, time.Second)
|
||||
|
||||
initial := timeout.Timeout()
|
||||
@ -43,7 +42,6 @@ func TestDynamicTimeoutSingleIncrease(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDynamicTimeoutDualIncrease(t *testing.T) {
|
||||
|
||||
timeout := newDynamicTimeout(time.Minute, time.Second)
|
||||
|
||||
initial := timeout.Timeout()
|
||||
@ -66,7 +64,6 @@ func TestDynamicTimeoutDualIncrease(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDynamicTimeoutSingleDecrease(t *testing.T) {
|
||||
|
||||
timeout := newDynamicTimeout(time.Minute, time.Second)
|
||||
|
||||
initial := timeout.Timeout()
|
||||
@ -83,7 +80,6 @@ func TestDynamicTimeoutSingleDecrease(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDynamicTimeoutDualDecrease(t *testing.T) {
|
||||
|
||||
timeout := newDynamicTimeout(time.Minute, time.Second)
|
||||
|
||||
initial := timeout.Timeout()
|
||||
@ -106,7 +102,6 @@ func TestDynamicTimeoutDualDecrease(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDynamicTimeoutManyDecreases(t *testing.T) {
|
||||
|
||||
timeout := newDynamicTimeout(time.Minute, time.Second)
|
||||
|
||||
initial := timeout.Timeout()
|
||||
@ -116,7 +111,6 @@ func TestDynamicTimeoutManyDecreases(t *testing.T) {
|
||||
for i := 0; i < dynamicTimeoutLogSize; i++ {
|
||||
timeout.LogSuccess(successTimeout)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
adjusted := timeout.Timeout()
|
||||
@ -151,7 +145,6 @@ func TestDynamicTimeoutConcurrent(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDynamicTimeoutHitMinimum(t *testing.T) {
|
||||
|
||||
const minimum = 30 * time.Second
|
||||
timeout := newDynamicTimeout(time.Minute, minimum)
|
||||
|
||||
@ -172,7 +165,6 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
|
||||
}
|
||||
|
||||
func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() float64) {
|
||||
|
||||
const successTimeout = 20 * time.Second
|
||||
|
||||
for i := 0; i < dynamicTimeoutLogSize; i++ {
|
||||
@ -192,7 +184,6 @@ func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() fl
|
||||
}
|
||||
|
||||
func TestDynamicTimeoutAdjustExponential(t *testing.T) {
|
||||
|
||||
timeout := newDynamicTimeout(time.Minute, time.Second)
|
||||
|
||||
rand.Seed(0)
|
||||
@ -200,9 +191,7 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) {
|
||||
initial := timeout.Timeout()
|
||||
|
||||
for try := 0; try < 10; try++ {
|
||||
|
||||
testDynamicTimeoutAdjust(t, timeout, rand.ExpFloat64)
|
||||
|
||||
}
|
||||
|
||||
adjusted := timeout.Timeout()
|
||||
@ -212,7 +201,6 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDynamicTimeoutAdjustNormalized(t *testing.T) {
|
||||
|
||||
timeout := newDynamicTimeout(time.Minute, time.Second)
|
||||
|
||||
rand.Seed(0)
|
||||
@ -220,11 +208,9 @@ func TestDynamicTimeoutAdjustNormalized(t *testing.T) {
|
||||
initial := timeout.Timeout()
|
||||
|
||||
for try := 0; try < 10; try++ {
|
||||
|
||||
testDynamicTimeoutAdjust(t, timeout, func() float64 {
|
||||
return 1.0 + rand.NormFloat64()
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
adjusted := timeout.Timeout()
|
||||
|
@ -186,7 +186,7 @@ func rotateKey(oldKey []byte, newKeyID string, newKey []byte, bucket, object str
|
||||
// client provided it. Therefore, we create a copy
|
||||
// of the client provided context and add the bucket
|
||||
// key, if not present.
|
||||
var kmsCtx = kms.Context{}
|
||||
kmsCtx := kms.Context{}
|
||||
for k, v := range ctx {
|
||||
kmsCtx[k] = v
|
||||
}
|
||||
@ -253,7 +253,7 @@ func newEncryptMetadata(kind crypto.Type, keyID string, key []byte, bucket, obje
|
||||
// client provided it. Therefore, we create a copy
|
||||
// of the client provided context and add the bucket
|
||||
// key, if not present.
|
||||
var kmsCtx = kms.Context{}
|
||||
kmsCtx := kms.Context{}
|
||||
for k, v := range ctx {
|
||||
kmsCtx[k] = v
|
||||
}
|
||||
@ -443,7 +443,6 @@ func newDecryptReaderWithObjectKey(client io.Reader, objectEncryptionKey []byte,
|
||||
// DecryptBlocksRequestR - same as DecryptBlocksRequest but with a
|
||||
// reader
|
||||
func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, seqNumber uint32, partStart int, oi ObjectInfo, copySource bool) (io.Reader, error) {
|
||||
|
||||
bucket, object := oi.Bucket, oi.Name
|
||||
// Single part case
|
||||
if !oi.isMultipart() {
|
||||
|
@ -64,7 +64,6 @@ func TestEncryptRequest(t *testing.T) {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
_, _, err := EncryptRequest(content, req, "bucket", "object", test.metadata)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: Failed to encrypt request: %v", i, err)
|
||||
}
|
||||
@ -285,14 +284,13 @@ func TestGetDecryptedRange(t *testing.T) {
|
||||
)
|
||||
|
||||
// Single part object tests
|
||||
var (
|
||||
mkSPObj = func(s int64) ObjectInfo {
|
||||
return ObjectInfo{
|
||||
Size: getEncSize(s),
|
||||
UserDefined: udMap(false),
|
||||
}
|
||||
|
||||
mkSPObj := func(s int64) ObjectInfo {
|
||||
return ObjectInfo{
|
||||
Size: getEncSize(s),
|
||||
UserDefined: udMap(false),
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
testSP := []struct {
|
||||
decSz int64
|
||||
@ -325,7 +323,7 @@ func TestGetDecryptedRange(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Errorf("Case %d: unexpected err: %v", i, err)
|
||||
}
|
||||
var rLen = pkgSz + 32
|
||||
rLen := pkgSz + 32
|
||||
if test.decSz < pkgSz {
|
||||
rLen = test.decSz + 32
|
||||
}
|
||||
@ -341,7 +339,7 @@ func TestGetDecryptedRange(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Errorf("Case %d: unexpected err: %v", i, err)
|
||||
}
|
||||
var rLen = (pkgSz + 32) * 2
|
||||
rLen := (pkgSz + 32) * 2
|
||||
if test.decSz < 2*pkgSz {
|
||||
rLen = (pkgSz + 32) + (test.decSz - pkgSz + 32)
|
||||
}
|
||||
@ -356,7 +354,7 @@ func TestGetDecryptedRange(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Errorf("Case %d: unexpected err: %v", i, err)
|
||||
}
|
||||
var rLen = (pkgSz + 32) * 2
|
||||
rLen := (pkgSz + 32) * 2
|
||||
if test.decSz-pkgSz < 2*pkgSz {
|
||||
rLen = (pkgSz + 32) + (test.decSz - pkgSz + 32*2)
|
||||
}
|
||||
@ -551,60 +549,90 @@ var getDefaultOptsTests = []struct {
|
||||
encryptionType encrypt.Type
|
||||
err error
|
||||
}{
|
||||
{headers: http.Header{xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
|
||||
xhttp.AmzServerSideEncryptionCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
|
||||
{
|
||||
headers: http.Header{
|
||||
xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
|
||||
xhttp.AmzServerSideEncryptionCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
|
||||
},
|
||||
copySource: false,
|
||||
metadata: nil,
|
||||
encryptionType: encrypt.SSEC,
|
||||
err: nil}, // 0
|
||||
{headers: http.Header{xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
|
||||
xhttp.AmzServerSideEncryptionCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
|
||||
err: nil,
|
||||
}, // 0
|
||||
{
|
||||
headers: http.Header{
|
||||
xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
|
||||
xhttp.AmzServerSideEncryptionCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
|
||||
},
|
||||
copySource: true,
|
||||
metadata: nil,
|
||||
encryptionType: "",
|
||||
err: nil}, // 1
|
||||
{headers: http.Header{xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
|
||||
xhttp.AmzServerSideEncryptionCustomerKey: []string{"Mz"},
|
||||
xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
|
||||
err: nil,
|
||||
}, // 1
|
||||
{
|
||||
headers: http.Header{
|
||||
xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
|
||||
xhttp.AmzServerSideEncryptionCustomerKey: []string{"Mz"},
|
||||
xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
|
||||
},
|
||||
copySource: false,
|
||||
metadata: nil,
|
||||
encryptionType: "",
|
||||
err: crypto.ErrInvalidCustomerKey}, // 2
|
||||
{headers: http.Header{xhttp.AmzServerSideEncryption: []string{"AES256"}},
|
||||
err: crypto.ErrInvalidCustomerKey,
|
||||
}, // 2
|
||||
{
|
||||
headers: http.Header{xhttp.AmzServerSideEncryption: []string{"AES256"}},
|
||||
copySource: false,
|
||||
metadata: nil,
|
||||
encryptionType: encrypt.S3,
|
||||
err: nil}, // 3
|
||||
{headers: http.Header{},
|
||||
err: nil,
|
||||
}, // 3
|
||||
{
|
||||
headers: http.Header{},
|
||||
copySource: false,
|
||||
metadata: map[string]string{crypto.MetaSealedKeyS3: base64.StdEncoding.EncodeToString(make([]byte, 64)),
|
||||
metadata: map[string]string{
|
||||
crypto.MetaSealedKeyS3: base64.StdEncoding.EncodeToString(make([]byte, 64)),
|
||||
crypto.MetaKeyID: "kms-key",
|
||||
crypto.MetaDataEncryptionKey: "m-key"},
|
||||
crypto.MetaDataEncryptionKey: "m-key",
|
||||
},
|
||||
encryptionType: encrypt.S3,
|
||||
err: nil}, // 4
|
||||
{headers: http.Header{},
|
||||
err: nil,
|
||||
}, // 4
|
||||
{
|
||||
headers: http.Header{},
|
||||
copySource: true,
|
||||
metadata: map[string]string{crypto.MetaSealedKeyS3: base64.StdEncoding.EncodeToString(make([]byte, 64)),
|
||||
metadata: map[string]string{
|
||||
crypto.MetaSealedKeyS3: base64.StdEncoding.EncodeToString(make([]byte, 64)),
|
||||
crypto.MetaKeyID: "kms-key",
|
||||
crypto.MetaDataEncryptionKey: "m-key"},
|
||||
crypto.MetaDataEncryptionKey: "m-key",
|
||||
},
|
||||
encryptionType: "",
|
||||
err: nil}, // 5
|
||||
{headers: http.Header{xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm: []string{"AES256"},
|
||||
xhttp.AmzServerSideEncryptionCopyCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
|
||||
err: nil,
|
||||
}, // 5
|
||||
{
|
||||
headers: http.Header{
|
||||
xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm: []string{"AES256"},
|
||||
xhttp.AmzServerSideEncryptionCopyCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
|
||||
},
|
||||
copySource: true,
|
||||
metadata: nil,
|
||||
encryptionType: encrypt.SSEC,
|
||||
err: nil}, // 6
|
||||
{headers: http.Header{xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm: []string{"AES256"},
|
||||
xhttp.AmzServerSideEncryptionCopyCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
|
||||
err: nil,
|
||||
}, // 6
|
||||
{
|
||||
headers: http.Header{
|
||||
xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm: []string{"AES256"},
|
||||
xhttp.AmzServerSideEncryptionCopyCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
|
||||
xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
|
||||
},
|
||||
copySource: false,
|
||||
metadata: nil,
|
||||
encryptionType: "",
|
||||
err: nil}, // 7
|
||||
err: nil,
|
||||
}, // 7
|
||||
}
|
||||
|
||||
func TestGetDefaultOpts(t *testing.T) {
|
||||
|
@ -90,7 +90,7 @@ func commonSetDriveCount(divisibleSize uint64, setCounts []uint64) (setSize uint
|
||||
// input argument patterns, the symmetry calculation is to ensure that
|
||||
// we also use uniform number of drives common across all ellipses patterns.
|
||||
func possibleSetCountsWithSymmetry(setCounts []uint64, argPatterns []ellipses.ArgPattern) []uint64 {
|
||||
var newSetCounts = make(map[uint64]struct{})
|
||||
newSetCounts := make(map[uint64]struct{})
|
||||
for _, ss := range setCounts {
|
||||
var symmetry bool
|
||||
for _, argPattern := range argPatterns {
|
||||
@ -224,7 +224,7 @@ func (s endpointSet) getEndpoints() (endpoints []string) {
|
||||
// this function also intelligently decides on what will
|
||||
// be the right set size etc.
|
||||
func (s endpointSet) Get() (sets [][]string) {
|
||||
var k = uint64(0)
|
||||
k := uint64(0)
|
||||
endpoints := s.getEndpoints()
|
||||
for i := range s.setIndexes {
|
||||
for j := range s.setIndexes[i] {
|
||||
@ -253,7 +253,7 @@ func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 {
|
||||
// of endpoints following the ellipses pattern, this is what is used
|
||||
// by the object layer for initializing itself.
|
||||
func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSet, err error) {
|
||||
var argPatterns = make([]ellipses.ArgPattern, len(args))
|
||||
argPatterns := make([]ellipses.ArgPattern, len(args))
|
||||
for i, arg := range args {
|
||||
patterns, perr := ellipses.FindEllipsesPatterns(arg)
|
||||
if perr != nil {
|
||||
@ -332,15 +332,13 @@ const (
|
||||
EnvErasureSetDriveCount = "MINIO_ERASURE_SET_DRIVE_COUNT"
|
||||
)
|
||||
|
||||
var (
|
||||
globalCustomErasureDriveCount = false
|
||||
)
|
||||
var globalCustomErasureDriveCount = false
|
||||
|
||||
// CreateServerEndpoints - validates and creates new endpoints from input args, supports
|
||||
// both ellipses and without ellipses transparently.
|
||||
func createServerEndpoints(serverAddr string, args ...string) (
|
||||
endpointServerPools EndpointServerPools, setupType SetupType, err error) {
|
||||
|
||||
endpointServerPools EndpointServerPools, setupType SetupType, err error,
|
||||
) {
|
||||
if len(args) == 0 {
|
||||
return nil, -1, errInvalidArgument
|
||||
}
|
||||
|
@ -72,7 +72,8 @@ func TestGetDivisibleSize(t *testing.T) {
|
||||
testCases := []struct {
|
||||
totalSizes []uint64
|
||||
result uint64
|
||||
}{{[]uint64{24, 32, 16}, 8},
|
||||
}{
|
||||
{[]uint64{24, 32, 16}, 8},
|
||||
{[]uint64{32, 8, 4}, 4},
|
||||
{[]uint64{8, 8, 8}, 8},
|
||||
{[]uint64{24}, 24},
|
||||
@ -168,7 +169,7 @@ func TestGetSetIndexesEnvOverride(t *testing.T) {
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
t.Run("", func(t *testing.T) {
|
||||
var argPatterns = make([]ellipses.ArgPattern, len(testCase.args))
|
||||
argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
|
||||
for i, arg := range testCase.args {
|
||||
patterns, err := ellipses.FindEllipsesPatterns(arg)
|
||||
if err != nil {
|
||||
@ -290,7 +291,7 @@ func TestGetSetIndexes(t *testing.T) {
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
t.Run("", func(t *testing.T) {
|
||||
var argPatterns = make([]ellipses.ArgPattern, len(testCase.args))
|
||||
argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
|
||||
for i, arg := range testCase.args {
|
||||
patterns, err := ellipses.FindEllipsesPatterns(arg)
|
||||
if err != nil {
|
||||
@ -552,8 +553,10 @@ func TestParseEndpointSet(t *testing.T) {
|
||||
},
|
||||
},
|
||||
nil,
|
||||
[][]uint64{{16, 16, 16, 16, 16, 16, 16, 16,
|
||||
16, 16, 16, 16, 16, 16, 16, 16}},
|
||||
[][]uint64{{
|
||||
16, 16, 16, 16, 16, 16, 16, 16,
|
||||
16, 16, 16, 16, 16, 16, 16, 16,
|
||||
}},
|
||||
},
|
||||
true,
|
||||
},
|
||||
|
@ -238,13 +238,18 @@ func TestCreateEndpoints(t *testing.T) {
|
||||
{"localhost:9000", [][]string{{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}}, "", Endpoints{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")},
|
||||
|
||||
// Erasure Setup with PathEndpointType
|
||||
{":1234", [][]string{{"/d1", "/d2", "/d3", "/d4"}}, ":1234",
|
||||
{
|
||||
":1234",
|
||||
[][]string{{"/d1", "/d2", "/d3", "/d4"}},
|
||||
":1234",
|
||||
Endpoints{
|
||||
Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true},
|
||||
Endpoint{URL: &url.URL{Path: mustAbs("/d2")}, IsLocal: true},
|
||||
Endpoint{URL: &url.URL{Path: mustAbs("/d3")}, IsLocal: true},
|
||||
Endpoint{URL: &url.URL{Path: mustAbs("/d4")}, IsLocal: true},
|
||||
}, ErasureSetupType, nil},
|
||||
},
|
||||
ErasureSetupType, nil,
|
||||
},
|
||||
// DistErasure Setup with URLEndpointType
|
||||
{":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}}, ":9000", Endpoints{
|
||||
Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d1"}, IsLocal: true},
|
||||
@ -350,12 +355,18 @@ func TestGetLocalPeer(t *testing.T) {
|
||||
expectedResult string
|
||||
}{
|
||||
{[]string{"/d1", "/d2", "d3", "d4"}, "127.0.0.1:9000"},
|
||||
{[]string{"http://localhost:9000/d1", "http://localhost:9000/d2", "http://example.org:9000/d3", "http://example.com:9000/d4"},
|
||||
"localhost:9000"},
|
||||
{[]string{"http://localhost:9000/d1", "http://example.org:9000/d2", "http://example.com:9000/d3", "http://example.net:9000/d4"},
|
||||
"localhost:9000"},
|
||||
{[]string{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://localhost:9002/d3", "http://localhost:9003/d4"},
|
||||
"localhost:9000"},
|
||||
{
|
||||
[]string{"http://localhost:9000/d1", "http://localhost:9000/d2", "http://example.org:9000/d3", "http://example.com:9000/d4"},
|
||||
"localhost:9000",
|
||||
},
|
||||
{
|
||||
[]string{"http://localhost:9000/d1", "http://example.org:9000/d2", "http://example.com:9000/d3", "http://example.net:9000/d4"},
|
||||
"localhost:9000",
|
||||
},
|
||||
{
|
||||
[]string{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://localhost:9002/d3", "http://localhost:9003/d4"},
|
||||
"localhost:9000",
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
|
@ -91,7 +91,7 @@ func (er erasureObjects) getBucketInfo(ctx context.Context, bucketName string) (
|
||||
storageDisks := er.getDisks()
|
||||
|
||||
g := errgroup.WithNErrs(len(storageDisks))
|
||||
var bucketsInfo = make([]BucketInfo, len(storageDisks))
|
||||
bucketsInfo := make([]BucketInfo, len(storageDisks))
|
||||
// Undo previous make bucket entry on all underlying storage disks.
|
||||
for index := range storageDisks {
|
||||
index := index
|
||||
|
@ -94,7 +94,7 @@ func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error)
|
||||
// It only decodes the data blocks but does not verify them.
|
||||
// It returns an error if the decoding failed.
|
||||
func (e *Erasure) DecodeDataBlocks(data [][]byte) error {
|
||||
var isZero = 0
|
||||
isZero := 0
|
||||
for _, b := range data {
|
||||
if len(b) == 0 {
|
||||
isZero++
|
||||
|
@ -50,7 +50,7 @@ func (er erasureObjects) getLoadBalancedDisks(optimized bool) []StorageAPI {
|
||||
|
||||
var wg sync.WaitGroup
|
||||
var mu sync.Mutex
|
||||
var newDisks = map[uint64][]StorageAPI{}
|
||||
newDisks := map[uint64][]StorageAPI{}
|
||||
// Based on the random shuffling return back randomized disks.
|
||||
for _, i := range hashOrder(UTCNow().String(), len(disks)) {
|
||||
i := i
|
||||
|
@ -226,8 +226,8 @@ func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, errs []err
|
||||
// a not-found error or a hash-mismatch error.
|
||||
func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []FileInfo,
|
||||
errs []error, latestMeta FileInfo, bucket, object string,
|
||||
scanMode madmin.HealScanMode) ([]StorageAPI, []error, time.Time) {
|
||||
|
||||
scanMode madmin.HealScanMode) ([]StorageAPI, []error, time.Time,
|
||||
) {
|
||||
var diskMTime time.Time
|
||||
var shardFix bool
|
||||
if !latestMeta.DataShardFixed() {
|
||||
|
@ -811,8 +811,8 @@ func isObjectDirDangling(errs []error) (ok bool) {
|
||||
}
|
||||
|
||||
func (er erasureObjects) purgeObjectDangling(ctx context.Context, bucket, object, versionID string,
|
||||
metaArr []FileInfo, errs []error, dataErrs []error, opts madmin.HealOpts) (madmin.HealResultItem, error) {
|
||||
|
||||
metaArr []FileInfo, errs []error, dataErrs []error, opts madmin.HealOpts) (madmin.HealResultItem, error,
|
||||
) {
|
||||
storageDisks := er.getDisks()
|
||||
storageEndpoints := er.getEndpoints()
|
||||
// Check if the object is dangling, if yes and user requested
|
||||
|
@ -87,9 +87,13 @@ func TestReduceErrs(t *testing.T) {
|
||||
errDiskNotFound,
|
||||
}, []error{errDiskNotFound}, errVolumeNotFound},
|
||||
{[]error{}, []error{}, errErasureReadQuorum},
|
||||
{[]error{errFileNotFound, errFileNotFound, errFileNotFound,
|
||||
errFileNotFound, errFileNotFound, nil, nil, nil, nil, nil},
|
||||
nil, nil},
|
||||
{
|
||||
[]error{
|
||||
errFileNotFound, errFileNotFound, errFileNotFound,
|
||||
errFileNotFound, errFileNotFound, nil, nil, nil, nil, nil,
|
||||
},
|
||||
nil, nil,
|
||||
},
|
||||
// Checks if wrapped context cancelation errors are grouped as one.
|
||||
{canceledErrs, nil, context.Canceled},
|
||||
}
|
||||
|
@ -820,7 +820,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
|
||||
onlineDisks, partsMetadata = shuffleDisksAndPartsMetadataByIndex(onlineDisks, partsMetadata, fi)
|
||||
|
||||
// Save current erasure metadata for validation.
|
||||
var currentFI = fi
|
||||
currentFI := fi
|
||||
|
||||
// Allocate parts similar to incoming slice.
|
||||
fi.Parts = make([]ObjectPartInfo, len(parts))
|
||||
|
@ -145,7 +145,7 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
|
||||
// Read(Closer). When err != nil, the returned reader is always nil.
|
||||
func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
|
||||
var unlockOnDefer bool
|
||||
var nsUnlocker = func() {}
|
||||
nsUnlocker := func() {}
|
||||
defer func() {
|
||||
if unlockOnDefer {
|
||||
nsUnlocker()
|
||||
@ -475,7 +475,6 @@ func (er erasureObjects) getObjectInfo(ctx context.Context, bucket, object strin
|
||||
fi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts, false)
|
||||
if err != nil {
|
||||
return objInfo, toObjectErr(err, bucket, object)
|
||||
|
||||
}
|
||||
objInfo = fi.ToObjectInfo(bucket, object)
|
||||
if opts.VersionID != "" && !fi.VersionPurgeStatus().Empty() {
|
||||
@ -1177,7 +1176,7 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec
|
||||
}
|
||||
|
||||
// Initialize list of errors.
|
||||
var delObjErrs = make([][]error, len(storageDisks))
|
||||
delObjErrs := make([][]error, len(storageDisks))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
// Remove versions in bulk for each disk
|
||||
@ -1820,6 +1819,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
|
||||
})
|
||||
}
|
||||
_, err = er.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{
|
||||
MTime: oi.ModTime})
|
||||
MTime: oi.ModTime,
|
||||
})
|
||||
return setRestoreHeaderFn(oi, err)
|
||||
}
|
||||
|
@ -505,7 +505,6 @@ func TestGetObjectNoQuorum(t *testing.T) {
|
||||
gr.Close()
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestHeadObjectNoQuorum(t *testing.T) {
|
||||
|
@ -243,7 +243,7 @@ func (z *erasureServerPools) getAvailablePoolIdx(ctx context.Context, bucket, ob
|
||||
// If there is not enough space the pool will return 0 bytes available.
|
||||
// Negative sizes are seen as 0 bytes.
|
||||
func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, bucket, object string, size int64) serverPoolsAvailableSpace {
|
||||
var serverPools = make(serverPoolsAvailableSpace, len(z.serverPools))
|
||||
serverPools := make(serverPoolsAvailableSpace, len(z.serverPools))
|
||||
|
||||
storageInfos := make([][]*DiskInfo, len(z.serverPools))
|
||||
g := errgroup.WithNErrs(len(z.serverPools))
|
||||
@ -659,7 +659,6 @@ func (z *erasureServerPools) MakeBucketWithLocation(ctx context.Context, bucket
|
||||
|
||||
// Success.
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
|
||||
@ -674,7 +673,7 @@ func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object
|
||||
}
|
||||
|
||||
var unlockOnDefer bool
|
||||
var nsUnlocker = func() {}
|
||||
nsUnlocker := func() {}
|
||||
defer func() {
|
||||
if unlockOnDefer {
|
||||
nsUnlocker()
|
||||
@ -1168,7 +1167,7 @@ func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, p
|
||||
return z.serverPools[0].ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
|
||||
}
|
||||
|
||||
var poolResult = ListMultipartsInfo{}
|
||||
poolResult := ListMultipartsInfo{}
|
||||
poolResult.MaxUploads = maxUploads
|
||||
poolResult.KeyMarker = keyMarker
|
||||
poolResult.Prefix = prefix
|
||||
@ -1287,7 +1286,6 @@ func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, objec
|
||||
Object: object,
|
||||
UploadID: uploadID,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// ListObjectParts - lists all uploaded parts to an object in hashedSet.
|
||||
@ -1529,7 +1527,7 @@ func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmi
|
||||
ctx = lkctx.Context()
|
||||
defer formatLock.Unlock(lkctx.Cancel)
|
||||
|
||||
var r = madmin.HealResultItem{
|
||||
r := madmin.HealResultItem{
|
||||
Type: madmin.HealItemMetadata,
|
||||
Detail: "disk-format",
|
||||
}
|
||||
@ -1561,7 +1559,7 @@ func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmi
|
||||
}
|
||||
|
||||
func (z *erasureServerPools) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
|
||||
var r = madmin.HealResultItem{
|
||||
r := madmin.HealResultItem{
|
||||
Type: madmin.HealItemBucket,
|
||||
Bucket: bucket,
|
||||
}
|
||||
|
@ -206,7 +206,7 @@ func (s *erasureSets) connectDisks() {
|
||||
}()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
var setsJustConnected = make([]bool, s.setCount)
|
||||
setsJustConnected := make([]bool, s.setCount)
|
||||
diskMap := s.getDiskMap()
|
||||
for _, endpoint := range s.endpoints {
|
||||
if isEndpointConnectionStable(diskMap, endpoint, s.lastConnectDisksOpTime) {
|
||||
@ -398,7 +398,7 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
|
||||
s.erasureDisks[i] = make([]StorageAPI, setDriveCount)
|
||||
}
|
||||
|
||||
var erasureLockers = map[string]dsync.NetLocker{}
|
||||
erasureLockers := map[string]dsync.NetLocker{}
|
||||
for _, endpoint := range endpoints {
|
||||
if _, ok := erasureLockers[endpoint.Host]; !ok {
|
||||
erasureLockers[endpoint.Host] = newLockAPI(endpoint)
|
||||
@ -406,7 +406,7 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
|
||||
}
|
||||
|
||||
for i := 0; i < setCount; i++ {
|
||||
var lockerEpSet = set.NewStringSet()
|
||||
lockerEpSet := set.NewStringSet()
|
||||
for j := 0; j < setDriveCount; j++ {
|
||||
endpoint := endpoints[i*setDriveCount+j]
|
||||
// Only add lockers per endpoint.
|
||||
@ -865,7 +865,7 @@ func undoDeleteBucketSets(ctx context.Context, bucket string, sets []*erasureObj
|
||||
// that all buckets are present on all sets.
|
||||
func (s *erasureSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
|
||||
var listBuckets []BucketInfo
|
||||
var healBuckets = map[string]VolInfo{}
|
||||
healBuckets := map[string]VolInfo{}
|
||||
for _, set := range s.sets {
|
||||
// lists all unique buckets across drives.
|
||||
if err := listAllBuckets(ctx, set.getDisks(), healBuckets, s.defaultParityCount); err != nil {
|
||||
@ -958,13 +958,13 @@ func (s *erasureSets) DeleteObjects(ctx context.Context, bucket string, objects
|
||||
}
|
||||
|
||||
// The result of delete operation on all passed objects
|
||||
var delErrs = make([]error, len(objects))
|
||||
delErrs := make([]error, len(objects))
|
||||
|
||||
// The result of delete objects
|
||||
var delObjects = make([]DeletedObject, len(objects))
|
||||
delObjects := make([]DeletedObject, len(objects))
|
||||
|
||||
// A map between a set and its associated objects
|
||||
var objSetMap = make(map[int][]delObj)
|
||||
objSetMap := make(map[int][]delObj)
|
||||
|
||||
// Group objects by set index
|
||||
for i, object := range objects {
|
||||
@ -1147,7 +1147,7 @@ func formatsToDrivesInfo(endpoints Endpoints, formats []*formatErasureV3, sErrs
|
||||
// result, also populate disks to be healed.
|
||||
for i, format := range formats {
|
||||
drive := endpoints.GetString(i)
|
||||
var state = madmin.DriveStateCorrupt
|
||||
state := madmin.DriveStateCorrupt
|
||||
switch {
|
||||
case format != nil:
|
||||
state = madmin.DriveStateOk
|
||||
@ -1274,7 +1274,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
|
||||
newFormatSets := newHealFormatSets(refFormat, s.setCount, s.setDriveCount, formats, sErrs)
|
||||
|
||||
if !dryRun {
|
||||
var tmpNewFormats = make([]*formatErasureV3, s.setCount*s.setDriveCount)
|
||||
tmpNewFormats := make([]*formatErasureV3, s.setCount*s.setDriveCount)
|
||||
for i := range newFormatSets {
|
||||
for j := range newFormatSets[i] {
|
||||
if newFormatSets[i][j] == nil {
|
||||
|
@ -162,7 +162,7 @@ func TestNewErasureSets(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var nDisks = 16 // Maximum disks.
|
||||
nDisks := 16 // Maximum disks.
|
||||
var erasureDisks []string
|
||||
for i := 0; i < nDisks; i++ {
|
||||
// Do not attempt to create this path, the test validates
|
||||
|
@ -417,7 +417,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
|
||||
saverWg.Add(1)
|
||||
go func() {
|
||||
// Add jitter to the update time so multiple sets don't sync up.
|
||||
var updateTime = 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
|
||||
updateTime := 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
|
||||
t := time.NewTicker(updateTime)
|
||||
defer t.Stop()
|
||||
defer saverWg.Done()
|
||||
|
@ -77,9 +77,9 @@ type formatCacheVersionDetect struct {
// Return a slice of format, to be used to format uninitialized disks.
func newFormatCacheV2(drives []string) []*formatCacheV2 {
diskCount := len(drives)
var disks = make([]string, diskCount)
disks := make([]string, diskCount)

var formats = make([]*formatCacheV2, diskCount)
formats := make([]*formatCacheV2, diskCount)

for i := 0; i < diskCount; i++ {
format := &formatCacheV2{}

@ -110,7 +110,7 @@ func formatCacheGetVersion(r io.ReadSeeker) (string, error) {
// Creates a new cache format.json if unformatted.
func createFormatCache(fsFormatPath string, format *formatCacheV1) error {
// open file using READ & WRITE permission
var file, err = os.OpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0600)
file, err := os.OpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0o600)
if err != nil {
return err
}

@ -133,7 +133,7 @@ func createFormatCache(fsFormatPath string, format *formatCacheV1) error {
func initFormatCache(ctx context.Context, drives []string) (formats []*formatCacheV2, err error) {
nformats := newFormatCacheV2(drives)
for i, drive := range drives {
if err = os.MkdirAll(pathJoin(drive, minioMetaBucket), 0777); err != nil {
if err = os.MkdirAll(pathJoin(drive, minioMetaBucket), 0o777); err != nil {
logger.GetReqInfo(ctx).AppendTags("drive", drive)
logger.LogIf(ctx, err)
return nil, err

@ -156,7 +156,6 @@ func loadFormatCache(ctx context.Context, drives []string) ([]*formatCacheV2, bo
for i, drive := range drives {
cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile)
f, err := os.OpenFile(cacheFormatPath, os.O_RDWR, 0)

if err != nil {
if osIsNotExist(err) {
continue

@ -232,7 +231,7 @@ func checkFormatCacheValues(migrating bool, formats []*formatCacheV2) (int, erro
// checkCacheDisksConsistency - checks if "This" disk uuid on each disk is consistent with all "Disks" slices
// across disks.
func checkCacheDiskConsistency(formats []*formatCacheV2) error {
var disks = make([]string, len(formats))
disks := make([]string, len(formats))
// Collect currently available disk uuids.
for index, format := range formats {
if format == nil {

@ -413,7 +412,7 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
object = strings.TrimSuffix(object, "/")

destdir := getCacheSHADir(c.dir, bucket, object)
if err := os.MkdirAll(destdir, 0777); err != nil {
if err := os.MkdirAll(destdir, 0o777); err != nil {
return err
}
prevCachedPath := path.Join(c.dir, bucket, object)

@ -427,7 +426,7 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
}
// marshal cache metadata after adding version and stat info
meta := &cacheMeta{}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(metaBytes, &meta); err != nil {
return err
}

@ -459,7 +458,7 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
return err
}

if err = ioutil.WriteFile(metaPath, jsonData, 0644); err != nil {
if err = ioutil.WriteFile(metaPath, jsonData, 0o644); err != nil {
return err
}
}

@ -475,7 +474,6 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
removeAll(path.Join(c.dir, minioMetaBucket, "buckets"))

return migrateCacheFormatJSON(cacheFormatPath)

}

func migrateCacheFormatJSON(cacheFormatPath string) error {
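The permission-mode changes in these hunks swap plain octal literals for the explicit 0o prefix introduced in Go 1.13; the numeric value is identical. A small illustrative sketch (hypothetical constants, not from this diff):

package main

import "fmt"

func main() {
	// 0600 and 0o600 are the same octal value; only the spelling differs,
	// the 0o prefix simply makes the base explicit.
	const oldStyle = 0600
	const newStyle = 0o600
	fmt.Println(oldStyle == newStyle, newStyle) // true 384
}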
@ -245,7 +245,7 @@ func genFormatCacheInvalidDisksOrder() []*formatCacheV2 {
formatConfigs[index] = format
}
// Re order disks for failure case.
var disks1 = make([]string, 8)
disks1 := make([]string, 8)
copy(disks1, disks)
disks1[1], disks1[2] = disks[2], disks[1]
formatConfigs[2].Cache.Disks = disks1

@ -242,7 +242,7 @@ func formatErasureMigrateV1ToV2(export, version string) error {
if err != nil {
return err
}
return ioutil.WriteFile(formatPath, b, 0666)
return ioutil.WriteFile(formatPath, b, 0o666)
}

// Migrates V2 for format.json to V3 (Flat hierarchy for multipart)

@ -266,7 +266,7 @@ func formatErasureMigrateV2ToV3(export, version string) error {
return err
}

if err = mkdirAll(pathJoin(export, minioMetaMultipartBucket), 0755); err != nil {
if err = mkdirAll(pathJoin(export, minioMetaMultipartBucket), 0o755); err != nil {
return err
}

@ -284,12 +284,12 @@ func formatErasureMigrateV2ToV3(export, version string) error {
if err != nil {
return err
}
return ioutil.WriteFile(formatPath, b, 0666)
return ioutil.WriteFile(formatPath, b, 0o666)
}

// countErrs - count a specific error.
func countErrs(errs []error, err error) int {
var i = 0
i := 0
for _, err1 := range errs {
if err1 == err {
i++

@ -314,7 +314,7 @@ func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasur
g := errgroup.WithNErrs(len(storageDisks))

// Initialize format configs.
var formats = make([]*formatErasureV3, len(storageDisks))
formats := make([]*formatErasureV3, len(storageDisks))

// Load format from each disk in parallel
for index := range storageDisks {

@ -534,7 +534,6 @@ func formatErasureFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI
// Deployment ID needs to be set on all the disks.
// Save `format.json` across all disks.
return saveFormatErasureAll(GlobalContext, storageDisks, formats)

}

// Update only the valid local disks which have not been updated before.

@ -662,7 +661,6 @@ func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) e

// Initializes meta volume only on local storage disks.
func initErasureMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatErasureV3) error {

// Compute the local disks eligible for meta volumes (re)initialization
disksToInit := make([]StorageAPI, 0, len(storageDisks))
for index := range storageDisks {

@ -811,7 +809,6 @@ func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats
}
}
return nil

}

// initFormatErasure - save Erasure format configuration on all disks.
@ -124,11 +124,11 @@ func TestFormatErasureMigrate(t *testing.T) {
t.Fatal(err)
}

if err = os.MkdirAll(pathJoin(rootPath, minioMetaBucket), os.FileMode(0755)); err != nil {
if err = os.MkdirAll(pathJoin(rootPath, minioMetaBucket), os.FileMode(0o755)); err != nil {
t.Fatal(err)
}

if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0644)); err != nil {
if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
t.Fatal(err)
}

@ -175,7 +175,7 @@ func TestFormatErasureMigrate(t *testing.T) {
t.Fatal(err)
}

if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0644)); err != nil {
if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
t.Fatal(err)
}

@ -195,7 +195,7 @@ func TestFormatErasureMigrate(t *testing.T) {
t.Fatal(err)
}

if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0644)); err != nil {
if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
t.Fatal(err)
}

@ -113,7 +113,7 @@ func formatFSMigrateV1ToV2(ctx context.Context, wlk *lock.LockedFile, fsPath str
return err
}

if err = os.MkdirAll(path.Join(fsPath, minioMetaMultipartBucket), 0755); err != nil {
if err = os.MkdirAll(path.Join(fsPath, minioMetaMultipartBucket), 0o755); err != nil {
return err
}

@ -165,7 +165,7 @@ func formatFSMigrate(ctx context.Context, wlk *lock.LockedFile, fsPath string) e
func createFormatFS(fsFormatPath string) error {
// Attempt a write lock on formatConfigFile `format.json`
// file stored in minioMetaBucket(.minio.sys) directory.
lk, err := lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0600)
lk, err := lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0o600)
if err != nil {
return err
}
@ -123,7 +123,7 @@ func fsMkdir(ctx context.Context, dirPath string) (err error) {
return err
}

if err = os.Mkdir((dirPath), 0777); err != nil {
if err = os.Mkdir((dirPath), 0o777); err != nil {
switch {
case osIsExist(err):
return errVolumeExists

@ -309,7 +309,7 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, falloc
return 0, err
}

if err := mkdirAll(pathutil.Dir(filePath), 0777); err != nil {
if err := mkdirAll(pathutil.Dir(filePath), 0o777); err != nil {
switch {
case osIsPermission(err):
return 0, errFileAccessDenied

@ -329,7 +329,7 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, falloc
if globalFSOSync {
flags |= os.O_SYNC
}
writer, err := lock.Open(filePath, flags, 0666)
writer, err := lock.Open(filePath, flags, 0o666)
if err != nil {
return 0, osErrToFileErr(err)
}
@ -75,7 +75,7 @@ func TestFSStats(t *testing.T) {
t.Fatalf("Unable to create volume, %s", err)
}

var reader = bytes.NewReader([]byte("Hello, world"))
reader := bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, 0); err != nil {
t.Fatalf("Unable to create file, %s", err)
}

@ -201,7 +201,7 @@ func TestFSCreateAndOpen(t *testing.T) {
t.Fatal("Unexpected error", err)
}

var reader = bytes.NewReader([]byte("Hello, world"))
reader := bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, 0); err != nil {
t.Fatalf("Unable to create file, %s", err)
}

@ -259,7 +259,7 @@ func TestFSDeletes(t *testing.T) {
t.Fatalf("Unable to create directory, %s", err)
}

var reader = bytes.NewReader([]byte("Hello, world"))
reader := bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, reader.Size()); err != nil {
t.Fatalf("Unable to create file, %s", err)
}

@ -271,7 +271,7 @@ func TestFSDeletes(t *testing.T) {
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(pathJoin(path, "success-vol", "not-empty", "file"), []byte("data"), 0777)
err = ioutil.WriteFile(pathJoin(path, "success-vol", "not-empty", "file"), []byte("data"), 0o777)
if err != nil {
t.Fatal(err)
}

@ -368,7 +368,7 @@ func BenchmarkFSDeleteFile(b *testing.B) {
// We need to create and delete the file sequentially inside the benchmark.
for i := 0; i < b.N; i++ {
b.StopTimer()
err = ioutil.WriteFile(filename, []byte("data"), 0777)
err = ioutil.WriteFile(filename, []byte("data"), 0o777)
if err != nil {
b.Fatal(err)
}

@ -395,7 +395,7 @@ func TestFSRemoves(t *testing.T) {
t.Fatalf("Unable to create directory, %s", err)
}

var reader = bytes.NewReader([]byte("Hello, world"))
reader := bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, 0); err != nil {
t.Fatalf("Unable to create file, %s", err)
}

@ -514,7 +514,7 @@ func TestFSRemoveMeta(t *testing.T) {

filePath := pathJoin(fsPath, "success-vol", "success-file")

var reader = bytes.NewReader([]byte("Hello, world"))
reader := bytes.NewReader([]byte("Hello, world"))
if _, err = fsCreateFile(GlobalContext, filePath, reader, 0); err != nil {
t.Fatalf("Unable to create file, %s", err)
}

@ -556,7 +556,7 @@ func TestFSIsFile(t *testing.T) {

filePath := pathJoin(dirPath, "tmpfile")

if err = ioutil.WriteFile(filePath, nil, 0777); err != nil {
if err = ioutil.WriteFile(filePath, nil, 0o777); err != nil {
t.Fatalf("Unable to create file %s", filePath)
}
@ -91,7 +91,7 @@ func (c *FSChecksumInfoV1) UnmarshalJSON(data []byte) error {
}

var info checksuminfo
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
err := json.Unmarshal(data, &info)
if err != nil {
return err

@ -230,7 +230,7 @@ func (m *fsMetaV1) ReadFrom(ctx context.Context, lk *lock.LockedFile) (n int64,
return 0, io.EOF
}

var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(fsMetaBuf, m); err != nil {
return 0, err
}
@ -224,7 +224,7 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
uploadID := mustGetUUID()
uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)

err := mkdirAll(uploadIDDir, 0755)
err := mkdirAll(uploadIDDir, 0o755)
if err != nil {
logger.LogIf(ctx, err)
return "", err

@ -240,7 +240,7 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
return "", err
}

if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0666); err != nil {
if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0o666); err != nil {
logger.LogIf(ctx, err)
return "", err
}

@ -252,8 +252,8 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
// object. Internally incoming data is written to '.minio.sys/tmp' location
// and safely renamed to '.minio.sys/multipart' for reach parts.
func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {

startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error,
) {
if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID {
return pi, VersionNotFound{
Bucket: srcBucket,

@ -397,7 +397,7 @@ func (fs *FSObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploa
}

var fsMeta fsMetaV1
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(fsMetaBytes, &fsMeta); err != nil {
return minfo, toObjectErr(err, bucket, object)
}

@ -526,7 +526,7 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
}

var fsMeta fsMetaV1
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(fsMetaBytes, &fsMeta); err != nil {
return result, err
}

@ -542,7 +542,6 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
//
// Implements S3 compatible Complete multipart API.
func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) {

var actualSize int64

if err := checkCompleteMultipartArgs(ctx, bucket, object, fs); err != nil {
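The CopyObjectPart hunk above shows the layout used for wrapped signatures: when a parameter list spans multiple lines, the final line ends with a comma and the closing parenthesis moves to its own line before the results. A hedged sketch with a made-up helper (not MinIO code):

package main

import "fmt"

// copyRange is a hypothetical helper that only demonstrates the wrapped
// signature layout: the last parameter line ends with a comma and the
// closing parenthesis of the parameter list sits on its own line.
func copyRange(srcBucket, srcObject, dstBucket, dstObject string,
	startOffset, length int64,
) (written int64, err error) {
	return length - startOffset, nil
}

func main() {
	n, _ := copyRange("src", "a", "dst", "b", 0, 10)
	fmt.Println(n)
}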
@ -148,7 +148,7 @@ func (fsi *fsIOPool) Write(path string) (wlk *lock.LockedFile, err error) {
return nil, err
}

wlk, err = lock.LockedOpenFile(path, os.O_RDWR, 0666)
wlk, err = lock.LockedOpenFile(path, os.O_RDWR, 0o666)
if err != nil {
switch {
case osIsNotExist(err):

@ -175,12 +175,12 @@ func (fsi *fsIOPool) Create(path string) (wlk *lock.LockedFile, err error) {
}

// Creates parent if missing.
if err = mkdirAll(pathutil.Dir(path), 0777); err != nil {
if err = mkdirAll(pathutil.Dir(path), 0o777); err != nil {
return nil, err
}

// Attempt to create the file.
wlk, err = lock.LockedOpenFile(path, os.O_RDWR|os.O_CREATE, 0666)
wlk, err = lock.LockedOpenFile(path, os.O_RDWR|os.O_CREATE, 0o666)
if err != nil {
switch {
case osIsPermission(err):

@ -220,7 +220,6 @@ func (fsi *fsIOPool) Close(path string) error {

// If the file is closed, remove it from the reader pool map.
if rlkFile.IsClosed() {

// Purge the cached lock path from map.
delete(fsi.readersMap, path)
}

@ -110,5 +110,4 @@ func TestRWPool(t *testing.T) {
t.Fatal("Unexpected error", err)
}
}

}

21
cmd/fs-v1.go
@ -97,22 +97,21 @@ func initMetaVolumeFS(fsPath, fsUUID string) error {
// if it doesn't exist yet.
metaBucketPath := pathJoin(fsPath, minioMetaBucket)

if err := os.MkdirAll(metaBucketPath, 0777); err != nil {
if err := os.MkdirAll(metaBucketPath, 0o777); err != nil {
return err
}

metaTmpPath := pathJoin(fsPath, minioMetaTmpBucket, fsUUID)
if err := os.MkdirAll(metaTmpPath, 0777); err != nil {
if err := os.MkdirAll(metaTmpPath, 0o777); err != nil {
return err
}

if err := os.MkdirAll(pathJoin(fsPath, dataUsageBucket), 0777); err != nil {
if err := os.MkdirAll(pathJoin(fsPath, dataUsageBucket), 0o777); err != nil {
return err
}

metaMultipartPath := pathJoin(fsPath, minioMetaMultipartBucket)
return os.MkdirAll(metaMultipartPath, 0777)

return os.MkdirAll(metaMultipartPath, 0o777)
}

// NewFSObjectLayer - initialize new fs object layer.

@ -366,7 +365,7 @@ func (fs *FSObjects) scanBucket(ctx context.Context, bucket string, cache dataUs
fsMeta := newFSMetaV1()
metaOk := false
if len(fsMetaBytes) > 0 {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(fsMetaBytes, &fsMeta); err == nil {
metaOk = true
}

@ -474,7 +473,7 @@ func (fs *FSObjects) SetBucketPolicy(ctx context.Context, bucket string, p *poli
return err
}

var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
configData, err := json.Marshal(p)
if err != nil {
return err

@ -542,7 +541,7 @@ func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
// Ignore any errors returned here.
continue
}
var created = fi.ModTime()
created := fi.ModTime()
meta, err := globalBucketMetadataSys.Get(fi.Name())
if err == nil {
created = meta.Created

@ -705,7 +704,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
return nil, toObjectErr(err, bucket)
}

var nsUnlocker = func() {}
nsUnlocker := func() {}

if lockType != noLock {
// Lock the object before reading.

@ -843,7 +842,7 @@ func (fs *FSObjects) getObjectInfoNoFSLock(ctx context.Context, bucket, object s
fsMetaBuf, rerr := ioutil.ReadAll(rc)
rc.Close()
if rerr == nil {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
if rerr = json.Unmarshal(fsMetaBuf, &fsMeta); rerr != nil {
// For any error to read fsMeta, set default ETag and proceed.
fsMeta = fs.defaultFsJSON(object)

@ -1029,7 +1028,7 @@ func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string
// with a slash separator, we treat it like a valid operation
// and return success.
if isObjectDir(object, data.Size()) {
if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil {
if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0o777); err != nil {
logger.LogIf(ctx, err)
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
@ -205,7 +205,6 @@ func TestFSDeleteObject(t *testing.T) {
t.Fatal("Unexpected error: ", err)
}
}

}

// TestFSDeleteBucket - tests for fs DeleteBucket

@ -263,7 +262,7 @@ func TestFSListBuckets(t *testing.T) {
}

// Create a bucket with invalid name
if err := os.MkdirAll(pathJoin(fs.fsPath, "vo^"), 0777); err != nil {
if err := os.MkdirAll(pathJoin(fs.fsPath, "vo^"), 0o777); err != nil {
t.Fatal("Unexpected error: ", err)
}
f, err := os.Create(pathJoin(fs.fsPath, "test"))

@ -130,7 +130,6 @@ func FromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) Li
CommonPrefixes: commonPrefixes,
EncodingType: lmur.EncodingType,
}

}

// FromMinioClientObjectInfo converts minio ObjectInfo to gateway ObjectInfo

@ -37,14 +37,12 @@ import (
"github.com/minio/pkg/env"
)

var (
gatewayCmd = cli.Command{
Name: "gateway",
Usage: "start object storage gateway",
Flags: append(ServerFlags, GlobalFlags...),
HideHelpCommand: true,
}
)
var gatewayCmd = cli.Command{
Name: "gateway",
Usage: "start object storage gateway",
Flags: append(ServerFlags, GlobalFlags...),
HideHelpCommand: true,
}

// GatewayLocker implements custom NewNSLock implementation
type GatewayLocker struct {
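The gateway-main.go hunk above unwraps a parenthesized var block that holds a single declaration. A sketch of the same rewrite on a hypothetical variable:

package main

import "fmt"

// Before: a grouped declaration with a single spec, e.g.
//
//	var (
//		gatewayName = "gateway"
//	)
//
// After: the group is unwrapped because it contains only one declaration.
var gatewayName = "gateway"

func main() {
	fmt.Println(gatewayName)
}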
@ -349,7 +349,6 @@ func azureTierToS3StorageClass(tierType string) string {
default:
return "STANDARD"
}

}

// azurePropertiesToS3Meta converts Azure metadata/properties to S3

@ -578,7 +577,6 @@ func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi min
resp, err := a.client.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{
Prefix: bucket,
})

if err != nil {
return bi, azureToObjectError(err, bucket)
}

@ -604,7 +602,6 @@ func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketI

for marker.NotDone() {
resp, err := a.client.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{})

if err != nil {
return nil, azureToObjectError(err)
}
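The three hunks above only drop a stray blank line (their line counts fall from 7 to 6), typically one sitting after a call whose error is checked on the next line or just before a closing brace. A hedged sketch of the resulting shape (hypothetical function):

package main

import "fmt"

// lookup shows the "after" form: no blank line between the call and its
// check, and none before the closing brace.
func lookup(m map[string]string, k string) (string, bool) {
	v, ok := m[k]
	if !ok {
		return "", false
	}
	return v, true
}

func main() {
	fmt.Println(lookup(map[string]string{"a": "1"}, "a"))
}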
@ -192,34 +192,41 @@ func TestAzureCodesToObjectError(t *testing.T) {
}{
{
nil, "ContainerAlreadyExists", 0,
minio.BucketExists{Bucket: "bucket"}, "bucket", "",
minio.BucketExists{Bucket: "bucket"},
"bucket", "",
},
{
nil, "InvalidResourceName", 0,
minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
minio.BucketNameInvalid{Bucket: "bucket."},
"bucket.", "",
},
{
nil, "RequestBodyTooLarge", 0,
minio.PartTooBig{}, "", "",
minio.PartTooBig{},
"", "",
},
{
nil, "InvalidMetadata", 0,
minio.UnsupportedMetadata{}, "", "",
minio.UnsupportedMetadata{},
"", "",
},
{
nil, "", http.StatusNotFound,
minio.ObjectNotFound{
Bucket: "bucket",
Object: "object",
}, "bucket", "object",
},
"bucket", "object",
},
{
nil, "", http.StatusNotFound,
minio.BucketNotFound{Bucket: "bucket"}, "bucket", "",
minio.BucketNotFound{Bucket: "bucket"},
"bucket", "",
},
{
nil, "", http.StatusBadRequest,
minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
minio.BucketNameInvalid{Bucket: "bucket."},
"bucket.", "",
},
{
fmt.Errorf("unhandled azure error"), "", http.StatusForbidden,
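The test-table hunk above is purely a layout change: when one entry of the table is written across several lines, the nested error literal keeps its own line and the remaining expected values move to the next line. A rough sketch with a made-up table type:

package main

import "fmt"

type bucketError struct{ bucket string }

type tcase struct {
	err            error
	expectedErr    bucketError
	bucket, object string
}

func main() {
	// The nested literal stays on one line; the remaining values of the
	// same entry follow on the next line.
	cases := []tcase{
		{
			nil, bucketError{bucket: "bucket"},
			"bucket", "",
		},
	}
	fmt.Println(len(cases))
}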
@ -1121,7 +1121,6 @@ func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key strin
LastModified: minio.UTCNow(),
Size: data.Size(),
}, nil

}

// gcsGetPartInfo returns PartInfo of a given object part

@ -72,7 +72,6 @@ func TestToGCSPageToken(t *testing.T) {
t.Errorf("Test %d: Expected %s, got %s", i+1, toGCSPageToken(testCase.Name), testCase.Token)
}
}

}

// TestIsValidGCSProjectIDFormat tests isValidGCSProjectIDFormat

@ -166,7 +165,6 @@ func TestGCSMultipartDataName(t *testing.T) {
}

func TestFromMinioClientListBucketResultToV2Info(t *testing.T) {

listBucketResult := miniogo.ListBucketResult{
IsTruncated: false,
Marker: "testMarker",
@ -133,7 +133,6 @@ func getKerberosClient() (*krb.Client, error) {
realm := env.Get("KRB5REALM", "")
if username == "" || realm == "" {
return nil, errors.New("empty KRB5USERNAME or KRB5REALM")

}

return krb.NewWithKeytab(username, realm, kt, cfg), nil

@ -216,7 +215,7 @@ func (g *HDFS) NewGatewayLayer(creds madmin.Credentials) (minio.ObjectLayer, err
return nil, fmt.Errorf("unable to initialize hdfsClient: %v", err)
}

if err = clnt.MkdirAll(minio.PathJoin(commonPath, hdfsSeparator, minioMetaTmpBucket), os.FileMode(0755)); err != nil {
if err = clnt.MkdirAll(minio.PathJoin(commonPath, hdfsSeparator, minioMetaTmpBucket), os.FileMode(0o755)); err != nil {
return nil, err
}

@ -324,7 +323,7 @@ func (n *hdfsObjects) MakeBucketWithLocation(ctx context.Context, bucket string,
if !hdfsIsValidBucketName(bucket) {
return minio.BucketNameInvalid{Bucket: bucket}
}
return hdfsToObjectErr(ctx, n.clnt.Mkdir(n.hdfsPathJoin(bucket), os.FileMode(0755)), bucket)
return hdfsToObjectErr(ctx, n.clnt.Mkdir(n.hdfsPathJoin(bucket), os.FileMode(0o755)), bucket)
}

func (n *hdfsObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) {

@ -480,7 +479,6 @@ func fileInfoToObjectInfo(bucket string, entry string, fi os.FileInfo) minio.Obj
// a path entry to an `os.FileInfo`. It also saves the listed path's `os.FileInfo` in the cache.
func (n *hdfsObjects) populateDirectoryListing(filePath string, fileInfos map[string]os.FileInfo) (os.FileInfo, error) {
dirReader, err := n.clnt.Open(filePath)

if err != nil {
return nil, err
}

@ -494,7 +492,6 @@ func (n *hdfsObjects) populateDirectoryListing(filePath string, fileInfos map[st

fileInfos[key] = dirStat
infos, err := dirReader.Readdir(0)

if err != nil {
return nil, err
}

@ -602,7 +599,6 @@ func (n *hdfsObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
// exit in case of partial read
pipeCloser := func() { pr.Close() }
return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser)

}

func (n *hdfsObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) {

@ -689,7 +685,7 @@ func (n *hdfsObjects) PutObject(ctx context.Context, bucket string, object strin

// If its a directory create a prefix {
if strings.HasSuffix(object, hdfsSeparator) && r.Size() == 0 {
if err = n.clnt.MkdirAll(name, os.FileMode(0755)); err != nil {
if err = n.clnt.MkdirAll(name, os.FileMode(0o755)); err != nil {
n.deleteObject(n.hdfsPathJoin(bucket), name)
return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
}

@ -707,7 +703,7 @@ func (n *hdfsObjects) PutObject(ctx context.Context, bucket string, object strin
}
dir := path.Dir(name)
if dir != "" {
if err = n.clnt.MkdirAll(dir, os.FileMode(0755)); err != nil {
if err = n.clnt.MkdirAll(dir, os.FileMode(0o755)); err != nil {
w.Close()
n.deleteObject(n.hdfsPathJoin(bucket), dir)
return objInfo, hdfsToObjectErr(ctx, err, bucket, object)

@ -839,7 +835,7 @@ func (n *hdfsObjects) CompleteMultipartUpload(ctx context.Context, bucket, objec
name := n.hdfsPathJoin(bucket, object)
dir := path.Dir(name)
if dir != "" {
if err = n.clnt.MkdirAll(dir, os.FileMode(0755)); err != nil {
if err = n.clnt.MkdirAll(dir, os.FileMode(0o755)); err != nil {
return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
}
}

@ -89,7 +89,6 @@ func (c *Chain) Retrieve() (credentials.Value, error) {
}

return credentials.Value{}, fmt.Errorf("no credentials found in %s cannot proceed", providers)

}

// IsExpired will returned the expired state of the currently cached provider
@ -139,7 +139,7 @@ func (m gwMetaV1) ObjectToPartOffset(ctx context.Context, offset int64) (partInd

// Constructs GWMetaV1 using `jsoniter` lib to retrieve each field.
func gwMetaUnmarshalJSON(ctx context.Context, gwMetaBuf []byte) (gwMeta gwMetaV1, err error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(gwMetaBuf, &gwMeta)
return gwMeta, err
}

@ -75,12 +75,10 @@ func (l *s3EncObjects) ListObjects(ctx context.Context, bucket string, prefix st
loi.Objects = res.Objects
loi.Prefixes = res.Prefixes
return loi, nil

}

// ListObjectsV2 lists all blobs in S3 bucket filtered by prefix
func (l *s3EncObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {

var objects []minio.ObjectInfo
var prefixes []string
var isTruncated bool

@ -423,7 +421,6 @@ func (l *s3EncObjects) DeleteObjects(ctx context.Context, bucket string, objects

// ListMultipartUploads lists all multipart uploads.
func (l *s3EncObjects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, e error) {

lmi, e = l.s3Objects.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
if e != nil {
return

@ -505,7 +502,6 @@ func (l *s3EncObjects) PutObject(ctx context.Context, bucket string, object stri

// PutObjectPart puts a part of object in bucket
func (l *s3EncObjects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, e error) {

if opts.ServerSideEncryption == nil {
return l.s3Objects.PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
}

@ -630,7 +626,6 @@ func (l *s3EncObjects) AbortMultipartUpload(ctx context.Context, bucket string,

// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, e error) {

tmpMeta, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID))
if err != nil {
oi, e = l.s3Objects.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
@ -89,7 +89,7 @@ func getBackgroundHealStatus(ctx context.Context, o ObjectLayer) (madmin.BgHealS
}
}

var healDisksMap = map[string]struct{}{}
healDisksMap := map[string]struct{}{}
for _, ep := range getLocalDisksToHeal() {
healDisksMap[ep.String()] = struct{}{}
}

@ -139,7 +139,6 @@ func getBackgroundHealStatus(ctx context.Context, o ObjectLayer) (madmin.BgHealS
status.SCParity[storageclass.RRS] = backendInfo.RRSCParity

return status, true

}

func mustGetHealSequence(ctx context.Context) *healSequence {

@ -306,7 +305,6 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
},
finished: nil,
})

if err != nil {
// Set this such that when we return this function
// we let the caller retry this disk again for the

@ -77,7 +77,6 @@ func availableMemory() (available uint64) {
if available != 9223372036854771712 {
// This means cgroup memory limit is configured.
return

} // no-limit set proceed to set the limits based on virtual memory.

} // for all other platforms limits are based on virtual memory.

@ -307,7 +307,7 @@ func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (fil
// an ugly way of handling this situation. Refer here
// https://golang.org/src/mime/multipart/formdata.go#L61
if len(form.File) == 0 {
var b = &bytes.Buffer{}
b := &bytes.Buffer{}
for _, v := range formValues["File"] {
b.WriteString(v)
}

@ -544,7 +544,6 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
HTTPStatusCode: http.StatusBadRequest,
}, r.URL)
}

}

// gets host name for current node

@ -34,7 +34,6 @@ const (

// registerHealthCheckRouter - add handler functions for liveness and readiness routes.
func registerHealthCheckRouter(router *mux.Router) {

// Healthcheck router
healthRouter := router.PathPrefix(healthCheckPathPrefix).Subrouter()

@ -128,7 +128,7 @@ func (stats *HTTPAPIStats) Dec(api string) {
func (stats *HTTPAPIStats) Load() map[string]int {
stats.Lock()
defer stats.Unlock()
var apiStats = make(map[string]int, len(stats.apiStats))
apiStats := make(map[string]int, len(stats.apiStats))
for k, v := range stats.apiStats {
apiStats[k] = v
}
@ -28,13 +28,16 @@ func TestRedactLDAPPwd(t *testing.T) {
expectedQuery string
}{
{"", ""},
{"?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&LDAPPassword=can+youreadthis%3F&Version=2011-06-15",
{
"?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&LDAPPassword=can+youreadthis%3F&Version=2011-06-15",
"?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&LDAPPassword=*REDACTED*&Version=2011-06-15",
},
{"LDAPPassword=can+youreadthis%3F&Version=2011-06-15&?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername",
{
"LDAPPassword=can+youreadthis%3F&Version=2011-06-15&?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername",
"LDAPPassword=*REDACTED*&Version=2011-06-15&?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername",
},
{"?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&Version=2011-06-15&LDAPPassword=can+youreadthis%3F",
{
"?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&Version=2011-06-15&LDAPPassword=can+youreadthis%3F",
"?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&Version=2011-06-15&LDAPPassword=*REDACTED*",
},
{
@ -139,7 +139,7 @@ func getIAMConfig(item interface{}, data []byte, itemPath string) error {
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
return json.Unmarshal(data, item)
}

@ -416,7 +416,6 @@ func (ies *IAMEtcdStore) loadGroup(ctx context.Context, group string, m map[stri
}
m[group] = gi
return nil

}

func (ies *IAMEtcdStore) loadGroups(ctx context.Context, m map[string]GroupInfo) error {

@ -437,7 +436,6 @@ func (ies *IAMEtcdStore) loadGroups(ctx context.Context, m map[string]GroupInfo)
}
}
return nil

}

func (ies *IAMEtcdStore) loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error {

@ -497,7 +495,6 @@ func (ies *IAMEtcdStore) loadMappedPolicies(ctx context.Context, userType IAMUse
}
}
return nil

}

func (ies *IAMEtcdStore) savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error {

@ -601,5 +598,4 @@ func (ies *IAMEtcdStore) watch(ctx context.Context, keyPath string) <-chan iamWa
}
}()
return ch

}
@ -128,7 +128,7 @@ func (iamOS *IAMObjectStore) migrateUsersConfigToV1(ctx context.Context) error {
next:
// 4. check if user identity has old format.
identityPath := pathJoin(basePrefix, user, iamIdentityFile)
var cred = auth.Credentials{
cred := auth.Credentials{
AccessKey: user,
}
if err := iamOS.loadIAMConfig(ctx, &cred, identityPath); err != nil {

@ -159,7 +159,6 @@ func (iamOS *IAMObjectStore) migrateUsersConfigToV1(ctx context.Context) error {
// has not changed.
}
return nil

}

func (iamOS *IAMObjectStore) migrateToV1(ctx context.Context) error {

@ -201,7 +200,7 @@ func (iamOS *IAMObjectStore) migrateBackendFormat(ctx context.Context) error {
}

func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item interface{}, objPath string, opts ...options) error {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
data, err := json.Marshal(item)
if err != nil {
return err

@ -238,7 +237,7 @@ func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item interface{}
if err != nil {
return err
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
return json.Unmarshal(data, item)
}

@ -364,8 +363,8 @@ func (iamOS *IAMObjectStore) loadGroups(ctx context.Context, m map[string]GroupI
}

func (iamOS *IAMObjectStore) loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool,
m map[string]MappedPolicy) error {

m map[string]MappedPolicy,
) error {
var p MappedPolicy
err := iamOS.loadIAMConfig(ctx, &p, getMappedPolicyPath(name, userType, isGroup))
if err != nil {
@ -213,7 +213,7 @@ func (d *PolicyDoc) update(p iampolicy.Policy) {
// from iampolicy.Policy to PolicyDoc. To avoid a migration, loading supports
// both the old and the new formats.
func (d *PolicyDoc) parseJSON(data []byte) error {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json := jsoniter.ConfigCompatibleWithStandardLibrary
var doc PolicyDoc
err := json.Unmarshal(data, &doc)
if err != nil {

@ -378,7 +378,6 @@ func (c *iamCache) policyDBGet(mode UsersSysType, name string, isGroup bool) ([]

// IAMStorageAPI defines an interface for the IAM persistence layer
type IAMStorageAPI interface {

// The role of the read-write lock is to prevent go routines from
// concurrently reading and writing the IAM storage. The (r)lock()
// functions return the iamCache. The cache can be safely written to

@ -387,32 +386,23 @@ type IAMStorageAPI interface {
unlock()
rlock() *iamCache
runlock()

migrateBackendFormat(context.Context) error

getUsersSysType() UsersSysType

loadPolicyDoc(ctx context.Context, policy string, m map[string]PolicyDoc) error
loadPolicyDocs(ctx context.Context, m map[string]PolicyDoc) error

loadUser(ctx context.Context, user string, userType IAMUserType, m map[string]auth.Credentials) error
loadUsers(ctx context.Context, userType IAMUserType, m map[string]auth.Credentials) error

loadGroup(ctx context.Context, group string, m map[string]GroupInfo) error
loadGroups(ctx context.Context, m map[string]GroupInfo) error

loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error
loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error

saveIAMConfig(ctx context.Context, item interface{}, path string, opts ...options) error
loadIAMConfig(ctx context.Context, item interface{}, path string) error
deleteIAMConfig(ctx context.Context, path string) error

savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error
saveMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, mp MappedPolicy, opts ...options) error
saveUserIdentity(ctx context.Context, name string, userType IAMUserType, u UserIdentity, opts ...options) error
saveGroupInfo(ctx context.Context, group string, gi GroupInfo) error

deletePolicyDoc(ctx context.Context, policyName string) error
deleteMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool) error
deleteUserIdentity(ctx context.Context, name string, userType IAMUserType) error

@ -639,7 +629,6 @@ func (store *IAMStoreSys) AddUsersToGroup(ctx context.Context, group string, mem
}

return nil

}

// helper function - does not take any locks. Updates only cache if

@ -880,7 +869,6 @@ func (store *IAMStoreSys) PolicyDBSet(ctx context.Context, name, policy string,
cache.iamGroupPolicyMap[name] = mp
}
return nil

}

// PolicyNotificationHandler - loads given policy from storage. If not present,

@ -1034,7 +1022,6 @@ func (store *IAMStoreSys) GetPolicyDoc(name string) (r PolicyDoc, err error) {

// SetPolicy - creates a policy with name.
func (store *IAMStoreSys) SetPolicy(ctx context.Context, name string, policy iampolicy.Policy) error {

if policy.IsEmpty() || name == "" {
return errInvalidArgument
}

@ -1058,7 +1045,6 @@ func (store *IAMStoreSys) SetPolicy(ctx context.Context, name string, policy iam

cache.iamPolicyDocsMap[name] = d
return nil

}

// ListPolicies - fetches all policies from storage and updates cache as well.

@ -1118,7 +1104,6 @@ func (store *IAMStoreSys) FilterPolicies(policyName string, bucketName string) (
defer store.runlock()

return filterPolicies(cache, policyName, bucketName)

}

// GetBucketUsers - returns users (not STS or service accounts) that have access
@ -170,7 +170,6 @@ func (sys *IAMSys) initStore(objAPI ObjectLayer, etcdClient *etcd.Client) {
} else {
sys.store = &IAMStoreSys{newIAMEtcdStore(etcdClient, sys.usersSysType)}
}

}

// Initialized checks if IAM is initialized

@ -801,9 +800,7 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro
}
}

var (
cred auth.Credentials
)
var cred auth.Credentials

var err error
if len(opts.accessKey) > 0 {

@ -140,7 +140,6 @@ func (l *localLocker) Unlock(_ context.Context, args dsync.LockArgs) (reply bool
}
}
return

}

// removeEntry based on the uid of the lock message, removes a single entry from the
@ -228,7 +228,7 @@ func Test_metaCacheEntries_resolve(t *testing.T) {
if err != nil {
t.Fatal(err)
}
var inputs = []xlMetaV2{
inputs := []xlMetaV2{
0: {
versions: []xlMetaV2ShallowVersion{
{header: xlMetaV2VersionHeader{

@ -379,7 +379,7 @@ func Test_metaCacheEntries_resolve(t *testing.T) {
for i, xl := range inputs {
xl.sortByModTime()
var err error
var entry = metaCacheEntry{
entry := metaCacheEntry{
name: "testobject",
}
entry.metadata, err = xl.AppendTo(nil)
Some files were not shown because too many files have changed in this diff.