run gofumpt cleanup across code-base (#14015)

parent: 6f474982ed
commit: f527c708f2
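The change is mechanical: gofumpt is a stricter superset of gofmt, and this commit simply applies its output across the tree. As a rough orientation before reading the diff, here is a minimal, hypothetical Go sketch of the recurring rewrites — every identifier below is invented for illustration and none of it comes from the MinIO sources:

// Hypothetical sketch of the gofumpt rewrites applied in this commit.
// Only the patterns mirror the diff; the names are made up.
package main

import "fmt"

// Single-spec var groups are un-grouped:
//
//	var (
//		defaultLabel = "ready"
//	)
//
// becomes:
var defaultLabel = "ready"

// In a multi-line signature, the closing parenthesis moves to its own line:
//
//	func report(name string,
//		mode int) (string, error) {
//
// becomes:
func report(name string,
	mode int,
) (string, error) {
	// A local `var msg = ...` with an inferred type becomes `msg := ...`.
	msg := fmt.Sprintf("%s: %o", name, mode)
	return msg, nil
	// gofumpt also deletes stray blank lines before a closing brace,
	// which accounts for most of the single-line deletions below.
}

func main() {
	// Octal literals gain the explicit 0o prefix: 0600 -> 0o600.
	const fileMode = 0o600

	msg, err := report(defaultLabel, fileMode)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(msg)
}

Once the tree is formatted, `gofumpt -s -l .` (the check added to the Makefile's lint target below) lists files that still need formatting and prints nothing when the tree is clean.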
Makefile (14 changed lines)
@@ -20,6 +20,7 @@ help: ## print this help
 getdeps: ## fetch necessary dependencies
 	@mkdir -p ${GOPATH}/bin
 	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.43.0
+	@echo "Installing gofumpt" && go install mvdan.cc/gofumpt@latest
 	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.7-0.20211026165309-e818a1881b0e
 	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest
 
@@ -34,13 +35,14 @@ check-gen: ## check for updated autogenerated files
 
 lint: ## runs golangci-lint suite of linters
 	@echo "Running $@ check"
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
+	@${GOPATH}/bin/golangci-lint cache clean
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
+	@${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
+	@${GOPATH}/bin/gofumpt -s -l .
 
 check: test
 test: verifiers build ## builds minio, runs linters, tests
 	@echo "Running unit tests"
-	@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./...
+	@CGO_ENABLED=0 go test -tags kqueue ./...
 
 test-upgrade: build
 	@echo "Running minio upgrade tests"
@@ -66,18 +68,18 @@ test-site-replication: install ## verify automatic site replication
 
 verify: ## verify minio various setups
 	@echo "Verifying build with race"
-	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-build.sh)
 
 verify-healing: ## verify healing and replacing disks with minio binary
 	@echo "Verify healing build with race"
-	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-healing.sh)
 	@(env bash $(PWD)/buildscripts/unaligned-healing.sh)
 
 build: checks ## builds minio to $(PWD)
 	@echo "Building minio binary to './minio'"
-	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 
 hotfix-vars:
 	$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
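Aside: dropping the GO111MODULE=on prefixes above is safe housekeeping on its own; module mode has been the Go default since 1.16, so the variable no longer changes the behavior of these targets.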
@@ -155,7 +155,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
 		return
 	}
 	var target madmin.BucketTarget
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	if err = json.Unmarshal(reqBytes, &target); err != nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
 		return
@@ -170,7 +170,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
 
 	cfg := globalServerConfig.Clone()
 	vars := mux.Vars(r)
-	var buf = &bytes.Buffer{}
+	buf := &bytes.Buffer{}
 	cw := config.NewConfigWriteTo(cfg, vars["key"])
 	if _, err := cw.WriteTo(buf); err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -134,7 +134,6 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
-
 }
 
 // SRPeerReplicateIAMItem - PUT /minio/admin/v3/site-replication/iam-item
@@ -613,7 +613,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
 		}
 	}
 
-	var createResp = madmin.AddServiceAccountResp{
+	createResp := madmin.AddServiceAccountResp{
 		Credentials: madmin.Credentials{
 			AccessKey: newCred.AccessKey,
 			SecretKey: newCred.SecretKey,
@@ -814,7 +814,7 @@ func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
-	var infoResp = madmin.InfoServiceAccountResp{
+	infoResp := madmin.InfoServiceAccountResp{
 		ParentUser: svcAccount.ParentUser,
 		AccountStatus: svcAccount.Status,
 		ImpliedPolicy: impliedPolicy,
@@ -891,7 +891,7 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
 		serviceAccountsNames = append(serviceAccountsNames, svc.AccessKey)
 	}
 
-	var listResp = madmin.ListServiceAccountsResp{
+	listResp := madmin.ListServiceAccountsResp{
 		Accounts: serviceAccountsNames,
 	}
 
@@ -1251,7 +1251,7 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
-	var newPolicies = make(map[string]iampolicy.Policy)
+	newPolicies := make(map[string]iampolicy.Policy)
 	for name, p := range policies {
 		_, err = json.Marshal(p)
 		if err != nil {
@@ -1283,7 +1283,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
-	var newPolicies = make(map[string]iampolicy.Policy)
+	newPolicies := make(map[string]iampolicy.Policy)
 	for name, p := range policies {
 		_, err = json.Marshal(p)
 		if err != nil {
@@ -653,7 +653,6 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
 	if !strings.Contains(infoStr, `"s3:PutObject"`) || !strings.Contains(infoStr, ":"+bucket+"/") {
 		c.Fatalf("policy contains unexpected content!")
 	}
-
 }
 
 func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
@@ -333,7 +333,6 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
 	// Reply with storage information (across nodes in a
 	// distributed setup) as json.
 	writeSuccessResponseJSON(w, jsonBytes)
-
 }
 
 // DataUsageInfoHandler - GET /minio/admin/v3/datausage
@@ -1332,7 +1331,7 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
 	if keyID == "" {
 		keyID = stat.DefaultKey
 	}
-	var response = madmin.KMSKeyStatus{
+	response := madmin.KMSKeyStatus{
 		KeyID: keyID,
 	}
 
@@ -1816,7 +1815,6 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 			anonNetwork[anonEndpoint] = status
 		}
 		return anonNetwork
-
 	}
 
 	anonymizeDrives := func(drives []madmin.Disk) []madmin.Disk {
@@ -1916,7 +1914,6 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 			return
 		}
 	}
-
 }
 
 func getTLSInfo() madmin.TLSInfo {
@@ -2042,7 +2039,6 @@ func assignPoolNumbers(servers []madmin.ServerProperties) {
 }
 
 func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
-
 	lambdaMap := make(map[string][]madmin.TargetIDStatus)
 
 	for _, tgt := range globalConfigTargetList.Targets() {
@@ -2284,7 +2280,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
 		}
 		if si.Mode == 0 {
 			// Not, set it to default.
-			si.Mode = 0600
+			si.Mode = 0o600
 		}
 		header, zerr := zip.FileInfoHeader(dummyFileInfo{
 			name: filename,
@@ -236,8 +236,8 @@ func TestServiceRestartHandler(t *testing.T) {
 
 // buildAdminRequest - helper function to build an admin API request.
 func buildAdminRequest(queryVal url.Values, method, path string,
-	contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {
-
+	contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error,
+) {
 	req, err := newTestRequest(method,
 		adminPathPrefix+adminAPIVersionPrefix+path+"?"+queryVal.Encode(),
 		contentLength, bodySeeker)
@@ -380,5 +380,4 @@ func TestExtractHealInitParams(t *testing.T) {
 			}
 		}
 	}
-
 }
@@ -278,8 +278,8 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
 // background routine to clean up heal results after the
 // aforementioned duration.
 func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLayer) (
-	respBytes []byte, apiErr APIError, errMsg string) {
-
+	respBytes []byte, apiErr APIError, errMsg string,
+) {
 	if h.forceStarted {
 		_, apiErr = ahs.stopHealSequence(pathJoin(h.bucket, h.object))
 		if apiErr.Code != "" {
@@ -338,8 +338,8 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
 // representation. The clientToken helps ensure there aren't
 // conflicting clients fetching status.
 func (ahs *allHealState) PopHealStatusJSON(hpath string,
-	clientToken string) ([]byte, APIErrorCode) {
-
+	clientToken string) ([]byte, APIErrorCode,
+) {
 	// fetch heal state for given path
 	h, exists := ahs.getHealSequence(hpath)
 	if !exists {
@@ -453,8 +453,8 @@ type healSequence struct {
 // NewHealSequence - creates healSettings, assumes bucket and
 // objPrefix are already validated.
 func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
-	hs madmin.HealOpts, forceStart bool) *healSequence {
-
+	hs madmin.HealOpts, forceStart bool,
+) *healSequence {
 	reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
 	reqInfo.AppendTags("prefix", objPrefix)
 	ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))
@@ -38,7 +38,6 @@ type adminAPIHandlers struct{}
 
 // registerAdminRouter - Add handler functions for each service REST API routes.
 func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
-
 	adminAPI := adminAPIHandlers{}
 	// Admin router
 	adminRouter := router.PathPrefix(adminPathPrefix).Subrouter()
@@ -2125,7 +2125,7 @@ func toAPIError(ctx context.Context, err error) APIError {
 		return noError
 	}
 
-	var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
+	apiErr := errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
 	e, ok := err.(dns.ErrInvalidBucketName)
 	if ok {
 		code := toAPIErrorCode(ctx, e)
@@ -2238,7 +2238,6 @@ func toAPIError(ctx context.Context, err error) APIError {
 			// since S3 only sends one Error XML response.
 			if len(e.Errors) >= 1 {
 				apiErr.Code = e.Errors[0].Reason
-
 			}
 		case azblob.StorageError:
 			apiErr = APIError{
@@ -23,7 +23,7 @@ import (
 
 func TestNewRequestID(t *testing.T) {
 	// Ensure that it returns an alphanumeric result of length 16.
-	var id = mustGetRequestID(UTCNow())
+	id := mustGetRequestID(UTCNow())
 
 	if len(id) != 16 {
 		t.Fail()
@@ -268,7 +268,6 @@ type StringMap map[string]string
 
 // MarshalXML - StringMap marshals into XML.
 func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
-
 	tokens := []xml.Token{start}
 
 	for key, value := range s {
@@ -417,8 +416,8 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
 // serialized to match XML and JSON API spec output.
 func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
 	listbuckets := make([]Bucket, 0, len(buckets))
-	var data = ListBucketsResponse{}
+	data := ListBucketsResponse{}
-	var owner = Owner{
+	owner := Owner{
 		ID: globalMinioDefaultOwnerID,
 		DisplayName: "minio",
 	}
@@ -439,14 +438,14 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
 // generates an ListBucketVersions response for the said bucket with other enumerated options.
 func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
 	versions := make([]ObjectVersion, 0, len(resp.Objects))
-	var owner = Owner{
+	owner := Owner{
 		ID: globalMinioDefaultOwnerID,
 		DisplayName: "minio",
 	}
-	var data = ListVersionsResponse{}
+	data := ListVersionsResponse{}
 
 	for _, object := range resp.Objects {
-		var content = ObjectVersion{}
+		content := ObjectVersion{}
 		if object.Name == "" {
 			continue
 		}
@@ -486,7 +485,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
 
 	prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
 	for _, prefix := range resp.Prefixes {
-		var prefixItem = CommonPrefix{}
+		prefixItem := CommonPrefix{}
 		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
 		prefixes = append(prefixes, prefixItem)
 	}
@@ -497,14 +496,14 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
 // generates an ListObjectsV1 response for the said bucket with other enumerated options.
 func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
 	contents := make([]Object, 0, len(resp.Objects))
-	var owner = Owner{
+	owner := Owner{
 		ID: globalMinioDefaultOwnerID,
 		DisplayName: "minio",
 	}
-	var data = ListObjectsResponse{}
+	data := ListObjectsResponse{}
 
 	for _, object := range resp.Objects {
-		var content = Object{}
+		content := Object{}
 		if object.Name == "" {
 			continue
 		}
@@ -535,7 +534,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
 
 	prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
 	for _, prefix := range resp.Prefixes {
-		var prefixItem = CommonPrefix{}
+		prefixItem := CommonPrefix{}
 		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
 		prefixes = append(prefixes, prefixItem)
 	}
@@ -546,14 +545,14 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
 // generates an ListObjectsV2 response for the said bucket with other enumerated options.
 func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
 	contents := make([]Object, 0, len(objects))
-	var owner = Owner{
+	owner := Owner{
 		ID: globalMinioDefaultOwnerID,
 		DisplayName: "minio",
 	}
-	var data = ListObjectsV2Response{}
+	data := ListObjectsV2Response{}
 
 	for _, object := range objects {
-		var content = Object{}
+		content := Object{}
 		if object.Name == "" {
 			continue
 		}
@@ -608,7 +607,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
 
 	commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
 	for _, prefix := range prefixes {
-		var prefixItem = CommonPrefix{}
+		prefixItem := CommonPrefix{}
 		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
 		commonPrefixes = append(commonPrefixes, prefixItem)
 	}
@@ -821,8 +820,8 @@ func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIE
 // but accepts the error message directly (this allows messages to be
 // dynamically generated.)
 func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError,
-	errBody string, reqURL *url.URL) {
-
+	errBody string, reqURL *url.URL,
+) {
 	reqInfo := logger.GetReqInfo(ctx)
 	errorResponse := APIErrorResponse{
 		Code: err.Code,
@@ -484,7 +484,6 @@ func registerAPIRouter(router *mux.Router) {
 	// If none of the routes match add default error handler routes
 	apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
 	apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(methodNotAllowedHandler("S3")))
-
 }
 
 // corsHandler handler for CORS (Cross Origin Resource Sharing)
@@ -44,7 +44,6 @@ func TestS3EncodeName(t *testing.T) {
 			if testCase.expectedOutput != outputText {
 				t.Errorf("Expected `%s`, got `%s`", testCase.expectedOutput, outputText)
 			}
-
 		})
 	}
 }
@@ -115,7 +115,6 @@ func newHealRoutine() *healRoutine {
 		tasks: make(chan healTask),
 		workers: workers,
 	}
-
 }
 
 // healDiskFormat - heals format.json, return value indicates if a
@@ -305,7 +305,6 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {
 		}
 	}
 	return disksToHeal
-
 }
 
 // monitorLocalDisksAndHeal - ensures that detected new disks are healed
@@ -212,7 +212,7 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
 // bitrotSelfTest tries to catch any issue in the bitrot implementation
 // early instead of silently corrupting data.
 func bitrotSelfTest() {
-	var checksums = map[BitrotAlgorithm]string{
+	checksums := map[BitrotAlgorithm]string{
 		SHA256: "a7677ff19e0182e4d52e3a3db727804abc82a5818749336369552e54b838b004",
 		BLAKE2b512: "e519b7d84b1c3c917985f544773a35cf265dcab10948be3550320d156bab612124a5ae2ae5a8c73c0eea360f68b0e28136f26e858756dbfe7375a7389f26c669",
 		HighwayHash256: "39c0407ed3f01b18d22c85db4aeff11e060ca5f43131b0126731ca197cd42313",
@@ -449,7 +449,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 		return
 	}
 
-	var objectsToDelete = map[ObjectToDelete]int{}
+	objectsToDelete := map[ObjectToDelete]int{}
 	getObjectInfoFn := objectAPI.GetObjectInfo
 	if api.CacheAPI() != nil {
 		getObjectInfoFn = api.CacheAPI().GetObjectInfo
@@ -606,8 +606,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 	}
 
 	// Generate response
-	var deleteErrors = make([]DeleteError, 0, len(deleteObjectsReq.Objects))
+	deleteErrors := make([]DeleteError, 0, len(deleteObjectsReq.Objects))
-	var deletedObjects = make([]DeletedObject, 0, len(deleteObjectsReq.Objects))
+	deletedObjects := make([]DeletedObject, 0, len(deleteObjectsReq.Objects))
 	for _, deleteResult := range deleteResults {
 		if deleteResult.errInfo.Code != "" {
 			deleteErrors = append(deleteErrors, deleteResult.errInfo)
@@ -1806,7 +1806,8 @@ func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseW
 	tgtArns := config.FilterTargetArns(
 		replication.ObjectOpts{
 			OpType: replication.ResyncReplicationType,
-			TargetArn: arn})
+			TargetArn: arn,
+		})
 
 	if len(tgtArns) == 0 {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
@@ -81,8 +81,8 @@ func TestGetBucketLocationHandler(t *testing.T) {
 }
 
 func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
 	// test cases with sample input and expected output.
 	testCases := []struct {
 		bucketName string
@@ -163,7 +163,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
 		recV2 := httptest.NewRecorder()
 		// construct HTTP request for PUT bucket policy endpoint.
 		reqV2, err := newTestSignedRequestV2(http.MethodGet, getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
-
 		if err != nil {
 			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
 		}
@@ -210,7 +209,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
 
 	nilBucket := "dummy-bucket"
 	nilReq, err := newTestRequest(http.MethodGet, getBucketLocationURL("", nilBucket), 0, nil)
-
 	if err != nil {
 		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 	}
@@ -225,8 +223,8 @@ func TestHeadBucketHandler(t *testing.T) {
 }
 
 func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
 	// test cases with sample input and expected output.
 	testCases := []struct {
 		bucketName string
@@ -282,7 +280,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
 		recV2 := httptest.NewRecorder()
 		// construct HTTP request for PUT bucket policy endpoint.
 		reqV2, err := newTestSignedRequestV2(http.MethodHead, getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
-
 		if err != nil {
 			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
 		}
@@ -297,7 +294,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
 
 	// Test for Anonymous/unsigned http request.
 	anonReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", bucketName), 0, nil)
-
 	if err != nil {
 		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
 			instanceType, bucketName, err)
@@ -315,7 +311,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
 
 	nilBucket := "dummy-bucket"
 	nilReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", nilBucket), 0, nil)
-
 	if err != nil {
 		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 	}
@@ -331,8 +326,8 @@ func TestListMultipartUploadsHandler(t *testing.T) {
 
 // testListMultipartUploadsHandler - Tests validate listing of multipart uploads.
 func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
 	// Collection of non-exhaustive ListMultipartUploads test cases, valid errors
 	// and success responses.
 	testCases := []struct {
@@ -552,7 +547,6 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
 		testCases[6].uploadIDMarker, testCases[6].delimiter, testCases[6].maxUploads)
 
 	nilReq, err := newTestRequest(http.MethodGet, url, 0, nil)
-
 	if err != nil {
 		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 	}
@@ -568,8 +562,8 @@ func TestListBucketsHandler(t *testing.T) {
 
 // testListBucketsHandler - Tests validate listing of buckets.
 func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
 	testCases := []struct {
 		bucketName string
 		accessKey string
@@ -615,7 +609,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
 
 		// verify response for V2 signed HTTP request.
 		reqV2, err := newTestSignedRequestV2(http.MethodGet, getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
-
 		if err != nil {
 			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
 		}
@@ -630,7 +623,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
 	// Test for Anonymous/unsigned http request.
 	// ListBucketsHandler doesn't support bucket policies, setting the policies shouldn't make a difference.
 	anonReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
-
 	if err != nil {
 		t.Fatalf("MinIO %s: Failed to create an anonymous request.", instanceType)
 	}
@@ -646,7 +638,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
 	// The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.
 
 	nilReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
-
 	if err != nil {
 		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 	}
@@ -661,8 +652,8 @@ func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
 }
 
 func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
 	var err error
 
 	contentBytes := []byte("hello")
@@ -150,8 +150,8 @@ func TestBucketLifecycle(t *testing.T) {
 // Simple tests of bucket lifecycle: PUT, GET, DELETE.
 // Tests are related and the order is important.
 func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	creds auth.Credentials, t *testing.T) {
-
+	creds auth.Credentials, t *testing.T,
+) {
 	// test cases with sample input and expected output.
 	testCases := []struct {
 		method string
@@ -266,8 +266,8 @@ func testBucketLifecycle(obj ObjectLayer, instanceType, bucketName string, apiRo
 	lifecycleResponse []byte
 	errorResponse APIErrorResponse
 	shouldPass bool
-}) {
-
+},
+) {
 	for i, testCase := range testCases {
 		// initialize httptest Recorder, this records any mutations to response writer inside the handler.
 		rec := httptest.NewRecorder()
@@ -178,9 +178,7 @@ func (t *transitionState) queueTransitionTask(oi ObjectInfo) {
 	}
 }
 
-var (
-	globalTransitionState *transitionState
-)
+var globalTransitionState *transitionState
 
 func newTransitionState(ctx context.Context, objAPI ObjectLayer) *transitionState {
 	return &transitionState{
@@ -466,9 +464,7 @@ func (sp *SelectParameters) IsEmpty() bool {
 	return sp == nil
 }
 
-var (
-	selectParamsXMLName = "SelectParameters"
-)
+var selectParamsXMLName = "SelectParameters"
 
 // UnmarshalXML - decodes XML data.
 func (sp *SelectParameters) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
@@ -105,7 +105,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute
 	bucketName1 := fmt.Sprintf("%s-1", bucketName)
 
 	const n = 100
-	var start = make(chan struct{})
+	start := make(chan struct{})
 	var ok, errs int
 	var wg sync.WaitGroup
 	var mu sync.Mutex
@@ -147,8 +147,8 @@ func TestPutBucketPolicyHandler(t *testing.T) {
 
 // testPutBucketPolicyHandler - Test for Bucket policy end point.
 func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
 	bucketName1 := fmt.Sprintf("%s-1", bucketName)
 	if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil {
 		t.Fatal(err)
@@ -333,7 +333,6 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 	// create unsigned HTTP request for PutBucketPolicyHandler.
 	anonReq, err := newTestRequest(http.MethodPut, getPutPolicyURL("", bucketName),
 		int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)))
-
 	if err != nil {
 		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
 			instanceType, bucketName, err)
@@ -352,14 +351,12 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 
 	nilReq, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", nilBucket),
 		0, nil, "", "", nil)
-
 	if err != nil {
 		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 	}
 	// execute the object layer set to `nil` test.
 	// `ExecObjectLayerAPINilTest` manages the operation.
 	ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
-
 }
 
 // Wrapper for calling Get Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
@@ -465,7 +462,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// construct HTTP request for PUT bucket policy endpoint.
 		reqV4, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", testCase.bucketName),
 			0, nil, testCase.accessKey, testCase.secretKey, nil)
-
 		if err != nil {
 			t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
 		}
@@ -540,7 +536,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 	// Bucket policy related functions doesn't support anonymous requests, setting policies shouldn't make a difference.
 	// create unsigned HTTP request for PutBucketPolicyHandler.
 	anonReq, err := newTestRequest(http.MethodGet, getPutPolicyURL("", bucketName), 0, nil)
-
 	if err != nil {
 		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
 			instanceType, bucketName, err)
@@ -559,7 +554,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 
 	nilReq, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", nilBucket),
 		0, nil, "", "", nil)
-
 	if err != nil {
 		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 	}
@@ -575,8 +569,8 @@ func TestDeleteBucketPolicyHandler(t *testing.T) {
 
 // testDeleteBucketPolicyHandler - Test for Delete bucket policy end point.
 func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
 	// template for constructing HTTP request body for PUT bucket policy.
 	bucketPolicyTemplate := `{
		"Version": "2012-10-17",
@@ -743,7 +737,6 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
 	// Bucket policy related functions doesn't support anonymous requests, setting policies shouldn't make a difference.
 	// create unsigned HTTP request for PutBucketPolicyHandler.
 	anonReq, err := newTestRequest(http.MethodDelete, getPutPolicyURL("", bucketName), 0, nil)
-
 	if err != nil {
 		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
 			instanceType, bucketName, err)
@@ -762,7 +755,6 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
 
 	nilReq, err := newTestSignedRequestV4(http.MethodDelete, getDeletePolicyURL("", nilBucket),
 		0, nil, "", "", nil)
-
 	if err != nil {
 		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 	}
@@ -200,7 +200,7 @@ func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.Buc
 	}
 
 	var policyInfo miniogopolicy.BucketAccessPolicy
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	if err = json.Unmarshal(data, &policyInfo); err != nil {
 		// This should not happen because data is valid to JSON data.
 		return nil, err
@@ -218,7 +218,7 @@ func BucketAccessPolicyToPolicy(policyInfo *miniogopolicy.BucketAccessPolicy) (*
 	}
 
 	var bucketPolicy policy.Policy
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	if err = json.Unmarshal(data, &bucketPolicy); err != nil {
 		// This should not happen because data is valid to JSON data.
 		return nil, err
@@ -55,7 +55,6 @@ func (r *ReplicationStats) Delete(bucket string) {
 	r.ulock.Lock()
 	defer r.ulock.Unlock()
 	delete(r.UsageCache, bucket)
-
 }
 
 // UpdateReplicaStat updates in-memory replica statistics with new values.
@@ -77,7 +77,8 @@ var replicatedInfosTests = []struct {
 				ReplicationStatus: replication.Failed,
 				OpType: replication.ObjectReplicationType,
 				ReplicationAction: replicateAll,
-			}},
+			},
+		},
 		expectedCompletedSize: 249,
 		expectedReplicationStatusInternal: "arn1=COMPLETED;arn2=FAILED;",
 		expectedReplicationStatus: replication.Failed,
@@ -102,7 +103,8 @@ var replicatedInfosTests = []struct {
 				ReplicationStatus: replication.Failed,
 				OpType: replication.ObjectReplicationType,
 				ReplicationAction: replicateAll,
-			}},
+			},
+		},
 		expectedCompletedSize: 0,
 		expectedReplicationStatusInternal: "arn1=PENDING;arn2=FAILED;",
 		expectedReplicationStatus: replication.Failed,
@@ -182,7 +184,6 @@ var parseReplicationDecisionTest = []struct {
 func TestParseReplicateDecision(t *testing.T) {
 	for i, test := range parseReplicationDecisionTest {
 		dsc, err := parseReplicateDecision(test.expDsc.String())
-
 		if err != nil {
 			if test.expErr != err {
 				t.Errorf("Test%d (%s): Expected parse error got %t , want %t", i+1, test.name, err, test.expErr)
@@ -139,6 +139,7 @@ func (o mustReplicateOptions) ReplicationStatus() (s replication.StatusType) {
 	}
 	return s
 }
+
 func (o mustReplicateOptions) isExistingObjectReplication() bool {
 	return o.opType == replication.ExistingObjectReplicationType
 }
@ -146,6 +147,7 @@ func (o mustReplicateOptions) isExistingObjectReplication() bool {
|
|||||||
func (o mustReplicateOptions) isMetadataReplication() bool {
|
func (o mustReplicateOptions) isMetadataReplication() bool {
|
||||||
return o.opType == replication.MetadataReplicationType
|
return o.opType == replication.MetadataReplicationType
|
||||||
}
|
}
|
||||||
|
|
||||||
func getMustReplicateOptions(o ObjectInfo, op replication.Type, opts ObjectOptions) mustReplicateOptions {
|
func getMustReplicateOptions(o ObjectInfo, op replication.Type, opts ObjectOptions) mustReplicateOptions {
|
||||||
if !op.Valid() {
|
if !op.Valid() {
|
||||||
op = replication.ObjectReplicationType
|
op = replication.ObjectReplicationType
|
||||||
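The two `+` lines above come from another gofumpt rule: multiline top-level declarations must be separated by an empty line. The methods here were adjacent with no blank line between them. In isolation (hypothetical `opts` type):

    package main

    type opts struct{ kind string }

    // Before the cleanup, the next method started on the line directly
    // after this closing brace.
    func (o opts) isExisting() bool {
        return o.kind == "existing"
    }

    // gofumpt inserts exactly one blank line between the two
    // multiline declarations.
    func (o opts) isMetadata() bool {
        return o.kind == "metadata"
    }

    func main() { _ = opts{}.isExisting() }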
@ -441,7 +443,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
         }
     }
 
-    var eventName = event.ObjectReplicationComplete
+    eventName := event.ObjectReplicationComplete
     if replicationStatus == replication.Failed {
         eventName = event.ObjectReplicationFailed
     }
@ -523,7 +525,8 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
         VersionID: versionID,
         Internal: miniogo.AdvancedGetOptions{
             ReplicationProxyRequest: "false",
-        }}); isErrMethodNotAllowed(ErrorRespToObjectError(err, dobj.Bucket, dobj.ObjectName)) {
+        },
+    }); isErrMethodNotAllowed(ErrorRespToObjectError(err, dobj.Bucket, dobj.ObjectName)) {
         if dobj.VersionID == "" {
             rinfo.ReplicationStatus = replication.Completed
             return
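The same composite-literal rule applies when the literal is the final argument of a call: the old single-line closer `}})` becomes `},` plus `})`, so the options struct and the call close separately. A hedged sketch: `getOpts` and `statObject` below are stand-ins, not the minio-go API.

    package main

    import "fmt"

    type getOpts struct {
        VersionID string
        Proxy     string
    }

    func statObject(name string, o getOpts) error {
        fmt.Println("stat", name, o.VersionID)
        return nil
    }

    func main() {
        // Post-gofumpt shape: the literal closes with `},` and the
        // surrounding call closes with `})` on the next line.
        if err := statObject("obj", getOpts{
            VersionID: "v1",
            Proxy:     "false",
        }); err != nil {
            fmt.Println(err)
        }
    }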
@ -902,7 +905,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
     // FIXME: add support for missing replication events
     // - event.ObjectReplicationMissedThreshold
     // - event.ObjectReplicationReplicatedAfterThreshold
-    var eventName = event.ObjectReplicationComplete
+    eventName := event.ObjectReplicationComplete
     if rinfos.ReplicationStatus() == replication.Failed {
         eventName = event.ObjectReplicationFailed
     }
@ -1058,7 +1061,8 @@ func replicateObjectToTarget(ctx context.Context, ri ReplicateObjectInfo, object
         VersionID: objInfo.VersionID,
         Internal: miniogo.AdvancedGetOptions{
             ReplicationProxyRequest: "false",
-        }})
+        },
+    })
     if cerr == nil {
         rAction = getReplicationAction(objInfo, oi, ri.OpType)
         rinfo.ReplicationStatus = replication.Completed
@ -1117,7 +1121,8 @@ func replicateObjectToTarget(ctx context.Context, ri ReplicateObjectInfo, object
         Internal: miniogo.AdvancedPutOptions{
             SourceVersionID: objInfo.VersionID,
             ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
-        }}
+        },
+    }
     if _, err = c.CopyObject(ctx, tgt.Bucket, object, tgt.Bucket, object, getCopyObjMetadata(objInfo, tgt.StorageClass), srcOpts, dstOpts); err != nil {
         rinfo.ReplicationStatus = replication.Failed
         logger.LogIf(ctx, fmt.Errorf("Unable to replicate metadata for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
@ -1213,7 +1218,8 @@ func replicateObjectWithMultipart(ctx context.Context, c *miniogo.Core, bucket,
             SourceMTime: objInfo.ModTime,
             // always set this to distinguish between `mc mirror` replication and serverside
             ReplicationRequest: true,
-        }})
+        },
+    })
     return err
 }
 
@ -1357,7 +1363,6 @@ func (p *ReplicationPool) AddWorker() {
             return
         }
     }
-
 }
 
 // AddExistingObjectReplicateWorker adds a worker to queue existing objects that need to be sync'd
@ -1671,6 +1676,7 @@ type replicationConfig struct {
 func (c replicationConfig) Empty() bool {
     return c.Config == nil
 }
+
 func (c replicationConfig) Replicate(opts replication.ObjectOpts) bool {
     return c.Config.Replicate(opts)
 }
@ -1694,7 +1700,8 @@ func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc *Repli
         DeleteMarker: oi.DeleteMarker,
         VersionID: oi.VersionID,
         OpType: replication.DeleteReplicationType,
-        ExistingObject: true}
+        ExistingObject: true,
+    }
 
     tgtArns := c.Config.FilterTargetArns(opts)
     // indicates no matching target with Existing object replication enabled.
@ -75,7 +75,8 @@ var replicationConfigTests = []struct {
     },
     { // 4. existing object replication enabled, versioning enabled; no reset in progress
         name: "existing object replication enabled, versioning enabled; no reset in progress",
-        info: ObjectInfo{Size: 100,
+        info: ObjectInfo{
+            Size: 100,
             ReplicationStatus: replication.Completed,
             VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
         },
@ -93,174 +94,192 @@ func TestReplicationResync(t *testing.T) {
     }
 }
 
-var start = UTCNow().AddDate(0, 0, -1)
-var replicationConfigTests2 = []struct {
-    info         ObjectInfo
-    name         string
-    rcfg         replicationConfig
-    dsc          ReplicateDecision
-    tgtStatuses  map[string]replication.StatusType
-    expectedSync bool
-}{
-    { // Cases 1-4: existing object replication enabled, versioning enabled, no reset - replication status varies
-        // 1: Pending replication
-        name: "existing object replication on object in Pending replication status",
-        info: ObjectInfo{Size: 100,
-            ReplicationStatusInternal: "arn1:PENDING;",
-            ReplicationStatus: replication.Pending,
-            VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
-        },
-        rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-            Arn: "arn1",
-        }}}},
-        dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-        expectedSync: true,
-    },
-    { // 2. replication status Failed
-        name: "existing object replication on object in Failed replication status",
-        info: ObjectInfo{Size: 100,
-            ReplicationStatusInternal: "arn1:FAILED",
-            ReplicationStatus: replication.Failed,
-            VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
-        },
-        dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-        rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-            Arn: "arn1",
-        }}}},
-        expectedSync: true,
-    },
-    { // 3. replication status unset
-        name: "existing object replication on pre-existing unreplicated object",
-        info: ObjectInfo{Size: 100,
-            ReplicationStatus: replication.StatusType(""),
-            VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
-        },
-        rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-            Arn: "arn1",
-        }}}},
-        dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-        expectedSync: true,
-    },
-    { // 4. replication status Complete
-        name: "existing object replication on object in Completed replication status",
-        info: ObjectInfo{Size: 100,
-            ReplicationStatusInternal: "arn1:COMPLETED",
-            ReplicationStatus: replication.Completed,
-            VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
-        },
-        dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", false, false)}},
-        rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-            Arn: "arn1",
-        }}}},
-        expectedSync: false,
-    },
-    { // 5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present
-        name: "existing object replication with reset in progress and object in Pending status",
-        info: ObjectInfo{Size: 100,
-            ReplicationStatusInternal: "arn1:PENDING;",
-            ReplicationStatus: replication.Pending,
-            VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
-            UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
-        },
-        expectedSync: true,
-        dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-        rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-            Arn: "arn1",
-            ResetID: "xyz",
-            ResetBeforeDate: UTCNow(),
-        }}},
-        },
-    },
-    { // 6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present
-        name: "existing object replication with reset in progress and object in Failed status",
-        info: ObjectInfo{Size: 100,
-            ReplicationStatusInternal: "arn1:FAILED;",
-            ReplicationStatus: replication.Failed,
-            VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
-            UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
-        },
-        dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-        rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-            Arn: "arn1",
-            ResetID: "xyz",
-            ResetBeforeDate: UTCNow(),
-        }}},
-        },
-        expectedSync: true,
-    },
-    { // 7. existing object replication enabled, versioning enabled, replication status unset & reset ID present
-        name: "existing object replication with reset in progress and object never replicated before",
-        info: ObjectInfo{Size: 100,
-            ReplicationStatus: replication.StatusType(""),
-            VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
-            UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
-        },
-        dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-        rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-            Arn: "arn1",
-            ResetID: "xyz",
-            ResetBeforeDate: UTCNow(),
-        }}},
-        },
-        expectedSync: true,
-    },
-    { // 8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present
-        name: "existing object replication enabled - reset in progress for an object in Completed status",
-        info: ObjectInfo{Size: 100,
-            ReplicationStatusInternal: "arn1:COMPLETED;",
-            ReplicationStatus: replication.Completed,
-            VersionID: "a3348c34-c352-4498-82f0-1098e8b34df8",
-            UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
-        },
-        expectedSync: true,
-        dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-        rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-            Arn: "arn1",
-            ResetID: "xyz",
-            ResetBeforeDate: UTCNow(),
-        }}},
-        },
-    },
-    { // 9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different
-        name: "existing object replication enabled, newer reset in progress on object in Pending replication status",
-        info: ObjectInfo{Size: 100,
-            ReplicationStatusInternal: "arn1:PENDING;",
-
-            ReplicationStatus: replication.Pending,
-            VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
-            UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", UTCNow().AddDate(0, 0, -1).Format(http.TimeFormat), "abc")},
-            ModTime: UTCNow().AddDate(0, 0, -2),
-        },
-        expectedSync: true,
-        dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-        rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-            Arn: "arn1",
-            ResetID: "xyz",
-            ResetBeforeDate: UTCNow(),
-        }}},
-        },
-    },
-    { // 10. existing object replication enabled, versioning enabled, replication status Complete & reset done
-        name: "reset done on object in Completed Status - ineligbile for re-replication",
-        info: ObjectInfo{Size: 100,
-            ReplicationStatusInternal: "arn1:COMPLETED;",
-            ReplicationStatus: replication.Completed,
-            VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
-            UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", start.Format(http.TimeFormat), "xyz")},
-        },
-        expectedSync: false,
-        dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
-        rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
-            Arn: "arn1",
-            ResetID: "xyz",
-            ResetBeforeDate: start,
-        }}},
-        },
-    },
-}
+var (
+    start = UTCNow().AddDate(0, 0, -1)
+    replicationConfigTests2 = []struct {
+        info         ObjectInfo
+        name         string
+        rcfg         replicationConfig
+        dsc          ReplicateDecision
+        tgtStatuses  map[string]replication.StatusType
+        expectedSync bool
+    }{
+        { // Cases 1-4: existing object replication enabled, versioning enabled, no reset - replication status varies
+            // 1: Pending replication
+            name: "existing object replication on object in Pending replication status",
+            info: ObjectInfo{
+                Size: 100,
+                ReplicationStatusInternal: "arn1:PENDING;",
+                ReplicationStatus: replication.Pending,
+                VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
+            },
+            rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+                Arn: "arn1",
+            }}}},
+            dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+            expectedSync: true,
+        },
+        { // 2. replication status Failed
+            name: "existing object replication on object in Failed replication status",
+            info: ObjectInfo{
+                Size: 100,
+                ReplicationStatusInternal: "arn1:FAILED",
+                ReplicationStatus: replication.Failed,
+                VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
+            },
+            dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+            rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+                Arn: "arn1",
+            }}}},
+            expectedSync: true,
+        },
+        { // 3. replication status unset
+            name: "existing object replication on pre-existing unreplicated object",
+            info: ObjectInfo{
+                Size: 100,
+                ReplicationStatus: replication.StatusType(""),
+                VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
+            },
+            rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+                Arn: "arn1",
+            }}}},
+            dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+            expectedSync: true,
+        },
+        { // 4. replication status Complete
+            name: "existing object replication on object in Completed replication status",
+            info: ObjectInfo{
+                Size: 100,
+                ReplicationStatusInternal: "arn1:COMPLETED",
+                ReplicationStatus: replication.Completed,
+                VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
+            },
+            dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", false, false)}},
+            rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+                Arn: "arn1",
+            }}}},
+            expectedSync: false,
+        },
+        { // 5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present
+            name: "existing object replication with reset in progress and object in Pending status",
+            info: ObjectInfo{
+                Size: 100,
+                ReplicationStatusInternal: "arn1:PENDING;",
+                ReplicationStatus: replication.Pending,
+                VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
+                UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
+            },
+            expectedSync: true,
+            dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+            rcfg: replicationConfig{
+                remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+                    Arn: "arn1",
+                    ResetID: "xyz",
+                    ResetBeforeDate: UTCNow(),
+                }}},
+            },
+        },
+        { // 6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present
+            name: "existing object replication with reset in progress and object in Failed status",
+            info: ObjectInfo{
+                Size: 100,
+                ReplicationStatusInternal: "arn1:FAILED;",
+                ReplicationStatus: replication.Failed,
+                VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
+                UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
+            },
+            dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+            rcfg: replicationConfig{
+                remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+                    Arn: "arn1",
+                    ResetID: "xyz",
+                    ResetBeforeDate: UTCNow(),
+                }}},
+            },
+            expectedSync: true,
+        },
+        { // 7. existing object replication enabled, versioning enabled, replication status unset & reset ID present
+            name: "existing object replication with reset in progress and object never replicated before",
+            info: ObjectInfo{
+                Size: 100,
+                ReplicationStatus: replication.StatusType(""),
+                VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
+                UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
+            },
+            dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+            rcfg: replicationConfig{
+                remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+                    Arn: "arn1",
+                    ResetID: "xyz",
+                    ResetBeforeDate: UTCNow(),
+                }}},
+            },
+            expectedSync: true,
+        },
+        { // 8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present
+            name: "existing object replication enabled - reset in progress for an object in Completed status",
+            info: ObjectInfo{
+                Size: 100,
+                ReplicationStatusInternal: "arn1:COMPLETED;",
+                ReplicationStatus: replication.Completed,
+                VersionID: "a3348c34-c352-4498-82f0-1098e8b34df8",
+                UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())},
+            },
+            expectedSync: true,
+            dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+            rcfg: replicationConfig{
+                remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+                    Arn: "arn1",
+                    ResetID: "xyz",
+                    ResetBeforeDate: UTCNow(),
+                }}},
+            },
+        },
+        { // 9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different
+            name: "existing object replication enabled, newer reset in progress on object in Pending replication status",
+            info: ObjectInfo{
+                Size: 100,
+                ReplicationStatusInternal: "arn1:PENDING;",
+
+                ReplicationStatus: replication.Pending,
+                VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
+                UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", UTCNow().AddDate(0, 0, -1).Format(http.TimeFormat), "abc")},
+                ModTime: UTCNow().AddDate(0, 0, -2),
+            },
+            expectedSync: true,
+            dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+            rcfg: replicationConfig{
+                remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+                    Arn: "arn1",
+                    ResetID: "xyz",
+                    ResetBeforeDate: UTCNow(),
+                }}},
+            },
+        },
+        { // 10. existing object replication enabled, versioning enabled, replication status Complete & reset done
+            name: "reset done on object in Completed Status - ineligbile for re-replication",
+            info: ObjectInfo{
+                Size: 100,
+                ReplicationStatusInternal: "arn1:COMPLETED;",
+                ReplicationStatus: replication.Completed,
+                VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9",
+                UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", start.Format(http.TimeFormat), "xyz")},
+            },
+            expectedSync: false,
+            dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
+            rcfg: replicationConfig{
+                remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{
+                    Arn: "arn1",
+                    ResetID: "xyz",
+                    ResetBeforeDate: start,
+                }}},
+            },
+        },
+    }
+)
 
 func TestReplicationResyncwrapper(t *testing.T) {
     for i, test := range replicationConfigTests2 {
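The large @ -93 hunk above is a single idea applied at scale: the two package-level declarations (`start` and the `replicationConfigTests2` table) merge into one `var ( ... )` block, which re-indents every line of the table, and each `info: ObjectInfo{Size: 100,` opener is split so the first field starts on its own line. The shape of the change in miniature (field names invented for brevity):

    package main

    import "time"

    type testCase struct {
        size int64
        when time.Time
    }

    // Before:
    //
    //	var start = time.Now().AddDate(0, 0, -1)
    //	var cases = []testCase{{size: 100, when: start}}
    //
    // After: one grouped declaration; literals open and close on their own lines.
    var (
        start = time.Now().AddDate(0, 0, -1)

        cases = []testCase{
            {
                size: 100,
                when: start,
            },
        }
    )

    func main() { _ = cases[0] }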
@ -414,7 +414,7 @@ func parseBucketTargetConfig(bucket string, cdata, cmetadata []byte) (*madmin.Bu
         return nil, nil
     }
     data = cdata
-    var json = jsoniter.ConfigCompatibleWithStandardLibrary
+    json := jsoniter.ConfigCompatibleWithStandardLibrary
     if len(cmetadata) != 0 {
         if err := json.Unmarshal(cmetadata, &meta); err != nil {
             return nil, err
@ -144,5 +144,4 @@ func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r
 
     // Write bucket versioning configuration to client
     writeSuccessResponseXML(w, configData)
-
 }
@ -68,8 +68,11 @@ import (
 
 // serverDebugLog will enable debug printing
 var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
-var shardDiskTimeDelta time.Duration
-var defaultAWSCredProvider []credentials.Provider
+
+var (
+    shardDiskTimeDelta     time.Duration
+    defaultAWSCredProvider []credentials.Provider
+)
 
 func init() {
     if runtime.GOOS == "windows" {
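Grouping the two leftover single-`var` lines also lets gofmt column-align the names and types inside the new block, which is why the spacing changes as well. A sketch (the second name below is hypothetical):

    package main

    import "time"

    // Before:
    //
    //	var shardDiskTimeDelta time.Duration
    //	var defaultRetryDelta time.Duration
    //
    // After: one block; gofmt aligns names and types by column.
    var (
        shardDiskTimeDelta time.Duration
        defaultRetryDelta  time.Duration
    )

    func main() { _, _ = shardDiskTimeDelta, defaultRetryDelta }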
@ -362,7 +365,6 @@ func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() s
 }
 
 func handleCommonCmdArgs(ctx *cli.Context) {
-
     // Get "json" flag from command line argument and
     // enable json and quite modes if json flag is turned on.
     globalCLIContext.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json")
@ -669,7 +671,7 @@ func handleCommonEnvVars() {
     publicIPs := env.Get(config.EnvPublicIPs, "")
     if len(publicIPs) != 0 {
         minioEndpoints := strings.Split(publicIPs, config.ValueSeparator)
-        var domainIPs = set.NewStringSet()
+        domainIPs := set.NewStringSet()
         for _, endpoint := range minioEndpoints {
             if net.ParseIP(endpoint) == nil {
                 // Checking if the IP is a DNS entry.
@ -786,7 +788,7 @@ func handleCommonEnvVars() {
             logger.Fatal(err, fmt.Sprintf("Unable to load X.509 root CAs for KES from %q", env.Get(config.EnvKESServerCA, globalCertsCADir.Get())))
         }
 
-        var defaultKeyID = env.Get(config.EnvKESKeyName, "")
+        defaultKeyID := env.Get(config.EnvKESKeyName, "")
         KMS, err := kms.NewWithConfig(kms.Config{
             Endpoints: endpoints,
             DefaultKeyID: defaultKeyID,
@ -73,7 +73,8 @@ func Test_minioEnvironFromFile(t *testing.T) {
         expectedErr  bool
         expectedEkvs []envKV
     }{
-        {`
+        {
+            `
 export MINIO_ROOT_USER=minio
 export MINIO_ROOT_PASSWORD=minio123`,
             false,
@ -89,7 +90,8 @@ export MINIO_ROOT_PASSWORD=minio123`,
             },
         },
         // Value with double quotes
-        {`export MINIO_ROOT_USER="minio"`,
+        {
+            `export MINIO_ROOT_USER="minio"`,
             false,
             []envKV{
                 {
@ -99,7 +101,8 @@ export MINIO_ROOT_PASSWORD=minio123`,
             },
         },
         // Value with single quotes
-        {`export MINIO_ROOT_USER='minio'`,
+        {
+            `export MINIO_ROOT_USER='minio'`,
             false,
             []envKV{
                 {
@ -108,7 +111,8 @@ export MINIO_ROOT_PASSWORD=minio123`,
                 },
             },
         },
-        {`
+        {
+            `
 MINIO_ROOT_USER=minio
 MINIO_ROOT_PASSWORD=minio123`,
             false,
@ -123,7 +127,8 @@ MINIO_ROOT_PASSWORD=minio123`,
                 },
             },
         },
-        {`
+        {
+            `
 export MINIO_ROOT_USERminio
 export MINIO_ROOT_PASSWORD=minio123`,
             true,
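All five hunks in this test file fix one pattern: when a multi-line raw string is the first, positional field of a table entry, the literal moves off the opening `{` onto its own line. A trimmed-down, self-contained version of the table:

    package main

    import "fmt"

    type tc struct {
        input   string
        wantErr bool
    }

    var cases = []tc{
        // Post-cleanup shape: the raw string starts on its own line after `{`.
        {
            `
    export MINIO_ROOT_USER=minio
    export MINIO_ROOT_PASSWORD=minio123`,
            false,
        },
    }

    func main() { fmt.Println(len(cases), cases[0].wantErr) }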
@ -50,7 +50,7 @@ import (
 )
 
 func initHelp() {
-    var kvs = map[string]config.KVS{
+    kvs := map[string]config.KVS{
         config.EtcdSubSys: etcd.DefaultKVS,
         config.CacheSubSys: cache.DefaultKVS,
         config.CompressionSubSys: compress.DefaultKVS,
@ -78,7 +78,7 @@ func initHelp() {
     config.RegisterDefaultKVS(kvs)
 
     // Captures help for each sub-system
-    var helpSubSys = config.HelpKVS{
+    helpSubSys := config.HelpKVS{
         config.HelpKV{
             Key: config.SiteSubSys,
             Description: "label the server and its location",
@ -205,7 +205,7 @@ func initHelp() {
         }
     }
 
-    var helpMap = map[string]config.HelpKVS{
+    helpMap := map[string]config.HelpKVS{
         "": helpSubSys, // Help for all sub-systems.
         config.SiteSubSys: config.SiteHelp,
         config.RegionSubSys: config.RegionHelp,
@ -84,7 +84,7 @@ func (dir *ConfigDir) Get() string {
 
 // Attempts to create all directories, ignores any permission denied errors.
 func mkdirAllIgnorePerm(path string) error {
-    err := os.MkdirAll(path, 0700)
+    err := os.MkdirAll(path, 0o700)
     if err != nil {
         // It is possible in kubernetes like deployments this directory
         // is already mounted and is not writable, ignore any write errors.
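From this point on, most hunks change nothing but number literals: Go 1.13 introduced the explicit `0o` octal prefix, and gofumpt rewrites the legacy leading-zero form. The value is bit-for-bit the same, as a quick check shows:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        fmt.Println(0700 == 0o700)      // true: same integer, clearer spelling
        fmt.Println(os.FileMode(0o700)) // -rwx------
        // Same permissions either way; only the source spelling differs.
        _ = os.MkdirAll("/tmp/gofumpt-demo", 0o700)
    }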
@ -2445,12 +2445,12 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
         return err
     } // if errConfigNotFound proceed to migrate..
 
-    var configFiles = []string{
+    configFiles := []string{
         getConfigFile(),
         getConfigFile() + ".deprecated",
         configFile,
     }
-    var config = &serverConfigV27{}
+    config := &serverConfigV27{}
     for _, cfgFile := range configFiles {
         if _, err = Load(cfgFile, config); err != nil {
             if !osIsNotExist(err) && !osIsPermission(err) {
@ -51,7 +51,7 @@ func TestServerConfigMigrateV1(t *testing.T) {
     // Create a V1 config json file and store it
     configJSON := "{ \"version\":\"1\", \"accessKeyId\":\"abcde\", \"secretAccessKey\":\"abcdefgh\"}"
     configPath := rootPath + "/fsUsers.json"
-    if err := ioutil.WriteFile(configPath, []byte(configJSON), 0644); err != nil {
+    if err := ioutil.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
         t.Fatal("Unexpected error: ", err)
     }
 
@ -181,7 +181,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
     configPath := rootPath + SlashSeparator + minioConfigFile
 
     // Create a corrupted config file
-    if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0644); err != nil {
+    if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0o644); err != nil {
         t.Fatal("Unexpected error: ", err)
     }
     // Fire a migrateConfig()
@ -194,7 +194,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
 
     // Create a V2 config json file and store it
     configJSON := "{ \"version\":\"2\", \"credentials\": {\"accessKeyId\":\"" + accessKey + "\", \"secretAccessKey\":\"" + secretKey + "\", \"region\":\"us-east-1\"}, \"mongoLogger\":{\"addr\":\"127.0.0.1:3543\", \"db\":\"foodb\", \"collection\":\"foo\"}, \"syslogLogger\":{\"network\":\"127.0.0.1:543\", \"addr\":\"addr\"}, \"fileLogger\":{\"filename\":\"log.out\"}}"
-    if err := ioutil.WriteFile(configPath, []byte(configJSON), 0644); err != nil {
+    if err := ioutil.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
         t.Fatal("Unexpected error: ", err)
     }
 
@ -244,7 +244,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
     configPath := rootPath + SlashSeparator + minioConfigFile
 
     // Create a corrupted config file
-    if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0644); err != nil {
+    if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0o644); err != nil {
         t.Fatal("Unexpected error: ", err)
     }
 
@ -343,7 +343,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
     for i := 3; i <= 17; i++ {
         // Create a corrupted config file
         if err = ioutil.WriteFile(configPath, []byte(fmt.Sprintf("{ \"version\":\"%d\", \"credential\": { \"accessKey\": 1 } }", i)),
-            0644); err != nil {
+            0o644); err != nil {
             t.Fatal("Unexpected error: ", err)
         }
 
@ -354,7 +354,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
     }
 
     // Create a corrupted config file for version '2'.
-    if err = ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0644); err != nil {
+    if err = ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0o644); err != nil {
         t.Fatal("Unexpected error: ", err)
     }
 
@ -46,8 +46,8 @@ const (
 )
 
 func listServerConfigHistory(ctx context.Context, objAPI ObjectLayer, withData bool, count int) (
-    []madmin.ConfigHistoryEntry, error) {
+    []madmin.ConfigHistoryEntry, error,
+) {
     var configHistory []madmin.ConfigHistoryEntry
 
     // List all kvs
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var configFile = path.Join(minioConfigPrefix, minioConfigFile)
|
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||||
if GlobalKMS != nil {
|
if GlobalKMS != nil {
|
||||||
data, err = config.EncryptBytes(GlobalKMS, data, kms.Context{
|
data, err = config.EncryptBytes(GlobalKMS, data, kms.Context{
|
||||||
minioMetaBucket: path.Join(minioMetaBucket, configFile),
|
minioMetaBucket: path.Join(minioMetaBucket, configFile),
|
||||||
@ -153,7 +153,7 @@ func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg interface{})
|
|||||||
}
|
}
|
||||||
|
|
||||||
func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, error) {
|
func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, error) {
|
||||||
var srvCfg = config.New()
|
srvCfg := config.New()
|
||||||
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
configFile := path.Join(minioConfigPrefix, minioConfigFile)
|
||||||
data, err := readConfig(ctx, objAPI, configFile)
|
data, err := readConfig(ctx, objAPI, configFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -174,7 +174,7 @@ func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, e
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||||
if err = json.Unmarshal(data, &srvCfg); err != nil {
|
if err = json.Unmarshal(data, &srvCfg); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -277,7 +277,6 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
         defer func() {
             console.Debugf(logPrefix+" Scanner time: %v %s\n", time.Since(t), logSuffix)
         }()
-
     }
 
     switch cache.Info.Name {
|
|||||||
i.objectName = split[len(split)-1]
|
i.objectName = split[len(split)-1]
|
||||||
}
|
}
|
||||||
|
|
||||||
var applyActionsLogPrefix = color.Green("applyActions:")
|
var (
|
||||||
var applyVersionActionsLogPrefix = color.Green("applyVersionActions:")
|
applyActionsLogPrefix = color.Green("applyActions:")
|
||||||
|
applyVersionActionsLogPrefix = color.Green("applyVersionActions:")
|
||||||
|
)
|
||||||
|
|
||||||
func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi ObjectInfo) (size int64) {
|
func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi ObjectInfo) (size int64) {
|
||||||
if i.debug {
|
if i.debug {
|
||||||
@ -979,7 +980,6 @@ func (i *scannerItem) applyTierObjSweep(ctx context.Context, o ObjectLayer, oi O
|
|||||||
if ignoreNotFoundErr(err) != nil {
|
if ignoreNotFoundErr(err) != nil {
|
||||||
logger.LogIf(ctx, err)
|
logger.LogIf(ctx, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// applyNewerNoncurrentVersionLimit removes noncurrent versions older than the most recent NewerNoncurrentVersions configured.
|
// applyNewerNoncurrentVersionLimit removes noncurrent versions older than the most recent NewerNoncurrentVersions configured.
|
||||||
@ -1100,7 +1100,6 @@ func applyTransitionRule(obj ObjectInfo) bool {
|
|||||||
}
|
}
|
||||||
globalTransitionState.queueTransitionTask(obj)
|
globalTransitionState.queueTransitionTask(obj)
|
||||||
return true
|
return true
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, restoredObject bool) bool {
|
func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, restoredObject bool) bool {
|
||||||
|
@ -105,7 +105,7 @@ func TestDataUpdateTracker(t *testing.T) {
     defer cancel()
     dut.start(ctx, tmpDir)
 
-    var tests = []struct {
+    tests := []struct {
         in    string
         check []string // if not empty, check against these instead.
         exist bool
@ -624,7 +624,7 @@ func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compact
     // Appears to be printed with _MINIO_SERVER_DEBUG=off
     // console.Debugf(" %d children found, compacting %v\n", total, path)
 
-    var leaves = make([]struct {
+    leaves := make([]struct {
         objects uint64
         path    dataUsageHash
     }, total)
@ -774,7 +774,7 @@ func (d *dataUsageCache) tiersUsageInfo(buckets []BucketInfo) *allTierStats {
 // bucketsUsageInfo returns the buckets usage info as a map, with
 // key as bucket name
 func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]BucketUsageInfo {
-    var dst = make(map[string]BucketUsageInfo, len(buckets))
+    dst := make(map[string]BucketUsageInfo, len(buckets))
     for _, bucket := range buckets {
         e := d.find(bucket.Name)
         if e == nil {
@ -797,7 +797,6 @@ func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]Bucke
                 ReplicationPendingCount: stat.PendingCount,
                 ReplicationFailedCount:  stat.FailedCount,
             }
-
             }
         }
         dst[bucket.Name] = bui
@ -40,7 +40,7 @@ const (
 // storeDataUsageInBackend will store all objects sent on the gui channel until closed.
 func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan DataUsageInfo) {
     for dataUsageInfo := range dui {
-        var json = jsoniter.ConfigCompatibleWithStandardLibrary
+        json := jsoniter.ConfigCompatibleWithStandardLibrary
         dataUsageJSON, err := json.Marshal(dataUsageInfo)
         if err != nil {
             logger.LogIf(ctx, err)
@ -105,7 +105,7 @@ func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsag
     defer r.Close()
 
     var dataUsageInfo DataUsageInfo
-    var json = jsoniter.ConfigCompatibleWithStandardLibrary
+    json := jsoniter.ConfigCompatibleWithStandardLibrary
     if err = json.NewDecoder(r).Decode(&dataUsageInfo); err != nil {
         return DataUsageInfo{}, err
     }
@ -41,7 +41,7 @@ func TestDataUsageUpdate(t *testing.T) {
     }
     const bucket = "bucket"
     defer os.RemoveAll(base)
-    var files = []usageTestFile{
+    files := []usageTestFile{
         {name: "rootfile", size: 10000},
         {name: "rootfile2", size: 10000},
         {name: "dir1/d1file", size: 2000},
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test dirs
|
// Test dirs
|
||||||
var want = []struct {
|
want := []struct {
|
||||||
path string
|
path string
|
||||||
isNil bool
|
isNil bool
|
||||||
size, objs int
|
size, objs int
|
||||||
@ -257,7 +257,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
|
|||||||
}
|
}
|
||||||
scannerSleeper.Update(0, 0)
|
scannerSleeper.Update(0, 0)
|
||||||
defer os.RemoveAll(base)
|
defer os.RemoveAll(base)
|
||||||
var files = []usageTestFile{
|
files := []usageTestFile{
|
||||||
{name: "bucket/rootfile", size: 10000},
|
{name: "bucket/rootfile", size: 10000},
|
||||||
{name: "bucket/rootfile2", size: 10000},
|
{name: "bucket/rootfile2", size: 10000},
|
||||||
{name: "bucket/dir1/d1file", size: 2000},
|
{name: "bucket/dir1/d1file", size: 2000},
|
||||||
@ -302,7 +302,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test dirs
|
// Test dirs
|
||||||
var want = []struct {
|
want := []struct {
|
||||||
path string
|
path string
|
||||||
isNil bool
|
isNil bool
|
||||||
size, objs int
|
size, objs int
|
||||||
@ -543,7 +543,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
|
|||||||
}
|
}
|
||||||
const bucket = "abucket"
|
const bucket = "abucket"
|
||||||
defer os.RemoveAll(base)
|
defer os.RemoveAll(base)
|
||||||
var files = []usageTestFile{
|
files := []usageTestFile{
|
||||||
{name: "rootfile", size: 10000},
|
{name: "rootfile", size: 10000},
|
||||||
{name: "rootfile2", size: 10000},
|
{name: "rootfile2", size: 10000},
|
||||||
{name: "dir1/d1file", size: 2000},
|
{name: "dir1/d1file", size: 2000},
|
||||||
|
@ -190,7 +190,7 @@ func newDiskCache(ctx context.Context, dir string, config cache.Config) (*diskCa
         quotaPct = config.Quota
     }
 
-    if err := os.MkdirAll(dir, 0777); err != nil {
+    if err := os.MkdirAll(dir, 0o777); err != nil {
         return nil, fmt.Errorf("Unable to initialize '%s' dir, %w", dir, err)
     }
     cache := diskCache{
|
|||||||
cachedPath := getCacheSHADir(c.dir, bucket, object)
|
cachedPath := getCacheSHADir(c.dir, bucket, object)
|
||||||
metaPath := pathJoin(cachedPath, cacheMetaJSONFile)
|
metaPath := pathJoin(cachedPath, cacheMetaJSONFile)
|
||||||
// Create cache directory if needed
|
// Create cache directory if needed
|
||||||
if err := os.MkdirAll(cachedPath, 0777); err != nil {
|
if err := os.MkdirAll(cachedPath, 0o777); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0666)
|
f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0o666)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -682,10 +682,10 @@ func (c *diskCache) updateMetadata(ctx context.Context, bucket, object, etag str
|
|||||||
cachedPath := getCacheSHADir(c.dir, bucket, object)
|
cachedPath := getCacheSHADir(c.dir, bucket, object)
|
||||||
metaPath := pathJoin(cachedPath, cacheMetaJSONFile)
|
metaPath := pathJoin(cachedPath, cacheMetaJSONFile)
|
||||||
// Create cache directory if needed
|
// Create cache directory if needed
|
||||||
if err := os.MkdirAll(cachedPath, 0777); err != nil {
|
if err := os.MkdirAll(cachedPath, 0o777); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
f, err := os.OpenFile(metaPath, os.O_RDWR, 0666)
|
f, err := os.OpenFile(metaPath, os.O_RDWR, 0o666)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -732,7 +732,7 @@ func getCacheWriteBackSHADir(dir, bucket, object string) string {
|
|||||||
|
|
||||||
// Cache data to disk with bitrot checksum added for each block of 1MB
|
// Cache data to disk with bitrot checksum added for each block of 1MB
|
||||||
func (c *diskCache) bitrotWriteToCache(cachePath, fileName string, reader io.Reader, size uint64) (int64, string, error) {
|
func (c *diskCache) bitrotWriteToCache(cachePath, fileName string, reader io.Reader, size uint64) (int64, string, error) {
|
||||||
if err := os.MkdirAll(cachePath, 0777); err != nil {
|
if err := os.MkdirAll(cachePath, 0o777); err != nil {
|
||||||
return 0, "", err
|
return 0, "", err
|
||||||
}
|
}
|
||||||
filePath := pathJoin(cachePath, fileName)
|
filePath := pathJoin(cachePath, fileName)
|
||||||
@ -807,6 +807,7 @@ func newCacheEncryptReader(content io.Reader, bucket, object string, metadata ma
|
|||||||
}
|
}
|
||||||
return reader, nil
|
return reader, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func newCacheEncryptMetadata(bucket, object string, metadata map[string]string) ([]byte, error) {
|
func newCacheEncryptMetadata(bucket, object string, metadata map[string]string) ([]byte, error) {
|
||||||
var sealedKey crypto.SealedKey
|
var sealedKey crypto.SealedKey
|
||||||
if globalCacheKMS == nil {
|
if globalCacheKMS == nil {
|
||||||
@ -827,6 +828,7 @@ func newCacheEncryptMetadata(bucket, object string, metadata map[string]string)
|
|||||||
metadata[SSECacheEncrypted] = ""
|
metadata[SSECacheEncrypted] = ""
|
||||||
return objectKey[:], nil
|
return objectKey[:], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *diskCache) GetLockContext(ctx context.Context, bucket, object string) (RWLocker, LockContext, error) {
|
func (c *diskCache) GetLockContext(ctx context.Context, bucket, object string) (RWLocker, LockContext, error) {
|
||||||
cachePath := getCacheSHADir(c.dir, bucket, object)
|
cachePath := getCacheSHADir(c.dir, bucket, object)
|
||||||
cLock := c.NewNSLockFn(cachePath)
|
cLock := c.NewNSLockFn(cachePath)
|
||||||
@ -879,12 +881,12 @@ func (c *diskCache) put(ctx context.Context, bucket, object string, data io.Read
|
|||||||
cachePath = getCacheWriteBackSHADir(c.dir, bucket, object)
|
cachePath = getCacheWriteBackSHADir(c.dir, bucket, object)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := os.MkdirAll(cachePath, 0777); err != nil {
|
if err := os.MkdirAll(cachePath, 0o777); err != nil {
|
||||||
return oi, err
|
return oi, err
|
||||||
}
|
}
|
||||||
var metadata = cloneMSS(opts.UserDefined)
|
metadata := cloneMSS(opts.UserDefined)
|
||||||
var reader = data
|
reader := data
|
||||||
var actualSize = uint64(size)
|
actualSize := uint64(size)
|
||||||
if globalCacheKMS != nil {
|
if globalCacheKMS != nil {
|
||||||
reader, err = newCacheEncryptReader(data, bucket, object, metadata)
|
reader, err = newCacheEncryptReader(data, bucket, object, metadata)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -933,14 +935,14 @@ func (c *diskCache) putRange(ctx context.Context, bucket, object string, data io
|
|||||||
return errDiskFull
|
return errDiskFull
|
||||||
}
|
}
|
||||||
cachePath := getCacheSHADir(c.dir, bucket, object)
|
cachePath := getCacheSHADir(c.dir, bucket, object)
|
||||||
if err := os.MkdirAll(cachePath, 0777); err != nil {
|
if err := os.MkdirAll(cachePath, 0o777); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
var metadata = cloneMSS(opts.UserDefined)
|
metadata := cloneMSS(opts.UserDefined)
|
||||||
var reader = data
|
reader := data
|
||||||
var actualSize = uint64(rlen)
|
actualSize := uint64(rlen)
|
||||||
// objSize is the actual size of object (with encryption overhead if any)
|
// objSize is the actual size of object (with encryption overhead if any)
|
||||||
var objSize = uint64(size)
|
objSize := uint64(size)
|
||||||
if globalCacheKMS != nil {
|
if globalCacheKMS != nil {
|
||||||
reader, err = newCacheEncryptReader(data, bucket, object, metadata)
|
reader, err = newCacheEncryptReader(data, bucket, object, metadata)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1269,12 +1271,12 @@ func (c *diskCache) NewMultipartUpload(ctx context.Context, bucket, object, uID
|
|||||||
|
|
||||||
cachePath := getMultipartCacheSHADir(c.dir, bucket, object)
|
cachePath := getMultipartCacheSHADir(c.dir, bucket, object)
|
||||||
uploadIDDir := path.Join(cachePath, uploadID)
|
uploadIDDir := path.Join(cachePath, uploadID)
|
||||||
if err := os.MkdirAll(uploadIDDir, 0777); err != nil {
|
if err := os.MkdirAll(uploadIDDir, 0o777); err != nil {
|
||||||
return uploadID, err
|
return uploadID, err
|
||||||
}
|
}
|
||||||
metaPath := pathJoin(uploadIDDir, cacheMetaJSONFile)
|
metaPath := pathJoin(uploadIDDir, cacheMetaJSONFile)
|
||||||
|
|
||||||
f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0666)
|
f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0o666)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return uploadID, err
|
return uploadID, err
|
||||||
}
|
}
|
||||||
@ -1331,7 +1333,7 @@ func (c *diskCache) PutObjectPart(ctx context.Context, bucket, object, uploadID
|
|||||||
return oi, errDiskFull
|
return oi, errDiskFull
|
||||||
}
|
}
|
||||||
reader := data
|
reader := data
|
||||||
var actualSize = uint64(size)
|
actualSize := uint64(size)
|
||||||
if globalCacheKMS != nil {
|
if globalCacheKMS != nil {
|
||||||
reader, err = newCachePartEncryptReader(ctx, bucket, object, partID, data, size, meta.Meta)
|
reader, err = newCachePartEncryptReader(ctx, bucket, object, partID, data, size, meta.Meta)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1380,7 +1382,7 @@ func (c *diskCache) SavePartMetadata(ctx context.Context, bucket, object, upload
|
|||||||
defer uploadLock.Unlock(ulkctx.Cancel)
|
defer uploadLock.Unlock(ulkctx.Cancel)
|
||||||
|
|
||||||
metaPath := pathJoin(uploadDir, cacheMetaJSONFile)
|
metaPath := pathJoin(uploadDir, cacheMetaJSONFile)
|
||||||
f, err := os.OpenFile(metaPath, os.O_RDWR, 0666)
|
f, err := os.OpenFile(metaPath, os.O_RDWR, 0o666)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1558,7 +1560,7 @@ func (c *diskCache) CompleteMultipartUpload(ctx context.Context, bucket, object,
|
|||||||
uploadMeta.Hits++
|
uploadMeta.Hits++
|
||||||
metaPath := pathJoin(uploadIDDir, cacheMetaJSONFile)
|
metaPath := pathJoin(uploadIDDir, cacheMetaJSONFile)
|
||||||
|
|
||||||
f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0666)
|
f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0o666)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return oi, err
|
return oi, err
|
||||||
}
|
}
|
||||||
|
@@ -113,7 +113,6 @@ func cacheControlOpts(o ObjectInfo) *cacheControl {
 		if strings.EqualFold(k, "cache-control") {
 			headerVal = v
 		}
-
 	}
 	if headerVal == "" {
 		return nil
@@ -581,6 +580,7 @@ func (t *multiWriter) Write(p []byte) (n int, err error) {
 	}
 	return len(p), nil
 }
+
 func cacheMultiWriter(w1 io.Writer, w2 *io.PipeWriter) io.Writer {
 	return &multiWriter{backendWriter: w1, cacheWriter: w2}
 }
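The last two hunks are gofumpt's empty-line normalization: a blank line that closes out a block is dropped, and a blank line is expected between top-level declarations. A small, self-contained sketch of code already in the shape both rules converge on (the sum function is illustrative, not from MinIO):

package main

import "fmt"

func sum(xs []int) int {
	total := 0 // no blank line permitted directly after the opening brace
	for _, x := range xs {
		total += x
	}
	return total // and none directly before the closing brace
}

func main() {
	fmt.Println(sum([]int{1, 2, 3})) // prints: 6
}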
@@ -38,7 +38,7 @@ func TestGetCacheControlOpts(t *testing.T) {
 		{"max-age=2592000, no-store", timeSentinel, &cacheControl{maxAge: 2592000, sMaxAge: 0, noStore: true, minFresh: 0, expiry: time.Time{}}, false},
 		{"must-revalidate, max-age=600", timeSentinel, &cacheControl{maxAge: 600, sMaxAge: 0, minFresh: 0, expiry: time.Time{}}, false},
 		{"s-maxAge=2500, max-age=600", timeSentinel, &cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Time{}}, false},
-		{"s-maxAge=2500, max-age=600", expiry, &cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Date(2015, time.October, 21, 07, 28, 00, 00, time.UTC)}, false},
+		{"s-maxAge=2500, max-age=600", expiry, &cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Date(2015, time.October, 21, 0o7, 28, 0o0, 0o0, time.UTC)}, false},
 		{"s-maxAge=2500, max-age=600s", timeSentinel, &cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Time{}}, true},
 	}
 
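The only changed line above rewrites 07 and 00 inside time.Date to 0o7 and 0o0. A leading zero already makes an integer literal octal in Go, so the values are identical and the rewrite is purely notational. A quick check:

package main

import (
	"fmt"
	"time"
)

func main() {
	fmt.Println(07 == 0o7, 00 == 0o0) // prints: true true

	before := time.Date(2015, time.October, 21, 07, 28, 00, 00, time.UTC)
	after := time.Date(2015, time.October, 21, 0o7, 28, 0o0, 0o0, time.UTC)
	fmt.Println(before.Equal(after)) // prints: true
}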
@@ -61,7 +61,6 @@ func TestGetCacheControlOpts(t *testing.T) {
 }
 
 func TestIsMetadataSame(t *testing.T) {
-
 	testCases := []struct {
 		m1 map[string]string
 		m2 map[string]string
@@ -148,6 +147,7 @@ func TestNewFileScorer(t *testing.T) {
 		t.Fatal("unexpected file list", fs.queueString())
 	}
 }
+
 func TestBytesToClear(t *testing.T) {
 	testCases := []struct {
 		total int64
@@ -137,7 +137,6 @@ func (c *cacheObjects) incHitsToMeta(ctx context.Context, dcache *diskCache, buc
 
 // Backend metadata could have changed through server side copy - reset cache metadata if that is the case
 func (c *cacheObjects) updateMetadataIfChanged(ctx context.Context, dcache *diskCache, bucket, object string, bkObjectInfo, cacheObjInfo ObjectInfo, rs *HTTPRangeSpec) error {
-
 	bkMeta := make(map[string]string, len(bkObjectInfo.UserDefined))
 	cacheMeta := make(map[string]string, len(cacheObjInfo.UserDefined))
 	for k, v := range bkObjectInfo.UserDefined {
@@ -26,7 +26,6 @@ import (
 )
 
 func TestDynamicTimeoutSingleIncrease(t *testing.T) {
-
 	timeout := newDynamicTimeout(time.Minute, time.Second)
 
 	initial := timeout.Timeout()
@@ -43,7 +42,6 @@ func TestDynamicTimeoutSingleIncrease(t *testing.T) {
 }
 
 func TestDynamicTimeoutDualIncrease(t *testing.T) {
-
 	timeout := newDynamicTimeout(time.Minute, time.Second)
 
 	initial := timeout.Timeout()
@@ -66,7 +64,6 @@ func TestDynamicTimeoutDualIncrease(t *testing.T) {
 }
 
 func TestDynamicTimeoutSingleDecrease(t *testing.T) {
-
 	timeout := newDynamicTimeout(time.Minute, time.Second)
 
 	initial := timeout.Timeout()
@@ -83,7 +80,6 @@ func TestDynamicTimeoutSingleDecrease(t *testing.T) {
 }
 
 func TestDynamicTimeoutDualDecrease(t *testing.T) {
-
 	timeout := newDynamicTimeout(time.Minute, time.Second)
 
 	initial := timeout.Timeout()
@@ -106,7 +102,6 @@ func TestDynamicTimeoutDualDecrease(t *testing.T) {
 }
 
 func TestDynamicTimeoutManyDecreases(t *testing.T) {
-
 	timeout := newDynamicTimeout(time.Minute, time.Second)
 
 	initial := timeout.Timeout()
@@ -116,7 +111,6 @@ func TestDynamicTimeoutManyDecreases(t *testing.T) {
 		for i := 0; i < dynamicTimeoutLogSize; i++ {
 			timeout.LogSuccess(successTimeout)
 		}
-
 	}
 
 	adjusted := timeout.Timeout()
@@ -151,7 +145,6 @@ func TestDynamicTimeoutConcurrent(t *testing.T) {
 }
 
 func TestDynamicTimeoutHitMinimum(t *testing.T) {
-
 	const minimum = 30 * time.Second
 	timeout := newDynamicTimeout(time.Minute, minimum)
 
@@ -172,7 +165,6 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
 }
 
 func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() float64) {
-
 	const successTimeout = 20 * time.Second
 
 	for i := 0; i < dynamicTimeoutLogSize; i++ {
@@ -192,7 +184,6 @@ func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() fl
 }
 
 func TestDynamicTimeoutAdjustExponential(t *testing.T) {
-
 	timeout := newDynamicTimeout(time.Minute, time.Second)
 
 	rand.Seed(0)
@@ -200,9 +191,7 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) {
 	initial := timeout.Timeout()
 
 	for try := 0; try < 10; try++ {
-
 		testDynamicTimeoutAdjust(t, timeout, rand.ExpFloat64)
-
 	}
 
 	adjusted := timeout.Timeout()
@@ -212,7 +201,6 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) {
 }
 
 func TestDynamicTimeoutAdjustNormalized(t *testing.T) {
-
 	timeout := newDynamicTimeout(time.Minute, time.Second)
 
 	rand.Seed(0)
@@ -220,11 +208,9 @@ func TestDynamicTimeoutAdjustNormalized(t *testing.T) {
 	initial := timeout.Timeout()
 
 	for try := 0; try < 10; try++ {
-
 		testDynamicTimeoutAdjust(t, timeout, func() float64 {
 			return 1.0 + rand.NormFloat64()
 		})
-
 	}
 
 	adjusted := timeout.Timeout()
@@ -186,7 +186,7 @@ func rotateKey(oldKey []byte, newKeyID string, newKey []byte, bucket, object str
 		// client provided it. Therefore, we create a copy
 		// of the client provided context and add the bucket
 		// key, if not present.
-		var kmsCtx = kms.Context{}
+		kmsCtx := kms.Context{}
 		for k, v := range ctx {
 			kmsCtx[k] = v
 		}
@@ -253,7 +253,7 @@ func newEncryptMetadata(kind crypto.Type, keyID string, key []byte, bucket, obje
 		// client provided it. Therefore, we create a copy
 		// of the client provided context and add the bucket
 		// key, if not present.
-		var kmsCtx = kms.Context{}
+		kmsCtx := kms.Context{}
 		for k, v := range ctx {
 			kmsCtx[k] = v
 		}
@@ -443,7 +443,6 @@ func newDecryptReaderWithObjectKey(client io.Reader, objectEncryptionKey []byte,
 // DecryptBlocksRequestR - same as DecryptBlocksRequest but with a
 // reader
 func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, seqNumber uint32, partStart int, oi ObjectInfo, copySource bool) (io.Reader, error) {
-
 	bucket, object := oi.Bucket, oi.Name
 	// Single part case
 	if !oi.isMultipart() {
@@ -64,7 +64,6 @@ func TestEncryptRequest(t *testing.T) {
 			req.Header.Set(k, v)
 		}
 		_, _, err := EncryptRequest(content, req, "bucket", "object", test.metadata)
-
 		if err != nil {
 			t.Fatalf("Test %d: Failed to encrypt request: %v", i, err)
 		}
@@ -285,14 +284,13 @@ func TestGetDecryptedRange(t *testing.T) {
 	)
 
 	// Single part object tests
-	var (
-		mkSPObj = func(s int64) ObjectInfo {
+	mkSPObj := func(s int64) ObjectInfo {
 		return ObjectInfo{
 			Size:        getEncSize(s),
 			UserDefined: udMap(false),
-		}
 		}
-	)
+	}
 
 	testSP := []struct {
 		decSz int64
@@ -325,7 +323,7 @@ func TestGetDecryptedRange(t *testing.T) {
 		if err != nil {
 			t.Errorf("Case %d: unexpected err: %v", i, err)
 		}
-		var rLen = pkgSz + 32
+		rLen := pkgSz + 32
 		if test.decSz < pkgSz {
 			rLen = test.decSz + 32
 		}
@@ -341,7 +339,7 @@ func TestGetDecryptedRange(t *testing.T) {
 		if err != nil {
 			t.Errorf("Case %d: unexpected err: %v", i, err)
 		}
-		var rLen = (pkgSz + 32) * 2
+		rLen := (pkgSz + 32) * 2
 		if test.decSz < 2*pkgSz {
 			rLen = (pkgSz + 32) + (test.decSz - pkgSz + 32)
 		}
@@ -356,7 +354,7 @@ func TestGetDecryptedRange(t *testing.T) {
 		if err != nil {
 			t.Errorf("Case %d: unexpected err: %v", i, err)
 		}
-		var rLen = (pkgSz + 32) * 2
+		rLen := (pkgSz + 32) * 2
 		if test.decSz-pkgSz < 2*pkgSz {
 			rLen = (pkgSz + 32) + (test.decSz - pkgSz + 32*2)
 		}
@@ -551,60 +549,90 @@ var getDefaultOptsTests = []struct {
 	encryptionType encrypt.Type
 	err            error
 }{
-	{headers: http.Header{xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
-		xhttp.AmzServerSideEncryptionCustomerKey:    []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
-		xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
+	{
+		headers: http.Header{
+			xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
+			xhttp.AmzServerSideEncryptionCustomerKey:       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
+			xhttp.AmzServerSideEncryptionCustomerKeyMD5:    []string{"7PpPLAK26ONlVUGOWlusfg=="},
+		},
 		copySource:     false,
 		metadata:       nil,
 		encryptionType: encrypt.SSEC,
-		err:            nil}, // 0
+		err:            nil,
+	}, // 0
-	{headers: http.Header{xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
-		xhttp.AmzServerSideEncryptionCustomerKey:    []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
-		xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
+	{
+		headers: http.Header{
+			xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
+			xhttp.AmzServerSideEncryptionCustomerKey:       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
+			xhttp.AmzServerSideEncryptionCustomerKeyMD5:    []string{"7PpPLAK26ONlVUGOWlusfg=="},
+		},
 		copySource:     true,
 		metadata:       nil,
 		encryptionType: "",
-		err:            nil}, // 1
+		err:            nil,
+	}, // 1
-	{headers: http.Header{xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
-		xhttp.AmzServerSideEncryptionCustomerKey:    []string{"Mz"},
-		xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
+	{
+		headers: http.Header{
+			xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
+			xhttp.AmzServerSideEncryptionCustomerKey:       []string{"Mz"},
+			xhttp.AmzServerSideEncryptionCustomerKeyMD5:    []string{"7PpPLAK26ONlVUGOWlusfg=="},
+		},
 		copySource:     false,
 		metadata:       nil,
 		encryptionType: "",
-		err:            crypto.ErrInvalidCustomerKey}, // 2
+		err:            crypto.ErrInvalidCustomerKey,
+	}, // 2
-	{headers: http.Header{xhttp.AmzServerSideEncryption: []string{"AES256"}},
+	{
+		headers:        http.Header{xhttp.AmzServerSideEncryption: []string{"AES256"}},
 		copySource:     false,
 		metadata:       nil,
 		encryptionType: encrypt.S3,
-		err:            nil}, // 3
+		err:            nil,
+	}, // 3
-	{headers: http.Header{},
+	{
+		headers:    http.Header{},
 		copySource: false,
-		metadata: map[string]string{crypto.MetaSealedKeyS3: base64.StdEncoding.EncodeToString(make([]byte, 64)),
-			crypto.MetaKeyID:             "kms-key",
-			crypto.MetaDataEncryptionKey: "m-key"},
+		metadata: map[string]string{
+			crypto.MetaSealedKeyS3:       base64.StdEncoding.EncodeToString(make([]byte, 64)),
+			crypto.MetaKeyID:             "kms-key",
+			crypto.MetaDataEncryptionKey: "m-key",
+		},
 		encryptionType: encrypt.S3,
-		err:            nil}, // 4
+		err:            nil,
+	}, // 4
-	{headers: http.Header{},
+	{
+		headers:    http.Header{},
 		copySource: true,
-		metadata: map[string]string{crypto.MetaSealedKeyS3: base64.StdEncoding.EncodeToString(make([]byte, 64)),
-			crypto.MetaKeyID:             "kms-key",
-			crypto.MetaDataEncryptionKey: "m-key"},
+		metadata: map[string]string{
+			crypto.MetaSealedKeyS3:       base64.StdEncoding.EncodeToString(make([]byte, 64)),
+			crypto.MetaKeyID:             "kms-key",
+			crypto.MetaDataEncryptionKey: "m-key",
+		},
 		encryptionType: "",
-		err:            nil}, // 5
+		err:            nil,
+	}, // 5
-	{headers: http.Header{xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm: []string{"AES256"},
-		xhttp.AmzServerSideEncryptionCopyCustomerKey:    []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
-		xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
+	{
+		headers: http.Header{
+			xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm: []string{"AES256"},
+			xhttp.AmzServerSideEncryptionCopyCustomerKey:       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
+			xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5:    []string{"7PpPLAK26ONlVUGOWlusfg=="},
+		},
 		copySource:     true,
 		metadata:       nil,
 		encryptionType: encrypt.SSEC,
-		err:            nil}, // 6
+		err:            nil,
+	}, // 6
-	{headers: http.Header{xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm: []string{"AES256"},
-		xhttp.AmzServerSideEncryptionCopyCustomerKey:    []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
-		xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
+	{
+		headers: http.Header{
+			xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm: []string{"AES256"},
+			xhttp.AmzServerSideEncryptionCopyCustomerKey:       []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
+			xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5:    []string{"7PpPLAK26ONlVUGOWlusfg=="},
+		},
 		copySource:     false,
 		metadata:       nil,
 		encryptionType: "",
-		err:            nil}, // 7
+		err:            nil,
+	}, // 7
 }
 
 func TestGetDefaultOpts(t *testing.T) {
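The large hunk above is pure composite-literal reformatting: each table entry opens with { on its own line, every field and map key gets its own line, and every last element carries a trailing comma. A hypothetical table in the same style (the type and field names are illustrative, not MinIO's):

package main

import "fmt"

type optsCase struct {
	headers    map[string][]string
	copySource bool
	err        error
}

// One entry per block, one field per line, trailing commas throughout:
// adding or removing a field is then a one-line diff.
var cases = []optsCase{
	{
		headers: map[string][]string{
			"X-Amz-Server-Side-Encryption-Customer-Algorithm": {"AES256"},
		},
		copySource: false,
		err:        nil,
	}, // 0
}

func main() {
	fmt.Println(len(cases)) // prints: 1
}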
@@ -90,7 +90,7 @@ func commonSetDriveCount(divisibleSize uint64, setCounts []uint64) (setSize uint
 // input argument patterns, the symmetry calculation is to ensure that
 // we also use uniform number of drives common across all ellipses patterns.
 func possibleSetCountsWithSymmetry(setCounts []uint64, argPatterns []ellipses.ArgPattern) []uint64 {
-	var newSetCounts = make(map[uint64]struct{})
+	newSetCounts := make(map[uint64]struct{})
 	for _, ss := range setCounts {
 		var symmetry bool
 		for _, argPattern := range argPatterns {
@@ -224,7 +224,7 @@ func (s endpointSet) getEndpoints() (endpoints []string) {
 // this function also intelligently decides on what will
 // be the right set size etc.
 func (s endpointSet) Get() (sets [][]string) {
-	var k = uint64(0)
+	k := uint64(0)
 	endpoints := s.getEndpoints()
 	for i := range s.setIndexes {
 		for j := range s.setIndexes[i] {
@@ -253,7 +253,7 @@ func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 {
 // of endpoints following the ellipses pattern, this is what is used
 // by the object layer for initializing itself.
 func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSet, err error) {
-	var argPatterns = make([]ellipses.ArgPattern, len(args))
+	argPatterns := make([]ellipses.ArgPattern, len(args))
 	for i, arg := range args {
 		patterns, perr := ellipses.FindEllipsesPatterns(arg)
 		if perr != nil {
@@ -332,15 +332,13 @@ const (
 	EnvErasureSetDriveCount = "MINIO_ERASURE_SET_DRIVE_COUNT"
 )
 
-var (
-	globalCustomErasureDriveCount = false
-)
+var globalCustomErasureDriveCount = false
 
 // CreateServerEndpoints - validates and creates new endpoints from input args, supports
 // both ellipses and without ellipses transparently.
 func createServerEndpoints(serverAddr string, args ...string) (
-	endpointServerPools EndpointServerPools, setupType SetupType, err error) {
+	endpointServerPools EndpointServerPools, setupType SetupType, err error,
+) {
 	if len(args) == 0 {
 		return nil, -1, errInvalidArgument
 	}
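Two more patterns appear in this file: a parenthesized var block that declares only a single variable is ungrouped (as the globalCustomErasureDriveCount hunk above suggests), and a wrapped function signature moves its closing parenthesis onto its own line before the opening brace. A minimal sketch of the ungrouping (the identifier is illustrative):

package main

import "fmt"

// Before (grouped, single spec):
//
//	var (
//		globalCustomFlag = false
//	)
//
// After (ungrouped), which is the form the hunk above converges on:
var globalCustomFlag = false

func main() {
	fmt.Println(globalCustomFlag) // prints: false
}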
@@ -72,7 +72,8 @@ func TestGetDivisibleSize(t *testing.T) {
 	testCases := []struct {
 		totalSizes []uint64
 		result     uint64
-	}{{[]uint64{24, 32, 16}, 8},
+	}{
+		{[]uint64{24, 32, 16}, 8},
 		{[]uint64{32, 8, 4}, 4},
 		{[]uint64{8, 8, 8}, 8},
 		{[]uint64{24}, 24},
@@ -168,7 +169,7 @@ func TestGetSetIndexesEnvOverride(t *testing.T) {
 	for _, testCase := range testCases {
 		testCase := testCase
 		t.Run("", func(t *testing.T) {
-			var argPatterns = make([]ellipses.ArgPattern, len(testCase.args))
+			argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
 			for i, arg := range testCase.args {
 				patterns, err := ellipses.FindEllipsesPatterns(arg)
 				if err != nil {
@@ -290,7 +291,7 @@ func TestGetSetIndexes(t *testing.T) {
 	for _, testCase := range testCases {
 		testCase := testCase
 		t.Run("", func(t *testing.T) {
-			var argPatterns = make([]ellipses.ArgPattern, len(testCase.args))
+			argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
 			for i, arg := range testCase.args {
 				patterns, err := ellipses.FindEllipsesPatterns(arg)
 				if err != nil {
@@ -552,8 +553,10 @@ func TestParseEndpointSet(t *testing.T) {
 				},
 			},
 			nil,
-			[][]uint64{{16, 16, 16, 16, 16, 16, 16, 16,
-				16, 16, 16, 16, 16, 16, 16, 16}},
+			[][]uint64{{
+				16, 16, 16, 16, 16, 16, 16, 16,
+				16, 16, 16, 16, 16, 16, 16, 16,
+			}},
 		},
 		true,
 	},
@@ -238,13 +238,18 @@ func TestCreateEndpoints(t *testing.T) {
 		{"localhost:9000", [][]string{{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}}, "", Endpoints{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")},
 
 		// Erasure Setup with PathEndpointType
-		{":1234", [][]string{{"/d1", "/d2", "/d3", "/d4"}}, ":1234",
+		{
+			":1234",
+			[][]string{{"/d1", "/d2", "/d3", "/d4"}},
+			":1234",
 			Endpoints{
 				Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true},
 				Endpoint{URL: &url.URL{Path: mustAbs("/d2")}, IsLocal: true},
 				Endpoint{URL: &url.URL{Path: mustAbs("/d3")}, IsLocal: true},
 				Endpoint{URL: &url.URL{Path: mustAbs("/d4")}, IsLocal: true},
-			}, ErasureSetupType, nil},
+			},
+			ErasureSetupType, nil,
+		},
 		// DistErasure Setup with URLEndpointType
 		{":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}}, ":9000", Endpoints{
 			Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d1"}, IsLocal: true},
@@ -350,12 +355,18 @@ func TestGetLocalPeer(t *testing.T) {
 		expectedResult string
 	}{
 		{[]string{"/d1", "/d2", "d3", "d4"}, "127.0.0.1:9000"},
-		{[]string{"http://localhost:9000/d1", "http://localhost:9000/d2", "http://example.org:9000/d3", "http://example.com:9000/d4"},
-			"localhost:9000"},
-		{[]string{"http://localhost:9000/d1", "http://example.org:9000/d2", "http://example.com:9000/d3", "http://example.net:9000/d4"},
-			"localhost:9000"},
-		{[]string{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://localhost:9002/d3", "http://localhost:9003/d4"},
-			"localhost:9000"},
+		{
+			[]string{"http://localhost:9000/d1", "http://localhost:9000/d2", "http://example.org:9000/d3", "http://example.com:9000/d4"},
+			"localhost:9000",
+		},
+		{
+			[]string{"http://localhost:9000/d1", "http://example.org:9000/d2", "http://example.com:9000/d3", "http://example.net:9000/d4"},
+			"localhost:9000",
+		},
+		{
+			[]string{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://localhost:9002/d3", "http://localhost:9003/d4"},
+			"localhost:9000",
+		},
 	}
 
 	for i, testCase := range testCases {
@@ -91,7 +91,7 @@ func (er erasureObjects) getBucketInfo(ctx context.Context, bucketName string) (
 	storageDisks := er.getDisks()
 
 	g := errgroup.WithNErrs(len(storageDisks))
-	var bucketsInfo = make([]BucketInfo, len(storageDisks))
+	bucketsInfo := make([]BucketInfo, len(storageDisks))
 	// Undo previous make bucket entry on all underlying storage disks.
 	for index := range storageDisks {
 		index := index
@@ -94,7 +94,7 @@ func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error)
 // It only decodes the data blocks but does not verify them.
 // It returns an error if the decoding failed.
 func (e *Erasure) DecodeDataBlocks(data [][]byte) error {
-	var isZero = 0
+	isZero := 0
 	for _, b := range data {
 		if len(b) == 0 {
 			isZero++
@@ -50,7 +50,7 @@ func (er erasureObjects) getLoadBalancedDisks(optimized bool) []StorageAPI {
 
 	var wg sync.WaitGroup
 	var mu sync.Mutex
-	var newDisks = map[uint64][]StorageAPI{}
+	newDisks := map[uint64][]StorageAPI{}
 	// Based on the random shuffling return back randomized disks.
 	for _, i := range hashOrder(UTCNow().String(), len(disks)) {
 		i := i
@@ -226,8 +226,8 @@ func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, errs []err
 // a not-found error or a hash-mismatch error.
 func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []FileInfo,
 	errs []error, latestMeta FileInfo, bucket, object string,
-	scanMode madmin.HealScanMode) ([]StorageAPI, []error, time.Time) {
+	scanMode madmin.HealScanMode) ([]StorageAPI, []error, time.Time,
+) {
 	var diskMTime time.Time
 	var shardFix bool
 	if !latestMeta.DataShardFixed() {
@@ -811,8 +811,8 @@ func isObjectDirDangling(errs []error) (ok bool) {
 }
 
 func (er erasureObjects) purgeObjectDangling(ctx context.Context, bucket, object, versionID string,
-	metaArr []FileInfo, errs []error, dataErrs []error, opts madmin.HealOpts) (madmin.HealResultItem, error) {
+	metaArr []FileInfo, errs []error, dataErrs []error, opts madmin.HealOpts) (madmin.HealResultItem, error,
+) {
 	storageDisks := er.getDisks()
 	storageEndpoints := er.getEndpoints()
 	// Check if the object is dangling, if yes and user requested
@@ -87,9 +87,13 @@ func TestReduceErrs(t *testing.T) {
 			errDiskNotFound,
 		}, []error{errDiskNotFound}, errVolumeNotFound},
 		{[]error{}, []error{}, errErasureReadQuorum},
-		{[]error{errFileNotFound, errFileNotFound, errFileNotFound,
-			errFileNotFound, errFileNotFound, nil, nil, nil, nil, nil},
-			nil, nil},
+		{
+			[]error{
+				errFileNotFound, errFileNotFound, errFileNotFound,
+				errFileNotFound, errFileNotFound, nil, nil, nil, nil, nil,
+			},
+			nil, nil,
+		},
 		// Checks if wrapped context cancelation errors are grouped as one.
 		{canceledErrs, nil, context.Canceled},
 	}
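disksWithAllParts and purgeObjectDangling above show the signature rewrap in isolation: when a result list is wrapped across lines, the final result gains a trailing comma and the closing `) {` moves to its own line, marking where the signature ends and the body begins. A compilable sketch of the same shape (the describe function is hypothetical, not from MinIO):

package main

import "fmt"

func describe(disks []string, bucket, object string) (count int, summary string,
) {
	return len(disks), bucket + "/" + object
}

func main() {
	n, s := describe([]string{"d1", "d2"}, "bucket", "object")
	fmt.Println(n, s) // prints: 2 bucket/object
}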
@@ -820,7 +820,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadataByIndex(onlineDisks, partsMetadata, fi)
 
 	// Save current erasure metadata for validation.
-	var currentFI = fi
+	currentFI := fi
 
 	// Allocate parts similar to incoming slice.
 	fi.Parts = make([]ObjectPartInfo, len(parts))
@@ -145,7 +145,7 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
 // Read(Closer). When err != nil, the returned reader is always nil.
 func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
 	var unlockOnDefer bool
-	var nsUnlocker = func() {}
+	nsUnlocker := func() {}
 	defer func() {
 		if unlockOnDefer {
 			nsUnlocker()
@@ -475,7 +475,6 @@ func (er erasureObjects) getObjectInfo(ctx context.Context, bucket, object strin
 	fi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts, false)
 	if err != nil {
 		return objInfo, toObjectErr(err, bucket, object)
-
 	}
 	objInfo = fi.ToObjectInfo(bucket, object)
 	if opts.VersionID != "" && !fi.VersionPurgeStatus().Empty() {
@@ -1177,7 +1176,7 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec
 	}
 
 	// Initialize list of errors.
-	var delObjErrs = make([][]error, len(storageDisks))
+	delObjErrs := make([][]error, len(storageDisks))
 
 	var wg sync.WaitGroup
 	// Remove versions in bulk for each disk
@@ -1820,6 +1819,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
 		})
 	}
 	_, err = er.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{
-		MTime: oi.ModTime})
+		MTime: oi.ModTime,
+	})
 	return setRestoreHeaderFn(oi, err)
 }
@@ -505,7 +505,6 @@ func TestGetObjectNoQuorum(t *testing.T) {
 			gr.Close()
 		}
 	}
-
 }
 
 func TestHeadObjectNoQuorum(t *testing.T) {
@@ -243,7 +243,7 @@ func (z *erasureServerPools) getAvailablePoolIdx(ctx context.Context, bucket, ob
 // If there is not enough space the pool will return 0 bytes available.
 // Negative sizes are seen as 0 bytes.
 func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, bucket, object string, size int64) serverPoolsAvailableSpace {
-	var serverPools = make(serverPoolsAvailableSpace, len(z.serverPools))
+	serverPools := make(serverPoolsAvailableSpace, len(z.serverPools))
 
 	storageInfos := make([][]*DiskInfo, len(z.serverPools))
 	g := errgroup.WithNErrs(len(z.serverPools))
@@ -659,7 +659,6 @@ func (z *erasureServerPools) MakeBucketWithLocation(ctx context.Context, bucket
 
 	// Success.
 	return nil
-
 }
 
 func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
@@ -674,7 +673,7 @@ func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object
 	}
 
 	var unlockOnDefer bool
-	var nsUnlocker = func() {}
+	nsUnlocker := func() {}
 	defer func() {
 		if unlockOnDefer {
 			nsUnlocker()
@@ -1168,7 +1167,7 @@ func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, p
 		return z.serverPools[0].ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
 	}
 
-	var poolResult = ListMultipartsInfo{}
+	poolResult := ListMultipartsInfo{}
 	poolResult.MaxUploads = maxUploads
 	poolResult.KeyMarker = keyMarker
 	poolResult.Prefix = prefix
@@ -1287,7 +1286,6 @@ func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, objec
 		Object:   object,
 		UploadID: uploadID,
 	}
-
 }
 
 // ListObjectParts - lists all uploaded parts to an object in hashedSet.
@@ -1529,7 +1527,7 @@ func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmi
 	ctx = lkctx.Context()
 	defer formatLock.Unlock(lkctx.Cancel)
 
-	var r = madmin.HealResultItem{
+	r := madmin.HealResultItem{
 		Type:   madmin.HealItemMetadata,
 		Detail: "disk-format",
 	}
@@ -1561,7 +1559,7 @@ func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmi
 }
 
 func (z *erasureServerPools) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
-	var r = madmin.HealResultItem{
+	r := madmin.HealResultItem{
 		Type:   madmin.HealItemBucket,
 		Bucket: bucket,
 	}
@@ -206,7 +206,7 @@ func (s *erasureSets) connectDisks() {
 	}()
 
 	var wg sync.WaitGroup
-	var setsJustConnected = make([]bool, s.setCount)
+	setsJustConnected := make([]bool, s.setCount)
 	diskMap := s.getDiskMap()
 	for _, endpoint := range s.endpoints {
 		if isEndpointConnectionStable(diskMap, endpoint, s.lastConnectDisksOpTime) {
@@ -398,7 +398,7 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
 		s.erasureDisks[i] = make([]StorageAPI, setDriveCount)
 	}
 
-	var erasureLockers = map[string]dsync.NetLocker{}
+	erasureLockers := map[string]dsync.NetLocker{}
 	for _, endpoint := range endpoints {
 		if _, ok := erasureLockers[endpoint.Host]; !ok {
 			erasureLockers[endpoint.Host] = newLockAPI(endpoint)
@@ -406,7 +406,7 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
 	}
 
 	for i := 0; i < setCount; i++ {
-		var lockerEpSet = set.NewStringSet()
+		lockerEpSet := set.NewStringSet()
 		for j := 0; j < setDriveCount; j++ {
 			endpoint := endpoints[i*setDriveCount+j]
 			// Only add lockers per endpoint.
@@ -865,7 +865,7 @@ func undoDeleteBucketSets(ctx context.Context, bucket string, sets []*erasureObj
 // that all buckets are present on all sets.
 func (s *erasureSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
 	var listBuckets []BucketInfo
-	var healBuckets = map[string]VolInfo{}
+	healBuckets := map[string]VolInfo{}
 	for _, set := range s.sets {
 		// lists all unique buckets across drives.
 		if err := listAllBuckets(ctx, set.getDisks(), healBuckets, s.defaultParityCount); err != nil {
@@ -958,13 +958,13 @@ func (s *erasureSets) DeleteObjects(ctx context.Context, bucket string, objects
 	}
 
 	// The result of delete operation on all passed objects
-	var delErrs = make([]error, len(objects))
+	delErrs := make([]error, len(objects))
 
 	// The result of delete objects
-	var delObjects = make([]DeletedObject, len(objects))
+	delObjects := make([]DeletedObject, len(objects))
 
 	// A map between a set and its associated objects
-	var objSetMap = make(map[int][]delObj)
+	objSetMap := make(map[int][]delObj)
 
 	// Group objects by set index
 	for i, object := range objects {
@@ -1147,7 +1147,7 @@ func formatsToDrivesInfo(endpoints Endpoints, formats []*formatErasureV3, sErrs
 	// result, also populate disks to be healed.
 	for i, format := range formats {
 		drive := endpoints.GetString(i)
-		var state = madmin.DriveStateCorrupt
+		state := madmin.DriveStateCorrupt
 		switch {
 		case format != nil:
 			state = madmin.DriveStateOk
@@ -1274,7 +1274,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
 	newFormatSets := newHealFormatSets(refFormat, s.setCount, s.setDriveCount, formats, sErrs)
 
 	if !dryRun {
-		var tmpNewFormats = make([]*formatErasureV3, s.setCount*s.setDriveCount)
+		tmpNewFormats := make([]*formatErasureV3, s.setCount*s.setDriveCount)
 		for i := range newFormatSets {
 			for j := range newFormatSets[i] {
 				if newFormatSets[i][j] == nil {
@@ -162,7 +162,7 @@ func TestNewErasureSets(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	var nDisks = 16 // Maximum disks.
+	nDisks := 16 // Maximum disks.
 	var erasureDisks []string
 	for i := 0; i < nDisks; i++ {
 		// Do not attempt to create this path, the test validates
@@ -417,7 +417,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
 	saverWg.Add(1)
 	go func() {
 		// Add jitter to the update time so multiple sets don't sync up.
-		var updateTime = 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
+		updateTime := 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
 		t := time.NewTicker(updateTime)
 		defer t.Stop()
 		defer saverWg.Done()
@@ -77,9 +77,9 @@ type formatCacheVersionDetect struct {
 // Return a slice of format, to be used to format uninitialized disks.
 func newFormatCacheV2(drives []string) []*formatCacheV2 {
 	diskCount := len(drives)
-	var disks = make([]string, diskCount)
+	disks := make([]string, diskCount)
 
-	var formats = make([]*formatCacheV2, diskCount)
+	formats := make([]*formatCacheV2, diskCount)
 
 	for i := 0; i < diskCount; i++ {
 		format := &formatCacheV2{}
@@ -110,7 +110,7 @@ func formatCacheGetVersion(r io.ReadSeeker) (string, error) {
 // Creates a new cache format.json if unformatted.
 func createFormatCache(fsFormatPath string, format *formatCacheV1) error {
 	// open file using READ & WRITE permission
-	var file, err = os.OpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0600)
+	file, err := os.OpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0o600)
 	if err != nil {
 		return err
 	}
@@ -133,7 +133,7 @@ func createFormatCache(fsFormatPath string, format *formatCacheV1) error {
 func initFormatCache(ctx context.Context, drives []string) (formats []*formatCacheV2, err error) {
 	nformats := newFormatCacheV2(drives)
 	for i, drive := range drives {
-		if err = os.MkdirAll(pathJoin(drive, minioMetaBucket), 0777); err != nil {
+		if err = os.MkdirAll(pathJoin(drive, minioMetaBucket), 0o777); err != nil {
 			logger.GetReqInfo(ctx).AppendTags("drive", drive)
 			logger.LogIf(ctx, err)
 			return nil, err
@@ -156,7 +156,6 @@ func loadFormatCache(ctx context.Context, drives []string) ([]*formatCacheV2, bo
 	for i, drive := range drives {
 		cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile)
 		f, err := os.OpenFile(cacheFormatPath, os.O_RDWR, 0)
-
 		if err != nil {
 			if osIsNotExist(err) {
 				continue
@@ -232,7 +231,7 @@ func checkFormatCacheValues(migrating bool, formats []*formatCacheV2) (int, erro
 // checkCacheDisksConsistency - checks if "This" disk uuid on each disk is consistent with all "Disks" slices
 // across disks.
 func checkCacheDiskConsistency(formats []*formatCacheV2) error {
-	var disks = make([]string, len(formats))
+	disks := make([]string, len(formats))
 	// Collect currently available disk uuids.
 	for index, format := range formats {
 		if format == nil {
@@ -413,7 +412,7 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
 		object = strings.TrimSuffix(object, "/")
 
 		destdir := getCacheSHADir(c.dir, bucket, object)
-		if err := os.MkdirAll(destdir, 0777); err != nil {
+		if err := os.MkdirAll(destdir, 0o777); err != nil {
 			return err
 		}
 		prevCachedPath := path.Join(c.dir, bucket, object)
@@ -427,7 +426,7 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
 		}
 		// marshal cache metadata after adding version and stat info
 		meta := &cacheMeta{}
-		var json = jsoniter.ConfigCompatibleWithStandardLibrary
+		json := jsoniter.ConfigCompatibleWithStandardLibrary
 		if err = json.Unmarshal(metaBytes, &meta); err != nil {
 			return err
 		}
@@ -459,7 +458,7 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
 			return err
 		}
 
-		if err = ioutil.WriteFile(metaPath, jsonData, 0644); err != nil {
+		if err = ioutil.WriteFile(metaPath, jsonData, 0o644); err != nil {
 			return err
 		}
 	}
@@ -475,7 +474,6 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
 	removeAll(path.Join(c.dir, minioMetaBucket, "buckets"))
 
 	return migrateCacheFormatJSON(cacheFormatPath)
-
 }
 
 func migrateCacheFormatJSON(cacheFormatPath string) error {
@ -245,7 +245,7 @@ func genFormatCacheInvalidDisksOrder() []*formatCacheV2 {
|
|||||||
formatConfigs[index] = format
|
formatConfigs[index] = format
|
||||||
}
|
}
|
||||||
// Re order disks for failure case.
|
// Re order disks for failure case.
|
||||||
var disks1 = make([]string, 8)
|
disks1 := make([]string, 8)
|
||||||
copy(disks1, disks)
|
copy(disks1, disks)
|
||||||
disks1[1], disks1[2] = disks[2], disks[1]
|
disks1[1], disks1[2] = disks[2], disks[1]
|
||||||
formatConfigs[2].Cache.Disks = disks1
|
formatConfigs[2].Cache.Disks = disks1
|
||||||
|
@ -242,7 +242,7 @@ func formatErasureMigrateV1ToV2(export, version string) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return ioutil.WriteFile(formatPath, b, 0666)
|
return ioutil.WriteFile(formatPath, b, 0o666)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Migrates V2 for format.json to V3 (Flat hierarchy for multipart)
|
// Migrates V2 for format.json to V3 (Flat hierarchy for multipart)
|
||||||
@ -266,7 +266,7 @@ func formatErasureMigrateV2ToV3(export, version string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = mkdirAll(pathJoin(export, minioMetaMultipartBucket), 0755); err != nil {
|
if err = mkdirAll(pathJoin(export, minioMetaMultipartBucket), 0o755); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -284,12 +284,12 @@ func formatErasureMigrateV2ToV3(export, version string) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return ioutil.WriteFile(formatPath, b, 0666)
|
return ioutil.WriteFile(formatPath, b, 0o666)
|
||||||
}
|
}
|
||||||
|
|
||||||
// countErrs - count a specific error.
|
// countErrs - count a specific error.
|
||||||
func countErrs(errs []error, err error) int {
|
func countErrs(errs []error, err error) int {
|
||||||
var i = 0
|
i := 0
|
||||||
for _, err1 := range errs {
|
for _, err1 := range errs {
|
||||||
if err1 == err {
|
if err1 == err {
|
||||||
i++
|
i++
|
||||||
@@ -314,7 +314,7 @@ func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasur
 	g := errgroup.WithNErrs(len(storageDisks))
 
 	// Initialize format configs.
-	var formats = make([]*formatErasureV3, len(storageDisks))
+	formats := make([]*formatErasureV3, len(storageDisks))
 
 	// Load format from each disk in parallel
 	for index := range storageDisks {
@@ -534,7 +534,6 @@ func formatErasureFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI
 	// Deployment ID needs to be set on all the disks.
 	// Save `format.json` across all disks.
 	return saveFormatErasureAll(GlobalContext, storageDisks, formats)
-
 }
 
 // Update only the valid local disks which have not been updated before.
@@ -662,7 +661,6 @@ func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) e
 
 // Initializes meta volume only on local storage disks.
 func initErasureMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatErasureV3) error {
-
 	// Compute the local disks eligible for meta volumes (re)initialization
 	disksToInit := make([]StorageAPI, 0, len(storageDisks))
 	for index := range storageDisks {
@@ -811,7 +809,6 @@ func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats
 		}
 	}
 	return nil
-
 }
 
 // initFormatErasure - save Erasure format configuration on all disks.
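Several of the hunks above shrink a function by exactly one line (-7 +6 and similar) because gofumpt deletes empty lines that sit directly after an opening brace or directly before a closing brace. A short sketch of the shape gofumpt enforces:

package main

import "fmt"

// sum has no blank line right after { or right before };
// gofumpt would delete such padding lines if present.
func sum(xs []int) (total int) {
	for _, x := range xs {
		total += x
	}
	return total
}

func main() {
	fmt.Println(sum([]int{1, 2, 3})) // 6
}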
@@ -124,11 +124,11 @@ func TestFormatErasureMigrate(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if err = os.MkdirAll(pathJoin(rootPath, minioMetaBucket), os.FileMode(0755)); err != nil {
+	if err = os.MkdirAll(pathJoin(rootPath, minioMetaBucket), os.FileMode(0o755)); err != nil {
 		t.Fatal(err)
 	}
 
-	if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0644)); err != nil {
+	if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
 		t.Fatal(err)
 	}
 
@@ -175,7 +175,7 @@ func TestFormatErasureMigrate(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0644)); err != nil {
+	if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
 		t.Fatal(err)
 	}
 
@@ -195,7 +195,7 @@ func TestFormatErasureMigrate(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0644)); err != nil {
+	if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
 		t.Fatal(err)
 	}
 
@@ -113,7 +113,7 @@ func formatFSMigrateV1ToV2(ctx context.Context, wlk *lock.LockedFile, fsPath str
 		return err
 	}
 
-	if err = os.MkdirAll(path.Join(fsPath, minioMetaMultipartBucket), 0755); err != nil {
+	if err = os.MkdirAll(path.Join(fsPath, minioMetaMultipartBucket), 0o755); err != nil {
 		return err
 	}
 
@@ -165,7 +165,7 @@ func formatFSMigrate(ctx context.Context, wlk *lock.LockedFile, fsPath string) e
 func createFormatFS(fsFormatPath string) error {
 	// Attempt a write lock on formatConfigFile `format.json`
 	// file stored in minioMetaBucket(.minio.sys) directory.
-	lk, err := lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0600)
+	lk, err := lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0o600)
 	if err != nil {
 		return err
 	}
@@ -123,7 +123,7 @@ func fsMkdir(ctx context.Context, dirPath string) (err error) {
 		return err
 	}
 
-	if err = os.Mkdir((dirPath), 0777); err != nil {
+	if err = os.Mkdir((dirPath), 0o777); err != nil {
 		switch {
 		case osIsExist(err):
 			return errVolumeExists
@@ -309,7 +309,7 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, falloc
 		return 0, err
 	}
 
-	if err := mkdirAll(pathutil.Dir(filePath), 0777); err != nil {
+	if err := mkdirAll(pathutil.Dir(filePath), 0o777); err != nil {
 		switch {
 		case osIsPermission(err):
 			return 0, errFileAccessDenied
@@ -329,7 +329,7 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, falloc
 	if globalFSOSync {
 		flags |= os.O_SYNC
 	}
-	writer, err := lock.Open(filePath, flags, 0666)
+	writer, err := lock.Open(filePath, flags, 0o666)
 	if err != nil {
 		return 0, osErrToFileErr(err)
 	}
@@ -75,7 +75,7 @@ func TestFSStats(t *testing.T) {
 		t.Fatalf("Unable to create volume, %s", err)
 	}
 
-	var reader = bytes.NewReader([]byte("Hello, world"))
+	reader := bytes.NewReader([]byte("Hello, world"))
 	if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, 0); err != nil {
 		t.Fatalf("Unable to create file, %s", err)
 	}
@@ -201,7 +201,7 @@ func TestFSCreateAndOpen(t *testing.T) {
 		t.Fatal("Unexpected error", err)
 	}
 
-	var reader = bytes.NewReader([]byte("Hello, world"))
+	reader := bytes.NewReader([]byte("Hello, world"))
 	if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, 0); err != nil {
 		t.Fatalf("Unable to create file, %s", err)
 	}
@@ -259,7 +259,7 @@ func TestFSDeletes(t *testing.T) {
 		t.Fatalf("Unable to create directory, %s", err)
 	}
 
-	var reader = bytes.NewReader([]byte("Hello, world"))
+	reader := bytes.NewReader([]byte("Hello, world"))
 	if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, reader.Size()); err != nil {
 		t.Fatalf("Unable to create file, %s", err)
 	}
@@ -271,7 +271,7 @@ func TestFSDeletes(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	err = ioutil.WriteFile(pathJoin(path, "success-vol", "not-empty", "file"), []byte("data"), 0777)
+	err = ioutil.WriteFile(pathJoin(path, "success-vol", "not-empty", "file"), []byte("data"), 0o777)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -368,7 +368,7 @@ func BenchmarkFSDeleteFile(b *testing.B) {
 	// We need to create and delete the file sequentially inside the benchmark.
 	for i := 0; i < b.N; i++ {
 		b.StopTimer()
-		err = ioutil.WriteFile(filename, []byte("data"), 0777)
+		err = ioutil.WriteFile(filename, []byte("data"), 0o777)
 		if err != nil {
 			b.Fatal(err)
 		}
@@ -395,7 +395,7 @@ func TestFSRemoves(t *testing.T) {
 		t.Fatalf("Unable to create directory, %s", err)
 	}
 
-	var reader = bytes.NewReader([]byte("Hello, world"))
+	reader := bytes.NewReader([]byte("Hello, world"))
 	if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, 0); err != nil {
 		t.Fatalf("Unable to create file, %s", err)
 	}
@@ -514,7 +514,7 @@ func TestFSRemoveMeta(t *testing.T) {
 
 	filePath := pathJoin(fsPath, "success-vol", "success-file")
 
-	var reader = bytes.NewReader([]byte("Hello, world"))
+	reader := bytes.NewReader([]byte("Hello, world"))
 	if _, err = fsCreateFile(GlobalContext, filePath, reader, 0); err != nil {
 		t.Fatalf("Unable to create file, %s", err)
 	}
@@ -556,7 +556,7 @@ func TestFSIsFile(t *testing.T) {
 
 	filePath := pathJoin(dirPath, "tmpfile")
 
-	if err = ioutil.WriteFile(filePath, nil, 0777); err != nil {
+	if err = ioutil.WriteFile(filePath, nil, 0o777); err != nil {
 		t.Fatalf("Unable to create file %s", filePath)
 	}
 
@@ -91,7 +91,7 @@ func (c *FSChecksumInfoV1) UnmarshalJSON(data []byte) error {
 	}
 
 	var info checksuminfo
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	err := json.Unmarshal(data, &info)
 	if err != nil {
 		return err
@@ -230,7 +230,7 @@ func (m *fsMetaV1) ReadFrom(ctx context.Context, lk *lock.LockedFile) (n int64,
 		return 0, io.EOF
 	}
 
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	if err = json.Unmarshal(fsMetaBuf, m); err != nil {
 		return 0, err
 	}
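A note on the recurring `json := jsoniter.ConfigCompatibleWithStandardLibrary` lines: the local variable is deliberately named `json` so that the `json.Unmarshal` calls that follow dispatch to the jsoniter configuration rather than to the standard library; the gofumpt change only swaps `var json = ...` for `:=` and leaves that shadowing idiom intact. A minimal sketch of the idiom, assuming the github.com/json-iterator/go dependency is available:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// In files that also import encoding/json, this local name
	// shadows the package, routing json.Unmarshal to jsoniter.
	json := jsoniter.ConfigCompatibleWithStandardLibrary

	var v struct {
		Name string `json:"name"`
	}
	if err := json.Unmarshal([]byte(`{"name":"minio"}`), &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Name) // minio
}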
@@ -224,7 +224,7 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
 	uploadID := mustGetUUID()
 	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
 
-	err := mkdirAll(uploadIDDir, 0755)
+	err := mkdirAll(uploadIDDir, 0o755)
 	if err != nil {
 		logger.LogIf(ctx, err)
 		return "", err
@@ -240,7 +240,7 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
 		return "", err
 	}
 
-	if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0666); err != nil {
+	if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0o666); err != nil {
 		logger.LogIf(ctx, err)
 		return "", err
 	}
@@ -252,8 +252,8 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
 // object. Internally incoming data is written to '.minio.sys/tmp' location
 // and safely renamed to '.minio.sys/multipart' for reach parts.
 func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
-	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
+	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error,
+) {
 	if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID {
 		return pi, VersionNotFound{
 			Bucket: srcBucket,
@@ -397,7 +397,7 @@ func (fs *FSObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploa
 	}
 
 	var fsMeta fsMetaV1
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	if err = json.Unmarshal(fsMetaBytes, &fsMeta); err != nil {
 		return minfo, toObjectErr(err, bucket, object)
 	}
@@ -526,7 +526,7 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
 	}
 
 	var fsMeta fsMetaV1
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	if err = json.Unmarshal(fsMetaBytes, &fsMeta); err != nil {
 		return result, err
 	}
@@ -542,7 +542,6 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
 //
 // Implements S3 compatible Complete multipart API.
 func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) {
-
 	var actualSize int64
 
 	if err := checkCompleteMultipartArgs(ctx, bucket, object, fs); err != nil {
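The CopyObjectPart hunk above shows how gofumpt formats a signature whose parameter list is wrapped across lines: the closing parenthesis of the parameter and result lists moves to its own line, producing the bare `) {` line on the + side. A standalone sketch with a hypothetical function, not taken from the codebase:

package main

import "fmt"

// copyRange is an invented example of the wrapped-signature style:
// continuation parameters are indented once, and the closing
// parenthesis sits on its own line before the function body.
func copyRange(srcBucket, srcObject, dstBucket, dstObject string,
	offset, length int64,
) (copied int64, err error) {
	if length < 0 {
		return 0, fmt.Errorf("negative length %d", length)
	}
	return length, nil
}

func main() {
	n, err := copyRange("src", "a", "dst", "b", 0, 42)
	fmt.Println(n, err) // 42 <nil>
}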
@@ -148,7 +148,7 @@ func (fsi *fsIOPool) Write(path string) (wlk *lock.LockedFile, err error) {
 		return nil, err
 	}
 
-	wlk, err = lock.LockedOpenFile(path, os.O_RDWR, 0666)
+	wlk, err = lock.LockedOpenFile(path, os.O_RDWR, 0o666)
 	if err != nil {
 		switch {
 		case osIsNotExist(err):
@@ -175,12 +175,12 @@ func (fsi *fsIOPool) Create(path string) (wlk *lock.LockedFile, err error) {
 	}
 
 	// Creates parent if missing.
-	if err = mkdirAll(pathutil.Dir(path), 0777); err != nil {
+	if err = mkdirAll(pathutil.Dir(path), 0o777); err != nil {
 		return nil, err
 	}
 
 	// Attempt to create the file.
-	wlk, err = lock.LockedOpenFile(path, os.O_RDWR|os.O_CREATE, 0666)
+	wlk, err = lock.LockedOpenFile(path, os.O_RDWR|os.O_CREATE, 0o666)
 	if err != nil {
 		switch {
 		case osIsPermission(err):
@@ -220,7 +220,6 @@ func (fsi *fsIOPool) Close(path string) error {
 
 	// If the file is closed, remove it from the reader pool map.
 	if rlkFile.IsClosed() {
-
 		// Purge the cached lock path from map.
 		delete(fsi.readersMap, path)
 	}
@@ -110,5 +110,4 @@ func TestRWPool(t *testing.T) {
 			t.Fatal("Unexpected error", err)
 		}
 	}
-
 }
cmd/fs-v1.go (21 changed lines)
@@ -97,22 +97,21 @@ func initMetaVolumeFS(fsPath, fsUUID string) error {
 	// if it doesn't exist yet.
 	metaBucketPath := pathJoin(fsPath, minioMetaBucket)
 
-	if err := os.MkdirAll(metaBucketPath, 0777); err != nil {
+	if err := os.MkdirAll(metaBucketPath, 0o777); err != nil {
 		return err
 	}
 
 	metaTmpPath := pathJoin(fsPath, minioMetaTmpBucket, fsUUID)
-	if err := os.MkdirAll(metaTmpPath, 0777); err != nil {
+	if err := os.MkdirAll(metaTmpPath, 0o777); err != nil {
 		return err
 	}
 
-	if err := os.MkdirAll(pathJoin(fsPath, dataUsageBucket), 0777); err != nil {
+	if err := os.MkdirAll(pathJoin(fsPath, dataUsageBucket), 0o777); err != nil {
 		return err
 	}
 
 	metaMultipartPath := pathJoin(fsPath, minioMetaMultipartBucket)
-	return os.MkdirAll(metaMultipartPath, 0777)
-
+	return os.MkdirAll(metaMultipartPath, 0o777)
 }
 
 // NewFSObjectLayer - initialize new fs object layer.
@@ -366,7 +365,7 @@ func (fs *FSObjects) scanBucket(ctx context.Context, bucket string, cache dataUs
 		fsMeta := newFSMetaV1()
 		metaOk := false
 		if len(fsMetaBytes) > 0 {
-			var json = jsoniter.ConfigCompatibleWithStandardLibrary
+			json := jsoniter.ConfigCompatibleWithStandardLibrary
 			if err = json.Unmarshal(fsMetaBytes, &fsMeta); err == nil {
 				metaOk = true
 			}
@@ -474,7 +473,7 @@ func (fs *FSObjects) SetBucketPolicy(ctx context.Context, bucket string, p *poli
 		return err
 	}
 
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	configData, err := json.Marshal(p)
 	if err != nil {
 		return err
@@ -542,7 +541,7 @@ func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
 			// Ignore any errors returned here.
 			continue
 		}
-		var created = fi.ModTime()
+		created := fi.ModTime()
 		meta, err := globalBucketMetadataSys.Get(fi.Name())
 		if err == nil {
 			created = meta.Created
@@ -705,7 +704,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
 		return nil, toObjectErr(err, bucket)
 	}
 
-	var nsUnlocker = func() {}
+	nsUnlocker := func() {}
 
 	if lockType != noLock {
 		// Lock the object before reading.
@@ -843,7 +842,7 @@ func (fs *FSObjects) getObjectInfoNoFSLock(ctx context.Context, bucket, object s
 		fsMetaBuf, rerr := ioutil.ReadAll(rc)
 		rc.Close()
 		if rerr == nil {
-			var json = jsoniter.ConfigCompatibleWithStandardLibrary
+			json := jsoniter.ConfigCompatibleWithStandardLibrary
 			if rerr = json.Unmarshal(fsMetaBuf, &fsMeta); rerr != nil {
 				// For any error to read fsMeta, set default ETag and proceed.
 				fsMeta = fs.defaultFsJSON(object)
@@ -1029,7 +1028,7 @@ func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string
 	// with a slash separator, we treat it like a valid operation
 	// and return success.
 	if isObjectDir(object, data.Size()) {
-		if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil {
+		if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0o777); err != nil {
 			logger.LogIf(ctx, err)
 			return ObjectInfo{}, toObjectErr(err, bucket, object)
 		}
@@ -205,7 +205,6 @@ func TestFSDeleteObject(t *testing.T) {
 			t.Fatal("Unexpected error: ", err)
 		}
 	}
-
 }
 
 // TestFSDeleteBucket - tests for fs DeleteBucket
@@ -263,7 +262,7 @@ func TestFSListBuckets(t *testing.T) {
 	}
 
 	// Create a bucket with invalid name
-	if err := os.MkdirAll(pathJoin(fs.fsPath, "vo^"), 0777); err != nil {
+	if err := os.MkdirAll(pathJoin(fs.fsPath, "vo^"), 0o777); err != nil {
 		t.Fatal("Unexpected error: ", err)
 	}
 	f, err := os.Create(pathJoin(fs.fsPath, "test"))
@@ -130,7 +130,6 @@ func FromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) Li
 		CommonPrefixes: commonPrefixes,
 		EncodingType:   lmur.EncodingType,
 	}
-
 }
 
 // FromMinioClientObjectInfo converts minio ObjectInfo to gateway ObjectInfo
@@ -37,14 +37,12 @@ import (
 	"github.com/minio/pkg/env"
 )
 
-var (
-	gatewayCmd = cli.Command{
-		Name:            "gateway",
-		Usage:           "start object storage gateway",
-		Flags:           append(ServerFlags, GlobalFlags...),
-		HideHelpCommand: true,
-	}
-)
+var gatewayCmd = cli.Command{
+	Name:            "gateway",
+	Usage:           "start object storage gateway",
+	Flags:           append(ServerFlags, GlobalFlags...),
+	HideHelpCommand: true,
+}
 
 // GatewayLocker implements custom NewNSLock implementation
 type GatewayLocker struct {
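The gatewayCmd hunk applies gofumpt's rule that a parenthesized declaration group containing a single spec should be written without the grouping. Sketch:

package main

import "fmt"

// Before the cleanup the declaration read:
//
//	var (
//		greeting = "hello"
//	)
//
// gofumpt collapses a one-element group to the plain form:
var greeting = "hello"

func main() {
	fmt.Println(greeting)
}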
@@ -349,7 +349,6 @@ func azureTierToS3StorageClass(tierType string) string {
 	default:
 		return "STANDARD"
 	}
-
 }
 
 // azurePropertiesToS3Meta converts Azure metadata/properties to S3
@@ -578,7 +577,6 @@ func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi min
 	resp, err := a.client.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{
 		Prefix: bucket,
 	})
-
 	if err != nil {
 		return bi, azureToObjectError(err, bucket)
 	}
@@ -604,7 +602,6 @@ func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketI
 
 	for marker.NotDone() {
 		resp, err := a.client.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{})
-
 		if err != nil {
 			return nil, azureToObjectError(err)
 		}
@@ -192,34 +192,41 @@ func TestAzureCodesToObjectError(t *testing.T) {
 	}{
 		{
 			nil, "ContainerAlreadyExists", 0,
-			minio.BucketExists{Bucket: "bucket"}, "bucket", "",
+			minio.BucketExists{Bucket: "bucket"},
+			"bucket", "",
 		},
 		{
 			nil, "InvalidResourceName", 0,
-			minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
+			minio.BucketNameInvalid{Bucket: "bucket."},
+			"bucket.", "",
 		},
 		{
 			nil, "RequestBodyTooLarge", 0,
-			minio.PartTooBig{}, "", "",
+			minio.PartTooBig{},
+			"", "",
 		},
 		{
 			nil, "InvalidMetadata", 0,
-			minio.UnsupportedMetadata{}, "", "",
+			minio.UnsupportedMetadata{},
+			"", "",
 		},
 		{
 			nil, "", http.StatusNotFound,
 			minio.ObjectNotFound{
 				Bucket: "bucket",
 				Object: "object",
-			}, "bucket", "object",
+			},
+			"bucket", "object",
 		},
 		{
 			nil, "", http.StatusNotFound,
-			minio.BucketNotFound{Bucket: "bucket"}, "bucket", "",
+			minio.BucketNotFound{Bucket: "bucket"},
+			"bucket", "",
 		},
 		{
 			nil, "", http.StatusBadRequest,
-			minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
+			minio.BucketNameInvalid{Bucket: "bucket."},
+			"bucket.", "",
 		},
 		{
 			fmt.Errorf("unhandled azure error"), "", http.StatusForbidden,
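In the Azure test table above, rows that packed a struct value and the expected bucket/object strings onto one line are reflowed so the struct value ends its line, keeping each element of the multiline composite literal at a consistent indent. A small invented table in the same shape:

package main

import "fmt"

type row struct {
	err            error
	expectedBucket string
	expectedObject string
}

func main() {
	rows := []row{
		{
			fmt.Errorf("object not found"),
			"bucket", "object",
		},
	}
	fmt.Println(len(rows)) // 1
}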
@@ -1121,7 +1121,6 @@ func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key strin
 		LastModified: minio.UTCNow(),
 		Size:         data.Size(),
 	}, nil
-
 }
 
 // gcsGetPartInfo returns PartInfo of a given object part
@@ -72,7 +72,6 @@ func TestToGCSPageToken(t *testing.T) {
 			t.Errorf("Test %d: Expected %s, got %s", i+1, toGCSPageToken(testCase.Name), testCase.Token)
 		}
 	}
-
 }
 
 // TestIsValidGCSProjectIDFormat tests isValidGCSProjectIDFormat
@@ -166,7 +165,6 @@ func TestGCSMultipartDataName(t *testing.T) {
 }
 
 func TestFromMinioClientListBucketResultToV2Info(t *testing.T) {
-
 	listBucketResult := miniogo.ListBucketResult{
 		IsTruncated: false,
 		Marker:      "testMarker",
@@ -133,7 +133,6 @@ func getKerberosClient() (*krb.Client, error) {
 	realm := env.Get("KRB5REALM", "")
 	if username == "" || realm == "" {
 		return nil, errors.New("empty KRB5USERNAME or KRB5REALM")
-
 	}
 
 	return krb.NewWithKeytab(username, realm, kt, cfg), nil
@@ -216,7 +215,7 @@ func (g *HDFS) NewGatewayLayer(creds madmin.Credentials) (minio.ObjectLayer, err
 		return nil, fmt.Errorf("unable to initialize hdfsClient: %v", err)
 	}
 
-	if err = clnt.MkdirAll(minio.PathJoin(commonPath, hdfsSeparator, minioMetaTmpBucket), os.FileMode(0755)); err != nil {
+	if err = clnt.MkdirAll(minio.PathJoin(commonPath, hdfsSeparator, minioMetaTmpBucket), os.FileMode(0o755)); err != nil {
 		return nil, err
 	}
 
@@ -324,7 +323,7 @@ func (n *hdfsObjects) MakeBucketWithLocation(ctx context.Context, bucket string,
 	if !hdfsIsValidBucketName(bucket) {
 		return minio.BucketNameInvalid{Bucket: bucket}
 	}
-	return hdfsToObjectErr(ctx, n.clnt.Mkdir(n.hdfsPathJoin(bucket), os.FileMode(0755)), bucket)
+	return hdfsToObjectErr(ctx, n.clnt.Mkdir(n.hdfsPathJoin(bucket), os.FileMode(0o755)), bucket)
 }
 
 func (n *hdfsObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) {
@@ -480,7 +479,6 @@ func fileInfoToObjectInfo(bucket string, entry string, fi os.FileInfo) minio.Obj
 // a path entry to an `os.FileInfo`. It also saves the listed path's `os.FileInfo` in the cache.
 func (n *hdfsObjects) populateDirectoryListing(filePath string, fileInfos map[string]os.FileInfo) (os.FileInfo, error) {
 	dirReader, err := n.clnt.Open(filePath)
-
 	if err != nil {
 		return nil, err
 	}
@@ -494,7 +492,6 @@ func (n *hdfsObjects) populateDirectoryListing(filePath string, fileInfos map[st
 
 	fileInfos[key] = dirStat
 	infos, err := dirReader.Readdir(0)
-
 	if err != nil {
 		return nil, err
 	}
@@ -602,7 +599,6 @@ func (n *hdfsObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
 	// exit in case of partial read
 	pipeCloser := func() { pr.Close() }
 	return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser)
-
 }
 
 func (n *hdfsObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) {
@@ -689,7 +685,7 @@ func (n *hdfsObjects) PutObject(ctx context.Context, bucket string, object strin
 
 	// If its a directory create a prefix {
 	if strings.HasSuffix(object, hdfsSeparator) && r.Size() == 0 {
-		if err = n.clnt.MkdirAll(name, os.FileMode(0755)); err != nil {
+		if err = n.clnt.MkdirAll(name, os.FileMode(0o755)); err != nil {
 			n.deleteObject(n.hdfsPathJoin(bucket), name)
 			return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
 		}
@@ -707,7 +703,7 @@ func (n *hdfsObjects) PutObject(ctx context.Context, bucket string, object strin
 	}
 	dir := path.Dir(name)
 	if dir != "" {
-		if err = n.clnt.MkdirAll(dir, os.FileMode(0755)); err != nil {
+		if err = n.clnt.MkdirAll(dir, os.FileMode(0o755)); err != nil {
 			w.Close()
 			n.deleteObject(n.hdfsPathJoin(bucket), dir)
 			return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
@@ -839,7 +835,7 @@ func (n *hdfsObjects) CompleteMultipartUpload(ctx context.Context, bucket, objec
 	name := n.hdfsPathJoin(bucket, object)
 	dir := path.Dir(name)
 	if dir != "" {
-		if err = n.clnt.MkdirAll(dir, os.FileMode(0755)); err != nil {
+		if err = n.clnt.MkdirAll(dir, os.FileMode(0o755)); err != nil {
 			return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
 		}
 	}
@@ -89,7 +89,6 @@ func (c *Chain) Retrieve() (credentials.Value, error) {
 	}
 
 	return credentials.Value{}, fmt.Errorf("no credentials found in %s cannot proceed", providers)
-
 }
 
 // IsExpired will returned the expired state of the currently cached provider
@@ -139,7 +139,7 @@ func (m gwMetaV1) ObjectToPartOffset(ctx context.Context, offset int64) (partInd
 
 // Constructs GWMetaV1 using `jsoniter` lib to retrieve each field.
 func gwMetaUnmarshalJSON(ctx context.Context, gwMetaBuf []byte) (gwMeta gwMetaV1, err error) {
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	err = json.Unmarshal(gwMetaBuf, &gwMeta)
 	return gwMeta, err
 }
@@ -75,12 +75,10 @@ func (l *s3EncObjects) ListObjects(ctx context.Context, bucket string, prefix st
 	loi.Objects = res.Objects
 	loi.Prefixes = res.Prefixes
 	return loi, nil
-
 }
 
 // ListObjectsV2 lists all blobs in S3 bucket filtered by prefix
 func (l *s3EncObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
-
 	var objects []minio.ObjectInfo
 	var prefixes []string
 	var isTruncated bool
@@ -423,7 +421,6 @@ func (l *s3EncObjects) DeleteObjects(ctx context.Context, bucket string, objects
 
 // ListMultipartUploads lists all multipart uploads.
 func (l *s3EncObjects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, e error) {
-
 	lmi, e = l.s3Objects.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
 	if e != nil {
 		return
@@ -505,7 +502,6 @@ func (l *s3EncObjects) PutObject(ctx context.Context, bucket string, object stri
 
 // PutObjectPart puts a part of object in bucket
 func (l *s3EncObjects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, e error) {
-
 	if opts.ServerSideEncryption == nil {
 		return l.s3Objects.PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
 	}
@@ -630,7 +626,6 @@ func (l *s3EncObjects) AbortMultipartUpload(ctx context.Context, bucket string,
 
 // CompleteMultipartUpload completes ongoing multipart upload and finalizes object
 func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, e error) {
-
 	tmpMeta, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID))
 	if err != nil {
 		oi, e = l.s3Objects.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
@@ -89,7 +89,7 @@ func getBackgroundHealStatus(ctx context.Context, o ObjectLayer) (madmin.BgHealS
 		}
 	}
 
-	var healDisksMap = map[string]struct{}{}
+	healDisksMap := map[string]struct{}{}
 	for _, ep := range getLocalDisksToHeal() {
 		healDisksMap[ep.String()] = struct{}{}
 	}
@@ -139,7 +139,6 @@ func getBackgroundHealStatus(ctx context.Context, o ObjectLayer) (madmin.BgHealS
 	status.SCParity[storageclass.RRS] = backendInfo.RRSCParity
 
 	return status, true
-
 }
 
 func mustGetHealSequence(ctx context.Context) *healSequence {
@@ -306,7 +305,6 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 		},
 		finished: nil,
 	})
-
 	if err != nil {
 		// Set this such that when we return this function
 		// we let the caller retry this disk again for the
@@ -77,7 +77,6 @@ func availableMemory() (available uint64) {
 		if available != 9223372036854771712 {
 			// This means cgroup memory limit is configured.
 			return
-
 		} // no-limit set proceed to set the limits based on virtual memory.
 
 	} // for all other platforms limits are based on virtual memory.
@@ -307,7 +307,7 @@ func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (fil
 	// an ugly way of handling this situation. Refer here
 	// https://golang.org/src/mime/multipart/formdata.go#L61
 	if len(form.File) == 0 {
-		var b = &bytes.Buffer{}
+		b := &bytes.Buffer{}
 		for _, v := range formValues["File"] {
 			b.WriteString(v)
 		}
@@ -544,7 +544,6 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
 			HTTPStatusCode: http.StatusBadRequest,
 		}, r.URL)
 	}
-
 }
 
 // gets host name for current node
@@ -34,7 +34,6 @@ const (
 
 // registerHealthCheckRouter - add handler functions for liveness and readiness routes.
 func registerHealthCheckRouter(router *mux.Router) {
-
 	// Healthcheck router
 	healthRouter := router.PathPrefix(healthCheckPathPrefix).Subrouter()
 
@@ -128,7 +128,7 @@ func (stats *HTTPAPIStats) Dec(api string) {
 func (stats *HTTPAPIStats) Load() map[string]int {
 	stats.Lock()
 	defer stats.Unlock()
-	var apiStats = make(map[string]int, len(stats.apiStats))
+	apiStats := make(map[string]int, len(stats.apiStats))
 	for k, v := range stats.apiStats {
 		apiStats[k] = v
 	}
@@ -28,13 +28,16 @@ func TestRedactLDAPPwd(t *testing.T) {
 		expectedQuery string
 	}{
 		{"", ""},
-		{"?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&LDAPPassword=can+youreadthis%3F&Version=2011-06-15",
+		{
+			"?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&LDAPPassword=can+youreadthis%3F&Version=2011-06-15",
 			"?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&LDAPPassword=*REDACTED*&Version=2011-06-15",
 		},
-		{"LDAPPassword=can+youreadthis%3F&Version=2011-06-15&?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername",
+		{
+			"LDAPPassword=can+youreadthis%3F&Version=2011-06-15&?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername",
 			"LDAPPassword=*REDACTED*&Version=2011-06-15&?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername",
 		},
-		{"?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&Version=2011-06-15&LDAPPassword=can+youreadthis%3F",
+		{
+			"?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&Version=2011-06-15&LDAPPassword=can+youreadthis%3F",
 			"?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&Version=2011-06-15&LDAPPassword=*REDACTED*",
 		},
 		{
@@ -139,7 +139,7 @@ func getIAMConfig(item interface{}, data []byte, itemPath string) error {
 	if err != nil {
 		return err
 	}
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	return json.Unmarshal(data, item)
 }
 
@@ -416,7 +416,6 @@ func (ies *IAMEtcdStore) loadGroup(ctx context.Context, group string, m map[stri
 	}
 	m[group] = gi
 	return nil
-
 }
 
 func (ies *IAMEtcdStore) loadGroups(ctx context.Context, m map[string]GroupInfo) error {
@@ -437,7 +436,6 @@ func (ies *IAMEtcdStore) loadGroups(ctx context.Context, m map[string]GroupInfo)
 		}
 	}
 	return nil
-
 }
 
 func (ies *IAMEtcdStore) loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error {
@@ -497,7 +495,6 @@ func (ies *IAMEtcdStore) loadMappedPolicies(ctx context.Context, userType IAMUse
 		}
 	}
 	return nil
-
 }
 
 func (ies *IAMEtcdStore) savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error {
@@ -601,5 +598,4 @@ func (ies *IAMEtcdStore) watch(ctx context.Context, keyPath string) <-chan iamWa
 		}
 	}()
 	return ch
-
 }
@@ -128,7 +128,7 @@ func (iamOS *IAMObjectStore) migrateUsersConfigToV1(ctx context.Context) error {
 	next:
 		// 4. check if user identity has old format.
 		identityPath := pathJoin(basePrefix, user, iamIdentityFile)
-		var cred = auth.Credentials{
+		cred := auth.Credentials{
 			AccessKey: user,
 		}
 		if err := iamOS.loadIAMConfig(ctx, &cred, identityPath); err != nil {
@@ -159,7 +159,6 @@ func (iamOS *IAMObjectStore) migrateUsersConfigToV1(ctx context.Context) error {
 		// has not changed.
 	}
 	return nil
-
 }
 
 func (iamOS *IAMObjectStore) migrateToV1(ctx context.Context) error {
@@ -201,7 +200,7 @@ func (iamOS *IAMObjectStore) migrateBackendFormat(ctx context.Context) error {
 }
 
 func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item interface{}, objPath string, opts ...options) error {
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	data, err := json.Marshal(item)
 	if err != nil {
 		return err
@@ -238,7 +237,7 @@ func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item interface{}
 	if err != nil {
 		return err
 	}
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	return json.Unmarshal(data, item)
 }
 
@@ -364,8 +363,8 @@ func (iamOS *IAMObjectStore) loadGroups(ctx context.Context, m map[string]GroupI
 }
 
 func (iamOS *IAMObjectStore) loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool,
-	m map[string]MappedPolicy) error {
+	m map[string]MappedPolicy,
+) error {
 	var p MappedPolicy
 	err := iamOS.loadIAMConfig(ctx, &p, getMappedPolicyPath(name, userType, isGroup))
 	if err != nil {
@@ -213,7 +213,7 @@ func (d *PolicyDoc) update(p iampolicy.Policy) {
 // from iampolicy.Policy to PolicyDoc. To avoid a migration, loading supports
 // both the old and the new formats.
 func (d *PolicyDoc) parseJSON(data []byte) error {
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	var doc PolicyDoc
 	err := json.Unmarshal(data, &doc)
 	if err != nil {
@@ -378,7 +378,6 @@ func (c *iamCache) policyDBGet(mode UsersSysType, name string, isGroup bool) ([]
 
 // IAMStorageAPI defines an interface for the IAM persistence layer
 type IAMStorageAPI interface {
-
 	// The role of the read-write lock is to prevent go routines from
 	// concurrently reading and writing the IAM storage. The (r)lock()
 	// functions return the iamCache. The cache can be safely written to
@@ -387,32 +386,23 @@ type IAMStorageAPI interface {
 	unlock()
 	rlock() *iamCache
 	runlock()
-
 	migrateBackendFormat(context.Context) error
-
 	getUsersSysType() UsersSysType
-
 	loadPolicyDoc(ctx context.Context, policy string, m map[string]PolicyDoc) error
 	loadPolicyDocs(ctx context.Context, m map[string]PolicyDoc) error
-
 	loadUser(ctx context.Context, user string, userType IAMUserType, m map[string]auth.Credentials) error
 	loadUsers(ctx context.Context, userType IAMUserType, m map[string]auth.Credentials) error
-
 	loadGroup(ctx context.Context, group string, m map[string]GroupInfo) error
 	loadGroups(ctx context.Context, m map[string]GroupInfo) error
-
 	loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error
 	loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error
-
 	saveIAMConfig(ctx context.Context, item interface{}, path string, opts ...options) error
 	loadIAMConfig(ctx context.Context, item interface{}, path string) error
 	deleteIAMConfig(ctx context.Context, path string) error
-
 	savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error
 	saveMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, mp MappedPolicy, opts ...options) error
 	saveUserIdentity(ctx context.Context, name string, userType IAMUserType, u UserIdentity, opts ...options) error
 	saveGroupInfo(ctx context.Context, group string, gi GroupInfo) error
-
 	deletePolicyDoc(ctx context.Context, policyName string) error
 	deleteMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool) error
 	deleteUserIdentity(ctx context.Context, name string, userType IAMUserType) error
@ -639,7 +629,6 @@ func (store *IAMStoreSys) AddUsersToGroup(ctx context.Context, group string, mem
|
|||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// helper function - does not take any locks. Updates only cache if
|
// helper function - does not take any locks. Updates only cache if
|
||||||
@ -880,7 +869,6 @@ func (store *IAMStoreSys) PolicyDBSet(ctx context.Context, name, policy string,
		cache.iamGroupPolicyMap[name] = mp
	}
	return nil
-
}

// PolicyNotificationHandler - loads given policy from storage. If not present,
@ -1034,7 +1022,6 @@ func (store *IAMStoreSys) GetPolicyDoc(name string) (r PolicyDoc, err error) {

// SetPolicy - creates a policy with name.
func (store *IAMStoreSys) SetPolicy(ctx context.Context, name string, policy iampolicy.Policy) error {
-
	if policy.IsEmpty() || name == "" {
		return errInvalidArgument
	}
@ -1058,7 +1045,6 @@ func (store *IAMStoreSys) SetPolicy(ctx context.Context, name string, policy iam

	cache.iamPolicyDocsMap[name] = d
	return nil
-
}

// ListPolicies - fetches all policies from storage and updates cache as well.
@ -1118,7 +1104,6 @@ func (store *IAMStoreSys) FilterPolicies(policyName string, bucketName string) (
	defer store.runlock()

	return filterPolicies(cache, policyName, bucketName)
-
}

// GetBucketUsers - returns users (not STS or service accounts) that have access
@ -170,7 +170,6 @@ func (sys *IAMSys) initStore(objAPI ObjectLayer, etcdClient *etcd.Client) {
	} else {
		sys.store = &IAMStoreSys{newIAMEtcdStore(etcdClient, sys.usersSysType)}
	}
-
}

// Initialized checks if IAM is initialized
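Aside from the dropped blank line, this hunk also shows the backend-selection logic: when an etcd client is configured, IAM state is kept in etcd, otherwise it lives on the object layer. A stand-alone sketch of that pattern follows; all names below are illustrative stand-ins, not MinIO's types:

package example

type iamBackend interface{ name() string }

type objectLayerBackend struct{}

func (objectLayerBackend) name() string { return "object-layer" }

type etcdBackend struct{}

func (etcdBackend) name() string { return "etcd" }

// pickBackend mirrors the if/else shape in initStore above.
func pickBackend(etcdConfigured bool) iamBackend {
	if etcdConfigured {
		return etcdBackend{}
	}
	return objectLayerBackend{}
}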
@ -801,9 +800,7 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro
		}
	}

-	var (
-		cred auth.Credentials
-	)
+	var cred auth.Credentials

	var err error
	if len(opts.accessKey) > 0 {
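The change above is another gofumpt rule: a parenthesized declaration group containing a single declaration is collapsed onto one line. A sketch, where the credentials type is a hypothetical stand-in for auth.Credentials:

package example

type credentials struct{ AccessKey, SecretKey string }

// Before gofumpt (kept as a comment so this file stays valid):
//
//	var (
//		cred credentials
//	)
//
// After gofumpt:
var cred credentials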
@ -140,7 +140,6 @@ func (l *localLocker) Unlock(_ context.Context, args dsync.LockArgs) (reply bool
		}
	}
	return
-
}

// removeEntry based on the uid of the lock message, removes a single entry from the
@ -228,7 +228,7 @@ func Test_metaCacheEntries_resolve(t *testing.T) {
	if err != nil {
		t.Fatal(err)
	}
-	var inputs = []xlMetaV2{
+	inputs := []xlMetaV2{
		0: {
			versions: []xlMetaV2ShallowVersion{
				{header: xlMetaV2VersionHeader{
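This hunk and the one after it apply the same gofumpt rule: inside a function, a declaration of the form var x = expr with no explicit type is rewritten to the short form x := expr. A minimal sketch (hypothetical code, not from this test file):

package example

func buildInputs() []int {
	// Before gofumpt: var inputs = []int{1, 2, 3}
	// After gofumpt, the short declaration form is enforced:
	inputs := []int{1, 2, 3}
	return inputs
}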
@ -379,7 +379,7 @@ func Test_metaCacheEntries_resolve(t *testing.T) {
	for i, xl := range inputs {
		xl.sortByModTime()
		var err error
-		var entry = metaCacheEntry{
+		entry := metaCacheEntry{
			name: "testobject",
		}
		entry.metadata, err = xl.AppendTo(nil)
Some files were not shown because too many files have changed in this diff.