fix: bucket metadata import to return a summary (#15462)

This commit is contained in:
Poorna 2022-08-05 01:52:50 -07:00 committed by GitHub
parent 1ffd063939
commit 1beea3daba
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 172 additions and 151 deletions

View File

@ -374,7 +374,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
bucket := pathClean(r.Form.Get("bucket")) bucket := pathClean(r.Form.Get("bucket"))
if !globalIsErasure { if globalIsGateway {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return return
} }
@ -619,6 +619,39 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
} }
} }
// importMetaReport wraps madmin.BucketMetaImportErrs to accumulate the
// per-bucket, per-config outcome of a bucket metadata import.
type importMetaReport struct {
	madmin.BucketMetaImportErrs
}

// SetStatus records the result of importing the config file fname for
// bucket. A nil err marks the config as applied successfully; an
// unrecognized fname stores the error message at the bucket level.
func (i *importMetaReport) SetStatus(bucket, fname string, err error) {
	var errMsg string
	if err != nil {
		errMsg = err.Error()
	}
	// Same status value is stored regardless of which config it was for.
	status := madmin.MetaStatus{IsSet: true, Err: errMsg}
	st := i.Buckets[bucket]
	switch fname {
	case bucketPolicyConfig:
		st.Policy = status
	case bucketNotificationConfig:
		st.Notification = status
	case bucketLifecycleConfig:
		st.Lifecycle = status
	case bucketSSEConfig:
		st.SSEConfig = status
	case bucketTaggingConfig:
		st.Tagging = status
	case bucketQuotaConfigFile:
		st.Quota = status
	case objectLockConfig:
		st.ObjectLock = status
	case bucketVersioningConfig:
		st.Versioning = status
	default:
		st.Err = errMsg
	}
	i.Buckets[bucket] = st
}
// ImportBucketMetadataHandler - imports all bucket metadata from a zipped file and overwrite bucket metadata config // ImportBucketMetadataHandler - imports all bucket metadata from a zipped file and overwrite bucket metadata config
// There are some caveats regarding the following: // There are some caveats regarding the following:
// 1. object lock config - object lock should have been specified at time of bucket creation. Only default retention settings are imported here. // 1. object lock config - object lock should have been specified at time of bucket creation. Only default retention settings are imported here.
@ -628,9 +661,8 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
ctx := newContext(r, w, "ImportBucketMetadata") ctx := newContext(r, w, "ImportBucketMetadata")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
bucket := pathClean(r.Form.Get("bucket"))
if !globalIsErasure { if globalIsGateway {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return return
} }
@ -651,37 +683,36 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
return return
} }
bucketMap := make(map[string]struct{}, 1) bucketMap := make(map[string]struct{}, 1)
rpt := importMetaReport{
madmin.BucketMetaImportErrs{
Buckets: make(map[string]madmin.BucketStatus, len(zr.File)),
},
}
// import object lock config if any - order of import matters here. // import object lock config if any - order of import matters here.
for _, file := range zr.File { for _, file := range zr.File {
slc := strings.Split(file.Name, slashSeparator) slc := strings.Split(file.Name, slashSeparator)
if len(slc) != 2 { // expecting bucket/configfile in the zipfile if len(slc) != 2 { // expecting bucket/configfile in the zipfile
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) rpt.SetStatus(file.Name, "", fmt.Errorf("malformed zip - expecting format bucket/<config.json>"))
return continue
}
b, fileName := slc[0], slc[1]
if bucket == "" { // use bucket requested in query parameters if specified. Otherwise default bucket name to directory name within zip
bucket = b
} }
bucket, fileName := slc[0], slc[1]
switch fileName { switch fileName {
case objectLockConfig: case objectLockConfig:
reader, err := file.Open() reader, err := file.Open()
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
config, err := objectlock.ParseObjectLockConfig(reader) config, err := objectlock.ParseObjectLockConfig(reader)
if err != nil { if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML) rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
apiErr.Description = err.Error() continue
writeErrorResponse(ctx, w, apiErr, r.URL)
return
} }
configData, err := xml.Marshal(config) configData, err := xml.Marshal(config)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
if _, ok := bucketMap[bucket]; !ok { if _, ok := bucketMap[bucket]; !ok {
opts := MakeBucketOptions{ opts := MakeBucketOptions{
@ -690,8 +721,8 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts) err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts)
if err != nil { if err != nil {
if _, ok := err.(BucketExists); !ok { if _, ok := err.(BucketExists); !ok {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
} }
bucketMap[bucket] = struct{}{} bucketMap[bucket] = struct{}{}
@ -699,15 +730,16 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
// Deny object locking configuration settings on existing buckets without object lock enabled. // Deny object locking configuration settings on existing buckets without object lock enabled.
if _, _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil { if _, _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData) updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
rpt.SetStatus(bucket, fileName, nil)
// Call site replication hook. // Call site replication hook.
// //
@ -720,8 +752,8 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
ObjectLockConfig: &cfgStr, ObjectLockConfig: &cfgStr,
UpdatedAt: updatedAt, UpdatedAt: updatedAt,
}); err != nil { }); err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
} }
} }
@ -730,162 +762,145 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
for _, file := range zr.File { for _, file := range zr.File {
slc := strings.Split(file.Name, slashSeparator) slc := strings.Split(file.Name, slashSeparator)
if len(slc) != 2 { // expecting bucket/configfile in the zipfile if len(slc) != 2 { // expecting bucket/configfile in the zipfile
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) rpt.SetStatus(file.Name, "", fmt.Errorf("malformed zip - expecting format bucket/<config.json>"))
return continue
}
b, fileName := slc[0], slc[1]
if bucket == "" { // use bucket requested in query parameters if specified. Otherwise default bucket name to directory name within zip
bucket = b
} }
bucket, fileName := slc[0], slc[1]
switch fileName { switch fileName {
case bucketVersioningConfig: case bucketVersioningConfig:
reader, err := file.Open() reader, err := file.Open()
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
v, err := versioning.ParseConfig(io.LimitReader(reader, maxBucketVersioningConfigSize)) v, err := versioning.ParseConfig(io.LimitReader(reader, maxBucketVersioningConfigSize))
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
if _, ok := bucketMap[bucket]; !ok { if _, ok := bucketMap[bucket]; !ok {
err = objectAPI.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{}) if err = objectAPI.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{}); err != nil {
if err != nil {
if _, ok := err.(BucketExists); !ok { if _, ok := err.(BucketExists); !ok {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
} }
bucketMap[bucket] = struct{}{} bucketMap[bucket] = struct{}{}
} }
if globalSiteReplicationSys.isEnabled() && v.Suspended() { if globalSiteReplicationSys.isEnabled() && v.Suspended() {
writeErrorResponse(ctx, w, APIError{ rpt.SetStatus(bucket, fileName, fmt.Errorf("Cluster replication is enabled for this site, so the versioning state cannot be suspended."))
Code: "InvalidBucketState", continue
Description: "Cluster replication is enabled for this site, so the versioning state cannot be changed.",
HTTPStatusCode: http.StatusConflict,
}, r.URL)
return
} }
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled && v.Suspended() { if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled && v.Suspended() {
writeErrorResponse(ctx, w, APIError{ rpt.SetStatus(bucket, fileName, fmt.Errorf("An Object Lock configuration is present on this bucket, so the versioning state cannot be suspended."))
Code: "InvalidBucketState", continue
Description: "An Object Lock configuration is present on this bucket, so the versioning state cannot be changed.",
HTTPStatusCode: http.StatusConflict,
}, r.URL)
return
} }
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() { if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
writeErrorResponse(ctx, w, APIError{ rpt.SetStatus(bucket, fileName, fmt.Errorf("A replication configuration is present on this bucket, so the versioning state cannot be suspended."))
Code: "InvalidBucketState", continue
Description: "A replication configuration is present on this bucket, so the versioning state cannot be changed.",
HTTPStatusCode: http.StatusConflict,
}, r.URL)
return
} }
configData, err := xml.Marshal(v) configData, err := xml.Marshal(v)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
return continue
} }
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, configData); err != nil { if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, configData); err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
rpt.SetStatus(bucket, fileName, nil)
} }
} }
for _, file := range zr.File { for _, file := range zr.File {
reader, err := file.Open() reader, err := file.Open()
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, ""), r.URL) rpt.SetStatus(file.Name, "", err)
return continue
} }
sz := file.FileInfo().Size() sz := file.FileInfo().Size()
slc := strings.Split(file.Name, slashSeparator) slc := strings.Split(file.Name, slashSeparator)
if len(slc) != 2 { // expecting bucket/configfile in the zipfile if len(slc) != 2 { // expecting bucket/configfile in the zipfile
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) rpt.SetStatus(file.Name, "", fmt.Errorf("malformed zip - expecting format bucket/<config.json>"))
return continue
}
b, fileName := slc[0], slc[1]
if bucket == "" { // use bucket requested in query parameters if specified. Otherwise default bucket name to directory name within zip
bucket = b
} }
bucket, fileName := slc[0], slc[1]
// create bucket if it does not exist yet. // create bucket if it does not exist yet.
if _, ok := bucketMap[bucket]; !ok { if _, ok := bucketMap[bucket]; !ok {
err = objectAPI.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{}) err = objectAPI.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
if err != nil { if err != nil {
if _, ok := err.(BucketExists); !ok { if _, ok := err.(BucketExists); !ok {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, "", err)
return continue
} }
} }
bucketMap[bucket] = struct{}{} bucketMap[bucket] = struct{}{}
} }
if _, ok := bucketMap[bucket]; !ok {
continue
}
switch fileName { switch fileName {
case bucketNotificationConfig: case bucketNotificationConfig:
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region, globalNotificationSys.targetList) config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region, globalNotificationSys.targetList)
if err != nil { if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML) rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
if event.IsEventError(err) { continue
apiErr = importError(ctx, err, file.Name, bucket)
}
writeErrorResponse(ctx, w, apiErr, r.URL)
return
} }
configData, err := xml.Marshal(config) configData, err := xml.Marshal(config)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketNotificationConfig, configData); err != nil { if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketNotificationConfig, configData); err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
rulesMap := config.ToRulesMap() rulesMap := config.ToRulesMap()
globalNotificationSys.AddRulesMap(bucket, rulesMap) globalNotificationSys.AddRulesMap(bucket, rulesMap)
rpt.SetStatus(bucket, fileName, nil)
case bucketPolicyConfig: case bucketPolicyConfig:
// Error out if Content-Length is beyond allowed size. // Error out if Content-Length is beyond allowed size.
if sz > maxBucketPolicySize { if sz > maxBucketPolicySize {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPolicyTooLarge), r.URL) rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyTooLarge.String()))
return continue
} }
bucketPolicyBytes, err := ioutil.ReadAll(io.LimitReader(reader, sz)) bucketPolicyBytes, err := ioutil.ReadAll(io.LimitReader(reader, sz))
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
bucketPolicy, err := policy.ParseConfig(bytes.NewReader(bucketPolicyBytes), bucket) bucketPolicy, err := policy.ParseConfig(bytes.NewReader(bucketPolicyBytes), bucket)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
// Version in policy must not be empty // Version in policy must not be empty
if bucketPolicy.Version == "" { if bucketPolicy.Version == "" {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL) rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrMalformedPolicy.String()))
return continue
} }
configData, err := json.Marshal(bucketPolicy) configData, err := json.Marshal(bucketPolicy)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, configData) updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, configData)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
rpt.SetStatus(bucket, fileName, nil)
// Call site replication hook. // Call site replication hook.
if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypePolicy, Type: madmin.SRBucketMetaTypePolicy,
@ -893,56 +908,51 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
Policy: bucketPolicyBytes, Policy: bucketPolicyBytes,
UpdatedAt: updatedAt, UpdatedAt: updatedAt,
}); err != nil { }); err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
case bucketLifecycleConfig: case bucketLifecycleConfig:
bucketLifecycle, err := lifecycle.ParseLifecycleConfig(io.LimitReader(reader, sz)) bucketLifecycle, err := lifecycle.ParseLifecycleConfig(io.LimitReader(reader, sz))
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
// Validate the received bucket policy document // Validate the received bucket policy document
if err = bucketLifecycle.Validate(); err != nil { if err = bucketLifecycle.Validate(); err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
// Validate the transition storage ARNs // Validate the transition storage ARNs
if err = validateTransitionTier(bucketLifecycle); err != nil { if err = validateTransitionTier(bucketLifecycle); err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
configData, err := xml.Marshal(bucketLifecycle) configData, err := xml.Marshal(bucketLifecycle)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, configData); err != nil { if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, configData); err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
rpt.SetStatus(bucket, fileName, nil)
case bucketSSEConfig: case bucketSSEConfig:
// Parse bucket encryption xml // Parse bucket encryption xml
encConfig, err := validateBucketSSEConfig(io.LimitReader(reader, maxBucketSSEConfigSize)) encConfig, err := validateBucketSSEConfig(io.LimitReader(reader, maxBucketSSEConfigSize))
if err != nil { if err != nil {
apiErr := APIError{ rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
Code: "MalformedXML", continue
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrMalformedXML].Description, err),
HTTPStatusCode: errorCodes[ErrMalformedXML].HTTPStatusCode,
}
writeErrorResponse(ctx, w, apiErr, r.URL)
return
} }
// Return error if KMS is not initialized // Return error if KMS is not initialized
if GlobalKMS == nil { if GlobalKMS == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) rpt.SetStatus(bucket, fileName, fmt.Errorf("%s", errorCodes[ErrKMSNotConfigured].Description))
return continue
} }
kmsKey := encConfig.KeyID() kmsKey := encConfig.KeyID()
if kmsKey != "" { if kmsKey != "" {
@ -950,26 +960,27 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext) _, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
if err != nil { if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) { if errors.Is(err, kes.ErrKeyNotFound) {
writeErrorResponse(ctx, w, importError(ctx, errKMSKeyNotFound, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, errKMSKeyNotFound)
return continue
} }
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
} }
configData, err := xml.Marshal(encConfig) configData, err := xml.Marshal(encConfig)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
// Store the bucket encryption configuration in the object layer // Store the bucket encryption configuration in the object layer
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, configData) updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, configData)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
rpt.SetStatus(bucket, fileName, nil)
// Call site replication hook. // Call site replication hook.
// //
@ -982,29 +993,30 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
SSEConfig: &cfgStr, SSEConfig: &cfgStr,
UpdatedAt: updatedAt, UpdatedAt: updatedAt,
}); err != nil { }); err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
case bucketTaggingConfig: case bucketTaggingConfig:
tags, err := tags.ParseBucketXML(io.LimitReader(reader, sz)) tags, err := tags.ParseBucketXML(io.LimitReader(reader, sz))
if err != nil { if err != nil {
apiErr := errorCodes.ToAPIErrWithErr(ErrMalformedXML, fmt.Errorf("error importing %s with %w", file.Name, err)) rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
writeErrorResponse(ctx, w, apiErr, r.URL) continue
return
} }
configData, err := xml.Marshal(tags) configData, err := xml.Marshal(tags)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData) updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
rpt.SetStatus(bucket, fileName, nil)
// Call site replication hook. // Call site replication hook.
// //
// We encode the xml bytes as base64 to ensure there are no encoding // We encode the xml bytes as base64 to ensure there are no encoding
@ -1016,32 +1028,33 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
Tags: &cfgStr, Tags: &cfgStr,
UpdatedAt: updatedAt, UpdatedAt: updatedAt,
}); err != nil { }); err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
case bucketQuotaConfigFile: case bucketQuotaConfigFile:
data, err := ioutil.ReadAll(reader) data, err := ioutil.ReadAll(reader)
if err != nil { if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
quotaConfig, err := parseBucketQuota(bucket, data) quotaConfig, err := parseBucketQuota(bucket, data)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
if quotaConfig.Type == "fifo" { if quotaConfig.Type == "fifo" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) rpt.SetStatus(bucket, fileName, fmt.Errorf("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore"))
return continue
} }
updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, data) updatedAt, err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, data)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
rpt.SetStatus(bucket, fileName, nil)
bucketMeta := madmin.SRBucketMeta{ bucketMeta := madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeQuotaConfig, Type: madmin.SRBucketMetaTypeQuotaConfig,
@ -1055,11 +1068,19 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
// Call site replication hook. // Call site replication hook.
if err = globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta); err != nil { if err = globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta); err != nil {
writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL) rpt.SetStatus(bucket, fileName, err)
return continue
} }
} }
} }
rptData, err := json.Marshal(rpt.BucketMetaImportErrs)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, rptData)
} }
// ReplicationDiffHandler - POST returns info on unreplicated versions for a remote target ARN // ReplicationDiffHandler - POST returns info on unreplicated versions for a remote target ARN

4
go.mod
View File

@ -48,7 +48,7 @@ require (
github.com/minio/dperf v0.4.2 github.com/minio/dperf v0.4.2
github.com/minio/highwayhash v1.0.2 github.com/minio/highwayhash v1.0.2
github.com/minio/kes v0.20.0 github.com/minio/kes v0.20.0
github.com/minio/madmin-go v1.4.9 github.com/minio/madmin-go v1.4.12
github.com/minio/minio-go/v7 v7.0.34 github.com/minio/minio-go/v7 v7.0.34
github.com/minio/pkg v1.3.0 github.com/minio/pkg v1.3.0
github.com/minio/selfupdate v0.5.0 github.com/minio/selfupdate v0.5.0
@ -164,7 +164,7 @@ require (
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/minio/colorjson v1.0.2 // indirect github.com/minio/colorjson v1.0.2 // indirect
github.com/minio/filepath v1.0.0 // indirect github.com/minio/filepath v1.0.0 // indirect
github.com/minio/mc v0.0.0-20220705180830-01b87ecc02ff // indirect github.com/minio/mc v0.0.0-20220805080128-351d021b924b // indirect
github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/md5-simd v1.1.2 // indirect
github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect

8
go.sum
View File

@ -622,10 +622,10 @@ github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLT
github.com/minio/kes v0.20.0 h1:1tyC51Rr8zTregTESuT/QN/iebNMX7B9t7d3xLNMEpE= github.com/minio/kes v0.20.0 h1:1tyC51Rr8zTregTESuT/QN/iebNMX7B9t7d3xLNMEpE=
github.com/minio/kes v0.20.0/go.mod h1:3FW1BQkMGQW78yhy+69tUq5bdcf5rnXJizyeKB9a/tc= github.com/minio/kes v0.20.0/go.mod h1:3FW1BQkMGQW78yhy+69tUq5bdcf5rnXJizyeKB9a/tc=
github.com/minio/madmin-go v1.3.5/go.mod h1:vGKGboQgGIWx4DuDUaXixjlIEZOCIp6ivJkQoiVaACc= github.com/minio/madmin-go v1.3.5/go.mod h1:vGKGboQgGIWx4DuDUaXixjlIEZOCIp6ivJkQoiVaACc=
github.com/minio/madmin-go v1.4.9 h1:kFJhzWlDomaK1l2RT6yMRVDTYBUTcfn7sT5c8PCM/3Q= github.com/minio/madmin-go v1.4.12 h1:2jr4QLoJOXkwxpPPSbriopZJ3ExyO4TwF/j57pE6dVQ=
github.com/minio/madmin-go v1.4.9/go.mod h1:ez87VmMtsxP7DRxjKJKD4RDNW+nhO2QF9KSzwxBDQ98= github.com/minio/madmin-go v1.4.12/go.mod h1:ez87VmMtsxP7DRxjKJKD4RDNW+nhO2QF9KSzwxBDQ98=
github.com/minio/mc v0.0.0-20220705180830-01b87ecc02ff h1:b5XHy2gDZ+B3xQFhegHdSsQQUp82y6pKowwBCgD7SBU= github.com/minio/mc v0.0.0-20220805080128-351d021b924b h1:ikMXncKqNE/0acH6us6yy3v+gJBP7nGv/3Rc9F7vRio=
github.com/minio/mc v0.0.0-20220705180830-01b87ecc02ff/go.mod h1:z/hyvWFsn5ZLbSaJjr9TlCocFghHmhYuNrtpEpEIn48= github.com/minio/mc v0.0.0-20220805080128-351d021b924b/go.mod h1:YUXIqqgGfFknByv0eeJSMBQl/WGuEN0XkpW68/ghBm0=
github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=