mirror of https://github.com/minio/minio.git
deprecate/remove global WORM mode (#9436)
Global WORM mode is a complex feature whose time has passed: with the advent of the S3-compatible object locking and retention implementation, global WORM is effectively deprecated. Our documentation has said so for some time; with this change it is removed.
This commit is contained in:
parent 45e22cf8aa
commit 60d415bb8a
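With the global mode gone, the equivalent behaviour is configured per bucket through the object locking APIs. Below is a minimal migration sketch using aws-sdk-go (the same SDK the removed Mint worm tests used); the endpoint, credentials, bucket name, retention mode and period are placeholders, not values prescribed by this commit. It creates a bucket with object locking enabled and attaches a default retention rule, which is the per-bucket replacement for a global WORM setup.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholder endpoint and credentials; point these at your own deployment.
	s3Client := s3.New(session.Must(session.NewSession(&aws.Config{
		Credentials:      credentials.NewStaticCredentials("ACCESS_KEY", "SECRET_KEY", ""),
		Endpoint:         aws.String("http://localhost:9000"),
		Region:           aws.String("us-east-1"),
		S3ForcePathStyle: aws.Bool(true),
	})))

	// Object locking must be enabled at bucket creation time.
	_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket:                     aws.String("immutable-bucket"),
		ObjectLockEnabledForBucket: aws.Bool(true),
	})
	if err != nil {
		log.Fatalln("create bucket:", err)
	}

	// A default retention rule makes every new object write-once for the
	// configured period -- the per-bucket replacement for global WORM.
	_, err = s3Client.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("immutable-bucket"),
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String("Enabled"),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String("COMPLIANCE"),
					Days: aws.Int64(30),
				},
			},
		},
	})
	if err != nil {
		log.Fatalln("set object lock configuration:", err)
	}
}
```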
@@ -88,11 +88,6 @@ export class ChangePasswordModal extends React.Component {
   canChangePassword() {
     const { serverInfo } = this.props
-    // Password change is not allowed in WORM mode
-    if (serverInfo.info.isWorm) {
-      return false
-    }
-
     // Password change is not allowed for temporary users(STS)
     if(serverInfo.userInfo.isTempUser) {
       return false
@@ -64,17 +64,6 @@ describe("ChangePasswordModal", () => {
     shallow(<ChangePasswordModal serverInfo={serverInfo} />)
   })

-  it("should not allow changing password when isWorm is true", () => {
-    const newServerInfo = { ...serverInfo, info: { isWorm: true } }
-    const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
-    expect(
-      wrapper
-        .find("ModalBody")
-        .childAt(0)
-        .text()
-    ).toBe("Credentials of this user cannot be updated through MinIO Browser.")
-  })
-
   it("should not allow changing password when not IAM user", () => {
     const newServerInfo = {
       ...serverInfo,
@@ -66,12 +66,6 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
 		return
 	}

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-		return
-	}
-
 	if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
 		// More than maxConfigSize bytes were available
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)

@@ -112,12 +106,6 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
 		return
 	}

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-		return
-	}
-
 	if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
 		// More than maxConfigSize bytes were available
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)

@@ -358,12 +346,6 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
 		return
 	}

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-		return
-	}
-
 	if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
 		// More than maxConfigSize bytes were available
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
@@ -60,12 +60,6 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-		return
-	}
-
 	vars := mux.Vars(r)
 	accessKey := vars["accessKey"]

@@ -290,12 +284,6 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
 		return
 	}

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-		return
-	}
-
 	vars := mux.Vars(r)
 	accessKey := vars["accessKey"]
 	status := vars["status"]

@@ -329,12 +317,6 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-		return
-	}
-
 	vars := mux.Vars(r)
 	accessKey := vars["accessKey"]

@@ -415,12 +397,6 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
 		return
 	}

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-		return
-	}
-
 	newCred, err := globalIAMSys.NewServiceAccount(ctx, cred.AccessKey, createReq.Policy)
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

@@ -528,12 +504,6 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
 		return
 	}

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-		return
-	}
-
 	serviceAccount := mux.Vars(r)["accessKey"]
 	if serviceAccount == "" {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)

@@ -681,12 +651,6 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
 	vars := mux.Vars(r)
 	policyName := vars["name"]

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-		return
-	}
-
 	if err := globalIAMSys.DeletePolicy(policyName); err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return

@@ -713,12 +677,6 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
 	vars := mux.Vars(r)
 	policyName := vars["name"]

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-		return
-	}
-
 	// Error out if Content-Length is missing.
 	if r.ContentLength <= 0 {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)

@@ -771,12 +729,6 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
 	entityName := vars["userOrGroup"]
 	isGroup := vars["isGroup"] == "true"

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
-		return
-	}
-
 	if !isGroup {
 		ok, err := globalIAMSys.IsTempUser(entityName)
 		if err != nil && err != errNoSuchUser {
@@ -920,7 +920,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
 		}
 	}

-	if _, ok := globalBucketObjectLockConfig.Get(bucket); (ok || globalWORMEnabled) && forceDelete {
+	if _, ok := globalBucketObjectLockConfig.Get(bucket); ok && forceDelete {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
 		return
 	}

@@ -1021,11 +1021,6 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
 		return
 	}

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
-		return
-	}
-
 	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
 		return
@@ -173,7 +173,14 @@ func handleCommonCmdArgs(ctx *cli.Context) {
 }

 func handleCommonEnvVars() {
-	var err error
+	wormEnabled, err := config.LookupWorm()
+	if err != nil {
+		logger.Fatal(config.ErrInvalidWormValue(err), "Invalid worm configuration")
+	}
+	if wormEnabled {
+		logger.Fatal(errors.New("WORM is deprecated"), "global MINIO_WORM support is removed, please downgrade your server or migrate to https://github.com/minio/minio/tree/master/docs/retention")
+	}

 	globalBrowserEnabled, err = config.ParseBool(env.Get(config.EnvBrowser, config.EnableOn))
 	if err != nil {
 		logger.Fatal(config.ErrInvalidBrowserValue(err), "Invalid MINIO_BROWSER value in environment variable")

@@ -239,12 +246,6 @@ func handleCommonEnvVars() {
 		os.Unsetenv(config.EnvAccessKeyOld)
 		os.Unsetenv(config.EnvSecretKeyOld)
 	}

-	globalWORMEnabled, err = config.LookupWorm()
-	if err != nil {
-		logger.Fatal(config.ErrInvalidWormValue(err), "Invalid worm configuration")
-	}
-
 }

 func logStartupMessage(msg string) {
@@ -51,11 +51,6 @@ func startDailyLifecycle(ctx context.Context, objAPI ObjectLayer) {
 }

 func lifecycleRound(ctx context.Context, objAPI ObjectLayer) error {
-	// No action is expected when WORM is enabled
-	if globalWORMEnabled {
-		return nil
-	}
-
 	buckets, err := objAPI.ListBuckets(ctx)
 	if err != nil {
 		return err
@@ -681,13 +681,6 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
 		return oi, toObjectErr(err, bucket, object)
 	}

-	// Deny if WORM is enabled
-	if isWORMEnabled(bucket) {
-		if _, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object)); err == nil {
-			return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
-		}
-	}
-
 	err = fsRenameFile(ctx, appendFilePath, pathJoin(fs.fsPath, bucket, object))
 	if err != nil {
 		logger.LogIf(ctx, err)

@@ -991,12 +991,6 @@ func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string

 	// Entire object was written to the temp location, now it's safe to rename it to the actual location.
 	fsNSObjPath := pathJoin(fs.fsPath, bucket, object)
-	// Deny if WORM is enabled
-	if isWORMEnabled(bucket) {
-		if _, err := fsStatFile(ctx, fsNSObjPath); err == nil {
-			return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
-		}
-	}
 	if err = fsRenameFile(ctx, fsTmpObjPath, fsNSObjPath); err != nil {
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
@@ -213,9 +213,6 @@ var (
 	globalOperationTimeout = newDynamicTimeout(10*time.Minute /*30*/, 600*time.Second) // default timeout for general ops
 	globalHealingTimeout   = newDynamicTimeout(30*time.Minute /*1*/, 30*time.Minute)   // timeout for healing related ops

-	// Is worm enabled
-	globalWORMEnabled bool
-
 	globalBucketObjectLockConfig = objectlock.NewBucketObjectLockConfig()

 	// Disk cache drives
@@ -1527,14 +1527,6 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
 		return
 	}

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {
-			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
-			return
-		}
-	}
-
 	// Validate storage class metadata if present
 	if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" {
 		if !storageclass.IsValid(sc) {

@@ -1725,14 +1717,6 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		return
 	}

-	// Deny if global WORM is enabled
-	if globalWORMEnabled {
-		if _, err := objectAPI.GetObjectInfo(ctx, dstBucket, dstObject, dstOpts); err == nil {
-			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
-			return
-		}
-	}
-
 	getObjectNInfo := objectAPI.GetObjectNInfo
 	if api.CacheAPI() != nil {
 		getObjectNInfo = api.CacheAPI().GetObjectNInfo

@@ -2105,14 +2089,6 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 	rawReader := hashReader
 	pReader := NewPutObjReader(rawReader, nil, nil)

-	// Deny if WORM is enabled
-	if globalWORMEnabled {
-		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {
-			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
-			return
-		}
-	}
-
 	isEncrypted := false
 	var objectEncryptionKey crypto.ObjectKey
 	if objectAPI.IsEncryptionSupported() && !isCompressed {

@@ -2430,19 +2406,6 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
 		return
 	}

-	// Deny if global WORM is enabled
-	if globalWORMEnabled {
-		opts, err := getOpts(ctx, r, bucket, object)
-		if err != nil {
-			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-			return
-		}
-		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {
-			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
-			return
-		}
-	}
-
 	if _, _, _, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, objectAPI.GetObjectInfo, ErrNone, ErrNone); s3Err != ErrNone {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r))
 		return
@@ -2627,19 +2590,6 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
 		}
 	}

-	// Deny if global WORM is enabled
-	if globalWORMEnabled {
-		opts, err := getOpts(ctx, r, bucket, object)
-		if err != nil {
-			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-			return
-		}
-		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {
-			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
-			return
-		}
-	}
-
 	apiErr := ErrNone
 	if _, ok := globalBucketObjectLockConfig.Get(bucket); ok {
 		apiErr = enforceRetentionBypassForDelete(ctx, r, bucket, object, getObjectInfo)

@@ -2864,11 +2814,6 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r
 		return
 	}

-	if globalWORMEnabled {
-		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrObjectLocked), r.URL, guessIsBrowserReq(r))
-		return
-	}
-
 	if _, ok := globalBucketObjectLockConfig.Get(bucket); !ok {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL, guessIsBrowserReq(r))
 		return
@@ -314,7 +314,7 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
 	if objInfo, err := getObjectInfoFn(ctx, bucket, object, opts); err == nil {
 		objExists = true
 		r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
-		if globalWORMEnabled || ((r.Mode == objectlock.RetCompliance) && r.RetainUntilDate.After(t)) {
+		if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) {
 			return mode, retainDate, legalHold, ErrObjectLocked
 		}
 		mode = r.Mode
@@ -652,10 +652,3 @@ func iamPolicyClaimNameOpenID() string {
 func iamPolicyClaimNameSA() string {
 	return "sa-policy"
 }
-
-func isWORMEnabled(bucket string) bool {
-	if isMinioMetaBucketName(bucket) {
-		return false
-	}
-	return globalWORMEnabled
-}
@@ -745,17 +745,6 @@ next:
 		}

 		apiErr := ErrNone
-		// Deny if global WORM is enabled
-		if globalWORMEnabled {
-			opts, err := getOpts(ctx, r, args.BucketName, objectName)
-			if err != nil {
-				apiErr = toAPIErrorCode(ctx, err)
-			} else {
-				if _, err := getObjectInfo(ctx, args.BucketName, objectName, opts); err == nil {
-					apiErr = ErrMethodNotAllowed
-				}
-			}
-		}
 		if _, ok := globalBucketObjectLockConfig.Get(args.BucketName); ok && (apiErr == ErrNone) {
 			apiErr = enforceRetentionBypassForDeleteWeb(ctx, r, args.BucketName, objectName, getObjectInfo)
 			if apiErr != ErrNone && apiErr != ErrNoSuchKey {

@@ -909,11 +898,6 @@ func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *Se
 		return toJSONError(ctx, authErr)
 	}

-	// When WORM is enabled, disallow changing credenatials for owner and user
-	if globalWORMEnabled {
-		return toJSONError(ctx, errChangeCredNotAllowed)
-	}
-
 	if owner {
 		// Owner is not allowed to change credentials through browser.
 		return toJSONError(ctx, errChangeCredNotAllowed)
@@ -673,13 +673,6 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
 	}

 	if xl.isObject(bucket, object) {
-		// Deny if WORM is enabled
-		if isWORMEnabled(bucket) {
-			if _, err := xl.getObjectInfo(ctx, bucket, object, ObjectOptions{}); err == nil {
-				return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
-			}
-		}
-
 		// Rename if an object already exists to temporary location.
 		newUniqueID := mustGetUUID()

@@ -628,13 +628,6 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string,
 	}

 	if xl.isObject(bucket, object) {
-		// Deny if WORM is enabled
-		if isWORMEnabled(bucket) {
-			if _, err := xl.getObjectInfo(ctx, bucket, object, ObjectOptions{}); err == nil {
-				return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
-			}
-		}
-
 		// Rename if an object already exists to temporary location.
 		newUniqueID := mustGetUUID()
@@ -270,16 +270,6 @@ export MINIO_DISK_USAGE_CRAWL_DELAY=30
 minio server /data
 ```

-#### Worm (deprecated)
-
-Enable this to turn on Write-Once-Read-Many. By default it is set to `off`. Set ``MINIO_WORM=on`` environment variable to enable WORM mode. This ENV setting is not recommended anymore, please use Object Locking and Object Retention APIs documented [here](https://github.com/minio/minio/tree/master/docs/retention).
-
-Example:
-
-```sh
-export MINIO_WORM=on
-minio server /data
-```
-
 ### Browser

 Enable or disable access to web UI. By default it is set to `on`. You may override this field with `MINIO_BROWSER` environment variable.
@@ -2,11 +2,9 @@

 MinIO server allows selectively specify WORM for specific objects or configuring a bucket with default object lock configuration that applies default retention mode and retention duration to all incoming objects. Essentially, this makes objects in the bucket immutable i.e. delete and overwrite are not allowed till stipulated time specified in the bucket's object lock configuration or object retention.

-Object locking requires locking to be enabled on a bucket at the time of bucket creation. In addition, a default retention period and retention mode can be configured on a bucket to be
-applied to objects created in that bucket.
+Object locking requires locking to be enabled on a bucket at the time of bucket creation. In addition, a default retention period and retention mode can be configured on a bucket to be applied to objects created in that bucket.

-Independently of retention, an object can also be under legal hold. This effectively disallows
-all deletes and overwrites of an object under legal hold until the hold is lifted.
+Independently of retention, an object can also be under legal hold. This effectively disallows all deletes and overwrites of an object under legal hold until the hold is lifted.

 ## Get Started
@@ -21,6 +19,7 @@ WORM on a bucket is enabled by setting object lock configuration. This configura
 ```sh
 $ awscli s3api put-object-lock-configuration --bucket mybucket --object-lock-configuration 'ObjectLockEnabled=\"Enabled\",Rule={DefaultRetention={Mode=\"GOVERNANCE\",Days=1}}'
 ```

 ### Set object lock

 PutObject API allows setting per object retention mode and retention duration using `x-amz-object-lock-mode` and `x-amz-object-lock-retain-until-date` headers. This takes precedence over any bucket object lock configuration w.r.t retention.
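A rough Go equivalent of the per-object retention call described above, as a hedged sketch with aws-sdk-go: the SDK's `ObjectLockMode` and `ObjectLockRetainUntilDate` fields map to the `x-amz-object-lock-mode` and `x-amz-object-lock-retain-until-date` headers. The endpoint, credentials, bucket, key and retention date are placeholders; the bucket is assumed to have been created with object locking enabled.

```go
package main

import (
	"log"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholder endpoint and credentials; point these at your own deployment.
	s3Client := s3.New(session.Must(session.NewSession(&aws.Config{
		Credentials:      credentials.NewStaticCredentials("ACCESS_KEY", "SECRET_KEY", ""),
		Endpoint:         aws.String("http://localhost:9000"),
		Region:           aws.String("us-east-1"),
		S3ForcePathStyle: aws.Bool(true),
	})))

	// ObjectLockMode and ObjectLockRetainUntilDate map to the
	// x-amz-object-lock-mode and x-amz-object-lock-retain-until-date headers.
	_, err := s3Client.PutObject(&s3.PutObjectInput{
		Bucket:                    aws.String("testbucket"), // assumed created with object locking enabled
		Key:                       aws.String("lockme"),
		Body:                      aws.ReadSeekCloser(strings.NewReader("locked contents")),
		ObjectLockMode:            aws.String("GOVERNANCE"),
		ObjectLockRetainUntilDate: aws.Time(time.Now().Add(24 * time.Hour)),
	})
	if err != nil {
		log.Fatalln("put object with retention:", err)
	}
}
```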
@@ -29,8 +28,7 @@ PutObject API allows setting per object retention mode and retention duration us
 aws s3api put-object --bucket testbucket --key lockme --object-lock-mode GOVERNANCE --object-lock-retain-until-date "2019-11-20" --body /etc/issue
 ```

-See https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html for AWS S3 spec on
-object locking and permissions required for object retention and governance bypass overrides.
+See https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html for AWS S3 spec on object locking and permissions required for object retention and governance bypass overrides.

 ### Set legal hold on an object
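The legal-hold section that the next hunk modifies uses the `x-amz-object-lock-legal-hold` header; the sketch below shows the same operation through aws-sdk-go, where `ObjectLockLegalHoldStatus` on PutObject carries that header and `PutObjectLegalHold` lifts the hold again. Endpoint, credentials, bucket and key are placeholders, and the bucket is assumed to have object locking enabled.

```go
package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholder endpoint and credentials; point these at your own deployment.
	s3Client := s3.New(session.Must(session.NewSession(&aws.Config{
		Credentials:      credentials.NewStaticCredentials("ACCESS_KEY", "SECRET_KEY", ""),
		Endpoint:         aws.String("http://localhost:9000"),
		Region:           aws.String("us-east-1"),
		S3ForcePathStyle: aws.Bool(true),
	})))

	// ObjectLockLegalHoldStatus maps to the x-amz-object-lock-legal-hold header.
	_, err := s3Client.PutObject(&s3.PutObjectInput{
		Bucket:                    aws.String("testbucket"), // assumed created with object locking enabled
		Key:                       aws.String("legalhold"),
		Body:                      aws.ReadSeekCloser(strings.NewReader("held contents")),
		ObjectLockLegalHoldStatus: aws.String("ON"),
	})
	if err != nil {
		log.Fatalln("put object with legal hold:", err)
	}

	// The hold must be lifted explicitly before the object can be overwritten or deleted.
	_, err = s3Client.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{
		Bucket:    aws.String("testbucket"),
		Key:       aws.String("legalhold"),
		LegalHold: &s3.ObjectLockLegalHold{Status: aws.String("OFF")},
	})
	if err != nil {
		log.Fatalln("remove legal hold:", err)
	}
}
```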
@@ -40,24 +38,14 @@ PutObject API allows setting legal hold using `x-amz-object-lock-legal-hold` hea
 aws s3api put-object --bucket testbucket --key legalhold --object-lock-legal-hold-status ON --body /etc/issue
 ```

-See https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html for AWS S3 spec on
-object locking and permissions required for specifying legal hold.
-
-### 3. Note
-
-- When global WORM is enabled by `MINIO_WORM` environment variable or `worm` field in configuration file supersedes bucket level WORM and `PUT object lock configuration` REST API is disabled.
-- In global WORM mode objects can never be overwritten
-- If an object is under legal hold, it cannot be overwritten unless the legal hold is explicitly removed.
-- In `Compliance` mode, objects cannot be overwritten or deleted by anyone until retention period
-is expired. If user has requisite governance bypass permissions, an object's retention date can
-be extended in `Compliance` mode.
-- Currently `Governance` mode does not allow overwriting an existing object as versioning is not
-available in MinIO. However, if user has requisite `Governance` bypass permissions, an object in `Governance` mode can be overwritten.
-- Once object lock configuration is set to a bucket, new objects inherit the retention settings of the bucket object lock configuration (if set) or the retention headers set in the PUT request
-or set with PutObjectRetention API call
-
-- MINIO_NTP_SERVER environment variable can be set to remote NTP server endpoint if system time
-is not desired for setting retention dates.
+See https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html for AWS S3 spec on object locking and permissions required for specifying legal hold.
+
+> NOTE:
+> - If an object is under legal hold, it cannot be overwritten unless the legal hold is explicitly removed.
+> - In `Compliance` mode, objects cannot be overwritten or deleted by anyone until retention period is expired. If user has requisite governance bypass permissions, an object's retention date can be extended in `Compliance` mode.
+> - Currently `Governance` mode does not allow overwriting an existing object as versioning is not available in MinIO. However, if user has requisite `Governance` bypass permissions, an object in `Governance` mode can be overwritten.
+> - Once object lock configuration is set to a bucket, new objects inherit the retention settings of the bucket object lock configuration (if set) or the retention headers set in the PUT request or set with PutObjectRetention API call
+> - *MINIO_NTP_SERVER* environment variable can be set to remote NTP server endpoint if system time is not desired for setting retention dates.

 ## Explore Further
@@ -14,7 +14,6 @@ Mint is a testing framework for Minio object server, available as a docker image
 - minio-py
 - minio-dotnet
 - s3cmd
-- worm

 ## Running Mint

@@ -42,7 +41,7 @@ Below environment variables are required to be passed to the docker container. S
 | `ACCESS_KEY` | Access key of access `SERVER_ENDPOINT` | `Q3AM3UQ867SPQQA43P2F` |
 | `SECRET_KEY` | Secret Key of access `SERVER_ENDPOINT` | `zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG` |
 | `ENABLE_HTTPS` | (Optional) Set `1` to indicate to use HTTPS to access `SERVER_ENDPOINT`. Defaults to `0` (HTTP) | `1` |
-| `MINT_MODE` | (Optional) Set mode indicating what category of tests to be run by values `core`, `full` or `worm`. Defaults to `core` | `full` |
+| `MINT_MODE` | (Optional) Set mode indicating what category of tests to be run by values `core`, `full`. Defaults to `core` | `full` |
 | `DOMAIN` | (Optional) Value of MINIO_DOMAIN environment variable used in Minio server | `myminio.com` |
 | `ENABLE_VIRTUAL_STYLE` | (Optional) Set `1` to indicate virtual style access . Defaults to `0` (Path style) | `1` |

@@ -87,22 +86,19 @@ $ docker run -e SERVER_ENDPOINT=play.minio.io:9000 -e ACCESS_KEY=Q3AM3UQ867SPQQA
 -e SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG \
 -e ENABLE_HTTPS=1 -e MINT_MODE=full minio/mint:latest
 ```
-In case of Worm Mode, start your server with configuration `MINT_MODE` set to `worm`.
-Build/Run of local docker image for Worm mode, is to be tested against your server configuration, by
-
-```

 ### Adding tests with new tool/SDK

 Below are the steps need to be followed

-* Create new app directory under [build](https://github.com/minio/mint/tree/master/build) and [run/core](https://github.com/minio/mint/tree/master/run/core) directories.
-* Create `install.sh` which does installation of required tool/SDK under app directory.
-* Any build and install time dependencies should be added to [install-packages.list](https://github.com/minio/mint/blob/master/install-packages.list).
-* Build time dependencies should be added to [remove-packages.list](https://github.com/minio/mint/blob/master/remove-packages.list) for removal to have clean Mint docker image.
-* Add `run.sh` in app directory under `run/core` which execute actual tests.
+- Create new app directory under [build](https://github.com/minio/mint/tree/master/build) and [run/core](https://github.com/minio/mint/tree/master/run/core) directories.
+- Create `install.sh` which does installation of required tool/SDK under app directory.
+- Any build and install time dependencies should be added to [install-packages.list](https://github.com/minio/mint/blob/master/install-packages.list).
+- Build time dependencies should be added to [remove-packages.list](https://github.com/minio/mint/blob/master/remove-packages.list) for removal to have clean Mint docker image.
+- Add `run.sh` in app directory under `run/core` which execute actual tests.

 #### Test data

 Tests may use pre-created data set to perform various object operations on Minio server. Below data files are available under `/mint/data` directory.

 | File name | Size |
@@ -1,19 +0,0 @@ (deleted file)
-#!/bin/bash -e
-#
-# Mint (C) 2017-2019 Minio, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-test_run_dir="$MINT_RUN_CORE_DIR/worm"
-GO111MODULE=on CGO_ENABLED=0 go build -o "$test_run_dir/worm" "$test_run_dir/quick-worm-tests.go"
mint/mint.sh
@@ -154,30 +154,15 @@ function main()
 	[ "$ENABLE_HTTPS" == "1" ] && trust_s3_endpoint_tls_cert

 	declare -a run_list
-	if [ "$MINT_MODE" == "worm" ]; then
-		if [ "$#" -gt 1 ]; then
-			echo "No argument is accepted for worm mode"
-			exit 1
-		fi
-
-		run_list=( "$TESTS_DIR/worm" )
-	else
-		sdks=( "$@" )
-
-		## populate all sdks except worm when no argument is given.
-		if [ "$#" -eq 0 ]; then
-			sdks=( $(ls -I worm "$TESTS_DIR") )
-		fi
-
-		for sdk in "${sdks[@]}"; do
-			if [ "$sdk" == "worm" ]; then
-				echo "worm test cannot be run without worm mode"
-				exit 1
-			fi
-
-			run_list=( "${run_list[@]}" "$TESTS_DIR/$sdk" )
-		done
-	fi
+	sdks=( "$@" )
+
+	if [ "$#" -eq 0 ]; then
+		sdks=( $(ls "$TESTS_DIR") )
+	fi
+
+	for sdk in "${sdks[@]}"; do
+		run_list=( "${run_list[@]}" "$TESTS_DIR/$sdk" )
+	done

 	count="${#run_list[@]}"
 	i=0
@@ -1,409 +0,0 @@ (deleted file)
/*
 * Mint, (C) 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"math/rand"
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	log "github.com/sirupsen/logrus"
)

const charset = "abcdefghijklmnopqrstuvwxyz0123456789"

var randSource *rand.Rand = rand.New(rand.NewSource(time.Now().UnixNano()))

const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)

const (
	PASS = "PASS" // Indicate that a test passed
	FAIL = "FAIL" // Indicate that a test failed
	NA   = "NA"   // Indicate that a test is not applicable

	maxPartSize = int64(512 * 1000 * 1024)
	maxRetries  = 1
)

type mintJSONFormatter struct {
}

func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
	data := make(log.Fields, len(entry.Data))
	for k, v := range entry.Data {
		switch v := v.(type) {
		case error:
			// Otherwise errors are ignored by `encoding/json`
			// https://github.com/sirupsen/logrus/issues/137
			data[k] = v.Error()
		default:
			data[k] = v
		}
	}

	serialized, err := json.Marshal(data)
	if err != nil {
		return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err)
	}
	return append(serialized, '\n'), nil
}

// log successful test runs
func successLogger(function string, args map[string]interface{}, startTime time.Time) *log.Entry {
	// calculate the test case duration
	duration := time.Since(startTime)
	// log with the fields as per mint
	fields := log.Fields{"name": "test worm mode", "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": PASS}
	return log.WithFields(fields)
}

// log failed test runs
func failureLog(function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
	// calculate the test case duration
	duration := time.Since(startTime)
	var fields log.Fields
	// log with the fields as per mint
	if err != nil {
		fields = log.Fields{"name": "test worm mode", "function": function, "args": args,
			"duration": duration.Nanoseconds() / 1000000, "status": FAIL, "alert": alert, "message": message, "error": err}
	} else {
		fields = log.Fields{"name": "test worm mode", "function": function, "args": args,
			"duration": duration.Nanoseconds() / 1000000, "status": FAIL, "alert": alert, "message": message}
	}
	return log.WithFields(fields)
}

func randBucketName() string {
	b := make([]byte, 55)
	for i := range b {
		b[i] = charset[randSource.Intn(len(charset))]
	}
	return "bucket-" + string(b)
}

func testPutDeletObject(s3Client *s3.S3) {
	startTime := time.Now()
	object := "testObject"
	function := "PutAndDelete"
	bucket := randBucketName()
	expiry := 1 * time.Minute
	args := map[string]interface{}{
		"bucketName": bucket,
		"objectName": object,
		"expiry":     expiry,
	}
	// First time bucket creation will be successful
	_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		failureLog(function, args, startTime, "", "WORM_MODE ON - CreateBucket Failed", err).Fatal()
		return
	}
	// First time put object will be successful
	putInput1 := &s3.PutObjectInput{
		Body:   aws.ReadSeekCloser(strings.NewReader("fileToUpload")),
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
	}
	_, err = s3Client.PutObject(putInput1)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("WORM_MODE ON - expected to pass but got %v", err), err).Fatal()
		return
	}
	// Put Object
	putInput2 := &s3.PutObjectInput{
		Body:   aws.ReadSeekCloser(strings.NewReader("filetouploadSecondTime")),
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
	}
	_, err = s3Client.PutObject(putInput2)
	if err == nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("WORM_MODE ON Put is expected to fail, but it passed %v", nil), nil).Fatal()
		return
	}

	// Deleting the Object
	delObject := &s3.DeleteObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
	}
	_, err = s3Client.DeleteObject(delObject)
	if err == nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("WORM_MODE ON Delete is expected to fail, but it passed %v", nil), nil).Fatal()
		return
	}
	successLogger(function, args, startTime).Info()
}

func testCopyObject(s3Client *s3.S3) {
	startTime := time.Now()
	function := "CopyObject"
	object := "DestinationObject"
	object1 := "SourceObject"
	destinationBucket := randBucketName()
	sourceBucket := randBucketName()
	expiry := 1 * time.Minute
	args := map[string]interface{}{
		"bucketName": destinationBucket,
		"objectName": object,
		"expiry":     expiry,
	}
	// Create Destination bucket
	_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(destinationBucket),
	})
	if err != nil {
		failureLog(function, args, startTime, "", "WORM_MODE ON Destination Bucket Creation Failed", err).Fatal()
		return
	}

	// Put object on Destination bucket
	putInput1 := &s3.PutObjectInput{
		Body:   aws.ReadSeekCloser(strings.NewReader("file to Upload In Destination")),
		Bucket: aws.String(destinationBucket),
		Key:    aws.String(object),
	}
	_, err = s3Client.PutObject(putInput1)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("WORM_MODE ON PUT expected to pass but got %v", err), err).Fatal()
		return
	}

	// Create Source bucket
	_, err1 := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(sourceBucket),
	})
	if err1 != nil {
		failureLog(function, args, startTime, "", "WORM_MODE ON Source Bucket Creation Failed", err).Fatal()
		return
	}

	// Put object on Destination bucket
	putInput2 := &s3.PutObjectInput{
		Body:   aws.ReadSeekCloser(strings.NewReader("file content to copy ")),
		Bucket: aws.String(sourceBucket),
		Key:    aws.String(object1),
	}
	_, err = s3Client.PutObject(putInput2)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("WORM_MODE ON PUT expected to pass but got %v", err), err).Fatal()
		return
	}

	// Test for Copy Object
	copyInput := &s3.CopyObjectInput{
		Bucket:     aws.String(destinationBucket),
		CopySource: aws.String(sourceBucket + "/" + object1),
		Key:        aws.String(object),
	}

	_, err = s3Client.CopyObject(copyInput)
	if err == nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("WORM_MODE ON Copy Object should fail, but it passed %v", nil), nil).Fatal()
		return
	}
	successLogger(function, args, startTime).Info()
}

func testPutMultipart(s3Client *s3.S3) {
	bucket := randBucketName()
	startTime := time.Now()
	object := "testObject"
	expiry := 1 * time.Minute
	args := map[string]interface{}{
		"bucketName": bucket,
		"objectName": object,
		"expiry":     expiry,
	}
	function := "PutMultiPart"
	file, err := os.Open("/mint/data/datafile-5-MB")
	if err != nil {
		failureLog(function, args, startTime, "", "WORM_MODE ON err opening file", err).Fatal()
		return
	}
	defer file.Close()
	fileInfo, _ := file.Stat()
	size := fileInfo.Size()
	buffer := make([]byte, size)
	fileType := http.DetectContentType(buffer)
	file.Read(buffer)

	path := file.Name()
	input := &s3.CreateMultipartUploadInput{
		Bucket:      aws.String(bucket),
		Key:         aws.String(path),
		ContentType: aws.String(fileType),
	}
	_, err = s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		failureLog(function, args, startTime, "", "WORM_MODE ON Destination Bucket Creation Failed", err).Fatal()
		return
	}
	// Upload for the first time
	resp, err := s3Client.CreateMultipartUpload(input)
	if err != nil {
		failureLog(function, args, startTime, "", "WORM_MODE ON CreateMultipartUpload Failed", err).Fatal()
		return
	}
	var curr, partLength int64
	var remaining = size
	var completedParts []*s3.CompletedPart
	partNumber := 1
	for curr = 0; remaining != 0; curr += partLength {
		if remaining < maxPartSize {
			partLength = remaining
		} else {
			partLength = maxPartSize
		}
		completedPart, err := uploadPart(s3Client, resp, buffer[curr:curr+partLength], partNumber)
		if err != nil {
			failureLog(function, args, startTime, "", "WORM_MODE ON uploadPart Failed", err).Fatal()
			err := abortMultipartUpload(s3Client, resp)
			if err != nil {
				failureLog(function, args, startTime, "", "WORM_MODE ON abortMultipartUpload Failed", err).Fatal()
			}
			return
		}
		remaining -= partLength
		partNumber++
		completedParts = append(completedParts, completedPart)
	}
	_, err = completeMultipartUpload(s3Client, resp, completedParts)
	if err != nil {
		failureLog(function, args, startTime, "", "WORM_MODE ON completeMultipartUpload Failed", err).Fatal()
		return
	}
	// These tests should fail
	_, err = s3Client.CreateMultipartUpload(input)
	if err == nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("WORM_MODE ON CreateMultipartUpload must fail, but it passed %v", nil), nil).Fatal()
		return
	}
	successLogger(function, args, startTime).Info()
}

func completeMultipartUpload(svc *s3.S3, resp *s3.CreateMultipartUploadOutput, completedParts []*s3.CompletedPart) (*s3.CompleteMultipartUploadOutput, error) {
	completeInput := &s3.CompleteMultipartUploadInput{
		Bucket:   resp.Bucket,
		Key:      resp.Key,
		UploadId: resp.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: completedParts,
		},
	}
	return svc.CompleteMultipartUpload(completeInput)
}

func uploadPart(svc *s3.S3, resp *s3.CreateMultipartUploadOutput, fileBytes []byte, partNumber int) (*s3.CompletedPart, error) {
	tryNum := 1
	partInput := &s3.UploadPartInput{
		Body:          bytes.NewReader(fileBytes),
		Bucket:        resp.Bucket,
		Key:           resp.Key,
		PartNumber:    aws.Int64(int64(partNumber)),
		UploadId:      resp.UploadId,
		ContentLength: aws.Int64(int64(len(fileBytes))),
	}

	for tryNum <= maxRetries {
		uploadResult, err := svc.UploadPart(partInput)
		if err != nil {
			if tryNum == maxRetries {
				if aerr, ok := err.(awserr.Error); ok {
					return nil, aerr
				}
				return nil, err
			}
			tryNum++
		} else {
			return &s3.CompletedPart{
				ETag:       uploadResult.ETag,
				PartNumber: aws.Int64(int64(partNumber)),
			}, nil
		}
	}
	return nil, nil
}

func abortMultipartUpload(svc *s3.S3, resp *s3.CreateMultipartUploadOutput) error {
	abortInput := &s3.AbortMultipartUploadInput{
		Bucket:   resp.Bucket,
		Key:      resp.Key,
		UploadId: resp.UploadId,
	}
	_, err := svc.AbortMultipartUpload(abortInput)
	return err
}

func main() {
	endpoint := os.Getenv("SERVER_ENDPOINT")
	accessKey := os.Getenv("ACCESS_KEY")
	secretKey := os.Getenv("SECRET_KEY")
	secure := os.Getenv("ENABLE_HTTPS")
	sdkEndpoint := "http://" + endpoint
	if secure == "1" {
		sdkEndpoint = "https://" + endpoint
	}
	creds := credentials.NewStaticCredentials(accessKey, secretKey, "")
	newSession := session.New()
	s3Config := &aws.Config{
		Credentials:      creds,
		Endpoint:         aws.String(sdkEndpoint),
		Region:           aws.String("us-east-1"),
		S3ForcePathStyle: aws.Bool(true),
	}

	// Create an S3 service object in the default region.
	s3Client := s3.New(newSession, s3Config)

	// Output to stdout instead of the default stderr
	log.SetOutput(os.Stdout)
	// create custom formatter
	mintFormatter := mintJSONFormatter{}
	// set custom formatter
	log.SetFormatter(&mintFormatter)
	// log Info or above -- success cases are Info level, failures are Fatal level
	log.SetLevel(log.InfoLevel)
	// execute tests
	// Test Put and Delete Object
	testPutDeletObject(s3Client)
	//testCopyObject
	testCopyObject(s3Client)
	// Test Multipart Upload
	testPutMultipart(s3Client)
}
@@ -1,28 +0,0 @@ (deleted file)
#!/bin/bash
#
# Mint (C) 2017 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# handle command line arguments
if [ $# -ne 2 ]; then
    echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
    exit 1
fi

output_log_file="$1"
error_log_file="$2"

# run tests
/mint/run/core/worm/worm 1>>"$output_log_file" 2>"$error_log_file"