From 4915433bd28585868e8259996d43fbcf6b4d7b46 Mon Sep 17 00:00:00 2001
From: Harshavardhana
Date: Fri, 12 Jun 2020 20:04:01 -0700
Subject: [PATCH] Support bucket versioning (#9377)

- Implement a new xl.json 2.0.0 format to support versioning;
  this moves the entire marshaling logic to the POSIX layer, and
  the top layer always consumes a common FileInfo construct,
  which simplifies metadata reads.
- Implement list object versions.
- Migrate from crchash to siphash for object placement on new
  deployments.

Fixes #2111
---
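NOTE (editor): The placement migration mentioned above swaps the CRC32-based
hash for a SipHash keyed with the deployment ID when mapping an object to an
erasure set. The sketch below is illustrative, not the verbatim MinIO code:
the function names and the exact key derivation are assumptions; only the
general technique (hash the object key, reduce modulo the set count, key the
new hash per deployment) is taken from the commit message.

    // Illustrative sketch of CRC32- vs SipHash-based object placement.
    package main

    import (
    	"encoding/binary"
    	"fmt"
    	"hash/crc32"

    	"github.com/dchest/siphash"
    )

    // crcHashMod: legacy placement, CRC32 of the object key modulo the
    // number of erasure sets. The same key maps to the same set on every
    // deployment, keyed or not.
    func crcHashMod(key string, cardinality int) int {
    	if cardinality <= 0 {
    		return -1
    	}
    	return int(crc32.Checksum([]byte(key), crc32.IEEETable) % uint32(cardinality))
    }

    // sipHashMod: new-deployment placement, SipHash of the object key
    // keyed with a 16-byte deployment ID, modulo the set count, so the
    // distribution differs per deployment.
    func sipHashMod(key string, cardinality int, id [16]byte) int {
    	if cardinality <= 0 {
    		return -1
    	}
    	k0 := binary.LittleEndian.Uint64(id[0:8])
    	k1 := binary.LittleEndian.Uint64(id[8:16])
    	return int(siphash.Hash(k0, k1, []byte(key)) % uint64(cardinality))
    }

    func main() {
    	var id [16]byte // hypothetically, the deployment ID from format.json
    	fmt.Println(crcHashMod("bucket/object", 16))
    	fmt.Println(sipHashMod("bucket/object", 16, id))
    }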
 Dockerfile.arm.release | 2 +-
 Dockerfile.arm64.release | 2 +-
 Dockerfile.dev.browser | 1 -
 cmd/admin-handlers.go | 22 +-
 cmd/admin-handlers_test.go | 63 +-
 cmd/admin-heal-ops.go | 66 +-
 cmd/admin-router.go | 6 +-
 cmd/admin-server-info.go | 2 +-
 cmd/api-datatypes.go | 15 +-
 cmd/api-errors.go | 17 +-
 cmd/api-headers.go | 22 +
 cmd/api-response.go | 56 +-
 cmd/api-router.go | 4 +-
 cmd/background-heal-ops.go | 24 +-
 cmd/background-newdisks-heal-ops.go | 6 +-
 cmd/benchmark-utils_test.go | 34 +-
 cmd/bitrot.go | 19 -
 cmd/bitrot_test.go | 2 +-
 cmd/bucket-encryption.go | 1 +
 cmd/bucket-handlers.go | 211 +-
 cmd/bucket-handlers_test.go | 51 +-
 cmd/bucket-lifecycle.go | 1 -
 cmd/bucket-listobjects-handlers.go | 27 +-
 cmd/bucket-metadata-sys.go | 13 +
 cmd/bucket-metadata.go | 16 +
 cmd/bucket-metadata_gen.go | 35 +-
 cmd/bucket-object-lock.go | 141 +-
 cmd/bucket-policy-handlers.go | 2 +-
 cmd/bucket-policy-handlers_test.go | 8 +-
 cmd/bucket-policy.go | 15 +-
 cmd/bucket-quota.go | 79 +-
 cmd/bucket-versioning-handler.go | 128 ++
 cmd/bucket-versioning.go | 57 +
 cmd/config-common.go | 2 +-
 cmd/config-current.go | 12 +-
 cmd/config.go | 3 +-
 cmd/consolelogger.go | 2 +-
 cmd/copy-part-range.go | 4 +-
 cmd/data-crawler.go | 54 +-
 cmd/disk-cache-backend.go | 4 +-
 cmd/disk-cache-utils.go | 55 +-
 cmd/disk-cache.go | 49 +-
 cmd/encryption-v1.go | 53 +-
 cmd/endpoint.go | 14 +-
 cmd/endpoint_test.go | 26 +-
 cmd/{xl-v1-bucket.go => erasure-bucket.go} | 42 +-
 cmd/erasure-coding.go | 143 ++
 cmd/{xl-v1-common.go => erasure-common.go} | 29 +-
 ...-common_test.go => erasure-common_test.go} | 10 +-
 cmd/erasure-decode.go | 4 +-
 cmd/erasure-decode_test.go | 10 +-
 cmd/erasure-encode.go | 2 +-
 cmd/erasure-encode_test.go | 2 +-
 cmd/{xl-v1-errors.go => erasure-errors.go} | 8 +-
 cmd/erasure-heal_test.go | 4 +-
 ...ng-common.go => erasure-healing-common.go} | 90 +-
 ...test.go => erasure-healing-common_test.go} | 101 +-
 cmd/{xl-v1-healing.go => erasure-healing.go} | 205 +-
 ...ealing_test.go => erasure-healing_test.go} | 256 ++-
 cmd/erasure-list-objects.go | 58 +
 ...asure-heal.go => erasure-lowlevel-heal.go} | 2 +-
 ...-v1-utils.go => erasure-metadata-utils.go} | 50 +-
 cmd/erasure-metadata-utils_test.go | 201 ++
 cmd/erasure-metadata.go | 326 +++
 cmd/erasure-metadata_test.go | 153 ++
 ...l-v1-multipart.go => erasure-multipart.go} | 431 ++--
 cmd/{xl-v1-object.go => erasure-object.go} | 746 ++++---
 ...-object_test.go => erasure-object_test.go} | 358 ++--
 cmd/{xl-sets.go => erasure-sets.go} | 739 +++----
 cmd/erasure-sets_test.go | 245 +++
 cmd/erasure-utils.go | 2 +-
 cmd/{xl-zones.go => erasure-zones.go} | 497 +++--
 cmd/erasure.go | 455 ++++-
 cmd/erasure_test.go | 4 +-
 cmd/format-disk-cache.go | 2 +-
 cmd/{format-xl.go => format-erasure.go} | 359 ++--
 ...rmat-xl_test.go => format-erasure_test.go} | 202 +-
 cmd/format-fs.go | 2 +-
 cmd/fs-v1-helpers.go | 40 +-
 cmd/fs-v1-helpers_test.go | 44 +-
 cmd/fs-v1-metadata_test.go | 6 +-
 cmd/fs-v1-multipart.go | 16 +
 cmd/fs-v1-multipart_test.go | 14 +-
 cmd/fs-v1-rwpool_test.go | 6 +-
 cmd/fs-v1.go | 138 +-
 cmd/fs-v1_test.go | 34 +-
 cmd/gateway-common.go | 36 -
 cmd/gateway-unsupported.go | 41 +-
 cmd/gateway/azure/gateway-azure.go | 25 +-
 cmd/gateway/azure/gateway-azure_test.go | 37 -
 cmd/gateway/gcs/gateway-gcs.go | 26 +-
 cmd/gateway/hdfs/gateway-hdfs.go | 26 +-
 cmd/gateway/s3/gateway-s3-sse.go | 28 +-
 cmd/gateway/s3/gateway-s3.go | 34 +-
 cmd/generic-handlers.go | 97 +-
 cmd/generic-handlers_test.go | 16 +-
 cmd/global-heal.go | 38 +-
 cmd/globals.go | 11 +-
 cmd/handler-utils.go | 2 +-
 cmd/http-tracer.go | 2 +-
 cmd/http/headers.go | 4 +
 cmd/iam.go | 2 +-
 cmd/lock-rest-server.go | 4 +-
 cmd/merge-walk-pool.go | 106 +-
 cmd/metrics.go | 2 +-
 cmd/namespace-lock.go | 14 +-
 cmd/naughty-disk_test.go | 72 +-
 cmd/notification.go | 4 +-
 cmd/obdinfo.go | 11 +-
 cmd/obdinfo_freebsd.go | 2 +-
 cmd/obdinfo_other.go | 2 +-
 cmd/object-api-common.go | 10 +-
 cmd/object-api-datatypes.go | 58 +
 cmd/object-api-deleteobject_test.go | 14 +-
 cmd/object-api-errors.go | 64 +-
 cmd/object-api-getobject_test.go | 114 +-
 cmd/object-api-getobjectinfo_test.go | 4 +-
 cmd/object-api-interface.go | 32 +-
 cmd/object-api-listobjects_test.go | 6 +-
 cmd/object-api-multipart_test.go | 65 +-
 cmd/object-api-putobject_test.go | 112 +-
 cmd/object-handlers-common.go | 78 +-
 cmd/object-handlers.go | 231 +--
 cmd/object-handlers_test.go | 31 +-
 cmd/object_api_suite_test.go | 68 +-
 cmd/os-readdir_other.go | 33 +-
 cmd/os-readdir_test.go | 2 +-
 cmd/os-readdir_unix.go | 32 +-
 cmd/os-readdir_windows.go | 75 +-
 cmd/os-reliable_test.go | 12 +-
 cmd/peer-rest-server.go | 6 +-
 cmd/posix-diskid-check.go | 224 ---
 cmd/post-policy_test.go | 12 +-
 cmd/prepare-storage.go | 48 +-
 cmd/routers.go | 8 +-
 cmd/server-main.go | 37 +-
 cmd/server-main_test.go | 4 +-
 cmd/server_test.go | 22 +-
 cmd/setup-type.go | 16 +-
 cmd/storage-datatypes.go | 96 +-
 cmd/storage-errors.go | 41 +-
 cmd/storage-interface.go | 21 +-
 cmd/storage-rest-client.go | 225 ++-
 cmd/storage-rest-common.go | 53 +-
 cmd/storage-rest-server.go | 323 +--
 cmd/storage-rest_test.go | 18 +-
 cmd/test-utils_test.go | 120 +-
 cmd/tree-walk_test.go | 94 +-
 cmd/update_test.go | 8 +-
 cmd/utils.go | 8 +-
 cmd/utils_test.go | 14 +-
 cmd/web-handlers.go | 79 +-
 cmd/web-handlers_test.go | 82 +-
 cmd/xl-sets_test.go | 153 --
 cmd/xl-storage-disk-id-check.go | 266 +++
 cmd/{posix-errors.go => xl-storage-errors.go} | 2 +-
 ...rors_test.go => xl-storage-errors_test.go} | 2 +-
 cmd/xl-storage-format-utils.go | 81 +
 cmd/xl-storage-format-v1.go | 208 ++
 cmd/xl-storage-format-v1_gen.go | 1568 +++++++++++++++
 cmd/xl-storage-format-v1_gen_test.go | 688 +++++++
 cmd/xl-storage-format-v2.go | 601 ++++++
 cmd/xl-storage-format-v2_gen.go | 1780 +++++++++++++++++
 cmd/xl-storage-format-v2_gen_test.go | 462 +++++
 ...tils_test.go => xl-storage-format_test.go} | 245 +--
 cmd/{posix.go => xl-storage.go} | 979 +++++++--
 cmd/{posix_test.go => xl-storage_test.go} | 643 +++---
 ...x_unix_test.go => xl-storage_unix_test.go} | 29 +-
 ...ows_test.go => xl-storage_windows_test.go} | 8 +-
 cmd/xl-v1-list-objects-heal.go | 42 -
 cmd/xl-v1-list-objects.go | 27 -
 cmd/xl-v1-metadata.go | 459 -----
 cmd/xl-v1-metadata_test.go | 249 ---
 cmd/xl-v1-multipart_test.go | 75 -
 cmd/xl-v1.go | 391 ----
 docs/bucket/versioning/DESIGN.md | 100 +
 docs/bucket/versioning/README.md | 37 +
 .../versioning_DELETE_versionEnabled.png | Bin 0 -> 47451 bytes
 .../versioning_DELETE_versionEnabled_id.png | Bin 0 -> 39988 bytes
 .../versioning_GET_versionEnabled.png | Bin 0 -> 36983 bytes
 .../versioning_GET_versionEnabled_id.png | Bin 0 -> 62819 bytes
 .../versioning_PUT_versionEnabled.png | Bin 0 -> 26946 bytes
 docs/bucket/versioning/xl-meta-to-json.go | 36 +
 docs/minio-limits.md | 1 -
 docs/zh_CN/backend/README.md | 8 -
 docs/zh_CN/backend/fs/README.md | 24 -
 docs/zh_CN/backend/xl/README.md | 54 -
 go.mod | 2 +-
 go.sum | 2 +
 mint/preinstall.sh | 2 +-
 mint/run/core/awscli/test.sh | 314 +--
 pkg/bucket/lifecycle/expiration.go | 50 +-
 pkg/bucket/lifecycle/lifecycle.go | 97 +-
 pkg/bucket/lifecycle/lifecycle_test.go | 24 +-
 pkg/bucket/lifecycle/noncurrentversion.go | 36 +-
 pkg/bucket/lifecycle/rule_test.go | 10 +-
 pkg/bucket/policy/action.go | 77 +-
 pkg/bucket/policy/condition/key.go | 4 +
 pkg/bucket/versioning/error.go | 44 +
 pkg/bucket/versioning/versioning.go | 79 +
 pkg/event/name.go | 5 +
 pkg/iam/policy/action.go | 80 +-
 pkg/madmin/info-commands.go | 4 +-
 203 files changed, 13833 insertions(+), 6919 deletions(-)
 create mode 100644 cmd/bucket-versioning-handler.go
 create mode 100644 cmd/bucket-versioning.go
 rename cmd/{xl-v1-bucket.go => erasure-bucket.go} (84%)
 create mode 100644 cmd/erasure-coding.go
 rename cmd/{xl-v1-common.go => erasure-common.go} (72%)
 rename cmd/{xl-v1-common_test.go => erasure-common_test.go} (91%)
 rename cmd/{xl-v1-errors.go => erasure-errors.go} (73%)
 rename cmd/{xl-v1-healing-common.go => erasure-healing-common.go} (66%)
 rename cmd/{xl-v1-healing-common_test.go => erasure-healing-common_test.go} (77%)
 rename cmd/{xl-v1-healing.go => erasure-healing.go} (78%)
 rename cmd/{xl-v1-healing_test.go => erasure-healing_test.go} (53%)
 create mode 100644 cmd/erasure-list-objects.go
 rename cmd/{erasure-heal.go => erasure-lowlevel-heal.go} (96%)
 rename cmd/{xl-v1-utils.go => erasure-metadata-utils.go} (81%)
 create mode 100644 cmd/erasure-metadata-utils_test.go
 create mode 100644 cmd/erasure-metadata.go
 create mode 100644 cmd/erasure-metadata_test.go
 rename cmd/{xl-v1-multipart.go => erasure-multipart.go} (53%)
 rename cmd/{xl-v1-object.go => erasure-object.go} (51%)
 rename cmd/{xl-v1-object_test.go => erasure-object_test.go} (55%)
 rename cmd/{xl-sets.go => erasure-sets.go} (66%)
 create mode 100644 cmd/erasure-sets_test.go
 rename cmd/{xl-zones.go => erasure-zones.go} (69%)
 rename cmd/{format-xl.go => format-erasure.go} (66%)
 rename cmd/{format-xl_test.go => format-erasure_test.go} (61%)
 delete mode 100644 cmd/posix-diskid-check.go
 delete mode 100644 cmd/xl-sets_test.go
 create mode 100644 cmd/xl-storage-disk-id-check.go
 rename cmd/{posix-errors.go => xl-storage-errors.go} (98%)
 rename cmd/{posix-errors_test.go => xl-storage-errors_test.go} (96%)
 create mode 100644 cmd/xl-storage-format-utils.go
 create mode 100644 cmd/xl-storage-format-v1.go
 create mode 100644 cmd/xl-storage-format-v1_gen.go
 create mode 100644 cmd/xl-storage-format-v1_gen_test.go
 create mode 100644 cmd/xl-storage-format-v2.go
 create mode 100644 cmd/xl-storage-format-v2_gen.go
 create mode 100644 cmd/xl-storage-format-v2_gen_test.go
 rename cmd/{xl-v1-utils_test.go => xl-storage-format_test.go} (59%)
 rename cmd/{posix.go => xl-storage.go} (64%)
 rename cmd/{posix_test.go => xl-storage_test.go} (69%)
 rename cmd/{posix_unix_test.go => xl-storage_unix_test.go} (79%)
 rename cmd/{posix_windows_test.go => xl-storage_windows_test.go} (92%)
 delete mode 100644 cmd/xl-v1-list-objects-heal.go
 delete mode 100644 cmd/xl-v1-list-objects.go
 delete mode 100644 cmd/xl-v1-metadata.go
 delete mode 100644 cmd/xl-v1-metadata_test.go
 delete mode 100644 cmd/xl-v1-multipart_test.go
 delete mode 100644 cmd/xl-v1.go
 create mode 100644 docs/bucket/versioning/DESIGN.md
 create mode 100644 docs/bucket/versioning/README.md
 create mode 100644 docs/bucket/versioning/versioning_DELETE_versionEnabled.png
 create mode 100644 docs/bucket/versioning/versioning_DELETE_versionEnabled_id.png
 create mode 100644 docs/bucket/versioning/versioning_GET_versionEnabled.png
 create mode 100644 docs/bucket/versioning/versioning_GET_versionEnabled_id.png
 create mode 100644 docs/bucket/versioning/versioning_PUT_versionEnabled.png
 create mode 100644 docs/bucket/versioning/xl-meta-to-json.go
 delete mode 100644 docs/zh_CN/backend/README.md
 delete mode 100644 docs/zh_CN/backend/fs/README.md
 delete mode 100644 docs/zh_CN/backend/xl/README.md
 create mode 100644 pkg/bucket/versioning/error.go
 create mode 100644 pkg/bucket/versioning/versioning.go
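NOTE (editor): The "common FileInfo construct" named in the commit message is
the structure the erasure layer consumes regardless of whether the on-disk
metadata is legacy xl.json 1.x or the new 2.0.0 format (see
cmd/storage-datatypes.go and cmd/xl-storage-format-v2.go in the diffstat).
The sketch below is an illustrative approximation of that shape under those
assumptions, not the exact upstream definition:

    // Illustrative sketch of a version-aware FileInfo-style construct.
    package storage

    import "time"

    type FileInfo struct {
    	Volume    string            // bucket holding the object
    	Name      string            // object name
    	VersionID string            // empty means the "null" version
    	IsLatest  bool              // whether this version is the head
    	Deleted   bool              // true if this version is a delete marker
    	ModTime   time.Time         // version creation time
    	Size      int64             // object size in bytes
    	Metadata  map[string]string // user and internal metadata
    	Parts     []ObjectPartInfo  // multipart layout
    	Erasure   ErasureInfo       // data/parity layout for this version
    }

    type ObjectPartInfo struct {
    	Number int
    	Size   int64
    }

    type ErasureInfo struct {
    	DataBlocks   int
    	ParityBlocks int
    	Distribution []int
    }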
diff --git a/Dockerfile.arm.release b/Dockerfile.arm.release
index 3e1f64192..f5e8368b8 100644
--- a/Dockerfile.arm.release
+++ b/Dockerfile.arm.release
@@ -9,7 +9,7 @@ ENV GO111MODULE on
 RUN \
     apk add --no-cache git 'curl>7.61.0' && \
     git clone https://github.com/minio/minio && \
-    curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
+    curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
 
 FROM arm32v7/alpine:3.10
diff --git a/Dockerfile.arm64.release b/Dockerfile.arm64.release
index 711886821..c456ac9ac 100644
--- a/Dockerfile.arm64.release
+++ b/Dockerfile.arm64.release
@@ -9,7 +9,7 @@ ENV GO111MODULE on
 RUN \
     apk add --no-cache git 'curl>7.61.0' && \
     git clone https://github.com/minio/minio && \
-    curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
+    curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
 
 FROM arm64v8/alpine:3.10
diff --git a/Dockerfile.dev.browser b/Dockerfile.dev.browser
index da38e5986..fbdbfed31 100644
--- a/Dockerfile.dev.browser
+++ b/Dockerfile.dev.browser
@@ -10,4 +10,3 @@ ENV PATH=$PATH:/root/go/bin
 
 RUN go get github.com/go-bindata/go-bindata/go-bindata && \
     go get github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs
-
diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go
index 1aeaa5391..03964b27b 100644
--- a/cmd/admin-handlers.go
+++ b/cmd/admin-handlers.go
@@ -631,7 +631,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// Check if this setup has an erasure coded backend.
-	if !globalIsXL {
+	if !globalIsErasure {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
 		return
 	}
@@ -779,7 +779,7 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
 	}
 
 	// Check if this setup has an erasure coded backend.
-	if !globalIsXL {
+	if !globalIsErasure {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
 		return
 	}
@@ -789,7 +789,7 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
 	// Get local heal status first
 	bgHealStates = append(bgHealStates, getLocalBackgroundHealStatus())
 
-	if globalIsDistXL {
+	if globalIsDistErasure {
 		// Get heal status from other peers
 		peersHealStates := globalNotificationSys.BackgroundHealStatus()
 		bgHealStates = append(bgHealStates, peersHealStates...)
@@ -862,11 +862,11 @@ const (
 	AdminUpdateApplyFailure = "XMinioAdminUpdateApplyFailure"
 )
 
-// toAdminAPIErrCode - converts errXLWriteQuorum error to admin API
+// toAdminAPIErrCode - converts errErasureWriteQuorum error to admin API
 // specific error.
 func toAdminAPIErrCode(ctx context.Context, err error) APIErrorCode {
 	switch err {
-	case errXLWriteQuorum:
+	case errErasureWriteQuorum:
 		return ErrAdminConfigNoQuorum
 	default:
 		return toAPIErrorCode(ctx, err)
@@ -1277,7 +1277,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 		partialWrite(obdInfo)
 	}
 
-	if net, ok := vars["perfnet"]; ok && net == "true" && globalIsDistXL {
+	if net, ok := vars["perfnet"]; ok && net == "true" && globalIsDistErasure {
 		obdInfo.Perf.Net = append(obdInfo.Perf.Net, globalNotificationSys.NetOBDInfo(deadlinedCtx))
 		partialWrite(obdInfo)
@@ -1384,7 +1384,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 			OffDisks += v
 		}
 
-		backend = madmin.XLBackend{
+		backend = madmin.ErasureBackend{
 			Type:         madmin.ErasureType,
 			OnlineDisks:  OnDisks,
 			OfflineDisks: OffDisks,
@@ -1413,10 +1413,10 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 	for _, sp := range servers {
 		for i, di := range sp.Disks {
 			path := ""
-			if globalIsXL {
+			if globalIsErasure {
 				path = di.DrivePath
 			}
-			if globalIsDistXL {
+			if globalIsDistErasure {
 				path = sp.Endpoint + di.DrivePath
 			}
 			// For distributed
@@ -1424,13 +1424,13 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 			for b := range storageInfo.Backend.Sets[a] {
 				ep := storageInfo.Backend.Sets[a][b].Endpoint
 
-				if globalIsDistXL {
+				if globalIsDistErasure {
 					if strings.Replace(ep, "http://", "", -1) == path || strings.Replace(ep, "https://", "", -1) == path {
 						sp.Disks[i].State = storageInfo.Backend.Sets[a][b].State
 						sp.Disks[i].UUID = storageInfo.Backend.Sets[a][b].UUID
 					}
 				}
-				if globalIsXL {
+				if globalIsErasure {
 					if ep == path {
 						sp.Disks[i].State = storageInfo.Backend.Sets[a][b].State
 						sp.Disks[i].UUID = storageInfo.Backend.Sets[a][b].UUID
diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go
index 5955a50e1..c00a0bc9a 100644
--- a/cmd/admin-handlers_test.go
+++ b/cmd/admin-handlers_test.go
@@ -33,27 +33,27 @@ import (
 	"github.com/minio/minio/pkg/madmin"
 )
 
-// adminXLTestBed - encapsulates subsystems that need to be setup for
+// adminErasureTestBed - encapsulates subsystems that need to be setup for
 // admin-handler unit tests.
-type adminXLTestBed struct {
-	xlDirs   []string
-	objLayer ObjectLayer
-	router   *mux.Router
+type adminErasureTestBed struct {
+	erasureDirs []string
+	objLayer    ObjectLayer
+	router      *mux.Router
 }
 
-// prepareAdminXLTestBed - helper function that setups a single-node
-// XL backend for admin-handler tests.
-func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
+// prepareAdminErasureTestBed - helper function that setups a single-node
+// Erasure backend for admin-handler tests.
+func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, error) {
 	// reset global variables to start afresh.
 	resetTestGlobals()
 
-	// Set globalIsXL to indicate that the setup uses an erasure
+	// Set globalIsErasure to indicate that the setup uses an erasure
 	// code backend.
-	globalIsXL = true
+	globalIsErasure = true
 
 	// Initializing objectLayer for HealFormatHandler.
-	objLayer, xlDirs, xlErr := initTestXLObjLayer(ctx)
+	objLayer, erasureDirs, xlErr := initTestErasureObjLayer(ctx)
 	if xlErr != nil {
 		return nil, xlErr
 	}
@@ -66,7 +66,7 @@ func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
 	// Initialize boot time
 	globalBootTime = UTCNow()
 
-	globalEndpoints = mustGetZoneEndpoints(xlDirs...)
+	globalEndpoints = mustGetZoneEndpoints(erasureDirs...)
 
 	newAllSubsystems()
 
@@ -76,36 +76,37 @@ func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
 	adminRouter := mux.NewRouter()
 	registerAdminRouter(adminRouter, true, true)
 
-	return &adminXLTestBed{
-		xlDirs:   xlDirs,
-		objLayer: objLayer,
-		router:   adminRouter,
+	return &adminErasureTestBed{
+		erasureDirs: erasureDirs,
+		objLayer:    objLayer,
+		router:      adminRouter,
 	}, nil
 }
 
 // TearDown - method that resets the test bed for subsequent unit
 // tests to start afresh.
-func (atb *adminXLTestBed) TearDown() {
-	removeRoots(atb.xlDirs)
+func (atb *adminErasureTestBed) TearDown() {
+	removeRoots(atb.erasureDirs)
 	resetTestGlobals()
 }
 
-// initTestObjLayer - Helper function to initialize an XL-based object
+// initTestObjLayer - Helper function to initialize an Erasure-based object
 // layer and set globalObjectAPI.
-func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
-	xlDirs, err := getRandomDisks(16)
+func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
+	erasureDirs, err := getRandomDisks(16)
 	if err != nil {
 		return nil, nil, err
 	}
-	endpoints := mustGetNewEndpoints(xlDirs...)
-	storageDisks, format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "")
+	endpoints := mustGetNewEndpoints(erasureDirs...)
+	storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "")
 	if err != nil {
-		removeRoots(xlDirs)
+		removeRoots(erasureDirs)
 		return nil, nil, err
 	}
 	globalPolicySys = NewPolicySys()
-	objLayer, err := newXLSets(ctx, endpoints, storageDisks, format)
+	objLayer := &erasureZones{zones: make([]*erasureSets, 1)}
+	objLayer.zones[0], err = newErasureSets(ctx, endpoints, storageDisks, format)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -114,7 +115,7 @@ func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
 	globalObjLayerMutex.Lock()
 	globalObjectAPI = objLayer
 	globalObjLayerMutex.Unlock()
-	return objLayer, xlDirs, nil
+	return objLayer, erasureDirs, nil
 }
 
 // cmdType - Represents different service subcomands like status, stop
@@ -183,9 +184,9 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	adminTestBed, err := prepareAdminXLTestBed(ctx)
+	adminTestBed, err := prepareAdminErasureTestBed(ctx)
 	if err != nil {
-		t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
+		t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.")
 	}
 	defer adminTestBed.TearDown()
 
@@ -254,9 +255,9 @@ func TestAdminServerInfo(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	adminTestBed, err := prepareAdminXLTestBed(ctx)
+	adminTestBed, err := prepareAdminErasureTestBed(ctx)
 	if err != nil {
-		t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
+		t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.")
 	}
 	defer adminTestBed.TearDown()
 
@@ -298,7 +299,7 @@ func TestToAdminAPIErrCode(t *testing.T) {
 	}{
 		// 1. Server not in quorum.
 		{
-			err:            errXLWriteQuorum,
+			err:            errErasureWriteQuorum,
 			expectedAPIErr: ErrAdminConfigNoQuorum,
 		},
 		// 2. No error.
diff --git a/cmd/admin-heal-ops.go b/cmd/admin-heal-ops.go
index c3fd34bef..71082199e 100644
--- a/cmd/admin-heal-ops.go
+++ b/cmd/admin-heal-ops.go
@@ -21,7 +21,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
-	"strings"
 	"sync"
 	"time"
 
@@ -193,7 +192,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
 	respBytes []byte, apiErr APIError, errMsg string) {
 
 	existsAndLive := false
-	he, exists := ahs.getHealSequence(h.path)
+	he, exists := ahs.getHealSequence(pathJoin(h.bucket, h.object))
 	if exists {
 		existsAndLive = !he.hasEnded()
 	}
@@ -220,8 +219,9 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
 
 	// Check if new heal sequence to be started overlaps with any
 	// existing, running sequence
+	hpath := pathJoin(h.bucket, h.object)
 	for k, hSeq := range ahs.healSeqMap {
-		if !hSeq.hasEnded() && (HasPrefix(k, h.path) || HasPrefix(h.path, k)) {
+		if !hSeq.hasEnded() && (HasPrefix(k, hpath) || HasPrefix(hpath, k)) {
 
 			errMsg = "The provided heal sequence path overlaps with an existing " +
 				fmt.Sprintf("heal path: %s", k)
@@ -230,7 +230,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
 	}
 
 	// Add heal state and start sequence
-	ahs.healSeqMap[h.path] = h
+	ahs.healSeqMap[hpath] = h
 
 	// Launch top-level background heal go-routine
 	go h.healSequenceStart()
@@ -251,11 +251,11 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
 // status results from global state and returns its JSON
 // representation. The clientToken helps ensure there aren't
 // conflicting clients fetching status.
-func (ahs *allHealState) PopHealStatusJSON(path string,
+func (ahs *allHealState) PopHealStatusJSON(hpath string,
 	clientToken string) ([]byte, APIErrorCode) {
 
 	// fetch heal state for given path
-	h, exists := ahs.getHealSequence(path)
+	h, exists := ahs.getHealSequence(hpath)
 	if !exists {
 		// If there is no such heal sequence, return error.
 		return nil, ErrHealNoSuchProcess
@@ -296,18 +296,17 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
 
 // healSource denotes single entity and heal option.
 type healSource struct {
-	path string           // entity path (format, buckets, objects) to heal
-	opts *madmin.HealOpts // optional heal option overrides default setting
+	bucket    string
+	object    string
+	versionID string
+	opts      *madmin.HealOpts // optional heal option overrides default setting
 }
 
 // healSequence - state for each heal sequence initiated on the
 // server.
 type healSequence struct {
-	// bucket, and prefix on which heal seq. was initiated
-	bucket, objPrefix string
-
-	// path is just pathJoin(bucket, objPrefix)
-	path string
+	// bucket, and object on which heal seq. was initiated
+	bucket, object string
 
 	// A channel of entities (format, buckets, objects) to heal
 	sourceCh chan healSource
@@ -377,8 +376,7 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
 	return &healSequence{
 		respCh:         make(chan healResult),
 		bucket:         bucket,
-		objPrefix:      objPrefix,
-		path:           pathJoin(bucket, objPrefix),
+		object:         objPrefix,
 		reportProgress: true,
 		startTime:      UTCNow(),
 		clientToken:    mustGetUUID(),
@@ -618,7 +616,9 @@ func (h *healSequence) healSequenceStart() {
 func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
 	// Send heal request
 	task := healTask{
-		path:       source.path,
+		bucket:     source.bucket,
+		object:     source.object,
+		versionID:  source.versionID,
 		opts:       h.settings,
 		responseCh: h.respCh,
 	}
@@ -690,11 +690,11 @@ func (h *healSequence) healItemsFromSourceCh() error {
 			}
 			var itemType madmin.HealItemType
 			switch {
-			case source.path == nopHeal:
+			case source.bucket == nopHeal:
 				continue
-			case source.path == SlashSeparator:
+			case source.bucket == SlashSeparator:
 				itemType = madmin.HealItemMetadata
-			case !strings.Contains(source.path, SlashSeparator):
+			case source.bucket != "" && source.object == "":
 				itemType = madmin.HealItemBucket
 			default:
 				itemType = madmin.HealItemObject
@@ -762,12 +762,16 @@ func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
 		// NOTE: Healing on meta is run regardless
 		// of any bucket being selected, this is to ensure that
 		// meta are always upto date and correct.
-		return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket string, object string) error {
+		return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string) error {
 			if h.isQuitting() {
 				return errHealStopSignalled
 			}
 
-			herr := h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemBucketMetadata)
+			herr := h.queueHealTask(healSource{
+				bucket:    bucket,
+				object:    object,
+				versionID: versionID,
+			}, madmin.HealItemBucketMetadata)
 			// Object might have been deleted, by the time heal
 			// was attempted we ignore this object an move on.
 			if isErrObjectNotFound(herr) {
@@ -791,7 +795,7 @@ func (h *healSequence) healDiskFormat() error {
 		return errServerNotInitialized
 	}
 
-	return h.queueHealTask(healSource{path: SlashSeparator}, madmin.HealItemMetadata)
+	return h.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)
 }
 
 // healBuckets - check for all buckets heal or just particular bucket.
@@ -833,7 +837,7 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
 		return errServerNotInitialized
 	}
 
-	if err := h.queueHealTask(healSource{path: bucket}, madmin.HealItemBucket); err != nil {
+	if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
 		return err
 	}
 
@@ -842,12 +846,12 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
 	}
 
 	if !h.settings.Recursive {
-		if h.objPrefix != "" {
+		if h.object != "" {
 			// Check if an object named as the objPrefix exists,
 			// and if so heal it.
-			_, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
+			_, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
 			if err == nil {
-				if err = h.healObject(bucket, h.objPrefix); err != nil {
+				if err = h.healObject(bucket, h.object, ""); err != nil {
 					return err
 				}
 			}
@@ -856,14 +860,14 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
 		return nil
 	}
 
-	if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.settings, h.healObject); err != nil {
+	if err := objectAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
 		return errFnHealFromAPIErr(h.ctx, err)
 	}
 	return nil
 }
 
 // healObject - heal the given object and record result
-func (h *healSequence) healObject(bucket, object string) error {
+func (h *healSequence) healObject(bucket, object, versionID string) error {
 	// Get current object layer instance.
 	objectAPI := newObjectLayerWithoutSafeModeFn()
 	if objectAPI == nil {
@@ -874,5 +878,9 @@ func (h *healSequence) healObject(bucket, object string) error {
 		return errHealStopSignalled
 	}
 
-	return h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemObject)
+	return h.queueHealTask(healSource{
+		bucket:    bucket,
+		object:    object,
+		versionID: versionID,
+	}, madmin.HealItemObject)
 }
diff --git a/cmd/admin-router.go b/cmd/admin-router.go
index f3c877339..0fe823f73 100644
--- a/cmd/admin-router.go
+++ b/cmd/admin-router.go
@@ -64,7 +64,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
 	// DataUsageInfo operations
 	adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))
 
-	if globalIsDistXL || globalIsXL {
+	if globalIsDistErasure || globalIsErasure {
 		/// Heal operations
 
 		// Heal processing endpoint.
@@ -172,7 +172,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
 	}
 
 	// Quota operations
-	if globalIsXL || globalIsDistXL {
+	if globalIsDistErasure || globalIsErasure {
 		if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
 			// GetBucketQuotaConfig
 			adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
@@ -185,7 +185,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
 
 	// -- Top APIs --
 	// Top locks
-	if globalIsDistXL {
+	if globalIsDistErasure {
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
 	}
diff --git a/cmd/admin-server-info.go b/cmd/admin-server-info.go
index d6f562658..0faaadc6c 100644
--- a/cmd/admin-server-info.go
+++ b/cmd/admin-server-info.go
@@ -29,7 +29,7 @@ import (
 func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
 	var disks []madmin.Disk
 	addr := r.Host
-	if globalIsDistXL {
+	if globalIsDistErasure {
 		addr = GetLocalPeer(endpointZones)
 	}
 	network := make(map[string]string)
diff --git a/cmd/api-datatypes.go b/cmd/api-datatypes.go
index 59f2fd41e..eb74e20f7 100644
--- a/cmd/api-datatypes.go
+++ b/cmd/api-datatypes.go
@@ -20,9 +20,18 @@ import (
 	"encoding/xml"
 )
 
-// ObjectIdentifier carries key name for the object to delete.
-type ObjectIdentifier struct {
+// DeletedObject objects deleted
+type DeletedObject struct {
+	DeleteMarker          bool   `xml:"DeleteMarker"`
+	DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
+	ObjectName            string `xml:"Key,omitempty"`
+	VersionID             string `xml:"VersionId,omitempty"`
+}
+
+// ObjectToDelete carries key name for the object to delete.
+type ObjectToDelete struct {
 	ObjectName string `xml:"Key"`
+	VersionID  string `xml:"VersionId"`
 }
 
 // createBucketConfiguration container for bucket configuration request from client.
@@ -37,5 +46,5 @@ type DeleteObjectsRequest struct {
 	// Element to enable quiet mode for the request
 	Quiet bool
 	// List of objects to be deleted
-	Objects []ObjectIdentifier `xml:"Object"`
+	Objects []ObjectToDelete `xml:"Object"`
 }
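NOTE (editor): With ObjectToDelete and DeletedObject above, multi-delete can
address individual versions. A minimal, self-contained Go sketch of the wire
shapes these xml tags produce (miniature stand-ins for the real types, with
invented values; not the MinIO package itself):

    package main

    import (
    	"encoding/xml"
    	"fmt"
    )

    type ObjectToDelete struct {
    	ObjectName string `xml:"Key"`
    	VersionID  string `xml:"VersionId"`
    }

    type DeletedObject struct {
    	DeleteMarker          bool   `xml:"DeleteMarker"`
    	DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
    	ObjectName            string `xml:"Key,omitempty"`
    	VersionID             string `xml:"VersionId,omitempty"`
    }

    func main() {
    	// A versioned delete request targets one specific version of a key.
    	req := struct {
    		XMLName xml.Name         `xml:"Delete"`
    		Quiet   bool             `xml:"Quiet"`
    		Objects []ObjectToDelete `xml:"Object"`
    	}{
    		Objects: []ObjectToDelete{{ObjectName: "photo.jpg", VersionID: "example-version-id"}},
    	}
    	out, _ := xml.MarshalIndent(req, "", "  ")
    	fmt.Println(string(out))

    	// The response reports either the deleted version or, when versioning
    	// is enabled and no VersionId was given, the delete marker created.
    	res := struct {
    		XMLName xml.Name        `xml:"DeleteResult"`
    		Deleted []DeletedObject `xml:"Deleted"`
    	}{
    		Deleted: []DeletedObject{{ObjectName: "photo.jpg", DeleteMarker: true, DeleteMarkerVersionID: "example-marker-id"}},
    	}
    	out, _ = xml.MarshalIndent(res, "", "  ")
    	fmt.Println(string(out))
    }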
diff --git a/cmd/api-errors.go b/cmd/api-errors.go
index 3d334d9c5..961735be4 100644
--- a/cmd/api-errors.go
+++ b/cmd/api-errors.go
@@ -36,6 +36,7 @@ import (
 	"github.com/minio/minio/pkg/bucket/lifecycle"
 	objectlock "github.com/minio/minio/pkg/bucket/object/lock"
 	"github.com/minio/minio/pkg/bucket/policy"
+	"github.com/minio/minio/pkg/bucket/versioning"
 	"github.com/minio/minio/pkg/event"
 	"github.com/minio/minio/pkg/hash"
 )
@@ -538,9 +539,9 @@ var errorCodes = errorCodeMap{
 		HTTPStatusCode: http.StatusNotFound,
 	},
 	ErrNoSuchVersion: {
-		Code:           "NoSuchVersion",
-		Description:    "Indicates that the version ID specified in the request does not match an existing version.",
-		HTTPStatusCode: http.StatusNotFound,
+		Code:           "InvalidArgument",
+		Description:    "Invalid version id specified",
+		HTTPStatusCode: http.StatusBadRequest,
 	},
 	ErrNotImplemented: {
 		Code:           "NotImplemented",
@@ -1782,6 +1783,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrBucketAlreadyOwnedByYou
 	case ObjectNotFound:
 		apiErr = ErrNoSuchKey
+	case MethodNotAllowed:
+		apiErr = ErrMethodNotAllowed
+	case VersionNotFound:
+		apiErr = ErrNoSuchVersion
 	case ObjectAlreadyExists:
 		apiErr = ErrMethodNotAllowed
 	case ObjectNameInvalid:
@@ -1918,6 +1923,12 @@ func toAPIError(ctx context.Context, err error) APIError {
 				e.Error()),
 			HTTPStatusCode: http.StatusBadRequest,
 		}
+	case versioning.Error:
+		apiErr = APIError{
+			Code:           "IllegalVersioningConfigurationException",
+			Description:    fmt.Sprintf("Versioning configuration specified in the request is invalid. (%s)", e.Error()),
+			HTTPStatusCode: http.StatusBadRequest,
+		}
 	case lifecycle.Error:
 		apiErr = APIError{
 			Code:        "InvalidRequest",
diff --git a/cmd/api-headers.go b/cmd/api-headers.go
index a9b8031d0..39274d344 100644
--- a/cmd/api-headers.go
+++ b/cmd/api-headers.go
@@ -29,6 +29,7 @@ import (
 
 	"github.com/minio/minio/cmd/crypto"
 	xhttp "github.com/minio/minio/cmd/http"
+	"github.com/minio/minio/pkg/bucket/lifecycle"
 )
 
 // Returns a hexadecimal representation of time at the
@@ -152,5 +153,26 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
 		w.Header().Set(xhttp.ContentRange, contentRange)
 	}
 
+	// Set the relevant version ID as part of the response header.
+	if objInfo.VersionID != "" {
+		w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
+	}
+
+	if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
+		ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
+			Name:         objInfo.Name,
+			UserTags:     objInfo.UserTags,
+			VersionID:    objInfo.VersionID,
+			ModTime:      objInfo.ModTime,
+			IsLatest:     objInfo.IsLatest,
+			DeleteMarker: objInfo.DeleteMarker,
+		})
+		if !expiryTime.IsZero() {
+			w.Header()[xhttp.AmzExpiration] = []string{
+				fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
+			}
+		}
+	}
+
 	return nil
 }
diff --git a/cmd/api-response.go b/cmd/api-response.go
index b5bbc1170..846d5be7d 100644
--- a/cmd/api-response.go
+++ b/cmd/api-response.go
@@ -81,6 +81,7 @@ type ListVersionsResponse struct {
 
 	CommonPrefixes []CommonPrefix
 	Versions       []ObjectVersion
+	DeleteMarkers  []DeletedVersion
 
 	// Encoding type used to encode object keys in the response.
 	EncodingType string `xml:"EncodingType,omitempty"`
@@ -237,8 +238,22 @@ type Bucket struct {
 type ObjectVersion struct {
 	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Version" json:"-"`
 	Object
-	VersionID string `xml:"VersionId"`
 	IsLatest  bool
+	VersionID string `xml:"VersionId"`
+}
+
+// DeletedVersion container for the delete object version metadata.
+type DeletedVersion struct {
+	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteMarker" json:"-"`
+
+	IsLatest     bool
+	Key          string
+	LastModified string // time string of format "2006-01-02T15:04:05.000Z"
+
+	// Owner of the object.
+	Owner Owner
+
+	VersionID string `xml:"VersionId"`
 }
 
 // StringMap is a map[string]string.
@@ -333,9 +348,10 @@ type CompleteMultipartUploadResponse struct {
 
 // DeleteError structure.
 type DeleteError struct {
-	Code    string
-	Message string
-	Key     string
+	Code      string
+	Message   string
+	Key       string
+	VersionID string `xml:"VersionId"`
 }
 
 // DeleteObjectsResponse container for multiple object deletes.
@@ -343,7 +359,7 @@ type DeleteObjectsResponse struct {
 	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`
 
 	// Collection of all deleted objects
-	DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`
+	DeletedObjects []DeletedObject `xml:"Deleted,omitempty"`
 
 	// Collection of errors deleting certain objects.
 	Errors []DeleteError `xml:"Error,omitempty"`
@@ -413,8 +429,9 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
 }
 
 // generates an ListBucketVersions response for the said bucket with other enumerated options.
-func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListVersionsResponse {
+func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
 	var versions []ObjectVersion
+	var deletedVersions []DeletedVersion
 	var prefixes []CommonPrefix
 	var owner = Owner{}
 	var data = ListVersionsResponse{}
@@ -436,15 +453,29 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp
 		} else {
 			content.StorageClass = globalMinioDefaultStorageClass
 		}
 		content.Owner = owner
-		content.VersionID = "null"
-		content.IsLatest = true
+		content.VersionID = object.VersionID
+		if content.VersionID == "" {
+			content.VersionID = nullVersionID
+		}
+		content.IsLatest = object.IsLatest
 		versions = append(versions, content)
 	}
 
+	for _, deleted := range resp.DeleteObjects {
+		var dv = DeletedVersion{
+			Key:          s3EncodeName(deleted.Name, encodingType),
+			Owner:        owner,
+			LastModified: deleted.ModTime.UTC().Format(iso8601TimeFormat),
+			VersionID:    deleted.VersionID,
+			IsLatest:     deleted.IsLatest,
+		}
+		deletedVersions = append(deletedVersions, dv)
+	}
+
 	data.Name = bucket
 	data.Versions = versions
+	data.DeleteMarkers = deletedVersions
 	data.EncodingType = encodingType
 	data.Prefix = s3EncodeName(prefix, encodingType)
 	data.KeyMarker = s3EncodeName(marker, encodingType)
@@ -452,6 +483,8 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp
 	data.MaxKeys = maxKeys
 
 	data.NextKeyMarker = s3EncodeName(resp.NextMarker, encodingType)
+	data.NextVersionIDMarker = resp.NextVersionIDMarker
+	data.VersionIDMarker = versionIDMarker
 	data.IsTruncated = resp.IsTruncated
 
 	for _, prefix := range resp.Prefixes {
@@ -666,11 +699,14 @@ func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMult
 }
 
 // generate multi objects delete response.
-func generateMultiDeleteResponse(quiet bool, deletedObjects []ObjectIdentifier, errs []DeleteError) DeleteObjectsResponse {
+func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, errs []DeleteError) DeleteObjectsResponse {
 	deleteResp := DeleteObjectsResponse{}
 	if !quiet {
 		deleteResp.DeletedObjects = deletedObjects
 	}
+	if len(errs) == len(deletedObjects) {
+		deleteResp.DeletedObjects = nil
+	}
 	deleteResp.Errors = errs
 	return deleteResp
 }
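NOTE (editor): With the DeleteMarkers field added above, a ListObjectVersions
response interleaves Version and DeleteMarker entries under one result. A
representative wire-format sample (abbreviated; all values invented, shape
matching the structs in this file and the standard S3 document):

    <ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
      <Name>mybucket</Name>
      <Prefix></Prefix>
      <KeyMarker></KeyMarker>
      <VersionIdMarker></VersionIdMarker>
      <MaxKeys>1000</MaxKeys>
      <IsTruncated>false</IsTruncated>
      <Version>
        <Key>photo.jpg</Key>
        <VersionId>e78b9b9b-example</VersionId>
        <IsLatest>false</IsLatest>
        <LastModified>2020-06-12T20:04:05.000Z</LastModified>
        <Size>1024</Size>
        <StorageClass>STANDARD</StorageClass>
      </Version>
      <DeleteMarker>
        <Key>photo.jpg</Key>
        <VersionId>c0e3c2b9-example</VersionId>
        <IsLatest>true</IsLatest>
        <LastModified>2020-06-13T08:15:00.000Z</LastModified>
      </DeleteMarker>
    </ListVersionsResult>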
diff --git a/cmd/api-router.go b/cmd/api-router.go
index 4011c9aa5..cfd26e7f7 100644
--- a/cmd/api-router.go
+++ b/cmd/api-router.go
@@ -224,9 +224,9 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
 		// ListObjectsV2
 		bucket.Methods(http.MethodGet).HandlerFunc(
 			maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
-		// ListBucketVersions
+		// ListObjectVersions
 		bucket.Methods(http.MethodGet).HandlerFunc(
-			maxClients(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler)))).Queries("versions", "")
+			maxClients(collectAPIStats("listobjectversions", httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
 		// ListObjectsV1 (Legacy)
 		bucket.Methods(http.MethodGet).HandlerFunc(
 			maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler))))
diff --git a/cmd/background-heal-ops.go b/cmd/background-heal-ops.go
index 20271977b..dd0582a3d 100644
--- a/cmd/background-heal-ops.go
+++ b/cmd/background-heal-ops.go
@@ -18,6 +18,7 @@ package cmd
 
 import (
 	"context"
+	"path"
 	"time"
 
 	"github.com/minio/minio/cmd/logger"
@@ -29,8 +30,10 @@ import (
 // path: 'bucket/' or '/bucket/' => Heal bucket
 // path: 'bucket/object' => Heal object
 type healTask struct {
-	path string
-	opts madmin.HealOpts
+	bucket    string
+	object    string
+	versionID string
+	opts      madmin.HealOpts
 	// Healing response will be sent here
 	responseCh chan healResult
 }
@@ -79,17 +82,18 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
 
 			var res madmin.HealResultItem
 			var err error
-			bucket, object := path2BucketObject(task.path)
 			switch {
-			case bucket == "" && object == "":
+			case task.bucket == nopHeal:
+				continue
+			case task.bucket == SlashSeparator:
 				res, err = healDiskFormat(ctx, objAPI, task.opts)
-			case bucket != "" && object == "":
-				res, err = objAPI.HealBucket(ctx, bucket, task.opts.DryRun, task.opts.Remove)
-			case bucket != "" && object != "":
-				res, err = objAPI.HealObject(ctx, bucket, object, task.opts)
+			case task.bucket != "" && task.object == "":
+				res, err = objAPI.HealBucket(ctx, task.bucket, task.opts.DryRun, task.opts.Remove)
+			case task.bucket != "" && task.object != "":
+				res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
 			}
-			if task.path != slashSeparator && task.path != nopHeal {
-				ObjectPathUpdated(task.path)
+			if task.bucket != "" && task.object != "" {
+				ObjectPathUpdated(path.Join(task.bucket, task.object))
 			}
 			task.responseCh <- healResult{result: res, err: err}
 		case <-h.doneCh:
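NOTE (editor): Both healSource and healTask replace the single joined path
string with an explicit (bucket, object, versionID) triple, so a heal can
target one version of one object. A minimal sketch of that dispatch idea
(names mirror the diff but this is a simplification of the real types, and
"/" stands in for SlashSeparator):

    package main

    import "fmt"

    type healTask struct {
    	bucket, object, versionID string
    }

    // dispatch classifies a task the way healRoutine.run does: disk-format
    // heal, bucket heal, or a (possibly version-specific) object heal.
    func dispatch(t healTask) string {
    	switch {
    	case t.bucket == "/":
    		return "heal disk format"
    	case t.object == "":
    		return fmt.Sprintf("heal bucket %q", t.bucket)
    	default:
    		return fmt.Sprintf("heal object %q/%q version %q", t.bucket, t.object, t.versionID)
    	}
    }

    func main() {
    	fmt.Println(dispatch(healTask{bucket: "/"}))
    	fmt.Println(dispatch(healTask{bucket: "photos"}))
    	fmt.Println(dispatch(healTask{bucket: "photos", object: "cat.jpg", versionID: "example-id"}))
    }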
diff --git a/cmd/background-newdisks-heal-ops.go b/cmd/background-newdisks-heal-ops.go
index 5bfc86fde..0d1637000 100644
--- a/cmd/background-newdisks-heal-ops.go
+++ b/cmd/background-newdisks-heal-ops.go
@@ -33,7 +33,7 @@ func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) {
 // 1. Only the concerned erasure set will be listed and healed
 // 2. Only the node hosting the disk is responsible to perform the heal
 func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
-	z, ok := objAPI.(*xlZones)
+	z, ok := objAPI.(*erasureZones)
 	if !ok {
 		return
 	}
@@ -84,10 +84,10 @@ func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
 			}
 
 			// Reformat disks
-			bgSeq.sourceCh <- healSource{path: SlashSeparator}
+			bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
 
 			// Ensure that reformatting disks is finished
-			bgSeq.sourceCh <- healSource{path: nopHeal}
+			bgSeq.sourceCh <- healSource{bucket: nopHeal}
 
 			var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
 			// Compute the list of erasure set to heal
diff --git a/cmd/benchmark-utils_test.go b/cmd/benchmark-utils_test.go
index c13772872..92cc4a578 100644
--- a/cmd/benchmark-utils_test.go
+++ b/cmd/benchmark-utils_test.go
@@ -35,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
 	// obtains random bucket name.
 	bucket := getRandomBucketName()
 	// create bucket.
-	err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
+	err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
 	if err != nil {
 		b.Fatal(err)
 	}
@@ -76,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 	object := getRandomObjectName()
 
 	// create bucket.
-	err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
+	err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
 	if err != nil {
 		b.Fatal(err)
 	}
@@ -127,9 +127,9 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 	b.StopTimer()
 }
 
-// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
+// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
 func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
-	// create a temp XL/FS backend.
+	// create a temp Erasure/FS backend.
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -143,9 +143,9 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
 	runPutObjectPartBenchmark(b, objLayer, objSize)
 }
 
-// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
+// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
 func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
-	// create a temp XL/FS backend.
+	// create a temp Erasure/FS backend.
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -159,9 +159,9 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
 	runPutObjectBenchmark(b, objLayer, objSize)
 }
 
-// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
+// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
 func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
-	// create a temp XL/FS backend.
+	// create a temp Erasure/FS backend.
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -181,7 +181,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
 	// obtains random bucket name.
 	bucket := getRandomBucketName()
 	// create bucket.
-	err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
+	err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
 	if err != nil {
 		b.Fatal(err)
 	}
@@ -190,7 +190,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
 
 	// generate etag for the generated data.
 	// etag of the data to written is required as input for PutObject.
-	// PutObject is the functions which writes the data onto the FS/XL backend.
+	// PutObject is the functions which writes the data onto the FS/Erasure backend.
 
 	// get text data generated for number of bytes equal to object size.
 	md5hex := getMD5Hash(textData)
@@ -240,9 +240,9 @@ func generateBytesData(size int) []byte {
 	return bytes.Repeat(getRandomByte(), size)
 }
 
-// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
+// creates Erasure/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
 func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
-	// create a temp XL/FS backend.
+	// create a temp Erasure/FS backend.
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -256,9 +256,9 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
 	runGetObjectBenchmark(b, objLayer, objSize)
 }
 
-// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
+// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
 func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
-	// create a temp XL/FS backend.
+	// create a temp Erasure/FS backend.
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -278,7 +278,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
 	// obtains random bucket name.
 	bucket := getRandomBucketName()
 	// create bucket.
-	err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
+	err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
 	if err != nil {
 		b.Fatal(err)
 	}
@@ -322,7 +322,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
 	// obtains random bucket name.
 	bucket := getRandomBucketName()
 	// create bucket.
-	err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
+	err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
 	if err != nil {
 		b.Fatal(err)
 	}
@@ -331,7 +331,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
 	textData := generateBytesData(objSize)
 	// generate md5sum for the generated data.
 	// md5sum of the data to written is required as input for PutObject.
-	// PutObject is the functions which writes the data onto the FS/XL backend.
+	// PutObject is the functions which writes the data onto the FS/Erasure backend.
 	md5hex := getMD5Hash([]byte(textData))
 	sha256hex := ""
diff --git a/cmd/bitrot.go b/cmd/bitrot.go
index bfdecfbd3..cddc17a96 100644
--- a/cmd/bitrot.go
+++ b/cmd/bitrot.go
@@ -30,25 +30,6 @@ import (
 // magic HH-256 key as HH-256 hash of the first 100 decimals of π as utf-8 string with a zero key.
 var magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")
 
-// BitrotAlgorithm specifies a algorithm used for bitrot protection.
-type BitrotAlgorithm uint
-
-const (
-	// SHA256 represents the SHA-256 hash function
-	SHA256 BitrotAlgorithm = 1 + iota
-	// HighwayHash256 represents the HighwayHash-256 hash function
-	HighwayHash256
-	// HighwayHash256S represents the Streaming HighwayHash-256 hash function
-	HighwayHash256S
-	// BLAKE2b512 represents the BLAKE2b-512 hash function
-	BLAKE2b512
-)
-
-// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection.
-const (
-	DefaultBitrotAlgorithm = HighwayHash256S
-)
-
 var bitrotAlgorithms = map[BitrotAlgorithm]string{
 	SHA256:     "sha256",
 	BLAKE2b512: "blake2b",
diff --git a/cmd/bitrot_test.go b/cmd/bitrot_test.go
index 78cf9dc5c..a31b909f0 100644
--- a/cmd/bitrot_test.go
+++ b/cmd/bitrot_test.go
@@ -34,7 +34,7 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
 	volume := "testvol"
 	filePath := "testfile"
 
-	disk, err := newPosix(tmpDir, "")
+	disk, err := newXLStorage(tmpDir, "")
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/cmd/bucket-encryption.go b/cmd/bucket-encryption.go
index b5504c909..2c2ab1606 100644
--- a/cmd/bucket-encryption.go
+++ b/cmd/bucket-encryption.go
@@ -55,5 +55,6 @@ func validateBucketSSEConfig(r io.Reader) (*bucketsse.BucketSSEConfig, error) {
 	if len(encConfig.Rules) == 1 && encConfig.Rules[0].DefaultEncryptionAction.Algorithm == bucketsse.AES256 {
 		return encConfig, nil
 	}
+
 	return nil, errors.New("Unsupported bucket encryption configuration")
 }
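NOTE (editor): The MakeBucketWithLocation call sites above all change because
the method now takes a BucketOptions struct instead of positional (location,
lockEnabled) arguments. The shape below mirrors the diff (the struct literal
appears in PutBucketHandler further down); the interface itself lives in
cmd/object-api-interface.go:

    // BucketOptions as introduced by this patch.
    type BucketOptions struct {
    	Location    string
    	LockEnabled bool
    }

    // Call sites change from:
    //   obj.MakeBucketWithLocation(ctx, bucket, "", false)
    // to:
    //   obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
    // and handlers pass explicit options:
    //   obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{Location: location, LockEnabled: objectLockEnabled})

Bundling options in a struct keeps the interface stable as versioning adds
further per-bucket creation parameters.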
diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go
index d6ae21bea..df6102607 100644
--- a/cmd/bucket-handlers.go
+++ b/cmd/bucket-handlers.go
@@ -45,9 +45,8 @@ import (
 )
 
 const (
-	getBucketVersioningResponse = ``
-	objectLockConfig            = "object-lock.xml"
-	bucketTaggingConfigFile     = "tagging.xml"
+	objectLockConfig        = "object-lock.xml"
+	bucketTaggingConfigFile = "tagging.xml"
 )
 
 // Check if there are buckets on server without corresponding entry in etcd backend and
@@ -382,75 +381,86 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 		deleteObjectsFn = api.CacheAPI().DeleteObjects
 	}
 
-	var objectsToDelete = map[string]int{}
+	var objectsToDelete = map[ObjectToDelete]int{}
 	getObjectInfoFn := objectAPI.GetObjectInfo
 	if api.CacheAPI() != nil {
 		getObjectInfoFn = api.CacheAPI().GetObjectInfo
 	}
 
-	var dErrs = make([]APIErrorCode, len(deleteObjects.Objects))
-
+	dErrs := make([]DeleteError, len(deleteObjects.Objects))
 	for index, object := range deleteObjects.Objects {
-		if dErrs[index] = checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); dErrs[index] != ErrNone {
-			if dErrs[index] == ErrSignatureDoesNotMatch || dErrs[index] == ErrInvalidAccessKeyID {
-				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(dErrs[index]), r.URL, guessIsBrowserReq(r))
+		if apiErrCode := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); apiErrCode != ErrNone {
+			if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
+				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL, guessIsBrowserReq(r))
 				return
 			}
+			apiErr := errorCodes.ToAPIErr(apiErrCode)
+			dErrs[index] = DeleteError{
+				Code:      apiErr.Code,
+				Message:   apiErr.Description,
+				Key:       object.ObjectName,
+				VersionID: object.VersionID,
+			}
 			continue
 		}
 
-		if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
-			if apiErr := enforceRetentionBypassForDelete(ctx, r, bucket, object.ObjectName, getObjectInfoFn); apiErr != ErrNone {
-				dErrs[index] = apiErr
-				continue
+		if object.VersionID != "" {
+			if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
+				if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, getObjectInfoFn); apiErrCode != ErrNone {
+					apiErr := errorCodes.ToAPIErr(apiErrCode)
+					dErrs[index] = DeleteError{
+						Code:      apiErr.Code,
+						Message:   apiErr.Description,
+						Key:       object.ObjectName,
+						VersionID: object.VersionID,
+					}
+					continue
+				}
 			}
 		}
 
 		// Avoid duplicate objects, we use map to filter them out.
-		if _, ok := objectsToDelete[object.ObjectName]; !ok {
-			objectsToDelete[object.ObjectName] = index
+		if _, ok := objectsToDelete[object]; !ok {
+			objectsToDelete[object] = index
 		}
 	}
 
-	toNames := func(input map[string]int) (output []string) {
-		output = make([]string, len(input))
+	toNames := func(input map[ObjectToDelete]int) (output []ObjectToDelete) {
+		output = make([]ObjectToDelete, len(input))
 		idx := 0
-		for name := range input {
-			output[idx] = name
+		for obj := range input {
+			output[idx] = obj
 			idx++
 		}
 		return
 	}
 
 	deleteList := toNames(objectsToDelete)
-	errs, err := deleteObjectsFn(ctx, bucket, deleteList)
-	if err != nil {
-		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-		return
-	}
+	dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
+		Versioned: globalBucketVersioningSys.Enabled(bucket),
+	})
 
-	for i, objName := range deleteList {
-		dIdx := objectsToDelete[objName]
-		dErrs[dIdx] = toAPIErrorCode(ctx, errs[i])
-	}
-
-	// Collect deleted objects and errors if any.
-	var deletedObjects []ObjectIdentifier
-	var deleteErrors []DeleteError
-	for index, errCode := range dErrs {
-		object := deleteObjects.Objects[index]
-		// Success deleted objects are collected separately.
-		if errCode == ErrNone || errCode == ErrNoSuchKey {
-			deletedObjects = append(deletedObjects, object)
+	deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
+	for i := range errs {
+		dindex := objectsToDelete[deleteList[i]]
+		apiErr := toAPIError(ctx, errs[i])
+		if apiErr.Code == "" || apiErr.Code == "NoSuchKey" {
+			deletedObjects[dindex] = dObjects[i]
 			continue
 		}
-		apiErr := getAPIError(errCode)
-		// Error during delete should be collected separately.
-		deleteErrors = append(deleteErrors, DeleteError{
-			Code:    apiErr.Code,
-			Message: apiErr.Description,
-			Key:     object.ObjectName,
-		})
+		dErrs[dindex] = DeleteError{
+			Code:      apiErr.Code,
+			Message:   apiErr.Description,
+			Key:       deleteList[i].ObjectName,
+			VersionID: deleteList[i].VersionID,
+		}
+	}
+
+	var deleteErrors []DeleteError
+	for _, dErr := range dErrs {
+		if dErr.Code != "" {
+			deleteErrors = append(deleteErrors, dErr)
+		}
 	}
 
 	// Generate response
@@ -462,12 +472,21 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 
 	// Notify deleted event for objects.
 	for _, dobj := range deletedObjects {
+		objInfo := ObjectInfo{
+			Name:      dobj.ObjectName,
+			VersionID: dobj.VersionID,
+		}
+		if dobj.DeleteMarker {
+			objInfo = ObjectInfo{
+				Name:         dobj.ObjectName,
+				DeleteMarker: dobj.DeleteMarker,
+				VersionID:    dobj.DeleteMarkerVersionID,
+			}
+		}
 		sendEvent(eventArgs{
-			EventName:  event.ObjectRemovedDelete,
-			BucketName: bucket,
-			Object: ObjectInfo{
-				Name: dobj.ObjectName,
-			},
+			EventName:    event.ObjectRemovedDelete,
+			BucketName:   bucket,
+			Object:       objInfo,
 			ReqParams:    extractReqParams(r),
 			RespElements: extractRespElements(w),
 			UserAgent:    r.UserAgent(),
@@ -522,12 +541,17 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 		return
 	}
 
+	opts := BucketOptions{
+		Location:    location,
+		LockEnabled: objectLockEnabled,
+	}
+
 	if globalDNSConfig != nil {
 		sr, err := globalDNSConfig.Get(bucket)
 		if err != nil {
 			if err == dns.ErrNoEntriesFound {
 				// Proceed to creating a bucket.
-				if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location, objectLockEnabled); err != nil {
+				if err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts); err != nil {
 					writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 					return
 				}
@@ -565,7 +589,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 	}
 
 	// Proceed to creating a bucket.
-	err := objectAPI.MakeBucketWithLocation(ctx, bucket, location, objectLockEnabled)
+	err := objectAPI.MakeBucketWithLocation(ctx, bucket, opts)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -797,9 +821,17 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}
 
-	location := getObjectLocation(r, globalDomainNames, bucket, object)
+	// We must not use the http.Header().Set method here because some (broken)
+	// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
+	// Therefore, we have to set the ETag directly as map entry.
 	w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`}
-	w.Header().Set(xhttp.Location, location)
+
+	// Set the relevant version ID as part of the response header.
+	if objInfo.VersionID != "" {
+		w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
+	}
+
+	w.Header().Set(xhttp.Location, getObjectLocation(r, globalDomainNames, bucket, object))
 
 	// Notify object created event.
 	defer sendEvent(eventArgs{
@@ -826,9 +858,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 			Bucket:   objInfo.Bucket,
 			Key:      objInfo.Name,
 			ETag:     `"` + objInfo.ETag + `"`,
-			Location: location,
+			Location: w.Header().Get(xhttp.Location),
 		})
-		writeResponse(w, http.StatusCreated, resp, "application/xml")
+		writeResponse(w, http.StatusCreated, resp, mimeXML)
 	case "200":
 		writeSuccessResponseHeadersOnly(w)
 	default:
@@ -921,79 +953,30 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
 
 	// Attempt to delete bucket.
 	if err := deleteBucket(ctx, bucket, forceDelete); err != nil {
-		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+		if _, ok := err.(BucketNotEmpty); ok && (globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket)) {
+			apiErr := toAPIError(ctx, err)
+			apiErr.Description = "The bucket you tried to delete is not empty. You must delete all versions in the bucket."
+			writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
+		} else {
+			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+		}
 		return
 	}
 
+	globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
+
 	if globalDNSConfig != nil {
 		if err := globalDNSConfig.Delete(bucket); err != nil {
-			// Deleting DNS entry failed, attempt to create the bucket again.
-			objectAPI.MakeBucketWithLocation(ctx, bucket, "", false)
+			logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually using etcdctl", err))
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
 		}
 	}
 
-	globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
-
 	// Write success response.
 	writeSuccessNoContent(w)
 }
 
-// PutBucketVersioningHandler - PUT Bucket Versioning.
-// ----------
-// No-op. Available for API compatibility.
-func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := newContext(r, w, "PutBucketVersioning")
-
-	defer logger.AuditLog(w, r, "PutBucketVersioning", mustGetClaimsFromToken(r))
-
-	vars := mux.Vars(r)
-	bucket := vars["bucket"]
-
-	objectAPI := api.ObjectAPI()
-	if objectAPI == nil {
-		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
-		return
-	}
-
-	getBucketInfo := objectAPI.GetBucketInfo
-	if _, err := getBucketInfo(ctx, bucket); err != nil {
-		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-		return
-	}
-
-	// Write success response.
-	writeSuccessResponseHeadersOnly(w)
-}
-
-// GetBucketVersioningHandler - GET Bucket Versioning.
-// ----------
-// No-op. Available for API compatibility.
-func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := newContext(r, w, "GetBucketVersioning")
-
-	defer logger.AuditLog(w, r, "GetBucketVersioning", mustGetClaimsFromToken(r))
-
-	vars := mux.Vars(r)
-	bucket := vars["bucket"]
-
-	objectAPI := api.ObjectAPI()
-	if objectAPI == nil {
-		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
-		return
-	}
-
-	getBucketInfo := objectAPI.GetBucketInfo
-	if _, err := getBucketInfo(ctx, bucket); err != nil {
-		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-		return
-	}
-
-	// Write success response.
-	writeSuccessResponseXML(w, []byte(getBucketVersioningResponse))
-}
-
 // PutBucketObjectLockConfigHandler - PUT Bucket object lock configuration.
 // ----------
 // Places an Object Lock configuration on the specified bucket. The rule
+// Wrapper for calling GetBucketPolicy HTTP handler tests for both Erasure multiple disks and single node setup. func TestGetBucketLocationHandler(t *testing.T) { ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"}) } @@ -217,7 +218,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq) } -// Wrapper for calling HeadBucket HTTP handler tests for both XL multiple disks and single node setup. +// Wrapper for calling HeadBucket HTTP handler tests for both Erasure multiple disks and single node setup. func TestHeadBucketHandler(t *testing.T) { ExecObjectLayerAPITest(t, testHeadBucketHandler, []string{"HeadBucket"}) } @@ -322,7 +323,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq) } -// Wrapper for calling TestListMultipartUploadsHandler tests for both XL multiple disks and single node setup. +// Wrapper for calling TestListMultipartUploadsHandler tests for both Erasure multiple disks and single node setup. func TestListMultipartUploadsHandler(t *testing.T) { ExecObjectLayerAPITest(t, testListMultipartUploadsHandler, []string{"ListMultipartUploads"}) } @@ -559,7 +560,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq) } -// Wrapper for calling TestListBucketsHandler tests for both XL multiple disks and single node setup. +// Wrapper for calling TestListBucketsHandler tests for both Erasure multiple disks and single node setup. func TestListBucketsHandler(t *testing.T) { ExecObjectLayerAPITest(t, testListBucketsHandler, []string{"ListBuckets"}) } @@ -653,7 +654,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap ExecObjectLayerAPINilTest(t, "", "", instanceType, apiRouter, nilReq) } -// Wrapper for calling DeleteMultipleObjects HTTP handler tests for both XL multiple disks and single node setup. +// Wrapper for calling DeleteMultipleObjects HTTP handler tests for both Erasure multiple disks and single node setup. 
func TestAPIDeleteMultipleObjectsHandler(t *testing.T) { ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects"}) } @@ -679,14 +680,17 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa objectNames = append(objectNames, objectName) } - getObjectIdentifierList := func(objectNames []string) (objectIdentifierList []ObjectIdentifier) { + getObjectToDeleteList := func(objectNames []string) (objectList []ObjectToDelete) { for _, objectName := range objectNames { - objectIdentifierList = append(objectIdentifierList, ObjectIdentifier{objectName}) + objectList = append(objectList, ObjectToDelete{ + ObjectName: objectName, + }) } - return objectIdentifierList + return objectList } - getDeleteErrorList := func(objects []ObjectIdentifier) (deleteErrorList []DeleteError) { + + getDeleteErrorList := func(objects []ObjectToDelete) (deleteErrorList []DeleteError) { for _, obj := range objects { deleteErrorList = append(deleteErrorList, DeleteError{ Code: errorCodes[ErrAccessDenied].Code, @@ -699,22 +703,38 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa requestList := []DeleteObjectsRequest{ - {Quiet: false, Objects: getObjectIdentifierList(objectNames[:5])}, - {Quiet: true, Objects: getObjectIdentifierList(objectNames[5:])}, + {Quiet: false, Objects: getObjectToDeleteList(objectNames[:5])}, + {Quiet: true, Objects: getObjectToDeleteList(objectNames[5:])}, } // generate multi objects delete response. successRequest0 := encodeResponse(requestList[0]) - successResponse0 := generateMultiDeleteResponse(requestList[0].Quiet, requestList[0].Objects, nil) + + deletedObjects := make([]DeletedObject, len(requestList[0].Objects)) + for i := range requestList[0].Objects { + deletedObjects[i] = DeletedObject{ + ObjectName: requestList[0].Objects[i].ObjectName, + } + } + + successResponse0 := generateMultiDeleteResponse(requestList[0].Quiet, deletedObjects, nil) encodedSuccessResponse0 := encodeResponse(successResponse0) successRequest1 := encodeResponse(requestList[1]) - successResponse1 := generateMultiDeleteResponse(requestList[1].Quiet, requestList[1].Objects, nil) + + deletedObjects = make([]DeletedObject, len(requestList[1].Objects)) + for i := range requestList[1].Objects { + deletedObjects[i] = DeletedObject{ + ObjectName: requestList[1].Objects[i].ObjectName, + } + } + + successResponse1 := generateMultiDeleteResponse(requestList[1].Quiet, deletedObjects, nil) encodedSuccessResponse1 := encodeResponse(successResponse1) // generate multi objects delete response for errors. // errorRequest := encodeResponse(requestList[1]) - errorResponse := generateMultiDeleteResponse(requestList[1].Quiet, requestList[1].Objects, nil) + errorResponse := generateMultiDeleteResponse(requestList[1].Quiet, deletedObjects, nil) encodedErrorResponse := encodeResponse(errorResponse) anonRequest := encodeResponse(requestList[0]) @@ -817,6 +837,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa // Verify whether the bucket obtained object is same as the one created.
if testCase.expectedContent != nil && !bytes.Equal(testCase.expectedContent, actualContent) { + fmt.Println(string(testCase.expectedContent), string(actualContent)) t.Errorf("Test %d : MinIO %s: Object content differs from expected value.", i+1, instanceType) } } diff --git a/cmd/bucket-lifecycle.go b/cmd/bucket-lifecycle.go index fd7c9f6df..01d69978e 100644 --- a/cmd/bucket-lifecycle.go +++ b/cmd/bucket-lifecycle.go @@ -21,7 +21,6 @@ import ( ) const ( - // Disabled means the lifecycle rule is inactive Disabled = "Disabled" ) diff --git a/cmd/bucket-listobjects-handlers.go b/cmd/bucket-listobjects-handlers.go index ad03b1fd5..567864d30 100644 --- a/cmd/bucket-listobjects-handlers.go +++ b/cmd/bucket-listobjects-handlers.go @@ -49,13 +49,13 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int return ErrNone } -// ListBucketObjectVersions - GET Bucket Object versions +// ListObjectVersions - GET Bucket Object versions // You can use the versions subresource to list metadata about all // of the versions of objects in a bucket. -func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "ListBucketObjectVersions") +func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "ListObjectVersions") - defer logger.AuditLog(w, r, "ListBucketObjectVersions", mustGetClaimsFromToken(r)) + defer logger.AuditLog(w, r, "ListObjectVersions", mustGetClaimsFromToken(r)) vars := mux.Vars(r) bucket := vars["bucket"] @@ -74,8 +74,7 @@ func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWrit urlValues := r.URL.Query() // Extract all the listBucketVersions query params to their native values. - // versionIDMarker is ignored here. - prefix, marker, delimiter, maxkeys, encodingType, _, errCode := getListBucketObjectVersionsArgs(urlValues) + prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode := getListBucketObjectVersionsArgs(urlValues) if errCode != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r)) return @@ -87,29 +86,29 @@ func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWrit return } - listObjects := objectAPI.ListObjects + listObjectVersions := objectAPI.ListObjectVersions - // Inititate a list objects operation based on the input params. + // Initiate a list object versions operation based on the input params. // On success would return back ListObjectsInfo object to be // marshaled into S3 compatible XML header.
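The call below switches from plain object listing to a dedicated ListObjectVersions method whose extra versionIDMarker lets pagination resume between two versions of the same key. Here is a hedged sketch of that resume rule with trimmed stand-in types; the actual cursor handling lives in the object layer and is not shown in this diff.

package main

import "fmt"

// Trimmed stand-in for the patch's ObjectInfo.
type versionEntry struct {
	Name      string
	VersionID string
}

// after reports whether e sorts strictly past the (marker, versionIDMarker)
// resume point: first by key, then by version position within the same key.
func after(e versionEntry, marker, versionIDMarker string, seenMarkerVersion *bool) bool {
	if e.Name != marker {
		return e.Name > marker
	}
	if *seenMarkerVersion {
		return true
	}
	if e.VersionID == versionIDMarker {
		*seenMarkerVersion = true // resume with the next version of this key
	}
	return false
}

func main() {
	listing := []versionEntry{
		{"a.txt", "v3"}, {"a.txt", "v2"}, {"a.txt", "v1"}, {"b.txt", "v1"},
	}
	var seen bool
	for _, e := range listing {
		if after(e, "a.txt", "v2", &seen) {
			fmt.Println(e.Name, e.VersionID) // a.txt v1, then b.txt v1
		}
	}
}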
- listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxkeys) + listObjectVersionsInfo, err := listObjectVersions(ctx, bucket, prefix, marker, versionIDMarker, delimiter, maxkeys) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } - for i := range listObjectsInfo.Objects { - if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) { - listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false) + for i := range listObjectVersionsInfo.Objects { + if crypto.IsEncrypted(listObjectVersionsInfo.Objects[i].UserDefined) { + listObjectVersionsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectVersionsInfo.Objects[i], false) } - listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].GetActualSize() + listObjectVersionsInfo.Objects[i].Size, err = listObjectVersionsInfo.Objects[i].GetActualSize() if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } } - response := generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType, maxkeys, listObjectsInfo) + response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo) // Write success response. writeSuccessResponseXML(w, encodeResponse(response)) diff --git a/cmd/bucket-metadata-sys.go b/cmd/bucket-metadata-sys.go index 55ab43d9f..72d001713 100644 --- a/cmd/bucket-metadata-sys.go +++ b/cmd/bucket-metadata-sys.go @@ -28,6 +28,7 @@ import ( "github.com/minio/minio/pkg/bucket/lifecycle" objectlock "github.com/minio/minio/pkg/bucket/object/lock" "github.com/minio/minio/pkg/bucket/policy" + "github.com/minio/minio/pkg/bucket/versioning" "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/madmin" "github.com/minio/minio/pkg/sync/errgroup" @@ -111,6 +112,8 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat meta.TaggingConfigXML = configData case objectLockConfig: meta.ObjectLockConfigXML = configData + case bucketVersioningConfig: + meta.VersioningConfigXML = configData case bucketQuotaConfigFile: meta.QuotaConfigJSON = configData default: @@ -147,6 +150,16 @@ func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) { return meta, nil } +// GetVersioningConfig returns configured versioning config +// The returned object may not be modified. +func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Versioning, error) { + meta, err := sys.GetConfig(bucket) + if err != nil { + return nil, err + } + return meta.versioningConfig, nil +} + // GetTaggingConfig returns configured tagging config // The returned object may not be modified. 
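Versioning state thus travels through BucketMetadataSys like every other per-bucket config, and the generated msgpack codec further down simply bumps its map header from 0x8a (a fixmap of 10 entries) to 0x8b (11) to make room for the new VersioningConfigXML field. Below is a hedged usage sketch of the parsing helper the new accessor depends on; versioning.ParseConfig and Enabled are used exactly as elsewhere in this patch, and the XML body follows the standard S3 schema. GetTaggingConfig, declared next, follows the identical accessor pattern.

package main

import (
	"fmt"
	"strings"

	"github.com/minio/minio/pkg/bucket/versioning"
)

func main() {
	// A standard S3 versioning configuration document.
	doc := `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>Enabled</Status></VersioningConfiguration>`

	// ParseConfig rejects malformed documents before they ever reach
	// bucket metadata; Enabled()/Suspended() gate versioned code paths.
	vc, err := versioning.ParseConfig(strings.NewReader(doc))
	if err != nil {
		fmt.Println("reject:", err)
		return
	}
	fmt.Println(vc.Enabled()) // true
}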
func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, error) { diff --git a/cmd/bucket-metadata.go b/cmd/bucket-metadata.go index 681ebd2d7..0ab18f066 100644 --- a/cmd/bucket-metadata.go +++ b/cmd/bucket-metadata.go @@ -32,6 +32,7 @@ import ( "github.com/minio/minio/pkg/bucket/lifecycle" objectlock "github.com/minio/minio/pkg/bucket/object/lock" "github.com/minio/minio/pkg/bucket/policy" + "github.com/minio/minio/pkg/bucket/versioning" "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/madmin" ) @@ -47,6 +48,7 @@ const ( var ( enabledBucketObjectLockConfig = []byte(`Enabled`) + enabledBucketVersioningConfig = []byte(`Enabled`) ) //go:generate msgp -file $GOFILE @@ -64,6 +66,7 @@ type BucketMetadata struct { NotificationConfigXML []byte LifecycleConfigXML []byte ObjectLockConfigXML []byte + VersioningConfigXML []byte EncryptionConfigXML []byte TaggingConfigXML []byte QuotaConfigJSON []byte @@ -73,6 +76,7 @@ type BucketMetadata struct { notificationConfig *event.Config lifecycleConfig *lifecycle.Lifecycle objectLockConfig *objectlock.Config + versioningConfig *versioning.Versioning sseConfig *bucketsse.BucketSSEConfig taggingConfig *tags.Tags quotaConfig *madmin.BucketQuota @@ -87,6 +91,9 @@ func newBucketMetadata(name string) BucketMetadata { XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", }, quotaConfig: &madmin.BucketQuota{}, + versioningConfig: &versioning.Versioning{ + XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", + }, } } @@ -188,6 +195,13 @@ func (b *BucketMetadata) parseAllConfigs(ctx context.Context, objectAPI ObjectLa b.objectLockConfig = nil } + if len(b.VersioningConfigXML) != 0 { + b.versioningConfig, err = versioning.ParseConfig(bytes.NewReader(b.VersioningConfigXML)) + if err != nil { + return err + } + } + if len(b.QuotaConfigJSON) != 0 { b.quotaConfig, err = parseBucketQuota(b.Name, b.QuotaConfigJSON) if err != nil { @@ -244,6 +258,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj case legacyBucketObjectLockEnabledConfigFile: if string(configData) == legacyBucketObjectLockEnabledConfig { b.ObjectLockConfigXML = enabledBucketObjectLockConfig + b.VersioningConfigXML = enabledBucketVersioningConfig b.LockEnabled = false // legacy value unset it // we are only interested in b.ObjectLockConfigXML } @@ -259,6 +274,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj b.TaggingConfigXML = configData case objectLockConfig: b.ObjectLockConfigXML = configData + b.VersioningConfigXML = enabledBucketVersioningConfig case bucketQuotaConfigFile: b.QuotaConfigJSON = configData } diff --git a/cmd/bucket-metadata_gen.go b/cmd/bucket-metadata_gen.go index 5ec12ebdc..a331be67c 100644 --- a/cmd/bucket-metadata_gen.go +++ b/cmd/bucket-metadata_gen.go @@ -66,6 +66,12 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "ObjectLockConfigXML") return } + case "VersioningConfigXML": + z.VersioningConfigXML, err = dc.ReadBytes(z.VersioningConfigXML) + if err != nil { + err = msgp.WrapError(err, "VersioningConfigXML") + return + } case "EncryptionConfigXML": z.EncryptionConfigXML, err = dc.ReadBytes(z.EncryptionConfigXML) if err != nil { @@ -97,9 +103,9 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 10 + // map header, size 11 // write "Name" - err = en.Append(0x8a, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + err = 
en.Append(0x8b, 0xa4, 0x4e, 0x61, 0x6d, 0x65) if err != nil { return } @@ -168,6 +174,16 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "ObjectLockConfigXML") return } + // write "VersioningConfigXML" + err = en.Append(0xb3, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c) + if err != nil { + return + } + err = en.WriteBytes(z.VersioningConfigXML) + if err != nil { + err = msgp.WrapError(err, "VersioningConfigXML") + return + } // write "EncryptionConfigXML" err = en.Append(0xb3, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c) if err != nil { @@ -204,9 +220,9 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) { // MarshalMsg implements msgp.Marshaler func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 10 + // map header, size 11 // string "Name" - o = append(o, 0x8a, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + o = append(o, 0x8b, 0xa4, 0x4e, 0x61, 0x6d, 0x65) o = msgp.AppendString(o, z.Name) // string "Created" o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64) @@ -226,6 +242,9 @@ func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) { // string "ObjectLockConfigXML" o = append(o, 0xb3, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c) o = msgp.AppendBytes(o, z.ObjectLockConfigXML) + // string "VersioningConfigXML" + o = append(o, 0xb3, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c) + o = msgp.AppendBytes(o, z.VersioningConfigXML) // string "EncryptionConfigXML" o = append(o, 0xb3, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c) o = msgp.AppendBytes(o, z.EncryptionConfigXML) @@ -298,6 +317,12 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "ObjectLockConfigXML") return } + case "VersioningConfigXML": + z.VersioningConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.VersioningConfigXML) + if err != nil { + err = msgp.WrapError(err, "VersioningConfigXML") + return + } case "EncryptionConfigXML": z.EncryptionConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.EncryptionConfigXML) if err != nil { @@ -330,6 +355,6 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BucketMetadata) Msgsize() (s int) { - s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + 
msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) return } diff --git a/cmd/bucket-object-lock.go b/cmd/bucket-object-lock.go index 73b103579..f8c7931be 100644 --- a/cmd/bucket-object-lock.go +++ b/cmd/bucket-object-lock.go @@ -52,79 +52,6 @@ func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention, return config.ToRetention(), nil } -// Similar to enforceRetentionBypassForDelete but for WebUI -func enforceRetentionBypassForDeleteWeb(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, govBypassPerms bool) APIErrorCode { - opts, err := getOpts(ctx, r, bucket, object) - if err != nil { - return toAPIErrorCode(ctx, err) - } - - oi, err := getObjectInfoFn(ctx, bucket, object, opts) - if err != nil { - return toAPIErrorCode(ctx, err) - } - - lhold := objectlock.GetObjectLegalHoldMeta(oi.UserDefined) - if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn { - return ErrObjectLocked - } - - ret := objectlock.GetObjectRetentionMeta(oi.UserDefined) - if ret.Mode.Valid() { - switch ret.Mode { - case objectlock.RetCompliance: - // In compliance mode, a protected object version can't be overwritten - // or deleted by any user, including the root user in your AWS account. - // When an object is locked in compliance mode, its retention mode can't - // be changed, and its retention period can't be shortened. Compliance mode - // ensures that an object version can't be overwritten or deleted for the - // duration of the retention period. - t, err := objectlock.UTCNowNTP() - if err != nil { - logger.LogIf(ctx, err) - return ErrObjectLocked - } - - if !ret.RetainUntilDate.Before(t) { - return ErrObjectLocked - } - return ErrNone - case objectlock.RetGovernance: - // In governance mode, users can't overwrite or delete an object - // version or alter its lock settings unless they have special - // permissions. With governance mode, you protect objects against - // being deleted by most users, but you can still grant some users - // permission to alter the retention settings or delete the object - // if necessary. You can also use governance mode to test retention-period - // settings before creating a compliance-mode retention period. - // To override or remove governance-mode retention settings, a - // user must have the s3:BypassGovernanceRetention permission - // and must explicitly include x-amz-bypass-governance-retention:true - // as a request header with any request that requires overriding - // governance mode. - byPassSet := govBypassPerms && objectlock.IsObjectLockGovernanceBypassSet(r.Header) - if !byPassSet { - t, err := objectlock.UTCNowNTP() - if err != nil { - logger.LogIf(ctx, err) - return ErrObjectLocked - } - - if !ret.RetainUntilDate.Before(t) { - return ErrObjectLocked - } - - if !govBypassPerms { - return ErrObjectLocked - } - - return ErrNone - } - } - } - return ErrNone -} - // enforceRetentionForDeletion checks if it is appropriate to remove an // object according to locking configuration when this is lifecycle/ bucket quota asking. func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locked bool) { @@ -153,14 +80,23 @@ func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locke // For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR // governance bypass headers are set and user has governance bypass permissions. 
// Objects in "Compliance" mode can be overwritten only if retention date is past. -func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn) APIErrorCode { - opts, err := getOpts(ctx, r, bucket, object) +func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket string, object ObjectToDelete, getObjectInfoFn GetObjectInfoFn) APIErrorCode { + opts, err := getOpts(ctx, r, bucket, object.ObjectName) if err != nil { return toAPIErrorCode(ctx, err) } - oi, err := getObjectInfoFn(ctx, bucket, object, opts) + opts.VersionID = object.VersionID + + oi, err := getObjectInfoFn(ctx, bucket, object.ObjectName, opts) if err != nil { + switch err.(type) { + case MethodNotAllowed: // This happens usually for a delete marker + if oi.DeleteMarker { + // Delete marker should be present and valid. + return ErrNone + } + } return toAPIErrorCode(ctx, err) } @@ -219,8 +155,8 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke // https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes // If you try to delete objects protected by governance mode and have s3:BypassGovernanceRetention // or s3:GetBucketObjectLockConfiguration permissions, the operation will succeed. - govBypassPerms1 := checkRequestAuthType(ctx, r, policy.BypassGovernanceRetentionAction, bucket, object) - govBypassPerms2 := checkRequestAuthType(ctx, r, policy.GetBucketObjectLockConfigurationAction, bucket, object) + govBypassPerms1 := checkRequestAuthType(ctx, r, policy.BypassGovernanceRetentionAction, bucket, object.ObjectName) + govBypassPerms2 := checkRequestAuthType(ctx, r, policy.GetBucketObjectLockConfigurationAction, bucket, object.ObjectName) if govBypassPerms1 != ErrNone && govBypassPerms2 != ErrNone { return ErrAccessDenied } @@ -331,30 +267,32 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj return mode, retainDate, legalHold, ErrNone } - var objExists bool opts, err := getOpts(ctx, r, bucket, object) if err != nil { return mode, retainDate, legalHold, toAPIErrorCode(ctx, err) } - t, err := objectlock.UTCNowNTP() - if err != nil { - logger.LogIf(ctx, err) - return mode, retainDate, legalHold, ErrObjectLocked - } + if opts.VersionID != "" { + if objInfo, err := getObjectInfoFn(ctx, bucket, object, opts); err == nil { + r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined) - if objInfo, err := getObjectInfoFn(ctx, bucket, object, opts); err == nil { - objExists = true - r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined) - if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) { - return mode, retainDate, legalHold, ErrObjectLocked - } - mode = r.Mode - retainDate = r.RetainUntilDate - legalHold = objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined) - // Disallow overwriting an object on legal hold - if legalHold.Status == objectlock.LegalHoldOn { - return mode, retainDate, legalHold, ErrObjectLocked + t, err := objectlock.UTCNowNTP() + if err != nil { + logger.LogIf(ctx, err) + return mode, retainDate, legalHold, ErrObjectLocked + } + + if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) { + return mode, retainDate, legalHold, ErrObjectLocked + } + + mode = r.Mode + retainDate = r.RetainUntilDate + legalHold = objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined) + // Disallow overwriting an object on legal hold + if legalHold.Status == objectlock.LegalHoldOn { + return mode, retainDate, 
legalHold, ErrObjectLocked + } } } @@ -374,9 +312,6 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj if err != nil { return mode, retainDate, legalHold, toAPIErrorCode(ctx, err) } - if objExists && retainDate.After(t) { - return mode, retainDate, legalHold, ErrObjectLocked - } if retentionPermErr != ErrNone { return mode, retainDate, legalHold, retentionPermErr } @@ -387,16 +322,14 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj if retentionPermErr != ErrNone { return mode, retainDate, legalHold, retentionPermErr } + t, err := objectlock.UTCNowNTP() if err != nil { logger.LogIf(ctx, err) return mode, retainDate, legalHold, ErrObjectLocked } - // AWS S3 just creates a new version of object when an object is being overwritten. - if objExists && retainDate.After(t) { - return mode, retainDate, legalHold, ErrObjectLocked - } - if !legalHoldRequested { + + if !legalHoldRequested && retentionCfg.LockEnabled { // inherit retention from bucket configuration return retentionCfg.Mode, objectlock.RetentionDate{Time: t.Add(retentionCfg.Validity)}, legalHold, ErrNone } diff --git a/cmd/bucket-policy-handlers.go b/cmd/bucket-policy-handlers.go index f659d2466..9c3f79290 100644 --- a/cmd/bucket-policy-handlers.go +++ b/cmd/bucket-policy-handlers.go @@ -164,7 +164,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht } // Read bucket access policy. - config, err := globalBucketMetadataSys.GetPolicyConfig(bucket) + config, err := globalPolicySys.Get(bucket) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return diff --git a/cmd/bucket-policy-handlers_test.go b/cmd/bucket-policy-handlers_test.go index 91c1b5cdf..c5a3579cc 100644 --- a/cmd/bucket-policy-handlers_test.go +++ b/cmd/bucket-policy-handlers_test.go @@ -92,7 +92,7 @@ func getAnonWriteOnlyObjectPolicy(bucketName, prefix string) *policy.Policy { } } -// Wrapper for calling Put Bucket Policy HTTP handler tests for both XL multiple disks and single node setup. +// Wrapper for calling Put Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup. func TestPutBucketPolicyHandler(t *testing.T) { ExecObjectLayerAPITest(t, testPutBucketPolicyHandler, []string{"PutBucketPolicy"}) } @@ -102,7 +102,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string credentials auth.Credentials, t *testing.T) { bucketName1 := fmt.Sprintf("%s-1", bucketName) - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil { t.Fatal(err) } @@ -314,7 +314,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string } -// Wrapper for calling Get Bucket Policy HTTP handler tests for both XL multiple disks and single node setup. +// Wrapper for calling Get Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup. func TestGetBucketPolicyHandler(t *testing.T) { ExecObjectLayerAPITest(t, testGetBucketPolicyHandler, []string{"PutBucketPolicy", "GetBucketPolicy"}) } @@ -520,7 +520,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq) } -// Wrapper for calling Delete Bucket Policy HTTP handler tests for both XL multiple disks and single node setup. 
+// Wrapper for calling Delete Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup. func TestDeleteBucketPolicyHandler(t *testing.T) { ExecObjectLayerAPITest(t, testDeleteBucketPolicyHandler, []string{"PutBucketPolicy", "DeleteBucketPolicy"}) } diff --git a/cmd/bucket-policy.go b/cmd/bucket-policy.go index baec0f42f..6fff203a3 100644 --- a/cmd/bucket-policy.go +++ b/cmd/bucket-policy.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. + * MinIO Cloud Storage, (C) 2018,2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -70,6 +70,16 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[ principalType = "User" } + vid := r.URL.Query().Get("versionId") + if vid == "" { + if u, err := url.Parse(r.Header.Get(xhttp.AmzCopySource)); err == nil { + vid = u.Query().Get("versionId") + } + if vid == "" { + vid = r.Header.Get(xhttp.AmzCopySourceVersionID) + } + } + args := map[string][]string{ "CurrentTime": {currTime.Format(time.RFC3339)}, "EpochTime": {strconv.FormatInt(currTime.Unix(), 10)}, @@ -80,6 +90,7 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[ "principaltype": {principalType}, "userid": {username}, "username": {username}, + "versionid": {vid}, } if lc != "" { @@ -142,7 +153,7 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[ return args } -// PolicyToBucketAccessPolicy - converts policy.Policy to minio-go/policy.BucketAccessPolicy. +// PolicyToBucketAccessPolicy converts a MinIO policy into a minio-go policy data structure. func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.BucketAccessPolicy, error) { // Return empty BucketAccessPolicy for empty bucket policy. if bucketPolicy == nil { diff --git a/cmd/bucket-quota.go b/cmd/bucket-quota.go index 4fdcf6d3d..84288d166 100644 --- a/cmd/bucket-quota.go +++ b/cmd/bucket-quota.go @@ -138,7 +138,7 @@ func startBucketQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) { case <-ctx.Done(): return case <-time.NewTimer(bgQuotaInterval).C: - logger.LogIf(ctx, enforceFIFOQuota(ctx, objAPI)) + enforceFIFOQuota(ctx, objAPI) } } @@ -146,20 +146,22 @@ func startBucketQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) { // enforceFIFOQuota deletes objects in FIFO order until sufficient objects // have been deleted so as to bring bucket usage within quota -func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error { +func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) { // Turn off quota enforcement if data usage info is unavailable. if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff { - return nil + return } buckets, err := objectAPI.ListBuckets(ctx) if err != nil { - return err + logger.LogIf(ctx, err) + return } dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI) if err != nil { - return err + logger.LogIf(ctx, err) + return } for _, binfo := range buckets { @@ -196,7 +198,8 @@ func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error { // Walk through all objects if err := objectAPI.Walk(ctx, bucket, "", objInfoCh); err != nil { - return err + logger.LogIf(ctx, err) + continue } // reuse the fileScorer used by disk cache to score entries by @@ -205,53 +208,61 @@ func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error { // irrelevant. 
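As a reading aid for the scorer setup just below: newFileScorer takes the byte budget to free, a reference timestamp for the age term, and a maxHits ceiling; passing 1 for maxHits effectively neutralizes the hit-count weight, which only matters for the disk cache. A hedged fragment in the server's cmd package context follows (object names and ages are illustrative).

// Score candidates for deletion until roughly 10 MiB would be freed.
scorer, err := newFileScorer(10<<20, time.Now().Unix(), 1)
if err != nil {
	return
}
weekOld, dayOld := time.Now().Add(-7*24*time.Hour), time.Now().Add(-24*time.Hour) // illustrative ages
// Versioned entries keep their VersionID so the eventual DeleteObjects
// call can target the exact version rather than the latest one.
scorer.addFileWithObjInfo(ObjectInfo{Name: "logs/old.bin", VersionID: "v1", Size: 4 << 20, ModTime: weekOld}, 1)
scorer.addFileWithObjInfo(ObjectInfo{Name: "logs/new.bin", Size: 4 << 20, ModTime: dayOld}, 1)
for _, oi := range scorer.fileObjInfos() {
	// entries come back in eviction order, trimmed to the byte budget
	fmt.Println(oi.Name, oi.VersionID)
}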
scorer, err := newFileScorer(toFree, time.Now().Unix(), 1) if err != nil { - return err + logger.LogIf(ctx, err) + continue } rcfg, _ := globalBucketObjectLockSys.Get(bucket) - for obj := range objInfoCh { + if obj.DeleteMarker { + // Delete markers are automatically added for FIFO purge. + scorer.addFileWithObjInfo(obj, 1) + continue + } // skip objects currently under retention if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) { continue } - scorer.addFile(obj.Name, obj.ModTime, obj.Size, 1) + scorer.addFileWithObjInfo(obj, 1) } - var objects []string - numKeys := len(scorer.fileNames()) - for i, key := range scorer.fileNames() { - objects = append(objects, key) + + versioned := globalBucketVersioningSys.Enabled(bucket) + + var objects []ObjectToDelete + numKeys := len(scorer.fileObjInfos()) + for i, obj := range scorer.fileObjInfos() { + objects = append(objects, ObjectToDelete{ + ObjectName: obj.Name, + VersionID: obj.VersionID, + }) if len(objects) < maxDeleteList && (i < numKeys-1) { - // skip deletion until maxObjectList or end of slice + // skip deletion until maxDeleteList or end of slice continue } if len(objects) == 0 { break } + // Deletes a list of objects. - deleteErrs, err := objectAPI.DeleteObjects(ctx, bucket, objects) - if err != nil { - logger.LogIf(ctx, err) - } else { - for i := range deleteErrs { - if deleteErrs[i] != nil { - logger.LogIf(ctx, deleteErrs[i]) - continue - } - // Notify object deleted event. - sendEvent(eventArgs{ - EventName: event.ObjectRemovedDelete, - BucketName: bucket, - Object: ObjectInfo{ - Name: objects[i], - }, - Host: "Internal: [FIFO-QUOTA-EXPIRY]", - }) + _, deleteErrs := objectAPI.DeleteObjects(ctx, bucket, objects, ObjectOptions{ + Versioned: versioned, + }) + for i := range deleteErrs { + if deleteErrs[i] != nil { + logger.LogIf(ctx, deleteErrs[i]) + continue } - objects = nil + + // Notify object deleted event. + sendEvent(eventArgs{ + EventName: event.ObjectRemovedDelete, + BucketName: bucket, + Object: obj, + Host: "Internal: [FIFO-QUOTA-EXPIRY]", + }) } + objects = nil } } - return nil } diff --git a/cmd/bucket-versioning-handler.go b/cmd/bucket-versioning-handler.go new file mode 100644 index 000000000..5ce740391 --- /dev/null +++ b/cmd/bucket-versioning-handler.go @@ -0,0 +1,128 @@ +/* + * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "encoding/xml" + "io" + "net/http" + + humanize "github.com/dustin/go-humanize" + "github.com/gorilla/mux" + xhttp "github.com/minio/minio/cmd/http" + "github.com/minio/minio/cmd/logger" + "github.com/minio/minio/pkg/bucket/policy" + "github.com/minio/minio/pkg/bucket/versioning" +) + +const ( + bucketVersioningConfig = "versioning.xml" + + // Maximum size of bucket versioning configuration payload sent to the PutBucketVersioningHandler. + maxBucketVersioningConfigSize = 1 * humanize.MiByte +) + +// PutBucketVersioningHandler - PUT Bucket Versioning. 
+// ---------- +func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "PutBucketVersioning") + + defer logger.AuditLog(w, r, "PutBucketVersioning", mustGetClaimsFromToken(r)) + + vars := mux.Vars(r) + bucket := vars["bucket"] + + objectAPI := api.ObjectAPI() + if objectAPI == nil { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r)) + return + } + + // PutBucketVersioning API requires Content-Md5 + if _, ok := r.Header[xhttp.ContentMD5]; !ok { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r)) + return + } + + if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketVersioningAction, bucket, ""); s3Error != ErrNone { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r)) + return + } + + v, err := versioning.ParseConfig(io.LimitReader(r.Body, maxBucketVersioningConfigSize)) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + + configData, err := xml.Marshal(v) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + + if err = globalBucketMetadataSys.Update(bucket, bucketVersioningConfig, configData); err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + + writeSuccessResponseHeadersOnly(w) +} + +// GetBucketVersioningHandler - GET Bucket Versioning. +// ---------- +func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "GetBucketVersioning") + + defer logger.AuditLog(w, r, "GetBucketVersioning", mustGetClaimsFromToken(r)) + + vars := mux.Vars(r) + bucket := vars["bucket"] + + objectAPI := api.ObjectAPI() + if objectAPI == nil { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r)) + return + } + + if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketVersioningAction, bucket, ""); s3Error != ErrNone { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r)) + return + } + + // Check if bucket exists. + if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + + config, err := globalBucketVersioningSys.Get(bucket) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + + configData, err := xml.Marshal(config) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + + // Write bucket versioning configuration to client + writeSuccessResponseXML(w, configData) + +} diff --git a/cmd/bucket-versioning.go b/cmd/bucket-versioning.go new file mode 100644 index 000000000..55c2e50b7 --- /dev/null +++ b/cmd/bucket-versioning.go @@ -0,0 +1,57 @@ +/* + * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import "github.com/minio/minio/pkg/bucket/versioning" + +// BucketVersioningSys - bucket versioning subsystem. +type BucketVersioningSys struct{} + +// Enabled returns true if versioning is enabled on the given bucket. +func (sys *BucketVersioningSys) Enabled(bucket string) bool { + vc, err := globalBucketMetadataSys.GetVersioningConfig(bucket) + if err != nil { + return false + } + return vc.Enabled() +} + +// Suspended returns true if versioning is suspended on the given bucket. +func (sys *BucketVersioningSys) Suspended(bucket string) bool { + vc, err := globalBucketMetadataSys.GetVersioningConfig(bucket) + if err != nil { + return false + } + return vc.Suspended() +} + +// Get returns the stored bucket versioning configuration. +func (sys *BucketVersioningSys) Get(bucket string) (*versioning.Versioning, error) { + if globalIsGateway { + objAPI := newObjectLayerFn() + if objAPI == nil { + return nil, errServerNotInitialized + } + return nil, NotImplemented{} + } + return globalBucketMetadataSys.GetVersioningConfig(bucket) +} + +// NewBucketVersioningSys - creates new versioning system. +func NewBucketVersioningSys() *BucketVersioningSys { + return &BucketVersioningSys{} +} diff --git a/cmd/config-common.go b/cmd/config-common.go index 1cc9543d1..1bb1a202a 100644 --- a/cmd/config-common.go +++ b/cmd/config-common.go @@ -50,7 +50,7 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b } func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error { - err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile) + _, err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile, ObjectOptions{}) if err != nil && isErrObjectNotFound(err) { return errConfigNotFound } diff --git a/cmd/config-current.go b/cmd/config-current.go index 8ce9ceaef..4c7352ecf 100644 --- a/cmd/config-current.go +++ b/cmd/config-current.go @@ -59,7 +59,7 @@ func initHelp() { for k, v := range notify.DefaultNotificationKVS { kvs[k] = v } - if globalIsXL { + if globalIsErasure { kvs[config.StorageClassSubSys] = storageclass.DefaultKVS } config.RegisterDefaultKVS(kvs) @@ -168,7 +168,7 @@ func initHelp() { }, } - if globalIsXL { + if globalIsErasure { helpSubSys = append(helpSubSys, config.HelpKV{}) copy(helpSubSys[2:], helpSubSys[1:]) helpSubSys[1] = config.HelpKV{ @@ -232,9 +232,9 @@ func validateConfig(s config.Config) error { return err } - if globalIsXL { + if globalIsErasure { if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], - globalXLSetDriveCount); err != nil { + globalErasureSetDriveCount); err != nil { return err } } @@ -367,9 +367,9 @@ func lookupConfigs(s config.Config) { globalAPIConfig.init(apiConfig) - if globalIsXL { + if globalIsErasure { globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], - globalXLSetDriveCount) + globalErasureSetDriveCount) if err != nil { logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err)) } diff --git a/cmd/config.go b/cmd/config.go index 199ece926..a06addb58 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -92,7 +92,8 @@ func listServerConfigHistory(ctx context.Context, objAPI ObjectLayer, withData b func delServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) error { historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV+kvPrefix) - return objAPI.DeleteObject(ctx, minioMetaBucket, historyFile) + _, err := objAPI.DeleteObject(ctx, minioMetaBucket, historyFile, ObjectOptions{}) +
return err } func readServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) ([]byte, error) { diff --git a/cmd/consolelogger.go b/cmd/consolelogger.go index 103a892d1..81f8438d6 100644 --- a/cmd/consolelogger.go +++ b/cmd/consolelogger.go @@ -45,7 +45,7 @@ func mustGetNodeName(endpointZones EndpointZones) (nodeName string) { if err != nil { logger.FatalIf(err, "Unable to start console logging subsystem") } - if globalIsDistXL { + if globalIsDistErasure { nodeName = host.Name } return nodeName diff --git a/cmd/copy-part-range.go b/cmd/copy-part-range.go index ed9eca390..8c1f87d87 100644 --- a/cmd/copy-part-range.go +++ b/cmd/copy-part-range.go @@ -32,7 +32,9 @@ func writeCopyPartErr(ctx context.Context, w http.ResponseWriter, err error, url writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyPartRangeSource), url, browser) return default: - writeErrorResponse(ctx, w, toAPIError(ctx, err), url, browser) + apiErr := errorCodes.ToAPIErr(ErrInvalidCopyPartRangeSource) + apiErr.Description = err.Error() + writeErrorResponse(ctx, w, apiErr, url, browser) return } } diff --git a/cmd/data-crawler.go b/cmd/data-crawler.go index 46b1e405d..08a84c4de 100644 --- a/cmd/data-crawler.go +++ b/cmd/data-crawler.go @@ -28,7 +28,6 @@ import ( "time" "github.com/minio/minio/cmd/config" - xhttp "github.com/minio/minio/cmd/http" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/bucket/lifecycle" "github.com/minio/minio/pkg/color" @@ -512,7 +511,6 @@ func (i *crawlItem) transformMetaDir() { type actionMeta struct { oi ObjectInfo trustOI bool // Set true if oi can be trusted and has been read with quorum. - meta map[string]string } // applyActions will apply lifecycle checks on to a scanned item. @@ -528,7 +526,16 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action return size } - action := i.lifeCycle.ComputeAction(i.objectPath(), meta.meta[xhttp.AmzObjectTagging], meta.oi.ModTime) + versionID := meta.oi.VersionID + action := i.lifeCycle.ComputeAction( + lifecycle.ObjectOpts{ + Name: i.objectPath(), + UserTags: meta.oi.UserTags, + ModTime: meta.oi.ModTime, + VersionID: meta.oi.VersionID, + DeleteMarker: meta.oi.DeleteMarker, + IsLatest: meta.oi.IsLatest, + }) if i.debug { logger.Info(color.Green("applyActions:")+" lifecycle: %q, Initial scan: %v", i.objectPath(), action) } @@ -542,19 +549,42 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action // These (expensive) operations should only run on items we are likely to delete. // Load to ensure that we have the correct version and not an unsynced version. if !meta.trustOI { - obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{}) + obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{ + VersionID: versionID, + }) if err != nil { - // Do nothing - heal in the future. - logger.LogIf(ctx, err) - return size + switch err.(type) { + case MethodNotAllowed: // This happens usually for a delete marker + if !obj.DeleteMarker { // if this is not a delete marker log and return + // Do nothing - heal in the future. + logger.LogIf(ctx, err) + return size + } + case ObjectNotFound: + // object not found return 0 + return 0 + default: + // All other errors proceed. + logger.LogIf(ctx, err) + return size + } } size = obj.Size // Recalculate action. 
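The MethodNotAllowed branch above is the idiom this patch uses wherever a version-targeted read may land on a delete marker, and it is subtle enough to isolate. A hedged fragment in the server's cmd package context:

// Reading a specific version that is a delete marker fails with
// MethodNotAllowed, yet the returned ObjectInfo still carries
// DeleteMarker=true - that flag is what separates "found a marker"
// from a genuine failure.
oi, err := o.GetObjectInfo(ctx, bucket, object, ObjectOptions{VersionID: versionID})
if err != nil {
	if _, ok := err.(MethodNotAllowed); ok && oi.DeleteMarker {
		// a valid delete marker: lifecycle may still expire it
	} else {
		// a real error: log and keep the object for a future heal
	}
}

The recalculation below then feeds the full lifecycle.ObjectOpts (version ID, delete-marker flag, latest-ness) back into ComputeAction so version-aware rules are honored.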
- action = i.lifeCycle.ComputeAction(i.objectPath(), obj.UserTags, obj.ModTime) + action = i.lifeCycle.ComputeAction( + lifecycle.ObjectOpts{ + Name: i.objectPath(), + UserTags: obj.UserTags, + ModTime: obj.ModTime, + VersionID: obj.VersionID, + DeleteMarker: obj.DeleteMarker, + IsLatest: obj.IsLatest, + }) if i.debug { logger.Info(color.Green("applyActions:")+" lifecycle: Secondary scan: %v", action) } + versionID = obj.VersionID switch action { case lifecycle.DeleteAction: default: @@ -563,7 +593,7 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action } } - err = o.DeleteObject(ctx, i.bucket, i.objectPath()) + obj, err := o.DeleteObject(ctx, i.bucket, i.objectPath(), ObjectOptions{VersionID: versionID}) if err != nil { // Assume it is still there. logger.LogIf(ctx, err) @@ -574,10 +604,8 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action sendEvent(eventArgs{ EventName: event.ObjectRemovedDelete, BucketName: i.bucket, - Object: ObjectInfo{ - Name: i.objectPath(), - }, - Host: "Internal: [ILM-EXPIRY]", + Object: obj, + Host: "Internal: [ILM-EXPIRY]", }) return 0 } diff --git a/cmd/disk-cache-backend.go b/cmd/disk-cache-backend.go index 6eb8cd4a3..d73fa5542 100644 --- a/cmd/disk-cache-backend.go +++ b/cmd/disk-cache-backend.go @@ -60,7 +60,7 @@ type CacheChecksumInfoV1 struct { // Represents the cache metadata struct type cacheMeta struct { Version string `json:"version"` - Stat statInfo `json:"stat"` // Stat of the current object `cache.json`. + Stat StatInfo `json:"stat"` // Stat of the current object `cache.json`. // checksums of blocks on disk. Checksum CacheChecksumInfoV1 `json:"checksum,omitempty"` @@ -553,7 +553,7 @@ func (c *diskCache) bitrotWriteToCache(cachePath, fileName string, reader io.Rea } f, err := os.Create(filePath) if err != nil { - return 0, osErrToFSFileErr(err) + return 0, osErrToFileErr(err) } defer f.Close() diff --git a/cmd/disk-cache-utils.go b/cmd/disk-cache-utils.go index 1987e21e9..335af10fc 100644 --- a/cmd/disk-cache-utils.go +++ b/cmd/disk-cache-utils.go @@ -187,12 +187,12 @@ func readCacheFileStream(filePath string, offset, length int64) (io.ReadCloser, fr, err := os.Open(filePath) if err != nil { - return nil, osErrToFSFileErr(err) + return nil, osErrToFileErr(err) } // Stat to get the size of the file at path. st, err := fr.Stat() if err != nil { - err = osErrToFSFileErr(err) + err = osErrToFileErr(err) return nil, err } @@ -298,9 +298,10 @@ type fileScorer struct { } type queuedFile struct { - name string - size uint64 - score float64 + name string + versionID string + size uint64 + score float64 } // newFileScorer allows to collect files to save a specific number of bytes. @@ -321,15 +322,33 @@ func newFileScorer(saveBytes uint64, now int64, maxHits int) (*fileScorer, error return &f, nil } -func (f *fileScorer) addFile(name string, lastAccess time.Time, size int64, hits int) { +func (f *fileScorer) addFile(name string, accTime time.Time, size int64, hits int) { + f.addFileWithObjInfo(ObjectInfo{ + Name: name, + AccTime: accTime, + Size: size, + }, hits) +} + +func (f *fileScorer) addFileWithObjInfo(objInfo ObjectInfo, hits int) { // Calculate how much we want to delete this object. file := queuedFile{ - name: name, - size: uint64(size), + name: objInfo.Name, + versionID: objInfo.VersionID, + size: uint64(objInfo.Size), } - score := float64(f.now - lastAccess.Unix()) + + var score float64 + if objInfo.ModTime.IsZero() { + // Mod time is not available for disk cache entries; fall back to access time (atime).
+ score = float64(f.now - objInfo.AccTime.Unix()) + } else { + // Otherwise prefer mod time when it is available. + score = float64(f.now - objInfo.ModTime.Unix()) + } + // Size as fraction of how much we want to save, 0->1. - szWeight := math.Max(0, (math.Min(1, float64(size)*f.sizeMult))) + szWeight := math.Max(0, (math.Min(1, float64(file.size)*f.sizeMult))) // 0 at f.maxHits, 1 at 0. hitsWeight := (1.0 - math.Max(0, math.Min(1.0, float64(hits)/float64(f.maxHits)))) file.score = score * (1 + 0.25*szWeight + 0.25*hitsWeight) @@ -404,6 +423,22 @@ func (f *fileScorer) trimQueue() { } } +// fileObjInfos returns all queued file object infos +func (f *fileScorer) fileObjInfos() []ObjectInfo { + res := make([]ObjectInfo, 0, f.queue.Len()) + e := f.queue.Front() + for e != nil { + qfile := e.Value.(queuedFile) + res = append(res, ObjectInfo{ + Name: qfile.name, + Size: int64(qfile.size), + VersionID: qfile.versionID, + }) + e = e.Next() + } + return res +} + // fileNames returns all queued file names. func (f *fileScorer) fileNames() []string { res := make([]string, 0, f.queue.Len()) diff --git a/cmd/disk-cache.go b/cmd/disk-cache.go index 742d357e9..b9bbd11ef 100644 --- a/cmd/disk-cache.go +++ b/cmd/disk-cache.go @@ -51,8 +51,8 @@ type CacheObjectLayer interface { // Object operations. GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) - DeleteObject(ctx context.Context, bucket, object string) error - DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) + DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) + DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) // Storage operations.
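For readers squinting at the weights in addFileWithObjInfo above, this standalone restatement of the eviction score may help. It assumes, from newFileScorer's construction, that sizeMult equals 1/saveBytes, so treat it as a sketch rather than the canonical formula.

package main

import (
	"fmt"
	"math"
)

// score restates the heuristic above: age dominates, with up to +25%
// each for relative size (fraction of the bytes we want to free) and
// coldness (few hits). Higher scores are evicted first.
func score(ageSeconds float64, size, saveBytes uint64, hits, maxHits int) float64 {
	szWeight := math.Max(0, math.Min(1, float64(size)/float64(saveBytes)))
	hitsWeight := 1.0 - math.Max(0, math.Min(1.0, float64(hits)/float64(maxHits)))
	return ageSeconds * (1 + 0.25*szWeight + 0.25*hitsWeight)
}

func main() {
	// A week-old vs. a day-old 1 MiB entry against a 10 MiB budget:
	fmt.Printf("%.0f\n", score(7*86400, 1<<20, 10<<20, 0, 10)) // 771120
	fmt.Printf("%.0f\n", score(1*86400, 1<<20, 10<<20, 0, 10)) // 110160
}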
@@ -78,8 +78,7 @@ type cacheObjects struct { GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) - DeleteObjectFn func(ctx context.Context, bucket, object string) error - DeleteObjectsFn func(ctx context.Context, bucket string, objects []string) ([]error, error) + DeleteObjectFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) PutObjectFn func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) CopyObjectFn func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) } @@ -120,8 +119,8 @@ func (c *cacheObjects) updateMetadataIfChanged(ctx context.Context, dcache *disk } // DeleteObject clears cache entry if backend delete operation succeeds -func (c *cacheObjects) DeleteObject(ctx context.Context, bucket, object string) (err error) { - if err = c.DeleteObjectFn(ctx, bucket, object); err != nil { +func (c *cacheObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { + if objInfo, err = c.DeleteObjectFn(ctx, bucket, object, opts); err != nil { return } if c.isCacheExclude(bucket, object) || c.skipCache() { @@ -130,19 +129,38 @@ func (c *cacheObjects) DeleteObject(ctx context.Context, bucket, object string) dcache, cerr := c.getCacheLoc(bucket, object) if cerr != nil { - return + return objInfo, cerr } dcache.Delete(ctx, bucket, object) return } // DeleteObjects batch deletes objects in slice, and clears any cached entries -func (c *cacheObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (c *cacheObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { errs := make([]error, len(objects)) + objInfos := make([]ObjectInfo, len(objects)) for idx, object := range objects { - errs[idx] = c.DeleteObject(ctx, bucket, object) + opts.VersionID = object.VersionID + objInfos[idx], errs[idx] = c.DeleteObject(ctx, bucket, object.ObjectName, opts) } - return errs, nil + deletedObjects := make([]DeletedObject, len(objInfos)) + for idx := range errs { + if errs[idx] != nil { + continue + } + if objInfos[idx].DeleteMarker { + deletedObjects[idx] = DeletedObject{ + DeleteMarker: objInfos[idx].DeleteMarker, + DeleteMarkerVersionID: objInfos[idx].VersionID, + } + continue + } + deletedObjects[idx] = DeletedObject{ + ObjectName: objInfos[idx].Name, + VersionID: objInfos[idx].VersionID, + } + } + return deletedObjects, errs } // construct a metadata k-v map @@ -649,15 +667,8 @@ func newServerCacheObjects(ctx context.Context, config cache.Config) (CacheObjec GetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts) }, - DeleteObjectFn: func(ctx context.Context, bucket, object string) error { - return newObjectLayerFn().DeleteObject(ctx, bucket, object) - }, - DeleteObjectsFn: func(ctx context.Context, bucket string, objects []string) ([]error, error) { - errs := make([]error, 
len(objects)) - for idx, object := range objects { - errs[idx] = newObjectLayerFn().DeleteObject(ctx, bucket, object) - } - return errs, nil + DeleteObjectFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) { + return newObjectLayerFn().DeleteObject(ctx, bucket, object, opts) }, PutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { return newObjectLayerFn().PutObject(ctx, bucket, object, data, opts) diff --git a/cmd/encryption-v1.go b/cmd/encryption-v1.go index 789f40e5e..5900736d7 100644 --- a/cmd/encryption-v1.go +++ b/cmd/encryption-v1.go @@ -31,6 +31,7 @@ import ( "strconv" "strings" + "github.com/google/uuid" "github.com/minio/minio-go/v6/pkg/encrypt" "github.com/minio/minio/cmd/crypto" "github.com/minio/minio/cmd/logger" @@ -82,7 +83,7 @@ func isEncryptedMultipart(objInfo ObjectInfo) bool { } } // Further check if this object is uploaded using multipart mechanism - // by the user and it is not about XL internally splitting the + // by the user and it is not about Erasure internally splitting the // object into parts in PutObject() return !(objInfo.backendType == BackendErasure && len(objInfo.ETag) == 32) } @@ -859,6 +860,7 @@ func getDefaultOpts(header http.Header, copySource bool, metadata map[string]str var clientKey [32]byte var sse encrypt.ServerSide + opts = ObjectOptions{UserDefined: metadata} if copySource { if crypto.SSECopy.IsRequested(header) { clientKey, err = crypto.SSECopy.ParseHTTP(header) @@ -868,7 +870,8 @@ func getDefaultOpts(header http.Header, copySource bool, metadata map[string]str if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil { return } - return ObjectOptions{ServerSideEncryption: encrypt.SSECopy(sse), UserDefined: metadata}, nil + opts.ServerSideEncryption = encrypt.SSECopy(sse) + return } return } @@ -881,12 +884,13 @@ func getDefaultOpts(header http.Header, copySource bool, metadata map[string]str if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil { return } - return ObjectOptions{ServerSideEncryption: sse, UserDefined: metadata}, nil + opts.ServerSideEncryption = sse + return } if crypto.S3.IsRequested(header) || (metadata != nil && crypto.S3.IsEncrypted(metadata)) { - return ObjectOptions{ServerSideEncryption: encrypt.NewSSE(), UserDefined: metadata}, nil + opts.ServerSideEncryption = encrypt.NewSSE() } - return ObjectOptions{UserDefined: metadata}, nil + return } // get ObjectOptions for GET calls from encryption headers @@ -908,6 +912,19 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec } } + vid := strings.TrimSpace(r.URL.Query().Get("versionId")) + if vid != "" && vid != nullVersionID { + _, err := uuid.Parse(vid) + if err != nil { + logger.LogIf(ctx, err) + return opts, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: vid, + } + } + } + if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) { key, err := crypto.SSEC.ParseHTTP(r.Header) if err != nil { @@ -916,7 +933,11 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec derivedKey := deriveClientKey(key, bucket, object) encryption, err = encrypt.NewSSEC(derivedKey[:]) logger.CriticalIf(ctx, err) - return ObjectOptions{ServerSideEncryption: encryption, PartNumber: partNumber}, nil + return ObjectOptions{ + ServerSideEncryption: encryption, + VersionID: vid, + PartNumber: partNumber, + }, nil } // default case of passing encryption headers to backend @@ -925,18 +946,21 @@ func 
getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec return opts, err } opts.PartNumber = partNumber + opts.VersionID = vid return opts, nil } // get ObjectOptions for PUT calls from encryption headers and metadata func putOpts(ctx context.Context, r *http.Request, bucket, object string, metadata map[string]string) (opts ObjectOptions, err error) { + versioned := globalBucketVersioningSys.Enabled(bucket) // In the case of multipart custom format, the metadata needs to be checked in addition to header to see if it // is SSE-S3 encrypted, primarily because S3 protocol does not require SSE-S3 headers in PutObjectPart calls if GlobalGatewaySSE.SSES3() && (crypto.S3.IsRequested(r.Header) || crypto.S3.IsEncrypted(metadata)) { - return ObjectOptions{ServerSideEncryption: encrypt.NewSSE(), UserDefined: metadata}, nil + return ObjectOptions{ServerSideEncryption: encrypt.NewSSE(), UserDefined: metadata, Versioned: versioned}, nil } if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) { opts, err = getOpts(ctx, r, bucket, object) + opts.Versioned = versioned opts.UserDefined = metadata return } @@ -949,10 +973,15 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada if err != nil { return ObjectOptions{}, err } - return ObjectOptions{ServerSideEncryption: sseKms, UserDefined: metadata}, nil + return ObjectOptions{ServerSideEncryption: sseKms, UserDefined: metadata, Versioned: versioned}, nil } // default case of passing encryption headers and UserDefined metadata to backend - return getDefaultOpts(r.Header, false, metadata) + opts, err = getDefaultOpts(r.Header, false, metadata) + if err != nil { + return opts, err + } + opts.Versioned = versioned + return opts, nil } // get ObjectOptions for Copy calls with encryption headers provided on the target side and source side metadata @@ -981,5 +1010,9 @@ func copySrcOpts(ctx context.Context, r *http.Request, bucket, object string) (O } // default case of passing encryption headers to backend - return getDefaultOpts(r.Header, true, nil) + opts, err := getDefaultOpts(r.Header, false, nil) + if err != nil { + return opts, err + } + return opts, nil } diff --git a/cmd/endpoint.go b/cmd/endpoint.go index bbb39905f..4ac6c357d 100644 --- a/cmd/endpoint.go +++ b/cmd/endpoint.go @@ -547,9 +547,9 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg("invalid number of endpoints") } - // Return XL setup when all endpoints are path style. + // Return Erasure setup when all endpoints are path style. if endpoints[0].Type() == PathEndpointType { - setupType = XLSetupType + setupType = ErasureSetupType return endpoints, setupType, nil } @@ -614,18 +614,18 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp // All endpoints are pointing to local host if len(endpoints) == localEndpointCount { - // If all endpoints have same port number, Just treat it as distXL setup + // If all endpoints have same port number, Just treat it as distErasure setup // using URL style endpoints. if len(localPortSet) == 1 { if len(localServerHostSet) > 1 { return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg("all local endpoints should not have different hostnames/ips") } - return endpoints, DistXLSetupType, nil + return endpoints, DistErasureSetupType, nil } // Even though all endpoints are local, but those endpoints use different ports. - // This means it is DistXL setup. 
+ // This means it is DistErasure setup. } // Add missing port in all endpoints. @@ -645,7 +645,7 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp } // Error out if we have less than 2 unique servers. - if len(uniqueArgs.ToSlice()) < 2 && setupType == DistXLSetupType { + if len(uniqueArgs.ToSlice()) < 2 && setupType == DistErasureSetupType { err := fmt.Errorf("Unsupported number of endpoints (%s), minimum number of servers cannot be less than 2 in distributed setup", endpoints) return endpoints, setupType, err } @@ -655,7 +655,7 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp updateDomainIPs(uniqueArgs) } - setupType = DistXLSetupType + setupType = DistErasureSetupType return endpoints, setupType, nil } diff --git a/cmd/endpoint_test.go b/cmd/endpoint_test.go index e562211b4..0aab439bc 100644 --- a/cmd/endpoint_test.go +++ b/cmd/endpoint_test.go @@ -232,71 +232,71 @@ func TestCreateEndpoints(t *testing.T) { {"localhost:10000", [][]string{{"/d1"}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, FSSetupType, nil}, {"localhost:9000", [][]string{{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}}, "", Endpoints{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")}, - // XL Setup with PathEndpointType + // Erasure Setup with PathEndpointType {":1234", [][]string{{"/d1", "/d2", "/d3", "/d4"}}, ":1234", Endpoints{ Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}, Endpoint{URL: &url.URL{Path: mustAbs("/d2")}, IsLocal: true}, Endpoint{URL: &url.URL{Path: mustAbs("/d3")}, IsLocal: true}, Endpoint{URL: &url.URL{Path: mustAbs("/d4")}, IsLocal: true}, - }, XLSetupType, nil}, - // DistXL Setup with URLEndpointType + }, ErasureSetupType, nil}, + // DistErasure Setup with URLEndpointType {":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}}, ":9000", Endpoints{ Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d1"}, IsLocal: true}, Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d2"}, IsLocal: true}, Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d3"}, IsLocal: true}, Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d4"}, IsLocal: true}, - }, DistXLSetupType, nil}, - // DistXL Setup with URLEndpointType having mixed naming to local host. + }, DistErasureSetupType, nil}, + // DistErasure Setup with URLEndpointType having mixed naming to local host. 
{"127.0.0.1:10000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://127.0.0.1/d3", "http://127.0.0.1/d4"}}, "", Endpoints{}, -1, fmt.Errorf("all local endpoints should not have different hostnames/ips")}, {":9001", [][]string{{"http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export", "http://" + nonLoopBackIP + ":9001/export", "http://10.0.0.2:9001/export"}}, "", Endpoints{}, -1, fmt.Errorf("path '/export' can not be served by different port on same address")}, {":9000", [][]string{{"http://127.0.0.1:9000/export", "http://" + nonLoopBackIP + ":9000/export", "http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export"}}, "", Endpoints{}, -1, fmt.Errorf("path '/export' cannot be served by different address on same server")}, - // DistXL type + // DistErasure type {"127.0.0.1:10000", [][]string{{case1Endpoint1, case1Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", Endpoints{ Endpoint{URL: case1URLs[0], IsLocal: case1LocalFlags[0]}, Endpoint{URL: case1URLs[1], IsLocal: case1LocalFlags[1]}, Endpoint{URL: case1URLs[2], IsLocal: case1LocalFlags[2]}, Endpoint{URL: case1URLs[3], IsLocal: case1LocalFlags[3]}, - }, DistXLSetupType, nil}, + }, DistErasureSetupType, nil}, {"127.0.0.1:10000", [][]string{{case2Endpoint1, case2Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", Endpoints{ Endpoint{URL: case2URLs[0], IsLocal: case2LocalFlags[0]}, Endpoint{URL: case2URLs[1], IsLocal: case2LocalFlags[1]}, Endpoint{URL: case2URLs[2], IsLocal: case2LocalFlags[2]}, Endpoint{URL: case2URLs[3], IsLocal: case2LocalFlags[3]}, - }, DistXLSetupType, nil}, + }, DistErasureSetupType, nil}, {":80", [][]string{{case3Endpoint1, "http://example.org:9000/d2", "http://example.com/d3", "http://example.net/d4"}}, ":80", Endpoints{ Endpoint{URL: case3URLs[0], IsLocal: case3LocalFlags[0]}, Endpoint{URL: case3URLs[1], IsLocal: case3LocalFlags[1]}, Endpoint{URL: case3URLs[2], IsLocal: case3LocalFlags[2]}, Endpoint{URL: case3URLs[3], IsLocal: case3LocalFlags[3]}, - }, DistXLSetupType, nil}, + }, DistErasureSetupType, nil}, {":9000", [][]string{{case4Endpoint1, "http://example.org/d2", "http://example.com/d3", "http://example.net/d4"}}, ":9000", Endpoints{ Endpoint{URL: case4URLs[0], IsLocal: case4LocalFlags[0]}, Endpoint{URL: case4URLs[1], IsLocal: case4LocalFlags[1]}, Endpoint{URL: case4URLs[2], IsLocal: case4LocalFlags[2]}, Endpoint{URL: case4URLs[3], IsLocal: case4LocalFlags[3]}, - }, DistXLSetupType, nil}, + }, DistErasureSetupType, nil}, {":9000", [][]string{{case5Endpoint1, case5Endpoint2, case5Endpoint3, case5Endpoint4}}, ":9000", Endpoints{ Endpoint{URL: case5URLs[0], IsLocal: case5LocalFlags[0]}, Endpoint{URL: case5URLs[1], IsLocal: case5LocalFlags[1]}, Endpoint{URL: case5URLs[2], IsLocal: case5LocalFlags[2]}, Endpoint{URL: case5URLs[3], IsLocal: case5LocalFlags[3]}, - }, DistXLSetupType, nil}, + }, DistErasureSetupType, nil}, - // DistXL Setup using only local host. + // DistErasure Setup using only local host. 
{":9003", [][]string{{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://127.0.0.1:9002/d3", case6Endpoint}}, ":9003", Endpoints{ Endpoint{URL: case6URLs[0], IsLocal: case6LocalFlags[0]}, Endpoint{URL: case6URLs[1], IsLocal: case6LocalFlags[1]}, Endpoint{URL: case6URLs[2], IsLocal: case6LocalFlags[2]}, Endpoint{URL: case6URLs[3], IsLocal: case6LocalFlags[3]}, - }, DistXLSetupType, nil}, + }, DistErasureSetupType, nil}, } for _, testCase := range testCases { diff --git a/cmd/xl-v1-bucket.go b/cmd/erasure-bucket.go similarity index 84% rename from cmd/xl-v1-bucket.go rename to cmd/erasure-bucket.go index 5c1cf6a38..a9622ad27 100644 --- a/cmd/xl-v1-bucket.go +++ b/cmd/erasure-bucket.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,7 +22,6 @@ import ( "github.com/minio/minio-go/v6/pkg/s3utils" "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/sync/errgroup" ) @@ -35,13 +34,13 @@ var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound) /// Bucket operations // MakeBucket - make a bucket. -func (xl xlObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { +func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error { // Verify if bucket is valid. if err := s3utils.CheckValidBucketNameStrict(bucket); err != nil { return BucketNameInvalid{Bucket: bucket} } - storageDisks := xl.getDisks() + storageDisks := er.getDisks() g := errgroup.WithNErrs(len(storageDisks)) @@ -86,9 +85,9 @@ func undoDeleteBucket(storageDisks []StorageAPI, bucket string) { } // getBucketInfo - returns the BucketInfo from one of the load balanced disks. -func (xl xlObjects) getBucketInfo(ctx context.Context, bucketName string) (bucketInfo BucketInfo, err error) { +func (er erasureObjects) getBucketInfo(ctx context.Context, bucketName string) (bucketInfo BucketInfo, err error) { var bucketErrs []error - for _, disk := range xl.getLoadBalancedDisks() { + for _, disk := range er.getLoadBalancedDisks() { if disk == nil { bucketErrs = append(bucketErrs, errDiskNotFound) continue @@ -110,13 +109,13 @@ func (xl xlObjects) getBucketInfo(ctx context.Context, bucketName string) (bucke // reduce to one error based on read quorum. // `nil` is deliberately passed for ignoredErrs // because these errors were already ignored. - readQuorum := getReadQuorum(len(xl.getDisks())) + readQuorum := getReadQuorum(len(er.getDisks())) return BucketInfo{}, reduceReadQuorumErrs(ctx, bucketErrs, nil, readQuorum) } // GetBucketInfo - returns BucketInfo for a bucket. -func (xl xlObjects) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) { - bucketInfo, err := xl.getBucketInfo(ctx, bucket) +func (er erasureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) { + bucketInfo, err := er.getBucketInfo(ctx, bucket) if err != nil { return bi, toObjectErr(err, bucket) } @@ -124,8 +123,8 @@ func (xl xlObjects) GetBucketInfo(ctx context.Context, bucket string) (bi Bucket } // listBuckets - returns list of all buckets from a disk picked at random. 
-func (xl xlObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketInfo, err error) { - for _, disk := range xl.getLoadBalancedDisks() { +func (er erasureObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketInfo, err error) { + for _, disk := range er.getLoadBalancedDisks() { if disk == nil { continue } @@ -161,8 +160,8 @@ func (xl xlObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketInfo, } // ListBuckets - lists all the buckets, sorted by its name. -func (xl xlObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) { - bucketInfos, err := xl.listBuckets(ctx) +func (er erasureObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) { + bucketInfos, err := er.listBuckets(ctx) if err != nil { return nil, toObjectErr(err) } @@ -196,9 +195,9 @@ func deleteDanglingBucket(ctx context.Context, storageDisks []StorageAPI, dErrs } // DeleteBucket - deletes a bucket. -func (xl xlObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { +func (er erasureObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { // Collect if all disks report volume not found. - storageDisks := xl.getDisks() + storageDisks := er.getDisks() g := errgroup.WithNErrs(len(storageDisks)) @@ -235,7 +234,7 @@ func (xl xlObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete writeQuorum := getWriteQuorum(len(storageDisks)) err := reduceWriteQuorumErrs(ctx, dErrs, bucketOpIgnoredErrs, writeQuorum) - if err == errXLWriteQuorum { + if err == errErasureWriteQuorum { undoDeleteBucket(storageDisks, bucket) } if err != nil { @@ -251,25 +250,26 @@ func (xl xlObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete } // IsNotificationSupported returns whether bucket notification is applicable for this layer. -func (xl xlObjects) IsNotificationSupported() bool { +func (er erasureObjects) IsNotificationSupported() bool { return true } // IsListenBucketSupported returns whether listen bucket notification is applicable for this layer. -func (xl xlObjects) IsListenBucketSupported() bool { +func (er erasureObjects) IsListenBucketSupported() bool { return true } // IsEncryptionSupported returns whether server side encryption is implemented for this layer. -func (xl xlObjects) IsEncryptionSupported() bool { +func (er erasureObjects) IsEncryptionSupported() bool { return true } // IsCompressionSupported returns whether compression is applicable for this layer. -func (xl xlObjects) IsCompressionSupported() bool { +func (er erasureObjects) IsCompressionSupported() bool { return true } -func (xl xlObjects) IsTaggingSupported() bool { +// IsTaggingSupported indicates whether erasureObjects implements tagging support. +func (er erasureObjects) IsTaggingSupported() bool { return true } diff --git a/cmd/erasure-coding.go b/cmd/erasure-coding.go new file mode 100644 index 000000000..b0a4cb40a --- /dev/null +++ b/cmd/erasure-coding.go @@ -0,0 +1,143 @@ +/* + * MinIO Cloud Storage, (C) 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cmd
+
+import (
+ "context"
+ "sync"
+
+ "github.com/klauspost/reedsolomon"
+ "github.com/minio/minio/cmd/logger"
+)
+
+// Erasure - erasure encoding details.
+type Erasure struct {
+ encoder func() reedsolomon.Encoder
+ dataBlocks, parityBlocks int
+ blockSize int64
+}
+
+// NewErasure creates a new Erasure instance.
+func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int64) (e Erasure, err error) {
+ // Check the parameters for sanity now.
+ if dataBlocks <= 0 || parityBlocks <= 0 {
+ return e, reedsolomon.ErrInvShardNum
+ }
+
+ if dataBlocks+parityBlocks > 256 {
+ return e, reedsolomon.ErrMaxShardNum
+ }
+
+ e = Erasure{
+ dataBlocks: dataBlocks,
+ parityBlocks: parityBlocks,
+ blockSize: blockSize,
+ }
+
+ // Initialize the encoder lazily, on first use.
+ var enc reedsolomon.Encoder
+ var once sync.Once
+ e.encoder = func() reedsolomon.Encoder {
+ once.Do(func() {
+ e, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(int(e.ShardSize())))
+ if err != nil {
+ // Error conditions should be checked above.
+ panic(err)
+ }
+ enc = e
+ })
+ return enc
+ }
+ return
+}
+
+// EncodeData encodes the given data and returns the erasure-coded data.
+// It returns an error if the erasure coding failed.
+func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error) {
+ if len(data) == 0 {
+ return make([][]byte, e.dataBlocks+e.parityBlocks), nil
+ }
+ encoded, err := e.encoder().Split(data)
+ if err != nil {
+ logger.LogIf(ctx, err)
+ return nil, err
+ }
+ if err = e.encoder().Encode(encoded); err != nil {
+ logger.LogIf(ctx, err)
+ return nil, err
+ }
+ return encoded, nil
+}
+
+// DecodeDataBlocks decodes the given erasure-coded data.
+// It only decodes the data blocks but does not verify them.
+// It returns an error if the decoding failed.
+func (e *Erasure) DecodeDataBlocks(data [][]byte) error {
+ var isZero = 0
+ for _, b := range data[:] {
+ if len(b) == 0 {
+ isZero++
+ break
+ }
+ }
+ if isZero == 0 || isZero == len(data) {
+ // If all are zero, payload is 0 bytes.
+ return nil
+ }
+ return e.encoder().ReconstructData(data)
+}
+
+// DecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it.
+// It returns an error if the decoding failed.
+func (e *Erasure) DecodeDataAndParityBlocks(ctx context.Context, data [][]byte) error {
+ if err := e.encoder().Reconstruct(data); err != nil {
+ logger.LogIf(ctx, err)
+ return err
+ }
+ return nil
+}
+
+// ShardSize - returns the actual shard size from the erasure blockSize.
+func (e *Erasure) ShardSize() int64 {
+ return ceilFrac(e.blockSize, int64(e.dataBlocks))
+}
+
+// ShardFileSize - returns the final erasure size from the original size.
+func (e *Erasure) ShardFileSize(totalLength int64) int64 {
+ if totalLength == 0 {
+ return 0
+ }
+ if totalLength == -1 {
+ return -1
+ }
+ numShards := totalLength / e.blockSize
+ lastBlockSize := totalLength % int64(e.blockSize)
+ lastShardSize := ceilFrac(lastBlockSize, int64(e.dataBlocks))
+ return numShards*e.ShardSize() + lastShardSize
+}
+
+// ShardFileOffset - returns the effective offset where erasure reading begins.
+func (e *Erasure) ShardFileOffset(startOffset, length, totalLength int64) int64 {
+ shardSize := e.ShardSize()
+ shardFileSize := e.ShardFileSize(totalLength)
+ endShard := (startOffset + int64(length)) / e.blockSize
+ tillOffset := endShard*shardSize + shardSize
+ if tillOffset > shardFileSize {
+ tillOffset = shardFileSize
+ }
+ return tillOffset
+}
diff --git a/cmd/xl-v1-common.go b/cmd/erasure-common.go
similarity index 72%
rename from cmd/xl-v1-common.go
rename to cmd/erasure-common.go
index be826dee0..48218f1d4 100644
--- a/cmd/xl-v1-common.go
+++ b/cmd/erasure-common.go
@@ -1,5 +1,5 @@
/*
- * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,8 +24,8 @@ import (
)

// getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice.
-func (xl xlObjects) getLoadBalancedDisks() (newDisks []StorageAPI) {
- disks := xl.getDisks()
+func (er erasureObjects) getLoadBalancedDisks() (newDisks []StorageAPI) {
+ disks := er.getDisks()
// Based on the random shuffling return back randomized disks.
for _, i := range hashOrder(UTCNow().String(), len(disks)) {
newDisks = append(newDisks, disks[i-1])
@@ -36,13 +36,13 @@ func (xl xlObjects) getLoadBalancedDisks() (newDisks []StorageAPI) {
// This function does the following check, suppose
// object is "a/b/c/d", stat makes sure that objects "a/b/c",
// "a/b" and "a" do not exist.
-func (xl xlObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
+func (er erasureObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
var isParentDirObject func(string) bool
isParentDirObject = func(p string) bool {
if p == "." || p == SlashSeparator {
return false
}
- if xl.isObject(bucket, p) {
+ if er.isObject(ctx, bucket, p) {
// If there is already a file at prefix "p", return true.
return true
}
@@ -53,9 +53,9 @@
}

// isObject - returns `true` if the prefix is an object i.e if
-// `xl.json` exists at the leaf, false otherwise.
-func (xl xlObjects) isObject(bucket, prefix string) (ok bool) {
- storageDisks := xl.getDisks()
+// `xl.meta` exists at the leaf, false otherwise.
+func (er erasureObjects) isObject(ctx context.Context, bucket, prefix string) (ok bool) {
+ storageDisks := er.getDisks()

g := errgroup.WithNErrs(len(storageDisks))

@@ -66,22 +66,15 @@
return errDiskNotFound
}
// Check if 'prefix' is an object on this 'disk', else continue the check on the next disk
- fi, err := storageDisks[index].StatFile(bucket, pathJoin(prefix, xlMetaJSONFile))
- if err != nil {
- return err
- }
- if fi.Size == 0 {
- return errCorruptedFormat
- }
- return nil
+ return storageDisks[index].CheckFile(bucket, prefix)
}, index)
}

- // NOTE: Observe we are not trying to read `xl.json` and figure out the actual
+ // NOTE: Observe we are not trying to read `xl.meta` and figure out the actual
// quorum intentionally, but rely on the default case scenario. Actual quorum
// verification will happen at the top layer using getObjectInfo() and will be
// ignored if necessary.
readQuorum := getReadQuorum(len(storageDisks)) - return reduceReadQuorumErrs(GlobalContext, g.Wait(), objectOpIgnoredErrs, readQuorum) == nil + return reduceReadQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, readQuorum) == nil } diff --git a/cmd/xl-v1-common_test.go b/cmd/erasure-common_test.go similarity index 91% rename from cmd/xl-v1-common_test.go rename to cmd/erasure-common_test.go index 876ecb72e..ce2d2e58f 100644 --- a/cmd/xl-v1-common_test.go +++ b/cmd/erasure-common_test.go @@ -24,13 +24,13 @@ import ( ) // Tests for if parent directory is object -func TestXLParentDirIsObject(t *testing.T) { +func TestErasureParentDirIsObject(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - obj, fsDisks, err := prepareXL16(ctx) + obj, fsDisks, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Unable to initialize 'XL' object layer.") + t.Fatalf("Unable to initialize 'Erasure' object layer.") } // Remove all disks. @@ -41,7 +41,7 @@ func TestXLParentDirIsObject(t *testing.T) { bucketName := "testbucket" objectName := "object" - if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal(err) } objectContent := "12345" @@ -54,7 +54,7 @@ func TestXLParentDirIsObject(t *testing.T) { t.Fatalf("Unexpected object name returned got %s, expected %s", objInfo.Name, objectName) } - z := obj.(*xlZones) + z := obj.(*erasureZones) xl := z.zones[0].sets[0] testCases := []struct { parentIsObject bool diff --git a/cmd/erasure-decode.go b/cmd/erasure-decode.go index 2468ab661..ed8abbb16 100644 --- a/cmd/erasure-decode.go +++ b/cmd/erasure-decode.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -191,7 +191,7 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) { return newBuf, nil } - return nil, errXLReadQuorum + return nil, errErasureReadQuorum } type errDecodeHealRequired struct { diff --git a/cmd/erasure-decode_test.go b/cmd/erasure-decode_test.go index f168b3cb1..17beb2ea1 100644 --- a/cmd/erasure-decode_test.go +++ b/cmd/erasure-decode_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -132,7 +132,7 @@ func TestErasureDecode(t *testing.T) { if disk == OfflineDisk { continue } - tillOffset := erasure.ShardFileTillOffset(test.offset, test.length, test.data) + tillOffset := erasure.ShardFileOffset(test.offset, test.length, test.data) bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", tillOffset, writeAlgorithm, bitrotWriterSum(writers[index]), erasure.ShardSize()) } @@ -163,7 +163,7 @@ func TestErasureDecode(t *testing.T) { if disk == OfflineDisk { continue } - tillOffset := erasure.ShardFileTillOffset(test.offset, test.length, test.data) + tillOffset := erasure.ShardFileOffset(test.offset, test.length, test.data) bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", tillOffset, writeAlgorithm, bitrotWriterSum(writers[index]), erasure.ShardSize()) } for j := range disks[:test.offDisks] { @@ -268,7 +268,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) { if disk == OfflineDisk { continue } - tillOffset := erasure.ShardFileTillOffset(offset, readLen, length) + tillOffset := erasure.ShardFileOffset(offset, readLen, length) bitrotReaders[index] = newStreamingBitrotReader(disk, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize()) } err = erasure.Decode(context.Background(), buf, bitrotReaders, offset, readLen, length, nil) @@ -330,7 +330,7 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64, if writers[index] == nil { continue } - tillOffset := erasure.ShardFileTillOffset(0, size, size) + tillOffset := erasure.ShardFileOffset(0, size, size) bitrotReaders[index] = newStreamingBitrotReader(disk, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize()) } if err = erasure.Decode(context.Background(), bytes.NewBuffer(content[:0]), bitrotReaders, 0, size, size, nil); err != nil { diff --git a/cmd/erasure-encode.go b/cmd/erasure-encode.go index f263b579d..d8f9cc65c 100644 --- a/cmd/erasure-encode.go +++ b/cmd/erasure-encode.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cmd/erasure-encode_test.go b/cmd/erasure-encode_test.go index 8fbe2da70..ab923ec1b 100644 --- a/cmd/erasure-encode_test.go +++ b/cmd/erasure-encode_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cmd/xl-v1-errors.go b/cmd/erasure-errors.go similarity index 73% rename from cmd/xl-v1-errors.go rename to cmd/erasure-errors.go index 3dc0260b7..e7b2d366e 100644 --- a/cmd/xl-v1-errors.go +++ b/cmd/erasure-errors.go @@ -18,11 +18,11 @@ package cmd import "errors" -// errXLReadQuorum - did not meet read quorum. -var errXLReadQuorum = errors.New("Read failed. Insufficient number of disks online") +// errErasureReadQuorum - did not meet read quorum. +var errErasureReadQuorum = errors.New("Read failed. Insufficient number of disks online") -// errXLWriteQuorum - did not meet write quorum. -var errXLWriteQuorum = errors.New("Write failed. Insufficient number of disks online") +// errErasureWriteQuorum - did not meet write quorum. +var errErasureWriteQuorum = errors.New("Write failed. 
Insufficient number of disks online") // errNoHealRequired - returned when healing is attempted on a previously healed disks. var errNoHealRequired = errors.New("No healing is required") diff --git a/cmd/erasure-heal_test.go b/cmd/erasure-heal_test.go index 5ab0e23db..4d1fff044 100644 --- a/cmd/erasure-heal_test.go +++ b/cmd/erasure-heal_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -71,7 +71,7 @@ func TestErasureHeal(t *testing.T) { // create some test data setup, err := newErasureTestSetup(test.dataBlocks, test.disks-test.dataBlocks, test.blocksize) if err != nil { - t.Fatalf("Test %d: failed to setup XL environment: %v", i, err) + t.Fatalf("Test %d: failed to setup Erasure environment: %v", i, err) } disks := setup.disks erasure, err := NewErasure(context.Background(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize) diff --git a/cmd/xl-v1-healing-common.go b/cmd/erasure-healing-common.go similarity index 66% rename from cmd/xl-v1-healing-common.go rename to cmd/erasure-healing-common.go index f7daef0bd..61cacc19d 100644 --- a/cmd/xl-v1-healing-common.go +++ b/cmd/erasure-healing-common.go @@ -18,10 +18,8 @@ package cmd import ( "context" - "fmt" "time" - "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/madmin" ) @@ -31,7 +29,7 @@ func commonTime(modTimes []time.Time) (modTime time.Time, count int) { timeOccurenceMap := make(map[time.Time]int) // Ignore the uuid sentinel and count the rest. for _, time := range modTimes { - if time == timeSentinel { + if time.Equal(timeSentinel) { continue } timeOccurenceMap[time]++ @@ -61,45 +59,45 @@ func bootModtimes(diskCount int) []time.Time { return modTimes } -// Extracts list of times from xlMetaV1 slice and returns, skips +// Extracts list of times from FileInfo slice and returns, skips // slice elements which have errors. -func listObjectModtimes(partsMetadata []xlMetaV1, errs []error) (modTimes []time.Time) { +func listObjectModtimes(partsMetadata []FileInfo, errs []error) (modTimes []time.Time) { modTimes = bootModtimes(len(partsMetadata)) for index, metadata := range partsMetadata { if errs[index] != nil { continue } // Once the file is found, save the uuid saved on disk. - modTimes[index] = metadata.Stat.ModTime + modTimes[index] = metadata.ModTime } return modTimes } // Notes: // There are 5 possible states a disk could be in, -// 1. __online__ - has the latest copy of xl.json - returned by listOnlineDisks +// 1. __online__ - has the latest copy of xl.meta - returned by listOnlineDisks // // 2. __offline__ - err == errDiskNotFound // -// 3. __availableWithParts__ - has the latest copy of xl.json and has all +// 3. __availableWithParts__ - has the latest copy of xl.meta and has all // parts with checksums matching; returned by disksWithAllParts // // 4. __outdated__ - returned by outDatedDisk, provided []StorageAPI // returned by diskWithAllParts is passed for latestDisks. -// - has an old copy of xl.json -// - doesn't have xl.json (errFileNotFound) -// - has the latest xl.json but one or more parts are corrupt +// - has an old copy of xl.meta +// - doesn't have xl.meta (errFileNotFound) +// - has the latest xl.meta but one or more parts are corrupt // -// 5. __missingParts__ - has the latest copy of xl.json but has some parts +// 5. 
__missingParts__ - has the latest copy of xl.meta but has some parts
// missing. This is identified separately since this may need manual
// inspection to understand the root cause. E.g., this could be due to
// backend filesystem corruption.

// listOnlineDisks - returns
// - a slice of disks where disks having an 'older' xl.meta (or nothing)
// are set to nil.
// - latest (in time) of the maximally occurring modTime(s).
-func listOnlineDisks(disks []StorageAPI, partsMetadata []xlMetaV1, errs []error) (onlineDisks []StorageAPI, modTime time.Time) {
+func listOnlineDisks(disks []StorageAPI, partsMetadata []FileInfo, errs []error) (onlineDisks []StorageAPI, modTime time.Time) {
onlineDisks = make([]StorageAPI, len(disks))

// List all the file commit ids from parts metadata.
@@ -110,7 +108,7 @@ func listOnlineDisks(disks []StorageAPI, partsMetadata []xlMetaV1, errs []error)

// Create a new online disks slice, which have common uuid.
for index, t := range modTimes {
- if t == modTime {
+ if t.Equal(modTime) {
onlineDisks[index] = disks[index]
} else {
onlineDisks[index] = nil
@@ -119,89 +117,67 @@
return onlineDisks, modTime
}

-// Returns the latest updated xlMeta files and error in case of failure.
-func getLatestXLMeta(ctx context.Context, partsMetadata []xlMetaV1, errs []error) (xlMetaV1, error) {
-
+// Returns the latest updated FileInfo and an error in case of failure.
+func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, errs []error) (FileInfo, error) {
// There should be at least half correct entries, if not return failure
if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, len(partsMetadata)/2); reducedErr != nil {
- return xlMetaV1{}, reducedErr
+ return FileInfo{}, reducedErr
}

// List all the file commit ids from parts metadata.
modTimes := listObjectModtimes(partsMetadata, errs)

- // Count all latest updated xlMeta values
+ // Count all latest updated FileInfo values
var count int
- var latestXLMeta xlMetaV1
+ var latestFileInfo FileInfo

// Reduce the list of modTimes to a single common value, i.e. the last updated time
modTime, _ := commonTime(modTimes)

- // Interate through all the modTimes and count the xlMeta(s) with latest time.
+ // Iterate through all the modTimes and count the FileInfo(s) with the latest time.
for index, t := range modTimes {
- if t == modTime && partsMetadata[index].IsValid() {
- latestXLMeta = partsMetadata[index]
+ if t.Equal(modTime) && partsMetadata[index].IsValid() {
+ latestFileInfo = partsMetadata[index]
count++
}
}
if count < len(partsMetadata)/2 {
- return xlMetaV1{}, errXLReadQuorum
+ return FileInfo{}, errErasureReadQuorum
}

- return latestXLMeta, nil
+ return latestFileInfo, nil
}

// disksWithAllParts - This function needs to be called with
// []StorageAPI returned by listOnlineDisks. Returns,
//
-// - disks which have all parts specified in the latest xl.json.
+// - disks which have all parts specified in the latest xl.meta.
//
// - slice of errors about the state of data files on disk - can have
// a not-found error or a hash-mismatch error.
-func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []xlMetaV1, errs []error, bucket, +func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []FileInfo, errs []error, bucket, object string, scanMode madmin.HealScanMode) ([]StorageAPI, []error) { availableDisks := make([]StorageAPI, len(onlineDisks)) dataErrs := make([]error, len(onlineDisks)) for i, onlineDisk := range onlineDisks { - if onlineDisk == nil { + if errs[i] != nil { dataErrs[i] = errs[i] continue } + if onlineDisk == nil { + dataErrs[i] = errDiskNotFound + continue + } switch scanMode { case madmin.HealDeepScan: - erasure := partsMetadata[i].Erasure - - // disk has a valid xl.json but may not have all the + // disk has a valid xl.meta but may not have all the // parts. This is considered an outdated disk, since // it needs healing too. - for _, part := range partsMetadata[i].Parts { - checksumInfo := erasure.GetChecksumInfo(part.Number) - partPath := pathJoin(object, fmt.Sprintf("part.%d", part.Number)) - err := onlineDisk.VerifyFile(bucket, partPath, erasure.ShardFileSize(part.Size), checksumInfo.Algorithm, checksumInfo.Hash, erasure.ShardSize()) - if err != nil { - if !IsErr(err, []error{ - errFileNotFound, - errVolumeNotFound, - errFileCorrupt, - }...) { - logger.GetReqInfo(ctx).AppendTags("disk", onlineDisk.String()) - logger.LogIf(ctx, err) - } - dataErrs[i] = err - break - } - } + dataErrs[i] = onlineDisk.VerifyFile(bucket, object, partsMetadata[i]) case madmin.HealNormalScan: - for _, part := range partsMetadata[i].Parts { - partPath := pathJoin(object, fmt.Sprintf("part.%d", part.Number)) - _, err := onlineDisk.StatFile(bucket, partPath) - if err != nil { - dataErrs[i] = err - break - } - } + dataErrs[i] = onlineDisk.CheckParts(bucket, object, partsMetadata[i]) } if dataErrs[i] == nil { diff --git a/cmd/xl-v1-healing-common_test.go b/cmd/erasure-healing-common_test.go similarity index 77% rename from cmd/xl-v1-healing-common_test.go rename to cmd/erasure-healing-common_test.go index 7d2df85e1..300f99775 100644 --- a/cmd/xl-v1-healing-common_test.go +++ b/cmd/erasure-healing-common_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -95,9 +95,9 @@ func TestListOnlineDisks(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - obj, disks, err := prepareXL16(ctx) + obj, disks, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Prepare XL backend failed - %v", err) + t.Fatalf("Prepare Erasure backend failed - %v", err) } defer removeRoots(disks) @@ -141,9 +141,9 @@ func TestListOnlineDisks(t *testing.T) { modTimes: modTimesThreeNone, expectedTime: threeNanoSecs, errs: []error{ - // Disks that have a valid xl.json. + // Disks that have a valid xl.meta. nil, nil, nil, nil, nil, nil, nil, - // Majority of disks don't have xl.json. + // Majority of disks don't have xl.meta. errFileNotFound, errFileNotFound, errFileNotFound, errFileNotFound, errFileNotFound, errDiskAccessDenied, @@ -156,9 +156,9 @@ func TestListOnlineDisks(t *testing.T) { modTimes: modTimesThreeNone, expectedTime: threeNanoSecs, errs: []error{ - // Disks that have a valid xl.json. + // Disks that have a valid xl.meta. nil, nil, nil, nil, nil, nil, nil, - // Majority of disks don't have xl.json. + // Majority of disks don't have xl.meta. 
errFileNotFound, errFileNotFound, errFileNotFound, errFileNotFound, errFileNotFound, errDiskAccessDenied, @@ -170,27 +170,34 @@ func TestListOnlineDisks(t *testing.T) { } bucket := "bucket" + err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) + if err != nil { + t.Fatalf("Failed to make a bucket %v", err) + } + object := "object" data := bytes.Repeat([]byte("a"), 1024) - z := obj.(*xlZones) - xlDisks := z.zones[0].sets[0].getDisks() + z := obj.(*erasureZones) + erasureDisks := z.zones[0].sets[0].getDisks() for i, test := range testCases { - // Prepare bucket/object backend for the tests below. - - // Cleanup from previous test. - obj.DeleteObject(GlobalContext, bucket, object) - obj.DeleteBucket(GlobalContext, bucket, false) - - err = obj.MakeBucketWithLocation(GlobalContext, "bucket", "", false) - if err != nil { - t.Fatalf("Failed to make a bucket %v", err) - } - - _, err = obj.PutObject(GlobalContext, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{}) + _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{}) if err != nil { t.Fatalf("Failed to putObject %v", err) } + partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "") + fi, err := getLatestFileInfo(ctx, partsMetadata, errs) + if err != nil { + t.Fatalf("Failed to getLatestFileInfo %v", err) + } + + for j := range partsMetadata { + if errs[j] != nil { + t.Fatalf("Test %d: expected error to be nil: %s", i+1, errs[j]) + } + partsMetadata[j].ModTime = test.modTimes[j] + } + tamperedIndex := -1 switch test._tamperBackend { case deletePart: @@ -199,11 +206,11 @@ func TestListOnlineDisks(t *testing.T) { continue } // Remove a part from a disk - // which has a valid xl.json, + // which has a valid xl.meta, // and check if that disk // appears in outDatedDisks. tamperedIndex = index - dErr := xlDisks[index].DeleteFile(bucket, filepath.Join(object, "part.1")) + dErr := erasureDisks[index].DeleteFile(bucket, pathJoin(object, fi.DataDir, "part.1")) if dErr != nil { t.Fatalf("Test %d: Failed to delete %s - %v", i+1, filepath.Join(object, "part.1"), dErr) @@ -216,11 +223,11 @@ func TestListOnlineDisks(t *testing.T) { continue } // Corrupt a part from a disk - // which has a valid xl.json, + // which has a valid xl.meta, // and check if that disk // appears in outDatedDisks. 
tamperedIndex = index - filePath := pathJoin(xlDisks[index].String(), bucket, object, "part.1") + filePath := pathJoin(erasureDisks[index].String(), bucket, object, fi.DataDir, "part.1") f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0) if err != nil { t.Fatalf("Failed to open %s: %s\n", filePath, err) @@ -232,27 +239,19 @@ func TestListOnlineDisks(t *testing.T) { } - partsMetadata, errs := readAllXLMetadata(GlobalContext, xlDisks, bucket, object) - for i := range partsMetadata { - if errs[i] != nil { - t.Fatalf("Test %d: expected error to be nil: %s", i+1, errs[i].Error()) - } - partsMetadata[i].Stat.ModTime = test.modTimes[i] - } - - onlineDisks, modTime := listOnlineDisks(xlDisks, partsMetadata, test.errs) + onlineDisks, modTime := listOnlineDisks(erasureDisks, partsMetadata, test.errs) if !modTime.Equal(test.expectedTime) { t.Fatalf("Test %d: Expected modTime to be equal to %v but was found to be %v", i+1, test.expectedTime, modTime) } - availableDisks, newErrs := disksWithAllParts(GlobalContext, onlineDisks, partsMetadata, test.errs, bucket, object, madmin.HealDeepScan) + availableDisks, newErrs := disksWithAllParts(ctx, onlineDisks, partsMetadata, test.errs, bucket, object, madmin.HealDeepScan) test.errs = newErrs if test._tamperBackend != noTamper { if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil { t.Fatalf("Test %d: disk (%v) with part.1 missing is not a disk with available data", - i+1, xlDisks[tamperedIndex]) + i+1, erasureDisks[tamperedIndex]) } } @@ -262,9 +261,9 @@ func TestListOnlineDisks(t *testing.T) { func TestDisksWithAllParts(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - obj, disks, err := prepareXL16(ctx) + obj, disks, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Prepare XL backend failed - %v", err) + t.Fatalf("Prepare Erasure backend failed - %v", err) } defer removeRoots(disks) @@ -273,10 +272,10 @@ func TestDisksWithAllParts(t *testing.T) { // make data with more than one part partCount := 3 data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount) - z := obj.(*xlZones) - xl := z.zones[0].sets[0] - xlDisks := xl.getDisks() - err = obj.MakeBucketWithLocation(ctx, "bucket", "", false) + z := obj.(*erasureZones) + s := z.zones[0].sets[0] + erasureDisks := s.getDisks() + err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) if err != nil { t.Fatalf("Failed to make a bucket %v", err) } @@ -286,22 +285,22 @@ func TestDisksWithAllParts(t *testing.T) { t.Fatalf("Failed to putObject %v", err) } - _, errs := readAllXLMetadata(ctx, xlDisks, bucket, object) - readQuorum := len(xlDisks) / 2 + _, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "") + readQuorum := len(erasureDisks) / 2 if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil { t.Fatalf("Failed to read xl meta data %v", reducedErr) } // Test that all disks are returned without any failures with // unmodified meta data - partsMetadata, errs := readAllXLMetadata(ctx, xlDisks, bucket, object) + partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "") if err != nil { t.Fatalf("Failed to read xl meta data %v", err) } - filteredDisks, errs := disksWithAllParts(ctx, xlDisks, partsMetadata, errs, bucket, object, madmin.HealDeepScan) + filteredDisks, errs := disksWithAllParts(ctx, erasureDisks, partsMetadata, errs, bucket, object, madmin.HealDeepScan) - if len(filteredDisks) != len(xlDisks) { + if len(filteredDisks) != len(erasureDisks) { t.Errorf("Unexpected 
number of disks: %d", len(filteredDisks)) } @@ -324,7 +323,7 @@ func TestDisksWithAllParts(t *testing.T) { for diskIndex, partName := range diskFailures { for i := range partsMetadata[diskIndex].Erasure.Checksums { if fmt.Sprintf("part.%d", i+1) == partName { - filePath := pathJoin(xlDisks[diskIndex].String(), bucket, object, partName) + filePath := pathJoin(erasureDisks[diskIndex].String(), bucket, object, partsMetadata[diskIndex].DataDir, partName) f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0) if err != nil { t.Fatalf("Failed to open %s: %s\n", filePath, err) @@ -335,10 +334,10 @@ func TestDisksWithAllParts(t *testing.T) { } } - errs = make([]error, len(xlDisks)) - filteredDisks, errs = disksWithAllParts(ctx, xlDisks, partsMetadata, errs, bucket, object, madmin.HealDeepScan) + errs = make([]error, len(erasureDisks)) + filteredDisks, errs = disksWithAllParts(ctx, erasureDisks, partsMetadata, errs, bucket, object, madmin.HealDeepScan) - if len(filteredDisks) != len(xlDisks) { + if len(filteredDisks) != len(erasureDisks) { t.Errorf("Unexpected number of disks: %d", len(filteredDisks)) } diff --git a/cmd/xl-v1-healing.go b/cmd/erasure-healing.go similarity index 78% rename from cmd/xl-v1-healing.go rename to cmd/erasure-healing.go index 20d9c57a3..9a2c58938 100644 --- a/cmd/xl-v1-healing.go +++ b/cmd/erasure-healing.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ import ( "context" "fmt" "io" + "sync" "time" "github.com/minio/minio/cmd/logger" @@ -27,12 +28,12 @@ import ( "github.com/minio/minio/pkg/sync/errgroup" ) -func (xl xlObjects) ReloadFormat(ctx context.Context, dryRun bool) error { +func (er erasureObjects) ReloadFormat(ctx context.Context, dryRun bool) error { logger.LogIf(ctx, NotImplemented{}) return NotImplemented{} } -func (xl xlObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { +func (er erasureObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { logger.LogIf(ctx, NotImplemented{}) return madmin.HealResultItem{}, NotImplemented{} } @@ -40,14 +41,14 @@ func (xl xlObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealRes // Heals a bucket if it doesn't exist on one of the disks, additionally // also heals the missing entries for bucket metadata files // `policy.json, notification.xml, listeners.json`. -func (xl xlObjects) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) ( +func (er erasureObjects) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) ( result madmin.HealResultItem, err error) { if !dryRun { defer ObjectPathUpdated(bucket) } - storageDisks := xl.getDisks() - storageEndpoints := xl.getEndpoints() + storageDisks := er.getDisks() + storageEndpoints := er.getEndpoints() // get write quorum for an object writeQuorum := getWriteQuorum(len(storageDisks)) @@ -158,7 +159,6 @@ func healBucket(ctx context.Context, storageDisks []StorageAPI, storageEndpoints State: afterState[i], }) } - return res, nil } @@ -196,22 +196,22 @@ func listAllBuckets(storageDisks []StorageAPI, healBuckets map[string]VolInfo) ( // Only heal on disks where we are sure that healing is needed. We can expand // this list as and when we figure out more errors can be added to this list safely. 
-func shouldHealObjectOnDisk(xlErr, dataErr error, meta xlMetaV1, quorumModTime time.Time) bool {
- switch xlErr {
+func shouldHealObjectOnDisk(erErr, dataErr error, meta FileInfo, quorumModTime time.Time) bool {
+ switch erErr {
case errFileNotFound:
return true
case errCorruptedFormat:
return true
}
- if xlErr == nil {
- // If xl.json was read fine but there may be problem with the part.N files.
+ if erErr == nil {
+ // If xl.meta was read fine but there may be a problem with the part.N files.
if IsErr(dataErr, []error{
errFileNotFound,
errFileCorrupt,
}...) {
return true
}
- if !quorumModTime.Equal(meta.Stat.ModTime) {
+ if !quorumModTime.Equal(meta.ModTime) {
return true
}
}
@@ -219,20 +219,20 @@
}

// Heals an object by re-writing corrupt/missing erasure blocks.
-func (xl xlObjects) healObject(ctx context.Context, bucket string, object string,
- partsMetadata []xlMetaV1, errs []error, latestXLMeta xlMetaV1,
+func (er erasureObjects) healObject(ctx context.Context, bucket string, object string,
+ partsMetadata []FileInfo, errs []error, latestFileInfo FileInfo,
dryRun bool, remove bool, scanMode madmin.HealScanMode) (result madmin.HealResultItem, err error) {

- dataBlocks := latestXLMeta.Erasure.DataBlocks
+ dataBlocks := latestFileInfo.Erasure.DataBlocks

- storageDisks := xl.getDisks()
- storageEndpoints := xl.getEndpoints()
+ storageDisks := er.getDisks()
+ storageEndpoints := er.getEndpoints()

- // List of disks having latest version of the object xl.json
+ // List of disks having the latest version of the object's xl.meta
// (by modtime).
latestDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

- // List of disks having all parts as per latest xl.json.
+ // List of disks having all parts as per the latest xl.meta.
availableDisks, dataErrs := disksWithAllParts(ctx, latestDisks, partsMetadata, errs, bucket, object, scanMode)

// Initialize heal result object
@@ -241,8 +241,8 @@
Bucket: bucket,
Object: object,
DiskCount: len(storageDisks),
- ParityBlocks: latestXLMeta.Erasure.ParityBlocks,
- DataBlocks: latestXLMeta.Erasure.DataBlocks,
+ ParityBlocks: latestFileInfo.Erasure.ParityBlocks,
+ DataBlocks: latestFileInfo.Erasure.DataBlocks,

// Initialize object size to -1, so we can detect if we are
// unable to reliably find the object size.
@@ -263,7 +263,7 @@
numAvailableDisks++
// If data is sane on any one disk, we can
// extract the correct object size.
- result.ObjectSize = partsMetadata[i].Stat.Size
+ result.ObjectSize = partsMetadata[i].Size
result.ParityBlocks = partsMetadata[i].Erasure.ParityBlocks
result.DataBlocks = partsMetadata[i].Erasure.DataBlocks
case errs[i] == errDiskNotFound, dataErrs[i] == errDiskNotFound:
@@ -307,18 +307,18 @@
// If less than read quorum number of disks have all the parts
// of the data, we can't reconstruct the erasure-coded data.
if numAvailableDisks < dataBlocks {
- // Check if xl.json, and corresponding parts are also missing.
+ // Check if xl.meta and the corresponding parts are also missing.
if m, ok := isObjectDangling(partsMetadata, errs, dataErrs); ok {
writeQuorum := m.Erasure.DataBlocks + 1
if m.Erasure.DataBlocks == 0 {
writeQuorum = getWriteQuorum(len(storageDisks))
}
if !dryRun && remove {
- err = xl.deleteObject(ctx, bucket, object, writeQuorum, false)
+ err = er.deleteObject(ctx, bucket, object, writeQuorum)
}
- return defaultHealResult(latestXLMeta, storageDisks, storageEndpoints, errs, bucket, object), err
+ return defaultHealResult(latestFileInfo, storageDisks, storageEndpoints, errs, bucket, object), err
}
- return result, toObjectErr(errXLReadQuorum, bucket, object)
+ return result, toObjectErr(errErasureReadQuorum, bucket, object)
}

if disksToHealCount == 0 {
@@ -332,32 +332,19 @@
return result, nil
}

- // Latest xlMetaV1 for reference. If a valid metadata is not
+ // Latest FileInfo for reference. If a valid metadata is not
// present, it is as good as object not found.
- latestMeta, pErr := pickValidXLMeta(ctx, partsMetadata, modTime, dataBlocks)
+ latestMeta, pErr := pickValidFileInfo(ctx, partsMetadata, modTime, dataBlocks)
if pErr != nil {
return result, toObjectErr(pErr, bucket, object)
}

- // Clear data files of the object on outdated disks
- for _, disk := range outDatedDisks {
- // Before healing outdated disks, we need to remove
- // xl.json and part files from "bucket/object/" so
- // that rename(minioMetaBucket, "tmp/tmpuuid/",
- // "bucket", "object/") succeeds.
- if disk == nil {
- // Not an outdated disk.
- continue
- }
-
- // List and delete the object directory,
- files, derr := disk.ListDir(bucket, object, -1, "")
- if derr == nil {
- for _, entry := range files {
- _ = disk.DeleteFile(bucket,
- pathJoin(object, entry))
- }
- }
+ cleanFileInfo := func(fi FileInfo) FileInfo {
+ // Returns a copy of the 'fi' with checksums and parts nil'ed.
+ nfi := fi
+ nfi.Erasure.Checksums = nil
+ nfi.Parts = nil
+ return nfi
}

// Reorder so that we have data disks first and parity disks next.
@@ -368,7 +355,7 @@
if outDatedDisks[i] == nil {
continue
}
- partsMetadata[i] = newXLMetaFromXLMeta(latestMeta)
+ partsMetadata[i] = cleanFileInfo(latestMeta)
}

// We write at temporary location and then rename to final location.
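Reviewer note: cleanFileInfo above leans on Go's value semantics. Copying the FileInfo and nil'ing its slice fields yields an independent metadata template that the heal loop repopulates part by part via AddObjectPart/AddChecksumInfo. A minimal sketch of the pattern, using hypothetical stand-in types (fileInfo/erasureInfo below are not the patch's real definitions):

```go
package main

import "fmt"

// Trimmed-down stand-ins for FileInfo/ErasureInfo, only to illustrate
// the cleanFileInfo pattern from the hunk above.
type erasureInfo struct {
	DataBlocks   int
	ParityBlocks int
	Checksums    []string
}

type fileInfo struct {
	Size    int64
	Erasure erasureInfo
	Parts   []int
}

func cleanFileInfo(fi fileInfo) fileInfo {
	nfi := fi                   // struct copy; slice fields still alias the caller's arrays...
	nfi.Erasure.Checksums = nil // ...until we drop them here
	nfi.Parts = nil
	return nfi
}

func main() {
	latest := fileInfo{
		Size:    42,
		Erasure: erasureInfo{DataBlocks: 4, ParityBlocks: 2, Checksums: []string{"a", "b"}},
		Parts:   []int{1, 2},
	}
	fresh := cleanFileInfo(latest)
	fmt.Println(fresh.Erasure.DataBlocks, fresh.Parts == nil)     // 4 true
	fmt.Println(len(latest.Parts), len(latest.Erasure.Checksums)) // 2 2 - original untouched
}
```

The copy matters: mutating the template's slices must not disturb latestMeta, which the heal loop still consults for part sizes and the data directory.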
@@ -388,7 +375,7 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string partSize := latestMeta.Parts[partIndex].Size partActualSize := latestMeta.Parts[partIndex].ActualSize partNumber := latestMeta.Parts[partIndex].Number - tillOffset := erasure.ShardFileTillOffset(0, partSize, partSize) + tillOffset := erasure.ShardFileOffset(0, partSize, partSize) readers := make([]io.ReaderAt, len(latestDisks)) checksumAlgo := erasureInfo.GetChecksumInfo(partNumber).Algorithm for i, disk := range latestDisks { @@ -396,7 +383,7 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string continue } checksumInfo := partsMetadata[i].Erasure.GetChecksumInfo(partNumber) - partPath := pathJoin(object, fmt.Sprintf("part.%d", partNumber)) + partPath := pathJoin(object, latestMeta.DataDir, fmt.Sprintf("part.%d", partNumber)) readers[i] = newBitrotReader(disk, bucket, partPath, tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize()) } writers := make([]io.Writer, len(outDatedDisks)) @@ -404,21 +391,22 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string if disk == OfflineDisk { continue } - partPath := pathJoin(tmpID, fmt.Sprintf("part.%d", partNumber)) - writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, partPath, tillOffset, checksumAlgo, erasure.ShardSize()) + partPath := pathJoin(tmpID, latestMeta.DataDir, fmt.Sprintf("part.%d", partNumber)) + writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, partPath, tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize()) } - hErr := erasure.Heal(ctx, readers, writers, partSize) + err = erasure.Heal(ctx, readers, writers, partSize) closeBitrotReaders(readers) closeBitrotWriters(writers) - if hErr != nil { - return result, toObjectErr(hErr, bucket, object) + if err != nil { + return result, toObjectErr(err, bucket, object) } // outDatedDisks that had write errors should not be // written to for remaining parts, so we nil it out. for i, disk := range outDatedDisks { - if disk == nil { + if disk == OfflineDisk { continue } + // A non-nil stale disk which did not receive // a healed part checksum had a write error. if writers[i] == nil { @@ -426,6 +414,7 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string disksToHealCount-- continue } + partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize) partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{ PartNumber: partNumber, @@ -436,33 +425,31 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string // If all disks are having errors, we give up. if disksToHealCount == 0 { - return result, fmt.Errorf("all disks without up-to-date data had write errors") + return result, fmt.Errorf("all disks had write errors, unable to heal") } } - // Cleanup in case of xl.json writing failure + // Cleanup in case of er.meta writing failure writeQuorum := latestMeta.Erasure.DataBlocks + 1 - defer xl.deleteObject(ctx, minioMetaTmpBucket, tmpID, writeQuorum, false) + defer er.deleteObject(ctx, minioMetaTmpBucket, tmpID, writeQuorum) - // Generate and write `xl.json` generated from other disks. - outDatedDisks, aErr := writeUniqueXLMetadata(ctx, outDatedDisks, minioMetaTmpBucket, tmpID, + // Generate and write `xl.meta` generated from other disks. 
+ outDatedDisks, err = writeUniqueFileInfo(ctx, outDatedDisks, minioMetaTmpBucket, tmpID, partsMetadata, diskCount(outDatedDisks)) - if aErr != nil { - return result, toObjectErr(aErr, bucket, object) + if err != nil { + return result, toObjectErr(err, bucket, object) } // Rename from tmp location to the actual location. for _, disk := range outDatedDisks { - if disk == nil { + if disk == OfflineDisk { continue } // Attempt a rename now from healed data to final location. - aErr = disk.RenameFile(minioMetaTmpBucket, retainSlash(tmpID), bucket, - retainSlash(object)) - if aErr != nil { - logger.LogIf(ctx, aErr) - return result, toObjectErr(aErr, bucket, object) + if err = disk.RenameData(minioMetaTmpBucket, tmpID, latestMeta.DataDir, bucket, object); err != nil { + logger.LogIf(ctx, err) + return result, toObjectErr(err, bucket, object) } for i, v := range result.Before.Drives { @@ -473,16 +460,16 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string } // Set the size of the object in the heal result - result.ObjectSize = latestMeta.Stat.Size + result.ObjectSize = latestMeta.Size return result, nil } // healObjectDir - heals object directory specifically, this special call // is needed since we do not have a special backend format for directories. -func (xl xlObjects) healObjectDir(ctx context.Context, bucket, object string, dryRun bool, remove bool) (hr madmin.HealResultItem, err error) { - storageDisks := xl.getDisks() - storageEndpoints := xl.getEndpoints() +func (er erasureObjects) healObjectDir(ctx context.Context, bucket, object string, dryRun bool, remove bool) (hr madmin.HealResultItem, err error) { + storageDisks := er.getDisks() + storageEndpoints := er.getEndpoints() // Initialize heal result object hr = madmin.HealResultItem{ @@ -502,7 +489,19 @@ func (xl xlObjects) healObjectDir(ctx context.Context, bucket, object string, dr danglingObject := isObjectDirDangling(errs) if danglingObject { if !dryRun && remove { - xl.deleteObject(ctx, bucket, object, hr.DataBlocks+1, true) + var wg sync.WaitGroup + // Remove versions in bulk for each disk + for index, disk := range storageDisks { + if disk == nil { + continue + } + wg.Add(1) + go func(index int, disk StorageAPI) { + defer wg.Done() + _ = disk.DeleteFile(bucket, object) + }(index, disk) + } + wg.Wait() } } @@ -548,7 +547,7 @@ func (xl xlObjects) healObjectDir(ctx context.Context, bucket, object string, dr // Populates default heal result item entries with possible values when we are returning prematurely. // This is to ensure that in any circumstance we are not returning empty arrays with wrong values. -func defaultHealResult(latestXLMeta xlMetaV1, storageDisks []StorageAPI, storageEndpoints []string, errs []error, bucket, object string) madmin.HealResultItem { +func defaultHealResult(latestFileInfo FileInfo, storageDisks []StorageAPI, storageEndpoints []string, errs []error, bucket, object string) madmin.HealResultItem { // Initialize heal result object result := madmin.HealResultItem{ Type: madmin.HealItemObject, @@ -560,8 +559,8 @@ func defaultHealResult(latestXLMeta xlMetaV1, storageDisks []StorageAPI, storage // unable to reliably find the object size. 
ObjectSize: -1,
 	}
-	if latestXLMeta.IsValid() {
-		result.ObjectSize = latestXLMeta.Stat.Size
+	if latestFileInfo.IsValid() {
+		result.ObjectSize = latestFileInfo.Size
 	}
 
 	for index, disk := range storageDisks {
@@ -595,13 +594,13 @@ func defaultHealResult(latestXLMeta xlMetaV1, storageDisks []StorageAPI, storage
 		})
 	}
 
-	if !latestXLMeta.IsValid() {
+	if !latestFileInfo.IsValid() {
 		// Default to most common configuration for erasure blocks.
 		result.ParityBlocks = getDefaultParityBlocks(len(storageDisks))
 		result.DataBlocks = getDefaultDataBlocks(len(storageDisks))
 	} else {
-		result.ParityBlocks = latestXLMeta.Erasure.ParityBlocks
-		result.DataBlocks = latestXLMeta.Erasure.DataBlocks
+		result.ParityBlocks = latestFileInfo.Erasure.ParityBlocks
+		result.DataBlocks = latestFileInfo.Erasure.DataBlocks
 	}
 
 	return result
@@ -616,7 +615,7 @@ func statAllDirs(ctx context.Context, storageDisks []StorageAPI, bucket, prefix
 		}
 		index := index
 		g.Go(func() error {
-			entries, err := storageDisks[index].ListDir(bucket, prefix, 1, "")
+			entries, err := storageDisks[index].ListDir(bucket, prefix, 1)
 			if err != nil {
 				return err
 			}
@@ -655,23 +654,23 @@ func isObjectDirDangling(errs []error) (ok bool) {
 // Object is considered dangling/corrupted if and only
 // if total disks - a combination of corrupted and missing
 // files is less than the number of data blocks.
-func isObjectDangling(metaArr []xlMetaV1, errs []error, dataErrs []error) (validMeta xlMetaV1, ok bool) {
+func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (validMeta FileInfo, ok bool) {
 	// We can consider an object data not reliable
-	// when xl.json is not found in read quorum disks.
-	// or when xl.json is not readable in read quorum disks.
-	var notFoundXLJSON, corruptedXLJSON int
+	// when er.meta is not found in read quorum disks,
+	// or when er.meta is not readable in read quorum disks.
+	var notFoundErasureJSON, corruptedErasureJSON int
 	for _, readErr := range errs {
 		if readErr == errFileNotFound {
-			notFoundXLJSON++
+			notFoundErasureJSON++
 		} else if readErr == errCorruptedFormat {
-			corruptedXLJSON++
+			corruptedErasureJSON++
 		}
 	}
 	var notFoundParts int
 	for i := range dataErrs {
 		// Only count part errors, if the error is not
-		// same as xl.json error. This is to avoid
-		// double counting when both parts and xl.json
+		// same as er.meta error. This is to avoid
+		// double counting when both parts and er.meta
 		// are not available.
 		if errs[i] != dataErrs[i] {
 			if dataErrs[i] == errFileNotFound {
@@ -694,11 +693,11 @@ func isObjectDangling(metaArr []xlMetaV1, errs []error, dataErrs []error) (valid
 	}
 
 	// We have valid meta, now verify if we have enough files with parity blocks.
-	return validMeta, corruptedXLJSON+notFoundXLJSON+notFoundParts > validMeta.Erasure.ParityBlocks
+	return validMeta, corruptedErasureJSON+notFoundErasureJSON+notFoundParts > validMeta.Erasure.ParityBlocks
 }
 
 // HealObject - heal the given object; automatically deletes the object if stale/corrupted when `remove` is true.
-func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (hr madmin.HealResultItem, err error) {
+func (er erasureObjects) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (hr madmin.HealResultItem, err error) {
 
 	// Create context that also contains information about the object and bucket.
 	// The top level handler might not have this information.
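	// Worked example for isObjectDangling above (illustrative numbers): with 4
	// data and 2 parity blocks, suppose er.meta is missing on two disks (two
	// errFileNotFound entries in errs) and a third disk is missing only its
	// part files (errFileNotFound in dataErrs where errs is nil). Then
	// notFoundErasureJSON + corruptedErasureJSON + notFoundParts = 3, which
	// exceeds ParityBlocks = 2, so the object is dangling and may be deleted
	// when healing runs with remove enabled.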
reqInfo := logger.GetReqInfo(ctx)
@@ -712,14 +711,14 @@ func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, opts
 
 	// Healing directories handle it separately.
 	if HasSuffix(object, SlashSeparator) {
-		return xl.healObjectDir(healCtx, bucket, object, opts.DryRun, opts.Remove)
+		return er.healObjectDir(healCtx, bucket, object, opts.DryRun, opts.Remove)
 	}
 
-	storageDisks := xl.getDisks()
-	storageEndpoints := xl.getEndpoints()
+	storageDisks := er.getDisks()
+	storageEndpoints := er.getEndpoints()
 
 	// Read metadata files from all the disks
-	partsMetadata, errs := readAllXLMetadata(healCtx, storageDisks, bucket, object)
+	partsMetadata, errs := readAllFileInfo(healCtx, storageDisks, bucket, object, versionID)
 
 	// Check if the object is dangling; if so and the user requested
 	// remove, we simply delete it from the namespace.
@@ -729,15 +728,15 @@ func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, opts
 			writeQuorum = getWriteQuorum(len(storageDisks))
 		}
 		if !opts.DryRun && opts.Remove {
-			xl.deleteObject(healCtx, bucket, object, writeQuorum, false)
+			er.deleteObject(healCtx, bucket, object, writeQuorum)
 		}
 		err = reduceReadQuorumErrs(ctx, errs, nil, writeQuorum-1)
-		return defaultHealResult(xlMetaV1{}, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object)
+		return defaultHealResult(FileInfo{}, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object)
 	}
 
-	latestXLMeta, err := getLatestXLMeta(healCtx, partsMetadata, errs)
+	latestFileInfo, err := getLatestFileInfo(healCtx, partsMetadata, errs)
 	if err != nil {
-		return defaultHealResult(xlMetaV1{}, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object)
+		return defaultHealResult(FileInfo{}, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object)
 	}
 
 	errCount := 0
@@ -751,20 +750,20 @@ func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, opts
 		// Only if we get errors from all the disks do we return an error. Else we need to
 		// continue to return the filled madmin.HealResultItem struct which includes info
 		// on what disks the file is available etc.
-		if err = reduceReadQuorumErrs(ctx, errs, nil, latestXLMeta.Erasure.DataBlocks); err != nil {
+		if err = reduceReadQuorumErrs(ctx, errs, nil, latestFileInfo.Erasure.DataBlocks); err != nil {
 			if m, ok := isObjectDangling(partsMetadata, errs, []error{}); ok {
 				writeQuorum := m.Erasure.DataBlocks + 1
 				if m.Erasure.DataBlocks == 0 {
 					writeQuorum = getWriteQuorum(len(storageDisks))
 				}
 				if !opts.DryRun && opts.Remove {
-					xl.deleteObject(ctx, bucket, object, writeQuorum, false)
+					er.deleteObject(ctx, bucket, object, writeQuorum)
 				}
 			}
-			return defaultHealResult(latestXLMeta, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object)
+			return defaultHealResult(latestFileInfo, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object)
 		}
 	}
 
 	// Heal the object.
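	// With versioning, callers now pass an explicit versionID, where the
	// empty string selects the latest version, exactly as the tests below
	// exercise it:
	//
	//	_, err := objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{
	//		ScanMode: madmin.HealNormalScan,
	//	})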
- return xl.healObject(healCtx, bucket, object, partsMetadata, errs, latestXLMeta, opts.DryRun, opts.Remove, opts.ScanMode) + return er.healObject(healCtx, bucket, object, partsMetadata, errs, latestFileInfo, opts.DryRun, opts.Remove, opts.ScanMode) } diff --git a/cmd/xl-v1-healing_test.go b/cmd/erasure-healing_test.go similarity index 53% rename from cmd/xl-v1-healing_test.go rename to cmd/erasure-healing_test.go index 3afd9ed71..a4f1addea 100644 --- a/cmd/xl-v1-healing_test.go +++ b/cmd/erasure-healing_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,12 +19,127 @@ package cmd import ( "bytes" "context" - "path/filepath" + "crypto/rand" + "os" + "path" + "reflect" "testing" + "time" + "github.com/dustin/go-humanize" "github.com/minio/minio/pkg/madmin" ) +// Tests both object and bucket healing. +func TestHealing(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + obj, fsDirs, err := prepareErasure16(ctx) + if err != nil { + t.Fatal(err) + } + defer removeRoots(fsDirs) + + z := obj.(*erasureZones) + er := z.zones[0].sets[0] + + // Create "bucket" + err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) + if err != nil { + t.Fatal(err) + } + + bucket := "bucket" + object := "object" + + data := make([]byte, 1*humanize.MiByte) + length := int64(len(data)) + _, err = rand.Read(data) + if err != nil { + t.Fatal(err) + } + + _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), length, "", ""), ObjectOptions{}) + if err != nil { + t.Fatal(err) + } + + disk := er.getDisks()[0] + fileInfoPreHeal, err := disk.ReadVersion(bucket, object, "") + if err != nil { + t.Fatal(err) + } + + // Remove the object - to simulate the case where the disk was down when the object + // was created. + err = removeAll(pathJoin(disk.String(), bucket, object)) + if err != nil { + t.Fatal(err) + } + + _, err = er.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan}) + if err != nil { + t.Fatal(err) + } + + fileInfoPostHeal, err := disk.ReadVersion(bucket, object, "") + if err != nil { + t.Fatal(err) + } + + // After heal the meta file should be as expected. + if !reflect.DeepEqual(fileInfoPreHeal, fileInfoPostHeal) { + t.Fatal("HealObject failed") + } + + err = os.RemoveAll(path.Join(fsDirs[0], bucket, object, "er.meta")) + if err != nil { + t.Fatal(err) + } + + // Write er.meta with different modtime to simulate the case where a disk had + // gone down when an object was replaced by a new object. + fileInfoOutDated := fileInfoPreHeal + fileInfoOutDated.ModTime = time.Now() + err = disk.WriteMetadata(bucket, object, fileInfoOutDated) + if err != nil { + t.Fatal(err) + } + + _, err = er.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealDeepScan}) + if err != nil { + t.Fatal(err) + } + + fileInfoPostHeal, err = disk.ReadVersion(bucket, object, "") + if err != nil { + t.Fatal(err) + } + + // After heal the meta file should be as expected. + if !reflect.DeepEqual(fileInfoPreHeal, fileInfoPostHeal) { + t.Fatal("HealObject failed") + } + + // Remove the bucket - to simulate the case where bucket was + // created when the disk was down. + err = os.RemoveAll(path.Join(fsDirs[0], bucket)) + if err != nil { + t.Fatal(err) + } + // This would create the bucket. 
+ _, err = er.HealBucket(ctx, bucket, false, false) + if err != nil { + t.Fatal(err) + } + // Stat the bucket to make sure that it was created. + _, err = er.getDisks()[0].StatVol(bucket) + if err != nil { + t.Fatal(err) + } +} + func TestHealObjectCorrupted(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -51,7 +166,7 @@ func TestHealObjectCorrupted(t *testing.T) { data := bytes.Repeat([]byte("a"), 5*1024*1024) var opts ObjectOptions - err = objLayer.MakeBucketWithLocation(ctx, bucket, "", false) + err = objLayer.MakeBucketWithLocation(ctx, bucket, BucketOptions{}) if err != nil { t.Fatalf("Failed to make a bucket - %v", err) } @@ -81,91 +196,96 @@ func TestHealObjectCorrupted(t *testing.T) { } // Test 1: Remove the object backend files from the first disk. - z := objLayer.(*xlZones) - xl := z.zones[0].sets[0] - firstDisk := xl.getDisks()[0] - err = firstDisk.DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile)) + z := objLayer.(*erasureZones) + er := z.zones[0].sets[0] + erasureDisks := er.getDisks() + firstDisk := erasureDisks[0] + err = firstDisk.DeleteFile(bucket, pathJoin(object, xlStorageFormatFile)) if err != nil { t.Fatalf("Failed to delete a file - %v", err) } - _, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan}) + _, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan}) if err != nil { t.Fatalf("Failed to heal object - %v", err) } - _, err = firstDisk.StatFile(bucket, filepath.Join(object, xlMetaJSONFile)) + fileInfos, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "") + fi, err := getLatestFileInfo(ctx, fileInfos, errs) if err != nil { - t.Errorf("Expected xl.json file to be present but stat failed - %v", err) + t.Fatalf("Failed to getLatestFileInfo - %v", err) } - // Test 2: Heal when part.1 is empty - partSt1, err := firstDisk.StatFile(bucket, filepath.Join(object, "part.1")) - if err != nil { - t.Errorf("Expected part.1 file to be present but stat failed - %v", err) + if err = firstDisk.CheckFile(bucket, object); err != nil { + t.Errorf("Expected er.meta file to be present but stat failed - %v", err) } - err = firstDisk.DeleteFile(bucket, filepath.Join(object, "part.1")) + + err = firstDisk.DeleteFile(bucket, pathJoin(object, fi.DataDir, "part.1")) if err != nil { t.Errorf("Failure during deleting part.1 - %v", err) } - err = firstDisk.WriteAll(bucket, filepath.Join(object, "part.1"), bytes.NewReader([]byte{})) + + err = firstDisk.WriteAll(bucket, pathJoin(object, fi.DataDir, "part.1"), bytes.NewReader([]byte{})) if err != nil { t.Errorf("Failure during creating part.1 - %v", err) } - _, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan}) + + _, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan}) if err != nil { t.Errorf("Expected nil but received %v", err) } - partSt2, err := firstDisk.StatFile(bucket, filepath.Join(object, "part.1")) + + fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "") + nfi, err := getLatestFileInfo(ctx, fileInfos, errs) if err != nil { - t.Errorf("Expected from part.1 file to be present but stat failed - %v", err) - } - if partSt1.Size != partSt2.Size { - t.Errorf("part.1 file size is not the same before and after heal") + t.Fatalf("Failed to getLatestFileInfo - %v", err) } - // Test 3: Heal when part.1 is correct in size but corrupted 
- partSt1, err = firstDisk.StatFile(bucket, filepath.Join(object, "part.1")) - if err != nil { - t.Errorf("Expected part.1 file to be present but stat failed - %v", err) + if !reflect.DeepEqual(fi, nfi) { + t.Fatalf("FileInfo not equal after healing") } - err = firstDisk.DeleteFile(bucket, filepath.Join(object, "part.1")) + + err = firstDisk.DeleteFile(bucket, pathJoin(object, fi.DataDir, "part.1")) if err != nil { t.Errorf("Failure during deleting part.1 - %v", err) } - bdata := bytes.Repeat([]byte("b"), int(partSt1.Size)) - err = firstDisk.WriteAll(bucket, filepath.Join(object, "part.1"), bytes.NewReader(bdata)) + + bdata := bytes.Repeat([]byte("b"), int(nfi.Size)) + err = firstDisk.WriteAll(bucket, pathJoin(object, fi.DataDir, "part.1"), bytes.NewReader(bdata)) if err != nil { t.Errorf("Failure during creating part.1 - %v", err) } - _, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan}) + + _, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan}) if err != nil { t.Errorf("Expected nil but received %v", err) } - partSt2, err = firstDisk.StatFile(bucket, filepath.Join(object, "part.1")) + + fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "") + nfi, err = getLatestFileInfo(ctx, fileInfos, errs) if err != nil { - t.Errorf("Expected from part.1 file to be present but stat failed - %v", err) - } - if partSt1.Size != partSt2.Size { - t.Errorf("part.1 file size is not the same before and after heal") + t.Fatalf("Failed to getLatestFileInfo - %v", err) } - // Test 4: checks if HealObject returns an error when xl.json is not found + if !reflect.DeepEqual(fi, nfi) { + t.Fatalf("FileInfo not equal after healing") + } + + // Test 4: checks if HealObject returns an error when xl.meta is not found // in more than read quorum number of disks, to create a corrupted situation. - - for i := 0; i <= len(xl.getDisks())/2; i++ { - xl.getDisks()[i].DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile)) + for i := 0; i <= len(er.getDisks())/2; i++ { + er.getDisks()[i].DeleteFile(bucket, pathJoin(object, xlStorageFormatFile)) } // Try healing now, expect to receive errFileNotFound. - _, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan}) + _, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan}) if err != nil { if _, ok := err.(ObjectNotFound); !ok { t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err) } } - // since majority of xl.jsons are not available, object should be successfully deleted. + // since majority of xl.meta's are not available, object should be successfully deleted. _, err = objLayer.GetObjectInfo(ctx, bucket, object, ObjectOptions{}) if _, ok := err.(ObjectNotFound); !ok { t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err) @@ -173,7 +293,7 @@ func TestHealObjectCorrupted(t *testing.T) { } // Tests healing of object. 
-func TestHealObjectXL(t *testing.T) { +func TestHealObjectErasure(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -196,7 +316,7 @@ func TestHealObjectXL(t *testing.T) { data := bytes.Repeat([]byte("a"), 5*1024*1024) var opts ObjectOptions - err = obj.MakeBucketWithLocation(ctx, bucket, "", false) + err = obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{}) if err != nil { t.Fatalf("Failed to make a bucket - %v", err) } @@ -220,51 +340,51 @@ func TestHealObjectXL(t *testing.T) { }) } + // Remove the object backend files from the first disk. + z := obj.(*erasureZones) + er := z.zones[0].sets[0] + firstDisk := er.getDisks()[0] + _, err = obj.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{}) if err != nil { t.Fatalf("Failed to complete multipart upload - %v", err) } - // Remove the object backend files from the first disk. - z := obj.(*xlZones) - xl := z.zones[0].sets[0] - firstDisk := xl.getDisks()[0] - err = firstDisk.DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile)) + err = firstDisk.DeleteFile(bucket, pathJoin(object, xlStorageFormatFile)) if err != nil { t.Fatalf("Failed to delete a file - %v", err) } - _, err = obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan}) + _, err = obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan}) if err != nil { t.Fatalf("Failed to heal object - %v", err) } - _, err = firstDisk.StatFile(bucket, filepath.Join(object, xlMetaJSONFile)) - if err != nil { - t.Errorf("Expected xl.json file to be present but stat failed - %v", err) + if err = firstDisk.CheckFile(bucket, object); err != nil { + t.Errorf("Expected er.meta file to be present but stat failed - %v", err) } - xlDisks := xl.getDisks() - z.zones[0].xlDisksMu.Lock() - xl.getDisks = func() []StorageAPI { + erasureDisks := er.getDisks() + z.zones[0].erasureDisksMu.Lock() + er.getDisks = func() []StorageAPI { // Nil more than half the disks, to remove write quorum. - for i := 0; i <= len(xlDisks)/2; i++ { - xlDisks[i] = nil + for i := 0; i <= len(erasureDisks)/2; i++ { + erasureDisks[i] = nil } - return xlDisks + return erasureDisks } - z.zones[0].xlDisksMu.Unlock() + z.zones[0].erasureDisksMu.Unlock() // Try healing now, expect to receive errDiskNotFound. - _, err = obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealDeepScan}) - // since majority of xl.jsons are not available, object quorum can't be read properly and error will be errXLReadQuorum + _, err = obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealDeepScan}) + // since majority of er.meta's are not available, object quorum can't be read properly and error will be errErasureReadQuorum if _, ok := err.(InsufficientReadQuorum); !ok { t.Errorf("Expected %v but received %v", InsufficientReadQuorum{}, err) } } // Tests healing of empty directories -func TestHealEmptyDirectoryXL(t *testing.T) { +func TestHealEmptyDirectoryErasure(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -285,7 +405,7 @@ func TestHealEmptyDirectoryXL(t *testing.T) { object := "empty-dir/" var opts ObjectOptions - err = obj.MakeBucketWithLocation(ctx, bucket, "", false) + err = obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{}) if err != nil { t.Fatalf("Failed to make a bucket - %v", err) } @@ -298,16 +418,16 @@ func TestHealEmptyDirectoryXL(t *testing.T) { } // Remove the object backend files from the first disk. 
- z := obj.(*xlZones) - xl := z.zones[0].sets[0] - firstDisk := xl.getDisks()[0] + z := obj.(*erasureZones) + er := z.zones[0].sets[0] + firstDisk := er.getDisks()[0] err = firstDisk.DeleteFile(bucket, object) if err != nil { t.Fatalf("Failed to delete a file - %v", err) } // Heal the object - hr, err := obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan}) + hr, err := obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan}) if err != nil { t.Fatalf("Failed to heal object - %v", err) } @@ -331,7 +451,7 @@ func TestHealEmptyDirectoryXL(t *testing.T) { } // Heal the same object again - hr, err = obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan}) + hr, err = obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan}) if err != nil { t.Fatalf("Failed to heal object - %v", err) } diff --git a/cmd/erasure-list-objects.go b/cmd/erasure-list-objects.go new file mode 100644 index 000000000..d5c2c6b0d --- /dev/null +++ b/cmd/erasure-list-objects.go @@ -0,0 +1,58 @@ +/* + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "context" + + "github.com/minio/minio/pkg/madmin" +) + +// ListObjectVersions - This is not implemented, look for erasure-zones.ListObjectVersions() +func (er erasureObjects) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (loi ListObjectVersionsInfo, e error) { + return loi, NotImplemented{} +} + +// ListObjectsV2 - This is not implemented/needed anymore, look for erasure-zones.ListObjectsV2() +func (er erasureObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi ListObjectsV2Info, e error) { + return loi, NotImplemented{} +} + +// ListObjects - This is not implemented/needed anymore, look for erasure-zones.ListObjects() +func (er erasureObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { + return loi, NotImplemented{} +} + +// ListBucketsHeal - This is not implemented/needed anymore, look for erasure-zones.ListBucketHeal() +func (er erasureObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { + return nil, NotImplemented{} +} + +// ListObjectsHeal - This is not implemented, look for erasure-zones.ListObjectsHeal() +func (er erasureObjects) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { + return ListObjectsInfo{}, NotImplemented{} +} + +// HealObjects - This is not implemented/needed anymore, look for erasure-zones.HealObjects() +func (er erasureObjects) HealObjects(ctx context.Context, bucket, prefix string, _ madmin.HealOpts, _ HealObjectFn) (e error) { + return NotImplemented{} +} + +// Walk - This is not implemented/needed anymore, look 
for erasure-zones.Walk() +func (er erasureObjects) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error { + return NotImplemented{} +} diff --git a/cmd/erasure-heal.go b/cmd/erasure-lowlevel-heal.go similarity index 96% rename from cmd/erasure-heal.go rename to cmd/erasure-lowlevel-heal.go index 947416abf..66b031d92 100644 --- a/cmd/erasure-heal.go +++ b/cmd/erasure-lowlevel-heal.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cmd/xl-v1-utils.go b/cmd/erasure-metadata-utils.go similarity index 81% rename from cmd/xl-v1-utils.go rename to cmd/erasure-metadata-utils.go index c77b2ed4e..ca78b9678 100644 --- a/cmd/xl-v1-utils.go +++ b/cmd/erasure-metadata-utils.go @@ -20,9 +20,7 @@ import ( "context" "errors" "hash/crc32" - "path" - jsoniter "github.com/json-iterator/go" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/sync/errgroup" ) @@ -72,13 +70,13 @@ func reduceQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, qu // reduceReadQuorumErrs behaves like reduceErrs but only for returning // values of maximally occurring errors validated against readQuorum. func reduceReadQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, readQuorum int) (maxErr error) { - return reduceQuorumErrs(ctx, errs, ignoredErrs, readQuorum, errXLReadQuorum) + return reduceQuorumErrs(ctx, errs, ignoredErrs, readQuorum, errErasureReadQuorum) } // reduceWriteQuorumErrs behaves like reduceErrs but only for returning // values of maximally occurring errors validated against writeQuorum. func reduceWriteQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, writeQuorum int) (maxErr error) { - return reduceQuorumErrs(ctx, errs, ignoredErrs, writeQuorum, errXLWriteQuorum) + return reduceQuorumErrs(ctx, errs, ignoredErrs, writeQuorum, errErasureWriteQuorum) } // Similar to 'len(slice)' but returns the actual elements count @@ -115,44 +113,26 @@ func hashOrder(key string, cardinality int) []int { return nums } -// Constructs xlMetaV1 using `jsoniter` lib. -func xlMetaV1UnmarshalJSON(ctx context.Context, xlMetaBuf []byte) (xlMeta xlMetaV1, err error) { - var json = jsoniter.ConfigCompatibleWithStandardLibrary - err = json.Unmarshal(xlMetaBuf, &xlMeta) - return xlMeta, err -} - -// readXLMeta reads `xl.json` and returns back XL metadata structure. -func readXLMeta(ctx context.Context, disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1, err error) { - // Reads entire `xl.json`. - xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile)) - if err != nil { - if err != errFileNotFound && err != errVolumeNotFound { - logger.GetReqInfo(ctx).AppendTags("disk", disk.String()) - logger.LogIf(ctx, err) - } - return xlMetaV1{}, err - } - if len(xlMetaBuf) == 0 { - return xlMetaV1{}, errFileNotFound - } - return xlMetaV1UnmarshalJSON(ctx, xlMetaBuf) -} - -// Reads all `xl.json` metadata as a xlMetaV1 slice. +// Reads all `xl.meta` metadata as a FileInfo slice. // Returns error slice indicating the failed metadata reads. 
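// For example (mirroring the healing tests earlier in this patch), callers
// read every copy and then derive the authoritative one:
//
//	fileInfos, errs := readAllFileInfo(ctx, disks, bucket, object, "")
//	fi, err := getLatestFileInfo(ctx, fileInfos, errs)
//
// where the empty versionID asks each disk's ReadVersion for the latest
// version of the object.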
-func readAllXLMetadata(ctx context.Context, disks []StorageAPI, bucket, object string) ([]xlMetaV1, []error) {
-	metadataArray := make([]xlMetaV1, len(disks))
+func readAllFileInfo(ctx context.Context, disks []StorageAPI, bucket, object, versionID string) ([]FileInfo, []error) {
+	metadataArray := make([]FileInfo, len(disks))
 
 	g := errgroup.WithNErrs(len(disks))
 
-	// Read `xl.json` parallelly across disks.
+	// Read `xl.meta` in parallel across disks.
 	for index := range disks {
 		index := index
 		g.Go(func() (err error) {
 			if disks[index] == nil {
 				return errDiskNotFound
 			}
-			metadataArray[index], err = readXLMeta(ctx, disks[index], bucket, object)
+			metadataArray[index], err = disks[index].ReadVersion(bucket, object, versionID)
+			if err != nil {
+				if err != errFileNotFound && err != errVolumeNotFound && err != errFileVersionNotFound {
+					logger.GetReqInfo(ctx).AppendTags("disk", disks[index].String())
+					logger.LogIf(ctx, err)
+				}
+			}
 			return err
 		}, index)
 	}
@@ -162,11 +142,11 @@ func readAllXLMetadata(ctx context.Context, disks []StorageAPI, bucket, object s
 }
 
 // Return shuffled partsMetadata depending on distribution.
-func shufflePartsMetadata(partsMetadata []xlMetaV1, distribution []int) (shuffledPartsMetadata []xlMetaV1) {
+func shufflePartsMetadata(partsMetadata []FileInfo, distribution []int) (shuffledPartsMetadata []FileInfo) {
 	if distribution == nil {
 		return partsMetadata
 	}
-	shuffledPartsMetadata = make([]xlMetaV1, len(partsMetadata))
+	shuffledPartsMetadata = make([]FileInfo, len(partsMetadata))
 	// Shuffle slice xl metadata for expected distribution.
 	for index := range partsMetadata {
 		blockIndex := distribution[index]
diff --git a/cmd/erasure-metadata-utils_test.go b/cmd/erasure-metadata-utils_test.go
new file mode 100644
index 000000000..43341eb1d
--- /dev/null
+++ b/cmd/erasure-metadata-utils_test.go
@@ -0,0 +1,201 @@
+/*
+ * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cmd
+
+import (
+	"context"
+	"reflect"
+	"testing"
+)
+
+// Tests calculating disk count.
+func TestDiskCount(t *testing.T) {
+	testCases := []struct {
+		disks     []StorageAPI
+		diskCount int
+	}{
+		// Test case - 1
+		{
+			disks:     []StorageAPI{&xlStorage{}, &xlStorage{}, &xlStorage{}, &xlStorage{}},
+			diskCount: 4,
+		},
+		// Test case - 2
+		{
+			disks:     []StorageAPI{nil, &xlStorage{}, &xlStorage{}, &xlStorage{}},
+			diskCount: 3,
+		},
+	}
+	for i, testCase := range testCases {
+		cdiskCount := diskCount(testCase.disks)
+		if cdiskCount != testCase.diskCount {
+			t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.diskCount, cdiskCount)
+		}
+	}
+}
+
+// Test for reduceErrs: reduceErrs reduces a collection
+// of errors into a single maximal error within the list.
+func TestReduceErrs(t *testing.T) {
+	// List of all test cases to validate various cases of reduce errors.
+	testCases := []struct {
+		errs        []error
+		ignoredErrs []error
+		err         error
+	}{
+		// Validate if have reduced properly.
+ {[]error{ + errDiskNotFound, + errDiskNotFound, + errDiskFull, + }, []error{}, errErasureReadQuorum}, + // Validate if have no consensus. + {[]error{ + errDiskFull, + errDiskNotFound, + nil, nil, + }, []error{}, errErasureReadQuorum}, + // Validate if have consensus and errors ignored. + {[]error{ + errVolumeNotFound, + errVolumeNotFound, + errVolumeNotFound, + errVolumeNotFound, + errVolumeNotFound, + errDiskNotFound, + errDiskNotFound, + }, []error{errDiskNotFound}, errVolumeNotFound}, + {[]error{}, []error{}, errErasureReadQuorum}, + {[]error{errFileNotFound, errFileNotFound, errFileNotFound, + errFileNotFound, errFileNotFound, nil, nil, nil, nil, nil}, + nil, nil}, + } + // Validates list of all the testcases for returning valid errors. + for i, testCase := range testCases { + gotErr := reduceReadQuorumErrs(context.Background(), testCase.errs, testCase.ignoredErrs, 5) + if gotErr != testCase.err { + t.Errorf("Test %d : expected %s, got %s", i+1, testCase.err, gotErr) + } + gotNewErr := reduceWriteQuorumErrs(context.Background(), testCase.errs, testCase.ignoredErrs, 6) + if gotNewErr != errErasureWriteQuorum { + t.Errorf("Test %d : expected %s, got %s", i+1, errErasureWriteQuorum, gotErr) + } + } +} + +// TestHashOrder - test order of ints in array +func TestHashOrder(t *testing.T) { + testCases := []struct { + objectName string + hashedOrder []int + }{ + // cases which should pass the test. + // passing in valid object name. + {"object", []int{14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}}, + {"The Shining Script .pdf", []int{16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}, + {"Cost Benefit Analysis (2009-2010).pptx", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}}, + {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}}, + {"SHØRT", []int{11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, + {"There are far too many object names, and far too few bucket names!", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}}, + {"a/b/c/", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}}, + {"/a/b/c", []int{6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5}}, + {string([]byte{0xff, 0xfe, 0xfd}), []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}}, + } + + // Tests hashing order to be consistent. + for i, testCase := range testCases { + hashedOrder := hashOrder(testCase.objectName, 16) + if !reflect.DeepEqual(testCase.hashedOrder, hashedOrder) { + t.Errorf("Test case %d: Expected \"%v\" but failed \"%v\"", i+1, testCase.hashedOrder, hashedOrder) + } + } + + // Tests hashing order to fail for when order is '-1'. + if hashedOrder := hashOrder("This will fail", -1); hashedOrder != nil { + t.Errorf("Test: Expect \"nil\" but failed \"%#v\"", hashedOrder) + } + + if hashedOrder := hashOrder("This will fail", 0); hashedOrder != nil { + t.Errorf("Test: Expect \"nil\" but failed \"%#v\"", hashedOrder) + } +} + +func TestShuffleDisks(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nDisks := 16 + disks, err := getRandomDisks(nDisks) + if err != nil { + t.Fatal(err) + } + objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...)) + if err != nil { + removeRoots(disks) + t.Fatal(err) + } + defer removeRoots(disks) + z := objLayer.(*erasureZones) + testShuffleDisks(t, z) +} + +// Test shuffleDisks which returns shuffled slice of disks for their actual distribution. 
+func testShuffleDisks(t *testing.T, z *erasureZones) { + disks := z.zones[0].GetDisks(0)() + distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15} + shuffledDisks := shuffleDisks(disks, distribution) + // From the "distribution" above you can notice that: + // 1st data block is in the 9th disk (i.e distribution index 8) + // 2nd data block is in the 8th disk (i.e distribution index 7) and so on. + if shuffledDisks[0] != disks[8] || + shuffledDisks[1] != disks[7] || + shuffledDisks[2] != disks[9] || + shuffledDisks[3] != disks[6] || + shuffledDisks[4] != disks[10] || + shuffledDisks[5] != disks[5] || + shuffledDisks[6] != disks[11] || + shuffledDisks[7] != disks[4] || + shuffledDisks[8] != disks[12] || + shuffledDisks[9] != disks[3] || + shuffledDisks[10] != disks[13] || + shuffledDisks[11] != disks[2] || + shuffledDisks[12] != disks[14] || + shuffledDisks[13] != disks[1] || + shuffledDisks[14] != disks[15] || + shuffledDisks[15] != disks[0] { + t.Errorf("shuffleDisks returned incorrect order.") + } +} + +// TestEvalDisks tests the behavior of evalDisks +func TestEvalDisks(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nDisks := 16 + disks, err := getRandomDisks(nDisks) + if err != nil { + t.Fatal(err) + } + objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...)) + if err != nil { + removeRoots(disks) + t.Fatal(err) + } + defer removeRoots(disks) + z := objLayer.(*erasureZones) + testShuffleDisks(t, z) +} diff --git a/cmd/erasure-metadata.go b/cmd/erasure-metadata.go new file mode 100644 index 000000000..661032586 --- /dev/null +++ b/cmd/erasure-metadata.go @@ -0,0 +1,326 @@ +/* + * MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "context" + "encoding/hex" + "fmt" + "net/http" + "sort" + "time" + + xhttp "github.com/minio/minio/cmd/http" + "github.com/minio/minio/cmd/logger" + "github.com/minio/minio/pkg/sync/errgroup" + "github.com/minio/sha256-simd" +) + +const erasureAlgorithm = "rs-vandermonde" + +// byObjectPartNumber is a collection satisfying sort.Interface. +type byObjectPartNumber []ObjectPartInfo + +func (t byObjectPartNumber) Len() int { return len(t) } +func (t byObjectPartNumber) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t byObjectPartNumber) Less(i, j int) bool { return t[i].Number < t[j].Number } + +// AddChecksumInfo adds a checksum of a part. +func (e *ErasureInfo) AddChecksumInfo(ckSumInfo ChecksumInfo) { + for i, sum := range e.Checksums { + if sum.PartNumber == ckSumInfo.PartNumber { + e.Checksums[i] = ckSumInfo + return + } + } + e.Checksums = append(e.Checksums, ckSumInfo) +} + +// GetChecksumInfo - get checksum of a part. 
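// Worked example for ShardFileSize/ShardSize defined a few lines below: with
// BlockSize = 1 MiB and DataBlocks = 4, ShardSize() = ceilFrac(1048576, 4) =
// 262144 bytes. For a 2.5 MiB object, numShards = 2, lastBlockSize = 524288,
// lastShardSize = ceilFrac(524288, 4) = 131072, so ShardFileSize(2621440) =
// 2*262144 + 131072 = 655360 bytes, i.e. exactly 2.5 MiB / 4.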
+func (e ErasureInfo) GetChecksumInfo(partNumber int) (ckSum ChecksumInfo) {
+	for _, sum := range e.Checksums {
+		if sum.PartNumber == partNumber {
+			// Return the checksum
+			return sum
+		}
+	}
+	return ChecksumInfo{}
+}
+
+// ShardFileSize - returns final erasure size from original size.
+func (e ErasureInfo) ShardFileSize(totalLength int64) int64 {
+	if totalLength == 0 {
+		return 0
+	}
+	if totalLength == -1 {
+		return -1
+	}
+	numShards := totalLength / e.BlockSize
+	lastBlockSize := totalLength % e.BlockSize
+	lastShardSize := ceilFrac(lastBlockSize, int64(e.DataBlocks))
+	return numShards*e.ShardSize() + lastShardSize
+}
+
+// ShardSize - returns actual shard size from erasure blockSize.
+func (e ErasureInfo) ShardSize() int64 {
+	return ceilFrac(e.BlockSize, int64(e.DataBlocks))
+}
+
+// IsValid - tells if erasure info fields are valid.
+func (fi FileInfo) IsValid() bool {
+	if fi.Deleted {
+		// Delete marker has no data, no need to check
+		// for erasure coding information
+		return true
+	}
+	data := fi.Erasure.DataBlocks
+	parity := fi.Erasure.ParityBlocks
+	return ((data >= parity) && (data != 0) && (parity != 0))
+}
+
+// ToObjectInfo - Converts metadata to object info.
+func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
+	if HasSuffix(object, SlashSeparator) {
+		return ObjectInfo{
+			Bucket: bucket,
+			Name:   object,
+			IsDir:  true,
+		}
+	}
+	objInfo := ObjectInfo{
+		IsDir:           false,
+		Bucket:          bucket,
+		Name:            object,
+		VersionID:       fi.VersionID,
+		IsLatest:        fi.IsLatest,
+		DeleteMarker:    fi.Deleted,
+		Size:            fi.Size,
+		ModTime:         fi.ModTime,
+		ContentType:     fi.Metadata["content-type"],
+		ContentEncoding: fi.Metadata["content-encoding"],
+	}
+	// Update expires
+	var (
+		t time.Time
+		e error
+	)
+	if exp, ok := fi.Metadata["expires"]; ok {
+		if t, e = time.Parse(http.TimeFormat, exp); e == nil {
+			objInfo.Expires = t.UTC()
+		}
+	}
+	objInfo.backendType = BackendErasure
+
+	// Extract etag from metadata.
+	objInfo.ETag = extractETag(fi.Metadata)
+
+	// Add user tags to the object info
+	objInfo.UserTags = fi.Metadata[xhttp.AmzObjectTagging]
+
+	// etag/md5Sum has already been extracted. We need to
+	// remove it to avoid it appearing as part of
+	// response headers, e.g. X-Minio-* or X-Amz-*.
+	// Tags have also been extracted, so we remove those as well.
+	objInfo.UserDefined = cleanMetadata(fi.Metadata)
+
+	// All the parts per object.
+	objInfo.Parts = fi.Parts
+
+	// Update storage class
+	if sc, ok := fi.Metadata[xhttp.AmzStorageClass]; ok {
+		objInfo.StorageClass = sc
+	} else {
+		objInfo.StorageClass = globalMinioDefaultStorageClass
+	}
+
+	// Success.
+	return objInfo
+}
+
+// objectPartIndex - returns the index of matching object part number.
+func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
+	for i, part := range parts {
+		if partNumber == part.Number {
+			return i
+		}
+	}
+	return -1
+}
+
+// AddObjectPart - add a new object part in order.
+func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize int64, actualSize int64) {
+	partInfo := ObjectPartInfo{
+		Number:     partNumber,
+		ETag:       partETag,
+		Size:       partSize,
+		ActualSize: actualSize,
+	}
+
+	// Update part info if it already exists.
+	for i, part := range fi.Parts {
+		if partNumber == part.Number {
+			fi.Parts[i] = partInfo
+			return
+		}
+	}
+
+	// Proceed to include new part info.
+	fi.Parts = append(fi.Parts, partInfo)
+
+	// Parts in FileInfo should be in sorted order by part number.
+	sort.Sort(byObjectPartNumber(fi.Parts))
+}
+
+// ObjectToPartOffset - translate offset of an object to offset of its individual part.
+func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) {
+	if offset == 0 {
+		// Special case - if offset is 0, then partIndex and partOffset are always 0.
+		return 0, 0, nil
+	}
+	partOffset = offset
+	// Seek until object offset maps to a particular part offset.
+	for i, part := range fi.Parts {
+		partIndex = i
+		// Offset is smaller than the part size, we have reached the proper part offset.
+		if partOffset < part.Size {
+			return partIndex, partOffset, nil
+		}
+		// Continue towards the next part.
+		partOffset -= part.Size
+	}
	logger.LogIf(ctx, InvalidRange{})
+	// Offset beyond the size of the object, return InvalidRange.
+	return 0, 0, InvalidRange{}
+}
+
+func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, quorum int) (xmv FileInfo, e error) {
+	metaHashes := make([]string, len(metaArr))
+	for i, meta := range metaArr {
+		if meta.IsValid() && meta.ModTime.Equal(modTime) {
+			h := sha256.New()
+			for _, part := range meta.Parts {
+				h.Write([]byte(fmt.Sprintf("part.%d", part.Number)))
+			}
+			metaHashes[i] = hex.EncodeToString(h.Sum(nil))
+		}
+	}
+
+	metaHashCountMap := make(map[string]int)
+	for _, hash := range metaHashes {
+		if hash == "" {
+			continue
+		}
+		metaHashCountMap[hash]++
+	}
+
+	maxHash := ""
+	maxCount := 0
+	for hash, count := range metaHashCountMap {
+		if count > maxCount {
+			maxCount = count
+			maxHash = hash
+		}
+	}
+
+	if maxCount < quorum {
+		return FileInfo{}, errErasureReadQuorum
+	}
+
+	for i, hash := range metaHashes {
+		if hash == maxHash {
+			return metaArr[i], nil
+		}
+	}
+
+	return FileInfo{}, errErasureReadQuorum
+}
+
+// pickValidFileInfo - picks one valid FileInfo content and returns from a
+// slice of FileInfo.
+func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, quorum int) (xmv FileInfo, e error) {
+	return findFileInfoInQuorum(ctx, metaArr, modTime, quorum)
+}
+
+// Rename metadata content to destination location for each disk concurrently.
+func renameFileInfo(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, quorum int) ([]StorageAPI, error) {
+	ignoredErr := []error{errFileNotFound}
+
+	g := errgroup.WithNErrs(len(disks))
+
+	// Rename file on all underlying storage disks.
+	for index := range disks {
+		index := index
+		g.Go(func() error {
+			if disks[index] == nil {
+				return errDiskNotFound
+			}
+			if err := disks[index].RenameData(srcBucket, srcEntry, "", dstBucket, dstEntry); err != nil {
+				if !IsErrIgnored(err, ignoredErr...) {
+					return err
+				}
+			}
+			return nil
+		}, index)
+	}
+
+	// Wait for all renames to finish.
+	errs := g.Wait()
+
+	// We can safely allow RenameData errors up to len(er.getDisks()) - writeQuorum
+	// otherwise return failure. Cleanup successful renames.
+	err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, quorum)
+	return evalDisks(disks, errs), err
+}
+
+// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.
+func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix string, files []FileInfo, quorum int) ([]StorageAPI, error) {
+	g := errgroup.WithNErrs(len(disks))
+
+	// Start writing `xl.meta` to all disks in parallel.
+ for index := range disks { + index := index + g.Go(func() error { + if disks[index] == nil { + return errDiskNotFound + } + // Pick one FileInfo for a disk at index. + files[index].Erasure.Index = index + 1 + return disks[index].WriteMetadata(bucket, prefix, files[index]) + }, index) + } + + // Wait for all the routines. + mErrs := g.Wait() + + err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum) + return evalDisks(disks, mErrs), err +} + +// Returns per object readQuorum and writeQuorum +// readQuorum is the min required disks to read data. +// writeQuorum is the min required disks to write data. +func objectQuorumFromMeta(ctx context.Context, er erasureObjects, partsMetaData []FileInfo, errs []error) (objectReadQuorum, objectWriteQuorum int, err error) { + // get the latest updated Metadata and a count of all the latest updated FileInfo(s) + latestFileInfo, err := getLatestFileInfo(ctx, partsMetaData, errs) + if err != nil { + return 0, 0, err + } + + // Since all the valid erasure code meta updated at the same time are equivalent, pass dataBlocks + // from latestFileInfo to get the quorum + return latestFileInfo.Erasure.DataBlocks, latestFileInfo.Erasure.DataBlocks + 1, nil +} diff --git a/cmd/erasure-metadata_test.go b/cmd/erasure-metadata_test.go new file mode 100644 index 000000000..438955e7e --- /dev/null +++ b/cmd/erasure-metadata_test.go @@ -0,0 +1,153 @@ +/* + * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "context" + "strconv" + "testing" + + humanize "github.com/dustin/go-humanize" +) + +const ActualSize = 1000 + +// Test FileInfo.AddObjectPart() +func TestAddObjectPart(t *testing.T) { + testCases := []struct { + partNum int + expectedIndex int + }{ + {1, 0}, + {2, 1}, + {4, 2}, + {5, 3}, + {7, 4}, + // Insert part. + {3, 2}, + // Replace existing part. + {4, 3}, + // Missing part. + {6, -1}, + } + + // Setup. + fi := newFileInfo("test-object", 8, 8) + if !fi.IsValid() { + t.Fatalf("unable to get xl meta") + } + + // Test them. + for _, testCase := range testCases { + if testCase.expectedIndex > -1 { + partNumString := strconv.Itoa(testCase.partNum) + fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize) + } + + if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex { + t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index) + } + } +} + +// Test objectPartIndex(). generates a sample FileInfo data and asserts +// the output of objectPartIndex() with the expected value. +func TestObjectPartIndex(t *testing.T) { + testCases := []struct { + partNum int + expectedIndex int + }{ + {2, 1}, + {1, 0}, + {5, 3}, + {4, 2}, + {7, 4}, + } + + // Setup. + fi := newFileInfo("test-object", 8, 8) + if !fi.IsValid() { + t.Fatalf("unable to get xl meta") + } + + // Add some parts for testing. 
+	for _, testCase := range testCases {
+		partNumString := strconv.Itoa(testCase.partNum)
+		fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
+	}
+
+	// Add failure test case.
+	testCases = append(testCases, struct {
+		partNum       int
+		expectedIndex int
+	}{6, -1})
+
+	// Test them.
+	for _, testCase := range testCases {
+		if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {
+			t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
+		}
+	}
+}
+
+// Test FileInfo.ObjectToPartOffset().
+func TestObjectToPartOffset(t *testing.T) {
+	// Setup.
+	fi := newFileInfo("test-object", 8, 8)
+	if !fi.IsValid() {
+		t.Fatalf("unable to get xl meta")
+	}
+
+	// Add some parts for testing.
+	// Total size of all parts is 5,242,899 bytes.
+	for _, partNum := range []int{1, 2, 4, 5, 7} {
+		partNumString := strconv.Itoa(partNum)
+		fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize)
+	}
+
+	testCases := []struct {
+		offset         int64
+		expectedIndex  int
+		expectedOffset int64
+		expectedErr    error
+	}{
+		{0, 0, 0, nil},
+		{1 * humanize.MiByte, 0, 1 * humanize.MiByte, nil},
+		{1 + humanize.MiByte, 1, 0, nil},
+		{2 + humanize.MiByte, 1, 1, nil},
+		// It's valid for a zero-sized object.
+		{-1, 0, -1, nil},
+		// Max offset is always (size - 1).
+		{(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte) - 1, 4, 1048582, nil},
+		// Error if offset is size.
+		{(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte), 0, 0, InvalidRange{}},
+	}
+
+	// Test them.
+	for _, testCase := range testCases {
+		index, offset, err := fi.ObjectToPartOffset(context.Background(), testCase.offset)
+		if err != testCase.expectedErr {
+			t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err)
+		}
+		if index != testCase.expectedIndex {
+			t.Fatalf("%+v: index: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
+		}
+		if offset != testCase.expectedOffset {
+			t.Fatalf("%+v: offset: expected = %d, got: %d", testCase, testCase.expectedOffset, offset)
+		}
+	}
+}
diff --git a/cmd/xl-v1-multipart.go b/cmd/erasure-multipart.go
similarity index 53%
rename from cmd/xl-v1-multipart.go
rename to cmd/erasure-multipart.go
index 81e953852..c96d4a3d2 100644
--- a/cmd/xl-v1-multipart.go
+++ b/cmd/erasure-multipart.go
@@ -1,5 +1,5 @@
 /*
- * MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -24,7 +24,6 @@ import (
 	"sort"
 	"strconv"
 	"strings"
-	"time"
 
 	xhttp "github.com/minio/minio/cmd/http"
 	"github.com/minio/minio/cmd/logger"
@@ -32,24 +31,25 @@ import (
 	"github.com/minio/minio/pkg/sync/errgroup"
 )
 
-func (xl xlObjects) getUploadIDDir(bucket, object, uploadID string) string {
-	return pathJoin(xl.getMultipartSHADir(bucket, object), uploadID)
+func (er erasureObjects) getUploadIDDir(bucket, object, uploadID string) string {
+	return pathJoin(er.getMultipartSHADir(bucket, object), uploadID)
 }
 
-func (xl xlObjects) getMultipartSHADir(bucket, object string) string {
+func (er erasureObjects) getMultipartSHADir(bucket, object string) string {
 	return getSHA256Hash([]byte(pathJoin(bucket, object)))
 }
 
 // checkUploadIDExists - verify if a given uploadID exists and is valid.
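// Multipart state is keyed by a SHA256 of the object path, per
// getUploadIDDir/getMultipartSHADir above; a pending upload for
// "bucket/object" therefore lives under (a sketch of the implied layout):
//
//	minioMetaMultipartBucket/sha256("bucket/object")/<uploadID>/xl.meta
//	minioMetaMultipartBucket/sha256("bucket/object")/<uploadID>/<dataDir>/part.N
//
// where dataDir is the per-version data directory UUID introduced by this
// patch (see removeObjectPart below).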
-func (xl xlObjects) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) error { - _, err := xl.getObjectInfo(ctx, minioMetaMultipartBucket, xl.getUploadIDDir(bucket, object, uploadID), ObjectOptions{}) +func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) error { + _, err := er.getObjectInfo(ctx, minioMetaMultipartBucket, er.getUploadIDDir(bucket, object, uploadID), ObjectOptions{}) return err } // Removes part given by partName belonging to a multipart upload from minioMetaBucket -func (xl xlObjects) removeObjectPart(bucket, object, uploadID string, partNumber int) { - curpartPath := pathJoin(xl.getUploadIDDir(bucket, object, uploadID), fmt.Sprintf("part.%d", partNumber)) - storageDisks := xl.getDisks() +func (er erasureObjects) removeObjectPart(bucket, object, uploadID, dataDir string, partNumber int) { + uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) + curpartPath := pathJoin(uploadIDPath, dataDir, fmt.Sprintf("part.%d", partNumber)) + storageDisks := er.getDisks() g := errgroup.WithNErrs(len(storageDisks)) for index, disk := range storageDisks { @@ -59,7 +59,7 @@ func (xl xlObjects) removeObjectPart(bucket, object, uploadID string, partNumber index := index g.Go(func() error { // Ignoring failure to remove parts that weren't present in CompleteMultipartUpload - // requests. xl.json is the authoritative source of truth on which parts constitute + // requests. xl.meta is the authoritative source of truth on which parts constitute // the object. The presence of parts that don't belong in the object doesn't affect correctness. _ = storageDisks[index].DeleteFile(minioMetaMultipartBucket, curpartPath) return nil @@ -68,36 +68,6 @@ func (xl xlObjects) removeObjectPart(bucket, object, uploadID string, partNumber g.Wait() } -// commitXLMetadata - commit `xl.json` from source prefix to destination prefix in the given slice of disks. -func commitXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPrefix string, quorum int) ([]StorageAPI, error) { - srcJSONFile := path.Join(srcPrefix, xlMetaJSONFile) - dstJSONFile := path.Join(dstPrefix, xlMetaJSONFile) - - g := errgroup.WithNErrs(len(disks)) - - // Rename `xl.json` to all disks in parallel. - for index := range disks { - index := index - g.Go(func() error { - if disks[index] == nil { - return errDiskNotFound - } - - // Delete any dangling directories. - defer disks[index].DeleteFile(srcBucket, srcPrefix) - - // Renames `xl.json` from source prefix to destination prefix. - return disks[index].RenameFile(srcBucket, srcJSONFile, dstBucket, dstJSONFile) - }, index) - } - - // Wait for all the routines. - mErrs := g.Wait() - - err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum) - return evalDisks(disks, mErrs), err -} - // ListMultipartUploads - lists all the pending multipart // uploads for a particular object in a bucket. // @@ -105,17 +75,17 @@ func commitXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcPre // not support prefix based listing, this is a deliberate attempt // towards simplification of multipart APIs. // The resulting ListMultipartsInfo structure is unmarshalled directly as XML.
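// Illustrative aside, not part of the patch: with the new xl.meta layout an
// uploaded part lives under a per-version data directory,
//
//	<multipartSHADir>/<uploadID>/<dataDir>/part.<N>
//
// which is exactly the path removeObjectPart above deletes. A condensed sketch
// of that construction (pathJoin is MinIO's variadic join helper):
func examplePartPath(shaDir, uploadID, dataDir string, partNumber int) string {
	return pathJoin(shaDir, uploadID, dataDir, fmt.Sprintf("part.%d", partNumber))
}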
-func (xl xlObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) { +func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) { result.MaxUploads = maxUploads result.KeyMarker = keyMarker result.Prefix = object result.Delimiter = delimiter - for _, disk := range xl.getLoadBalancedDisks() { + for _, disk := range er.getLoadBalancedDisks() { if disk == nil { continue } - uploadIDs, err := disk.ListDir(minioMetaMultipartBucket, xl.getMultipartSHADir(bucket, object), -1, "") + uploadIDs, err := disk.ListDir(minioMetaMultipartBucket, er.getMultipartSHADir(bucket, object), -1) if err != nil { if err == errFileNotFound { return result, nil @@ -147,16 +117,16 @@ func (xl xlObjects) ListMultipartUploads(ctx context.Context, bucket, object, ke // '.minio.sys/multipart/bucket/object/uploads.json' on all the // disks. `uploads.json` carries metadata regarding on-going multipart // operation(s) on the object. -func (xl xlObjects) newMultipartUpload(ctx context.Context, bucket string, object string, meta map[string]string) (string, error) { +func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (string, error) { - onlineDisks := xl.getDisks() - parityBlocks := globalStorageClass.GetParityForSC(meta[xhttp.AmzStorageClass]) + onlineDisks := er.getDisks() + parityBlocks := globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass]) if parityBlocks == 0 { parityBlocks = len(onlineDisks) / 2 } dataBlocks := len(onlineDisks) - parityBlocks - xlMeta := newXLMetaV1(object, dataBlocks, parityBlocks) + fi := newFileInfo(object, dataBlocks, parityBlocks) // we now know the number of blocks this object needs for data and parity. // establish the writeQuorum using this data @@ -165,30 +135,37 @@ func (xl xlObjects) newMultipartUpload(ctx context.Context, bucket string, objec writeQuorum = dataBlocks + 1 } - if meta["content-type"] == "" { + if opts.UserDefined["content-type"] == "" { contentType := mimedb.TypeByExtension(path.Ext(object)) - meta["content-type"] = contentType + opts.UserDefined["content-type"] = contentType } - xlMeta.Stat.ModTime = UTCNow() - xlMeta.Meta = meta + + // Calculate the version to be saved. + if opts.Versioned { + fi.VersionID = mustGetUUID() + } + + fi.DataDir = mustGetUUID() + fi.ModTime = UTCNow() + fi.Metadata = opts.UserDefined uploadID := mustGetUUID() - uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) + uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) tempUploadIDPath := uploadID // Delete the tmp path later in case we fail to commit (ignore // returned errors) - this will be a no-op in case of a commit // success. - defer xl.deleteObject(ctx, minioMetaTmpBucket, tempUploadIDPath, writeQuorum, false) + defer er.deleteObject(ctx, minioMetaTmpBucket, tempUploadIDPath, writeQuorum) - var partsMetadata = make([]xlMetaV1, len(onlineDisks)) + var partsMetadata = make([]FileInfo, len(onlineDisks)) for i := range onlineDisks { - partsMetadata[i] = xlMeta + partsMetadata[i] = fi } var err error - // Write updated `xl.json` to all disks. - onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, writeQuorum) + // Write updated `xl.meta` to all disks. 
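// Illustrative aside, not part of the patch: the two UUIDs minted above play
// different roles. VersionID is assigned only on versioned buckets and is what
// S3 clients later see as x-amz-version-id; DataDir is always assigned and
// names the physical directory holding the part files, which is what lets a
// new write land next to an older version without clobbering it. Condensed:
func exampleInitFileInfo(fi *FileInfo, versioned bool) {
	if versioned {
		fi.VersionID = mustGetUUID() // surfaced to clients as the versionId
	}
	fi.DataDir = mustGetUUID() // private to the on-disk layout, never shown to clients
}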
+ onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, writeQuorum) if err != nil { return "", toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath) } @@ -208,12 +185,12 @@ func (xl xlObjects) newMultipartUpload(ctx context.Context, bucket string, objec // subsequent request each UUID is unique. // // Implements S3 compatible initiate multipart API. -func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) { +func (er erasureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) { // No metadata is set, allocate a new one. if opts.UserDefined == nil { opts.UserDefined = make(map[string]string) } - return xl.newMultipartUpload(ctx, bucket, object, opts.UserDefined) + return er.newMultipartUpload(ctx, bucket, object, opts) } // CopyObjectPart - reads incoming stream and internally erasure codes @@ -221,8 +198,8 @@ func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object strin // data is read from an existing object. // // Implements S3 compatible Upload Part Copy API. -func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) { - partInfo, err := xl.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts) +func (er erasureObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) { + partInfo, err := er.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts) if err != nil { return pi, toObjectErr(err, dstBucket, dstObject) } @@ -236,64 +213,60 @@ func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, ds // of the multipart transaction. // // Implements S3 compatible Upload Part API. -func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) { +func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) { data := r.Reader - if err := checkPutObjectPartArgs(ctx, bucket, object, xl); err != nil { - return pi, err - } - // Validate input data size and it can never be less than zero. if data.Size() < -1 { logger.LogIf(ctx, errInvalidArgument, logger.Application) return pi, toObjectErr(errInvalidArgument) } - var partsMetadata []xlMetaV1 + var partsMetadata []FileInfo var errs []error - uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) + uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) // Validates if upload ID exists. - if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { + if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { return pi, toObjectErr(err, bucket, object, uploadID) } // Read metadata associated with the object from all disks. 
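// Illustrative aside, not part of the patch: the metadata read below feeds
// objectQuorumFromMeta (defined in erasure-metadata.go earlier in this patch),
// whose rule is readQuorum = dataBlocks and writeQuorum = dataBlocks + 1. A
// trivial restatement of that rule:
func exampleQuorums(dataBlocks int) (readQuorum, writeQuorum int) {
	// On a 16-disk set with an 8 data + 8 parity layout this yields 8 and 9:
	// at least 9 disks must acknowledge a write, while 8 consistent copies of
	// the metadata suffice for a read.
	return dataBlocks, dataBlocks + 1
}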
- partsMetadata, errs = readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, - uploadIDPath) + partsMetadata, errs = readAllFileInfo(ctx, er.getDisks(), minioMetaMultipartBucket, + uploadIDPath, "") // get Quorum for this object - _, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) + _, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs) if err != nil { return pi, toObjectErr(err, bucket, object) } reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) - if reducedErr == errXLWriteQuorum { + if reducedErr == errErasureWriteQuorum { return pi, toObjectErr(reducedErr, bucket, object) } // List all online disks. - onlineDisks, modTime := listOnlineDisks(xl.getDisks(), partsMetadata, errs) + onlineDisks, modTime := listOnlineDisks(er.getDisks(), partsMetadata, errs) // Pick one from the first valid metadata. - xlMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) + fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum) if err != nil { return pi, err } - onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution) + onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution) // Need a unique name for the part being written in minioMetaBucket to // accommodate concurrent PutObjectPart requests partSuffix := fmt.Sprintf("part.%d", partID) tmpPart := mustGetUUID() - tmpPartPath := path.Join(tmpPart, partSuffix) + tmpPartPath := pathJoin(tmpPart, partSuffix) // Delete the temporary object part. If PutObjectPart succeeds there would be nothing to delete. - defer xl.deleteObject(ctx, minioMetaTmpBucket, tmpPart, writeQuorum, false) + defer er.deleteObject(ctx, minioMetaTmpBucket, tmpPart, writeQuorum) - erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize) + erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize) if err != nil { return pi, toObjectErr(err, bucket, object) } @@ -303,16 +276,16 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID switch size := data.Size(); { case size == 0: buffer = make([]byte, 1) // Allocate at least a byte to reach EOF - case size == -1 || size >= blockSizeV1: - buffer = xl.bp.Get() - defer xl.bp.Put(buffer) - case size < blockSizeV1: - // No need to allocate fully blockSizeV1 buffer if the incoming data is smaller. - buffer = make([]byte, size, 2*size+int64(erasure.parityBlocks+erasure.dataBlocks-1)) + case size == -1 || size >= fi.Erasure.BlockSize: + buffer = er.bp.Get() + defer er.bp.Put(buffer) + case size < fi.Erasure.BlockSize: + // No need to allocate a full fi.Erasure.BlockSize buffer if the incoming data is smaller.
+ buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1)) } - if len(buffer) > int(xlMeta.Erasure.BlockSize) { - buffer = buffer[:xlMeta.Erasure.BlockSize] + if len(buffer) > int(fi.Erasure.BlockSize) { + buffer = buffer[:fi.Erasure.BlockSize] } writers := make([]io.Writer, len(onlineDisks)) for i, disk := range onlineDisks { @@ -322,7 +295,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize()) } - n, err := erasure.Encode(ctx, data, writers, buffer, erasure.dataBlocks+1) + n, err := erasure.Encode(ctx, data, writers, buffer, fi.Erasure.DataBlocks+1) closeBitrotWriters(writers) if err != nil { return pi, toObjectErr(err, bucket, object) @@ -341,21 +314,21 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID } // Validates if upload ID exists. - if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { + if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { return pi, toObjectErr(err, bucket, object, uploadID) } // Rename temporary part file to its final location. - partPath := path.Join(uploadIDPath, partSuffix) + partPath := pathJoin(uploadIDPath, fi.DataDir, partSuffix) onlineDisks, err = rename(ctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, false, writeQuorum, nil) if err != nil { return pi, toObjectErr(err, minioMetaMultipartBucket, partPath) } // Read metadata again because it might be updated with parallel upload of another part. - partsMetadata, errs = readAllXLMetadata(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath) + partsMetadata, errs = readAllFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, "") reducedErr = reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) - if reducedErr == errXLWriteQuorum { + if reducedErr == errErasureWriteQuorum { return pi, toObjectErr(reducedErr, bucket, object) } @@ -363,25 +336,26 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID onlineDisks, modTime = listOnlineDisks(onlineDisks, partsMetadata, errs) // Pick one from the first valid metadata. - xlMeta, err = pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) + fi, err = pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum) if err != nil { return pi, err } - // Once part is successfully committed, proceed with updating XL metadata. - xlMeta.Stat.ModTime = UTCNow() + // Once part is successfully committed, proceed with updating erasure metadata. + fi.ModTime = UTCNow() md5hex := r.MD5CurrentHexString() // Add the current part. - xlMeta.AddObjectPart(partID, md5hex, n, data.ActualSize()) + fi.AddObjectPart(partID, md5hex, n, data.ActualSize()) for i, disk := range onlineDisks { if disk == OfflineDisk { continue } - partsMetadata[i].Stat = xlMeta.Stat - partsMetadata[i].Parts = xlMeta.Parts + partsMetadata[i].Size = fi.Size + partsMetadata[i].ModTime = fi.ModTime + partsMetadata[i].Parts = fi.Parts partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{ PartNumber: partID, Algorithm: DefaultBitrotAlgorithm, @@ -389,19 +363,8 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID }) } - // Write all the checksum metadata. 
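// Illustrative aside, not part of the patch: the bitrot writers above are
// sized with erasure.ShardFileSize, the per-disk share of an erasure-coded
// part. A simplified sketch of that arithmetic, assuming this signature and
// ignoring the helper's internal padding details:
func exampleShardFileSize(totalSize, blockSize, dataBlocks int64) int64 {
	ceil := func(a, b int64) int64 { return (a + b - 1) / b }
	fullStripes := totalSize / blockSize
	remainder := totalSize % blockSize
	// Each full stripe contributes blockSize/dataBlocks (rounded up) bytes per
	// disk; a trailing partial stripe contributes its own rounded-up share.
	return fullStripes*ceil(blockSize, dataBlocks) + ceil(remainder, dataBlocks)
}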
- tempXLMetaPath := mustGetUUID() - - // Cleanup in case of xl.json writing failure - defer xl.deleteObject(ctx, minioMetaTmpBucket, tempXLMetaPath, writeQuorum, false) - - // Writes a unique `xl.json` each disk carrying new checksum related information. - onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, writeQuorum) - if err != nil { - return pi, toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath) - } - - if _, err = commitXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum); err != nil { + // Writes the updated `xl.meta` for each disk. + if _, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil { return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) } @@ -409,8 +372,8 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID return PartInfo{ PartNumber: partID, ETag: md5hex, - LastModified: xlMeta.Stat.ModTime, - Size: xlMeta.Stat.Size, + LastModified: fi.ModTime, + Size: fi.Size, ActualSize: data.ActualSize(), }, nil } @@ -419,44 +382,44 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID // by callers to verify object states // - encrypted // - compressed -func (xl xlObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { +func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { result := MultipartInfo{ Bucket: bucket, Object: object, UploadID: uploadID, } - if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { + if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { return result, toObjectErr(err, bucket, object, uploadID) } - uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) + uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) - storageDisks := xl.getDisks() + storageDisks := er.getDisks() // Read metadata associated with the object from all disks. - partsMetadata, errs := readAllXLMetadata(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath) + partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, opts.VersionID) // get Quorum for this object - _, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) + readQuorum, _, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs) if err != nil { return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) } - reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) - if reducedErr == errXLWriteQuorum { + reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum) + if reducedErr == errErasureReadQuorum { return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath) } _, modTime := listOnlineDisks(storageDisks, partsMetadata, errs) // Pick one from the first valid metadata. - xlMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) + fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, readQuorum) if err != nil { return result, err } - result.UserDefined = xlMeta.Meta + result.UserDefined = fi.Metadata return result, nil } @@ -467,51 +430,47 @@ func (xl xlObjects) GetMultipartInfo(ctx context.Context, bucket, object, upload // Implements S3 compatible ListObjectParts API.
The resulting // ListPartsInfo structure is marshaled directly into XML and // replied back to the client. -func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) { - - if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { +func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) { + if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { return result, toObjectErr(err, bucket, object, uploadID) } - uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) + uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) - storageDisks := xl.getDisks() + storageDisks := er.getDisks() // Read metadata associated with the object from all disks. - partsMetadata, errs := readAllXLMetadata(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath) + partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "") // get Quorum for this object - _, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) + _, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs) if err != nil { return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) } reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) - if reducedErr == errXLWriteQuorum { + if reducedErr == errErasureWriteQuorum { return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath) } _, modTime := listOnlineDisks(storageDisks, partsMetadata, errs) // Pick one from the first valid metadata. - xlValidMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) + fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum) if err != nil { return result, err } - var xlMeta = xlValidMeta.Meta - var xlParts = xlValidMeta.Parts - // Populate the result stub. result.Bucket = bucket result.Object = object result.UploadID = uploadID result.MaxParts = maxParts result.PartNumberMarker = partNumberMarker - result.UserDefined = xlMeta + result.UserDefined = fi.Metadata // For empty number of parts or maxParts as zero, return right here. - if len(xlParts) == 0 || maxParts == 0 { + if len(fi.Parts) == 0 || maxParts == 0 { return result, nil } @@ -521,17 +480,17 @@ func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadI } // Only parts with higher part numbers will be listed. - partIdx := objectPartIndex(xlParts, partNumberMarker) - parts := xlParts + partIdx := objectPartIndex(fi.Parts, partNumberMarker) + parts := fi.Parts if partIdx != -1 { - parts = xlParts[partIdx+1:] + parts = fi.Parts[partIdx+1:] } count := maxParts for _, part := range parts { result.Parts = append(result.Parts, PartInfo{ PartNumber: part.Number, ETag: part.ETag, - LastModified: xlValidMeta.Stat.ModTime, + LastModified: fi.ModTime, Size: part.Size, }) count-- @@ -556,14 +515,14 @@ func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadI // md5sums of all the parts. // // Implements S3 compatible Complete multipart API. 
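// Illustrative aside, not part of the patch: getCompleteMultipartMD5 below
// follows the S3 multipart ETag convention, an MD5 over the concatenated
// binary part MD5s suffixed with the part count. A standalone sketch (assumes
// crypto/md5, encoding/hex, fmt and strings imports; malformed part ETags are
// silently skipped here for brevity):
func exampleMultipartETag(partETags []string) string {
	var all []byte
	for _, etag := range partETags {
		b, _ := hex.DecodeString(strings.Trim(etag, "\""))
		all = append(all, b...)
	}
	sum := md5.Sum(all)
	// e.g. "9b2cf535f27731c974343645a3985328-3" for a three-part upload.
	return fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:]), len(partETags))
}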
-func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) { - if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { +func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) { + if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { return oi, toObjectErr(err, bucket, object, uploadID) } // Check if an object is present as one of the parent dir. // -- FIXME. (needs a new kind of lock). - if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) { + if er.parentDirIsObject(ctx, bucket, path.Dir(object)) { return oi, toObjectErr(errFileParentIsFile, bucket, object) } @@ -572,21 +531,21 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, // Calculate s3 compatible md5sum for complete multipart. s3MD5 := getCompleteMultipartMD5(parts) - uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) + uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) - storageDisks := xl.getDisks() + storageDisks := er.getDisks() // Read metadata associated with the object from all disks. - partsMetadata, errs := readAllXLMetadata(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath) + partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "") // get Quorum for this object - _, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) + _, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs) if err != nil { return oi, toObjectErr(err, bucket, object) } reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) - if reducedErr == errXLWriteQuorum { + if reducedErr == errErasureWriteQuorum { return oi, toObjectErr(reducedErr, bucket, object) } @@ -599,28 +558,26 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, var objectActualSize int64 // Pick one from the first valid metadata. - xlMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) + fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum) if err != nil { return oi, err } // Order online disks in accordance with distribution order. - onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution) + onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution) // Order parts metadata in accordance with distribution order. - partsMetadata = shufflePartsMetadata(partsMetadata, xlMeta.Erasure.Distribution) + partsMetadata = shufflePartsMetadata(partsMetadata, fi.Erasure.Distribution) - // Save current xl meta for validation. - var currentXLMeta = xlMeta + // Save current erasure metadata for validation. + var currentFI = fi // Allocate parts similar to incoming slice. - xlMeta.Parts = make([]ObjectPartInfo, len(parts)) + fi.Parts = make([]ObjectPartInfo, len(parts)) // Validate each part and then commit to disk. for i, part := range parts { - // ensure that part ETag is canonicalized to strip off extraneous quotes - part.ETag = canonicalizeETag(part.ETag) - partIdx := objectPartIndex(currentXLMeta.Parts, part.PartNumber) + partIdx := objectPartIndex(currentFI.Parts, part.PartNumber) // All parts should have same part number. 
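// Illustrative aside, not part of the patch: objectPartIndex, exercised by
// TestObjectPartIndex earlier in this patch, is a plain linear scan over the
// recorded parts, returning -1 when the client names a part that was never
// uploaded. A sketch consistent with those tests:
func exampleObjectPartIndex(parts []ObjectPartInfo, number int) int {
	for i, part := range parts {
		if number == part.Number {
			return i // parts are kept sorted, but a scan is all that is needed
		}
	}
	return -1
}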
if partIdx == -1 { invp := InvalidPart{ PartNumber: part.PartNumber, GotETag: part.ETag, } return oi, invp } - if currentXLMeta.Parts[partIdx].ETag != part.ETag { + // ensure that part ETag is canonicalized to strip off extraneous quotes + part.ETag = canonicalizeETag(part.ETag) + if currentFI.Parts[partIdx].ETag != part.ETag { invp := InvalidPart{ PartNumber: part.PartNumber, - ExpETag: currentXLMeta.Parts[partIdx].ETag, + ExpETag: currentFI.Parts[partIdx].ETag, GotETag: part.ETag, } return oi, invp } // All parts except the last part have to be at least 5MB. - if (i < len(parts)-1) && !isMinAllowedPartSize(currentXLMeta.Parts[partIdx].ActualSize) { + if (i < len(parts)-1) && !isMinAllowedPartSize(currentFI.Parts[partIdx].ActualSize) { return oi, PartTooSmall{ PartNumber: part.PartNumber, - PartSize: currentXLMeta.Parts[partIdx].ActualSize, + PartSize: currentFI.Parts[partIdx].ActualSize, PartETag: part.ETag, } } // Save for total object size. - objectSize += currentXLMeta.Parts[partIdx].Size + objectSize += currentFI.Parts[partIdx].Size // Save the consolidated actual size. - objectActualSize += currentXLMeta.Parts[partIdx].ActualSize + objectActualSize += currentFI.Parts[partIdx].ActualSize // Add incoming parts. - xlMeta.Parts[i] = ObjectPartInfo{ + fi.Parts[i] = ObjectPartInfo{ Number: part.PartNumber, - Size: currentXLMeta.Parts[partIdx].Size, - ActualSize: currentXLMeta.Parts[partIdx].ActualSize, + Size: currentFI.Parts[partIdx].Size, + ActualSize: currentFI.Parts[partIdx].ActualSize, } } // Save the final object size and modtime. - xlMeta.Stat.Size = objectSize - xlMeta.Stat.ModTime = UTCNow() + fi.Size = objectSize + fi.ModTime = UTCNow() // Save successfully calculated md5sum. - xlMeta.Meta["etag"] = s3MD5 + fi.Metadata["etag"] = s3MD5 // Save the consolidated actual size. - xlMeta.Meta[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10) + fi.Metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10) - // Update all xl metadata, make sure to not modify fields like + // Update all erasure metadata, make sure to not modify fields like // checksum which are different on each disk. for index := range partsMetadata { - partsMetadata[index].Stat = xlMeta.Stat - partsMetadata[index].Meta = xlMeta.Meta - partsMetadata[index].Parts = xlMeta.Parts + partsMetadata[index].Size = fi.Size + partsMetadata[index].ModTime = fi.ModTime + partsMetadata[index].Metadata = fi.Metadata + partsMetadata[index].Parts = fi.Parts } - tempXLMetaPath := mustGetUUID() - - // Cleanup in case of failure - defer xl.deleteObject(ctx, minioMetaTmpBucket, tempXLMetaPath, writeQuorum, false) - - // Write unique `xl.json` for each disk. - if onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, writeQuorum); err != nil { - return oi, toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath) - } - - var rErr error - onlineDisks, rErr = commitXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum) - if rErr != nil { - return oi, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath) - } - - if xl.isObject(bucket, object) { - // Rename if an object already exists to temporary location. - newUniqueID := mustGetUUID() - - // Delete success renamed object.
- defer xl.deleteObject(ctx, minioMetaTmpBucket, newUniqueID, writeQuorum, false) - - // NOTE: Do not use online disks slice here: the reason is that existing object should be purged - // regardless of `xl.json` status and rolled back in case of errors. Also allow renaming of the - // existing object if it is not present in quorum disks so users can overwrite stale objects. - _, err = rename(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, newUniqueID, true, writeQuorum, []error{errFileNotFound}) - if err != nil { - return oi, toObjectErr(err, bucket, object) - } + // Write final `xl.meta` at uploadID location + if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil { + return oi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) } // Remove parts that weren't present in CompleteMultipartUpload request. - for _, curpart := range currentXLMeta.Parts { - if objectPartIndex(xlMeta.Parts, curpart.Number) == -1 { + for _, curpart := range currentFI.Parts { + if objectPartIndex(fi.Parts, curpart.Number) == -1 { // Delete the missing part files. e.g, // Request 1: NewMultipart // Request 2: PutObjectPart 1 // Request 3: PutObjectPart 2 // Request 4: CompleteMultipartUpload --part 2 // N.B. 1st part is not present. This part should be removed from the storage. - xl.removeObjectPart(bucket, object, uploadID, curpart.Number) + er.removeObjectPart(bucket, object, uploadID, fi.DataDir, curpart.Number) } } // Rename the multipart object to final location. - if onlineDisks, err = rename(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, bucket, object, true, writeQuorum, nil); err != nil { + if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, + fi.DataDir, bucket, object, writeQuorum, nil); err != nil { return oi, toObjectErr(err, bucket, object) } // Check if there is any offline disk and add it to the MRF list for i := 0; i < len(onlineDisks); i++ { if onlineDisks[i] == nil || storageDisks[i] == nil { - xl.addPartialUpload(bucket, object) + er.addPartialUpload(bucket, object) break } } + for i := 0; i < len(onlineDisks); i++ { + if onlineDisks[i] == nil { + continue + } + // Object info is the same in all disks, so we can pick + // the first meta from an online disk + fi = partsMetadata[i] + break + } + // Success, return object info. - return xlMeta.ToObjectInfo(bucket, object), nil + return fi.ToObjectInfo(bucket, object), nil } // AbortMultipartUpload - aborts an ongoing multipart operation @@ -753,79 +697,28 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, // Implements S3 compatible Abort multipart API, slight difference is // that this is an atomic idempotent operation. Subsequent calls have // no effect and further requests to the same uploadID would not be honored. -func (xl xlObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { +func (er erasureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { // Validates if upload ID exists. - if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { + if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { return toObjectErr(err, bucket, object, uploadID) } - uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) + uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) // Read metadata associated with the object from all disks.
- partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, uploadIDPath) + partsMetadata, errs := readAllFileInfo(ctx, er.getDisks(), minioMetaMultipartBucket, uploadIDPath, "") // get Quorum for this object - _, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) + _, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs) if err != nil { return toObjectErr(err, bucket, object, uploadID) } // Cleanup all uploaded parts. - if err = xl.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, writeQuorum, false); err != nil { + if err = er.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, writeQuorum); err != nil { return toObjectErr(err, bucket, object, uploadID) } // Successfully purged. return nil } - -// Clean-up the old multipart uploads. Should be run in a Go routine. -func (xl xlObjects) cleanupStaleMultipartUploads(ctx context.Context, cleanupInterval, expiry time.Duration, doneCh <-chan struct{}) { - ticker := time.NewTicker(cleanupInterval) - defer ticker.Stop() - - for { - select { - case <-doneCh: - return - case <-ticker.C: - var disk StorageAPI - for _, d := range xl.getLoadBalancedDisks() { - if d != nil { - disk = d - break - } - } - if disk == nil { - continue - } - xl.cleanupStaleMultipartUploadsOnDisk(ctx, disk, expiry) - } - } -} - -// Remove the old multipart uploads on the given disk. -func (xl xlObjects) cleanupStaleMultipartUploadsOnDisk(ctx context.Context, disk StorageAPI, expiry time.Duration) { - now := time.Now() - shaDirs, err := disk.ListDir(minioMetaMultipartBucket, "", -1, "") - if err != nil { - return - } - for _, shaDir := range shaDirs { - uploadIDDirs, err := disk.ListDir(minioMetaMultipartBucket, shaDir, -1, "") - if err != nil { - continue - } - for _, uploadIDDir := range uploadIDDirs { - uploadIDPath := pathJoin(shaDir, uploadIDDir) - fi, err := disk.StatFile(minioMetaMultipartBucket, pathJoin(uploadIDPath, xlMetaJSONFile)) - if err != nil { - continue - } - if now.Sub(fi.ModTime) > expiry { - writeQuorum := getWriteQuorum(len(xl.getDisks())) - xl.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, writeQuorum, false) - } - } - } -} diff --git a/cmd/xl-v1-object.go b/cmd/erasure-object.go similarity index 51% rename from cmd/xl-v1-object.go rename to cmd/erasure-object.go index e89413a5e..2ccdccc2d 100644 --- a/cmd/xl-v1-object.go +++ b/cmd/erasure-object.go @@ -36,8 +36,8 @@ import ( var objectOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied) // putObjectDir hints the bottom layer to create a new directory. -func (xl xlObjects) putObjectDir(ctx context.Context, bucket, object string, writeQuorum int) error { - storageDisks := xl.getDisks() +func (er erasureObjects) putObjectDir(ctx context.Context, bucket, object string, writeQuorum int) error { + storageDisks := er.getDisks() g := errgroup.WithNErrs(len(storageDisks)) @@ -64,7 +64,7 @@ func (xl xlObjects) putObjectDir(ctx context.Context, bucket, object string, wri // CopyObject - copy object source object to destination object. // if source object and destination object are same we only // update metadata. 
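// Illustrative aside, not part of the patch: CopyObject below is the first of
// several call sites in this file that special-case fi.Deleted, i.e. a
// versioned delete marker. The pattern, condensed:
func exampleDeleteMarkerToErr(deleted bool, requestedVersionID string) error {
	if !deleted {
		return nil
	}
	if requestedVersionID == "" {
		// The latest version is a delete marker: behave as if absent (404).
		return errFileNotFound
	}
	// An explicit versionId resolving to a delete marker is reported as
	// method-not-allowed, with object info still returned for extra headers.
	return errMethodNotAllowed
}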
-func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) { +func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) { // This call shouldn't be used for anything other than metadata updates. if !srcInfo.metadataOnly { return oi, NotImplemented{} @@ -73,58 +73,59 @@ func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBuc defer ObjectPathUpdated(path.Join(dstBucket, dstObject)) // Read metadata associated with the object from all disks. - storageDisks := xl.getDisks() + storageDisks := er.getDisks() - metaArr, errs := readAllXLMetadata(ctx, storageDisks, srcBucket, srcObject) + metaArr, errs := readAllFileInfo(ctx, storageDisks, srcBucket, srcObject, srcOpts.VersionID) // get Quorum for this object - readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, xl, metaArr, errs) + readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, er, metaArr, errs) if err != nil { return oi, toObjectErr(err, srcBucket, srcObject) } - if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil { - return oi, toObjectErr(reducedErr, srcBucket, srcObject) - } - // List all online disks. - _, modTime := listOnlineDisks(storageDisks, metaArr, errs) + onlineDisks, modTime := listOnlineDisks(storageDisks, metaArr, errs) // Pick latest valid metadata. - xlMeta, err := pickValidXLMeta(ctx, metaArr, modTime, readQuorum) + fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum) if err != nil { return oi, toObjectErr(err, srcBucket, srcObject) } - // Update `xl.json` content on each disks. - for index := range metaArr { - metaArr[index].Meta = srcInfo.UserDefined - metaArr[index].Meta["etag"] = srcInfo.ETag + if fi.Deleted { + if srcOpts.VersionID == "" { + return oi, toObjectErr(errFileNotFound, srcBucket, srcObject) + } + return fi.ToObjectInfo(srcBucket, srcObject), toObjectErr(errMethodNotAllowed, srcBucket, srcObject) } - var onlineDisks []StorageAPI + // Update `xl.meta` content on each disk. + for index := range metaArr { + metaArr[index].Metadata = srcInfo.UserDefined + metaArr[index].Metadata["etag"] = srcInfo.ETag + } tempObj := mustGetUUID() - // Cleanup in case of xl.json writing failure - defer xl.deleteObject(ctx, minioMetaTmpBucket, tempObj, writeQuorum, false) + // Cleanup in case of xl.meta writing failure + defer er.deleteObject(ctx, minioMetaTmpBucket, tempObj, writeQuorum) - // Write unique `xl.json` for each disk. - if onlineDisks, err = writeUniqueXLMetadata(ctx, storageDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil { + // Write unique `xl.meta` for each disk. + if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil { return oi, toObjectErr(err, srcBucket, srcObject) } - // Rename atomically `xl.json` from tmp location to destination for each disk. - if _, err = renameXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, writeQuorum); err != nil { + // Rename atomically `xl.meta` from tmp location to destination for each disk.
+ if _, err = renameFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, writeQuorum); err != nil { return oi, toObjectErr(err, srcBucket, srcObject) } - return xlMeta.ToObjectInfo(srcBucket, srcObject), nil + return fi.ToObjectInfo(srcBucket, srcObject), nil } // GetObjectNInfo - returns object info and an object // Read(Closer). When err != nil, the returned reader is always nil. -func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { +func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { if err = checkGetObjArgs(ctx, bucket, object); err != nil { return nil, err } @@ -133,25 +134,25 @@ func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, r // returns no bytes. if HasSuffix(object, SlashSeparator) { var objInfo ObjectInfo - if objInfo, err = xl.getObjectInfoDir(ctx, bucket, object); err != nil { + if objInfo, err = er.getObjectInfoDir(ctx, bucket, object); err != nil { return nil, toObjectErr(err, bucket, object) } return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts) } - meta, metaArr, onlineDisks, err := xl.getObjectXLMeta(ctx, bucket, object, opts) + fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts) if err != nil { return nil, toObjectErr(err, bucket, object) } - fn, off, length, nErr := NewGetObjectReader(rs, meta.ToObjectInfo(bucket, object), opts) + fn, off, length, nErr := NewGetObjectReader(rs, fi.ToObjectInfo(bucket, object), opts) if nErr != nil { return nil, nErr } pr, pw := io.Pipe() go func() { - err := xl.getObjectWithXLMeta(ctx, bucket, object, off, length, pw, "", opts, meta, metaArr, onlineDisks) + err := er.getObjectWithFileInfo(ctx, bucket, object, off, length, pw, "", opts, fi, metaArr, onlineDisks) pw.CloseWithError(err) }() @@ -168,7 +169,7 @@ func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, r // // startOffset indicates the starting read location of the object. // length indicates the total length of the object. -func (xl xlObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { +func (er erasureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { if err := checkGetObjArgs(ctx, bucket, object); err != nil { return err } @@ -192,31 +193,31 @@ func (xl xlObjects) GetObject(ctx context.Context, bucket, object string, startO return toObjectErr(err, bucket, object) } - return xl.getObject(ctx, bucket, object, startOffset, length, writer, etag, opts) + return er.getObject(ctx, bucket, object, startOffset, length, writer, etag, opts) } -func (xl xlObjects) getObjectWithXLMeta(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions, xlMeta xlMetaV1, metaArr []xlMetaV1, onlineDisks []StorageAPI) error { +func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions, fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI) error { // Reorder online disks based on erasure distribution order. 
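// Illustrative aside, not part of the patch: Erasure.Distribution is a
// 1-based permutation recording which shard each disk holds, and shuffleDisks
// inverts it so that slot i of the result holds the disk carrying shard i+1.
// A sketch assumed to mirror the helper used below:
func exampleShuffleDisks(disks []StorageAPI, distribution []int) []StorageAPI {
	shuffled := make([]StorageAPI, len(disks))
	for index, blockIndex := range distribution {
		shuffled[blockIndex-1] = disks[index]
	}
	return shuffled
}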
- onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution) + onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution) // Reorder parts metadata based on erasure distribution order. - metaArr = shufflePartsMetadata(metaArr, xlMeta.Erasure.Distribution) + metaArr = shufflePartsMetadata(metaArr, fi.Erasure.Distribution) // For negative length read everything. if length < 0 { - length = xlMeta.Stat.Size - startOffset + length = fi.Size - startOffset } // Reply back invalid range if the input offset and length fall out of range. - if startOffset > xlMeta.Stat.Size || startOffset+length > xlMeta.Stat.Size { - logger.LogIf(ctx, InvalidRange{startOffset, length, xlMeta.Stat.Size}, logger.Application) - return InvalidRange{startOffset, length, xlMeta.Stat.Size} + if startOffset > fi.Size || startOffset+length > fi.Size { + logger.LogIf(ctx, InvalidRange{startOffset, length, fi.Size}, logger.Application) + return InvalidRange{startOffset, length, fi.Size} } // Get start part index and offset. - partIndex, partOffset, err := xlMeta.ObjectToPartOffset(ctx, startOffset) + partIndex, partOffset, err := fi.ObjectToPartOffset(ctx, startOffset) if err != nil { - return InvalidRange{startOffset, length, xlMeta.Stat.Size} + return InvalidRange{startOffset, length, fi.Size} } // Calculate endOffset according to length @@ -226,13 +227,13 @@ func (xl xlObjects) getObjectWithXLMeta(ctx context.Context, bucket, object stri } // Get last part index to read given length. - lastPartIndex, _, err := xlMeta.ObjectToPartOffset(ctx, endOffset) + lastPartIndex, _, err := fi.ObjectToPartOffset(ctx, endOffset) if err != nil { - return InvalidRange{startOffset, length, xlMeta.Stat.Size} + return InvalidRange{startOffset, length, fi.Size} } var totalBytesRead int64 - erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize) + erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize) if err != nil { return toObjectErr(err, bucket, object) } @@ -243,10 +244,10 @@ func (xl xlObjects) getObjectWithXLMeta(ctx context.Context, bucket, object stri break } - partNumber := xlMeta.Parts[partIndex].Number + partNumber := fi.Parts[partIndex].Number // Save the current part name and size. - partSize := xlMeta.Parts[partIndex].Size + partSize := fi.Parts[partIndex].Size partLength := partSize - partOffset // partLength should be adjusted so that we don't write more data than what was requested. @@ -254,7 +255,7 @@ func (xl xlObjects) getObjectWithXLMeta(ctx context.Context, bucket, object stri partLength = length - totalBytesRead } - tillOffset := erasure.ShardFileTillOffset(partOffset, partLength, partSize) + tillOffset := erasure.ShardFileOffset(partOffset, partLength, partSize) // Get the checksums of the current part. 
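// Illustrative aside, not part of the patch: the partIndex/partOffset pair
// driving this read loop comes from fi.ObjectToPartOffset above, which walks
// the part sizes until the absolute offset falls inside one part. A condensed
// sketch consistent with TestObjectToPartOffset earlier in this patch:
func exampleObjectToPartOffset(parts []ObjectPartInfo, offset int64) (partIndex int, partOffset int64, err error) {
	partOffset = offset
	for i, part := range parts {
		if partOffset < part.Size {
			return i, partOffset, nil // offset lands inside part i
		}
		partOffset -= part.Size
	}
	return 0, 0, InvalidRange{} // offset at or beyond the object size
}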
readers := make([]io.ReaderAt, len(onlineDisks)) prefer := make([]bool, len(onlineDisks)) @@ -263,21 +264,22 @@ func (xl xlObjects) getObjectWithXLMeta(ctx context.Context, bucket, object stri continue } checksumInfo := metaArr[index].Erasure.GetChecksumInfo(partNumber) - partPath := pathJoin(object, fmt.Sprintf("part.%d", partNumber)) + partPath := pathJoin(object, metaArr[index].DataDir, fmt.Sprintf("part.%d", partNumber)) readers[index] = newBitrotReader(disk, bucket, partPath, tillOffset, checksumInfo.Algorithm, checksumInfo.Hash, erasure.ShardSize()) // Prefer local disks prefer[index] = disk.Hostname() == "" } - err := erasure.Decode(ctx, writer, readers, partOffset, partLength, partSize, prefer) - // Note: we should not be defer'ing the following closeBitrotReaders() call as we are inside a for loop i.e if we use defer, we would accumulate a lot of open files by the time + err = erasure.Decode(ctx, writer, readers, partOffset, partLength, partSize, prefer) + // Note: we should not be defer'ing the following closeBitrotReaders() call as + // we are inside a for loop i.e if we use defer, we would accumulate a lot of open files by the time // we return from this function. closeBitrotReaders(readers) if err != nil { if decodeHealErr, ok := err.(*errDecodeHealRequired); ok { healOnce.Do(func() { - go deepHealObject(pathJoin(bucket, object)) + go deepHealObject(bucket, object, fi.VersionID) }) err = decodeHealErr.err } @@ -302,18 +304,18 @@ func (xl xlObjects) getObjectWithXLMeta(ctx context.Context, bucket, object stri return nil } -// getObject wrapper for xl GetObject -func (xl xlObjects) getObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { - xlMeta, metaArr, onlineDisks, err := xl.getObjectXLMeta(ctx, bucket, object, opts) +// getObject wrapper for erasure GetObject +func (er erasureObjects) getObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { + fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts) if err != nil { return toObjectErr(err, bucket, object) } - return xl.getObjectWithXLMeta(ctx, bucket, object, startOffset, length, writer, etag, opts, xlMeta, metaArr, onlineDisks) + return er.getObjectWithFileInfo(ctx, bucket, object, startOffset, length, writer, etag, opts, fi, metaArr, onlineDisks) } // getObjectInfoDir - This getObjectInfo is specific to object directory lookup. -func (xl xlObjects) getObjectInfoDir(ctx context.Context, bucket, object string) (ObjectInfo, error) { - storageDisks := xl.getDisks() +func (er erasureObjects) getObjectInfoDir(ctx context.Context, bucket, object string) (ObjectInfo, error) { + storageDisks := er.getDisks() g := errgroup.WithNErrs(len(storageDisks)) @@ -325,7 +327,7 @@ func (xl xlObjects) getObjectInfoDir(ctx context.Context, bucket, object string) index := index g.Go(func() error { // Check if 'prefix' is an object on this 'disk'. - entries, err := storageDisks[index].ListDir(bucket, object, 1, "") + entries, err := storageDisks[index].ListDir(bucket, object, 1) if err != nil { return err } @@ -343,60 +345,70 @@ func (xl xlObjects) getObjectInfoDir(ctx context.Context, bucket, object string) } // GetObjectInfo - reads object metadata and replies back ObjectInfo. 
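// Illustrative aside, not part of the patch: the healOnce.Do earlier in this
// hunk is a sync.Once scoped to the request, so however many shards of a GET
// detect bitrot, at most one background deep heal is scheduled for the object,
// now addressed by bucket/object/versionID instead of a joined path. The shape
// of that trigger, condensed (assumes the sync import):
func exampleTriggerDeepHeal(healOnce *sync.Once, bucket, object, versionID string) {
	healOnce.Do(func() {
		// Fire-and-forget: the GET keeps streaming decoded data while the
		// heal runs in the background.
		go deepHealObject(bucket, object, versionID)
	})
}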
-func (xl xlObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, e error) { - if err := checkGetObjArgs(ctx, bucket, object); err != nil { - return oi, err +func (er erasureObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (info ObjectInfo, err error) { + if err = checkGetObjArgs(ctx, bucket, object); err != nil { + return info, err } if HasSuffix(object, SlashSeparator) { - info, err := xl.getObjectInfoDir(ctx, bucket, object) + info, err = er.getObjectInfoDir(ctx, bucket, object) if err != nil { - return oi, toObjectErr(err, bucket, object) + return info, toObjectErr(err, bucket, object) } return info, nil } - info, err := xl.getObjectInfo(ctx, bucket, object, opts) + info, err = er.getObjectInfo(ctx, bucket, object, opts) if err != nil { - return oi, toObjectErr(err, bucket, object) + return info, toObjectErr(err, bucket, object) } return info, nil } -func (xl xlObjects) getObjectXLMeta(ctx context.Context, bucket, object string, opt ObjectOptions) (xlMeta xlMetaV1, metaArr []xlMetaV1, onlineDisks []StorageAPI, err error) { - disks := xl.getDisks() +func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI, err error) { + disks := er.getDisks() // Read metadata associated with the object from all disks. - metaArr, errs := readAllXLMetadata(ctx, disks, bucket, object) + metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID) - readQuorum, _, err := objectQuorumFromMeta(ctx, xl, metaArr, errs) + readQuorum, _, err := objectQuorumFromMeta(ctx, er, metaArr, errs) if err != nil { - return xlMeta, nil, nil, err + return fi, nil, nil, err } if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil { - return xlMeta, nil, nil, toObjectErr(reducedErr, bucket, object) + return fi, nil, nil, toObjectErr(reducedErr, bucket, object) } + // List all online disks. onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs) // Pick latest valid metadata. - xlMeta, err = pickValidXLMeta(ctx, metaArr, modTime, readQuorum) + fi, err = pickValidFileInfo(ctx, metaArr, modTime, readQuorum) if err != nil { - return xlMeta, nil, nil, err + return fi, nil, nil, err } - return xlMeta, metaArr, onlineDisks, nil + return fi, metaArr, onlineDisks, nil } // getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo. -func (xl xlObjects) getObjectInfo(ctx context.Context, bucket, object string, opt ObjectOptions) (objInfo ObjectInfo, err error) { - xlMeta, _, _, err := xl.getObjectXLMeta(ctx, bucket, object, opt) +func (er erasureObjects) getObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { + fi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts) if err != nil { return objInfo, err } - return xlMeta.ToObjectInfo(bucket, object), nil + + if fi.Deleted { + if opts.VersionID == "" { + return objInfo, toObjectErr(errFileNotFound, bucket, object) + } + // Make sure to return object info to provide extra information. 
+ return fi.ToObjectInfo(bucket, object), toObjectErr(errMethodNotAllowed, bucket, object) + } + + return fi.ToObjectInfo(bucket, object), nil } func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, errs []error) { @@ -424,6 +436,53 @@ func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry str g.Wait() } +// Similar to rename but renames data from srcEntry to dstEntry at dataDir +func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dataDir, dstBucket, dstEntry string, writeQuorum int, ignoredErr []error) ([]StorageAPI, error) { + dataDir = retainSlash(dataDir) + g := errgroup.WithNErrs(len(disks)) + + // Rename file on all underlying storage disks. + for index := range disks { + index := index + g.Go(func() error { + if disks[index] == nil { + return errDiskNotFound + } + if err := disks[index].RenameData(srcBucket, srcEntry, dataDir, dstBucket, dstEntry); err != nil { + if !IsErrIgnored(err, ignoredErr...) { + return err + } + } + return nil + }, index) + } + + // Wait for all renames to finish. + errs := g.Wait() + + // We can safely allow RenameFile errors up to len(er.getDisks()) - writeQuorum + // otherwise return failure. Cleanup successful renames. + err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) + if err == errErasureWriteQuorum { + ug := errgroup.WithNErrs(len(disks)) + for index, disk := range disks { + if disk == nil { + continue + } + index := index + ug.Go(func() error { + // Undo all the partial rename operations. + if errs[index] == nil { + _ = disks[index].RenameData(dstBucket, dstEntry, dataDir, srcBucket, srcEntry) + } + return nil + }, index) + } + ug.Wait() + } + return evalDisks(disks, errs), err +} + // rename - common function that renamePart and renameObject use to rename // the respective underlying storage layer representations. func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, writeQuorum int, ignoredErr []error) ([]StorageAPI, error) { @@ -454,10 +513,10 @@ func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBuc // Wait for all renames to finish. errs := g.Wait() - // We can safely allow RenameFile errors up to len(xl.getDisks()) - writeQuorum + // We can safely allow RenameFile errors up to len(er.getDisks()) - writeQuorum // otherwise return failure. Cleanup successful renames. err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) - if err == errXLWriteQuorum { + if err == errErasureWriteQuorum { // Undo all the partial rename operations. undoRename(disks, srcBucket, srcEntry, dstBucket, dstEntry, isDir, errs) } @@ -466,20 +525,21 @@ func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBuc // PutObject - creates an object upon reading from the input stream // until EOF, erasure codes the data across all disk and additionally -// writes `xl.json` which carries the necessary metadata for future +// writes `xl.meta` which carries the necessary metadata for future // object operations. -func (xl xlObjects) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { +func (er erasureObjects) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { // Validate put object input args. 
- if err = checkPutObjectArgs(ctx, bucket, object, xl, data.Size()); err != nil { + if err = checkPutObjectArgs(ctx, bucket, object, er, data.Size()); err != nil { return ObjectInfo{}, err } - return xl.putObject(ctx, bucket, object, data, opts) + return er.putObject(ctx, bucket, object, data, opts) } -// putObject wrapper for xl PutObject -func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { +// putObject wrapper for erasureObjects PutObject +func (er erasureObjects) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { defer ObjectPathUpdated(path.Join(bucket, object)) + data := r.Reader uniqueID := mustGetUUID() @@ -489,7 +549,7 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, opts.UserDefined = make(map[string]string) } - storageDisks := xl.getDisks() + storageDisks := er.getDisks() // Get parity and data drive count based on storage class metadata parityDrives := globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass]) @@ -508,7 +568,7 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, // Delete temporary object in the event of failure. // If PutObject succeeded there would be no temporary // object to delete. - defer xl.deleteObject(ctx, minioMetaTmpBucket, tempObj, writeQuorum, false) + defer er.deleteObject(ctx, minioMetaTmpBucket, tempObj, writeQuorum) // This is a special case with size as '0' and object ends with // a slash separator, we treat it like a valid operation and @@ -517,17 +577,11 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, // Check if an object is present as one of the parent dir. // -- FIXME. (needs a new kind of lock). // -- FIXME (this also causes performance issue when disks are down). - if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) { + if er.parentDirIsObject(ctx, bucket, path.Dir(object)) { return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object) } - if err = xl.putObjectDir(ctx, minioMetaTmpBucket, tempObj, writeQuorum); err != nil { - return ObjectInfo{}, toObjectErr(err, bucket, object) - } - - // Rename the successfully written temporary object to final location. Ignore errFileAccessDenied - // error because it means that the target object dir exists and we want to be close to S3 specification. - if _, err = rename(ctx, storageDisks, minioMetaTmpBucket, tempObj, bucket, object, true, writeQuorum, []error{errFileAccessDenied}); err != nil { + if err = er.putObjectDir(ctx, bucket, object, writeQuorum); err != nil { return ObjectInfo{}, toObjectErr(err, bucket, object) } @@ -543,24 +597,30 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, // Check if an object is present as one of the parent dir. // -- FIXME. (needs a new kind of lock). // -- FIXME (this also causes performance issue when disks are down). - if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) { + if er.parentDirIsObject(ctx, bucket, path.Dir(object)) { return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object) } // Initialize parts metadata - partsMetadata := make([]xlMetaV1, len(xl.getDisks())) + partsMetadata := make([]FileInfo, len(er.getDisks())) - xlMeta := newXLMetaV1(object, dataDrives, parityDrives) + fi := newFileInfo(object, dataDrives, parityDrives) - // Initialize xl meta.
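// Illustrative aside, not part of the patch: the parity count above comes from
// the request's storage class, with a fallback of half the drive set, and the
// write quorum is bumped to data drives plus one only when data and parity are
// equal (otherwise data alone already exceeds half the set). A condensed
// sketch, assuming this reading of the surrounding code:
func exampleDriveCounts(totalDrives, parityFromStorageClass int) (dataDrives, writeQuorum int) {
	parityDrives := parityFromStorageClass
	if parityDrives == 0 {
		parityDrives = totalDrives / 2 // no storage class given: default to N/2 parity
	}
	dataDrives = totalDrives - parityDrives
	writeQuorum = dataDrives
	if dataDrives == parityDrives {
		writeQuorum++ // break the tie so two conflicting writes cannot both reach quorum
	}
	return dataDrives, writeQuorum
}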
+ if opts.Versioned { + fi.VersionID = mustGetUUID() + } + fi.DataDir = mustGetUUID() + + // Initialize erasure metadata. for index := range partsMetadata { - partsMetadata[index] = xlMeta + partsMetadata[index] = fi } // Order disks according to erasure distribution - onlineDisks := shuffleDisks(storageDisks, xlMeta.Erasure.Distribution) + onlineDisks := shuffleDisks(storageDisks, fi.Erasure.Distribution) - erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize) + erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize) if err != nil { return ObjectInfo{}, toObjectErr(err, bucket, object) } @@ -570,20 +630,20 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, switch size := data.Size(); { case size == 0: buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF - case size == -1 || size >= blockSizeV1: - buffer = xl.bp.Get() - defer xl.bp.Put(buffer) - case size < blockSizeV1: + case size == -1 || size >= fi.Erasure.BlockSize: + buffer = er.bp.Get() + defer er.bp.Put(buffer) + case size < fi.Erasure.BlockSize: // No need to allocate fully blockSizeV1 buffer if the incoming data is smaller. - buffer = make([]byte, size, 2*size+int64(erasure.parityBlocks+erasure.dataBlocks-1)) + buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1)) } - if len(buffer) > int(xlMeta.Erasure.BlockSize) { - buffer = buffer[:xlMeta.Erasure.BlockSize] + if len(buffer) > int(fi.Erasure.BlockSize) { + buffer = buffer[:fi.Erasure.BlockSize] } partName := "part.1" - tempErasureObj := pathJoin(uniqueID, partName) + tempErasureObj := pathJoin(uniqueID, fi.DataDir, partName) writers := make([]io.Writer, len(onlineDisks)) for i, disk := range onlineDisks { @@ -593,7 +653,7 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize()) } - n, erasureErr := erasure.Encode(ctx, data, writers, buffer, erasure.dataBlocks+1) + n, erasureErr := erasure.Encode(ctx, data, writers, buffer, fi.Erasure.DataBlocks+1) closeBitrotWriters(writers) if erasureErr != nil { return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj) @@ -629,37 +689,21 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, opts.UserDefined["content-type"] = mimedb.TypeByExtension(path.Ext(object)) } - if xl.isObject(bucket, object) { - // Rename if an object already exists to temporary location. - newUniqueID := mustGetUUID() - - // Delete successfully renamed object. - defer xl.deleteObject(ctx, minioMetaTmpBucket, newUniqueID, writeQuorum, false) - - // NOTE: Do not use online disks slice here: the reason is that existing object should be purged - // regardless of `xl.json` status and rolled back in case of errors. Also allow renaming the - // existing object if it is not present in quorum disks so users can overwrite stale objects. - _, err = rename(ctx, storageDisks, bucket, object, minioMetaTmpBucket, newUniqueID, true, writeQuorum, []error{errFileNotFound}) - if err != nil { - return ObjectInfo{}, toObjectErr(err, bucket, object) - } - } - // Fill all the necessary metadata. - // Update `xl.json` content on each disks. + // Update `xl.meta` content on each disk.
for index := range partsMetadata { - partsMetadata[index].Meta = opts.UserDefined - partsMetadata[index].Stat.Size = n - partsMetadata[index].Stat.ModTime = modTime + partsMetadata[index].Metadata = opts.UserDefined + partsMetadata[index].Size = n + partsMetadata[index].ModTime = modTime } - // Write unique `xl.json` for each disk. - if onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, writeQuorum); err != nil { + // Write unique `xl.meta` for each disk. + if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, writeQuorum); err != nil { return ObjectInfo{}, toObjectErr(err, bucket, object) } // Rename the successfully written temporary object to final location. - if onlineDisks, err = rename(ctx, onlineDisks, minioMetaTmpBucket, tempObj, bucket, object, true, writeQuorum, nil); err != nil { + if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, fi.DataDir, bucket, object, writeQuorum, nil); err != nil { return ObjectInfo{}, toObjectErr(err, bucket, object) } @@ -667,34 +711,51 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, // during this upload, send it to the MRF list. for i := 0; i < len(onlineDisks); i++ { if onlineDisks[i] == nil || storageDisks[i] == nil { - xl.addPartialUpload(bucket, object) + er.addPartialUpload(bucket, object) break } } - // Object info is the same in all disks, so we can pick the first meta - // of the first disk - xlMeta = partsMetadata[0] - - objInfo = ObjectInfo{ - IsDir: false, - Bucket: bucket, - Name: object, - Size: xlMeta.Stat.Size, - ModTime: xlMeta.Stat.ModTime, - ETag: xlMeta.Meta["etag"], - ContentType: xlMeta.Meta["content-type"], - ContentEncoding: xlMeta.Meta["content-encoding"], - UserDefined: xlMeta.Meta, + for i := 0; i < len(onlineDisks); i++ { + if onlineDisks[i] == nil { + continue + } + // Object info is the same in all disks, so we can pick + // the first meta from online disk + fi = partsMetadata[i] + break } - return objInfo, nil + return fi.ToObjectInfo(bucket, object), nil +} + +func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object string, writeQuorum int, fi FileInfo) error { + disks := er.getDisks() + + g := errgroup.WithNErrs(len(disks)) + + for index := range disks { + index := index + g.Go(func() error { + if disks[index] == nil { + return errDiskNotFound + } + err := disks[index].DeleteVersion(bucket, object, fi) + if err != nil && err != errVolumeNotFound { + return err + } + return nil + }, index) + } + + // return errors if any during deletion + return reduceWriteQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, writeQuorum) } // deleteObject - wrapper for delete object, deletes an object from -// all the disks in parallel, including `xl.json` associated with the +// all the disks in parallel, including `xl.meta` associated with the // object. 
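The staging sequence above amounts to a two-phase commit: part.1 and the per-disk xl.meta are first written under minioMetaTmpBucket, and renameData then promotes only this version's own data directory (fi.DataDir) into the object's namespace. Because every versioned write also carries a fresh fi.VersionID, the on-disk result looks roughly like the sketch below (UUIDs are illustrative, not taken from this patch):

bucket/object/
    xl.meta                                       # version table shared by all versions
    9dd7d884-121a-41e9-9a4e-b64bdbb1f45f/part.1   # data dir of one version
    5d2f05a1-0a6e-4a04-a2b0-8a5df61d94cd/part.1   # data dir of another version

This is also why the rename-the-existing-object-aside block deleted just above is no longer needed: a new version can never collide with the data directory of an older one.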
-func (xl xlObjects) deleteObject(ctx context.Context, bucket, object string, writeQuorum int, isDir bool) error { +func (er erasureObjects) deleteObject(ctx context.Context, bucket, object string, writeQuorum int) error { var disks []StorageAPI var err error defer ObjectPathUpdated(path.Join(bucket, object)) @@ -702,18 +763,13 @@ func (xl xlObjects) deleteObject(ctx context.Context, bucket, object string, wri tmpObj := mustGetUUID() if bucket == minioMetaTmpBucket { tmpObj = object - disks = xl.getDisks() + disks = er.getDisks() } else { // Rename the current object while requiring write quorum, but also consider // that a non found object in a given disk as a success since it already // confirms that the object doesn't have a part in that disk (already removed) - if isDir { - disks, err = rename(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum, - []error{errFileNotFound, errFileAccessDenied}) - } else { - disks, err = rename(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum, - []error{errFileNotFound}) - } + disks, err = rename(ctx, er.getDisks(), bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum, + []error{errFileNotFound}) if err != nil { return toObjectErr(err, bucket, object) } @@ -727,14 +783,7 @@ func (xl xlObjects) deleteObject(ctx context.Context, bucket, object string, wri if disks[index] == nil { return errDiskNotFound } - var err error - if isDir { - // DeleteFile() simply tries to remove a directory - // and will succeed only if that directory is empty. - err = disks[index].DeleteFile(minioMetaTmpBucket, tmpObj) - } else { - err = cleanupDir(ctx, disks[index], minioMetaTmpBucket, tmpObj) - } + err := cleanupDir(ctx, disks[index], minioMetaTmpBucket, tmpObj) if err != nil && err != errVolumeNotFound { return err } @@ -746,124 +795,19 @@ func (xl xlObjects) deleteObject(ctx context.Context, bucket, object string, wri return reduceWriteQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, writeQuorum) } -// deleteObject - wrapper for delete object, deletes an object from -// all the disks in parallel, including `xl.json` associated with the -// object. -func (xl xlObjects) doDeleteObjects(ctx context.Context, bucket string, objects []string, errs []error, writeQuorums []int, isDirs []bool) ([]error, error) { - var tmpObjs = make([]string, len(objects)) - if bucket == minioMetaTmpBucket { - copy(tmpObjs, objects) - } else { - for idx := range objects { - if errs[idx] != nil { - continue - } - - tmpObjs[idx] = mustGetUUID() - var err error - // Rename the current object while requiring - // write quorum, but also consider that a non - // found object in a given disk as a success - // since it already confirms that the object - // doesn't have a part in that disk (already removed) - if isDirs[idx] { - _, err = rename(ctx, xl.getDisks(), bucket, objects[idx], - minioMetaTmpBucket, tmpObjs[idx], true, writeQuorums[idx], - []error{errFileNotFound, errFileAccessDenied}) - } else { - _, err = rename(ctx, xl.getDisks(), bucket, objects[idx], - minioMetaTmpBucket, tmpObjs[idx], true, writeQuorums[idx], - []error{errFileNotFound}) - } - if err != nil { - errs[idx] = err - } - ObjectPathUpdated(path.Join(bucket, objects[idx])) - } - } - - disks := xl.getDisks() - - // Initialize list of errors. 
- var opErrs = make([]error, len(disks)) - var delObjErrs = make([][]error, len(disks)) - var wg = sync.WaitGroup{} - - // Remove objects in bulk for each disk - for i, d := range disks { - if d == nil { - opErrs[i] = errDiskNotFound - continue - } - wg.Add(1) - go func(index int, disk StorageAPI) { - defer wg.Done() - delObjErrs[index], opErrs[index] = disk.DeletePrefixes(minioMetaTmpBucket, tmpObjs) - if opErrs[index] == errVolumeNotFound || opErrs[index] == errFileNotFound { - opErrs[index] = nil - } - }(i, d) - } - - wg.Wait() - - // Return errors if any during deletion - if err := reduceWriteQuorumErrs(ctx, opErrs, objectOpIgnoredErrs, len(disks)/2+1); err != nil { - return nil, err - } - - // Reduce errors for each object - for objIndex := range objects { - if errs[objIndex] != nil { - continue - } - listErrs := make([]error, len(disks)) - // Iterate over disks to fetch the error - // of deleting of the current object - for i := range delObjErrs { - // delObjErrs[i] is not nil when disks[i] is also not nil - if delObjErrs[i] != nil { - if delObjErrs[i][objIndex] != errFileNotFound { - listErrs[i] = delObjErrs[i][objIndex] - } - } - } - errs[objIndex] = reduceWriteQuorumErrs(ctx, listErrs, objectOpIgnoredErrs, writeQuorums[objIndex]) - } - - return errs, nil -} - -func (xl xlObjects) deleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +// DeleteObjects deletes objects/versions in bulk. This function will still automatically split the +// objects list into smaller batches if some object names are duplicated in the delete list, since +// splitting avoids holding the write lock of a duplicated object name twice. +func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { errs := make([]error, len(objects)) + dobjects := make([]DeletedObject, len(objects)) writeQuorums := make([]int, len(objects)) - isObjectDirs := make([]bool, len(objects)) for i, object := range objects { - errs[i] = checkDelObjArgs(ctx, bucket, object) + errs[i] = checkDelObjArgs(ctx, bucket, object.ObjectName) } - for i, object := range objects { - isObjectDirs[i] = HasSuffix(object, SlashSeparator) - } - - storageDisks := xl.getDisks() - - for i, object := range objects { - if isObjectDirs[i] { - _, err := xl.getObjectInfoDir(ctx, bucket, object) - if err == errXLReadQuorum { - if isObjectDirDangling(statAllDirs(ctx, storageDisks, bucket, object)) { - // If object is indeed dangling, purge it. - errs[i] = nil - } - } - if err != nil { - errs[i] = toObjectErr(err, bucket, object) - continue - } - } - } + storageDisks := er.getDisks() for i := range objects { if errs[i] != nil { @@ -878,167 +822,175 @@ func (xl xlObjects) deleteObjects(ctx context.Context, bucket string, objects [] writeQuorums[i] = getWriteQuorum(len(storageDisks)) } - return xl.doDeleteObjects(ctx, bucket, objects, errs, writeQuorums, isObjectDirs) -} - -// DeleteObjects deletes objects in bulk, this function will still automatically split objects list -// into smaller bulks if some object names are found to be duplicated in the delete list, splitting -// into smaller bulks will avoid holding twice the write lock of the duplicated object names.
-func (xl xlObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { - - var ( - i, start, end int - // Deletion result for all objects - deleteErrs []error - // Object names store will be used to check for object name duplication - objectNamesStore = make(map[string]interface{}) - ) - - for { - if i >= len(objects) { - break - } - - object := objects[i] - - _, duplicationFound := objectNamesStore[object] - if duplicationFound { - end = i - 1 - } else { - objectNamesStore[object] = true - end = i - } - - if duplicationFound || i == len(objects)-1 { - errs, err := xl.deleteObjects(ctx, bucket, objects[start:end+1]) - if err != nil { - return nil, err + versions := make([]FileInfo, len(objects)) + for i := range objects { + if objects[i].VersionID == "" { + if opts.Versioned && !HasSuffix(objects[i].ObjectName, SlashSeparator) { + versions[i] = FileInfo{ + Name: objects[i].ObjectName, + VersionID: mustGetUUID(), + ModTime: UTCNow(), + Deleted: true, // delete marker + } + continue } - deleteErrs = append(deleteErrs, errs...) - objectNamesStore = make(map[string]interface{}) } - - if duplicationFound { - // Avoid to increase the index if object - // name is found to be duplicated. - start = i - } else { - i++ + versions[i] = FileInfo{ + Name: objects[i].ObjectName, + VersionID: objects[i].VersionID, } } - return deleteErrs, nil + // Initialize list of errors. + var opErrs = make([]error, len(storageDisks)) + var delObjErrs = make([][]error, len(storageDisks)) + + var wg sync.WaitGroup + // Remove versions in bulk for each disk + for index, disk := range storageDisks { + if disk == nil { + opErrs[index] = errDiskNotFound + continue + } + wg.Add(1) + go func(index int, disk StorageAPI) { + defer wg.Done() + delObjErrs[index] = disk.DeleteVersions(bucket, versions) + }(index, disk) + } + + wg.Wait() + + // Reduce errors for each object + for objIndex := range objects { + if errs[objIndex] != nil { + continue + } + diskErrs := make([]error, len(storageDisks)) + // Iterate over disks to fetch the error + // of deleting of the current object + for i := range delObjErrs { + // delObjErrs[i] is not nil when disks[i] is also not nil + if delObjErrs[i] != nil { + if delObjErrs[i][objIndex] != errFileNotFound { + diskErrs[i] = delObjErrs[i][objIndex] + } + } + } + errs[objIndex] = reduceWriteQuorumErrs(ctx, diskErrs, objectOpIgnoredErrs, writeQuorums[objIndex]) + if errs[objIndex] == nil { + if versions[objIndex].Deleted { + dobjects[objIndex] = DeletedObject{ + DeleteMarker: versions[objIndex].Deleted, + DeleteMarkerVersionID: versions[objIndex].VersionID, + ObjectName: versions[objIndex].Name, + } + } else { + dobjects[objIndex] = DeletedObject{ + ObjectName: versions[objIndex].Name, + VersionID: versions[objIndex].VersionID, + } + } + } + } + + return dobjects, errs } // DeleteObject - deletes an object, this call doesn't necessary reply // any error as it is not necessary for the handler to reply back a // response to the client request. 
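DeleteVersions above returns one error slice per disk, so the per-object outcome is computed by transposing the disk-by-object error matrix and quorum-reducing each column. A simplified, self-contained model of that reduction (reduceQuorumErrs stands in for reduceWriteQuorumErrs and skips the ignorable-error handling of the real helper):

package main

import (
	"errors"
	"fmt"
)

var errWriteQuorum = errors.New("write quorum not met")

// reduceQuorumErrs succeeds when at least quorum of the per-disk errors are
// nil, otherwise it reports errWriteQuorum.
func reduceQuorumErrs(errs []error, quorum int) error {
	nils := 0
	for _, err := range errs {
		if err == nil {
			nils++
		}
	}
	if nils >= quorum {
		return nil
	}
	return errWriteQuorum
}

func main() {
	errFault := errors.New("disk fault")
	// delObjErrs[disk][object], as filled in by DeleteVersions.
	delObjErrs := [][]error{
		{nil, errFault, nil}, // disk 0
		{nil, errFault, nil}, // disk 1
		{nil, nil, errFault}, // disk 2
	}
	for obj := 0; obj < 3; obj++ {
		perDisk := make([]error, len(delObjErrs))
		for d := range delObjErrs {
			perDisk[d] = delObjErrs[d][obj]
		}
		fmt.Printf("object %d: %v\n", obj, reduceQuorumErrs(perDisk, 2))
	}
}

Note that in the patch an errFileNotFound from a disk is dropped before the reduction, since a version already missing on a disk is as good as deleted there.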
-func (xl xlObjects) DeleteObject(ctx context.Context, bucket, object string) (err error) { +func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { if err = checkDelObjArgs(ctx, bucket, object); err != nil { - return err + return objInfo, err } - var writeQuorum int - var isObjectDir = HasSuffix(object, SlashSeparator) + storageDisks := er.getDisks() + writeQuorum := len(storageDisks)/2 + 1 - storageDisks := xl.getDisks() - - if isObjectDir { - _, err = xl.getObjectInfoDir(ctx, bucket, object) - if err == errXLReadQuorum { - if isObjectDirDangling(statAllDirs(ctx, storageDisks, bucket, object)) { - // If object is indeed dangling, purge it. - err = nil + if opts.VersionID == "" { + if opts.Versioned && !HasSuffix(object, SlashSeparator) { + fi := FileInfo{ + Name: object, + VersionID: mustGetUUID(), + Deleted: true, + ModTime: UTCNow(), } - } - if err != nil { - return toObjectErr(err, bucket, object) + // Add delete marker, since we don't have any version specified explicitly. + if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi); err != nil { + return objInfo, toObjectErr(err, bucket, object) + } + return fi.ToObjectInfo(bucket, object), nil } } - if isObjectDir { - writeQuorum = getWriteQuorum(len(storageDisks)) - } else { - // Read metadata associated with the object from all disks. - partsMetadata, errs := readAllXLMetadata(ctx, storageDisks, bucket, object) - // get Quorum for this object - _, writeQuorum, err = objectQuorumFromMeta(ctx, xl, partsMetadata, errs) - if err != nil { - return toObjectErr(err, bucket, object) - } + // Delete the object version on all disks. + if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, FileInfo{ + Name: object, + VersionID: opts.VersionID, + }); err != nil { + return objInfo, toObjectErr(err, bucket, object) } - // Delete the object on all disks. - if err = xl.deleteObject(ctx, bucket, object, writeQuorum, isObjectDir); err != nil { - return toObjectErr(err, bucket, object) - } - - // Success. - return nil -} - -// ListObjectsV2 lists all blobs in bucket filtered by prefix -func (xl xlObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { - marker := continuationToken - if marker == "" { - marker = startAfter - } - - loi, err := xl.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) - if err != nil { - return result, err - } - - listObjectsV2Info := ListObjectsV2Info{ - IsTruncated: loi.IsTruncated, - ContinuationToken: continuationToken, - NextContinuationToken: loi.NextMarker, - Objects: loi.Objects, - Prefixes: loi.Prefixes, - } - return listObjectsV2Info, err + return ObjectInfo{Bucket: bucket, Name: object, VersionID: opts.VersionID}, nil } // Send the successful but partial upload, however ignore // if the channel is blocked by other items. 
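Both delete paths reduce to a single version decision, isolated in the sketch below: on a versioned bucket, a delete without an explicit version id writes a delete marker under a fresh version id (per S3 semantics), while a delete that names a version removes exactly that version. FileInfo here is a trimmed stand-in for the patch's type, and newUUID is injected only to keep the example deterministic:

package main

import (
	"fmt"
	"strings"
	"time"
)

// FileInfo is a cut-down model of cmd.FileInfo with just the fields the
// delete path needs.
type FileInfo struct {
	Name      string
	VersionID string
	ModTime   time.Time
	Deleted   bool // true means this entry is a delete marker
}

// planDelete mirrors the version selection in DeleteObject/DeleteObjects.
func planDelete(object, versionID string, versioned bool, newUUID func() string) FileInfo {
	if versionID == "" && versioned && !strings.HasSuffix(object, "/") {
		return FileInfo{
			Name:      object,
			VersionID: newUUID(),
			ModTime:   time.Now().UTC(),
			Deleted:   true,
		}
	}
	return FileInfo{Name: object, VersionID: versionID}
}

func main() {
	uuid := func() string { return "11111111-2222-3333-4444-555555555555" } // illustrative
	fmt.Printf("%+v\n", planDelete("photos/cat.png", "", true, uuid))   // delete marker
	fmt.Printf("%+v\n", planDelete("photos/cat.png", "v1", true, uuid)) // hard delete of "v1"
}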
-func (xl xlObjects) addPartialUpload(bucket, key string) { +func (er erasureObjects) addPartialUpload(bucket, key string) { select { - case xl.mrfUploadCh <- partialUpload{bucket: bucket, object: key}: + case er.mrfUploadCh <- partialUpload{bucket: bucket, object: key}: default: } } // PutObjectTags - replace or add tags to an existing object -func (xl xlObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string) error { - disks := xl.getDisks() +func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error { + disks := er.getDisks() // Read metadata associated with the object from all disks. - metaArr, errs := readAllXLMetadata(ctx, disks, bucket, object) + metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID) - _, writeQuorum, err := objectQuorumFromMeta(ctx, xl, metaArr, errs) + readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, er, metaArr, errs) if err != nil { return toObjectErr(err, bucket, object) } - for i, xlMeta := range metaArr { - // clean xlMeta.Meta of tag key, before updating the new tags - delete(xlMeta.Meta, xhttp.AmzObjectTagging) + // List all online disks. + _, modTime := listOnlineDisks(disks, metaArr, errs) + + // Pick latest valid metadata. + fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum) + if err != nil { + return toObjectErr(err, bucket, object) + } + + if fi.Deleted { + if opts.VersionID == "" { + return toObjectErr(errFileNotFound, bucket, object) + } + return toObjectErr(errMethodNotAllowed, bucket, object) + } + + for i, fi := range metaArr { + // Clean fi.Metadata of the tag key before applying the new tags + delete(fi.Metadata, xhttp.AmzObjectTagging) // Don't update for empty tags if tags != "" { - xlMeta.Meta[xhttp.AmzObjectTagging] = tags + fi.Metadata[xhttp.AmzObjectTagging] = tags } - metaArr[i].Meta = xlMeta.Meta + metaArr[i].Metadata = fi.Metadata } tempObj := mustGetUUID() - // Write unique `xl.json` for each disk. - if disks, err = writeUniqueXLMetadata(ctx, disks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil { + // Write unique `xl.meta` for each disk. + if disks, err = writeUniqueFileInfo(ctx, disks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil { return toObjectErr(err, bucket, object) } - // Atomically rename `xl.json` from tmp location to destination for each disk. + // Atomically rename metadata from tmp location to destination for each disk.
+ if _, err = renameFileInfo(ctx, disks, minioMetaTmpBucket, tempObj, bucket, object, writeQuorum); err != nil { return toObjectErr(err, bucket, object) } @@ -1046,14 +998,14 @@ func (xl xlObjects) PutObjectTags(ctx context.Context, bucket, object string, ta } // DeleteObjectTags - delete object tags from an existing object -func (xl xlObjects) DeleteObjectTags(ctx context.Context, bucket, object string) error { - return xl.PutObjectTags(ctx, bucket, object, "") +func (er erasureObjects) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error { + return er.PutObjectTags(ctx, bucket, object, "", opts) } // GetObjectTags - get object tags from an existing object -func (xl xlObjects) GetObjectTags(ctx context.Context, bucket, object string) (*tags.Tags, error) { +func (er erasureObjects) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) { // GetObjectInfo will return tag value as well - oi, err := xl.GetObjectInfo(ctx, bucket, object, ObjectOptions{}) + oi, err := er.GetObjectInfo(ctx, bucket, object, opts) if err != nil { return nil, err } diff --git a/cmd/xl-v1-object_test.go b/cmd/erasure-object_test.go similarity index 55% rename from cmd/xl-v1-object_test.go rename to cmd/erasure-object_test.go index 1e7dfac3d..bf33b6368 100644 --- a/cmd/xl-v1-object_test.go +++ b/cmd/erasure-object_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,16 +20,11 @@ import ( "bytes" "context" "io/ioutil" - "math/rand" "os" - "path" - "reflect" "testing" - "time" humanize "github.com/dustin/go-humanize" "github.com/minio/minio/cmd/config/storageclass" - "github.com/minio/minio/pkg/madmin" ) func TestRepeatPutObjectPart(t *testing.T) { @@ -41,7 +36,7 @@ func TestRepeatPutObjectPart(t *testing.T) { var err error var opts ObjectOptions - objLayer, disks, err = prepareXL16(ctx) + objLayer, disks, err = prepareErasure16(ctx) if err != nil { t.Fatal(err) } @@ -49,7 +44,7 @@ func TestRepeatPutObjectPart(t *testing.T) { // cleaning up of temporary test directories defer removeRoots(disks) - err = objLayer.MakeBucketWithLocation(ctx, "bucket1", "", false) + err = objLayer.MakeBucketWithLocation(ctx, "bucket1", BucketOptions{}) if err != nil { t.Fatal(err) } @@ -71,7 +66,7 @@ func TestRepeatPutObjectPart(t *testing.T) { } } -func TestXLDeleteObjectBasic(t *testing.T) { +func TestErasureDeleteObjectBasic(t *testing.T) { testCases := []struct { bucket string object string @@ -91,12 +86,12 @@ func TestXLDeleteObjectBasic(t *testing.T) { defer cancel() // Create an instance of xl backend - xl, fsDirs, err := prepareXL16(ctx) + xl, fsDirs, err := prepareErasure16(ctx) if err != nil { t.Fatal(err) } - err = xl.MakeBucketWithLocation(ctx, "bucket", "", false) + err = xl.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) if err != nil { t.Fatal(err) } @@ -104,40 +99,43 @@ func TestXLDeleteObjectBasic(t *testing.T) { // Create object "dir/obj" under bucket "bucket" for Test 7 to pass _, err = xl.PutObject(ctx, "bucket", "dir/obj", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}) if err != nil { - t.Fatalf("XL Object upload failed: %s", err) + t.Fatalf("Erasure Object upload failed: %s", err) } - for i, test := range testCases { - actualErr := xl.DeleteObject(ctx, test.bucket, 
test.object) - if test.expectedErr != nil && actualErr != test.expectedErr { - t.Errorf("Test %d: Expected to fail with %s, but failed with %s", i+1, test.expectedErr, actualErr) - } - if test.expectedErr == nil && actualErr != nil { - t.Errorf("Test %d: Expected to pass, but failed with %s", i+1, actualErr) - } + for _, test := range testCases { + test := test + t.Run("", func(t *testing.T) { + _, actualErr := xl.DeleteObject(ctx, test.bucket, test.object, ObjectOptions{}) + if test.expectedErr != nil && actualErr != test.expectedErr { + t.Errorf("Expected to fail with %s, but failed with %s", test.expectedErr, actualErr) + } + if test.expectedErr == nil && actualErr != nil { + t.Errorf("Expected to pass, but failed with %s", actualErr) + } + }) } // Cleanup backend directories removeRoots(fsDirs) } -func TestXLDeleteObjectsXLSet(t *testing.T) { +func TestErasureDeleteObjectsErasureSet(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var objs []*xlObjects + var objs []*erasureObjects for i := 0; i < 32; i++ { - obj, fsDirs, err := prepareXL(ctx, 16) + obj, fsDirs, err := prepareErasure(ctx, 16) if err != nil { - t.Fatal("Unable to initialize 'XL' object layer.", err) + t.Fatal("Unable to initialize 'Erasure' object layer.", err) } // Remove all dirs. for _, dir := range fsDirs { defer os.RemoveAll(dir) } - z := obj.(*xlZones) + z := obj.(*erasureZones) xl := z.zones[0].sets[0] objs = append(objs, xl) } - xlSets := &xlSets{sets: objs, distributionAlgo: "CRCMOD"} + erasureSets := &erasureSets{sets: objs, distributionAlgo: "CRCMOD"} type testCaseType struct { bucket string @@ -152,32 +150,29 @@ func TestXLDeleteObjectsXLSet(t *testing.T) { {bucketName, "obj_4"}, } - err := xlSets.MakeBucketWithLocation(GlobalContext, bucketName, "", false) + err := erasureSets.MakeBucketWithLocation(ctx, bucketName, BucketOptions{}) if err != nil { t.Fatal(err) } for _, testCase := range testCases { - _, err = xlSets.PutObject(GlobalContext, testCase.bucket, testCase.object, + _, err = erasureSets.PutObject(ctx, testCase.bucket, testCase.object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}) if err != nil { - t.Fatalf("XL Object upload failed: %s", err) + t.Fatalf("Erasure Object upload failed: %s", err) } } - toObjectNames := func(testCases []testCaseType) []string { - names := make([]string, len(testCases)) + toObjectNames := func(testCases []testCaseType) []ObjectToDelete { + names := make([]ObjectToDelete, len(testCases)) for i := range testCases { - names[i] = testCases[i].object + names[i] = ObjectToDelete{ObjectName: testCases[i].object} } return names } objectNames := toObjectNames(testCases) - delErrs, err := xlSets.DeleteObjects(GlobalContext, bucketName, objectNames) - if err != nil { - t.Errorf("Failed to call DeleteObjects with the error: `%v`", err) - } + _, delErrs := erasureSets.DeleteObjects(ctx, bucketName, objectNames, ObjectOptions{}) for i := range delErrs { if delErrs[i] != nil { @@ -186,7 +181,7 @@ func TestXLDeleteObjectsXLSet(t *testing.T) { } for _, test := range testCases { - _, statErr := xlSets.GetObjectInfo(GlobalContext, test.bucket, test.object, ObjectOptions{}) + _, statErr := erasureSets.GetObjectInfo(ctx, test.bucket, test.object, ObjectOptions{}) switch statErr.(type) { case ObjectNotFound: default: @@ -195,23 +190,23 @@ func TestXLDeleteObjectsXLSet(t *testing.T) { } } -func TestXLDeleteObjectDiskNotFound(t *testing.T) { +func TestErasureDeleteObjectDiskNotFound(t *testing.T) { 
ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Create an instance of xl backend. - obj, fsDirs, err := prepareXL16(ctx) + obj, fsDirs, err := prepareErasure16(ctx) if err != nil { t.Fatal(err) } // Cleanup backend directories defer removeRoots(fsDirs) - z := obj.(*xlZones) + z := obj.(*erasureZones) xl := z.zones[0].sets[0] // Create "bucket" - err = obj.MakeBucketWithLocation(ctx, "bucket", "", false) + err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) if err != nil { t.Fatal(err) } @@ -226,16 +221,17 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) { } // for a 16 disk setup, quorum is 9. To simulate disks not found yet // quorum is available, we remove disks leaving quorum disks behind. - xlDisks := xl.getDisks() - z.zones[0].xlDisksMu.Lock() + erasureDisks := xl.getDisks() + z.zones[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { - for i := range xlDisks[:7] { - xlDisks[i] = newNaughtyDisk(xlDisks[i], nil, errFaultyDisk) + for i := range erasureDisks[:7] { + erasureDisks[i] = newNaughtyDisk(erasureDisks[i], nil, errFaultyDisk) } - return xlDisks + return erasureDisks } - z.zones[0].xlDisksMu.Unlock() - err = obj.DeleteObject(ctx, bucket, object) + + z.zones[0].erasureDisksMu.Unlock() + _, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{}) if err != nil { t.Fatal(err) } @@ -247,18 +243,19 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) { } // Remove one more disk to 'lose' quorum, by setting it to nil. - xlDisks = xl.getDisks() - z.zones[0].xlDisksMu.Lock() + erasureDisks = xl.getDisks() + z.zones[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { - xlDisks[7] = nil - xlDisks[8] = nil - return xlDisks + erasureDisks[7] = nil + erasureDisks[8] = nil + return erasureDisks } - z.zones[0].xlDisksMu.Unlock() - err = obj.DeleteObject(ctx, bucket, object) - // since majority of disks are not available, metaquorum is not achieved and hence errXLReadQuorum error - if err != toObjectErr(errXLReadQuorum, bucket, object) { - t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errXLReadQuorum, bucket, object), err) + + z.zones[0].erasureDisksMu.Unlock() + _, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{}) + // since majority of disks are not available, metaquorum is not achieved and hence errErasureWriteQuorum error + if err != toObjectErr(errErasureWriteQuorum, bucket, object) { + t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err) } } @@ -267,18 +264,18 @@ func TestGetObjectNoQuorum(t *testing.T) { defer cancel() // Create an instance of xl backend. - obj, fsDirs, err := prepareXL16(ctx) + obj, fsDirs, err := prepareErasure16(ctx) if err != nil { t.Fatal(err) } // Cleanup backend directories. defer removeRoots(fsDirs) - z := obj.(*xlZones) + z := obj.(*erasureZones) xl := z.zones[0].sets[0] // Create "bucket" - err = obj.MakeBucketWithLocation(ctx, "bucket", "", false) + err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) if err != nil { t.Fatal(err) } @@ -293,7 +290,7 @@ func TestGetObjectNoQuorum(t *testing.T) { } // Make 9 disks offline, which leaves less than quorum number of disks - // in a 16 disk XL setup. The original disks are 'replaced' with + // in a 16 disk Erasure setup. 
The original disks are 'replaced' with // naughtyDisks that fail after 'f' successful StorageAPI method // invocations, where f - [0,2) for f := 0; f < 2; f++ { @@ -301,24 +298,24 @@ func TestGetObjectNoQuorum(t *testing.T) { for i := 0; i <= f; i++ { diskErrors[i] = nil } - xlDisks := xl.getDisks() - for i := range xlDisks[:9] { - switch diskType := xlDisks[i].(type) { + erasureDisks := xl.getDisks() + for i := range erasureDisks[:9] { + switch diskType := erasureDisks[i].(type) { case *naughtyDisk: - xlDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk) + erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk) default: - xlDisks[i] = newNaughtyDisk(xlDisks[i], diskErrors, errFaultyDisk) + erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk) } } - z.zones[0].xlDisksMu.Lock() + z.zones[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { - return xlDisks + return erasureDisks } - z.zones[0].xlDisksMu.Unlock() + z.zones[0].erasureDisksMu.Unlock() // Fetch object from store. err = xl.GetObject(ctx, bucket, object, 0, int64(len("abcd")), ioutil.Discard, "", opts) - if err != toObjectErr(errXLReadQuorum, bucket, object) { - t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err) + if err != toObjectErr(errErasureReadQuorum, bucket, object) { + t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err) } } } @@ -328,7 +325,7 @@ func TestPutObjectNoQuorum(t *testing.T) { defer cancel() // Create an instance of xl backend. - obj, fsDirs, err := prepareXL16(ctx) + obj, fsDirs, err := prepareErasure16(ctx) if err != nil { t.Fatal(err) } @@ -336,11 +333,11 @@ func TestPutObjectNoQuorum(t *testing.T) { // Cleanup backend directories. defer removeRoots(fsDirs) - z := obj.(*xlZones) + z := obj.(*erasureZones) xl := z.zones[0].sets[0] // Create "bucket" - err = obj.MakeBucketWithLocation(ctx, "bucket", "", false) + err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{}) if err != nil { t.Fatal(err) } @@ -355,7 +352,7 @@ func TestPutObjectNoQuorum(t *testing.T) { } // Make 9 disks offline, which leaves less than quorum number of disks - // in a 16 disk XL setup. The original disks are 'replaced' with + // in a 16 disk Erasure setup. 
The original disks are 'replaced' with // naughtyDisks that fail after 'f' successful StorageAPI method // invocations, where f - [0,3) for f := 0; f < 3; f++ { @@ -363,143 +360,38 @@ func TestPutObjectNoQuorum(t *testing.T) { for i := 0; i <= f; i++ { diskErrors[i] = nil } - xlDisks := xl.getDisks() - for i := range xlDisks[:9] { - switch diskType := xlDisks[i].(type) { + erasureDisks := xl.getDisks() + for i := range erasureDisks[:9] { + switch diskType := erasureDisks[i].(type) { case *naughtyDisk: - xlDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk) + erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk) default: - xlDisks[i] = newNaughtyDisk(xlDisks[i], diskErrors, errFaultyDisk) + erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk) } } - z.zones[0].xlDisksMu.Lock() + z.zones[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { - return xlDisks + return erasureDisks } - z.zones[0].xlDisksMu.Unlock() + z.zones[0].erasureDisksMu.Unlock() // Upload new content to same object "object" _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts) - if err != toObjectErr(errXLWriteQuorum, bucket, object) { - t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err) + if err != toObjectErr(errErasureWriteQuorum, bucket, object) { + t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err) } } } -// Tests both object and bucket healing. -func TestHealing(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - obj, fsDirs, err := prepareXL16(ctx) - if err != nil { - t.Fatal(err) - } - defer removeRoots(fsDirs) - - z := obj.(*xlZones) - xl := z.zones[0].sets[0] - - // Create "bucket" - err = obj.MakeBucketWithLocation(ctx, "bucket", "", false) - if err != nil { - t.Fatal(err) - } - - bucket := "bucket" - object := "object" - - data := make([]byte, 1*humanize.MiByte) - length := int64(len(data)) - _, err = rand.Read(data) - if err != nil { - t.Fatal(err) - } - - _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), length, "", ""), ObjectOptions{}) - if err != nil { - t.Fatal(err) - } - - disk := xl.getDisks()[0] - xlMetaPreHeal, err := readXLMeta(ctx, disk, bucket, object) - if err != nil { - t.Fatal(err) - } - - // Remove the object - to simulate the case where the disk was down when the object - // was created. - err = os.RemoveAll(path.Join(fsDirs[0], bucket, object)) - if err != nil { - t.Fatal(err) - } - - _, err = xl.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan}) - if err != nil { - t.Fatal(err) - } - - xlMetaPostHeal, err := readXLMeta(ctx, disk, bucket, object) - if err != nil { - t.Fatal(err) - } - - // After heal the meta file should be as expected. - if !reflect.DeepEqual(xlMetaPreHeal, xlMetaPostHeal) { - t.Fatal("HealObject failed") - } - - err = os.RemoveAll(path.Join(fsDirs[0], bucket, object, "xl.json")) - if err != nil { - t.Fatal(err) - } - - // Write xl.json with different modtime to simulate the case where a disk had - // gone down when an object was replaced by a new object. 
- xlMetaOutDated := xlMetaPreHeal - xlMetaOutDated.Stat.ModTime = time.Now() - err = writeXLMetadata(ctx, disk, bucket, object, xlMetaOutDated) - if err != nil { - t.Fatal(err) - } - - _, err = xl.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealDeepScan}) - if err != nil { - t.Fatal(err) - } - - xlMetaPostHeal, err = readXLMeta(ctx, disk, bucket, object) - if err != nil { - t.Fatal(err) - } - - // After heal the meta file should be as expected. - if !reflect.DeepEqual(xlMetaPreHeal, xlMetaPostHeal) { - t.Fatal("HealObject failed") - } - - // Remove the bucket - to simulate the case where bucket was - // created when the disk was down. - err = os.RemoveAll(path.Join(fsDirs[0], bucket)) - if err != nil { - t.Fatal(err) - } - // This would create the bucket. - _, err = xl.HealBucket(ctx, bucket, false, false) - if err != nil { - t.Fatal(err) - } - // Stat the bucket to make sure that it was created. - _, err = xl.getDisks()[0].StatVol(bucket) - if err != nil { - t.Fatal(err) - } -} - func TestObjectQuorumFromMeta(t *testing.T) { ExecObjectLayerTestWithDirs(t, testObjectQuorumFromMeta) } func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string, t TestErrHandler) { + restoreGlobalStorageClass := globalStorageClass + defer func() { + globalStorageClass = restoreGlobalStorageClass + }() + bucket := getRandomBucketName() var opts ObjectOptions @@ -507,45 +399,48 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin partCount := 3 data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount) - z := obj.(*xlZones) + z := obj.(*erasureZones) xl := z.zones[0].sets[0] - xlDisks := xl.getDisks() + erasureDisks := xl.getDisks() - err := obj.MakeBucketWithLocation(GlobalContext, bucket, globalMinioDefaultRegion, false) + ctx, cancel := context.WithCancel(GlobalContext) + defer cancel() + + err := obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{}) if err != nil { t.Fatalf("Failed to make a bucket %v", err) } // Object for test case 1 - No StorageClass defined, no MetaData in PutObject object1 := "object1" - _, err = obj.PutObject(GlobalContext, bucket, object1, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts) + _, err = obj.PutObject(ctx, bucket, object1, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts) if err != nil { t.Fatalf("Failed to putObject %v", err) } - parts1, errs1 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object1) + parts1, errs1 := readAllFileInfo(ctx, erasureDisks, bucket, object1, "") // Object for test case 2 - No StorageClass defined, MetaData in PutObject requesting RRS Class object2 := "object2" metadata2 := make(map[string]string) metadata2["x-amz-storage-class"] = storageclass.RRS - _, err = obj.PutObject(GlobalContext, bucket, object2, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata2}) + _, err = obj.PutObject(ctx, bucket, object2, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata2}) if err != nil { t.Fatalf("Failed to putObject %v", err) } - parts2, errs2 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object2) + parts2, errs2 := readAllFileInfo(ctx, erasureDisks, bucket, object2, "") // Object for test case 3 - No StorageClass defined, MetaData in PutObject requesting Standard Storage Class object3 := "object3" metadata3 := make(map[string]string) metadata3["x-amz-storage-class"] = storageclass.STANDARD - _, err = 
obj.PutObject(GlobalContext, bucket, object3, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata3}) + _, err = obj.PutObject(ctx, bucket, object3, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata3}) if err != nil { t.Fatalf("Failed to putObject %v", err) } - parts3, errs3 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object3) + parts3, errs3 := readAllFileInfo(ctx, erasureDisks, bucket, object3, "") // Object for test case 4 - Standard StorageClass defined as Parity 6, MetaData in PutObject requesting Standard Storage Class object4 := "object4" @@ -557,12 +452,12 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin }, } - _, err = obj.PutObject(GlobalContext, bucket, object4, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata4}) + _, err = obj.PutObject(ctx, bucket, object4, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata4}) if err != nil { t.Fatalf("Failed to putObject %v", err) } - parts4, errs4 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object4) + parts4, errs4 := readAllFileInfo(ctx, erasureDisks, bucket, object4, "") // Object for test case 5 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting RRS Class // Reset global storage class flags @@ -575,12 +470,12 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin }, } - _, err = obj.PutObject(GlobalContext, bucket, object5, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata5}) + _, err = obj.PutObject(ctx, bucket, object5, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata5}) if err != nil { t.Fatalf("Failed to putObject %v", err) } - parts5, errs5 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object5) + parts5, errs5 := readAllFileInfo(ctx, erasureDisks, bucket, object5, "") // Object for test case 6 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting Standard Storage Class object6 := "object6" @@ -592,12 +487,12 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin }, } - _, err = obj.PutObject(GlobalContext, bucket, object6, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata6}) + _, err = obj.PutObject(ctx, bucket, object6, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata6}) if err != nil { t.Fatalf("Failed to putObject %v", err) } - parts6, errs6 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object6) + parts6, errs6 := readAllFileInfo(ctx, erasureDisks, bucket, object6, "") // Object for test case 7 - Standard StorageClass defined as Parity 5, MetaData in PutObject requesting RRS Class // Reset global storage class flags @@ -610,15 +505,15 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin }, } - _, err = obj.PutObject(GlobalContext, bucket, object7, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata7}) + _, err = obj.PutObject(ctx, bucket, object7, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata7}) if err != nil { t.Fatalf("Failed to putObject %v", err) } - 
parts7, errs7 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object7) + parts7, errs7 := readAllFileInfo(ctx, erasureDisks, bucket, object7, "") tests := []struct { - parts []xlMetaV1 + parts []FileInfo errs []error expectedReadQuorum int expectedWriteQuorum int @@ -632,23 +527,22 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin {parts6, errs6, 8, 9, nil}, {parts7, errs7, 14, 15, nil}, } - for i, tt := range tests { - actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(GlobalContext, *xl, tt.parts, tt.errs) - if tt.expectedError != nil && err == nil { - t.Errorf("Test %d, Expected %s, got %s", i+1, tt.expectedError, err) - return - } - if tt.expectedError == nil && err != nil { - t.Errorf("Test %d, Expected %s, got %s", i+1, tt.expectedError, err) - return - } - if tt.expectedReadQuorum != actualReadQuorum { - t.Errorf("Test %d, Expected Read Quorum %d, got %d", i+1, tt.expectedReadQuorum, actualReadQuorum) - return - } - if tt.expectedWriteQuorum != actualWriteQuorum { - t.Errorf("Test %d, Expected Write Quorum %d, got %d", i+1, tt.expectedWriteQuorum, actualWriteQuorum) - return - } + for _, tt := range tests { + tt := tt + t.(*testing.T).Run("", func(t *testing.T) { + actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(ctx, *xl, tt.parts, tt.errs) + if tt.expectedError != nil && err == nil { + t.Errorf("Expected %s, got %s", tt.expectedError, err) + } + if tt.expectedError == nil && err != nil { + t.Errorf("Expected %s, got %s", tt.expectedError, err) + } + if tt.expectedReadQuorum != actualReadQuorum { + t.Errorf("Expected Read Quorum %d, got %d", tt.expectedReadQuorum, actualReadQuorum) + } + if tt.expectedWriteQuorum != actualWriteQuorum { + t.Errorf("Expected Write Quorum %d, got %d", tt.expectedWriteQuorum, actualWriteQuorum) + } + }) } } diff --git a/cmd/xl-sets.go b/cmd/erasure-sets.go similarity index 66% rename from cmd/xl-sets.go rename to cmd/erasure-sets.go index f06f5b27c..2cbaa72f9 100644 --- a/cmd/xl-sets.go +++ b/cmd/erasure-sets.go @@ -23,13 +23,13 @@ import ( "io" "net/http" "sort" - "strings" "sync" "time" + "github.com/dchest/siphash" + "github.com/google/uuid" "github.com/minio/minio-go/v6/pkg/tags" "github.com/minio/minio/cmd/config/storageclass" - xhttp "github.com/minio/minio/cmd/http" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/bpool" "github.com/minio/minio/pkg/dsync" @@ -45,25 +45,25 @@ type diskConnectInfo struct { setIndex int } -// xlSets implements ObjectLayer combining a static list of erasure coded +// erasureSets implements ObjectLayer combining a static list of erasure coded // object sets. NOTE: There is no dynamic scaling allowed or intended in // current design. -type xlSets struct { +type erasureSets struct { GatewayUnsupported - sets []*xlObjects + sets []*erasureObjects // Reference format. - format *formatXLV3 + format *formatErasureV3 - // xlDisks mutex to lock xlDisks. - xlDisksMu sync.RWMutex + // erasureDisks mutex to lock erasureDisks. + erasureDisksMu sync.RWMutex // Re-ordered list of disks per set. - xlDisks [][]StorageAPI + erasureDisks [][]StorageAPI // Distributed locker clients. - xlLockers setsDsyncLockers + erasureLockers setsDsyncLockers // List of endpoints provided on the command line. endpoints Endpoints @@ -83,15 +83,17 @@ type xlSets struct { // Distribution algorithm of choice. 
distributionAlgo string + deploymentID [16]byte disksStorageInfoCache timedValue // Merge tree walk - pool *MergeWalkPool - poolSplunk *MergeWalkPool + pool *MergeWalkPool + poolSplunk *MergeWalkPool + poolVersions *MergeWalkVersionsPool mrfMU sync.Mutex - mrfUploads map[string]int + mrfUploads map[healSource]int } func isEndpointConnected(diskMap map[string]StorageAPI, endpoint string) bool { @@ -102,15 +104,15 @@ func isEndpointConnected(diskMap map[string]StorageAPI, endpoint string) bool { return disk.IsOnline() } -func (s *xlSets) getDiskMap() map[string]StorageAPI { +func (s *erasureSets) getDiskMap() map[string]StorageAPI { diskMap := make(map[string]StorageAPI) - s.xlDisksMu.RLock() - defer s.xlDisksMu.RUnlock() + s.erasureDisksMu.RLock() + defer s.erasureDisksMu.RUnlock() for i := 0; i < s.setCount; i++ { for j := 0; j < s.drivesPerSet; j++ { - disk := s.xlDisks[i][j] + disk := s.erasureDisks[i][j] if disk == nil { continue } @@ -125,13 +127,13 @@ func (s *xlSets) getDiskMap() map[string]StorageAPI { // Initializes a new StorageAPI from the endpoint argument, returns // StorageAPI and also `format` which exists on the disk. -func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatXLV3, error) { +func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatErasureV3, error) { disk, err := newStorageAPI(endpoint) if err != nil { return nil, nil, err } - format, err := loadFormatXL(disk) + format, err := loadFormatErasure(disk) if err != nil { // Close the internal connection to avoid connection leaks. disk.Close() @@ -145,13 +147,13 @@ func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatXLV3, error) { // format, after successful validation. // - i'th position is the set index // - j'th position is the disk index in the current set -func findDiskIndexByDiskID(refFormat *formatXLV3, diskID string) (int, int, error) { +func findDiskIndexByDiskID(refFormat *formatErasureV3, diskID string) (int, int, error) { if diskID == offlineDiskUUID { return -1, -1, fmt.Errorf("diskID: %s is offline", diskID) } - for i := 0; i < len(refFormat.XL.Sets); i++ { - for j := 0; j < len(refFormat.XL.Sets[0]); j++ { - if refFormat.XL.Sets[i][j] == diskID { + for i := 0; i < len(refFormat.Erasure.Sets); i++ { + for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ { + if refFormat.Erasure.Sets[i][j] == diskID { return i, j, nil } } @@ -164,29 +166,29 @@ func findDiskIndexByDiskID(refFormat *formatXLV3, diskID string) (int, int, erro // format, after successful validation. 
// - i'th position is the set index // - j'th position is the disk index in the current set -func findDiskIndex(refFormat, format *formatXLV3) (int, int, error) { - if err := formatXLV3Check(refFormat, format); err != nil { +func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) { + if err := formatErasureV3Check(refFormat, format); err != nil { return 0, 0, err } - if format.XL.This == offlineDiskUUID { - return -1, -1, fmt.Errorf("diskID: %s is offline", format.XL.This) + if format.Erasure.This == offlineDiskUUID { + return -1, -1, fmt.Errorf("diskID: %s is offline", format.Erasure.This) } - for i := 0; i < len(refFormat.XL.Sets); i++ { - for j := 0; j < len(refFormat.XL.Sets[0]); j++ { - if refFormat.XL.Sets[i][j] == format.XL.This { + for i := 0; i < len(refFormat.Erasure.Sets); i++ { + for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ { + if refFormat.Erasure.Sets[i][j] == format.Erasure.This { return i, j, nil } } } - return -1, -1, fmt.Errorf("diskID: %s not found", format.XL.This) + return -1, -1, fmt.Errorf("diskID: %s not found", format.Erasure.This) } // connectDisks - attempt to connect all the endpoints, loads format // and re-arranges the disks in proper position. -func (s *xlSets) connectDisks() { +func (s *erasureSets) connectDisks() { var wg sync.WaitGroup diskMap := s.getDiskMap() for _, endpoint := range s.endpoints { @@ -212,14 +214,14 @@ func (s *xlSets) connectDisks() { printEndpointError(endpoint, err) return } - disk.SetDiskID(format.XL.This) - s.xlDisksMu.Lock() - if s.xlDisks[setIndex][diskIndex] != nil { - s.xlDisks[setIndex][diskIndex].Close() + disk.SetDiskID(format.Erasure.This) + s.erasureDisksMu.Lock() + if s.erasureDisks[setIndex][diskIndex] != nil { + s.erasureDisks[setIndex][diskIndex].Close() } - s.xlDisks[setIndex][diskIndex] = disk + s.erasureDisks[setIndex][diskIndex] = disk s.endpointStrings[setIndex*s.drivesPerSet+diskIndex] = disk.String() - s.xlDisksMu.Unlock() + s.erasureDisksMu.Unlock() go func(setIndex int) { // Send a new disk connect event with a timeout select { @@ -235,7 +237,7 @@ func (s *xlSets) connectDisks() { // monitorAndConnectEndpoints this is a monitoring loop to keep track of disconnected // endpoints by reconnecting them and making sure to place them into right position in // the set topology, this monitoring happens at a given monitoring interval. 
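connectDisks depends on findDiskIndexByDiskID and findDiskIndex to slot a freshly connected endpoint back into its (set index, disk index) position in the reference format. A minimal model of that lookup (the set layout and UUIDs are illustrative, and the real function also special-cases offlineDiskUUID before scanning):

package main

import "fmt"

// findDiskIndexByDiskID scans the reference format's set layout for a disk
// UUID and returns its (set, disk) coordinates.
func findDiskIndexByDiskID(sets [][]string, diskID string) (int, int, error) {
	for i := range sets {
		for j := range sets[i] {
			if sets[i][j] == diskID {
				return i, j, nil
			}
		}
	}
	return -1, -1, fmt.Errorf("diskID: %s not found", diskID)
}

func main() {
	sets := [][]string{
		{"uuid-a", "uuid-b", "uuid-c", "uuid-d"},
		{"uuid-e", "uuid-f", "uuid-g", "uuid-h"},
	}
	fmt.Println(findDiskIndexByDiskID(sets, "uuid-f")) // 1 1 <nil>
}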
-func (s *xlSets) monitorAndConnectEndpoints(ctx context.Context, monitorInterval time.Duration) { +func (s *erasureSets) monitorAndConnectEndpoints(ctx context.Context, monitorInterval time.Duration) { for { select { case <-ctx.Done(): @@ -248,18 +250,18 @@ func (s *xlSets) monitorAndConnectEndpoints(ctx context.Context, monitorInterval } } -func (s *xlSets) GetLockers(setIndex int) func() []dsync.NetLocker { +func (s *erasureSets) GetLockers(setIndex int) func() []dsync.NetLocker { return func() []dsync.NetLocker { lockers := make([]dsync.NetLocker, s.drivesPerSet) - copy(lockers, s.xlLockers[setIndex]) + copy(lockers, s.erasureLockers[setIndex]) return lockers } } -func (s *xlSets) GetEndpoints(setIndex int) func() []string { +func (s *erasureSets) GetEndpoints(setIndex int) func() []string { return func() []string { - s.xlDisksMu.RLock() - defer s.xlDisksMu.RUnlock() + s.erasureDisksMu.RLock() + defer s.erasureDisksMu.RUnlock() eps := make([]string, s.drivesPerSet) for i := 0; i < s.drivesPerSet; i++ { @@ -270,12 +272,12 @@ func (s *xlSets) GetEndpoints(setIndex int) func() []string { } // GetDisks returns a closure for a given set, which provides list of disks per set. -func (s *xlSets) GetDisks(setIndex int) func() []StorageAPI { +func (s *erasureSets) GetDisks(setIndex int) func() []StorageAPI { return func() []StorageAPI { - s.xlDisksMu.RLock() - defer s.xlDisksMu.RUnlock() + s.erasureDisksMu.RLock() + defer s.erasureDisksMu.RUnlock() disks := make([]StorageAPI, s.drivesPerSet) - copy(disks, s.xlDisks[setIndex]) + copy(disks, s.erasureDisks[setIndex]) return disks } } @@ -283,46 +285,47 @@ func (s *xlSets) GetDisks(setIndex int) func() []StorageAPI { const defaultMonitorConnectEndpointInterval = time.Second * 10 // Set to 10 secs. // Initialize new set of erasure coded sets. -func newXLSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageAPI, format *formatXLV3) (*xlSets, error) { - setCount := len(format.XL.Sets) - drivesPerSet := len(format.XL.Sets[0]) +func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageAPI, format *formatErasureV3) (*erasureSets, error) { + setCount := len(format.Erasure.Sets) + drivesPerSet := len(format.Erasure.Sets[0]) endpointStrings := make([]string, len(endpoints)) - // Initialize the XL sets instance. - s := &xlSets{ - sets: make([]*xlObjects, setCount), - xlDisks: make([][]StorageAPI, setCount), - xlLockers: make([][]dsync.NetLocker, setCount), + // Initialize the erasure sets instance. + s := &erasureSets{ + sets: make([]*erasureObjects, setCount), + erasureDisks: make([][]StorageAPI, setCount), + erasureLockers: make([][]dsync.NetLocker, setCount), + endpoints: endpoints, + endpointStrings: endpointStrings, setCount: setCount, drivesPerSet: drivesPerSet, format: format, - endpoints: endpoints, - endpointStrings: endpointStrings, disksConnectEvent: make(chan diskConnectInfo), disksConnectDoneCh: make(chan struct{}), - distributionAlgo: format.XL.DistributionAlgo, + distributionAlgo: format.Erasure.DistributionAlgo, + deploymentID: uuid.MustParse(format.ID), pool: NewMergeWalkPool(globalMergeLookupTimeout), poolSplunk: NewMergeWalkPool(globalMergeLookupTimeout), - mrfUploads: make(map[string]int), + poolVersions: NewMergeWalkVersionsPool(globalMergeLookupTimeout), + mrfUploads: make(map[healSource]int), } - mutex := newNSLock(globalIsDistXL) + mutex := newNSLock(globalIsDistErasure) // Initialize byte pool once for all sets, bpool size is set to // setCount * drivesPerSet with each memory upto blockSizeV1. 
bp := bpool.NewBytePoolCap(setCount*drivesPerSet, blockSizeV1, blockSizeV1*2) for i := 0; i < setCount; i++ { - s.xlDisks[i] = make([]StorageAPI, drivesPerSet) - s.xlLockers[i] = make([]dsync.NetLocker, drivesPerSet) + s.erasureDisks[i] = make([]StorageAPI, drivesPerSet) + s.erasureLockers[i] = make([]dsync.NetLocker, drivesPerSet) } for i := 0; i < setCount; i++ { for j := 0; j < drivesPerSet; j++ { endpoint := endpoints[i*drivesPerSet+j] // Rely on endpoints list to initialize, init lockers and available disks. - s.xlLockers[i][j] = newLockAPI(endpoint) - + s.erasureLockers[i][j] = newLockAPI(endpoint) disk := storageDisks[i*drivesPerSet+j] if disk == nil { continue @@ -338,11 +341,11 @@ func newXLSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageA continue } s.endpointStrings[m*drivesPerSet+n] = disk.String() - s.xlDisks[m][n] = disk + s.erasureDisks[m][n] = disk } - // Initialize xl objects for a given set. - s.sets[i] = &xlObjects{ + // Initialize erasure objects for a given set. + s.sets[i] = &erasureObjects{ getDisks: s.GetDisks(i), getLockers: s.GetLockers(i), getEndpoints: s.GetEndpoints(i), @@ -350,9 +353,6 @@ func newXLSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageA bp: bp, mrfUploadCh: make(chan partialUpload, 10000), } - - go s.sets[i].cleanupStaleMultipartUploads(ctx, - GlobalMultipartCleanupInterval, GlobalMultipartExpiry, ctx.Done()) } // Start the disk monitoring and connect routine. @@ -364,7 +364,7 @@ func newXLSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageA } // NewNSLock - initialize a new namespace RWLocker instance. -func (s *xlSets) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker { +func (s *erasureSets) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker { if len(objects) == 1 { return s.getHashedSet(objects[0]).NewNSLock(ctx, bucket, objects...) } @@ -375,7 +375,7 @@ func (s *xlSets) NewNSLock(ctx context.Context, bucket string, objects ...string // This only returns disk usage info for Zones to perform placement decision, this call // is not implemented in Object interface and is not meant to be used by other object // layer implementations. -func (s *xlSets) StorageUsageInfo(ctx context.Context) StorageInfo { +func (s *erasureSets) StorageUsageInfo(ctx context.Context) StorageInfo { storageUsageInfo := func() StorageInfo { var storageInfo StorageInfo storageInfos := make([]StorageInfo, len(s.sets)) @@ -418,7 +418,7 @@ func (s *xlSets) StorageUsageInfo(ctx context.Context) StorageInfo { } // StorageInfo - combines output of StorageInfo across all erasure coded object sets. -func (s *xlSets) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { +func (s *erasureSets) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { var storageInfo StorageInfo storageInfos := make([]StorageInfo, len(s.sets)) @@ -516,14 +516,14 @@ func (s *xlSets) StorageInfo(ctx context.Context, local bool) (StorageInfo, []er return storageInfo, errs } -func (s *xlSets) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error { +func (s *erasureSets) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error { // Use the zone-level implementation instead. - return NotImplemented{} + return NotImplemented{API: "CrawlAndGetDataUsage"} } // Shutdown shutsdown all erasure coded sets in parallel // returns error upon first error. 
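GetDisks, GetLockers and GetEndpoints above hand out closures rather than raw slices, and every invocation copies the current state under the read lock, so the reconnect path can swap entries in s.erasureDisks without racing readers. A small self-contained sketch of that copy-under-RLock accessor (diskSet is a hypothetical stand-in type):

package main

import (
	"fmt"
	"sync"
)

// diskSet models the closure-based accessor of erasureSets: callers keep a
// getDisks func and each call returns a private snapshot of the slice.
type diskSet struct {
	mu    sync.RWMutex
	disks []string
}

func (s *diskSet) GetDisks() func() []string {
	return func() []string {
		s.mu.RLock()
		defer s.mu.RUnlock()
		out := make([]string, len(s.disks))
		copy(out, s.disks)
		return out
	}
}

func main() {
	s := &diskSet{disks: []string{"disk1", "disk2"}}
	get := s.GetDisks()
	fmt.Println(get()) // [disk1 disk2]

	// A "reconnect" swaps a disk in place under the write lock.
	s.mu.Lock()
	s.disks[1] = "disk2-reconnected"
	s.mu.Unlock()

	fmt.Println(get()) // [disk1 disk2-reconnected]
}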
-func (s *xlSets) Shutdown(ctx context.Context) error {
+func (s *erasureSets) Shutdown(ctx context.Context) error {
 	g := errgroup.WithNErrs(len(s.sets))

 	for index := range s.sets {
@@ -544,14 +544,14 @@ func (s *xlSets) Shutdown(ctx context.Context) error {

 // MakeBucketWithLocation - creates a new bucket across all sets simultaneously,
 // then returns the first encountered error.
-func (s *xlSets) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error {
+func (s *erasureSets) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error {
 	g := errgroup.WithNErrs(len(s.sets))

 	// Create buckets in parallel across all sets.
 	for index := range s.sets {
 		index := index
 		g.Go(func() error {
-			return s.sets[index].MakeBucketWithLocation(ctx, bucket, location, lockEnabled)
+			return s.sets[index].MakeBucketWithLocation(ctx, bucket, opts)
 		}, index)
 	}

@@ -571,7 +571,17 @@ func (s *xlSets) MakeBucketWithLocation(ctx context.Context, bucket, location st
 // hashes the key returning an integer based on the input algorithm.
 // This function currently supports
 // - CRCMOD
+// - SIPMOD
 // - all new algos.
+func sipHashMod(key string, cardinality int, id [16]byte) int {
+	if cardinality <= 0 {
+		return -1
+	}
+	sip := siphash.New(id[:])
+	sip.Write([]byte(key))
+	return int(sip.Sum64() % uint64(cardinality))
+}
+
 func crcHashMod(key string, cardinality int) int {
 	if cardinality <= 0 {
 		return -1
@@ -580,10 +590,12 @@ func crcHashMod(key string, cardinality int) int {
 	return int(keyCrc % uint32(cardinality))
 }

-func hashKey(algo string, key string, cardinality int) int {
+func hashKey(algo string, key string, cardinality int, id [16]byte) int {
 	switch algo {
-	case formatXLVersionV2DistributionAlgo:
+	case formatErasureVersionV2DistributionAlgoLegacy:
 		return crcHashMod(key, cardinality)
+	case formatErasureVersionV3DistributionAlgo:
+		return sipHashMod(key, cardinality, id)
 	default:
 		// Unknown algorithm returns -1, also if cardinality is less than 0.
 		return -1
@@ -591,70 +603,53 @@ func hashKey(algo string, key string, cardinality int) int {
 }

 // Always returns the same erasure coded set for a given input.
-func (s *xlSets) getHashedSetIndex(input string) int {
-	return hashKey(s.distributionAlgo, input, len(s.sets))
+func (s *erasureSets) getHashedSetIndex(input string) int {
+	return hashKey(s.distributionAlgo, input, len(s.sets), s.deploymentID)
 }

 // Always returns the same erasure coded set for a given input.
-func (s *xlSets) getHashedSet(input string) (set *xlObjects) {
+func (s *erasureSets) getHashedSet(input string) (set *erasureObjects) {
 	return s.sets[s.getHashedSetIndex(input)]
 }
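// A small, self-contained sketch of the SIPMOD placement added above. The
// siphash import path is an assumption (this hunk does not show the import
// block); github.com/dchest/siphash provides the New(key []byte) hash.Hash64
// constructor used here, and the object/bucket values are illustrative only.

package main

import (
	"fmt"

	"github.com/dchest/siphash" // assumed implementation of siphash.New
	"github.com/google/uuid"
)

// Mirrors the patch: the 16-byte deployment ID doubles as the SipHash key,
// so two deployments shuffle the same object names onto different sets.
func sipHashMod(key string, cardinality int, id [16]byte) int {
	if cardinality <= 0 {
		return -1
	}
	sip := siphash.New(id[:])
	sip.Write([]byte(key))
	return int(sip.Sum64() % uint64(cardinality))
}

func main() {
	deploymentID := uuid.MustParse("f5c58c61-7175-4018-ab5e-a94fe9c2de4e")
	// The same object name always lands on the same erasure set.
	fmt.Println(sipHashMod("photos/2020/06/cat.png", 16, deploymentID))
}

 // GetBucketInfo - returns bucket info from one of the erasure coded sets.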
-func (s *xlSets) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) { +func (s *erasureSets) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) { return s.getHashedSet("").GetBucketInfo(ctx, bucket) } // ListObjectsV2 lists all objects in bucket filtered by prefix -func (s *xlSets) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { - marker := continuationToken - if marker == "" { - marker = startAfter - } - - loi, err := s.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) - if err != nil { - return result, err - } - - listObjectsV2Info := ListObjectsV2Info{ - IsTruncated: loi.IsTruncated, - ContinuationToken: continuationToken, - NextContinuationToken: loi.NextMarker, - Objects: loi.Objects, - Prefixes: loi.Prefixes, - } - return listObjectsV2Info, err +func (s *erasureSets) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) { + return result, NotImplemented{} } // IsNotificationSupported returns whether bucket notification is applicable for this layer. -func (s *xlSets) IsNotificationSupported() bool { +func (s *erasureSets) IsNotificationSupported() bool { return s.getHashedSet("").IsNotificationSupported() } // IsListenBucketSupported returns whether listen bucket notification is applicable for this layer. -func (s *xlSets) IsListenBucketSupported() bool { +func (s *erasureSets) IsListenBucketSupported() bool { return true } // IsEncryptionSupported returns whether server side encryption is implemented for this layer. -func (s *xlSets) IsEncryptionSupported() bool { +func (s *erasureSets) IsEncryptionSupported() bool { return s.getHashedSet("").IsEncryptionSupported() } // IsCompressionSupported returns whether compression is applicable for this layer. -func (s *xlSets) IsCompressionSupported() bool { +func (s *erasureSets) IsCompressionSupported() bool { return s.getHashedSet("").IsCompressionSupported() } -func (s *xlSets) IsTaggingSupported() bool { +func (s *erasureSets) IsTaggingSupported() bool { return true } // DeleteBucket - deletes a bucket on all sets simultaneously, // even if one of the sets fail to delete buckets, we proceed to // undo a successful operation. -func (s *xlSets) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { +func (s *erasureSets) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { g := errgroup.WithNErrs(len(s.sets)) // Delete buckets in parallel across all sets. @@ -670,7 +665,7 @@ func (s *xlSets) DeleteBucket(ctx context.Context, bucket string, forceDelete bo // by creating buckets again on all sets which were successfully deleted. for _, err := range errs { if err != nil { - undoDeleteBucketSets(bucket, s.sets, errs) + undoDeleteBucketSets(ctx, bucket, s.sets, errs) return err } } @@ -683,7 +678,7 @@ func (s *xlSets) DeleteBucket(ctx context.Context, bucket string, forceDelete bo } // This function is used to undo a successful DeleteBucket operation. -func undoDeleteBucketSets(bucket string, sets []*xlObjects, errs []error) { +func undoDeleteBucketSets(ctx context.Context, bucket string, sets []*erasureObjects, errs []error) { g := errgroup.WithNErrs(len(sets)) // Undo previous delete bucket on all underlying sets. 
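// A compact, runnable sketch (illustrative, not the patch's code) of the
// fan-out/undo shape that DeleteBucket and undoDeleteBucketSets use above:
// run one goroutine per set, collect the per-set errors, and recreate the
// bucket on the sets that succeeded if any set failed. Only the errgroup
// package and its usage are taken from the patch itself.

package main

import (
	"errors"
	"fmt"

	"github.com/minio/minio/pkg/sync/errgroup"
)

func main() {
	sets := []string{"set-0", "set-1", "set-2"}
	g := errgroup.WithNErrs(len(sets))
	for index := range sets {
		index := index
		g.Go(func() error {
			if index == 1 {
				return errors.New("disk offline") // simulate one failing set
			}
			return nil // delete succeeded on this set
		}, index)
	}
	errs := g.Wait()
	for _, err := range errs {
		if err == nil {
			continue
		}
		// One set failed: undo the deletes that succeeded, as
		// undoDeleteBucketSets does, so all sets stay consistent.
		for i, e := range errs {
			if e == nil {
				fmt.Printf("recreating bucket on %s\n", sets[i])
			}
		}
		fmt.Println("delete aborted:", err)
		break
	}
}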
@@ -691,7 +686,7 @@ func undoDeleteBucketSets(bucket string, sets []*xlObjects, errs []error) { index := index g.Go(func() error { if errs[index] == nil { - return sets[index].MakeBucketWithLocation(GlobalContext, bucket, "", false) + return sets[index].MakeBucketWithLocation(ctx, bucket, BucketOptions{}) } return nil }, index) @@ -703,7 +698,7 @@ func undoDeleteBucketSets(bucket string, sets []*xlObjects, errs []error) { // List all buckets from one of the set, we are not doing merge // sort here just for simplification. As per design it is assumed // that all buckets are present on all sets. -func (s *xlSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) { +func (s *erasureSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) { // Always lists from the same set signified by the empty string. return s.getHashedSet("").ListBuckets(ctx) } @@ -711,83 +706,86 @@ func (s *xlSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err err // --- Object Operations --- // GetObjectNInfo - returns object info and locked object ReadCloser -func (s *xlSets) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { +func (s *erasureSets) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { return s.getHashedSet(object).GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts) } // GetObject - reads an object from the hashedSet based on the object name. -func (s *xlSets) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { +func (s *erasureSets) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { return s.getHashedSet(object).GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts) } // PutObject - writes an object to hashedSet based on the object name. -func (s *xlSets) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { +func (s *erasureSets) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { return s.getHashedSet(object).PutObject(ctx, bucket, object, data, opts) } // GetObjectInfo - reads object metadata from the hashedSet based on the object name. -func (s *xlSets) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { +func (s *erasureSets) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { return s.getHashedSet(object).GetObjectInfo(ctx, bucket, object, opts) } // DeleteObject - deletes an object from the hashedSet based on the object name. -func (s *xlSets) DeleteObject(ctx context.Context, bucket string, object string) (err error) { - return s.getHashedSet(object).DeleteObject(ctx, bucket, object) +func (s *erasureSets) DeleteObject(ctx context.Context, bucket string, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { + return s.getHashedSet(object).DeleteObject(ctx, bucket, object, opts) } // DeleteObjects - bulk delete of objects // Bulk delete is only possible within one set. 
For that purpose
 // objects are grouped by set first, and then bulk delete is invoked
 // for each set, the error response of each delete will be returned
-func (s *xlSets) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) {
+func (s *erasureSets) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
 	type delObj struct {
 		// Set index associated to this object
 		setIndex int
 		// Original index from the list of arguments
 		// where this object is passed
 		origIndex int
-		// Object name
-		name string
+		// object to delete
+		object ObjectToDelete
 	}

 	// Transform []delObj to the list of object names
-	toNames := func(delObjs []delObj) []string {
-		names := make([]string, len(delObjs))
+	toNames := func(delObjs []delObj) []ObjectToDelete {
+		objs := make([]ObjectToDelete, len(delObjs))
 		for i, obj := range delObjs {
-			names[i] = obj.name
+			objs[i] = obj.object
 		}
-		return names
+		return objs
 	}

 	// The result of delete operation on all passed objects
 	var delErrs = make([]error, len(objects))

+	// The result of delete objects
+	var delObjects = make([]DeletedObject, len(objects))
+
 	// A map between a set and its associated objects
 	var objSetMap = make(map[int][]delObj)

 	// Group objects by set index
 	for i, object := range objects {
-		index := s.getHashedSetIndex(object)
-		objSetMap[index] = append(objSetMap[index], delObj{setIndex: index, origIndex: i, name: object})
+		index := s.getHashedSetIndex(object.ObjectName)
+		objSetMap[index] = append(objSetMap[index], delObj{setIndex: index, origIndex: i, object: object})
 	}

 	// Invoke bulk delete on objects per set and save
 	// the result of the delete operation
 	for _, objsGroup := range objSetMap {
-		errs, err := s.getHashedSet(objsGroup[0].name).DeleteObjects(ctx, bucket, toNames(objsGroup))
-		if err != nil {
-			return nil, err
-		}
+		dobjects, errs := s.getHashedSet(objsGroup[0].object.ObjectName).DeleteObjects(ctx, bucket, toNames(objsGroup), opts)
 		for i, obj := range objsGroup {
 			delErrs[obj.origIndex] = errs[i]
+			if delErrs[obj.origIndex] == nil {
+				delObjects[obj.origIndex] = dobjects[i]
+			}
 		}
 	}

-	return delErrs, nil
+	return delObjects, delErrs
 }

 // CopyObject - copies objects from one hashedSet to another hashedSet, on server side.
-func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
+func (s *erasureSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
 	srcSet := s.getHashedSet(srcObject)
 	dstSet := s.getHashedSet(dstObject)

@@ -800,6 +798,29 @@ func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket
 	return dstSet.putObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
 }

+// FileInfoVersionsCh - file info versions channel
+type FileInfoVersionsCh struct {
+	Ch    chan FileInfoVersions
+	Prev  FileInfoVersions
+	Valid bool
+}
+
+// Pop - pops a cached entry if any, else reads from the channel.
+func (f *FileInfoVersionsCh) Pop() (fi FileInfoVersions, ok bool) {
+	if f.Valid {
+		f.Valid = false
+		return f.Prev, true
+	}
+	// No cached entries found, read from channel
+	f.Prev, ok = <-f.Ch
+	return f.Prev, ok
+}
+
+// Push - cache an entry, for Pop() later.
+func (f *FileInfoVersionsCh) Push(fi FileInfoVersions) { + f.Prev = fi + f.Valid = true +} + // FileInfoCh - file info channel type FileInfoCh struct { Ch chan FileInfo @@ -830,7 +851,7 @@ func (f *FileInfoCh) Push(fi FileInfo) { // again to list the next entry. It is callers responsibility // if the caller wishes to list N entries to call lexicallySortedEntry // N times until this boolean is 'false'. -func lexicallySortedEntry(entryChs []FileInfoCh, entries []FileInfo, entriesValid []bool) (FileInfo, int, bool) { +func lexicallySortedEntryVersions(entryChs []FileInfoVersionsCh, entries []FileInfoVersions, entriesValid []bool) (FileInfoVersions, int, bool) { for i := range entryChs { entries[i], entriesValid[i] = entryChs[i].Pop() } @@ -844,7 +865,7 @@ func lexicallySortedEntry(entryChs []FileInfoCh, entries []FileInfo, entriesVali break } - var lentry FileInfo + var lentry FileInfoVersions var found bool for i, valid := range entriesValid { if !valid { @@ -874,7 +895,7 @@ func lexicallySortedEntry(entryChs []FileInfoCh, entries []FileInfo, entriesVali // Entries are duplicated across disks, // we should simply skip such entries. - if lentry.Name == entries[i].Name && lentry.ModTime.Equal(entries[i].ModTime) { + if lentry.Name == entries[i].Name && lentry.LatestModTime.Equal(entries[i].LatestModTime) { lexicallySortedEntryCount++ continue } @@ -887,61 +908,47 @@ func lexicallySortedEntry(entryChs []FileInfoCh, entries []FileInfo, entriesVali return lentry, lexicallySortedEntryCount, isTruncated } -// mergeEntriesCh - merges FileInfo channel to entries upto maxKeys. -func mergeEntriesCh(entryChs []FileInfoCh, maxKeys int, ndisks int) (entries FilesInfo) { - var i = 0 - entriesInfos := make([]FileInfo, len(entryChs)) - entriesValid := make([]bool, len(entryChs)) - for { - fi, quorumCount, valid := lexicallySortedEntry(entryChs, entriesInfos, entriesValid) - if !valid { - // We have reached EOF across all entryChs, break the loop. - break - } - - if quorumCount < ndisks-1 { - // Skip entries which are not found on upto ndisks. - continue - } - - entries.Files = append(entries.Files, fi) - i++ - if i == maxKeys { - entries.IsTruncated = isTruncated(entryChs, entriesInfos, entriesValid) - break - } - } - return entries -} - -func isTruncated(entryChs []FileInfoCh, entries []FileInfo, entriesValid []bool) bool { - for i := range entryChs { - entries[i], entriesValid[i] = entryChs[i].Pop() - } - - var isTruncated = false - for _, valid := range entriesValid { - if !valid { - continue - } - isTruncated = true - break - } - for i := range entryChs { - if entriesValid[i] { - entryChs[i].Push(entries[i]) - } - } - return isTruncated -} - -func (s *xlSets) startMergeWalks(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}) []FileInfoCh { +func (s *erasureSets) startMergeWalks(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}) []FileInfoCh { return s.startMergeWalksN(ctx, bucket, prefix, marker, recursive, endWalkCh, -1) } -// Starts a walk channel across all disks and returns a slice of -// FileInfo channels which can be read from. 
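// A tiny, runnable illustration (not from the patch) of the Pop/Push
// pushback pattern defined above: Push parks exactly one entry so the k-way
// merge in lexicallySortedEntryVersions can peek at every channel's head,
// keep the lexically smallest entry, and return the rest for the next round.

package main

import "fmt"

type entryCh struct {
	ch    chan string
	prev  string
	valid bool
}

// Pop returns the parked entry if present, else reads from the channel.
func (f *entryCh) Pop() (string, bool) {
	if f.valid {
		f.valid = false
		return f.prev, true
	}
	v, ok := <-f.ch
	f.prev = v
	return v, ok
}

// Push parks one entry so the next Pop returns it again.
func (f *entryCh) Push(s string) {
	f.prev = s
	f.valid = true
}

func main() {
	c := entryCh{ch: make(chan string, 1)}
	c.ch <- "a/object"
	close(c.ch)

	head, _ := c.Pop() // peek at the channel head
	c.Push(head)       // not the smallest this round: park it again
	again, _ := c.Pop()
	fmt.Println(head == again) // true: the entry round-trips intact
}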
-func (s *xlSets) startMergeWalksN(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}, ndisks int) []FileInfoCh {
+func (s *erasureSets) startMergeWalksVersions(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}) []FileInfoVersionsCh {
+	return s.startMergeWalksVersionsN(ctx, bucket, prefix, marker, recursive, endWalkCh, -1)
+}
+
+// Starts a walk versions channel across N disks and returns a slice of
+// FileInfoVersionsCh which can be read from.
+func (s *erasureSets) startMergeWalksVersionsN(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}, ndisks int) []FileInfoVersionsCh {
+	var entryChs []FileInfoVersionsCh
+	var success int
+	for _, set := range s.sets {
+		// Reset for the next erasure set.
+		success = ndisks
+		for _, disk := range set.getLoadBalancedDisks() {
+			if disk == nil {
+				// Disk can be offline
+				continue
+			}
+			entryCh, err := disk.WalkVersions(bucket, prefix, marker, recursive, endWalkCh)
+			if err != nil {
+				// Disk walk returned error, ignore it.
+				continue
+			}
+			entryChs = append(entryChs, FileInfoVersionsCh{
+				Ch: entryCh,
+			})
+			success--
+			if success == 0 {
+				break
+			}
+		}
+	}
+	return entryChs
+}
+
+// Starts a walk channel across N disks and returns a slice of
+// FileInfoCh which can be read from.
+func (s *erasureSets) startMergeWalksN(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}, ndisks int) []FileInfoCh {
 	var entryChs []FileInfoCh
 	var success int
 	for _, set := range s.sets {
@@ -952,7 +959,7 @@ func (s *xlSets) startMergeWalksN(ctx context.Context, bucket, prefix, marker st
 			// Disk can be offline
 			continue
 		}
-		entryCh, err := disk.Walk(bucket, prefix, marker, recursive, xlMetaJSONFile, readMetadata, endWalkCh)
+		entryCh, err := disk.Walk(bucket, prefix, marker, recursive, endWalkCh)
 		if err != nil {
 			// Disk walk returned error, ignore it.
 			continue
@@ -969,9 +976,9 @@ func (s *xlSets) startMergeWalksN(ctx context.Context, bucket, prefix, marker st
 	return entryChs
 }

-// Starts a walk channel across all disks and returns a slice of
+// Starts a walk channel across N disks and returns a slice of
 // FileInfo channels which can be read from.
-func (s *xlSets) startSplunkMergeWalksN(ctx context.Context, bucket, prefix, marker string, endWalkCh <-chan struct{}, ndisks int) []FileInfoCh {
+func (s *erasureSets) startSplunkMergeWalksN(ctx context.Context, bucket, prefix, marker string, endWalkCh <-chan struct{}, ndisks int) []FileInfoCh {
 	var entryChs []FileInfoCh
 	var success int
 	for _, set := range s.sets {
@@ -999,207 +1006,35 @@ func (s *xlSets) startSplunkMergeWalksN(ctx context.Context, bucket, prefix, mar
 	return entryChs
 }

-func (s *xlSets) listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
-	endWalkCh := make(chan struct{})
-	defer close(endWalkCh)
-
-	const ndisks = 3
-	entryChs := s.startMergeWalksN(GlobalContext, bucket, prefix, "", true, endWalkCh, ndisks)
-
-	var objInfos []ObjectInfo
-	var eof bool
-	var prevPrefix string
-
-	entriesValid := make([]bool, len(entryChs))
-	entries := make([]FileInfo, len(entryChs))
-	for {
-		if len(objInfos) == maxKeys {
-			break
-		}
-
-		result, quorumCount, ok := lexicallySortedEntry(entryChs, entries, entriesValid)
-		if !ok {
-			eof = true
-			break
-		}
-
-		if quorumCount < ndisks-1 {
-			// Skip entries which are not found on upto ndisks.
-			continue
-		}
-
-		var objInfo ObjectInfo
-
-		index := strings.Index(strings.TrimPrefix(result.Name, prefix), delimiter)
-		if index == -1 {
-			objInfo = ObjectInfo{
-				IsDir:           false,
-				Bucket:          bucket,
-				Name:            result.Name,
-				ModTime:         result.ModTime,
-				Size:            result.Size,
-				ContentType:     result.Metadata["content-type"],
-				ContentEncoding: result.Metadata["content-encoding"],
-			}
-
-			// Extract etag from metadata.
-			objInfo.ETag = extractETag(result.Metadata)
-
-			// All the parts per object.
-			objInfo.Parts = result.Parts
-
-			// etag/md5Sum has already been extracted. We need to
-			// remove to avoid it from appearing as part of
-			// response headers. e.g, X-Minio-* or X-Amz-*.
-			objInfo.UserDefined = cleanMetadata(result.Metadata)
-
-			// Update storage class
-			if sc, ok := result.Metadata[xhttp.AmzStorageClass]; ok {
-				objInfo.StorageClass = sc
-			} else {
-				objInfo.StorageClass = globalMinioDefaultStorageClass
-			}
-		} else {
-			index = len(prefix) + index + len(delimiter)
-			currPrefix := result.Name[:index]
-			if currPrefix == prevPrefix {
-				continue
-			}
-			prevPrefix = currPrefix
-
-			objInfo = ObjectInfo{
-				Bucket: bucket,
-				Name:   currPrefix,
-				IsDir:  true,
-			}
-		}
-
-		if objInfo.Name <= marker {
-			continue
-		}
-
-		objInfos = append(objInfos, objInfo)
-	}
-
-	result := ListObjectsInfo{}
-	for _, objInfo := range objInfos {
-		if objInfo.IsDir {
-			result.Prefixes = append(result.Prefixes, objInfo.Name)
-			continue
-		}
-		result.Objects = append(result.Objects, objInfo)
-	}
-
-	if !eof {
-		result.IsTruncated = true
-		if len(objInfos) > 0 {
-			result.NextMarker = objInfos[len(objInfos)-1].Name
-		}
-	}
-
-	return result, nil
-}
-
-// ListObjects - implements listing of objects across disks, each disk is independently
+// ListObjectVersions - implements listing of objects across disks, each disk is independently
 // walked and merged at this layer. Resulting value through the merge process sends
 // the data in lexically sorted order.
-// If partialQuorumOnly is set only objects that does not have full quorum is returned.
-func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
-	if err = checkListObjsArgs(ctx, bucket, prefix, marker, s); err != nil {
-		return loi, err
-	}
-
-	// Marker is set validate pre-condition.
-	if marker != "" {
-		// Marker not common with prefix is not implemented. Send an empty response
-		if !HasPrefix(marker, prefix) {
-			return loi, nil
-		}
-	}
-
-	// With max keys of zero we have reached eof, return right here.
-	if maxKeys == 0 {
-		return loi, nil
-	}
-
-	// For delimiter and prefix as '/' we do not list anything at all
-	// since according to s3 spec we stop at the 'delimiter'
-	// along // with the prefix. On a flat namespace with 'prefix'
-	// as '/' we don't have any entries, since all the keys are
-	// of form 'keyName/...'
-	if delimiter == SlashSeparator && prefix == SlashSeparator {
-		return loi, nil
-	}
-
-	// Over flowing count - reset to maxObjectList.
-	if maxKeys < 0 || maxKeys > maxObjectList {
-		maxKeys = maxObjectList
-	}
-
-	if delimiter != SlashSeparator && delimiter != "" {
-		// "heal" option passed can be ignored as the heal-listing does not send non-standard delimiter.
-		return s.listObjectsNonSlash(ctx, bucket, prefix, marker, delimiter, maxKeys)
-	}
-
-	// Default is recursive, if delimiter is set then list non recursive.
-	recursive := true
-	if delimiter == SlashSeparator {
-		recursive = false
-	}
-
-	const ndisks = 3
-
-	entryChs, endWalkCh := s.pool.Release(listParams{bucket: bucket, recursive: recursive, marker: marker, prefix: prefix})
-	if entryChs == nil {
-		endWalkCh = make(chan struct{})
-		// start file tree walk across at most randomly 3 disks in a set.
-		entryChs = s.startMergeWalksN(GlobalContext, bucket, prefix, marker, recursive, endWalkCh, ndisks)
-	}
-
-	entries := mergeEntriesCh(entryChs, maxKeys, ndisks)
-	if len(entries.Files) == 0 {
-		return loi, nil
-	}
-
-	loi.IsTruncated = entries.IsTruncated
-	if loi.IsTruncated {
-		loi.NextMarker = entries.Files[len(entries.Files)-1].Name
-	}
-
-	for _, entry := range entries.Files {
-		objInfo := entry.ToObjectInfo()
-		if HasSuffix(objInfo.Name, SlashSeparator) && !recursive {
-			loi.Prefixes = append(loi.Prefixes, entry.Name)
-			continue
-		}
-		loi.Objects = append(loi.Objects, objInfo)
-	}
-	if loi.IsTruncated {
-		s.pool.Set(listParams{bucket, recursive, loi.NextMarker, prefix}, entryChs, endWalkCh)
-	}
-	return loi, nil
+func (s *erasureSets) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionIDMarker, delimiter string, maxKeys int) (loi ListObjectVersionsInfo, err error) {
+	// Shouldn't be called directly, caller Zones already has an implementation
+	return loi, NotImplemented{}
 }

 // ListObjects - implements listing of objects across disks, each disk is independently
 // walked and merged at this layer. Resulting value through the merge process sends
 // the data in lexically sorted order.
-func (s *xlSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
-	return s.listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
+func (s *erasureSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
	// Shouldn't be called directly, caller Zones already has an implementation
+	return loi, NotImplemented{}
 }

-func (s *xlSets) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
+func (s *erasureSets) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
 	// In list multipart uploads we are going to treat input prefix as the object,
 	// this means that we are not supporting directory navigation.
 	return s.getHashedSet(prefix).ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
 }

 // Initiate a new multipart upload on a hashedSet based on object name.
-func (s *xlSets) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
+func (s *erasureSets) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
 	return s.getHashedSet(object).NewMultipartUpload(ctx, bucket, object, opts)
 }
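// A toy illustration (not the patch's code) of the invariant behind the
// multipart handlers that follow: every multipart call routes by object name
// through getHashedSet, so all parts, listings, and the completion of one
// upload land on the same erasure set. The hash function and set count here
// are placeholders for the real CRCMOD/SIPMOD dispatch.

package main

import (
	"fmt"
	"hash/crc32"
)

func setFor(object string, sets int) int {
	return int(crc32.Checksum([]byte(object), crc32.IEEETable) % uint32(sets))
}

func main() {
	const sets = 16
	object := "videos/take-1.mp4"
	// NewMultipartUpload, PutObjectPart, ListObjectParts and
	// CompleteMultipartUpload all derive the same index from the object
	// name, so an upload never straddles two sets.
	fmt.Println("multipart ops for", object, "route to set", setFor(object, sets))
}

 // Copies a part of an object from source hashedSet to destination hashedSet.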
-func (s *xlSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, +func (s *erasureSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (partInfo PartInfo, err error) { destSet := s.getHashedSet(destObject) @@ -1207,27 +1042,27 @@ func (s *xlSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destB } // PutObjectPart - writes part of an object to hashedSet based on the object name. -func (s *xlSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) { +func (s *erasureSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) { return s.getHashedSet(object).PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts) } // GetMultipartInfo - return multipart metadata info uploaded at hashedSet. -func (s *xlSets) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (result MultipartInfo, err error) { +func (s *erasureSets) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (result MultipartInfo, err error) { return s.getHashedSet(object).GetMultipartInfo(ctx, bucket, object, uploadID, opts) } // ListObjectParts - lists all uploaded parts to an object in hashedSet. -func (s *xlSets) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) { +func (s *erasureSets) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) { return s.getHashedSet(object).ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts) } // Aborts an in-progress multipart operation on hashedSet based on the object name. -func (s *xlSets) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { +func (s *erasureSets) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { return s.getHashedSet(object).AbortMultipartUpload(ctx, bucket, object, uploadID) } // CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name. -func (s *xlSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) { +func (s *erasureSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) { return s.getHashedSet(object).CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts) } @@ -1284,7 +1119,7 @@ else fi */ -func formatsToDrivesInfo(endpoints Endpoints, formats []*formatXLV3, sErrs []error) (beforeDrives []madmin.DriveInfo) { +func formatsToDrivesInfo(endpoints Endpoints, formats []*formatErasureV3, sErrs []error) (beforeDrives []madmin.DriveInfo) { beforeDrives = make([]madmin.DriveInfo, len(endpoints)) // Existing formats are available (i.e. ok), so save it in // result, also populate disks to be healed. 
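// A hedged sketch of the error-to-drive-state folding that
// formatsToDrivesInfo performs here: each endpoint's format.json load result
// becomes one before-heal drive entry. The state strings below are
// placeholders; the real code uses madmin constants (only DriveStateOk is
// visible in this patch), and errUnformattedDisk is named for illustration.

package main

import (
	"errors"
	"fmt"
)

var errUnformattedDisk = errors.New("unformatted disk found") // illustrative

// driveState folds a format.json load result into a report state.
func driveState(err error) string {
	switch {
	case err == nil:
		return "ok" // format loaded: drive is healthy
	case errors.Is(err, errUnformattedDisk):
		return "missing" // fresh disk: a candidate for healing
	default:
		return "offline" // unreachable or otherwise failed
	}
}

func main() {
	loadErrs := []error{nil, errUnformattedDisk, errors.New("disk not found")}
	for i, err := range loadErrs {
		fmt.Printf("endpoint %d: %s\n", i, driveState(err))
	}
}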
@@ -1302,7 +1137,7 @@ func formatsToDrivesInfo(endpoints Endpoints, formats []*formatXLV3, sErrs []err beforeDrives[i] = madmin.DriveInfo{ UUID: func() string { if format != nil { - return format.XL.This + return format.Erasure.This } return "" }(), @@ -1316,7 +1151,7 @@ func formatsToDrivesInfo(endpoints Endpoints, formats []*formatXLV3, sErrs []err // Reloads the format from the disk, usually called by a remote peer notifier while // healing in a distributed setup. -func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) { +func (s *erasureSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) { storageDisks, errs := initStorageDisksWithErrors(s.endpoints) for i, err := range errs { if err != nil && err != errDiskNotFound { @@ -1329,8 +1164,8 @@ func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) { } }(storageDisks) - formats, sErrs := loadFormatXLAll(storageDisks, false) - if err = checkFormatXLValues(formats, s.drivesPerSet); err != nil { + formats, sErrs := loadFormatErasureAll(storageDisks, false) + if err = checkFormatErasureValues(formats, s.drivesPerSet); err != nil { return err } @@ -1344,7 +1179,7 @@ func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) { } } - refFormat, err := getFormatXLInQuorum(formats) + refFormat, err := getFormatErasureInQuorum(formats) if err != nil { return err } @@ -1358,7 +1193,7 @@ func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) { s.format = refFormat // Close all existing disks and reconnect all the disks. - s.xlDisksMu.Lock() + s.erasureDisksMu.Lock() for _, disk := range storageDisks { if disk == nil { continue @@ -1376,14 +1211,14 @@ func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) { continue } - if s.xlDisks[m][n] != nil { - s.xlDisks[m][n].Close() + if s.erasureDisks[m][n] != nil { + s.erasureDisks[m][n].Close() } s.endpointStrings[m*s.drivesPerSet+n] = disk.String() - s.xlDisks[m][n] = disk + s.erasureDisks[m][n] = disk } - s.xlDisksMu.Unlock() + s.erasureDisksMu.Unlock() // Restart monitoring loop to monitor reformatted disks again. go s.monitorAndConnectEndpoints(GlobalContext, defaultMonitorConnectEndpointInterval) @@ -1391,7 +1226,7 @@ func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) { return nil } -// If it is a single node XL and all disks are root disks, it is most likely a test setup, else it is a production setup. +// If it is a single node Erasure and all disks are root disks, it is most likely a test setup, else it is a production setup. // On a test setup we allow creation of format.json on root disks to help with dev/testing. func isTestSetup(infos []DiskInfo, errs []error) bool { rootDiskCount := 0 @@ -1450,7 +1285,7 @@ func markRootDisksAsDown(storageDisks []StorageAPI) { } // HealFormat - heals missing `format.json` on fresh unformatted disks. 
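// A condensed, runnable sketch of the isTestSetup heuristic above: when every
// reachable disk reports itself as a root disk, the deployment is treated as
// a dev/test setup and creating format.json on root disks is allowed. The
// slice types are simplified stand-ins for []DiskInfo and []error.

package main

import "fmt"

// isTestSetup returns true when every reachable disk is a root disk.
func isTestSetup(rootDisk []bool, errs []error) bool {
	rootDiskCount := 0
	for i, err := range errs {
		if err == nil && rootDisk[i] {
			rootDiskCount++
		}
	}
	// All disks are root disks: almost certainly a dev/test environment.
	return rootDiskCount == len(rootDisk)
}

func main() {
	fmt.Println(isTestSetup([]bool{true, true}, []error{nil, nil}))  // true
	fmt.Println(isTestSetup([]bool{true, false}, []error{nil, nil})) // false
}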
-func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealResultItem, err error) { +func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealResultItem, err error) { storageDisks, errs := initStorageDisksWithErrors(s.endpoints) for i, derr := range errs { if derr != nil && derr != errDiskNotFound { @@ -1466,8 +1301,8 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe markRootDisksAsDown(storageDisks) - formats, sErrs := loadFormatXLAll(storageDisks, true) - if err = checkFormatXLValues(formats, s.drivesPerSet); err != nil { + formats, sErrs := loadFormatErasureAll(storageDisks, true) + if err = checkFormatErasureValues(formats, s.drivesPerSet); err != nil { return madmin.HealResultItem{}, err } @@ -1506,7 +1341,7 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe return res, errNoHealRequired } - refFormat, err := getFormatXLInQuorum(formats) + refFormat, err := getFormatErasureInQuorum(formats) if err != nil { return res, err } @@ -1522,19 +1357,19 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe // such that we can fill them up with new UUIDs, this looping also // ensures that the replaced disks allocated evenly across all sets. // Making sure that the redundancy is not lost. - for i := range refFormat.XL.Sets { - for j := range refFormat.XL.Sets[i] { - if refFormat.XL.Sets[i][j] == offlineDiskUUID { + for i := range refFormat.Erasure.Sets { + for j := range refFormat.Erasure.Sets[i] { + if refFormat.Erasure.Sets[i][j] == offlineDiskUUID { for l := range newFormatSets[i] { if newFormatSets[i][l] == nil { continue } - if newFormatSets[i][l].XL.This == "" { - newFormatSets[i][l].XL.This = mustGetUUID() - refFormat.XL.Sets[i][j] = newFormatSets[i][l].XL.This + if newFormatSets[i][l].Erasure.This == "" { + newFormatSets[i][l].Erasure.This = mustGetUUID() + refFormat.Erasure.Sets[i][j] = newFormatSets[i][l].Erasure.This for m, v := range res.After.Drives { if v.Endpoint == s.endpoints.GetString(i*s.drivesPerSet+l) { - res.After.Drives[m].UUID = newFormatSets[i][l].XL.This + res.After.Drives[m].UUID = newFormatSets[i][l].Erasure.This res.After.Drives[m].State = madmin.DriveStateOk } } @@ -1546,19 +1381,19 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe } if !dryRun { - var tmpNewFormats = make([]*formatXLV3, s.setCount*s.drivesPerSet) + var tmpNewFormats = make([]*formatErasureV3, s.setCount*s.drivesPerSet) for i := range newFormatSets { for j := range newFormatSets[i] { if newFormatSets[i][j] == nil { continue } tmpNewFormats[i*s.drivesPerSet+j] = newFormatSets[i][j] - tmpNewFormats[i*s.drivesPerSet+j].XL.Sets = refFormat.XL.Sets + tmpNewFormats[i*s.drivesPerSet+j].Erasure.Sets = refFormat.Erasure.Sets } } // Save formats `format.json` across all disks. - if err = saveFormatXLAll(ctx, storageDisks, tmpNewFormats); err != nil { + if err = saveFormatErasureAll(ctx, storageDisks, tmpNewFormats); err != nil { return madmin.HealResultItem{}, err } @@ -1571,7 +1406,7 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe s.format = refFormat // Disconnect/relinquish all existing disks, lockers and reconnect the disks, lockers. 
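// A distilled, self-contained sketch of the UUID reassignment loop above:
// every slot the reference format marks with the offline-disk placeholder
// receives a freshly generated UUID, one replacement per slot, so replaced
// drives are re-admitted evenly and redundancy stays spread across sets.
// The placeholder value and set layout below are illustrative.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

const offlineDiskUUID = "ffffffff-ffff-ffff-ffff-ffffffffffff" // assumed sentinel

func main() {
	sets := [][]string{
		{"disk-a", offlineDiskUUID},
		{offlineDiskUUID, "disk-d"},
	}
	for i := range sets {
		for j := range sets[i] {
			if sets[i][j] == offlineDiskUUID {
				// A replaced drive gets a brand-new identity.
				sets[i][j] = uuid.New().String()
			}
		}
	}
	fmt.Println(sets)
}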
- s.xlDisksMu.Lock() + s.erasureDisksMu.Lock() for _, disk := range storageDisks { if disk == nil { continue @@ -1589,14 +1424,14 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe continue } - if s.xlDisks[m][n] != nil { - s.xlDisks[m][n].Close() + if s.erasureDisks[m][n] != nil { + s.erasureDisks[m][n].Close() } s.endpointStrings[m*s.drivesPerSet+n] = disk.String() - s.xlDisks[m][n] = disk + s.erasureDisks[m][n] = disk } - s.xlDisksMu.Unlock() + s.erasureDisksMu.Unlock() // Restart our monitoring loop to start monitoring newly formatted disks. go s.monitorAndConnectEndpoints(GlobalContext, defaultMonitorConnectEndpointInterval) @@ -1606,7 +1441,7 @@ func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealRe } // HealBucket - heals inconsistent buckets and bucket metadata on all sets. -func (s *xlSets) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (result madmin.HealResultItem, err error) { +func (s *erasureSets) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (result madmin.HealResultItem, err error) { // Initialize heal result info result = madmin.HealResultItem{ Type: madmin.HealItemBucket, @@ -1628,21 +1463,21 @@ func (s *xlSets) HealBucket(ctx context.Context, bucket string, dryRun, remove b // Check if we had quorum to write, if not return an appropriate error. _, afterDriveOnline := result.GetOnlineCounts() if afterDriveOnline < ((s.setCount*s.drivesPerSet)/2)+1 { - return result, toObjectErr(errXLWriteQuorum, bucket) + return result, toObjectErr(errErasureWriteQuorum, bucket) } return result, nil } // HealObject - heals inconsistent object on a hashedSet based on object name. -func (s *xlSets) HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error) { - return s.getHashedSet(object).HealObject(ctx, bucket, object, opts) +func (s *erasureSets) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) { + return s.getHashedSet(object).HealObject(ctx, bucket, object, versionID, opts) } // Lists all buckets which need healing. -func (s *xlSets) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { +func (s *erasureSets) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { var listBuckets []BucketInfo - var healBuckets = make(map[string]VolInfo) + var healBuckets = map[string]VolInfo{} for _, set := range s.sets { // lists all unique buckets across drives. if err := listAllBuckets(set.getDisks(), healBuckets); err != nil { @@ -1661,29 +1496,35 @@ func (s *xlSets) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { // to allocate a receive channel for ObjectInfo, upon any unhandled // error walker returns error. Optionally if context.Done() is received // then Walk() stops the walker. -func (s *xlSets) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error { +func (s *erasureSets) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error { if err := checkListObjsArgs(ctx, bucket, prefix, "", s); err != nil { // Upon error close the channel. 
close(results) return err } - entryChs := s.startMergeWalks(ctx, bucket, prefix, "", true, ctx.Done()) + entryChs := s.startMergeWalksVersions(ctx, bucket, prefix, "", true, ctx.Done()) entriesValid := make([]bool, len(entryChs)) - entries := make([]FileInfo, len(entryChs)) + entries := make([]FileInfoVersions, len(entryChs)) go func() { defer close(results) for { - entry, quorumCount, ok := lexicallySortedEntry(entryChs, entries, entriesValid) + entry, quorumCount, ok := lexicallySortedEntryVersions(entryChs, entries, entriesValid) if !ok { return } if quorumCount >= s.drivesPerSet/2 { - results <- entry.ToObjectInfo() // Read quorum exists proceed + // Read quorum exists proceed + for _, version := range entry.Versions { + results <- version.ToObjectInfo(bucket, version.Name) + } + for _, deleted := range entry.Deleted { + results <- deleted.ToObjectInfo(bucket, deleted.Name) + } } // skip entries which do not have quorum } @@ -1694,16 +1535,16 @@ func (s *xlSets) Walk(ctx context.Context, bucket, prefix string, results chan<- // HealObjects - Heal all objects recursively at a specified prefix, any // dangling objects deleted as well automatically. -func (s *xlSets) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject healObjectFn) error { +func (s *erasureSets) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject HealObjectFn) error { endWalkCh := make(chan struct{}) defer close(endWalkCh) - entryChs := s.startMergeWalks(ctx, bucket, prefix, "", true, endWalkCh) + entryChs := s.startMergeWalksVersions(ctx, bucket, prefix, "", true, endWalkCh) entriesValid := make([]bool, len(entryChs)) - entries := make([]FileInfo, len(entryChs)) + entries := make([]FileInfoVersions, len(entryChs)) for { - entry, quorumCount, ok := lexicallySortedEntry(entryChs, entries, entriesValid) + entry, quorumCount, ok := lexicallySortedEntryVersions(entryChs, entries, entriesValid) if !ok { break } @@ -1716,8 +1557,10 @@ func (s *xlSets) HealObjects(ctx context.Context, bucket, prefix string, opts ma // Wait and proceed if there are active requests waitForLowHTTPReq(int32(s.drivesPerSet)) - if err := healObject(bucket, entry.Name); err != nil { - return toObjectErr(err, bucket, entry.Name) + for _, version := range entry.Versions { + if err := healObject(bucket, version.Name, version.VersionID); err != nil { + return toObjectErr(err, bucket, version.Name) + } } } @@ -1725,32 +1568,37 @@ func (s *xlSets) HealObjects(ctx context.Context, bucket, prefix string, opts ma } // PutObjectTags - replace or add tags to an existing object -func (s *xlSets) PutObjectTags(ctx context.Context, bucket, object string, tags string) error { - return s.getHashedSet(object).PutObjectTags(ctx, bucket, object, tags) +func (s *erasureSets) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error { + return s.getHashedSet(object).PutObjectTags(ctx, bucket, object, tags, opts) } // DeleteObjectTags - delete object tags from an existing object -func (s *xlSets) DeleteObjectTags(ctx context.Context, bucket, object string) error { - return s.getHashedSet(object).DeleteObjectTags(ctx, bucket, object) +func (s *erasureSets) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error { + return s.getHashedSet(object).DeleteObjectTags(ctx, bucket, object, opts) } // GetObjectTags - get object tags from an existing object -func (s *xlSets) GetObjectTags(ctx context.Context, bucket, object string) 
(*tags.Tags, error) {
-	return s.getHashedSet(object).GetObjectTags(ctx, bucket, object)
+func (s *erasureSets) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
+	return s.getHashedSet(object).GetObjectTags(ctx, bucket, object, opts)
 }

 // GetMetrics - no op
-func (s *xlSets) GetMetrics(ctx context.Context) (*Metrics, error) {
+func (s *erasureSets) GetMetrics(ctx context.Context) (*Metrics, error) {
 	logger.LogIf(ctx, NotImplemented{})
 	return &Metrics{}, NotImplemented{}
 }

+// IsReady - Returns true if at least n/2 disks (read quorum) are online
+func (s *erasureSets) IsReady(_ context.Context) bool {
+	return false
+}
+
 // maintainMRFList gathers the list of successful partial uploads
-// from all underlying xl sets and puts them in a global map which
+// from all underlying erasure sets and puts them in a global map which
 // should not have more than 10000 entries.
-func (s *xlSets) maintainMRFList() {
+func (s *erasureSets) maintainMRFList() {
 	var agg = make(chan partialUpload, 10000)
-	for i, xl := range s.sets {
+	for i, er := range s.sets {
 		go func(c <-chan partialUpload, setIndex int) {
 			for msg := range c {
 				msg.failedSet = setIndex
@@ -1759,7 +1607,7 @@ func (s *xlSets) maintainMRFList() {
 				default:
 				}
 			}
-		}(xl.mrfUploadCh, i)
+		}(er.mrfUploadCh, i)
 	}

 	for fUpload := range agg {
@@ -1768,14 +1616,17 @@ func (s *xlSets) maintainMRFList() {
 			s.mrfMU.Unlock()
 			continue
 		}
-		s.mrfUploads[pathJoin(fUpload.bucket, fUpload.object)] = fUpload.failedSet
+		s.mrfUploads[healSource{
+			bucket: fUpload.bucket,
+			object: fUpload.object,
+		}] = fUpload.failedSet
 		s.mrfMU.Unlock()
 	}
 }

 // healMRFRoutine monitors new disk connections and sweeps the MRF list
 // to find objects related to the new disk that need to be healed.
-func (s *xlSets) healMRFRoutine() {
+func (s *erasureSets) healMRFRoutine() {
 	// Wait until background heal state is initialized
 	var bgSeq *healSequence
 	for {
@@ -1792,9 +1643,9 @@ func (s *xlSets) healMRFRoutine() {
 	}

 	for e := range s.disksConnectEvent {
-		// Get the list of objects related the xl set
+		// Get the list of objects related to the erasure set
 		// to which the connected disk belongs.
-		var mrfUploads []string
+		var mrfUploads []healSource
 		s.mrfMU.Lock()
 		for k, v := range s.mrfUploads {
 			if v == e.setIndex {
@@ -1807,7 +1658,7 @@ func (s *xlSets) healMRFRoutine() {
 		for _, u := range mrfUploads {
 			// Send an object to be healed with a timeout
 			select {
-			case bgSeq.sourceCh <- healSource{path: u}:
+			case bgSeq.sourceCh <- u:
 			case <-time.After(100 * time.Millisecond):
 			}

diff --git a/cmd/erasure-sets_test.go b/cmd/erasure-sets_test.go
new file mode 100644
index 000000000..7a66c614f
--- /dev/null
+++ b/cmd/erasure-sets_test.go
@@ -0,0 +1,245 @@
+/*
+ * MinIO Cloud Storage, (C) 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package cmd + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/google/uuid" +) + +var testUUID = uuid.MustParse("f5c58c61-7175-4018-ab5e-a94fe9c2de4e") + +func BenchmarkCrcHash(b *testing.B) { + cases := []struct { + key int + }{ + {16}, + {64}, + {128}, + {256}, + {512}, + {1024}, + } + for _, testCase := range cases { + testCase := testCase + key := randString(testCase.key) + b.Run("", func(b *testing.B) { + b.SetBytes(1024) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + crcHashMod(key, 16) + } + }) + } +} + +func BenchmarkSipHash(b *testing.B) { + cases := []struct { + key int + }{ + {16}, + {64}, + {128}, + {256}, + {512}, + {1024}, + } + for _, testCase := range cases { + testCase := testCase + key := randString(testCase.key) + b.Run("", func(b *testing.B) { + b.SetBytes(1024) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + sipHashMod(key, 16, testUUID) + } + }) + } +} + +// TestSipHashMod - test sip hash. +func TestSipHashMod(t *testing.T) { + testCases := []struct { + objectName string + sipHash int + }{ + // cases which should pass the test. + // passing in valid object name. + {"object", 37}, + {"The Shining Script .pdf", 38}, + {"Cost Benefit Analysis (2009-2010).pptx", 59}, + {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", 35}, + {"SHØRT", 49}, + {"There are far too many object names, and far too few bucket names!", 8}, + {"a/b/c/", 159}, + {"/a/b/c", 96}, + {string([]byte{0xff, 0xfe, 0xfd}), 147}, + } + + // Tests hashing order to be consistent. + for i, testCase := range testCases { + if sipHashElement := hashKey("SIPMOD", testCase.objectName, 200, testUUID); sipHashElement != testCase.sipHash { + t.Errorf("Test case %d: Expected \"%v\" but failed \"%v\"", i+1, testCase.sipHash, sipHashElement) + } + } + + if sipHashElement := hashKey("SIPMOD", "This will fail", -1, testUUID); sipHashElement != -1 { + t.Errorf("Test: Expected \"-1\" but got \"%v\"", sipHashElement) + } + + if sipHashElement := hashKey("SIPMOD", "This will fail", 0, testUUID); sipHashElement != -1 { + t.Errorf("Test: Expected \"-1\" but got \"%v\"", sipHashElement) + } + + if sipHashElement := hashKey("UNKNOWN", "This will fail", 0, testUUID); sipHashElement != -1 { + t.Errorf("Test: Expected \"-1\" but got \"%v\"", sipHashElement) + } +} + +// TestCrcHashMod - test crc hash. +func TestCrcHashMod(t *testing.T) { + testCases := []struct { + objectName string + crcHash int + }{ + // cases which should pass the test. + // passing in valid object name. + {"object", 28}, + {"The Shining Script .pdf", 142}, + {"Cost Benefit Analysis (2009-2010).pptx", 133}, + {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", 185}, + {"SHØRT", 97}, + {"There are far too many object names, and far too few bucket names!", 101}, + {"a/b/c/", 193}, + {"/a/b/c", 116}, + {string([]byte{0xff, 0xfe, 0xfd}), 61}, + } + + // Tests hashing order to be consistent. 
+ for i, testCase := range testCases { + if crcHashElement := hashKey("CRCMOD", testCase.objectName, 200, testUUID); crcHashElement != testCase.crcHash { + t.Errorf("Test case %d: Expected \"%v\" but failed \"%v\"", i+1, testCase.crcHash, crcHashElement) + } + } + + if crcHashElement := hashKey("CRCMOD", "This will fail", -1, testUUID); crcHashElement != -1 { + t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) + } + + if crcHashElement := hashKey("CRCMOD", "This will fail", 0, testUUID); crcHashElement != -1 { + t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) + } + + if crcHashElement := hashKey("UNKNOWN", "This will fail", 0, testUUID); crcHashElement != -1 { + t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) + } +} + +// TestNewErasure - tests initialization of all input disks +// and constructs a valid `Erasure` object +func TestNewErasureSets(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var nDisks = 16 // Maximum disks. + var erasureDisks []string + for i := 0; i < nDisks; i++ { + // Do not attempt to create this path, the test validates + // so that newErasureSets initializes non existing paths + // and successfully returns initialized object layer. + disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) + erasureDisks = append(erasureDisks, disk) + defer os.RemoveAll(disk) + } + + endpoints := mustGetNewEndpoints(erasureDisks...) + _, _, err := waitForFormatErasure(true, endpoints, 1, 0, 16, "") + if err != errInvalidArgument { + t.Fatalf("Expecting error, got %s", err) + } + + _, _, err = waitForFormatErasure(true, nil, 1, 1, 16, "") + if err != errInvalidArgument { + t.Fatalf("Expecting error, got %s", err) + } + + // Initializes all erasure disks + storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "") + if err != nil { + t.Fatalf("Unable to format disks for erasure, %s", err) + } + + if _, err := newErasureSets(ctx, endpoints, storageDisks, format); err != nil { + t.Fatalf("Unable to initialize erasure") + } +} + +// TestHashedLayer - tests the hashed layer which will be returned +// consistently for a given object name. +func TestHashedLayer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var objs []*erasureObjects + for i := 0; i < 16; i++ { + obj, fsDirs, err := prepareErasure16(ctx) + if err != nil { + t.Fatal("Unable to initialize 'Erasure' object layer.", err) + } + + // Remove all dirs. + for _, dir := range fsDirs { + defer os.RemoveAll(dir) + } + + z := obj.(*erasureZones) + objs = append(objs, z.zones[0].sets[0]) + } + + sets := &erasureSets{sets: objs, distributionAlgo: "CRCMOD"} + + testCases := []struct { + objectName string + expectedObj *erasureObjects + }{ + // cases which should pass the test. + // passing in valid object name. + {"object", objs[12]}, + {"The Shining Script .pdf", objs[14]}, + {"Cost Benefit Analysis (2009-2010).pptx", objs[13]}, + {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", objs[1]}, + {"SHØRT", objs[9]}, + {"There are far too many object names, and far too few bucket names!", objs[13]}, + {"a/b/c/", objs[1]}, + {"/a/b/c", objs[4]}, + {string([]byte{0xff, 0xfe, 0xfd}), objs[13]}, + } + + // Tests hashing order to be consistent. 
+ for i, testCase := range testCases { + gotObj := sets.getHashedSet(testCase.objectName) + if gotObj != testCase.expectedObj { + t.Errorf("Test case %d: Expected \"%#v\" but failed \"%#v\"", i+1, testCase.expectedObj, gotObj) + } + } +} diff --git a/cmd/erasure-utils.go b/cmd/erasure-utils.go index 2f844da87..ad2e197d7 100644 --- a/cmd/erasure-utils.go +++ b/cmd/erasure-utils.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cmd/xl-zones.go b/cmd/erasure-zones.go similarity index 69% rename from cmd/xl-zones.go rename to cmd/erasure-zones.go index 12d6b7ef1..8f0efa13f 100644 --- a/cmd/xl-zones.go +++ b/cmd/erasure-zones.go @@ -34,25 +34,25 @@ import ( "github.com/minio/minio/pkg/sync/errgroup" ) -type xlZones struct { +type erasureZones struct { GatewayUnsupported - zones []*xlSets + zones []*erasureSets } -func (z *xlZones) SingleZone() bool { +func (z *erasureZones) SingleZone() bool { return len(z.zones) == 1 } // Initialize new zone of erasure sets. -func newXLZones(ctx context.Context, endpointZones EndpointZones) (ObjectLayer, error) { +func newErasureZones(ctx context.Context, endpointZones EndpointZones) (ObjectLayer, error) { var ( deploymentID string err error - formats = make([]*formatXLV3, len(endpointZones)) + formats = make([]*formatErasureV3, len(endpointZones)) storageDisks = make([][]StorageAPI, len(endpointZones)) - z = &xlZones{zones: make([]*xlSets, len(endpointZones))} + z = &erasureZones{zones: make([]*erasureSets, len(endpointZones))} ) var localDrives []string @@ -64,7 +64,7 @@ func newXLZones(ctx context.Context, endpointZones EndpointZones) (ObjectLayer, localDrives = append(localDrives, endpoint.Path) } } - storageDisks[i], formats[i], err = waitForFormatXL(local, ep.Endpoints, i+1, + storageDisks[i], formats[i], err = waitForFormatErasure(local, ep.Endpoints, i+1, ep.SetCount, ep.DrivesPerSet, deploymentID) if err != nil { return nil, err @@ -72,7 +72,7 @@ func newXLZones(ctx context.Context, endpointZones EndpointZones) (ObjectLayer, if deploymentID == "" { deploymentID = formats[i].ID } - z.zones[i], err = newXLSets(ctx, ep.Endpoints, storageDisks[i], formats[i]) + z.zones[i], err = newErasureSets(ctx, ep.Endpoints, storageDisks[i], formats[i]) if err != nil { return nil, err } @@ -82,7 +82,7 @@ func newXLZones(ctx context.Context, endpointZones EndpointZones) (ObjectLayer, return z, nil } -func (z *xlZones) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker { +func (z *erasureZones) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker { return z.zones[0].NewNSLock(ctx, bucket, objects...) 
} @@ -102,7 +102,7 @@ func (p zonesAvailableSpace) TotalAvailable() uint64 { return total } -func (z *xlZones) getAvailableZoneIdx(ctx context.Context) int { +func (z *erasureZones) getAvailableZoneIdx(ctx context.Context) int { zones := z.getZonesAvailableSpace(ctx) total := zones.TotalAvailable() if total == 0 { @@ -122,7 +122,7 @@ func (z *xlZones) getAvailableZoneIdx(ctx context.Context) int { panic(fmt.Errorf("reached end of zones (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose)) } -func (z *xlZones) getZonesAvailableSpace(ctx context.Context) zonesAvailableSpace { +func (z *erasureZones) getZonesAvailableSpace(ctx context.Context) zonesAvailableSpace { var zones = make(zonesAvailableSpace, len(z.zones)) storageInfos := make([]StorageInfo, len(z.zones)) @@ -151,7 +151,7 @@ func (z *xlZones) getZonesAvailableSpace(ctx context.Context) zonesAvailableSpac return zones } -func (z *xlZones) Shutdown(ctx context.Context) error { +func (z *erasureZones) Shutdown(ctx context.Context) error { if z.SingleZone() { return z.zones[0].Shutdown(ctx) } @@ -175,7 +175,7 @@ func (z *xlZones) Shutdown(ctx context.Context) error { return nil } -func (z *xlZones) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { +func (z *erasureZones) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { if z.SingleZone() { return z.zones[0].StorageInfo(ctx, local) } @@ -219,9 +219,10 @@ func (z *xlZones) StorageInfo(ctx context.Context, local bool) (StorageInfo, []e return storageInfo, errs } -func (z *xlZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error { +func (z *erasureZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error { ctx, cancel := context.WithCancel(ctx) defer cancel() + var wg sync.WaitGroup var mu sync.Mutex var results []dataUsageCache @@ -231,9 +232,9 @@ func (z *xlZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, upd // Collect for each set in zones. for _, z := range z.zones { - for _, xlObj := range z.sets { + for _, erObj := range z.sets { // Add new buckets. - buckets, err := xlObj.ListBuckets(ctx) + buckets, err := erObj.ListBuckets(ctx) if err != nil { return err } @@ -246,7 +247,7 @@ func (z *xlZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, upd } wg.Add(1) results = append(results, dataUsageCache{}) - go func(i int, xl *xlObjects) { + go func(i int, erObj *erasureObjects) { updates := make(chan dataUsageCache, 1) defer close(updates) // Start update collector. @@ -259,7 +260,7 @@ func (z *xlZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, upd } }() // Start crawler. Blocks until done. - err := xl.crawlAndGetDataUsage(ctx, buckets, bf, updates) + err := erObj.crawlAndGetDataUsage(ctx, buckets, bf, updates) if err != nil { logger.LogIf(ctx, err) mu.Lock() @@ -271,7 +272,7 @@ func (z *xlZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, upd mu.Unlock() return } - }(len(results)-1, xlObj) + }(len(results)-1, erObj) } } updateCloser := make(chan chan struct{}) @@ -325,15 +326,16 @@ func (z *xlZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, upd // MakeBucketWithLocation - creates a new bucket across all zones simultaneously // even if one of the sets fail to create buckets, we proceed all the successful // operations. 
-func (z *xlZones) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { +func (z *erasureZones) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error { if z.SingleZone() { - if err := z.zones[0].MakeBucketWithLocation(ctx, bucket, location, lockEnabled); err != nil { + if err := z.zones[0].MakeBucketWithLocation(ctx, bucket, opts); err != nil { return err } // If it doesn't exist we get a new, so ignore errors meta := newBucketMetadata(bucket) - if lockEnabled { + if opts.LockEnabled { + meta.VersioningConfigXML = enabledBucketVersioningConfig meta.ObjectLockConfigXML = enabledBucketObjectLockConfig } if err := meta.Save(ctx, z); err != nil { @@ -349,7 +351,7 @@ func (z *xlZones) MakeBucketWithLocation(ctx context.Context, bucket, location s for index := range z.zones { index := index g.Go(func() error { - return z.zones[index].MakeBucketWithLocation(ctx, bucket, location, lockEnabled) + return z.zones[index].MakeBucketWithLocation(ctx, bucket, opts) }, index) } @@ -363,12 +365,15 @@ func (z *xlZones) MakeBucketWithLocation(ctx context.Context, bucket, location s // If it doesn't exist we get a new, so ignore errors meta := newBucketMetadata(bucket) - if lockEnabled { + if opts.LockEnabled { + meta.VersioningConfigXML = enabledBucketVersioningConfig meta.ObjectLockConfigXML = enabledBucketObjectLockConfig } + if err := meta.Save(ctx, z); err != nil { return toObjectErr(err, bucket) } + globalBucketMetadataSys.Set(bucket, meta) // Success. @@ -376,7 +381,7 @@ func (z *xlZones) MakeBucketWithLocation(ctx context.Context, bucket, location s } -func (z *xlZones) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { +func (z *erasureZones) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { var nsUnlocker = func() {} // Acquire lock @@ -412,7 +417,7 @@ func (z *xlZones) GetObjectNInfo(ctx context.Context, bucket, object string, rs return nil, ObjectNotFound{Bucket: bucket, Object: object} } -func (z *xlZones) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { +func (z *erasureZones) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { // Lock the object before reading. lk := z.NewNSLock(ctx, bucket, object) if err := lk.GetRLock(globalObjectTimeout); err != nil { @@ -435,7 +440,7 @@ func (z *xlZones) GetObject(ctx context.Context, bucket, object string, startOff return ObjectNotFound{Bucket: bucket, Object: object} } -func (z *xlZones) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) { +func (z *erasureZones) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) { // Lock the object before reading. lk := z.NewNSLock(ctx, bucket, object) if err := lk.GetRLock(globalObjectTimeout); err != nil { @@ -460,7 +465,7 @@ func (z *xlZones) GetObjectInfo(ctx context.Context, bucket, object string, opts } // PutObject - writes an object to least used erasure zone. 
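A brief note before the PutObject hunk below: folding `location` and `lockEnabled` into a single `BucketOptions` value keeps the `MakeBucketWithLocation` signature stable as bucket features accumulate. The sketch below assumes only the two fields visible in this hunk (the authoritative struct lives elsewhere in the patch), and shows the coupling the hunk encodes: enabling object lock also enables versioning, since lock requires it:

```go
package main

import "fmt"

// BucketOptions mirrors the shape implied by the hunk above; the
// authoritative definition is elsewhere in this patch.
type BucketOptions struct {
	Location    string
	LockEnabled bool
}

// startingConfigs reports which configs a fresh bucket should carry.
// Object locking requires versioning, so LockEnabled implies both.
func startingConfigs(opts BucketOptions) (versioning, lock bool) {
	if !opts.LockEnabled {
		return false, false
	}
	return true, true
}

func main() {
	v, l := startingConfigs(BucketOptions{Location: "us-east-1", LockEnabled: true})
	fmt.Println("versioning:", v, "object-lock:", l)
}
```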
-func (z *xlZones) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (ObjectInfo, error) { +func (z *erasureZones) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (ObjectInfo, error) { // Lock the object. lk := z.NewNSLock(ctx, bucket, object) if err := lk.GetLock(globalObjectTimeout); err != nil { @@ -487,56 +492,65 @@ func (z *xlZones) PutObject(ctx context.Context, bucket string, object string, d return z.zones[z.getAvailableZoneIdx(ctx)].PutObject(ctx, bucket, object, data, opts) } -func (z *xlZones) DeleteObject(ctx context.Context, bucket string, object string) error { +func (z *erasureZones) DeleteObject(ctx context.Context, bucket string, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { // Acquire a write lock before deleting the object. lk := z.NewNSLock(ctx, bucket, object) - if err := lk.GetLock(globalOperationTimeout); err != nil { - return err + if err = lk.GetLock(globalOperationTimeout); err != nil { + return ObjectInfo{}, err } defer lk.Unlock() if z.SingleZone() { - return z.zones[0].DeleteObject(ctx, bucket, object) + return z.zones[0].DeleteObject(ctx, bucket, object, opts) } for _, zone := range z.zones { - err := zone.DeleteObject(ctx, bucket, object) + objInfo, err = zone.DeleteObject(ctx, bucket, object, opts) + if err == nil { + return objInfo, nil + } if err != nil && !isErrObjectNotFound(err) { - return err + break } } - return nil + return objInfo, err } -func (z *xlZones) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (z *erasureZones) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { derrs := make([]error, len(objects)) + dobjects := make([]DeletedObject, len(objects)) + objNames := make([]string, len(objects)) for i := range derrs { - derrs[i] = checkDelObjArgs(ctx, bucket, objects[i]) + derrs[i] = checkDelObjArgs(ctx, bucket, objects[i].ObjectName) + objNames[i] = objects[i].ObjectName } // Acquire a bulk write lock across 'objects' - multiDeleteLock := z.NewNSLock(ctx, bucket, objects...) + multiDeleteLock := z.NewNSLock(ctx, bucket, objNames...) if err := multiDeleteLock.GetLock(globalOperationTimeout); err != nil { - return nil, err + for i := range derrs { + derrs[i] = err + } + return nil, derrs } defer multiDeleteLock.Unlock() for _, zone := range z.zones { - errs, err := zone.DeleteObjects(ctx, bucket, objects) - if err != nil { - return nil, err - } + deletedObjects, errs := zone.DeleteObjects(ctx, bucket, objects, opts) for i, derr := range errs { if derrs[i] == nil { if derr != nil && !isErrObjectNotFound(derr) { derrs[i] = derr } } + if derrs[i] == nil { + dobjects[i] = deletedObjects[i] + } } } - return derrs, nil + return dobjects, derrs } -func (z *xlZones) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) { +func (z *erasureZones) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) { // Check if this request is only metadata update. 
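Pausing the CopyObject hunk for a moment: the DeleteObjects change above switches the bulk path from one error for the whole batch to a `DeletedObject` plus an error per input, merged across zones. A reduced model of that aggregation — the types here are simplified stand-ins, not the patch's API — where the first zone to succeed for a name wins and "not found" in one zone is tolerated:

```go
package main

import (
	"errors"
	"fmt"
)

var errObjectNotFound = errors.New("object not found")

// deleteInZone stands in for one zone's bulk delete: one result and
// one error per requested object name.
type deleteInZone func(objects []string) ([]string, []error)

// deleteAcrossZones mirrors the merging in the hunk above: keep the
// first real error per slot, and record the first successful result.
func deleteAcrossZones(zones []deleteInZone, objects []string) ([]string, []error) {
	deleted := make([]string, len(objects))
	derrs := make([]error, len(objects))
	for _, zone := range zones {
		results, errs := zone(objects)
		for i, err := range errs {
			if derrs[i] == nil {
				if err != nil && !errors.Is(err, errObjectNotFound) {
					derrs[i] = err
				}
			}
			if derrs[i] == nil && deleted[i] == "" {
				deleted[i] = results[i]
			}
		}
	}
	return deleted, derrs
}

func main() {
	zoneA := func(objs []string) ([]string, []error) {
		return []string{"", "b"}, []error{errObjectNotFound, nil}
	}
	zoneB := func(objs []string) ([]string, []error) {
		return []string{"a", ""}, []error{nil, errObjectNotFound}
	}
	deleted, errs := deleteAcrossZones([]deleteInZone{zoneA, zoneB}, []string{"a", "b"})
	fmt.Println(deleted, errs) // [a b] [<nil> <nil>]
}
```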
cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject)) if !cpSrcDstSame { @@ -574,10 +588,7 @@ func (z *xlZones) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucke return z.zones[z.getAvailableZoneIdx(ctx)].PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts) } -func (z *xlZones) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) { - if z.SingleZone() { - return z.zones[0].ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, maxKeys, fetchOwner, startAfter) - } +func (z *erasureZones) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) { marker := continuationToken if marker == "" { marker = startAfter @@ -598,7 +609,7 @@ func (z *xlZones) ListObjectsV2(ctx context.Context, bucket, prefix, continuatio return listObjectsV2Info, err } -func (z *xlZones) listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { +func (z *erasureZones) listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { var zonesEntryChs [][]FileInfoCh @@ -710,7 +721,7 @@ func (z *xlZones) listObjectsNonSlash(ctx context.Context, bucket, prefix, marke return result, nil } -func (z *xlZones) listObjectsSplunk(ctx context.Context, bucket, prefix, marker string, maxKeys int) (loi ListObjectsInfo, err error) { +func (z *erasureZones) listObjectsSplunk(ctx context.Context, bucket, prefix, marker string, maxKeys int) (loi ListObjectsInfo, err error) { if strings.Contains(prefix, guidSplunk) { logger.LogIf(ctx, NotImplemented{}) return loi, NotImplemented{} @@ -743,7 +754,7 @@ func (z *xlZones) listObjectsSplunk(ctx context.Context, bucket, prefix, marker } for _, entry := range entries.Files { - objInfo := entry.ToObjectInfo() + objInfo := entry.ToObjectInfo(bucket, entry.Name) splits := strings.Split(objInfo.Name, guidSplunk) if len(splits) == 0 { loi.Objects = append(loi.Objects, objInfo) @@ -762,7 +773,7 @@ func (z *xlZones) listObjectsSplunk(ctx context.Context, bucket, prefix, marker return loi, nil } -func (z *xlZones) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { +func (z *erasureZones) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { loi := ListObjectsInfo{} if err := checkListObjsArgs(ctx, bucket, prefix, marker, z); err != nil { @@ -834,7 +845,7 @@ func (z *xlZones) listObjects(ctx context.Context, bucket, prefix, marker, delim } for _, entry := range entries.Files { - objInfo := entry.ToObjectInfo() + objInfo := entry.ToObjectInfo(entry.Volume, entry.Name) if HasSuffix(objInfo.Name, SlashSeparator) && !recursive { loi.Prefixes = append(loi.Prefixes, objInfo.Name) continue @@ -881,6 +892,8 @@ func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileI var lentry FileInfo var found bool var zoneIndex = -1 + // TODO: following loop can be merged with above + // loop, explore this possibility. 
 	for i, entriesValid := range zoneEntriesValid {
 		for j, valid := range entriesValid {
 			if !valid {
 				continue
 			}
@@ -928,6 +941,115 @@ func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileI
 	return lentry, lexicallySortedEntryCount, zoneIndex, isTruncated
 }
 
+// Calculate least entry across zones and across multiple FileInfoVersions
+// channels, returns the least common entry and the total number of times
+// we found this entry. Additionally, it returns a boolean to indicate if
+// the caller needs to call this function again to list the next entry.
+// It is the caller's responsibility, when listing N entries, to call
+// lexicallySortedEntryZoneVersions N times until this boolean is 'false'.
+func lexicallySortedEntryZoneVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneEntries [][]FileInfoVersions, zoneEntriesValid [][]bool) (FileInfoVersions, int, int, bool) {
+	for i, entryChs := range zoneEntryChs {
+		for j := range entryChs {
+			zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
+		}
+	}
+
+	var isTruncated = false
+	for _, entriesValid := range zoneEntriesValid {
+		for _, valid := range entriesValid {
+			if !valid {
+				continue
+			}
+			isTruncated = true
+			break
+		}
+		if isTruncated {
+			break
+		}
+	}
+
+	var lentry FileInfoVersions
+	var found bool
+	var zoneIndex = -1
+	for i, entriesValid := range zoneEntriesValid {
+		for j, valid := range entriesValid {
+			if !valid {
+				continue
+			}
+			if !found {
+				lentry = zoneEntries[i][j]
+				found = true
+				zoneIndex = i
+				continue
+			}
+			if zoneEntries[i][j].Name < lentry.Name {
+				lentry = zoneEntries[i][j]
+				zoneIndex = i
+			}
+		}
+	}
+
+	// We haven't been able to find any least entry;
+	// this means that we don't have a valid entry.
+	if !found {
+		return lentry, 0, zoneIndex, isTruncated
+	}
+
+	lexicallySortedEntryCount := 0
+	for i, entriesValid := range zoneEntriesValid {
+		for j, valid := range entriesValid {
+			if !valid {
+				continue
+			}
+
+			// Entries are duplicated across disks,
+			// we should simply skip such entries.
+			if lentry.Name == zoneEntries[i][j].Name && lentry.LatestModTime.Equal(zoneEntries[i][j].LatestModTime) {
+				lexicallySortedEntryCount++
+				continue
+			}
+
+			// Push all entries which are lexically higher
+			// and will be returned later in Pop()
+			zoneEntryChs[i][j].Push(zoneEntries[i][j])
+		}
+	}
+
+	return lentry, lexicallySortedEntryCount, zoneIndex, isTruncated
+}
+
+// mergeZonesEntriesVersionsCh - merges FileInfoVersions channels into entries, up to maxKeys.
+func mergeZonesEntriesVersionsCh(zonesEntryChs [][]FileInfoVersionsCh, maxKeys int, ndisks int) (entries FilesInfoVersions) {
+	var i = 0
+	var zonesEntriesInfos [][]FileInfoVersions
+	var zonesEntriesValid [][]bool
+	for _, entryChs := range zonesEntryChs {
+		zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfoVersions, len(entryChs)))
+		zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs)))
+	}
+	for {
+		fi, quorumCount, _, ok := lexicallySortedEntryZoneVersions(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid)
+		if !ok {
+			// We have reached EOF across all entryChs, break the loop.
+			break
+		}
+
+		if quorumCount < ndisks-1 {
+			// Skip entries that are not present on at least ndisks-1 disks.
+			continue
+		}
+
+		entries.FilesVersions = append(entries.FilesVersions, fi)
+		i++
+		if i == maxKeys {
+			entries.IsTruncated = isTruncatedZonesVersions(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid)
+			break
+		}
+	}
+	return entries
+}
+
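The versioned listing above is a k-way merge: each disk walker feeds a lexically sorted channel, and the merge repeatedly pops every head, keeps the smallest, and pushes the rest back for the next round, counting duplicates instead of re-emitting them. A toy version of that pop/least/push-back cycle over plain string channels — `peekCh` is an illustrative helper playing the role the patch's FileInfoVersionsCh plays:

```go
package main

import "fmt"

// peekCh gives a channel one slot of push-back, like FileInfoVersionsCh.
type peekCh struct {
	ch    chan string
	held  string
	valid bool
}

func (p *peekCh) Pop() (string, bool) {
	if p.valid {
		p.valid = false
		return p.held, true
	}
	v, ok := <-p.ch
	return v, ok
}

func (p *peekCh) Push(v string) { p.held, p.valid = v, true }

// mergeSorted runs the same cycle as lexicallySortedEntryZoneVersions:
// pop all heads, keep the least, push back the rest.
func mergeSorted(chs []*peekCh) (out []string) {
	entries := make([]string, len(chs))
	valid := make([]bool, len(chs))
	for {
		for i, ch := range chs {
			entries[i], valid[i] = ch.Pop()
		}
		least, found := "", false
		for i, ok := range valid {
			if ok && (!found || entries[i] < least) {
				least, found = entries[i], true
			}
		}
		if !found {
			return out // every channel is drained
		}
		for i, ok := range valid {
			if ok && entries[i] != least {
				chs[i].Push(entries[i]) // not consumed this round
			}
		}
		out = append(out, least)
	}
}

func feed(vals ...string) *peekCh {
	ch := make(chan string, len(vals))
	for _, v := range vals {
		ch <- v
	}
	close(ch)
	return &peekCh{ch: ch}
}

func main() {
	fmt.Println(mergeSorted([]*peekCh{feed("a", "c"), feed("b", "c")}))
	// [a b c] - "c" appears on both inputs but is emitted once,
	// just as duplicated entries across disks collapse above.
}
```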
 // mergeZonesEntriesCh - merges FileInfo channel to entries upto maxKeys.
 func mergeZonesEntriesCh(zonesEntryChs [][]FileInfoCh, maxKeys int, ndisks int) (entries FilesInfo) {
 	var i = 0
@@ -966,6 +1088,35 @@ func isTruncatedZones(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileInfo, zon
 		}
 	}
 
+	var isTruncated = false
+	for _, entriesValid := range zoneEntriesValid {
+		for _, valid := range entriesValid {
+			if valid {
+				isTruncated = true
+				break
+			}
+		}
+		if isTruncated {
+			break
+		}
+	}
+	for i, entryChs := range zoneEntryChs {
+		for j := range entryChs {
+			if zoneEntriesValid[i][j] {
+				zoneEntryChs[i][j].Push(zoneEntries[i][j])
+			}
+		}
+	}
+	return isTruncated
+}
+
+func isTruncatedZonesVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneEntries [][]FileInfoVersions, zoneEntriesValid [][]bool) bool {
+	for i, entryChs := range zoneEntryChs {
+		for j := range entryChs {
+			zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
+		}
+	}
+
+	var isTruncated = false
 	for _, entriesValid := range zoneEntriesValid {
 		for _, valid := range entriesValid {
@@ -989,15 +1140,116 @@ func isTruncatedZones(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileInfo, zon
 	return isTruncated
 }
 
-func (z *xlZones) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
-	if z.SingleZone() {
-		return z.zones[0].ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
+func (z *erasureZones) listObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) {
+	loi := ListObjectVersionsInfo{}
+
+	if err := checkListObjsArgs(ctx, bucket, prefix, marker, z); err != nil {
+		return loi, err
 	}
+	// Marker is set, validate pre-condition.
+	if marker != "" {
+		// Marker not common with prefix is not implemented. Send an empty response.
+		if !HasPrefix(marker, prefix) {
+			return loi, nil
+		}
+	}
+
+	if marker == "" && versionMarker != "" {
+		return loi, NotImplemented{}
+	}
+
+	// With max keys of zero we have reached eof, return right here.
+	if maxKeys == 0 {
+		return loi, nil
+	}
+
+	// For delimiter and prefix as '/' we do not list anything at all
+	// since according to s3 spec we stop at the 'delimiter'
+	// along with the prefix. On a flat namespace with 'prefix'
+	// as '/' we don't have any entries, since all the keys are
+	// of form 'keyName/...'
+	if delimiter == SlashSeparator && prefix == SlashSeparator {
+		return loi, nil
+	}
+
+	// Overflowing count - reset to maxObjectList.
+	if maxKeys < 0 || maxKeys > maxObjectList {
+		maxKeys = maxObjectList
+	}
+
+	if delimiter != SlashSeparator && delimiter != "" {
+		return loi, NotImplemented{}
+	}
+
+	// Default is recursive, if delimiter is set then list non-recursive.
+ recursive := true + if delimiter == SlashSeparator { + recursive = false + } + + var zonesEntryChs [][]FileInfoVersionsCh + var zonesEndWalkCh []chan struct{} + + const ndisks = 3 + for _, zone := range z.zones { + entryChs, endWalkCh := zone.poolVersions.Release(listParams{bucket, recursive, marker, prefix}) + if entryChs == nil { + endWalkCh = make(chan struct{}) + entryChs = zone.startMergeWalksVersionsN(ctx, bucket, prefix, marker, recursive, endWalkCh, ndisks) + } + zonesEntryChs = append(zonesEntryChs, entryChs) + zonesEndWalkCh = append(zonesEndWalkCh, endWalkCh) + } + + entries := mergeZonesEntriesVersionsCh(zonesEntryChs, maxKeys, ndisks) + if len(entries.FilesVersions) == 0 { + return loi, nil + } + + loi.IsTruncated = entries.IsTruncated + if loi.IsTruncated { + loi.NextMarker = entries.FilesVersions[len(entries.FilesVersions)-1].Name + } + + for _, entry := range entries.FilesVersions { + for _, version := range entry.Versions { + objInfo := version.ToObjectInfo(bucket, entry.Name) + if HasSuffix(objInfo.Name, SlashSeparator) && !recursive { + loi.Prefixes = append(loi.Prefixes, objInfo.Name) + continue + } + loi.Objects = append(loi.Objects, objInfo) + } + for _, deleted := range entry.Deleted { + loi.DeleteObjects = append(loi.DeleteObjects, DeletedObjectInfo{ + Bucket: bucket, + Name: entry.Name, + VersionID: deleted.VersionID, + ModTime: deleted.ModTime, + IsLatest: deleted.IsLatest, + }) + } + + } + if loi.IsTruncated { + for i, zone := range z.zones { + zone.poolVersions.Set(listParams{bucket, recursive, loi.NextMarker, prefix}, zonesEntryChs[i], + zonesEndWalkCh[i]) + } + } + return loi, nil +} + +func (z *erasureZones) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) { + return z.listObjectVersions(ctx, bucket, prefix, marker, versionMarker, delimiter, maxKeys) +} + +func (z *erasureZones) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { return z.listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) } -func (z *xlZones) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) { +func (z *erasureZones) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) { if err := checkListMultipartArgs(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, z); err != nil { return ListMultipartsInfo{}, err } @@ -1023,7 +1275,7 @@ func (z *xlZones) ListMultipartUploads(ctx context.Context, bucket, prefix, keyM } // Initiate a new multipart upload on a hashedSet based on object name. -func (z *xlZones) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) { +func (z *erasureZones) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) { if err := checkNewMultipartArgs(ctx, bucket, object, z); err != nil { return "", err } @@ -1035,7 +1287,7 @@ func (z *xlZones) NewMultipartUpload(ctx context.Context, bucket, object string, } // Copies a part of an object from source hashedSet to destination hashedSet. 
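For callers, the truncation contract of `listObjectVersions` above is the usual S3 one: when `IsTruncated` is set, resume from `NextMarker`. A hypothetical paging loop against a pared-down response type — `lister` and `versionsPage` are illustrative stand-ins, not APIs from this patch:

```go
package main

import "fmt"

// versionsPage is a pared-down ListObjectVersionsInfo.
type versionsPage struct {
	Names       []string
	IsTruncated bool
	NextMarker  string
}

// lister stands in for the ListObjectVersions entry point above.
type lister func(marker string, maxKeys int) versionsPage

// allVersions drains every page, resuming from NextMarker each time,
// the way an S3 client pages through ListObjectVersions responses.
func allVersions(list lister) (names []string) {
	marker := ""
	for {
		page := list(marker, 2) // tiny page size to force paging
		names = append(names, page.Names...)
		if !page.IsTruncated {
			return names
		}
		marker = page.NextMarker
	}
}

func main() {
	data := []string{"a", "b", "c", "d", "e"}
	list := func(marker string, maxKeys int) versionsPage {
		start := 0
		for i, n := range data {
			if n > marker { // resume strictly after the marker
				start = i
				break
			}
			start = i + 1
		}
		end := start + maxKeys
		if end >= len(data) {
			return versionsPage{Names: data[start:]}
		}
		return versionsPage{Names: data[start:end], IsTruncated: true, NextMarker: data[end-1]}
	}
	fmt.Println(allVersions(list)) // [a b c d e]
}
```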
-func (z *xlZones) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (PartInfo, error) { +func (z *erasureZones) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (PartInfo, error) { if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, z); err != nil { return PartInfo{}, err } @@ -1045,7 +1297,7 @@ func (z *xlZones) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dest } // PutObjectPart - writes part of an object to hashedSet based on the object name. -func (z *xlZones) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (PartInfo, error) { +func (z *erasureZones) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (PartInfo, error) { if err := checkPutObjectPartArgs(ctx, bucket, object, z); err != nil { return PartInfo{}, err } @@ -1081,7 +1333,7 @@ func (z *xlZones) PutObjectPart(ctx context.Context, bucket, object, uploadID st } } -func (z *xlZones) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { +func (z *erasureZones) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { if err := checkListPartsArgs(ctx, bucket, object, z); err != nil { return MultipartInfo{}, err } @@ -1117,7 +1369,7 @@ func (z *xlZones) GetMultipartInfo(ctx context.Context, bucket, object, uploadID } // ListObjectParts - lists all uploaded parts to an object in hashedSet. -func (z *xlZones) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (ListPartsInfo, error) { +func (z *erasureZones) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (ListPartsInfo, error) { if err := checkListPartsArgs(ctx, bucket, object, z); err != nil { return ListPartsInfo{}, err } @@ -1150,7 +1402,7 @@ func (z *xlZones) ListObjectParts(ctx context.Context, bucket, object, uploadID } // Aborts an in-progress multipart operation on hashedSet based on the object name. -func (z *xlZones) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { +func (z *erasureZones) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { if err := checkAbortMultipartArgs(ctx, bucket, object, z); err != nil { return err } @@ -1185,7 +1437,7 @@ func (z *xlZones) AbortMultipartUpload(ctx context.Context, bucket, object, uplo } // CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name. 
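All the multipart verbs in this region route "on a hashedSet based on object name", so every part of one upload lands on the same erasure set. A toy illustration of why deterministic name hashing gives that stickiness — crc32 here is only a stand-in for the patch's actual placement hash:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// setForObject routes an object to one of n sets by hashing its name.
// Any deterministic hash gives the required stickiness; crc32 is a
// stand-in for the real one.
func setForObject(object string, n int) int {
	return int(crc32.ChecksumIEEE([]byte(object)) % uint32(n))
}

func main() {
	const sets = 4
	// NewMultipartUpload, PutObjectPart and CompleteMultipartUpload
	// all hash the same name, so they always agree on the set.
	for _, verb := range []string{"new-upload", "put-part", "complete"} {
		fmt.Println(verb, "-> set", setForObject("bucket/photo.jpg", sets))
	}
}
```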
-func (z *xlZones) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) { +func (z *erasureZones) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) { if err = checkCompleteMultipartArgs(ctx, bucket, object, z); err != nil { return objInfo, err } @@ -1212,7 +1464,7 @@ func (z *xlZones) CompleteMultipartUpload(ctx context.Context, bucket, object, u // Purge any existing object. for _, zone := range z.zones { - zone.DeleteObject(ctx, bucket, object) + zone.DeleteObject(ctx, bucket, object, opts) } for _, zone := range z.zones { @@ -1232,7 +1484,7 @@ func (z *xlZones) CompleteMultipartUpload(ctx context.Context, bucket, object, u } // GetBucketInfo - returns bucket info from one of the erasure coded zones. -func (z *xlZones) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) { +func (z *erasureZones) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) { if z.SingleZone() { bucketInfo, err = z.zones[0].GetBucketInfo(ctx, bucket) if err != nil { @@ -1264,33 +1516,33 @@ func (z *xlZones) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo } // IsNotificationSupported returns whether bucket notification is applicable for this layer. -func (z *xlZones) IsNotificationSupported() bool { +func (z *erasureZones) IsNotificationSupported() bool { return true } // IsListenBucketSupported returns whether listen bucket notification is applicable for this layer. -func (z *xlZones) IsListenBucketSupported() bool { +func (z *erasureZones) IsListenBucketSupported() bool { return true } // IsEncryptionSupported returns whether server side encryption is implemented for this layer. -func (z *xlZones) IsEncryptionSupported() bool { +func (z *erasureZones) IsEncryptionSupported() bool { return true } // IsCompressionSupported returns whether compression is applicable for this layer. -func (z *xlZones) IsCompressionSupported() bool { +func (z *erasureZones) IsCompressionSupported() bool { return true } -func (z *xlZones) IsTaggingSupported() bool { +func (z *erasureZones) IsTaggingSupported() bool { return true } // DeleteBucket - deletes a bucket on all zones simultaneously, // even if one of the zones fail to delete buckets, we proceed to // undo a successful operation. -func (z *xlZones) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { +func (z *erasureZones) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { if z.SingleZone() { return z.zones[0].DeleteBucket(ctx, bucket, forceDelete) } @@ -1311,7 +1563,7 @@ func (z *xlZones) DeleteBucket(ctx context.Context, bucket string, forceDelete b for _, err := range errs { if err != nil { if _, ok := err.(InsufficientWriteQuorum); ok { - undoDeleteBucketZones(bucket, z.zones, errs) + undoDeleteBucketZones(ctx, bucket, z.zones, errs) } return err @@ -1323,7 +1575,7 @@ func (z *xlZones) DeleteBucket(ctx context.Context, bucket string, forceDelete b } // This function is used to undo a successful DeleteBucket operation. -func undoDeleteBucketZones(bucket string, zones []*xlSets, errs []error) { +func undoDeleteBucketZones(ctx context.Context, bucket string, zones []*erasureSets, errs []error) { g := errgroup.WithNErrs(len(zones)) // Undo previous delete bucket on all underlying zones. 
@@ -1331,7 +1583,7 @@ func undoDeleteBucketZones(bucket string, zones []*xlSets, errs []error) { index := index g.Go(func() error { if errs[index] == nil { - return zones[index].MakeBucketWithLocation(GlobalContext, bucket, "", false) + return zones[index].MakeBucketWithLocation(ctx, bucket, BucketOptions{}) } return nil }, index) @@ -1343,7 +1595,7 @@ func undoDeleteBucketZones(bucket string, zones []*xlSets, errs []error) { // List all buckets from one of the zones, we are not doing merge // sort here just for simplification. As per design it is assumed // that all buckets are present on all zones. -func (z *xlZones) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) { +func (z *erasureZones) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) { if z.SingleZone() { buckets, err = z.zones[0].ListBuckets(ctx) } else { @@ -1368,7 +1620,7 @@ func (z *xlZones) ListBuckets(ctx context.Context) (buckets []BucketInfo, err er return buckets, nil } -func (z *xlZones) ReloadFormat(ctx context.Context, dryRun bool) error { +func (z *erasureZones) ReloadFormat(ctx context.Context, dryRun bool) error { // Acquire lock on format.json formatLock := z.NewNSLock(ctx, minioMetaBucket, formatConfigFile) if err := formatLock.GetRLock(globalHealingTimeout); err != nil { @@ -1384,7 +1636,7 @@ func (z *xlZones) ReloadFormat(ctx context.Context, dryRun bool) error { return nil } -func (z *xlZones) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { +func (z *erasureZones) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { // Acquire lock on format.json formatLock := z.NewNSLock(ctx, minioMetaBucket, formatConfigFile) if err := formatLock.GetLock(globalHealingTimeout); err != nil { @@ -1421,7 +1673,7 @@ func (z *xlZones) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResul return r, nil } -func (z *xlZones) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem, error) { +func (z *erasureZones) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem, error) { var r = madmin.HealResultItem{ Type: madmin.HealItemBucket, Bucket: bucket, @@ -1449,18 +1701,16 @@ func (z *xlZones) HealBucket(ctx context.Context, bucket string, dryRun, remove // to allocate a receive channel for ObjectInfo, upon any unhandled // error walker returns error. Optionally if context.Done() is received // then Walk() stops the walker. -func (z *xlZones) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error { +func (z *erasureZones) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error { if err := checkListObjsArgs(ctx, bucket, prefix, "", z); err != nil { // Upon error close the channel. 
close(results) return err } - var zonesEntryChs [][]FileInfoCh - + var zonesEntryChs [][]FileInfoVersionsCh for _, zone := range z.zones { - zonesEntryChs = append(zonesEntryChs, - zone.startMergeWalks(ctx, bucket, prefix, "", true, ctx.Done())) + zonesEntryChs = append(zonesEntryChs, zone.startMergeWalksVersions(ctx, bucket, prefix, "", true, ctx.Done())) } var zoneDrivesPerSet []int @@ -1468,10 +1718,10 @@ func (z *xlZones) Walk(ctx context.Context, bucket, prefix string, results chan< zoneDrivesPerSet = append(zoneDrivesPerSet, zone.drivesPerSet) } - var zonesEntriesInfos [][]FileInfo + var zonesEntriesInfos [][]FileInfoVersions var zonesEntriesValid [][]bool for _, entryChs := range zonesEntryChs { - zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfo, len(entryChs))) + zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfoVersions, len(entryChs))) zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs))) } @@ -1479,14 +1729,20 @@ func (z *xlZones) Walk(ctx context.Context, bucket, prefix string, results chan< defer close(results) for { - entry, quorumCount, zoneIndex, ok := lexicallySortedEntryZone(zonesEntryChs, - zonesEntriesInfos, zonesEntriesValid) + entry, quorumCount, zoneIndex, ok := lexicallySortedEntryZoneVersions(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid) if !ok { + // We have reached EOF across all entryChs, break the loop. return } if quorumCount >= zoneDrivesPerSet[zoneIndex]/2 { - results <- entry.ToObjectInfo() // Read quorum exists proceed + // Read quorum exists proceed + for _, version := range entry.Versions { + results <- version.ToObjectInfo(bucket, version.Name) + } + for _, deleted := range entry.Deleted { + results <- deleted.ToObjectInfo(bucket, deleted.Name) + } } // skip entries which do not have quorum @@ -1496,17 +1752,18 @@ func (z *xlZones) Walk(ctx context.Context, bucket, prefix string, results chan< return nil } -type healObjectFn func(string, string) error +// HealObjectFn closure function heals the object. 
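Walk, as reworked above, now streams every version and delete marker of each name into the caller's channel and closes it when the walk finishes. A minimal consumer sketch, assuming only that contract (the `HealObjectFn` type introduced by the comment above is defined just after this aside):

```go
package main

import (
	"context"
	"fmt"
)

// objectInfo is a pared-down stand-in for ObjectInfo.
type objectInfo struct {
	Name      string
	VersionID string
}

// walk is a stand-in producer with Walk's contract: it sends every
// version and closes results when done or when ctx is cancelled.
func walk(ctx context.Context, results chan<- objectInfo) {
	defer close(results)
	for _, oi := range []objectInfo{
		{"a.txt", "v2"}, {"a.txt", "v1"}, {"b.txt", "v1"},
	} {
		select {
		case results <- oi:
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	results := make(chan objectInfo)
	go walk(ctx, results)

	// Ranging over the channel ends when the walker closes it.
	for oi := range results {
		fmt.Println(oi.Name, oi.VersionID)
	}
}
```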
+type HealObjectFn func(string, string, string) error -func (z *xlZones) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject healObjectFn) error { - var zonesEntryChs [][]FileInfoCh +func (z *erasureZones) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject HealObjectFn) error { + var zonesEntryChs [][]FileInfoVersionsCh endWalkCh := make(chan struct{}) defer close(endWalkCh) for _, zone := range z.zones { zonesEntryChs = append(zonesEntryChs, - zone.startMergeWalks(ctx, bucket, prefix, "", true, endWalkCh)) + zone.startMergeWalksVersions(ctx, bucket, prefix, "", true, endWalkCh)) } var zoneDrivesPerSet []int @@ -1514,15 +1771,15 @@ func (z *xlZones) HealObjects(ctx context.Context, bucket, prefix string, opts m zoneDrivesPerSet = append(zoneDrivesPerSet, zone.drivesPerSet) } - var zonesEntriesInfos [][]FileInfo + var zonesEntriesInfos [][]FileInfoVersions var zonesEntriesValid [][]bool for _, entryChs := range zonesEntryChs { - zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfo, len(entryChs))) + zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfoVersions, len(entryChs))) zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs))) } for { - entry, quorumCount, zoneIndex, ok := lexicallySortedEntryZone(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid) + entry, quorumCount, zoneIndex, ok := lexicallySortedEntryZoneVersions(zonesEntryChs, zonesEntriesInfos, zonesEntriesValid) if !ok { break } @@ -1535,17 +1792,19 @@ func (z *xlZones) HealObjects(ctx context.Context, bucket, prefix string, opts m // Wait and proceed if there are active requests waitForLowHTTPReq(int32(zoneDrivesPerSet[zoneIndex])) - if err := healObject(bucket, entry.Name); err != nil { - return toObjectErr(err, bucket, entry.Name) + for _, version := range entry.Versions { + if err := healObject(bucket, version.Name, version.VersionID); err != nil { + return toObjectErr(err, bucket, version.Name) + } } } return nil } -func (z *xlZones) HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error) { +func (z *erasureZones) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) { // Lock the object before healing. Use read lock since healing - // will only regenerate parts & xl.json of outdated disks. + // will only regenerate parts & xl.meta of outdated disks. 
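An aside on the callback shape above, before HealObject's body continues: the heal walk now hands each discovered version to a three-argument `HealObjectFn` (bucket, object, versionID). A sketch of wiring such a callback; the driver and the fixed version list are illustrative:

```go
package main

import "fmt"

// HealObjectFn matches the closure signature introduced above:
// bucket, object, and the specific version to heal.
type HealObjectFn func(bucket, object, versionID string) error

// healAll drives a HealObjectFn over a fixed set of versions, the
// way HealObjects above drives it over walked entries.
func healAll(versions [][3]string, heal HealObjectFn) error {
	for _, v := range versions {
		if err := heal(v[0], v[1], v[2]); err != nil {
			return fmt.Errorf("heal %s/%s (%s): %w", v[0], v[1], v[2], err)
		}
	}
	return nil
}

func main() {
	heal := func(bucket, object, versionID string) error {
		// A real callback would call HealObject on the object layer.
		fmt.Println("healing", bucket, object, versionID)
		return nil
	}
	_ = healAll([][3]string{
		{"photos", "a.jpg", "v1"},
		{"photos", "a.jpg", "v2"},
	}, heal)
}
```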
lk := z.NewNSLock(ctx, bucket, object) if err := lk.GetRLock(globalHealingTimeout); err != nil { return madmin.HealResultItem{}, err @@ -1553,10 +1812,10 @@ func (z *xlZones) HealObject(ctx context.Context, bucket, object string, opts ma defer lk.RUnlock() if z.SingleZone() { - return z.zones[0].HealObject(ctx, bucket, object, opts) + return z.zones[0].HealObject(ctx, bucket, object, versionID, opts) } for _, zone := range z.zones { - result, err := zone.HealObject(ctx, bucket, object, opts) + result, err := zone.HealObject(ctx, bucket, object, versionID, opts) if err != nil { if isErrObjectNotFound(err) { continue @@ -1571,7 +1830,7 @@ func (z *xlZones) HealObject(ctx context.Context, bucket, object string, opts ma } } -func (z *xlZones) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { +func (z *erasureZones) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { var healBuckets []BucketInfo for _, zone := range z.zones { bucketsInfo, err := zone.ListBucketsHeal(ctx) @@ -1592,15 +1851,15 @@ func (z *xlZones) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { } // GetMetrics - no op -func (z *xlZones) GetMetrics(ctx context.Context) (*Metrics, error) { +func (z *erasureZones) GetMetrics(ctx context.Context) (*Metrics, error) { logger.LogIf(ctx, NotImplemented{}) return &Metrics{}, NotImplemented{} } -func (z *xlZones) getZoneAndSet(id string) (int, int, error) { +func (z *erasureZones) getZoneAndSet(id string) (int, int, error) { for zoneIdx := range z.zones { format := z.zones[zoneIdx].format - for setIdx, set := range format.XL.Sets { + for setIdx, set := range format.Erasure.Sets { for _, diskID := range set { if diskID == id { return zoneIdx, setIdx, nil @@ -1611,8 +1870,8 @@ func (z *xlZones) getZoneAndSet(id string) (int, int, error) { return 0, 0, errDiskNotFound } -// IsReady - Returns true all the erasure sets are writable. -func (z *xlZones) IsReady(ctx context.Context) bool { +// IsReady - Returns true, when all the erasure sets are writable. 
+func (z *erasureZones) IsReady(ctx context.Context) bool { erasureSetUpCount := make([][]int, len(z.zones)) for i := range z.zones { erasureSetUpCount[i] = make([]int, len(z.zones[i].sets)) @@ -1632,7 +1891,7 @@ func (z *xlZones) IsReady(ctx context.Context) bool { for zoneIdx := range erasureSetUpCount { parityDrives := globalStorageClass.GetParityForSC(storageclass.STANDARD) - diskCount := len(z.zones[zoneIdx].format.XL.Sets[0]) + diskCount := len(z.zones[zoneIdx].format.Erasure.Sets[0]) if parityDrives == 0 { parityDrives = getDefaultParityBlocks(diskCount) } @@ -1651,12 +1910,12 @@ func (z *xlZones) IsReady(ctx context.Context) bool { } // PutObjectTags - replace or add tags to an existing object -func (z *xlZones) PutObjectTags(ctx context.Context, bucket, object string, tags string) error { +func (z *erasureZones) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error { if z.SingleZone() { - return z.zones[0].PutObjectTags(ctx, bucket, object, tags) + return z.zones[0].PutObjectTags(ctx, bucket, object, tags, opts) } for _, zone := range z.zones { - err := zone.PutObjectTags(ctx, bucket, object, tags) + err := zone.PutObjectTags(ctx, bucket, object, tags, opts) if err != nil { if isErrBucketNotFound(err) { continue @@ -1671,12 +1930,12 @@ func (z *xlZones) PutObjectTags(ctx context.Context, bucket, object string, tags } // DeleteObjectTags - delete object tags from an existing object -func (z *xlZones) DeleteObjectTags(ctx context.Context, bucket, object string) error { +func (z *erasureZones) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error { if z.SingleZone() { - return z.zones[0].DeleteObjectTags(ctx, bucket, object) + return z.zones[0].DeleteObjectTags(ctx, bucket, object, opts) } for _, zone := range z.zones { - err := zone.DeleteObjectTags(ctx, bucket, object) + err := zone.DeleteObjectTags(ctx, bucket, object, opts) if err != nil { if isErrBucketNotFound(err) { continue @@ -1691,12 +1950,12 @@ func (z *xlZones) DeleteObjectTags(ctx context.Context, bucket, object string) e } // GetObjectTags - get object tags from an existing object -func (z *xlZones) GetObjectTags(ctx context.Context, bucket, object string) (*tags.Tags, error) { +func (z *erasureZones) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) { if z.SingleZone() { - return z.zones[0].GetObjectTags(ctx, bucket, object) + return z.zones[0].GetObjectTags(ctx, bucket, object, opts) } for _, zone := range z.zones { - tags, err := zone.GetObjectTags(ctx, bucket, object) + tags, err := zone.GetObjectTags(ctx, bucket, object, opts) if err != nil { if isErrBucketNotFound(err) { continue diff --git a/cmd/erasure.go b/cmd/erasure.go index 4a6f31a6a..22e2484f0 100644 --- a/cmd/erasure.go +++ b/cmd/erasure.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,136 +18,373 @@ package cmd import ( "context" + "fmt" + "sort" "sync" + "time" - "github.com/klauspost/reedsolomon" "github.com/minio/minio/cmd/logger" + "github.com/minio/minio/pkg/bpool" + "github.com/minio/minio/pkg/dsync" + "github.com/minio/minio/pkg/madmin" + "github.com/minio/minio/pkg/sync/errgroup" ) -// Erasure - erasure encoding details. 
-type Erasure struct { - encoder func() reedsolomon.Encoder - dataBlocks, parityBlocks int - blockSize int64 +// OfflineDisk represents an unavailable disk. +var OfflineDisk StorageAPI // zero value is nil + +// partialUpload is a successful upload of an object +// but not written in all disks (having quorum) +type partialUpload struct { + bucket string + object string + failedSet int } -// NewErasure creates a new ErasureStorage. -func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int64) (e Erasure, err error) { - e = Erasure{ - dataBlocks: dataBlocks, - parityBlocks: parityBlocks, - blockSize: blockSize, - } +// erasureObjects - Implements ER object layer. +type erasureObjects struct { + GatewayUnsupported - // Check the parameters for sanity now. - if dataBlocks <= 0 || parityBlocks <= 0 { - return e, reedsolomon.ErrInvShardNum - } + // getDisks returns list of storageAPIs. + getDisks func() []StorageAPI - if dataBlocks+parityBlocks > 256 { - return e, reedsolomon.ErrMaxShardNum - } + // getLockers returns list of remote and local lockers. + getLockers func() []dsync.NetLocker - // Encoder when needed. - var enc reedsolomon.Encoder - var once sync.Once - e.encoder = func() reedsolomon.Encoder { - once.Do(func() { - e, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(int(e.ShardSize()))) - if err != nil { - // Error conditions should be checked above. - panic(err) - } - enc = e - }) - return enc - } - return + // getEndpoints returns list of endpoint strings belonging this set. + // some may be local and some remote. + getEndpoints func() []string + + // Locker mutex map. + nsMutex *nsLockMap + + // Byte pools used for temporary i/o buffers. + bp *bpool.BytePoolCap + + mrfUploadCh chan partialUpload } -// EncodeData encodes the given data and returns the erasure-coded data. -// It returns an error if the erasure coding failed. -func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error) { - if len(data) == 0 { - return make([][]byte, e.dataBlocks+e.parityBlocks), nil - } - encoded, err := e.encoder().Split(data) - if err != nil { - logger.LogIf(ctx, err) - return nil, err - } - if err = e.encoder().Encode(encoded); err != nil { - logger.LogIf(ctx, err) - return nil, err - } - return encoded, nil +// NewNSLock - initialize a new namespace RWLocker instance. +func (er erasureObjects) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker { + return er.nsMutex.NewNSLock(ctx, er.getLockers, bucket, objects...) } -// DecodeDataBlocks decodes the given erasure-coded data. -// It only decodes the data blocks but does not verify them. -// It returns an error if the decoding failed. -func (e *Erasure) DecodeDataBlocks(data [][]byte) error { - var isZero = 0 - for _, b := range data[:] { - if len(b) == 0 { - isZero++ - break - } - } - if isZero == 0 || isZero == len(data) { - // If all are zero, payload is 0 bytes. - return nil - } - return e.encoder().ReconstructData(data) -} - -// DecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it. -// It returns an error if the decoding failed. 
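The Erasure type being removed here (it moves to cmd/erasure-coding.go in this patch) wraps klauspost/reedsolomon. A standalone sketch of the encode/lose/reconstruct cycle it implements, using the library's public API; the shard counts and payload are arbitrary:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	const data, parity = 4, 2 // survives the loss of any 2 shards

	enc, err := reedsolomon.New(data, parity)
	if err != nil {
		panic(err)
	}

	payload := []byte("bucket versioning payload, split and erasure coded")

	// Split into 4 data shards, then compute 2 parity shards.
	shards, err := enc.Split(payload)
	if err != nil {
		panic(err)
	}
	if err = enc.Encode(shards); err != nil {
		panic(err)
	}

	// Simulate two lost drives: one data shard, one parity shard.
	shards[1], shards[4] = nil, nil

	// ReconstructData rebuilds only the data shards, which is all a
	// read path needs - mirroring DecodeDataBlocks above.
	if err = enc.ReconstructData(shards); err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	if err = enc.Join(&buf, shards, len(payload)); err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(buf.Bytes(), payload)) // true
}
```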
-func (e *Erasure) DecodeDataAndParityBlocks(ctx context.Context, data [][]byte) error { - needsReconstruction := false - for _, b := range data { - if b == nil { - needsReconstruction = true - break - } - } - if !needsReconstruction { - return nil - } - if err := e.encoder().Reconstruct(data); err != nil { - logger.LogIf(ctx, err) - return err - } +// Shutdown function for object storage interface. +func (er erasureObjects) Shutdown(ctx context.Context) error { + // Add any object layer shutdown activities here. + closeStorageDisks(er.getDisks()) return nil } -// ShardSize - returns actual shared size from erasure blockSize. -func (e *Erasure) ShardSize() int64 { - return ceilFrac(e.blockSize, int64(e.dataBlocks)) +// byDiskTotal is a collection satisfying sort.Interface. +type byDiskTotal []DiskInfo + +func (d byDiskTotal) Len() int { return len(d) } +func (d byDiskTotal) Swap(i, j int) { d[i], d[j] = d[j], d[i] } +func (d byDiskTotal) Less(i, j int) bool { + return d[i].Total < d[j].Total } -// ShardFileSize - returns final erasure size from original size. -func (e *Erasure) ShardFileSize(totalLength int64) int64 { - if totalLength == 0 { - return 0 +// getDisksInfo - fetch disks info across all other storage API. +func getDisksInfo(disks []StorageAPI, local bool) (disksInfo []DiskInfo, errs []error, onlineDisks, offlineDisks madmin.BackendDisks) { + disksInfo = make([]DiskInfo, len(disks)) + onlineDisks = make(madmin.BackendDisks) + offlineDisks = make(madmin.BackendDisks) + + for _, disk := range disks { + if disk == OfflineDisk { + continue + } + peerAddr := disk.Hostname() + if _, ok := offlineDisks[peerAddr]; !ok { + offlineDisks[peerAddr] = 0 + } + if _, ok := onlineDisks[peerAddr]; !ok { + onlineDisks[peerAddr] = 0 + } } - if totalLength == -1 { - return -1 + + g := errgroup.WithNErrs(len(disks)) + for index := range disks { + index := index + g.Go(func() error { + if disks[index] == OfflineDisk { + // Storage disk is empty, perhaps ignored disk or not available. + return errDiskNotFound + } + info, err := disks[index].DiskInfo() + if err != nil { + if !IsErr(err, baseErrs...) { + reqInfo := (&logger.ReqInfo{}).AppendTags("disk", disks[index].String()) + ctx := logger.SetReqInfo(GlobalContext, reqInfo) + logger.LogIf(ctx, err) + } + return err + } + disksInfo[index] = info + return nil + }, index) } - numShards := totalLength / e.blockSize - lastBlockSize := totalLength % int64(e.blockSize) - lastShardSize := ceilFrac(lastBlockSize, int64(e.dataBlocks)) - return numShards*e.ShardSize() + lastShardSize + + errs = g.Wait() + // Wait for the routines. + for i, diskInfoErr := range errs { + if disks[i] == OfflineDisk { + continue + } + if diskInfoErr != nil { + offlineDisks[disks[i].Hostname()]++ + continue + } + onlineDisks[disks[i].Hostname()]++ + } + + // Iterate over the passed endpoints arguments and check + // if there are still disks missing from the offline/online lists + // and update them accordingly. + missingOfflineDisks := make(map[string]int) + for _, zone := range globalEndpoints { + for _, endpoint := range zone.Endpoints { + // if local is set and endpoint is not local + // we are not interested in remote disks. + if local && !endpoint.IsLocal { + continue + } + + if _, ok := offlineDisks[endpoint.Host]; !ok { + missingOfflineDisks[endpoint.Host]++ + } + } + } + for missingDisk, n := range missingOfflineDisks { + onlineDisks[missingDisk] = 0 + offlineDisks[missingDisk] = n + } + + // Success. 
+ return disksInfo, errs, onlineDisks, offlineDisks } -// ShardFileTillOffset - returns the effectiv eoffset where erasure reading begins. -func (e *Erasure) ShardFileTillOffset(startOffset, length, totalLength int64) int64 { - shardSize := e.ShardSize() - shardFileSize := e.ShardFileSize(totalLength) - endShard := (startOffset + int64(length)) / e.blockSize - tillOffset := endShard*shardSize + shardSize - if tillOffset > shardFileSize { - tillOffset = shardFileSize +// Get an aggregated storage info across all disks. +func getStorageInfo(disks []StorageAPI, local bool) (StorageInfo, []error) { + disksInfo, errs, onlineDisks, offlineDisks := getDisksInfo(disks, local) + + // Sort so that the first element is the smallest. + sort.Sort(byDiskTotal(disksInfo)) + + // Combine all disks to get total usage + usedList := make([]uint64, len(disksInfo)) + totalList := make([]uint64, len(disksInfo)) + availableList := make([]uint64, len(disksInfo)) + mountPaths := make([]string, len(disksInfo)) + + for i, di := range disksInfo { + usedList[i] = di.Used + totalList[i] = di.Total + availableList[i] = di.Free + mountPaths[i] = di.MountPath } - return tillOffset + + storageInfo := StorageInfo{ + Used: usedList, + Total: totalList, + Available: availableList, + MountPaths: mountPaths, + } + + storageInfo.Backend.Type = BackendErasure + storageInfo.Backend.OnlineDisks = onlineDisks + storageInfo.Backend.OfflineDisks = offlineDisks + + return storageInfo, errs +} + +// StorageInfo - returns underlying storage statistics. +func (er erasureObjects) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { + disks := er.getDisks() + if local { + var localDisks []StorageAPI + for _, disk := range disks { + if disk != nil { + if disk.IsLocal() { + // Append this local disk since local flag is true + localDisks = append(localDisks, disk) + } + } + } + disks = localDisks + } + return getStorageInfo(disks, local) +} + +// GetMetrics - is not implemented and shouldn't be called. +func (er erasureObjects) GetMetrics(ctx context.Context) (*Metrics, error) { + logger.LogIf(ctx, NotImplemented{}) + return &Metrics{}, NotImplemented{} +} + +// CrawlAndGetDataUsage collects usage from all buckets. +// updates are sent as different parts of the underlying +// structure has been traversed. +func (er erasureObjects) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error { + return NotImplemented{API: "CrawlAndGetDataUsage"} +} + +// CrawlAndGetDataUsage will start crawling buckets and send updated totals as they are traversed. +// Updates are sent on a regular basis and the caller *must* consume them. +func (er erasureObjects) crawlAndGetDataUsage(ctx context.Context, buckets []BucketInfo, bf *bloomFilter, updates chan<- dataUsageCache) error { + var disks []StorageAPI + + for _, d := range er.getLoadBalancedDisks() { + if d == nil || !d.IsOnline() { + continue + } + disks = append(disks, d) + } + if len(disks) == 0 || len(buckets) == 0 { + return nil + } + + // Load bucket totals + oldCache := dataUsageCache{} + err := oldCache.load(ctx, er, dataUsageCacheName) + if err != nil { + return err + } + + // New cache.. + cache := dataUsageCache{ + Info: dataUsageCacheInfo{ + Name: dataUsageRoot, + NextCycle: oldCache.Info.NextCycle, + }, + Cache: make(map[string]dataUsageEntry, len(oldCache.Cache)), + } + + // Put all buckets into channel. 
+ bucketCh := make(chan BucketInfo, len(buckets)) + // Add new buckets first + for _, b := range buckets { + if oldCache.find(b.Name) == nil { + bucketCh <- b + } + } + // Add existing buckets. + for _, b := range buckets { + e := oldCache.find(b.Name) + if e != nil { + bucketCh <- b + cache.replace(b.Name, dataUsageRoot, *e) + } + } + + close(bucketCh) + bucketResults := make(chan dataUsageEntryInfo, len(disks)) + + // Start async collector/saver. + // This goroutine owns the cache. + var saverWg sync.WaitGroup + saverWg.Add(1) + go func() { + const updateTime = 30 * time.Second + t := time.NewTicker(updateTime) + defer t.Stop() + defer saverWg.Done() + var lastSave time.Time + + saveLoop: + for { + select { + case <-ctx.Done(): + // Return without saving. + return + case <-t.C: + if cache.Info.LastUpdate.Equal(lastSave) { + continue + } + logger.LogIf(ctx, cache.save(ctx, er, dataUsageCacheName)) + updates <- cache.clone() + lastSave = cache.Info.LastUpdate + case v, ok := <-bucketResults: + if !ok { + break saveLoop + } + cache.replace(v.Name, v.Parent, v.Entry) + cache.Info.LastUpdate = time.Now() + } + } + // Save final state... + cache.Info.NextCycle++ + cache.Info.LastUpdate = time.Now() + logger.LogIf(ctx, cache.save(ctx, er, dataUsageCacheName)) + updates <- cache + }() + + // Start one crawler per disk + var wg sync.WaitGroup + wg.Add(len(disks)) + for i := range disks { + go func(i int) { + defer wg.Done() + disk := disks[i] + + for bucket := range bucketCh { + select { + case <-ctx.Done(): + return + default: + } + + // Load cache for bucket + cacheName := pathJoin(bucket.Name, dataUsageCacheName) + cache := dataUsageCache{} + logger.LogIf(ctx, cache.load(ctx, er, cacheName)) + if cache.Info.Name == "" { + cache.Info.Name = bucket.Name + } + if cache.Info.Name != bucket.Name { + logger.LogIf(ctx, fmt.Errorf("cache name mismatch: %s != %s", cache.Info.Name, bucket.Name)) + cache.Info = dataUsageCacheInfo{ + Name: bucket.Name, + LastUpdate: time.Time{}, + NextCycle: 0, + } + } + + // Calc usage + before := cache.Info.LastUpdate + cache, err = disk.CrawlAndGetDataUsage(ctx, cache) + if err != nil { + logger.LogIf(ctx, err) + if cache.Info.LastUpdate.After(before) { + logger.LogIf(ctx, cache.save(ctx, er, cacheName)) + } + continue + } + + var root dataUsageEntry + if r := cache.root(); r != nil { + root = cache.flatten(*r) + } + bucketResults <- dataUsageEntryInfo{ + Name: cache.Info.Name, + Parent: dataUsageRoot, + Entry: root, + } + // Save cache + logger.LogIf(ctx, cache.save(ctx, er, cacheName)) + } + }(i) + } + wg.Wait() + close(bucketResults) + saverWg.Wait() + + return nil +} + +// IsReady - shouldn't be called will panic. +func (er erasureObjects) IsReady(ctx context.Context) bool { + logger.CriticalIf(ctx, NotImplemented{}) + return true } diff --git a/cmd/erasure_test.go b/cmd/erasure_test.go index 4caa6f53c..71a88d574 100644 --- a/cmd/erasure_test.go +++ b/cmd/erasure_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -130,7 +130,7 @@ func newErasureTestSetup(dataBlocks int, parityBlocks int, blockSize int64) (*er disks := make([]StorageAPI, len(diskPaths)) var err error for i := range diskPaths { - disks[i], diskPaths[i], err = newPosixTestSetup() + disks[i], diskPaths[i], err = newXLStorageTestSetup() if err != nil { return nil, err } diff --git a/cmd/format-disk-cache.go b/cmd/format-disk-cache.go index 3dec6e56a..ad98c319e 100644 --- a/cmd/format-disk-cache.go +++ b/cmd/format-disk-cache.go @@ -366,7 +366,7 @@ func loadAndValidateCacheFormat(ctx context.Context, drives []string) (formats [ func migrateCacheData(ctx context.Context, c *diskCache, bucket, object, oldfile, destDir string, metadata map[string]string) error { st, err := os.Stat(oldfile) if err != nil { - err = osErrToFSFileErr(err) + err = osErrToFileErr(err) return err } readCloser, err := readCacheFileStream(oldfile, 0, st.Size()) diff --git a/cmd/format-xl.go b/cmd/format-erasure.go similarity index 66% rename from cmd/format-xl.go rename to cmd/format-erasure.go index 8d7ad9f24..3c3a6d1b9 100644 --- a/cmd/format-xl.go +++ b/cmd/format-erasure.go @@ -36,20 +36,23 @@ import ( ) const ( - // Represents XL backend. - formatBackendXL = "xl" + // Represents Erasure backend. + formatBackendErasure = "xl" - // formatXLV1.XL.Version - version '1'. - formatXLVersionV1 = "1" + // formatErasureV1.Erasure.Version - version '1'. + formatErasureVersionV1 = "1" - // formatXLV2.XL.Version - version '2'. - formatXLVersionV2 = "2" + // formatErasureV2.Erasure.Version - version '2'. + formatErasureVersionV2 = "2" - // formatXLV3.XL.Version - version '3'. - formatXLVersionV3 = "3" + // formatErasureV3.Erasure.Version - version '3'. + formatErasureVersionV3 = "3" - // Distribution algorithm used. - formatXLVersionV2DistributionAlgo = "CRCMOD" + // Distribution algorithm used, legacy + formatErasureVersionV2DistributionAlgoLegacy = "CRCMOD" + + // Distributed algorithm used, current + formatErasureVersionV3DistributionAlgo = "SIPMOD" ) // Offline disk UUID represents an offline disk. @@ -68,34 +71,34 @@ var formatCriticalErrors = map[error]struct{}{ } // Used to detect the version of "xl" format. -type formatXLVersionDetect struct { - XL struct { +type formatErasureVersionDetect struct { + Erasure struct { Version string `json:"version"` } `json:"xl"` } // Represents the V1 backend disk structure version // under `.minio.sys` and actual data namespace. -// formatXLV1 - structure holds format config version '1'. -type formatXLV1 struct { +// formatErasureV1 - structure holds format config version '1'. +type formatErasureV1 struct { formatMetaV1 - XL struct { + Erasure struct { Version string `json:"version"` // Version of 'xl' format. Disk string `json:"disk"` // Disk field carries assigned disk uuid. // JBOD field carries the input disk order generated the first // time when fresh disks were supplied. JBOD []string `json:"jbod"` - } `json:"xl"` // XL field holds xl format. + } `json:"xl"` // Erasure field holds xl format. } // Represents the V2 backend disk structure version // under `.minio.sys` and actual data namespace. -// formatXLV2 - structure holds format config version '2'. +// formatErasureV2 - structure holds format config version '2'. // The V2 format to support "large bucket" support where a bucket // can span multiple erasure sets. -type formatXLV2 struct { +type formatErasureV2 struct { formatMetaV1 - XL struct { + Erasure struct { Version string `json:"version"` // Version of 'xl' format. 
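Interrupting the struct listing briefly: the constants above pin a distribution algorithm per deployment — existing deployments keep the legacy CRCMOD placement, while new ones use SIPMOD, the siphash-based placement named in the commit message. A stdlib-only sketch of the two styles; fnv stands in for siphash here purely for illustration, and the keying detail is an assumption, not the algorithm the patch ships:

```go
package main

import (
	"fmt"
	"hash/crc32"
	"hash/fnv"
)

// crcHashMod sketches the legacy CRCMOD placement: name -> set index.
func crcHashMod(key string, cardinality int) int {
	if cardinality <= 0 {
		return -1
	}
	return int(crc32.ChecksumIEEE([]byte(key)) % uint32(cardinality))
}

// keyedHashMod illustrates the SIPMOD idea: mix a per-deployment key
// into the hash, so two deployments place the same object name
// differently. fnv is a stand-in; the patch uses siphash.
func keyedHashMod(key string, cardinality int, deploymentID string) int {
	if cardinality <= 0 {
		return -1
	}
	h := fnv.New64a()
	h.Write([]byte(deploymentID))
	h.Write([]byte(key))
	return int(h.Sum64() % uint64(cardinality))
}

func main() {
	fmt.Println(crcHashMod("bucket/object", 16))
	fmt.Println(keyedHashMod("bucket/object", 16, "example-deployment-id"))
}
```

The struct definitions resume below.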
This string `json:"this"` // This field carries assigned disk uuid. // Sets field carries the input disk order generated the first @@ -108,13 +111,13 @@ type formatXLV2 struct { } `json:"xl"` } -// formatXLV3 struct is same as formatXLV2 struct except that formatXLV3.XL.Version is "3" indicating +// formatErasureV3 struct is same as formatErasureV2 struct except that formatErasureV3.Erasure.Version is "3" indicating // the simplified multipart backend which is a flat hierarchy now. // In .minio.sys/multipart we have: -// sha256(bucket/object)/uploadID/[xl.json, part.1, part.2 ....] -type formatXLV3 struct { +// sha256(bucket/object)/uploadID/[xl.meta, part.1, part.2 ....] +type formatErasureV3 struct { formatMetaV1 - XL struct { + Erasure struct { Version string `json:"version"` // Version of 'xl' format. This string `json:"this"` // This field carries assigned disk uuid. // Sets field carries the input disk order generated the first @@ -127,40 +130,40 @@ type formatXLV3 struct { } `json:"xl"` } -func (f *formatXLV3) Clone() *formatXLV3 { +func (f *formatErasureV3) Clone() *formatErasureV3 { b, err := json.Marshal(f) if err != nil { panic(err) } - var dst formatXLV3 + var dst formatErasureV3 if err = json.Unmarshal(b, &dst); err != nil { panic(err) } return &dst } -// Returns formatXL.XL.Version -func newFormatXLV3(numSets int, setLen int) *formatXLV3 { - format := &formatXLV3{} +// Returns formatErasure.Erasure.Version +func newFormatErasureV3(numSets int, setLen int) *formatErasureV3 { + format := &formatErasureV3{} format.Version = formatMetaVersionV1 - format.Format = formatBackendXL + format.Format = formatBackendErasure format.ID = mustGetUUID() - format.XL.Version = formatXLVersionV3 - format.XL.DistributionAlgo = formatXLVersionV2DistributionAlgo - format.XL.Sets = make([][]string, numSets) + format.Erasure.Version = formatErasureVersionV3 + format.Erasure.DistributionAlgo = formatErasureVersionV3DistributionAlgo + format.Erasure.Sets = make([][]string, numSets) for i := 0; i < numSets; i++ { - format.XL.Sets[i] = make([]string, setLen) + format.Erasure.Sets[i] = make([]string, setLen) for j := 0; j < setLen; j++ { - format.XL.Sets[i][j] = mustGetUUID() + format.Erasure.Sets[i][j] = mustGetUUID() } } return format } -// Returns format XL version after reading `format.json`, returns -// successfully the version only if the backend is XL. -func formatGetBackendXLVersion(formatPath string) (string, error) { +// Returns format Erasure version after reading `format.json`, returns +// successfully the version only if the backend is Erasure. +func formatGetBackendErasureVersion(formatPath string) (string, error) { meta := &formatMetaV1{} b, err := ioutil.ReadFile(formatPath) if err != nil { @@ -172,42 +175,42 @@ func formatGetBackendXLVersion(formatPath string) (string, error) { if meta.Version != formatMetaVersionV1 { return "", fmt.Errorf(`format.Version expected: %s, got: %s`, formatMetaVersionV1, meta.Version) } - if meta.Format != formatBackendXL { - return "", fmt.Errorf(`found backend %s, expected %s`, meta.Format, formatBackendXL) + if meta.Format != formatBackendErasure { + return "", fmt.Errorf(`found backend %s, expected %s`, meta.Format, formatBackendErasure) } - // XL backend found, proceed to detect version. - format := &formatXLVersionDetect{} + // Erasure backend found, proceed to detect version. 
+ format := &formatErasureVersionDetect{} if err = json.Unmarshal(b, format); err != nil { return "", err } - return format.XL.Version, nil + return format.Erasure.Version, nil } // Migrates all previous versions to the latest version of `format.json`; // this code calls migrations in sequence: V1 is migrated to V2 // first, before V2 is migrated to V3. -func formatXLMigrate(export string) error { +func formatErasureMigrate(export string) error { formatPath := pathJoin(export, minioMetaBucket, formatConfigFile) - version, err := formatGetBackendXLVersion(formatPath) + version, err := formatGetBackendErasureVersion(formatPath) if err != nil { return err } switch version { - case formatXLVersionV1: - if err = formatXLMigrateV1ToV2(export, version); err != nil { + case formatErasureVersionV1: + if err = formatErasureMigrateV1ToV2(export, version); err != nil { return err } // Migrate successful v1 => v2, proceed to v2 => v3 - version = formatXLVersionV2 + version = formatErasureVersionV2 fallthrough - case formatXLVersionV2: - if err = formatXLMigrateV2ToV3(export, version); err != nil { + case formatErasureVersionV2: + if err = formatErasureMigrateV2ToV3(export, version); err != nil { return err } // Migrate successful v2 => v3, v3 is latest // version = formatXLVersionV3 fallthrough - case formatXLVersionV3: + case formatErasureVersionV3: // v3 is the latest version, return. return nil } @@ -216,14 +219,14 @@ func formatXLMigrate(export string) error { // Migrates version V1 of format.json to version V2 of format.json, // migration fails upon any error. -func formatXLMigrateV1ToV2(export, version string) error { - if version != formatXLVersionV1 { - return fmt.Errorf(`Disk %s: format version expected %s, found %s`, export, formatXLVersionV1, version) +func formatErasureMigrateV1ToV2(export, version string) error { + if version != formatErasureVersionV1 { + return fmt.Errorf(`Disk %s: format version expected %s, found %s`, export, formatErasureVersionV1, version) } formatPath := pathJoin(export, minioMetaBucket, formatConfigFile) - formatV1 := &formatXLV1{} + formatV1 := &formatErasureV1{} b, err := ioutil.ReadFile(formatPath) if err != nil { return err @@ -232,15 +235,15 @@ func formatXLMigrateV1ToV2(export, version string) error { return err } - formatV2 := &formatXLV2{} + formatV2 := &formatErasureV2{} formatV2.Version = formatMetaVersionV1 - formatV2.Format = formatBackendXL - formatV2.XL.Version = formatXLVersionV2 - formatV2.XL.DistributionAlgo = formatXLVersionV2DistributionAlgo - formatV2.XL.This = formatV1.XL.Disk - formatV2.XL.Sets = make([][]string, 1) - formatV2.XL.Sets[0] = make([]string, len(formatV1.XL.JBOD)) - copy(formatV2.XL.Sets[0], formatV1.XL.JBOD) + formatV2.Format = formatBackendErasure + formatV2.Erasure.Version = formatErasureVersionV2 + formatV2.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoLegacy + formatV2.Erasure.This = formatV1.Erasure.Disk + formatV2.Erasure.Sets = make([][]string, 1) + formatV2.Erasure.Sets[0] = make([]string, len(formatV1.Erasure.JBOD)) + copy(formatV2.Erasure.Sets[0], formatV1.Erasure.JBOD) b, err = json.Marshal(formatV2) if err != nil { @@ -250,13 +253,13 @@ func formatXLMigrateV1ToV2(export, version string) error { } // Migrates V2 of format.json to V3 (flat hierarchy for multipart) -func formatXLMigrateV2ToV3(export, version string) error { - if version != formatXLVersionV2 { - return fmt.Errorf(`Disk %s: format version expected %s, found %s`, export, formatXLVersionV2, version) +func
formatErasureMigrateV2ToV3(export, version string) error { + if version != formatErasureVersionV2 { + return fmt.Errorf(`Disk %s: format version expected %s, found %s`, export, formatErasureVersionV2, version) } formatPath := pathJoin(export, minioMetaBucket, formatConfigFile) - formatV2 := &formatXLV2{} + formatV2 := &formatErasureV2{} b, err := ioutil.ReadFile(formatPath) if err != nil { return err } @@ -276,13 +279,13 @@ func formatXLMigrateV2ToV3(export, version string) error { // format-V3 struct is exactly the same as format-V2 except that the version is "3", // which indicates the simplified multipart backend. - formatV3 := formatXLV3{} + formatV3 := formatErasureV3{} formatV3.Version = formatV2.Version formatV3.Format = formatV2.Format - formatV3.XL = formatV2.XL + formatV3.Erasure = formatV2.Erasure - formatV3.XL.Version = formatXLVersionV3 + formatV3.Erasure.Version = formatErasureVersionV3 b, err = json.Marshal(formatV3) if err != nil { @@ -303,7 +306,7 @@ func countErrs(errs []error, err error) int { } // Do all errors indicate we need to initialize all disks? -func shouldInitXLDisks(errs []error) bool { +func shouldInitErasureDisks(errs []error) bool { return countErrs(errs, errUnformattedDisk) == len(errs) } @@ -312,13 +315,13 @@ func quorumUnformattedDisks(errs []error) bool { return countErrs(errs, errUnformattedDisk) >= (len(errs)/2)+1 } -// loadFormatXLAll - load all format config from all input disks in parallel. -func loadFormatXLAll(storageDisks []StorageAPI, heal bool) ([]*formatXLV3, []error) { +// loadFormatErasureAll - load all format config from all input disks in parallel. +func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasureV3, []error) { // Initialize list of errors. g := errgroup.WithNErrs(len(storageDisks)) // Initialize format configs. - var formats = make([]*formatXLV3, len(storageDisks)) + var formats = make([]*formatErasureV3, len(storageDisks)) // Load format from each disk in parallel for index := range storageDisks { @@ -327,7 +330,7 @@ func loadFormatXLAll(storageDisks []StorageAPI, heal bool) ([]*formatXLV3, []err if storageDisks[index] == nil { return errDiskNotFound } - format, err := loadFormatXL(storageDisks[index]) + format, err := loadFormatErasure(storageDisks[index]) if err != nil { return err } @@ -335,7 +338,7 @@ func loadFormatXLAll(storageDisks []StorageAPI, heal bool) ([]*formatXLV3, []err if !heal { // If no healing required, make the disks valid and // online. - storageDisks[index].SetDiskID(format.XL.This) + storageDisks[index].SetDiskID(format.Erasure.This) } return nil }, index) @@ -345,12 +348,12 @@ func loadFormatXLAll(storageDisks []StorageAPI, heal bool) ([]*formatXLV3, []err return formats, g.Wait() } -func saveFormatXL(disk StorageAPI, format interface{}, diskID string) error { +func saveFormatErasure(disk StorageAPI, format interface{}, diskID string) error { if format == nil || disk == nil { return errDiskNotFound } - if err := makeFormatXLMetaVolumes(disk); err != nil { + if err := makeFormatErasureMetaVolumes(disk); err != nil { return err } @@ -398,8 +401,8 @@ func isHiddenDirectories(vols ...VolInfo) bool { return true } -// loadFormatXL - loads format.json from disk. -func loadFormatXL(disk StorageAPI) (format *formatXLV3, err error) { +// loadFormatErasure - loads format.json from disk.
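The V2 to V3 migration above exists to adopt the flat multipart hierarchy noted earlier, where every part of an upload lives under a single sha256-derived directory. A minimal sketch of how such a path can be derived; the helper name and the standalone program around it are illustrative, not part of this patch:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"path"
)

// multipartSHADir derives the one flat directory per (bucket, object),
// matching the layout sha256(bucket/object)/uploadID/[xl.meta, part.N].
func multipartSHADir(bucket, object string) string {
	sum := sha256.Sum256([]byte(path.Join(bucket, object)))
	return hex.EncodeToString(sum[:])
}

func main() {
	// All uploads for this object land under the same hash directory,
	// regardless of how deeply the object key nests.
	fmt.Println(multipartSHADir("mybucket", "photos/2020/a.jpg"))
}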
+func loadFormatErasure(disk StorageAPI) (format *formatErasureV3, err error) { buf, err := disk.ReadAll(minioMetaBucket, formatConfigFile) if err != nil { // 'file not found' and 'volume not found' as @@ -421,7 +424,7 @@ func loadFormatXL(disk StorageAPI) (format *formatXLV3, err error) { } // Try to decode format json into formatConfigV1 struct. - format = &formatXLV3{} + format = &formatErasureV3{} if err = json.Unmarshal(buf, format); err != nil { return nil, err } @@ -430,56 +433,56 @@ func loadFormatXL(disk StorageAPI) (format *formatXLV3, err error) { return format, nil } -// Valid formatXL basic versions. -func checkFormatXLValue(formatXL *formatXLV3) error { +// Validates basic formatErasure versions. +func checkFormatErasureValue(formatErasure *formatErasureV3) error { // Validate format version and format type. - if formatXL.Version != formatMetaVersionV1 { - return fmt.Errorf("Unsupported version of backend format [%s] found", formatXL.Version) + if formatErasure.Version != formatMetaVersionV1 { + return fmt.Errorf("Unsupported version of backend format [%s] found", formatErasure.Version) } - if formatXL.Format != formatBackendXL { - return fmt.Errorf("Unsupported backend format [%s] found", formatXL.Format) + if formatErasure.Format != formatBackendErasure { + return fmt.Errorf("Unsupported backend format [%s] found", formatErasure.Format) } - if formatXL.XL.Version != formatXLVersionV3 { - return fmt.Errorf("Unsupported XL backend format found [%s]", formatXL.XL.Version) + if formatErasure.Erasure.Version != formatErasureVersionV3 { + return fmt.Errorf("Unsupported Erasure backend format found [%s]", formatErasure.Erasure.Version) } return nil } // Check all format values. -func checkFormatXLValues(formats []*formatXLV3, drivesPerSet int) error { - for i, formatXL := range formats { - if formatXL == nil { +func checkFormatErasureValues(formats []*formatErasureV3, drivesPerSet int) error { + for i, formatErasure := range formats { + if formatErasure == nil { continue } - if err := checkFormatXLValue(formatXL); err != nil { + if err := checkFormatErasureValue(formatErasure); err != nil { return err } - if len(formats) != len(formatXL.XL.Sets)*len(formatXL.XL.Sets[0]) { + if len(formats) != len(formatErasure.Erasure.Sets)*len(formatErasure.Erasure.Sets[0]) { return fmt.Errorf("%s disk is already being used in another erasure deployment. (Number of disks specified: %d but the number of disks found in the %s disk's format.json: %d)", - humanize.Ordinal(i+1), len(formats), humanize.Ordinal(i+1), len(formatXL.XL.Sets)*len(formatXL.XL.Sets[0])) + humanize.Ordinal(i+1), len(formats), humanize.Ordinal(i+1), len(formatErasure.Erasure.Sets)*len(formatErasure.Erasure.Sets[0])) } // Only if a custom erasure drive count is set // should we fail here; otherwise proceed to honor what // is present on the disk. - if globalCustomErasureDriveCount && len(formatXL.XL.Sets[0]) != drivesPerSet { - return fmt.Errorf("%s disk is already formatted with %d drives per erasure set. This cannot be changed to %d, please revert your MINIO_ERASURE_SET_DRIVE_COUNT setting", humanize.Ordinal(i+1), len(formatXL.XL.Sets[0]), drivesPerSet) + if globalCustomErasureDriveCount && len(formatErasure.Erasure.Sets[0]) != drivesPerSet { + return fmt.Errorf("%s disk is already formatted with %d drives per erasure set. 
This cannot be changed to %d, please revert your MINIO_ERASURE_SET_DRIVE_COUNT setting", humanize.Ordinal(i+1), len(formatErasure.Erasure.Sets[0]), drivesPerSet) } } return nil } -// Get Deployment ID for the XL sets from format.json. +// Get Deployment ID for the Erasure sets from format.json. // This need not be in quorum. Even if one of the format.json // file has this value, we assume it is valid. // If more than one format.json's have different id, it is considered a corrupt // backend format. -func formatXLGetDeploymentID(refFormat *formatXLV3, formats []*formatXLV3) (string, error) { +func formatErasureGetDeploymentID(refFormat *formatErasureV3, formats []*formatErasureV3) (string, error) { var deploymentID string for _, format := range formats { if format == nil || format.ID == "" { continue } - if reflect.DeepEqual(format.XL.Sets, refFormat.XL.Sets) { + if reflect.DeepEqual(format.Erasure.Sets, refFormat.Erasure.Sets) { // Found an ID in one of the format.json file // Set deploymentID for the first time. if deploymentID == "" { @@ -494,11 +497,11 @@ func formatXLGetDeploymentID(refFormat *formatXLV3, formats []*formatXLV3) (stri return deploymentID, nil } -// formatXLFixDeploymentID - Add deployment id if it is not present. -func formatXLFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatXLV3) (err error) { +// formatErasureFixDeploymentID - Add deployment id if it is not present. +func formatErasureFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatErasureV3) (err error) { // Attempt to load all `format.json` from all disks. var sErrs []error - formats, sErrs := loadFormatXLAll(storageDisks, false) + formats, sErrs := loadFormatErasureAll(storageDisks, false) for i, sErr := range sErrs { if _, ok := formatCriticalErrors[sErr]; ok { return config.ErrCorruptedBackend(err).Hint(fmt.Sprintf("Clear any pre-existing content on %s", endpoints[i])) @@ -506,13 +509,13 @@ func formatXLFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, ref } for index := range formats { - // If the XL sets do not match, set those formats to nil, + // If the Erasure sets do not match, set those formats to nil, // We do not have to update the ID on those format.json file. - if formats[index] != nil && !reflect.DeepEqual(formats[index].XL.Sets, refFormat.XL.Sets) { + if formats[index] != nil && !reflect.DeepEqual(formats[index].Erasure.Sets, refFormat.Erasure.Sets) { formats[index] = nil } } - refFormat.ID, err = formatXLGetDeploymentID(refFormat, formats) + refFormat.ID, err = formatErasureGetDeploymentID(refFormat, formats) if err != nil { return err } @@ -534,12 +537,12 @@ func formatXLFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, ref } // Deployment ID needs to be set on all the disks. // Save `format.json` across all disks. - return saveFormatXLAll(GlobalContext, storageDisks, formats) + return saveFormatErasureAll(GlobalContext, storageDisks, formats) } // Update only the valid local disks which have not been updated before. -func formatXLFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatXLV3) error { +func formatErasureFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatErasureV3) error { // If this server was down when the deploymentID was updated // then we make sure that we update the local disks with the deploymentID. 
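formatErasureGetDeploymentID above follows a simple consensus rule: any single format.json may supply the ID, agreement across all disks is not required, but two different non-empty IDs indicate a corrupted backend. A condensed sketch of that rule; the function name is illustrative and the sentinel error merely stands in for errCorruptedFormat (assumes only the standard library errors package):

// deploymentIDConsensus picks the first non-empty ID and rejects
// any later format that disagrees with it.
func deploymentIDConsensus(ids []string) (string, error) {
	var deploymentID string
	for _, id := range ids {
		if id == "" {
			continue // formats without an ID do not vote
		}
		if deploymentID == "" {
			deploymentID = id // first ID seen wins
			continue
		}
		if deploymentID != id {
			return "", errors.New("corrupted backend format: conflicting deployment IDs")
		}
	}
	return deploymentID, nil // may be empty if no format carried an ID
}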
@@ -550,7 +553,7 @@ func formatXLFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI index := index g.Go(func() error { if endpoints[index].IsLocal && storageDisks[index] != nil && storageDisks[index].IsOnline() { - format, err := loadFormatXL(storageDisks[index]) + format, err := loadFormatErasure(storageDisks[index]) if err != nil { // Disk can be offline etc. // ignore the errors seen here. @@ -559,11 +562,11 @@ func formatXLFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI if format.ID != "" { return nil } - if !reflect.DeepEqual(format.XL.Sets, refFormat.XL.Sets) { + if !reflect.DeepEqual(format.Erasure.Sets, refFormat.Erasure.Sets) { return nil } format.ID = refFormat.ID - if err := saveFormatXL(storageDisks[index], format, format.XL.This); err != nil { + if err := saveFormatErasure(storageDisks[index], format, format.Erasure.This); err != nil { logger.LogIf(GlobalContext, err) return fmt.Errorf("Unable to save format.json, %w", err) } @@ -579,15 +582,15 @@ func formatXLFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI return nil } -// Get backend XL format in quorum `format.json`. -func getFormatXLInQuorum(formats []*formatXLV3) (*formatXLV3, error) { +// Get backend Erasure format in quorum `format.json`. +func getFormatErasureInQuorum(formats []*formatErasureV3) (*formatErasureV3, error) { formatHashes := make([]string, len(formats)) for i, format := range formats { if format == nil { continue } h := sha256.New() - for _, set := range format.XL.Sets { + for _, set := range format.Erasure.Sets { for _, diskID := range set { h.Write([]byte(diskID)) } @@ -613,55 +616,55 @@ func getFormatXLInQuorum(formats []*formatXLV3) (*formatXLV3, error) { } if maxCount < len(formats)/2 { - return nil, errXLReadQuorum + return nil, errErasureReadQuorum } for i, hash := range formatHashes { if hash == maxHash { format := formats[i].Clone() - format.XL.This = "" + format.Erasure.This = "" return format, nil } } - return nil, errXLReadQuorum + return nil, errErasureReadQuorum } -func formatXLV3Check(reference *formatXLV3, format *formatXLV3) error { +func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) error { tmpFormat := format.Clone() - this := tmpFormat.XL.This - tmpFormat.XL.This = "" - if len(reference.XL.Sets) != len(format.XL.Sets) { - return fmt.Errorf("Expected number of sets %d, got %d", len(reference.XL.Sets), len(format.XL.Sets)) + this := tmpFormat.Erasure.This + tmpFormat.Erasure.This = "" + if len(reference.Erasure.Sets) != len(format.Erasure.Sets) { + return fmt.Errorf("Expected number of sets %d, got %d", len(reference.Erasure.Sets), len(format.Erasure.Sets)) } // Make sure that the sets match. 
- for i := range reference.XL.Sets { - if len(reference.XL.Sets[i]) != len(format.XL.Sets[i]) { + for i := range reference.Erasure.Sets { + if len(reference.Erasure.Sets[i]) != len(format.Erasure.Sets[i]) { return fmt.Errorf("Each set should be of same size, expected %d got %d", - len(reference.XL.Sets[i]), len(format.XL.Sets[i])) + len(reference.Erasure.Sets[i]), len(format.Erasure.Sets[i])) } - for j := range reference.XL.Sets[i] { - if reference.XL.Sets[i][j] != format.XL.Sets[i][j] { + for j := range reference.Erasure.Sets[i] { + if reference.Erasure.Sets[i][j] != format.Erasure.Sets[i][j] { return fmt.Errorf("UUID on positions %d:%d do not match with, expected %s got %s", - i, j, reference.XL.Sets[i][j], format.XL.Sets[i][j]) + i, j, reference.Erasure.Sets[i][j], format.Erasure.Sets[i][j]) } } } // Make sure that the diskID is found in the set. - for i := 0; i < len(tmpFormat.XL.Sets); i++ { - for j := 0; j < len(tmpFormat.XL.Sets[i]); j++ { - if this == tmpFormat.XL.Sets[i][j] { + for i := 0; i < len(tmpFormat.Erasure.Sets); i++ { + for j := 0; j < len(tmpFormat.Erasure.Sets[i]); j++ { + if this == tmpFormat.Erasure.Sets[i][j] { return nil } } } - return fmt.Errorf("Disk ID %s not found in any disk sets %s", this, format.XL.Sets) + return fmt.Errorf("Disk ID %s not found in any disk sets %s", this, format.Erasure.Sets) } // Initializes meta volume only on local storage disks. -func initXLMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatXLV3) error { +func initErasureMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatErasureV3) error { // Compute the local disks eligible for meta volumes (re)initialization var disksToInit []StorageAPI @@ -682,7 +685,7 @@ func initXLMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatX // goroutine will return its own instance of index variable. index := index g.Go(func() error { - return makeFormatXLMetaVolumes(disksToInit[index]) + return makeFormatErasureMetaVolumes(disksToInit[index]) }, index) } @@ -698,15 +701,15 @@ func initXLMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatX return nil } -// saveFormatXLAll - populates `format.json` on disks in its order. -func saveFormatXLAll(ctx context.Context, storageDisks []StorageAPI, formats []*formatXLV3) error { +// saveFormatErasureAll - populates `format.json` on disks in its order. +func saveFormatErasureAll(ctx context.Context, storageDisks []StorageAPI, formats []*formatErasureV3) error { g := errgroup.WithNErrs(len(storageDisks)) // Write `format.json` to all disks. for index := range storageDisks { index := index g.Go(func() error { - return saveFormatXL(storageDisks[index], formats[index], formats[index].XL.This) + return saveFormatErasure(storageDisks[index], formats[index], formats[index].Erasure.This) }, index) } @@ -745,9 +748,9 @@ func initStorageDisksWithErrors(endpoints Endpoints) ([]StorageAPI, []error) { return storageDisks, g.Wait() } -// formatXLV3ThisEmpty - find out if '.This' field is empty +// formatErasureV3ThisEmpty - find out if '.This' field is empty // in any of the input `formats`, if yes return true. 
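Both loadFormatErasureAll and saveFormatErasureAll above rely on the same per-disk fan-out idiom from minio's errgroup package: one error slot per disk, with results positionally aligned to the input slice. A condensed sketch of the idiom as used throughout this file; note the index re-binding, which keeps each closure from capturing the shared loop variable:

g := errgroup.WithNErrs(len(storageDisks))
for index := range storageDisks {
	index := index // re-bind so each goroutine sees its own copy
	g.Go(func() error {
		if storageDisks[index] == nil {
			return errDiskNotFound
		}
		return saveFormatErasure(storageDisks[index], formats[index], formats[index].Erasure.This)
	}, index)
}
errs := g.Wait() // errs[i] is the outcome for storageDisks[i]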
-func formatXLV3ThisEmpty(formats []*formatXLV3) bool { +func formatErasureV3ThisEmpty(formats []*formatErasureV3) bool { for _, format := range formats { if format == nil { continue @@ -756,18 +759,18 @@ func formatXLV3ThisEmpty(formats []*formatXLV3) bool { // V1 to V2 to V3, in a scenario such as this we only need to handle // single sets since we never used to support multiple sets in releases // with V1 format version. - if len(format.XL.Sets) > 1 { + if len(format.Erasure.Sets) > 1 { continue } - if format.XL.This == "" { + if format.Erasure.This == "" { return true } } return false } -// fixFormatXLV3 - fix format XL configuration on all disks. -func fixFormatXLV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatXLV3) error { +// fixFormatErasureV3 - fix format Erasure configuration on all disks. +func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatErasureV3) error { g := errgroup.WithNErrs(len(formats)) for i := range formats { i := i @@ -779,12 +782,12 @@ func fixFormatXLV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*fo // V1 to V2 to V3, in a scenario such as this we only need to handle // single sets since we never used to support multiple sets in releases // with V1 format version. - if len(formats[i].XL.Sets) > 1 { + if len(formats[i].Erasure.Sets) > 1 { return nil } - if formats[i].XL.This == "" { - formats[i].XL.This = formats[i].XL.Sets[0][i] - if err := saveFormatXL(storageDisks[i], formats[i], formats[i].XL.This); err != nil { + if formats[i].Erasure.This == "" { + formats[i].Erasure.This = formats[i].Erasure.Sets[0][i] + if err := saveFormatErasure(storageDisks[i], formats[i], formats[i].Erasure.This); err != nil { return err } } @@ -800,10 +803,10 @@ func fixFormatXLV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*fo } -// initFormatXL - save XL format configuration on all disks. -func initFormatXL(ctx context.Context, storageDisks []StorageAPI, setCount, drivesPerSet int, deploymentID string) (*formatXLV3, error) { - format := newFormatXLV3(setCount, drivesPerSet) - formats := make([]*formatXLV3, len(storageDisks)) +// initFormatErasure - save Erasure format configuration on all disks. +func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, drivesPerSet int, deploymentID string) (*formatErasureV3, error) { + format := newFormatErasureV3(setCount, drivesPerSet) + formats := make([]*formatErasureV3, len(storageDisks)) wantAtMost := ecDrivesNoConfig(drivesPerSet) for i := 0; i < setCount; i++ { @@ -811,7 +814,7 @@ func initFormatXL(ctx context.Context, storageDisks []StorageAPI, setCount, driv for j := 0; j < drivesPerSet; j++ { disk := storageDisks[i*drivesPerSet+j] newFormat := format.Clone() - newFormat.XL.This = format.XL.Sets[i][j] + newFormat.Erasure.This = format.Erasure.Sets[i][j] if deploymentID != "" { newFormat.ID = deploymentID } @@ -843,11 +846,11 @@ func initFormatXL(ctx context.Context, storageDisks []StorageAPI, setCount, driv } // Save formats `format.json` across all disks. - if err := saveFormatXLAll(ctx, storageDisks, formats); err != nil { + if err := saveFormatErasureAll(ctx, storageDisks, formats); err != nil { return nil, err } - return getFormatXLInQuorum(formats) + return getFormatErasureInQuorum(formats) } // ecDrivesNoConfig returns the erasure coded drives in a set if no config has been set. @@ -866,8 +869,8 @@ func ecDrivesNoConfig(drivesPerSet int) int { return ecDrives } -// Make XL backend meta volumes. 
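ecDrivesNoConfig, referenced by initFormatErasure above, decides how many drives in a set act as parity when no storage-class configuration has been supplied; its body is not shown in this hunk. A sketch under the common assumption that the default splits a set evenly between data and parity drives:

// defaultParityDrives is an illustrative stand-in for the default
// parity count used when no storage-class config overrides it.
func defaultParityDrives(drivesPerSet int) int {
	return drivesPerSet / 2 // e.g. 16 drives -> 8 data + 8 parity
}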
-func makeFormatXLMetaVolumes(disk StorageAPI) error { +// Make Erasure backend meta volumes. +func makeFormatErasureMetaVolumes(disk StorageAPI) error { if disk == nil { return errDiskNotFound } @@ -878,14 +881,14 @@ func makeFormatXLMetaVolumes(disk StorageAPI) error { // Get all UUIDs which are present in the reference format and are also // present in the list of formats provided; those are considered // online UUIDs. -func getOnlineUUIDs(refFormat *formatXLV3, formats []*formatXLV3) (onlineUUIDs []string) { +func getOnlineUUIDs(refFormat *formatErasureV3, formats []*formatErasureV3) (onlineUUIDs []string) { for _, format := range formats { if format == nil { continue } - for _, set := range refFormat.XL.Sets { + for _, set := range refFormat.Erasure.Sets { for _, uuid := range set { - if format.XL.This == uuid { + if format.Erasure.This == uuid { onlineUUIDs = append(onlineUUIDs, uuid) } } @@ -897,13 +900,13 @@ func getOnlineUUIDs(refFormat *formatXLV3, formats []*formatXLV3) (onlineUUIDs [ // Look for all UUIDs which are present in the reference format // but missing from the onlineUUIDs list, and construct a list of such // offline UUIDs. -func getOfflineUUIDs(refFormat *formatXLV3, formats []*formatXLV3) (offlineUUIDs []string) { +func getOfflineUUIDs(refFormat *formatErasureV3, formats []*formatErasureV3) (offlineUUIDs []string) { onlineUUIDs := getOnlineUUIDs(refFormat, formats) - for i, set := range refFormat.XL.Sets { + for i, set := range refFormat.Erasure.Sets { for j, uuid := range set { var found bool for _, onlineUUID := range onlineUUIDs { - if refFormat.XL.Sets[i][j] == onlineUUID { + if refFormat.Erasure.Sets[i][j] == onlineUUID { found = true } } @@ -916,13 +919,13 @@ func getOfflineUUIDs(refFormat *formatXLV3, formats []*formatXLV3) (offlineUUIDs } // Mark all UUIDs that are offline. -func markUUIDsOffline(refFormat *formatXLV3, formats []*formatXLV3) { +func markUUIDsOffline(refFormat *formatErasureV3, formats []*formatErasureV3) { offlineUUIDs := getOfflineUUIDs(refFormat, formats) - for i, set := range refFormat.XL.Sets { + for i, set := range refFormat.Erasure.Sets { for j := range set { for _, offlineUUID := range offlineUUIDs { - if refFormat.XL.Sets[i][j] == offlineUUID { - refFormat.XL.Sets[i][j] = offlineDiskUUID + if refFormat.Erasure.Sets[i][j] == offlineUUID { + refFormat.Erasure.Sets[i][j] = offlineDiskUUID } } } @@ -930,29 +933,29 @@ func markUUIDsOffline(refFormat *formatXLV3, formats []*formatXLV3) { } // Initialize a new set of per-set formats which will be written to all disks.
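The offline-UUID scan above is quadratic, which is harmless at typical set sizes; for illustration, an equivalent map-based formulation of the same computation (not part of the patch):

// offlineUUIDs returns every UUID in the reference sets that is not
// reported online, mirroring getOfflineUUIDs above in linear time.
func offlineUUIDs(refSets [][]string, online []string) []string {
	onlineSet := make(map[string]struct{}, len(online))
	for _, id := range online {
		onlineSet[id] = struct{}{}
	}
	var offline []string
	for _, set := range refSets {
		for _, id := range set {
			if _, ok := onlineSet[id]; !ok {
				offline = append(offline, id)
			}
		}
	}
	return offline
}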
-func newHealFormatSets(refFormat *formatXLV3, setCount, drivesPerSet int, formats []*formatXLV3, errs []error) [][]*formatXLV3 { - newFormats := make([][]*formatXLV3, setCount) - for i := range refFormat.XL.Sets { - newFormats[i] = make([]*formatXLV3, drivesPerSet) +func newHealFormatSets(refFormat *formatErasureV3, setCount, drivesPerSet int, formats []*formatErasureV3, errs []error) [][]*formatErasureV3 { + newFormats := make([][]*formatErasureV3, setCount) + for i := range refFormat.Erasure.Sets { + newFormats[i] = make([]*formatErasureV3, drivesPerSet) } - for i := range refFormat.XL.Sets { - for j := range refFormat.XL.Sets[i] { + for i := range refFormat.Erasure.Sets { + for j := range refFormat.Erasure.Sets[i] { if errs[i*drivesPerSet+j] == errUnformattedDisk || errs[i*drivesPerSet+j] == nil { - newFormats[i][j] = &formatXLV3{} + newFormats[i][j] = &formatErasureV3{} newFormats[i][j].Version = refFormat.Version newFormats[i][j].ID = refFormat.ID newFormats[i][j].Format = refFormat.Format - newFormats[i][j].XL.Version = refFormat.XL.Version - newFormats[i][j].XL.DistributionAlgo = refFormat.XL.DistributionAlgo + newFormats[i][j].Erasure.Version = refFormat.Erasure.Version + newFormats[i][j].Erasure.DistributionAlgo = refFormat.Erasure.DistributionAlgo } if errs[i*drivesPerSet+j] == errUnformattedDisk { - newFormats[i][j].XL.This = "" - newFormats[i][j].XL.Sets = nil + newFormats[i][j].Erasure.This = "" + newFormats[i][j].Erasure.Sets = nil continue } if errs[i*drivesPerSet+j] == nil { - newFormats[i][j].XL.This = formats[i*drivesPerSet+j].XL.This - newFormats[i][j].XL.Sets = nil + newFormats[i][j].Erasure.This = formats[i*drivesPerSet+j].Erasure.This + newFormats[i][j].Erasure.Sets = nil } } } diff --git a/cmd/format-xl_test.go b/cmd/format-erasure_test.go similarity index 61% rename from cmd/format-xl_test.go rename to cmd/format-erasure_test.go index 10f5adde8..656d59a63 100644 --- a/cmd/format-xl_test.go +++ b/cmd/format-erasure_test.go @@ -26,13 +26,13 @@ import ( // Test get offline/online uuids. func TestGetUUIDs(t *testing.T) { - fmtV2 := newFormatXLV3(4, 16) - formats := make([]*formatXLV3, 64) + fmtV2 := newFormatErasureV3(4, 16) + formats := make([]*formatErasureV3, 64) for i := 0; i < 4; i++ { for j := 0; j < 16; j++ { newFormat := *fmtV2 - newFormat.XL.This = fmtV2.XL.Sets[i][j] + newFormat.Erasure.This = fmtV2.Erasure.Sets[i][j] formats[i*16+j] = &newFormat } } @@ -62,9 +62,9 @@ func TestGetUUIDs(t *testing.T) { markUUIDsOffline(fmtV2, formats) gotCount = 0 - for i := range fmtV2.XL.Sets { - for j := range fmtV2.XL.Sets[i] { - if fmtV2.XL.Sets[i][j] == offlineDiskUUID { + for i := range fmtV2.Erasure.Sets { + for j := range fmtV2.Erasure.Sets[i] { + if fmtV2.Erasure.Sets[i][j] == offlineDiskUUID { gotCount++ } } @@ -74,16 +74,16 @@ func TestGetUUIDs(t *testing.T) { } } -// tests fixFormatXLV3 - fix format.json on all disks. +// tests fixFormatErasureV3 - fix format.json on all disks. func TestFixFormatV3(t *testing.T) { - xlDirs, err := getRandomDisks(8) + erasureDirs, err := getRandomDisks(8) if err != nil { t.Fatal(err) } - for _, xlDir := range xlDirs { - defer os.RemoveAll(xlDir) + for _, erasureDir := range erasureDirs { + defer os.RemoveAll(erasureDir) } - endpoints := mustGetNewEndpoints(xlDirs...) + endpoints := mustGetNewEndpoints(erasureDirs...) 
storageDisks, errs := initStorageDisksWithErrors(endpoints) for _, err := range errs { @@ -92,46 +92,46 @@ func TestFixFormatV3(t *testing.T) { } } - format := newFormatXLV3(1, 8) - formats := make([]*formatXLV3, 8) + format := newFormatErasureV3(1, 8) + formats := make([]*formatErasureV3, 8) for j := 0; j < 8; j++ { newFormat := format.Clone() - newFormat.XL.This = format.XL.Sets[0][j] + newFormat.Erasure.This = format.Erasure.Sets[0][j] formats[j] = newFormat } - if err = initXLMetaVolumesInLocalDisks(storageDisks, formats); err != nil { + if err = initErasureMetaVolumesInLocalDisks(storageDisks, formats); err != nil { t.Fatal(err) } formats[1] = nil - expThis := formats[2].XL.This - formats[2].XL.This = "" - if err := fixFormatXLV3(storageDisks, endpoints, formats); err != nil { + expThis := formats[2].Erasure.This + formats[2].Erasure.This = "" + if err := fixFormatErasureV3(storageDisks, endpoints, formats); err != nil { t.Fatal(err) } - newFormats, errs := loadFormatXLAll(storageDisks, false) + newFormats, errs := loadFormatErasureAll(storageDisks, false) for _, err := range errs { if err != nil && err != errUnformattedDisk { t.Fatal(err) } } - gotThis := newFormats[2].XL.This + gotThis := newFormats[2].Erasure.This if expThis != gotThis { t.Fatalf("expected uuid %s, got %s", expThis, gotThis) } } -// tests formatXLV3ThisEmpty conditions. -func TestFormatXLEmpty(t *testing.T) { - format := newFormatXLV3(1, 16) - formats := make([]*formatXLV3, 16) +// tests formatErasureV3ThisEmpty conditions. +func TestFormatErasureEmpty(t *testing.T) { + format := newFormatErasureV3(1, 16) + formats := make([]*formatErasureV3, 16) for j := 0; j < 16; j++ { newFormat := format.Clone() - newFormat.XL.This = format.XL.Sets[0][j] + newFormat.Erasure.This = format.Erasure.Sets[0][j] formats[j] = newFormat } @@ -139,18 +139,18 @@ func TestFormatXLEmpty(t *testing.T) { // empty should return false. formats[0] = nil - if ok := formatXLV3ThisEmpty(formats); ok { + if ok := formatErasureV3ThisEmpty(formats); ok { t.Fatalf("expected value false, got %t", ok) } - formats[2].XL.This = "" - if ok := formatXLV3ThisEmpty(formats); !ok { + formats[2].Erasure.This = "" + if ok := formatErasureV3ThisEmpty(formats); !ok { t.Fatalf("expected value true, got %t", ok) } } // Tests xl format migration. -func TestFormatXLMigrate(t *testing.T) { +func TestFormatErasureMigrate(t *testing.T) { // Get test root. 
rootPath, err := getTestRoot() if err != nil { @@ -158,12 +158,12 @@ func TestFormatXLMigrate(t *testing.T) { } defer os.RemoveAll(rootPath) - m := &formatXLV1{} - m.Format = formatBackendXL + m := &formatErasureV1{} + m.Format = formatBackendErasure m.Version = formatMetaVersionV1 - m.XL.Version = formatXLVersionV1 - m.XL.Disk = mustGetUUID() - m.XL.JBOD = []string{m.XL.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} + m.Erasure.Version = formatErasureVersionV1 + m.Erasure.Disk = mustGetUUID() + m.Erasure.JBOD = []string{m.Erasure.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} b, err := json.Marshal(m) if err != nil { @@ -178,43 +178,43 @@ func TestFormatXLMigrate(t *testing.T) { t.Fatal(err) } - if err = formatXLMigrate(rootPath); err != nil { + if err = formatErasureMigrate(rootPath); err != nil { t.Fatal(err) } - migratedVersion, err := formatGetBackendXLVersion(pathJoin(rootPath, minioMetaBucket, formatConfigFile)) + migratedVersion, err := formatGetBackendErasureVersion(pathJoin(rootPath, minioMetaBucket, formatConfigFile)) if err != nil { t.Fatal(err) } - if migratedVersion != formatXLVersionV3 { - t.Fatalf("expected version: %s, got: %s", formatXLVersionV3, migratedVersion) + if migratedVersion != formatErasureVersionV3 { + t.Fatalf("expected version: %s, got: %s", formatErasureVersionV3, migratedVersion) } b, err = ioutil.ReadFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile)) if err != nil { t.Fatal(err) } - formatV3 := &formatXLV3{} + formatV3 := &formatErasureV3{} if err = json.Unmarshal(b, formatV3); err != nil { t.Fatal(err) } - if formatV3.XL.This != m.XL.Disk { - t.Fatalf("expected disk uuid: %s, got: %s", m.XL.Disk, formatV3.XL.This) + if formatV3.Erasure.This != m.Erasure.Disk { + t.Fatalf("expected disk uuid: %s, got: %s", m.Erasure.Disk, formatV3.Erasure.This) } - if len(formatV3.XL.Sets) != 1 { - t.Fatalf("expected single set after migrating from v1 to v3, but found %d", len(formatV3.XL.Sets)) + if len(formatV3.Erasure.Sets) != 1 { + t.Fatalf("expected single set after migrating from v1 to v3, but found %d", len(formatV3.Erasure.Sets)) } - if !reflect.DeepEqual(formatV3.XL.Sets[0], m.XL.JBOD) { - t.Fatalf("expected disk uuid: %v, got: %v", m.XL.JBOD, formatV3.XL.Sets[0]) + if !reflect.DeepEqual(formatV3.Erasure.Sets[0], m.Erasure.JBOD) { + t.Fatalf("expected disk uuid: %v, got: %v", m.Erasure.JBOD, formatV3.Erasure.Sets[0]) } - m = &formatXLV1{} + m = &formatErasureV1{} m.Format = "unknown" m.Version = formatMetaVersionV1 - m.XL.Version = formatXLVersionV1 - m.XL.Disk = mustGetUUID() - m.XL.JBOD = []string{m.XL.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} + m.Erasure.Version = formatErasureVersionV1 + m.Erasure.Disk = mustGetUUID() + m.Erasure.JBOD = []string{m.Erasure.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} b, err = json.Marshal(m) if err != nil { @@ -225,16 +225,16 @@ func TestFormatXLMigrate(t *testing.T) { t.Fatal(err) } - if err = formatXLMigrate(rootPath); err == nil { + if err = formatErasureMigrate(rootPath); err == nil { t.Fatal("Expected to fail with unexpected backend format") } - m = &formatXLV1{} - m.Format = formatBackendXL + m = &formatErasureV1{} + m.Format = formatBackendErasure m.Version = formatMetaVersionV1 - m.XL.Version = "30" - m.XL.Disk = mustGetUUID() - m.XL.JBOD = []string{m.XL.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} + m.Erasure.Version = "30" + m.Erasure.Disk = mustGetUUID() + m.Erasure.JBOD = []string{m.Erasure.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} b, err = json.Marshal(m) if err != nil 
{ @@ -245,25 +245,25 @@ func TestFormatXLMigrate(t *testing.T) { t.Fatal(err) } - if err = formatXLMigrate(rootPath); err == nil { + if err = formatErasureMigrate(rootPath); err == nil { t.Fatal("Expected to fail with unexpected backend format version number") } } // Tests check format xl value. -func TestCheckFormatXLValue(t *testing.T) { +func TestCheckFormatErasureValue(t *testing.T) { testCases := []struct { - format *formatXLV3 + format *formatErasureV3 success bool }{ - // Invalid XL format version "2". + // Invalid Erasure format version "2". { - &formatXLV3{ + &formatErasureV3{ formatMetaV1: formatMetaV1{ Version: "2", - Format: "XL", + Format: "Erasure", }, - XL: struct { + Erasure: struct { Version string `json:"version"` This string `json:"this"` Sets [][]string `json:"sets"` @@ -274,14 +274,14 @@ func TestCheckFormatXLValue(t *testing.T) { }, false, }, - // Invalid XL format "Unknown". + // Invalid Erasure format "Unknown". { - &formatXLV3{ + &formatErasureV3{ formatMetaV1: formatMetaV1{ Version: "1", Format: "Unknown", }, - XL: struct { + Erasure: struct { Version string `json:"version"` This string `json:"this"` Sets [][]string `json:"sets"` @@ -292,14 +292,14 @@ func TestCheckFormatXLValue(t *testing.T) { }, false, }, - // Invalid XL format version "0". + // Invalid Erasure format version "0". { - &formatXLV3{ + &formatErasureV3{ formatMetaV1: formatMetaV1{ Version: "1", - Format: "XL", + Format: "Erasure", }, - XL: struct { + Erasure: struct { Version string `json:"version"` This string `json:"this"` Sets [][]string `json:"sets"` @@ -314,65 +314,65 @@ func TestCheckFormatXLValue(t *testing.T) { // Valid all test cases. for i, testCase := range testCases { - if err := checkFormatXLValue(testCase.format); err != nil && testCase.success { + if err := checkFormatErasureValue(testCase.format); err != nil && testCase.success { t.Errorf("Test %d: Expected failure %s", i+1, err) } } } -// Tests getFormatXLInQuorum() -func TestGetFormatXLInQuorumCheck(t *testing.T) { +// Tests getFormatErasureInQuorum() +func TestGetFormatErasureInQuorumCheck(t *testing.T) { setCount := 2 drivesPerSet := 16 - format := newFormatXLV3(setCount, drivesPerSet) - formats := make([]*formatXLV3, 32) + format := newFormatErasureV3(setCount, drivesPerSet) + formats := make([]*formatErasureV3, 32) for i := 0; i < setCount; i++ { for j := 0; j < drivesPerSet; j++ { newFormat := format.Clone() - newFormat.XL.This = format.XL.Sets[i][j] + newFormat.Erasure.This = format.Erasure.Sets[i][j] formats[i*drivesPerSet+j] = newFormat } } // Return a format from list of formats in quorum. - quorumFormat, err := getFormatXLInQuorum(formats) + quorumFormat, err := getFormatErasureInQuorum(formats) if err != nil { t.Fatal(err) } // Check if the reference format and input formats are same. - if err = formatXLV3Check(quorumFormat, formats[0]); err != nil { + if err = formatErasureV3Check(quorumFormat, formats[0]); err != nil { t.Fatal(err) } // QuorumFormat has .This field empty on purpose, expect a failure. 
- if err = formatXLV3Check(formats[0], quorumFormat); err == nil { + if err = formatErasureV3Check(formats[0], quorumFormat); err == nil { t.Fatal("Unexpected success") } formats[0] = nil - quorumFormat, err = getFormatXLInQuorum(formats) + quorumFormat, err = getFormatErasureInQuorum(formats) if err != nil { t.Fatal(err) } badFormat := *quorumFormat - badFormat.XL.Sets = nil - if err = formatXLV3Check(quorumFormat, &badFormat); err == nil { + badFormat.Erasure.Sets = nil + if err = formatErasureV3Check(quorumFormat, &badFormat); err == nil { t.Fatal("Unexpected success") } badFormatUUID := *quorumFormat - badFormatUUID.XL.Sets[0][0] = "bad-uuid" - if err = formatXLV3Check(quorumFormat, &badFormatUUID); err == nil { + badFormatUUID.Erasure.Sets[0][0] = "bad-uuid" + if err = formatErasureV3Check(quorumFormat, &badFormatUUID); err == nil { t.Fatal("Unexpected success") } badFormatSetSize := *quorumFormat - badFormatSetSize.XL.Sets[0] = nil - if err = formatXLV3Check(quorumFormat, &badFormatSetSize); err == nil { + badFormatSetSize.Erasure.Sets[0] = nil + if err = formatErasureV3Check(quorumFormat, &badFormatSetSize); err == nil { t.Fatal("Unexpected success") } @@ -381,36 +381,36 @@ func TestGetFormatXLInQuorumCheck(t *testing.T) { formats[i] = nil } } - if _, err = getFormatXLInQuorum(formats); err == nil { + if _, err = getFormatErasureInQuorum(formats); err == nil { t.Fatal("Unexpected success") } } -// Tests formatXLGetDeploymentID() -func TestGetXLID(t *testing.T) { +// Tests formatErasureGetDeploymentID() +func TestGetErasureID(t *testing.T) { setCount := 2 drivesPerSet := 8 - format := newFormatXLV3(setCount, drivesPerSet) - formats := make([]*formatXLV3, 16) + format := newFormatErasureV3(setCount, drivesPerSet) + formats := make([]*formatErasureV3, 16) for i := 0; i < setCount; i++ { for j := 0; j < drivesPerSet; j++ { newFormat := format.Clone() - newFormat.XL.This = format.XL.Sets[i][j] + newFormat.Erasure.This = format.Erasure.Sets[i][j] formats[i*drivesPerSet+j] = newFormat } } // Return a format from list of formats in quorum. - quorumFormat, err := getFormatXLInQuorum(formats) + quorumFormat, err := getFormatErasureInQuorum(formats) if err != nil { t.Fatal(err) } // Check if the reference format and input formats are same. 
var id string - if id, err = formatXLGetDeploymentID(quorumFormat, formats); err != nil { + if id, err = formatErasureGetDeploymentID(quorumFormat, formats); err != nil { t.Fatal(err) } @@ -419,15 +419,15 @@ func TestGetXLID(t *testing.T) { } formats[0] = nil - if id, err = formatXLGetDeploymentID(quorumFormat, formats); err != nil { + if id, err = formatErasureGetDeploymentID(quorumFormat, formats); err != nil { t.Fatal(err) } if id == "" { t.Fatal("ID cannot be empty.") } - formats[1].XL.Sets[0][0] = "bad-uuid" - if id, err = formatXLGetDeploymentID(quorumFormat, formats); err != nil { + formats[1].Erasure.Sets[0][0] = "bad-uuid" + if id, err = formatErasureGetDeploymentID(quorumFormat, formats); err != nil { t.Fatal(err) } @@ -436,7 +436,7 @@ func TestGetXLID(t *testing.T) { } formats[2].ID = "bad-id" - if _, err = formatXLGetDeploymentID(quorumFormat, formats); err != errCorruptedFormat { + if _, err = formatErasureGetDeploymentID(quorumFormat, formats); err != errCorruptedFormat { t.Fatal("Unexpected Success") } } @@ -446,19 +446,19 @@ func TestNewFormatSets(t *testing.T) { setCount := 2 drivesPerSet := 16 - format := newFormatXLV3(setCount, drivesPerSet) - formats := make([]*formatXLV3, 32) + format := newFormatErasureV3(setCount, drivesPerSet) + formats := make([]*formatErasureV3, 32) errs := make([]error, 32) for i := 0; i < setCount; i++ { for j := 0; j < drivesPerSet; j++ { newFormat := format.Clone() - newFormat.XL.This = format.XL.Sets[i][j] + newFormat.Erasure.This = format.Erasure.Sets[i][j] formats[i*drivesPerSet+j] = newFormat } } - quorumFormat, err := getFormatXLInQuorum(formats) + quorumFormat, err := getFormatErasureInQuorum(formats) if err != nil { t.Fatal(err) } diff --git a/cmd/format-fs.go b/cmd/format-fs.go index 1fab6dc62..13faff731 100644 --- a/cmd/format-fs.go +++ b/cmd/format-fs.go @@ -75,7 +75,7 @@ func newFormatFSV1() (format *formatFSV1) { } // Returns the field formatMetaV1.Format i.e the string "fs" which is never likely to change. -// We do not use this function in XL to get the format as the file is not fcntl-locked on XL. +// We do not use this function in Erasure to get the format as the file is not fcntl-locked on Erasure. func formatMetaGetFormatBackendFS(r io.ReadSeeker) (string, error) { format := &formatMetaV1{} if err := jsonLoad(r, format); err != nil { diff --git a/cmd/fs-v1-helpers.go b/cmd/fs-v1-helpers.go index b312a5a92..9aed0a29c 100644 --- a/cmd/fs-v1-helpers.go +++ b/cmd/fs-v1-helpers.go @@ -42,7 +42,7 @@ func fsRemoveFile(ctx context.Context, filePath string) (err error) { } if err = os.Remove((filePath)); err != nil { - err = osErrToFSFileErr(err) + err = osErrToFileErr(err) if err != errFileNotFound { logger.LogIf(ctx, err) } @@ -186,37 +186,11 @@ func fsStatVolume(ctx context.Context, volume string) (os.FileInfo, error) { return fi, nil } -// Is a one place function which converts all os.PathError -// into a more FS object layer friendly form, converts -// known errors into their typed form for top level -// interpretation. -func osErrToFSFileErr(err error) error { - if err == nil { - return nil - } - if os.IsNotExist(err) { - return errFileNotFound - } - if os.IsPermission(err) { - return errFileAccessDenied - } - if isSysErrNotDir(err) { - return errFileNotFound - } - if isSysErrPathNotFound(err) { - return errFileNotFound - } - if isSysErrTooManyFiles(err) { - return errTooManyOpenFiles - } - return err -} - // Lookup if directory exists, returns directory attributes upon success. 
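The FS-only error converter deleted above is superseded throughout this patch by a shared osErrToFileErr. A sketch of the consolidated mapping, assumed to mirror the deleted body; the shared version lives with the storage layer and may translate additional system errors (for example I/O faults) not shown here:

// osErrToFileErr converts os-level path errors into the typed file
// errors understood by the object layer (sketch; see assumptions above).
func osErrToFileErr(err error) error {
	if err == nil {
		return nil
	}
	if os.IsNotExist(err) {
		return errFileNotFound
	}
	if os.IsPermission(err) {
		return errFileAccessDenied
	}
	if isSysErrNotDir(err) || isSysErrPathNotFound(err) {
		return errFileNotFound
	}
	if isSysErrTooManyFiles(err) {
		return errTooManyOpenFiles
	}
	return err
}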
func fsStatDir(ctx context.Context, statDir string) (os.FileInfo, error) { fi, err := fsStat(ctx, statDir) if err != nil { - err = osErrToFSFileErr(err) + err = osErrToFileErr(err) if err != errFileNotFound { logger.LogIf(ctx, err) } @@ -232,7 +206,7 @@ func fsStatDir(ctx context.Context, statDir string) (os.FileInfo, error) { func fsStatFile(ctx context.Context, statFile string) (os.FileInfo, error) { fi, err := fsStat(ctx, statFile) if err != nil { - err = osErrToFSFileErr(err) + err = osErrToFileErr(err) if err != errFileNotFound { logger.LogIf(ctx, err) } @@ -267,13 +241,13 @@ func fsOpenFile(ctx context.Context, readPath string, offset int64) (io.ReadClos fr, err := os.Open(readPath) if err != nil { - return nil, 0, osErrToFSFileErr(err) + return nil, 0, osErrToFileErr(err) } // Stat to get the size of the file at path. st, err := fr.Stat() if err != nil { - err = osErrToFSFileErr(err) + err = osErrToFileErr(err) if err != errFileNotFound { logger.LogIf(ctx, err) } @@ -327,7 +301,7 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, buf [] } writer, err := lock.Open(filePath, flags, 0666) if err != nil { - return 0, osErrToFSFileErr(err) + return 0, osErrToFileErr(err) } defer writer.Close() @@ -399,7 +373,7 @@ func fsSimpleRenameFile(ctx context.Context, sourcePath, destPath string) error if err := os.Rename(sourcePath, destPath); err != nil { logger.LogIf(ctx, err) - return osErrToFSFileErr(err) + return osErrToFileErr(err) } return nil diff --git a/cmd/fs-v1-helpers_test.go b/cmd/fs-v1-helpers_test.go index 972616ce2..ed0d906f7 100644 --- a/cmd/fs-v1-helpers_test.go +++ b/cmd/fs-v1-helpers_test.go @@ -28,10 +28,10 @@ import ( ) func TestFSRenameFile(t *testing.T) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -53,10 +53,10 @@ func TestFSRenameFile(t *testing.T) { } func TestFSStats(t *testing.T) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -170,11 +170,11 @@ func TestFSStats(t *testing.T) { if testCase.srcPath != "" { if _, err := fsStatFile(GlobalContext, pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); err != testCase.expectedErr { - t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + t.Fatalf("TestErasureStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } } else { if _, err := fsStatVolume(GlobalContext, pathJoin(testCase.srcFSPath, testCase.srcVol)); err != testCase.expectedErr { - t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + t.Fatalf("TestFS case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } } } @@ -182,9 +182,9 @@ func TestFSStats(t *testing.T) { func TestFSCreateAndOpen(t *testing.T) { // Setup test environment. 
- _, path, err := newPosixTestSetup() + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -246,10 +246,10 @@ func TestFSCreateAndOpen(t *testing.T) { } func TestFSDeletes(t *testing.T) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -349,10 +349,10 @@ func TestFSDeletes(t *testing.T) { } func BenchmarkFSDeleteFile(b *testing.B) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - b.Fatalf("Unable to create posix test setup, %s", err) + b.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -383,10 +383,10 @@ func BenchmarkFSDeleteFile(b *testing.B) { // Tests fs removes. func TestFSRemoves(t *testing.T) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -500,10 +500,10 @@ func TestFSRemoves(t *testing.T) { } func TestFSRemoveMeta(t *testing.T) { - // create posix test setup - _, fsPath, err := newPosixTestSetup() + // create xlStorage test setup + _, fsPath, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(fsPath) diff --git a/cmd/fs-v1-metadata_test.go b/cmd/fs-v1-metadata_test.go index 1d8cb7ba5..71c9a2fa2 100644 --- a/cmd/fs-v1-metadata_test.go +++ b/cmd/fs-v1-metadata_test.go @@ -31,7 +31,7 @@ func TestFSV1MetadataObjInfo(t *testing.T) { if objInfo.Size != 0 { t.Fatal("Unexpected object info value for Size", objInfo.Size) } - if objInfo.ModTime != timeSentinel { + if !objInfo.ModTime.Equal(timeSentinel) { t.Fatal("Unexpected object info value for ModTime ", objInfo.ModTime) } if objInfo.IsDir { @@ -53,7 +53,7 @@ func TestReadFSMetadata(t *testing.T) { bucketName := "bucket" objectName := "object" - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Unexpected err: ", err) } if _, err := obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil { @@ -88,7 +88,7 @@ func TestWriteFSMetadata(t *testing.T) { bucketName := "bucket" objectName := "object" - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Unexpected err: ", err) } if _, err := obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil { diff --git a/cmd/fs-v1-multipart.go b/cmd/fs-v1-multipart.go index 9cf8c3755..37b78a82e 100644 --- a/cmd/fs-v1-multipart.go +++ b/cmd/fs-v1-multipart.go @@ -252,6 
+252,14 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) { + if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID { + return pi, VersionNotFound{ + Bucket: srcBucket, + Object: srcObject, + VersionID: srcOpts.VersionID, + } + } + if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, fs); err != nil { return pi, toObjectErr(err) } @@ -269,6 +277,14 @@ func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d // written to '.minio.sys/tmp' location and safely renamed to // '.minio.sys/multipart' for reach parts. func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) { + if opts.VersionID != "" && opts.VersionID != nullVersionID { + return pi, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } + data := r.Reader if err := checkPutObjectPartArgs(ctx, bucket, object, fs); err != nil { return pi, toObjectErr(err, bucket) diff --git a/cmd/fs-v1-multipart_test.go b/cmd/fs-v1-multipart_test.go index 91d947f55..bb1a662f7 100644 --- a/cmd/fs-v1-multipart_test.go +++ b/cmd/fs-v1-multipart_test.go @@ -40,7 +40,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) { // Create a context we can cancel. ctx, cancel := context.WithCancel(GlobalContext) - obj.MakeBucketWithLocation(ctx, bucketName, "", false) + obj.MakeBucketWithLocation(ctx, bucketName, BucketOptions{}) uploadID, err := obj.NewMultipartUpload(ctx, bucketName, objectName, ObjectOptions{}) if err != nil { @@ -81,7 +81,7 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) { bucketName := "bucket" objectName := "object" - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Cannot create bucket, err: ", err) } @@ -106,7 +106,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) { data := []byte("12345") dataLen := int64(len(data)) - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Cannot create bucket, err: ", err) } @@ -139,7 +139,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) { objectName := "object" data := []byte("12345") - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Cannot create bucket, err: ", err) } @@ -172,7 +172,7 @@ func TestCompleteMultipartUpload(t *testing.T) { objectName := "object" data := []byte("12345") - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Cannot create bucket, err: ", err) } @@ -204,7 +204,7 @@ func TestAbortMultipartUpload(t *testing.T) { objectName := "object" data := []byte("12345") - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != 
nil { t.Fatal("Cannot create bucket, err: ", err) } @@ -235,7 +235,7 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) { bucketName := "bucket" objectName := "object" - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Cannot create bucket, err: ", err) } diff --git a/cmd/fs-v1-rwpool_test.go b/cmd/fs-v1-rwpool_test.go index f648ab852..5788f6c72 100644 --- a/cmd/fs-v1-rwpool_test.go +++ b/cmd/fs-v1-rwpool_test.go @@ -46,10 +46,10 @@ func TestRWPoolLongPath(t *testing.T) { // Tests all RWPool methods. func TestRWPool(t *testing.T) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index 07c385ec2..784a623cf 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -346,7 +346,7 @@ func (fs *FSObjects) crawlBucket(ctx context.Context, bucket string, cache dataU } oi := fsMeta.ToObjectInfo(bucket, object, fi) - sz := item.applyActions(ctx, fs, actionMeta{oi: oi, meta: fsMeta.Meta}) + sz := item.applyActions(ctx, fs, actionMeta{oi: oi}) if sz >= 0 { return sz, nil } @@ -382,10 +382,9 @@ func (fs *FSObjects) statBucketDir(ctx context.Context, bucket string) (os.FileI return st, nil } -// MakeBucketWithLocation - create a new bucket, returns if it -// already exists. -func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { - if lockEnabled { +// MakeBucketWithLocation - create a new bucket, returns if it already exists. +func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error { + if opts.LockEnabled || opts.VersioningEnabled { return NotImplemented{} } @@ -581,6 +580,14 @@ func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string, forceDelet // if source object and destination object are same we only // update metadata. func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) { + if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID { + return oi, VersionNotFound{ + Bucket: srcBucket, + Object: srcObject, + VersionID: srcOpts.VersionID, + } + } + cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject)) defer ObjectPathUpdated(path.Join(dstBucket, dstObject)) @@ -649,6 +656,13 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu // GetObjectNInfo - returns object info and a reader for object // content. func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { + if opts.VersionID != "" && opts.VersionID != nullVersionID { + return nil, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } if err = checkGetObjArgs(ctx, bucket, object); err != nil { return nil, err } @@ -746,6 +760,14 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string, // startOffset indicates the starting read location of the object. // length indicates the total length of the object. 
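The same null-version guard now recurs across the FS handlers (CopyObjectPart and PutObjectPart earlier, CopyObject, GetObjectNInfo, and the read, stat, and delete paths below): the FS backend stores only the null version, so any request addressing a specific non-null version fails early. A hypothetical helper capturing that pattern; the name is illustrative and is not introduced by this patch:

// checkFSNullVersion rejects requests for any version other than the
// implicit null version, which is all the FS backend can store.
func checkFSNullVersion(bucket, object, versionID string) error {
	if versionID != "" && versionID != nullVersionID {
		return VersionNotFound{
			Bucket:    bucket,
			Object:    object,
			VersionID: versionID,
		}
	}
	return nil
}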
func (fs *FSObjects) GetObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) { + if opts.VersionID != "" && opts.VersionID != nullVersionID { + return VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } + if err = checkGetObjArgs(ctx, bucket, object); err != nil { return err } @@ -948,6 +970,13 @@ func (fs *FSObjects) getObjectInfoWithLock(ctx context.Context, bucket, object s // GetObjectInfo - reads object metadata and replies back ObjectInfo. func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, e error) { + if opts.VersionID != "" && opts.VersionID != nullVersionID { + return oi, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } atomic.AddInt64(&fs.activeIOCount, 1) defer func() { @@ -998,6 +1027,10 @@ func (fs *FSObjects) parentDirIsObject(ctx context.Context, bucket, parent strin // Additionally writes `fs.json` which carries the necessary metadata // for future object operations. func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, retErr error) { + if opts.Versioned { + return objInfo, NotImplemented{} + } + if err := checkPutObjectArgs(ctx, bucket, object, fs, r.Size()); err != nil { return ObjectInfo{}, err } @@ -1146,26 +1179,45 @@ func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string // DeleteObjects - deletes an object from a bucket, this operation is destructive // and there are no rollbacks supported. -func (fs *FSObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (fs *FSObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { errs := make([]error, len(objects)) + dobjects := make([]DeletedObject, len(objects)) for idx, object := range objects { - errs[idx] = fs.DeleteObject(ctx, bucket, object) + if object.VersionID != "" { + errs[idx] = NotImplemented{} + continue + } + _, errs[idx] = fs.DeleteObject(ctx, bucket, object.ObjectName, opts) + if errs[idx] == nil || isErrObjectNotFound(errs[idx]) { + dobjects[idx] = DeletedObject{ + ObjectName: object.ObjectName, + } + errs[idx] = nil + } } - return errs, nil + return dobjects, errs } // DeleteObject - deletes an object from a bucket, this operation is destructive // and there are no rollbacks supported. -func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) error { +func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { + if opts.VersionID != "" && opts.VersionID != nullVersionID { + return objInfo, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } + // Acquire a write lock before deleting the object. 
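// The version guard above runs before taking the namespace lock, so a
// request for a non-null version fails fast without lock contention.
// The lock itself is held via the deferred Unlock for the rest of the
// call, covering removal of both the data file and its fs.json.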
lk := fs.NewNSLock(ctx, bucket, object) - if err := lk.GetLock(globalOperationTimeout); err != nil { - return err + if err = lk.GetLock(globalOperationTimeout); err != nil { + return objInfo, err } defer lk.Unlock() - if err := checkDelObjArgs(ctx, bucket, object); err != nil { - return err + if err = checkDelObjArgs(ctx, bucket, object); err != nil { + return objInfo, err } defer ObjectPathUpdated(path.Join(bucket, object)) @@ -1175,8 +1227,8 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) er atomic.AddInt64(&fs.activeIOCount, -1) }() - if _, err := fs.statBucketDir(ctx, bucket); err != nil { - return toObjectErr(err, bucket) + if _, err = fs.statBucketDir(ctx, bucket); err != nil { + return objInfo, toObjectErr(err, bucket) } minioMetaBucketDir := pathJoin(fs.fsPath, minioMetaBucket) @@ -1189,23 +1241,23 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) er } if lerr != nil && lerr != errFileNotFound { logger.LogIf(ctx, lerr) - return toObjectErr(lerr, bucket, object) + return objInfo, toObjectErr(lerr, bucket, object) } } // Delete the object. - if err := fsDeleteFile(ctx, pathJoin(fs.fsPath, bucket), pathJoin(fs.fsPath, bucket, object)); err != nil { - return toObjectErr(err, bucket, object) + if err = fsDeleteFile(ctx, pathJoin(fs.fsPath, bucket), pathJoin(fs.fsPath, bucket, object)); err != nil { + return objInfo, toObjectErr(err, bucket, object) } if bucket != minioMetaBucket { // Delete the metadata object. - err := fsDeleteFile(ctx, minioMetaBucketDir, fsMetaPath) + err = fsDeleteFile(ctx, minioMetaBucketDir, fsMetaPath) if err != nil && err != errFileNotFound { - return toObjectErr(err, bucket, object) + return objInfo, toObjectErr(err, bucket, object) } } - return nil + return ObjectInfo{Bucket: bucket, Name: object}, nil } // Returns function "listDir" of the type listDirFunc. @@ -1313,6 +1365,11 @@ func (fs *FSObjects) getObjectETag(ctx context.Context, bucket, entry string, lo return extractETag(fsMeta.Meta), nil } +// ListObjectVersions not implemented for FS mode. +func (fs *FSObjects) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (loi ListObjectVersionsInfo, e error) { + return loi, NotImplemented{} +} + // ListObjects - list all objects at prefix up to maxKeys, optionally delimited by '/'. Maintains the list pool state for future re-entrant list requests.
func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { @@ -1327,7 +1384,14 @@ func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, de } // GetObjectTags - get object tags from an existing object -func (fs *FSObjects) GetObjectTags(ctx context.Context, bucket, object string) (*tags.Tags, error) { +func (fs *FSObjects) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) { + if opts.VersionID != "" && opts.VersionID != nullVersionID { + return nil, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } oi, err := fs.GetObjectInfo(ctx, bucket, object, ObjectOptions{}) if err != nil { return nil, err @@ -1337,7 +1401,15 @@ func (fs *FSObjects) GetObjectTags(ctx context.Context, bucket, object string) ( } // PutObjectTags - replace or add tags to an existing object -func (fs *FSObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string) error { +func (fs *FSObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error { + if opts.VersionID != "" && opts.VersionID != nullVersionID { + return VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + } + } + fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile) fsMeta := fsMetaV1{} wlk, err := fs.rwPool.Write(fsMetaPath) @@ -1369,30 +1441,30 @@ func (fs *FSObjects) PutObjectTags(ctx context.Context, bucket, object string, t } // DeleteObjectTags - delete object tags from an existing object -func (fs *FSObjects) DeleteObjectTags(ctx context.Context, bucket, object string) error { - return fs.PutObjectTags(ctx, bucket, object, "") +func (fs *FSObjects) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error { + return fs.PutObjectTags(ctx, bucket, object, "", opts) } -// ReloadFormat - no-op for fs, Valid only for XL. +// ReloadFormat - no-op for fs, Valid only for Erasure. func (fs *FSObjects) ReloadFormat(ctx context.Context, dryRun bool) error { logger.LogIf(ctx, NotImplemented{}) return NotImplemented{} } -// HealFormat - no-op for fs, Valid only for XL. +// HealFormat - no-op for fs, Valid only for Erasure. func (fs *FSObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { logger.LogIf(ctx, NotImplemented{}) return madmin.HealResultItem{}, NotImplemented{} } -// HealObject - no-op for fs. Valid only for XL. -func (fs *FSObjects) HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) ( +// HealObject - no-op for fs. Valid only for Erasure. +func (fs *FSObjects) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) ( res madmin.HealResultItem, err error) { logger.LogIf(ctx, NotImplemented{}) return res, NotImplemented{} } -// HealBucket - no-op for fs, Valid only for XL. +// HealBucket - no-op for fs, Valid only for Erasure. func (fs *FSObjects) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem, error) { logger.LogIf(ctx, NotImplemented{}) @@ -1408,13 +1480,13 @@ func (fs *FSObjects) Walk(ctx context.Context, bucket, prefix string, results ch return fsWalk(ctx, fs, bucket, prefix, fs.listDirFactory(), results, fs.getObjectInfo, fs.getObjectInfo) } -// HealObjects - no-op for fs. Valid only for XL. 
-func (fs *FSObjects) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn healObjectFn) (e error) { +// HealObjects - no-op for fs. Valid only for Erasure. +func (fs *FSObjects) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn HealObjectFn) (e error) { logger.LogIf(ctx, NotImplemented{}) return NotImplemented{} } -// ListBucketsHeal - list all buckets to be healed. Valid only for XL +// ListBucketsHeal - list all buckets to be healed. Valid only for Erasure func (fs *FSObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { logger.LogIf(ctx, NotImplemented{}) return []BucketInfo{}, NotImplemented{} diff --git a/cmd/fs-v1_test.go b/cmd/fs-v1_test.go index 62da095fc..ae55fb7ac 100644 --- a/cmd/fs-v1_test.go +++ b/cmd/fs-v1_test.go @@ -36,7 +36,7 @@ func TestFSParentDirIsObject(t *testing.T) { bucketName := "testbucket" objectName := "object" - if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal(err) } objectContent := "12345" @@ -124,7 +124,7 @@ func TestFSShutdown(t *testing.T) { fs := obj.(*FSObjects) objectContent := "12345" - obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) + obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), ObjectOptions{}) return fs, disk } @@ -138,7 +138,7 @@ func TestFSShutdown(t *testing.T) { // Test Shutdown with faulty disk fs, disk = prepareTest() - fs.DeleteObject(GlobalContext, bucketName, objectName) + fs.DeleteObject(GlobalContext, bucketName, objectName, ObjectOptions{}) os.RemoveAll(disk) if err := fs.Shutdown(GlobalContext); err != nil { t.Fatal("Got unexpected fs shutdown error: ", err) @@ -155,12 +155,12 @@ func TestFSGetBucketInfo(t *testing.T) { fs := obj.(*FSObjects) bucketName := "bucket" - err := obj.MakeBucketWithLocation(GlobalContext, "a", "", false) + err := obj.MakeBucketWithLocation(GlobalContext, "a", BucketOptions{}) if !isSameType(err, BucketNameInvalid{}) { t.Fatal("BucketNameInvalid error not returned") } - err = obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) + err = obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) if err != nil { t.Fatal(err) } @@ -199,7 +199,7 @@ func TestFSPutObject(t *testing.T) { bucketName := "bucket" objectName := "1/2/3/4/object" - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal(err) } @@ -267,33 +267,33 @@ func TestFSDeleteObject(t *testing.T) { bucketName := "bucket" objectName := "object" - obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) + obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}) // Test with invalid bucket name - if err := fs.DeleteObject(GlobalContext, "fo", objectName); !isSameType(err, BucketNameInvalid{}) { + if _, err := fs.DeleteObject(GlobalContext, "fo", objectName, ObjectOptions{}); !isSameType(err, BucketNameInvalid{}) { t.Fatal("Unexpected error: ", err) } // Test with bucket does not exist - if err := 
fs.DeleteObject(GlobalContext, "foobucket", "fooobject"); !isSameType(err, BucketNotFound{}) { + if _, err := fs.DeleteObject(GlobalContext, "foobucket", "fooobject", ObjectOptions{}); !isSameType(err, BucketNotFound{}) { t.Fatal("Unexpected error: ", err) } // Test with invalid object name - if err := fs.DeleteObject(GlobalContext, bucketName, "\\"); !(isSameType(err, ObjectNotFound{}) || isSameType(err, ObjectNameInvalid{})) { + if _, err := fs.DeleteObject(GlobalContext, bucketName, "\\", ObjectOptions{}); !(isSameType(err, ObjectNotFound{}) || isSameType(err, ObjectNameInvalid{})) { t.Fatal("Unexpected error: ", err) } // Test with object does not exist. - if err := fs.DeleteObject(GlobalContext, bucketName, "foooobject"); !isSameType(err, ObjectNotFound{}) { + if _, err := fs.DeleteObject(GlobalContext, bucketName, "foooobject", ObjectOptions{}); !isSameType(err, ObjectNotFound{}) { t.Fatal("Unexpected error: ", err) } // Test with valid condition - if err := fs.DeleteObject(GlobalContext, bucketName, objectName); err != nil { + if _, err := fs.DeleteObject(GlobalContext, bucketName, objectName, ObjectOptions{}); err != nil { t.Fatal("Unexpected error: ", err) } // Delete object should err disk not found. os.RemoveAll(disk) - if err := fs.DeleteObject(GlobalContext, bucketName, objectName); err != nil { + if _, err := fs.DeleteObject(GlobalContext, bucketName, objectName, ObjectOptions{}); err != nil { if !isSameType(err, BucketNotFound{}) { t.Fatal("Unexpected error: ", err) } @@ -311,7 +311,7 @@ func TestFSDeleteBucket(t *testing.T) { fs := obj.(*FSObjects) bucketName := "bucket" - err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) + err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) if err != nil { t.Fatal("Unexpected error: ", err) } @@ -330,7 +330,7 @@ func TestFSDeleteBucket(t *testing.T) { t.Fatal("Unexpected error: ", err) } - obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) + obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) // Delete bucket should get error disk not found. os.RemoveAll(disk) @@ -351,7 +351,7 @@ func TestFSListBuckets(t *testing.T) { fs := obj.(*FSObjects) bucketName := "bucket" - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { + if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { t.Fatal("Unexpected error: ", err) } @@ -389,7 +389,7 @@ func TestFSHealObject(t *testing.T) { defer os.RemoveAll(disk) obj := initFSObjects(disk, t) - _, err := obj.HealObject(GlobalContext, "bucket", "object", madmin.HealOpts{}) + _, err := obj.HealObject(GlobalContext, "bucket", "object", "", madmin.HealOpts{}) if err == nil || !isSameType(err, NotImplemented{}) { t.Fatalf("Heal Object should return NotImplemented error ") } diff --git a/cmd/gateway-common.go b/cmd/gateway-common.go index 0c31952f3..fbbc1a89b 100644 --- a/cmd/gateway-common.go +++ b/cmd/gateway-common.go @@ -55,42 +55,6 @@ var ( IsStringEqual = isStringEqual ) -// StatInfo - alias for statInfo -type StatInfo struct { - statInfo -} - -// AnonErrToObjectErr - converts standard http codes into meaningful object layer errors. 
-func AnonErrToObjectErr(statusCode int, params ...string) error { - bucket := "" - object := "" - if len(params) >= 1 { - bucket = params[0] - } - if len(params) == 2 { - object = params[1] - } - - switch statusCode { - case http.StatusNotFound: - if object != "" { - return ObjectNotFound{bucket, object} - } - return BucketNotFound{Bucket: bucket} - case http.StatusBadRequest: - if object != "" { - return ObjectNameInvalid{bucket, object} - } - return BucketNameInvalid{Bucket: bucket} - case http.StatusForbidden: - fallthrough - case http.StatusUnauthorized: - return AllAccessDisabled{bucket, object} - } - - return errUnexpected -} - // FromMinioClientMetadata converts minio metadata to map[string]string func FromMinioClientMetadata(metadata map[string][]string) map[string]string { mm := map[string]string{} diff --git a/cmd/gateway-unsupported.go b/cmd/gateway-unsupported.go index 927a83c89..9823fec26 100644 --- a/cmd/gateway-unsupported.go +++ b/cmd/gateway-unsupported.go @@ -26,6 +26,7 @@ import ( bucketsse "github.com/minio/minio/pkg/bucket/encryption" "github.com/minio/minio/pkg/bucket/lifecycle" "github.com/minio/minio/pkg/bucket/policy" + "github.com/minio/minio/pkg/bucket/versioning" "github.com/minio/minio/pkg/madmin" ) @@ -88,6 +89,12 @@ func (a GatewayUnsupported) GetMultipartInfo(ctx context.Context, bucket string, return MultipartInfo{}, NotImplemented{} } +// ListObjectVersions lists the versions of objects in the specified bucket +func (a GatewayUnsupported) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) { + logger.LogIf(ctx, NotImplemented{}) + return ListObjectVersionsInfo{}, NotImplemented{} +} + // ListObjectParts returns all object parts for specified object in specified bucket func (a GatewayUnsupported) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (lpi ListPartsInfo, err error) { logger.LogIf(ctx, NotImplemented{}) @@ -121,33 +128,45 @@ func (a GatewayUnsupported) DeleteBucketPolicy(ctx context.Context, bucket strin return NotImplemented{} } -// SetBucketLifecycle sets lifecycle on bucket +// SetBucketVersioning enables versioning on a bucket. +func (a GatewayUnsupported) SetBucketVersioning(ctx context.Context, bucket string, v *versioning.Versioning) error { + logger.LogIf(ctx, NotImplemented{}) + return NotImplemented{} +} + +// GetBucketVersioning retrieves versioning configuration of a bucket. +func (a GatewayUnsupported) GetBucketVersioning(ctx context.Context, bucket string) (*versioning.Versioning, error) { + logger.LogIf(ctx, NotImplemented{}) + return nil, NotImplemented{} +} + +// SetBucketLifecycle enables lifecycle policies on a bucket. func (a GatewayUnsupported) SetBucketLifecycle(ctx context.Context, bucket string, lifecycle *lifecycle.Lifecycle) error { logger.LogIf(ctx, NotImplemented{}) return NotImplemented{} } -// GetBucketLifecycle will get lifecycle on bucket +// GetBucketLifecycle retrieves lifecycle configuration of a bucket.
func (a GatewayUnsupported) GetBucketLifecycle(ctx context.Context, bucket string) (*lifecycle.Lifecycle, error) { return nil, NotImplemented{} } -// DeleteBucketLifecycle deletes all lifecycle on bucket +// DeleteBucketLifecycle deletes all lifecycle policies on a bucket func (a GatewayUnsupported) DeleteBucketLifecycle(ctx context.Context, bucket string) error { return NotImplemented{} } -// GetBucketSSEConfig returns bucket encryption config on given bucket +// GetBucketSSEConfig returns bucket encryption config on a bucket func (a GatewayUnsupported) GetBucketSSEConfig(ctx context.Context, bucket string) (*bucketsse.BucketSSEConfig, error) { return nil, NotImplemented{} } -// SetBucketSSEConfig sets bucket encryption config on given bucket +// SetBucketSSEConfig sets bucket encryption config on a bucket func (a GatewayUnsupported) SetBucketSSEConfig(ctx context.Context, bucket string, config *bucketsse.BucketSSEConfig) error { return NotImplemented{} } -// DeleteBucketSSEConfig deletes bucket encryption config on given bucket +// DeleteBucketSSEConfig deletes bucket encryption config on a bucket func (a GatewayUnsupported) DeleteBucketSSEConfig(ctx context.Context, bucket string) error { return NotImplemented{} } @@ -173,7 +192,7 @@ func (a GatewayUnsupported) ListBucketsHeal(ctx context.Context) (buckets []Buck } // HealObject - Not implemented stub -func (a GatewayUnsupported) HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (h madmin.HealResultItem, e error) { +func (a GatewayUnsupported) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (h madmin.HealResultItem, e error) { return h, NotImplemented{} } @@ -188,7 +207,7 @@ func (a GatewayUnsupported) Walk(ctx context.Context, bucket, prefix string, res } // HealObjects - Not implemented stub -func (a GatewayUnsupported) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn healObjectFn) (e error) { +func (a GatewayUnsupported) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn HealObjectFn) (e error) { return NotImplemented{} } @@ -205,19 +224,19 @@ func (a GatewayUnsupported) GetMetrics(ctx context.Context) (*Metrics, error) { } // PutObjectTags - not implemented. -func (a GatewayUnsupported) PutObjectTags(ctx context.Context, bucket, object string, tags string) error { +func (a GatewayUnsupported) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error { logger.LogIf(ctx, NotImplemented{}) return NotImplemented{} } // GetObjectTags - not implemented. -func (a GatewayUnsupported) GetObjectTags(ctx context.Context, bucket, object string) (*tags.Tags, error) { +func (a GatewayUnsupported) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) { logger.LogIf(ctx, NotImplemented{}) return nil, NotImplemented{} } // DeleteObjectTags - not implemented. 
-func (a GatewayUnsupported) DeleteObjectTags(ctx context.Context, bucket, object string) error { +func (a GatewayUnsupported) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error { logger.LogIf(ctx, NotImplemented{}) return NotImplemented{} } diff --git a/cmd/gateway/azure/gateway-azure.go b/cmd/gateway/azure/gateway-azure.go index b2ebf49c1..d5534a753 100644 --- a/cmd/gateway/azure/gateway-azure.go +++ b/cmd/gateway/azure/gateway-azure.go @@ -553,8 +553,8 @@ func (a *azureObjects) StorageInfo(ctx context.Context, _ bool) (si minio.Storag } // MakeBucketWithLocation - Create a new container on azure backend. -func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { - if lockEnabled { +func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error { + if opts.LockEnabled || opts.VersioningEnabled { return minio.NotImplemented{} } @@ -966,21 +966,30 @@ func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, des // DeleteObject - Deletes a blob on azure container, uses Azure // equivalent `BlobURL.Delete`. -func (a *azureObjects) DeleteObject(ctx context.Context, bucket, object string) error { +func (a *azureObjects) DeleteObject(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { blob := a.client.NewContainerURL(bucket).NewBlobURL(object) _, err := blob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) if err != nil { - return azureToObjectError(err, bucket, object) + return minio.ObjectInfo{}, azureToObjectError(err, bucket, object) } - return nil + return minio.ObjectInfo{ + Bucket: bucket, + Name: object, + }, nil } -func (a *azureObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (a *azureObjects) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) { errs := make([]error, len(objects)) + dobjects := make([]minio.DeletedObject, len(objects)) for idx, object := range objects { - errs[idx] = a.DeleteObject(ctx, bucket, object) + _, errs[idx] = a.DeleteObject(ctx, bucket, object.ObjectName, opts) + if errs[idx] == nil { + dobjects[idx] = minio.DeletedObject{ + ObjectName: object.ObjectName, + } + } } - return errs, nil + return dobjects, errs } // ListMultipartUploads - It's decided not to support List Multipart Uploads, hence returning empty result. 
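The Azure gateway above, and the GCS, HDFS and S3 gateways below, all implement the new DeleteObjects contract with the same collect-per-object loop. A minimal sketch of that shared shape, with deleteOne standing in for the backend-specific single-object delete (an illustrative helper, not part of this PR):

package gateway // illustrative package; the real code lives in each gateway

import (
	"context"

	minio "github.com/minio/minio/cmd"
)

// deleteObjectsSketch mirrors the loop each gateway now uses: delete one
// object at a time and record a DeletedObject only on success, keeping
// per-object failures in the parallel errs slice.
func deleteObjectsSketch(ctx context.Context, bucket string, objects []minio.ObjectToDelete,
	opts minio.ObjectOptions,
	deleteOne func(context.Context, string, string, minio.ObjectOptions) (minio.ObjectInfo, error)) ([]minio.DeletedObject, []error) {
	errs := make([]error, len(objects))
	dobjects := make([]minio.DeletedObject, len(objects))
	for idx, object := range objects {
		_, errs[idx] = deleteOne(ctx, bucket, object.ObjectName, opts)
		if errs[idx] == nil {
			dobjects[idx] = minio.DeletedObject{ObjectName: object.ObjectName}
		}
	}
	return dobjects, errs
}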
diff --git a/cmd/gateway/azure/gateway-azure_test.go b/cmd/gateway/azure/gateway-azure_test.go index 449aa7d63..3fd55e2df 100644 --- a/cmd/gateway/azure/gateway-azure_test.go +++ b/cmd/gateway/azure/gateway-azure_test.go @@ -243,43 +243,6 @@ func TestAzureCodesToObjectError(t *testing.T) { } } -func TestAnonErrToObjectErr(t *testing.T) { - testCases := []struct { - name string - statusCode int - params []string - wantErr error - }{ - {"ObjectNotFound", - http.StatusNotFound, - []string{"testBucket", "testObject"}, - minio.ObjectNotFound{Bucket: "testBucket", Object: "testObject"}, - }, - {"BucketNotFound", - http.StatusNotFound, - []string{"testBucket", ""}, - minio.BucketNotFound{Bucket: "testBucket"}, - }, - {"ObjectNameInvalid", - http.StatusBadRequest, - []string{"testBucket", "testObject"}, - minio.ObjectNameInvalid{Bucket: "testBucket", Object: "testObject"}, - }, - {"BucketNameInvalid", - http.StatusBadRequest, - []string{"testBucket", ""}, - minio.BucketNameInvalid{Bucket: "testBucket"}, - }, - } - for _, test := range testCases { - t.Run(test.name, func(t *testing.T) { - if err := minio.AnonErrToObjectErr(test.statusCode, test.params...); !reflect.DeepEqual(err, test.wantErr) { - t.Errorf("anonErrToObjectErr() error = %v, wantErr %v", err, test.wantErr) - } - }) - } -} - func TestCheckAzureUploadID(t *testing.T) { invalidUploadIDs := []string{ "123456789abcdefg", diff --git a/cmd/gateway/gcs/gateway-gcs.go b/cmd/gateway/gcs/gateway-gcs.go index 00df5c1df..33437b547 100644 --- a/cmd/gateway/gcs/gateway-gcs.go +++ b/cmd/gateway/gcs/gateway-gcs.go @@ -421,14 +421,15 @@ func (l *gcsGateway) StorageInfo(ctx context.Context, _ bool) (si minio.StorageI } // MakeBucketWithLocation - Create a new container on GCS backend. -func (l *gcsGateway) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { - if lockEnabled { +func (l *gcsGateway) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error { + if opts.LockEnabled || opts.VersioningEnabled { return minio.NotImplemented{} } bkt := l.client.Bucket(bucket) // we'll default to the us multi-region in case of us-east-1 + location := opts.Location if location == "us-east-1" { location = "us" } @@ -958,22 +959,31 @@ func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject } // DeleteObject - Deletes a blob in bucket -func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object string) error { +func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { err := l.client.Bucket(bucket).Object(object).Delete(ctx) if err != nil { logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket, object) + return minio.ObjectInfo{}, gcsToObjectError(err, bucket, object) } - return nil + return minio.ObjectInfo{ + Bucket: bucket, + Name: object, + }, nil } -func (l *gcsGateway) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (l *gcsGateway) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) { errs := make([]error, len(objects)) + dobjects := make([]minio.DeletedObject, len(objects)) for idx, object := range objects { - errs[idx] = l.DeleteObject(ctx, bucket, object) + _, errs[idx] = l.DeleteObject(ctx, bucket, object.ObjectName, opts) + if errs[idx] == nil { + dobjects[idx] = minio.DeletedObject{ + ObjectName: object.ObjectName, + } + 
} } - return errs, nil + return dobjects, errs } // NewMultipartUpload - upload object in multiple parts diff --git a/cmd/gateway/hdfs/gateway-hdfs.go b/cmd/gateway/hdfs/gateway-hdfs.go index d7c74734d..eb2fe40ad 100644 --- a/cmd/gateway/hdfs/gateway-hdfs.go +++ b/cmd/gateway/hdfs/gateway-hdfs.go @@ -75,7 +75,7 @@ EXAMPLES: {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4" {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*,*.png" - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90 + {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90 {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3 {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75 {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85 @@ -283,8 +283,8 @@ func (n *hdfsObjects) DeleteBucket(ctx context.Context, bucket string, forceDele return hdfsToObjectErr(ctx, n.clnt.Remove(minio.PathJoin(hdfsSeparator, bucket)), bucket) } -func (n *hdfsObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { - if lockEnabled { +func (n *hdfsObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error { + if opts.LockEnabled || opts.VersioningEnabled { return minio.NotImplemented{} } @@ -439,16 +439,26 @@ func (n *hdfsObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continu }, nil } -func (n *hdfsObjects) DeleteObject(ctx context.Context, bucket, object string) error { - return hdfsToObjectErr(ctx, n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), minio.PathJoin(hdfsSeparator, bucket, object)), bucket, object) +func (n *hdfsObjects) DeleteObject(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { + err := hdfsToObjectErr(ctx, n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), minio.PathJoin(hdfsSeparator, bucket, object)), bucket, object) + return minio.ObjectInfo{ + Bucket: bucket, + Name: object, + }, err } -func (n *hdfsObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (n *hdfsObjects) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) { errs := make([]error, len(objects)) + dobjects := make([]minio.DeletedObject, len(objects)) for idx, object := range objects { - errs[idx] = n.DeleteObject(ctx, bucket, object) + _, errs[idx] = n.DeleteObject(ctx, bucket, object.ObjectName, opts) + if errs[idx] == nil { + dobjects[idx] = minio.DeletedObject{ + ObjectName: object.ObjectName, + } + } } - return errs, nil + return dobjects, errs } func (n *hdfsObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) { diff --git a/cmd/gateway/s3/gateway-s3-sse.go b/cmd/gateway/s3/gateway-s3-sse.go index 7cb172da1..a89af2a0e 100644 --- a/cmd/gateway/s3/gateway-s3-sse.go +++ b/cmd/gateway/s3/gateway-s3-sse.go @@ -258,8 +258,8 @@ func getPartMetaPath(object, uploadID string, partID int) string { } // deletes the custom dare metadata file saved at the backend -func (l *s3EncObjects) deleteGWMetadata(ctx 
context.Context, bucket, metaFileName string) error { - return l.s3Objects.DeleteObject(ctx, bucket, metaFileName) +func (l *s3EncObjects) deleteGWMetadata(ctx context.Context, bucket, metaFileName string) (minio.ObjectInfo, error) { + return l.s3Objects.DeleteObject(ctx, bucket, metaFileName, minio.ObjectOptions{}) } func (l *s3EncObjects) getObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error { @@ -381,14 +381,14 @@ func (l *s3EncObjects) CopyObject(ctx context.Context, srcBucket string, srcObje // DeleteObject deletes a blob in bucket // For custom gateway encrypted large objects, cleans up encrypted content and metadata files // from the backend. -func (l *s3EncObjects) DeleteObject(ctx context.Context, bucket string, object string) error { - +func (l *s3EncObjects) DeleteObject(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { // Get dare meta json if _, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(object)); err != nil { - return l.s3Objects.DeleteObject(ctx, bucket, object) + logger.LogIf(minio.GlobalContext, err) + return l.s3Objects.DeleteObject(ctx, bucket, object, opts) } // delete encrypted object - l.s3Objects.DeleteObject(ctx, bucket, getGWContentPath(object)) + l.s3Objects.DeleteObject(ctx, bucket, getGWContentPath(object), opts) return l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object)) } @@ -446,7 +446,7 @@ func (l *s3EncObjects) PutObject(ctx context.Context, bucket string, object stri } if opts.ServerSideEncryption == nil { defer l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object)) - defer l.DeleteObject(ctx, bucket, getGWContentPath(object)) + defer l.DeleteObject(ctx, bucket, getGWContentPath(object), opts) return l.s3Objects.PutObject(ctx, bucket, object, data, minio.ObjectOptions{UserDefined: opts.UserDefined}) } @@ -470,7 +470,7 @@ func (l *s3EncObjects) PutObject(ctx context.Context, bucket string, object stri } objInfo = gwMeta.ToObjectInfo(bucket, object) // delete any unencrypted content of the same name created previously - l.s3Objects.DeleteObject(ctx, bucket, object) + l.s3Objects.DeleteObject(ctx, bucket, object, opts) return objInfo, nil } @@ -586,7 +586,7 @@ func (l *s3EncObjects) AbortMultipartUpload(ctx context.Context, bucket string, return minio.InvalidUploadID{UploadID: uploadID} } for _, obj := range loi.Objects { - if err := l.s3Objects.DeleteObject(ctx, bucket, obj.Name); err != nil { + if _, err := l.s3Objects.DeleteObject(ctx, bucket, obj.Name, minio.ObjectOptions{}); err != nil { return minio.ErrorRespToObjectError(err) } startAfter = obj.Name @@ -608,7 +608,7 @@ func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje if e == nil { // delete any encrypted version of object that might exist defer l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object)) - defer l.DeleteObject(ctx, bucket, getGWContentPath(object)) + defer l.DeleteObject(ctx, bucket, getGWContentPath(object), opts) } return oi, e } @@ -640,7 +640,7 @@ func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje } //delete any unencrypted version of object that might be on the backend - defer l.s3Objects.DeleteObject(ctx, bucket, object) + defer l.s3Objects.DeleteObject(ctx, bucket, object, opts) // Save the final object size and modtime. 
gwMeta.Stat.Size = objectSize @@ -665,7 +665,7 @@ func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje break } startAfter = obj.Name - l.s3Objects.DeleteObject(ctx, bucket, obj.Name) + l.s3Objects.DeleteObject(ctx, bucket, obj.Name, opts) } continuationToken = loi.NextContinuationToken if !loi.IsTruncated || done { @@ -716,7 +716,7 @@ func (l *s3EncObjects) cleanupStaleEncMultipartUploadsOnGW(ctx context.Context, for _, b := range buckets { expParts := l.getStalePartsForBucket(ctx, b.Name, expiry) for k := range expParts { - l.s3Objects.DeleteObject(ctx, b.Name, k) + l.s3Objects.DeleteObject(ctx, b.Name, k, minio.ObjectOptions{}) } } } @@ -783,7 +783,7 @@ func (l *s3EncObjects) DeleteBucket(ctx context.Context, bucket string, forceDel } } for k := range expParts { - l.s3Objects.DeleteObject(ctx, bucket, k) + l.s3Objects.DeleteObject(ctx, bucket, k, minio.ObjectOptions{}) } err := l.Client.RemoveBucket(bucket) if err != nil { diff --git a/cmd/gateway/s3/gateway-s3.go b/cmd/gateway/s3/gateway-s3.go index 61ab1ee2e..4f3d7887d 100644 --- a/cmd/gateway/s3/gateway-s3.go +++ b/cmd/gateway/s3/gateway-s3.go @@ -287,8 +287,8 @@ func (l *s3Objects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageIn } // MakeBucket creates a new container on S3 backend. -func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { - if lockEnabled { +func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error { + if opts.LockEnabled || opts.VersioningEnabled { return minio.NotImplemented{} } @@ -302,7 +302,7 @@ func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket, location if s3utils.CheckValidBucketName(bucket) != nil { return minio.BucketNameInvalid{Bucket: bucket} } - err := l.Client.MakeBucket(bucket, location) + err := l.Client.MakeBucket(bucket, opts.Location) if err != nil { return minio.ErrorRespToObjectError(err, bucket) } @@ -518,21 +518,30 @@ func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject } // DeleteObject deletes a blob in bucket -func (l *s3Objects) DeleteObject(ctx context.Context, bucket string, object string) error { +func (l *s3Objects) DeleteObject(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { err := l.Client.RemoveObject(bucket, object) if err != nil { - return minio.ErrorRespToObjectError(err, bucket, object) + return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object) } - return nil + return minio.ObjectInfo{ + Bucket: bucket, + Name: object, + }, nil } -func (l *s3Objects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { +func (l *s3Objects) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) { errs := make([]error, len(objects)) + dobjects := make([]minio.DeletedObject, len(objects)) for idx, object := range objects { - errs[idx] = l.DeleteObject(ctx, bucket, object) + _, errs[idx] = l.DeleteObject(ctx, bucket, object.ObjectName, opts) + if errs[idx] == nil { + dobjects[idx] = minio.DeletedObject{ + ObjectName: object.ObjectName, + } + } } - return errs, nil + return dobjects, errs } // ListMultipartUploads lists all multipart uploads. 
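Note the signature change running through all of these backends: DeleteObject now returns the deleted object's ObjectInfo alongside the error. A hypothetical caller migrating to the new shape (the helper name and log line are illustrative, not part of this PR):

package gateway // illustrative

import (
	"context"
	"fmt"

	minio "github.com/minio/minio/cmd"
)

func deleteAndLog(ctx context.Context, objAPI minio.ObjectLayer, bucket, object string) error {
	oi, err := objAPI.DeleteObject(ctx, bucket, object, minio.ObjectOptions{})
	if err != nil {
		return minio.ErrorRespToObjectError(err, bucket, object)
	}
	// Gateways fill in only Bucket and Name; the erasure backend also
	// populates version information for versioned buckets.
	fmt.Printf("deleted %s/%s\n", oi.Bucket, oi.Name)
	return nil
}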
@@ -700,11 +709,10 @@ func (l *s3Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error } // GetObjectTags gets the tags set on the object -func (l *s3Objects) GetObjectTags(ctx context.Context, bucket string, object string) (*tags.Tags, error) { +func (l *s3Objects) GetObjectTags(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (*tags.Tags, error) { var err error var tagObj *tags.Tags var tagStr string - var opts minio.ObjectOptions if _, err = l.GetObjectInfo(ctx, bucket, object, opts); err != nil { return nil, minio.ErrorRespToObjectError(err, bucket, object) @@ -721,7 +729,7 @@ func (l *s3Objects) GetObjectTags(ctx context.Context, bucket string, object str } // PutObjectTags attaches the tags to the object -func (l *s3Objects) PutObjectTags(ctx context.Context, bucket, object string, tagStr string) error { +func (l *s3Objects) PutObjectTags(ctx context.Context, bucket, object string, tagStr string, opts minio.ObjectOptions) error { tagObj, err := tags.Parse(tagStr, true) if err != nil { return minio.ErrorRespToObjectError(err, bucket, object) @@ -733,7 +741,7 @@ func (l *s3Objects) PutObjectTags(ctx context.Context, bucket, object string, ta } // DeleteObjectTags removes the tags attached to the object -func (l *s3Objects) DeleteObjectTags(ctx context.Context, bucket, object string) error { +func (l *s3Objects) DeleteObjectTags(ctx context.Context, bucket, object string, opts minio.ObjectOptions) error { if err := l.Client.RemoveObjectTagging(bucket, object); err != nil { return minio.ErrorRespToObjectError(err, bucket, object) } diff --git a/cmd/generic-handlers.go b/cmd/generic-handlers.go index 0b07ac62f..85953c171 100644 --- a/cmd/generic-handlers.go +++ b/cmd/generic-handlers.go @@ -103,7 +103,7 @@ func isHTTPHeaderSizeTooLarge(header http.Header) bool { length := len(key) + len(header.Get(key)) size += length for _, prefix := range userMetadataKeyPrefixes { - if HasPrefix(key, prefix) { + if strings.HasPrefix(strings.ToLower(key), prefix) { usersize += length break } @@ -444,74 +444,75 @@ func setIgnoreResourcesHandler(h http.Handler) http.Handler { return resourceHandler{h} } +var supportedDummyBucketAPIs = map[string][]string{ + "acl": {http.MethodPut, http.MethodGet}, + "cors": {http.MethodGet}, + "website": {http.MethodGet, http.MethodDelete}, + "logging": {http.MethodGet}, + "accelerate": {http.MethodGet}, + "replication": {http.MethodGet}, + "requestPayment": {http.MethodGet}, +} + +// List of not implemented bucket queries +var notImplementedBucketResourceNames = map[string]struct{}{ + "cors": {}, + "metrics": {}, + "website": {}, + "logging": {}, + "inventory": {}, + "accelerate": {}, + "replication": {}, + "requestPayment": {}, +} + // Checks requests for not implemented Bucket resources func ignoreNotImplementedBucketResources(req *http.Request) bool { for name := range req.URL.Query() { - // Enable PutBucketACL, GetBucketACL, GetBucketCors, - // GetBucketWebsite, GetBucketAcccelerate, - // GetBucketRequestPayment, GetBucketLogging, - // GetBucketLifecycle, GetBucketReplication, - // GetBucketTagging, GetBucketVersioning, - // DeleteBucketTagging, and DeleteBucketWebsite - // dummy calls specifically. 
- if name == "acl" && req.Method == http.MethodPut { - return false - } - if ((name == "acl" || - name == "cors" || - name == "website" || - name == "accelerate" || - name == "requestPayment" || - name == "logging" || - name == "lifecycle" || - name == "replication" || - name == "tagging" || - name == "versioning") && req.Method == http.MethodGet) || - ((name == "tagging" || - name == "website") && req.Method == http.MethodDelete) { - return false + methods, ok := supportedDummyBucketAPIs[name] + if ok { + for _, method := range methods { + if method == req.Method { + return false + } + } } - if notImplementedBucketResourceNames[name] { + if _, ok := notImplementedBucketResourceNames[name]; ok { return true } } return false } +var supportedDummyObjectAPIs = map[string][]string{ + "acl": {http.MethodGet, http.MethodPut}, +} + +// List of not implemented object APIs +var notImplementedObjectResourceNames = map[string]struct{}{ + "restore": {}, + "torrent": {}, +} + // Checks requests for not implemented Object resources func ignoreNotImplementedObjectResources(req *http.Request) bool { for name := range req.URL.Query() { - // Enable Get/PutObjectACL dummy call specifically. - if name == "acl" && (req.Method == http.MethodGet || req.Method == http.MethodPut) { - return false + methods, ok := supportedDummyObjectAPIs[name] + if ok { + for _, method := range methods { + if method == req.Method { + return false + } + } } - if notImplementedObjectResourceNames[name] { + if _, ok := notImplementedObjectResourceNames[name]; ok { return true } } return false } -// List of not implemented bucket queries -var notImplementedBucketResourceNames = map[string]bool{ - "accelerate": true, - "cors": true, - "inventory": true, - "logging": true, - "metrics": true, - "replication": true, - "requestPayment": true, - "versioning": true, - "website": true, -} - -// List of not implemented object queries -var notImplementedObjectResourceNames = map[string]bool{ - "restore": true, - "torrent": true, -} - // Resource handler ServeHTTP() wrapper func (h resourceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { bucketName, objectName := request2BucketObjectName(r) diff --git a/cmd/generic-handlers_test.go b/cmd/generic-handlers_test.go index b91ce282c..1a6f4f11f 100644 --- a/cmd/generic-handlers_test.go +++ b/cmd/generic-handlers_test.go @@ -199,12 +199,16 @@ var containsReservedMetadataTests = []struct { } func TestContainsReservedMetadata(t *testing.T) { - for i, test := range containsReservedMetadataTests { - if contains := containsReservedMetadata(test.header); contains && !test.shouldFail { - t.Errorf("Test %d: contains reserved header but should not fail", i) - } else if !contains && test.shouldFail { - t.Errorf("Test %d: does not contain reserved header but failed", i) - } + for _, test := range containsReservedMetadataTests { + test := test + t.Run("", func(t *testing.T) { + contains := containsReservedMetadata(test.header) + if contains && !test.shouldFail { + t.Errorf("contains reserved header but should not fail") + } else if !contains && test.shouldFail { + t.Errorf("does not contain reserved header but failed") + } + }) } } diff --git a/cmd/global-heal.go b/cmd/global-heal.go index 7309eb7db..d11f591c8 100644 --- a/cmd/global-heal.go +++ b/cmd/global-heal.go @@ -79,7 +79,7 @@ func getLocalBackgroundHealStatus() madmin.BgHealState { } // healErasureSet lists and heals all objects in a specific erasure set -func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects, drivesPerSet int) 
error { +func healErasureSet(ctx context.Context, setIndex int, xlObj *erasureObjects, drivesPerSet int) error { buckets, err := xlObj.ListBuckets(ctx) if err != nil { return err @@ -105,32 +105,34 @@ func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects, drivesP for _, bucket := range buckets { // Heal current bucket bgSeq.sourceCh <- healSource{ - path: bucket.Name, + bucket: bucket.Name, } - var entryChs []FileInfoCh + var entryChs []FileInfoVersionsCh for _, disk := range xlObj.getLoadBalancedDisks() { if disk == nil { // Disk can be offline continue } - entryCh, err := disk.Walk(bucket.Name, "", "", true, xlMetaJSONFile, readMetadata, ctx.Done()) + + entryCh, err := disk.WalkVersions(bucket.Name, "", "", true, ctx.Done()) if err != nil { // Disk walk returned error, ignore it. continue } - entryChs = append(entryChs, FileInfoCh{ + + entryChs = append(entryChs, FileInfoVersionsCh{ Ch: entryCh, }) } entriesValid := make([]bool, len(entryChs)) - entries := make([]FileInfo, len(entryChs)) + entries := make([]FileInfoVersions, len(entryChs)) for { - entry, quorumCount, ok := lexicallySortedEntry(entryChs, entries, entriesValid) + entry, quorumCount, ok := lexicallySortedEntryVersions(entryChs, entries, entriesValid) if !ok { - return nil + break } if quorumCount == drivesPerSet { @@ -138,8 +140,12 @@ func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects, drivesP continue } - bgSeq.sourceCh <- healSource{ - path: pathJoin(bucket.Name, entry.Name), + for _, version := range entry.Versions { + bgSeq.sourceCh <- healSource{ + bucket: bucket.Name, + object: version.Name, + versionID: version.VersionID, + } } } } @@ -148,13 +154,15 @@ func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects, drivesP } // deepHealObject heals given object path in deep to fix bitrot. -func deepHealObject(objectPath string) { +func deepHealObject(bucket, object, versionID string) { // Get background heal sequence to send elements to heal bgSeq, _ := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID) bgSeq.sourceCh <- healSource{ - path: objectPath, - opts: &madmin.HealOpts{ScanMode: madmin.HealDeepScan}, + bucket: bucket, + object: object, + versionID: versionID, + opts: &madmin.HealOpts{ScanMode: madmin.HealDeepScan}, } } @@ -172,7 +180,7 @@ func durationToNextHealRound(lastHeal time.Time) time.Duration { } // Healing leader will take the charge of healing all erasure sets -func execLeaderTasks(ctx context.Context, z *xlZones) { +func execLeaderTasks(ctx context.Context, z *erasureZones) { // So that we don't heal immediately, but after one month. lastScanTime := UTCNow() // Get background heal sequence to send elements to heal @@ -211,7 +219,7 @@ func execLeaderTasks(ctx context.Context, z *xlZones) { } func startGlobalHeal(ctx context.Context, objAPI ObjectLayer) { - zones, ok := objAPI.(*xlZones) + zones, ok := objAPI.(*erasureZones) if !ok { return } diff --git a/cmd/globals.go b/cmd/globals.go index fae732fc9..585380222 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -61,8 +61,8 @@ const ( globalNetBSDOSName = "netbsd" globalMacOSName = "darwin" globalMinioModeFS = "mode-server-fs" - globalMinioModeXL = "mode-server-xl" - globalMinioModeDistXL = "mode-server-distributed-xl" + globalMinioModeErasure = "mode-server-xl" + globalMinioModeDistErasure = "mode-server-distributed-xl" globalMinioModeGatewayPrefix = "mode-gateway-" // Add new global values here. @@ -107,13 +107,13 @@ var globalCLIContext = struct { var ( // Indicates set drive count. 
- globalXLSetDriveCount int + globalErasureSetDriveCount int // Indicates if the running minio server is distributed setup. - globalIsDistXL = false + globalIsDistErasure = false // Indicates if the running minio server is an erasure-code backend. - globalIsXL = false + globalIsErasure = false // Indicates if the running minio is in gateway mode. globalIsGateway = false @@ -215,6 +215,7 @@ var ( globalBucketObjectLockSys *BucketObjectLockSys globalBucketQuotaSys *BucketQuotaSys + globalBucketVersioningSys *BucketVersioningSys // Disk cache drives globalCacheConfig cache.Config diff --git a/cmd/handler-utils.go b/cmd/handler-utils.go index cb2baab90..f013ea614 100644 --- a/cmd/handler-utils.go +++ b/cmd/handler-utils.go @@ -445,7 +445,7 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) { // gets host name for current node func getHostName(r *http.Request) (hostName string) { - if globalIsDistXL { + if globalIsDistErasure { hostName = GetLocalPeer(globalEndpoints) } else { hostName = r.Host diff --git a/cmd/http-tracer.go b/cmd/http-tracer.go index 01ac76a26..fe97dcaa1 100644 --- a/cmd/http-tracer.go +++ b/cmd/http-tracer.go @@ -114,7 +114,7 @@ func Trace(f http.HandlerFunc, logBody bool, w http.ResponseWriter, r *http.Requ reqBodyRecorder = &recordRequest{Reader: r.Body, logBody: logBody, headers: reqHeaders} r.Body = ioutil.NopCloser(reqBodyRecorder) t.NodeName = r.Host - if globalIsDistXL { + if globalIsDistErasure { t.NodeName = GetLocalPeer(globalEndpoints) } // strip port from the host address diff --git a/cmd/http/headers.go b/cmd/http/headers.go index 969db491b..a9c40185b 100644 --- a/cmd/http/headers.go +++ b/cmd/http/headers.go @@ -56,6 +56,10 @@ const ( // S3 storage class AmzStorageClass = "x-amz-storage-class" + // S3 object version ID + AmzVersionID = "x-amz-version-id" + AmzDeleteMarker = "x-amz-delete-marker" + // S3 object tagging AmzObjectTagging = "X-Amz-Tagging" AmzTagCount = "x-amz-tagging-count" diff --git a/cmd/iam.go b/cmd/iam.go index 18081ae68..dfbc0a01e 100644 --- a/cmd/iam.go +++ b/cmd/iam.go @@ -469,7 +469,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) { } // These messages are meant primarily for distributed setup, so only log during distributed setup. - if globalIsDistXL { + if globalIsDistErasure { logger.Info("Waiting for all MinIO IAM sub-system to be initialized.. lock acquired") } diff --git a/cmd/lock-rest-server.go b/cmd/lock-rest-server.go index 147994b86..d925aeef3 100644 --- a/cmd/lock-rest-server.go +++ b/cmd/lock-rest-server.go @@ -270,10 +270,10 @@ func lockMaintenance(ctx context.Context, interval time.Duration) error { } // For read locks we assume quorum to be N/2 success - quorum := globalXLSetDriveCount / 2 + quorum := globalErasureSetDriveCount / 2 if nlrip.lri.Writer { // For write locks we need N/2+1 success - quorum = globalXLSetDriveCount/2 + 1 + quorum = globalErasureSetDriveCount/2 + 1 } // less than the quorum, we have locks expired. diff --git a/cmd/merge-walk-pool.go b/cmd/merge-walk-pool.go index b30be139b..309c1bca9 100644 --- a/cmd/merge-walk-pool.go +++ b/cmd/merge-walk-pool.go @@ -26,6 +26,13 @@ const ( globalMergeLookupTimeout = time.Minute * 1 // 1 minute. ) +// mergeWalkVersions - represents the go routine that does the merge walk over object versions. +type mergeWalkVersions struct { + entryChs []FileInfoVersionsCh + endWalkCh chan struct{} // To signal when mergeWalk go-routine should end. + endTimerCh chan<- struct{} // To signal when timer go-routine should end.
+} + // mergeWalk - represents the go routine that does the merge walk. type mergeWalk struct { entryChs []FileInfoCh @@ -33,6 +40,103 @@ type mergeWalk struct { endTimerCh chan<- struct{} // To signal when timer go-routine should end. } +// MergeWalkVersionsPool - pool of mergeWalk go routines. +// A mergeWalk is added to the pool by Set() and removed either by +// doing a Release() or if the concerned timer goes off. +// mergeWalkPool's purpose is to maintain active mergeWalk go-routines in a map so that +// it can be looked up across related list calls. +type MergeWalkVersionsPool struct { + sync.Mutex + pool map[listParams][]mergeWalkVersions + timeOut time.Duration +} + +// NewMergeWalkVersionsPool - initialize new tree walk pool for versions. +func NewMergeWalkVersionsPool(timeout time.Duration) *MergeWalkVersionsPool { + tPool := &MergeWalkVersionsPool{ + pool: make(map[listParams][]mergeWalkVersions), + timeOut: timeout, + } + return tPool +} + +// Release - similar to mergeWalkPool.Release but for versions. +func (t *MergeWalkVersionsPool) Release(params listParams) ([]FileInfoVersionsCh, chan struct{}) { + t.Lock() + defer t.Unlock() + walks, ok := t.pool[params] // Pick the valid walks. + if !ok || len(walks) == 0 { + // Release return nil if params not found. + return nil, nil + } + + // Pop out the first valid walk entry. + walk := walks[0] + walks = walks[1:] + if len(walks) > 0 { + t.pool[params] = walks + } else { + delete(t.pool, params) + } + walk.endTimerCh <- struct{}{} + return walk.entryChs, walk.endWalkCh +} + +// Set - similar to mergeWalkPool.Set but for file versions +func (t *MergeWalkVersionsPool) Set(params listParams, resultChs []FileInfoVersionsCh, endWalkCh chan struct{}) { + t.Lock() + defer t.Unlock() + + // Should be a buffered channel so that Release() never blocks. + endTimerCh := make(chan struct{}, 1) + + walkInfo := mergeWalkVersions{ + entryChs: resultChs, + endWalkCh: endWalkCh, + endTimerCh: endTimerCh, + } + + // Append new walk info. + t.pool[params] = append(t.pool[params], walkInfo) + + // Timer go-routine which times out after t.timeOut seconds. + go func(endTimerCh <-chan struct{}, walkInfo mergeWalkVersions) { + select { + // Wait until timeOut + case <-time.After(t.timeOut): + // Timeout has expired. Remove the mergeWalk from mergeWalkPool and + // end the mergeWalk go-routine. + t.Lock() + walks, ok := t.pool[params] + if ok { + // Trick of filtering without allocating + // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating + nwalks := walks[:0] + // Look for walkInfo, remove it from the walks list. + for _, walk := range walks { + if !reflect.DeepEqual(walk, walkInfo) { + nwalks = append(nwalks, walk) + } + } + if len(nwalks) == 0 { + // No more mergeWalk go-routines associated with listParams + // hence remove map entry. + delete(t.pool, params) + } else { + // There are more mergeWalk go-routines associated with listParams + // hence save the list in the map. + t.pool[params] = nwalks + } + } + // Signal the mergeWalk go-routine to die. + close(endWalkCh) + t.Unlock() + case <-endTimerCh: + return + } + }(endTimerCh, walkInfo) +} + // MergeWalkPool - pool of mergeWalk go routines. // A mergeWalk is added to the pool by Set() and removed either by // doing a Release() or if the concerned timer goes off. @@ -84,7 +188,7 @@ func (t *MergeWalkPool) Release(params listParams) ([]FileInfoCh, chan struct{}) // 1) time.After() expires after t.timeOut seconds. 
// The expiration is needed so that the mergeWalk go-routine resources are freed after a timeout // if the S3 client does only partial listing of objects. -// 2) Relase() signals the timer go-routine to end on endTimerCh. +// 2) Release() signals the timer go-routine to end on endTimerCh. // During listing the timer should not timeout and end the mergeWalk go-routine, hence the // timer go-routine should be ended. func (t *MergeWalkPool) Set(params listParams, resultChs []FileInfoCh, endWalkCh chan struct{}) { diff --git a/cmd/metrics.go b/cmd/metrics.go index b534cf8af..b5c8ec87b 100644 --- a/cmd/metrics.go +++ b/cmd/metrics.go @@ -97,7 +97,7 @@ func (c *minioCollector) Collect(ch chan<- prometheus.Metric) { // collects healing specific metrics for MinIO instance in Prometheus specific format // and sends to given channel func healingMetricsPrometheus(ch chan<- prometheus.Metric) { - if !globalIsXL { + if !globalIsErasure { return } bgSeq, exists := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID) diff --git a/cmd/namespace-lock.go b/cmd/namespace-lock.go index 23f0b32a2..42df2a2c5 100644 --- a/cmd/namespace-lock.go +++ b/cmd/namespace-lock.go @@ -45,11 +45,11 @@ type RWLocker interface { } // newNSLock - return a new name space lock map. -func newNSLock(isDistXL bool) *nsLockMap { +func newNSLock(isDistErasure bool) *nsLockMap { nsMutex := nsLockMap{ - isDistXL: isDistXL, + isDistErasure: isDistErasure, } - if isDistXL { + if isDistErasure { return &nsMutex } nsMutex.lockMap = make(map[string]*nsLock) @@ -66,9 +66,9 @@ type nsLock struct { // Unlock, RLock and RUnlock. type nsLockMap struct { // Indicates if namespace is part of a distributed setup. - isDistXL bool - lockMap map[string]*nsLock - lockMapMutex sync.Mutex + isDistErasure bool + lockMap map[string]*nsLock + lockMapMutex sync.Mutex } // Lock the namespace resource. @@ -190,7 +190,7 @@ type localLockInstance struct { // volume, path and operation ID. func (n *nsLockMap) NewNSLock(ctx context.Context, lockersFn func() []dsync.NetLocker, volume string, paths ...string) RWLocker { opsID := mustGetUUID() - if n.isDistXL { + if n.isDistErasure { drwmutex := dsync.NewDRWMutex(ctx, &dsync.Dsync{ GetLockersFn: lockersFn, }, pathsJoinPrefix(volume, paths...)...) 
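MergeWalkVersionsPool mirrors the existing MergeWalkPool so that paginated ListObjectVersions calls can park their merge-walk channels between requests instead of restarting the walk from scratch. A usage sketch of that lifecycle; startVersionsWalk is a hypothetical stand-in for fanning out disk.WalkVersions across the online disks, and the real wiring lives in the erasure list code:

// Illustrative flow, in the style of the existing list pool usage:
// Release() at the start of a page, Set() at the end when the listing
// is truncated.
func listVersionsPage(pool *MergeWalkVersionsPool, params listParams, truncated bool) {
	entryChs, endWalkCh := pool.Release(params)
	if entryChs == nil {
		// No parked walk: either this is the first page or the pool
		// timer already fired and ended the previous walk.
		endWalkCh = make(chan struct{})
		entryChs = startVersionsWalk(endWalkCh)
	}

	// ... drain entryChs via lexicallySortedEntryVersions into the
	// response here ...

	if truncated {
		// Park the live channels so the next page resumes this walk.
		pool.Set(params, entryChs, endWalkCh)
	} else {
		// Listing finished: signal the walk producers to exit.
		close(endWalkCh)
	}
}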
diff --git a/cmd/naughty-disk_test.go b/cmd/naughty-disk_test.go index 4ad887936..98601250b 100644 --- a/cmd/naughty-disk_test.go +++ b/cmd/naughty-disk_test.go @@ -142,18 +142,25 @@ func (d *naughtyDisk) WalkSplunk(volume, path, marker string, endWalkCh <-chan s return d.disk.WalkSplunk(volume, path, marker, endWalkCh) } -func (d *naughtyDisk) Walk(volume, path, marker string, recursive bool, leafFile string, readMetadataFn readMetadataFunc, endWalkCh <-chan struct{}) (chan FileInfo, error) { +func (d *naughtyDisk) WalkVersions(volume, path, marker string, recursive bool, endWalkVersionsCh <-chan struct{}) (chan FileInfoVersions, error) { if err := d.calcError(); err != nil { return nil, err } - return d.disk.Walk(volume, path, marker, recursive, leafFile, readMetadataFn, endWalkCh) + return d.disk.WalkVersions(volume, path, marker, recursive, endWalkVersionsCh) } -func (d *naughtyDisk) ListDir(volume, path string, count int, leafFile string) (entries []string, err error) { +func (d *naughtyDisk) Walk(volume, path, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfo, error) { + if err := d.calcError(); err != nil { + return nil, err + } + return d.disk.Walk(volume, path, marker, recursive, endWalkCh) +} + +func (d *naughtyDisk) ListDir(volume, path string, count int) (entries []string, err error) { if err := d.calcError(); err != nil { return []string{}, err } - return d.disk.ListDir(volume, path, count, leafFile) + return d.disk.ListDir(volume, path, count) } func (d *naughtyDisk) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) { @@ -184,6 +191,13 @@ func (d *naughtyDisk) AppendFile(volume, path string, buf []byte) error { return d.disk.AppendFile(volume, path, buf) } +func (d *naughtyDisk) RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath string) error { + if err := d.calcError(); err != nil { + return err + } + return d.disk.RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath) +} + func (d *naughtyDisk) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error { if err := d.calcError(); err != nil { return err @@ -191,11 +205,18 @@ func (d *naughtyDisk) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) return d.disk.RenameFile(srcVolume, srcPath, dstVolume, dstPath) } -func (d *naughtyDisk) StatFile(volume string, path string) (file FileInfo, err error) { +func (d *naughtyDisk) CheckParts(volume string, path string, fi FileInfo) (err error) { if err := d.calcError(); err != nil { - return FileInfo{}, err + return err } - return d.disk.StatFile(volume, path) + return d.disk.CheckParts(volume, path, fi) +} + +func (d *naughtyDisk) CheckFile(volume string, path string) (err error) { + if err := d.calcError(); err != nil { + return err + } + return d.disk.CheckFile(volume, path) } func (d *naughtyDisk) DeleteFile(volume string, path string) (err error) { @@ -205,19 +226,36 @@ func (d *naughtyDisk) DeleteFile(volume string, path string) (err error) { return d.disk.DeleteFile(volume, path) } -func (d *naughtyDisk) DeleteFileBulk(volume string, paths []string) ([]error, error) { - errs := make([]error, len(paths)) - for idx, path := range paths { - errs[idx] = d.disk.DeleteFile(volume, path) +func (d *naughtyDisk) DeleteVersions(volume string, versions []FileInfo) []error { + if err := d.calcError(); err != nil { + errs := make([]error, len(versions)) + for i := range errs { + errs[i] = err + } + return errs } - return errs, nil + return d.disk.DeleteVersions(volume, versions) } 
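Every naughtyDisk method above follows the same two-step shape, which is why the new StorageAPI calls (WalkVersions, RenameData, CheckParts, CheckFile, DeleteVersions, ...) slot in mechanically: consult the programmed error schedule, then delegate to the wrapped disk. A sketch of that pattern, assuming calcError pops faults from a per-call schedule; the field and type names here are illustrative, not the actual test helper:

    // Illustrative shape of the fault-injecting wrapper used by these tests.
    type faultyDisk struct {
        disk   StorageAPI    // real backend being wrapped
        errs   map[int]error // call number -> fault to inject
        callNR int
    }

    func (d *faultyDisk) calcError() error {
        d.callNR++
        if err, ok := d.errs[d.callNR]; ok {
            return err // scheduled fault for this call
        }
        return nil
    }

    // Every wrapped StorageAPI method then reduces to the same two steps:
    func (d *faultyDisk) ReadVersion(volume, path, versionID string) (FileInfo, error) {
        if err := d.calcError(); err != nil {
            return FileInfo{}, err
        }
        return d.disk.ReadVersion(volume, path, versionID)
    }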
-func (d *naughtyDisk) DeletePrefixes(volume string, paths []string) ([]error, error) {
+func (d *naughtyDisk) WriteMetadata(volume, path string, fi FileInfo) (err error) {
 	if err := d.calcError(); err != nil {
-		return nil, err
+		return err
 	}
-	return d.disk.DeletePrefixes(volume, paths)
+	return d.disk.WriteMetadata(volume, path, fi)
+}
+
+func (d *naughtyDisk) DeleteVersion(volume string, path string, fi FileInfo) (err error) {
+	if err := d.calcError(); err != nil {
+		return err
+	}
+	return d.disk.DeleteVersion(volume, path, fi)
+}
+
+func (d *naughtyDisk) ReadVersion(volume string, path string, versionID string) (fi FileInfo, err error) {
+	if err := d.calcError(); err != nil {
+		return FileInfo{}, err
+	}
+	return d.disk.ReadVersion(volume, path, versionID)
 }
 
 func (d *naughtyDisk) WriteAll(volume string, path string, reader io.Reader) (err error) {
@@ -234,9 +272,9 @@ func (d *naughtyDisk) ReadAll(volume string, path string) (buf []byte, err error
 	return d.disk.ReadAll(volume, path)
 }
 
-func (d *naughtyDisk) VerifyFile(volume, path string, size int64, algo BitrotAlgorithm, sum []byte, shardSize int64) error {
+func (d *naughtyDisk) VerifyFile(volume, path string, fi FileInfo) error {
 	if err := d.calcError(); err != nil {
 		return err
 	}
-	return d.disk.VerifyFile(volume, path, size, algo, sum, shardSize)
+	return d.disk.VerifyFile(volume, path, fi)
 }
diff --git a/cmd/notification.go b/cmd/notification.go
index 37fa8e323..c68eeabf3 100644
--- a/cmd/notification.go
+++ b/cmd/notification.go
@@ -628,7 +628,7 @@ func (sys *NotificationSys) load(buckets []BucketInfo, objAPI ObjectLayer) error
 	return nil
 }
 
 // Init - initializes notification system from notification.xml and listener.json of all buckets.
func (sys *NotificationSys) Init(buckets []BucketInfo, objAPI ObjectLayer) error { if objAPI == nil { return errServerNotInitialized @@ -1247,7 +1247,7 @@ func (args eventArgs) ToEvent(escape bool) event.Event { }, Object: event.Object{ Key: keyName, - VersionID: "1", + VersionID: args.Object.VersionID, Sequencer: uniqueID, }, }, diff --git a/cmd/obdinfo.go b/cmd/obdinfo.go index 2278dded2..b57d81a99 100644 --- a/cmd/obdinfo.go +++ b/cmd/obdinfo.go @@ -34,7 +34,7 @@ import ( func getLocalCPUOBDInfo(ctx context.Context, r *http.Request) madmin.ServerCPUOBDInfo { addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(globalEndpoints) } @@ -103,8 +103,9 @@ func getLocalDrivesOBD(ctx context.Context, parallel bool, endpointZones Endpoin } } wg.Wait() + addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(endpointZones) } if parallel { @@ -121,7 +122,7 @@ func getLocalDrivesOBD(ctx context.Context, parallel bool, endpointZones Endpoin func getLocalMemOBD(ctx context.Context, r *http.Request) madmin.ServerMemOBDInfo { addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(globalEndpoints) } @@ -150,7 +151,7 @@ func getLocalMemOBD(ctx context.Context, r *http.Request) madmin.ServerMemOBDInf func getLocalProcOBD(ctx context.Context, r *http.Request) madmin.ServerProcOBDInfo { addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(globalEndpoints) } @@ -371,7 +372,7 @@ func getLocalProcOBD(ctx context.Context, r *http.Request) madmin.ServerProcOBDI func getLocalOsInfoOBD(ctx context.Context, r *http.Request) madmin.ServerOsOBDInfo { addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(globalEndpoints) } diff --git a/cmd/obdinfo_freebsd.go b/cmd/obdinfo_freebsd.go index 845b17ff0..8aab2895f 100644 --- a/cmd/obdinfo_freebsd.go +++ b/cmd/obdinfo_freebsd.go @@ -26,7 +26,7 @@ import ( func getLocalDiskHwOBD(ctx context.Context, r *http.Request) madmin.ServerDiskHwOBDInfo { addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(globalEndpoints) } diff --git a/cmd/obdinfo_other.go b/cmd/obdinfo_other.go index 44645bbe5..0b323b3af 100644 --- a/cmd/obdinfo_other.go +++ b/cmd/obdinfo_other.go @@ -30,7 +30,7 @@ import ( func getLocalDiskHwOBD(ctx context.Context, r *http.Request) madmin.ServerDiskHwOBDInfo { addr := r.Host - if globalIsDistXL { + if globalIsDistErasure { addr = GetLocalPeer(globalEndpoints) } diff --git a/cmd/object-api-common.go b/cmd/object-api-common.go index 6811d851d..55eb73659 100644 --- a/cmd/object-api-common.go +++ b/cmd/object-api-common.go @@ -82,11 +82,11 @@ func dirObjectInfo(bucket, object string, size int64, metadata map[string]string // Depending on the disk type network or local, initialize storage API. func newStorageAPI(endpoint Endpoint) (storage StorageAPI, err error) { if endpoint.IsLocal { - storage, err := newPosix(endpoint.Path, endpoint.Host) + storage, err := newXLStorage(endpoint.Path, endpoint.Host) if err != nil { return nil, err } - return &posixDiskIDCheck{storage: storage}, nil + return &xlStorageDiskIDCheck{storage: storage}, nil } return newStorageRESTClient(endpoint), nil @@ -105,7 +105,7 @@ func cleanupDir(ctx context.Context, storage StorageAPI, volume, dirPath string) } // If it's a directory, list and call delFunc() for each entry. 
-	entries, err := storage.ListDir(volume, entryPath, -1, "")
+	entries, err := storage.ListDir(volume, entryPath, -1)
 	// If entryPath prefix never existed, safe to ignore.
 	if err == errFileNotFound {
 		return nil
@@ -165,7 +165,7 @@ func listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter
 			// ignore quorum error as it might be an entry from an outdated disk.
 			if IsErrIgnored(err, []error{
 				errFileNotFound,
-				errXLReadQuorum,
+				errErasureReadQuorum,
 			}...) {
 				continue
 			}
@@ -358,7 +358,7 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d
 			// ignore quorum error as it might be an entry from an outdated disk.
 			if IsErrIgnored(err, []error{
 				errFileNotFound,
-				errXLReadQuorum,
+				errErasureReadQuorum,
 			}...) {
 				continue
 			}
diff --git a/cmd/object-api-datatypes.go b/cmd/object-api-datatypes.go
index 926d306ae..815092a9b 100644
--- a/cmd/object-api-datatypes.go
+++ b/cmd/object-api-datatypes.go
@@ -160,6 +160,17 @@ type ObjectInfo struct {
 	// Hex encoded unique entity tag of the object.
 	ETag string
 
+	// Version ID of this object.
+	VersionID string
+
+	// IsLatest indicates if this is the latest current version;
+	// latest can be true for a delete marker or a version.
+	IsLatest bool
+
+	// DeleteMarker indicates if the versionId corresponds
+	// to a delete marker on an object.
+	DeleteMarker bool
+
 	// A standard MIME type describing the format of the object.
 	ContentType string
 
@@ -317,6 +328,53 @@ type ListMultipartsInfo struct {
 	EncodingType string // Not supported yet.
 }
 
+// DeletedObjectInfo - container for deleted objects (delete markers)
+// returned by list object versions.
+type DeletedObjectInfo struct {
+	// Name of the bucket.
+	Bucket string
+
+	// Name of the object.
+	Name string
+
+	// Date and time when the object was last modified.
+	ModTime time.Time
+
+	// Version ID of this object.
+	VersionID string
+
+	// Indicates if the delete marker is the latest version.
+	IsLatest bool
+}
+
+// ListObjectVersionsInfo - container for list objects versions.
+type ListObjectVersionsInfo struct {
+	// Indicates whether the returned list objects response is truncated. A
+	// value of true indicates that the list was truncated. The list can be truncated
+	// if the number of objects exceeds the limit allowed or specified
+	// by max keys.
+	IsTruncated bool
+
+	// When response is truncated (the IsTruncated element value in the response is true),
+	// you can use the key name in this field as marker in the subsequent
+	// request to get next set of objects.
+	//
+	// NOTE: AWS S3 returns NextMarker only if you have delimiter request parameter specified,
+	// MinIO always returns NextMarker.
+	NextMarker string
+
+	// NextVersionIDMarker may be set if IsTruncated is true.
+	NextVersionIDMarker string
+
+	// List of objects info for this request.
+	Objects []ObjectInfo
+
+	// List of deleted objects for this request.
+	DeleteObjects []DeletedObjectInfo
+
+	// List of prefixes for this request.
+	Prefixes []string
+}
+
 // ListObjectsInfo - container for list objects.
 type ListObjectsInfo struct {
 	// Indicates whether the returned list objects response is truncated. A
diff --git a/cmd/object-api-deleteobject_test.go b/cmd/object-api-deleteobject_test.go
index c3844fad8..fbf537b80 100644
--- a/cmd/object-api-deleteobject_test.go
+++ b/cmd/object-api-deleteobject_test.go
@@ -24,7 +24,7 @@ import (
 	"testing"
 )
 
-// Wrapper for calling DeleteObject tests for both XL multiple disks and single node setup.
+// Wrapper for calling DeleteObject tests for both Erasure multiple disks and single node setup.
func TestDeleteObject(t *testing.T) { ExecObjectLayerTest(t, testDeleteObject) } @@ -74,7 +74,7 @@ func testDeleteObject(obj ObjectLayer, instanceType string, t TestErrHandler) { "dir/", []string{"dir/object1", "object0"}, }, - // Test 4: Remove an empty directory and checks it is really removed + // Test 5: Remove an empty directory and checks it is really removed { "bucket5", []objectUpload{{"object0", "content"}, {"dir/", ""}}, @@ -84,8 +84,7 @@ func testDeleteObject(obj ObjectLayer, instanceType string, t TestErrHandler) { } for i, testCase := range testCases { - - err := obj.MakeBucketWithLocation(context.Background(), testCase.bucketName, "", false) + err := obj.MakeBucketWithLocation(context.Background(), testCase.bucketName, BucketOptions{}) if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } @@ -99,16 +98,17 @@ func testDeleteObject(obj ObjectLayer, instanceType string, t TestErrHandler) { } } - // TODO: check the error in the future - _ = obj.DeleteObject(context.Background(), testCase.bucketName, testCase.pathToDelete) + _, _ = obj.DeleteObject(context.Background(), testCase.bucketName, testCase.pathToDelete, ObjectOptions{}) result, err := obj.ListObjects(context.Background(), testCase.bucketName, "", "", "", 1000) if err != nil { t.Errorf("Test %d: %s: Expected to pass, but failed with: %s", i+1, instanceType, err.Error()) + continue } if len(result.Objects) != len(testCase.objectsAfterDelete) { - t.Errorf("Test %d: %s: mismatch number of objects after delete, expected = %d, found = %d", i+1, instanceType, len(testCase.objectsAfterDelete), len(result.Objects)) + t.Errorf("Test %d: %s: mismatch number of objects after delete, expected = %v, found = %v", i+1, instanceType, testCase.objectsAfterDelete, result.Objects) + continue } for idx := range result.Objects { diff --git a/cmd/object-api-errors.go b/cmd/object-api-errors.go index fdc09c90b..5a7c041a5 100644 --- a/cmd/object-api-errors.go +++ b/cmd/object-api-errors.go @@ -66,6 +66,28 @@ func toObjectErr(err error, params ...string) error { Object: params[1], } } + case errFileVersionNotFound: + switch len(params) { + case 2: + err = VersionNotFound{ + Bucket: params[0], + Object: params[1], + } + case 3: + err = VersionNotFound{ + Bucket: params[0], + Object: params[1], + VersionID: params[2], + } + } + case errMethodNotAllowed: + switch len(params) { + case 2: + err = MethodNotAllowed{ + Bucket: params[0], + Object: params[1], + } + } case errFileNotFound: switch len(params) { case 2: @@ -101,9 +123,9 @@ func toObjectErr(err error, params ...string) error { Object: params[1], } } - case errXLReadQuorum: + case errErasureReadQuorum: err = InsufficientReadQuorum{} - case errXLWriteQuorum: + case errErasureWriteQuorum: err = InsufficientWriteQuorum{} case io.ErrUnexpectedEOF, io.ErrShortWrite: err = IncompleteBody{} @@ -150,8 +172,9 @@ func (e InsufficientWriteQuorum) Error() string { // GenericError - generic object layer error. type GenericError struct { - Bucket string - Object string + Bucket string + Object string + VersionID string } // BucketNotFound bucket does not exist. @@ -182,18 +205,32 @@ func (e BucketNotEmpty) Error() string { return "Bucket not empty: " + e.Bucket } +// VersionNotFound object does not exist. +type VersionNotFound GenericError + +func (e VersionNotFound) Error() string { + return "Version not found: " + e.Bucket + "/" + e.Object + "(" + e.VersionID + ")" +} + // ObjectNotFound object does not exist. 
 type ObjectNotFound GenericError
 
 func (e ObjectNotFound) Error() string {
-	return "Object not found: " + e.Bucket + "#" + e.Object
+	return "Object not found: " + e.Bucket + "/" + e.Object
+}
+
+// MethodNotAllowed - method is not allowed on the object
+type MethodNotAllowed GenericError
+
+func (e MethodNotAllowed) Error() string {
+	return "Method not allowed: " + e.Bucket + "/" + e.Object
 }
 
 // ObjectAlreadyExists object already exists.
 type ObjectAlreadyExists GenericError
 
 func (e ObjectAlreadyExists) Error() string {
-	return "Object: " + e.Bucket + "#" + e.Object + " already exists"
+	return "Object: " + e.Bucket + "/" + e.Object + " already exists"
 }
 
 // ObjectExistsAsDirectory object already exists as a directory.
@@ -323,17 +360,17 @@ type ObjectNamePrefixAsSlash GenericError
 
 // Error returns string an error formatted as the given text.
 func (e ObjectNameInvalid) Error() string {
-	return "Object name invalid: " + e.Bucket + "#" + e.Object
+	return "Object name invalid: " + e.Bucket + "/" + e.Object
 }
 
 // Error returns string an error formatted as the given text.
 func (e ObjectNameTooLong) Error() string {
-	return "Object name too long: " + e.Bucket + "#" + e.Object
+	return "Object name too long: " + e.Bucket + "/" + e.Object
 }
 
 // Error returns string an error formatted as the given text.
 func (e ObjectNamePrefixAsSlash) Error() string {
-	return "Object name contains forward slash as pefix: " + e.Bucket + "#" + e.Object
+	return "Object name contains forward slash as prefix: " + e.Bucket + "/" + e.Object
 }
 
 // AllAccessDisabled All access to this object has been disabled
@@ -349,7 +386,7 @@ type IncompleteBody GenericError
 
 // Error returns string an error formatted as the given text.
 func (e IncompleteBody) Error() string {
-	return e.Bucket + "#" + e.Object + "has incomplete body"
+	return e.Bucket + "/" + e.Object + " has incomplete body"
 }
 
 // InvalidRange - invalid range typed error.
@@ -445,9 +482,14 @@ func (e InvalidETag) Error() string {
 }
 
 // NotImplemented If a feature is not implemented
-type NotImplemented struct{}
+type NotImplemented struct {
+	API string
+}
 
 func (e NotImplemented) Error() string {
+	if e.API != "" {
+		return e.API + " is Not Implemented"
+	}
 	return "Not Implemented"
 }
diff --git a/cmd/object-api-getobject_test.go b/cmd/object-api-getobject_test.go
index 3cded55fd..ecb10a711 100644
--- a/cmd/object-api-getobject_test.go
+++ b/cmd/object-api-getobject_test.go
@@ -29,7 +29,7 @@ import (
 	humanize "github.com/dustin/go-humanize"
 )
 
-// Wrapper for calling GetObject tests for both XL multiple disks and single node setup.
+// Wrapper for calling GetObject tests for both Erasure multiple disks and single node setup.
 func TestGetObject(t *testing.T) {
 	ExecObjectLayerTest(t, testGetObject)
 }
@@ -42,7 +42,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	emptyDirName := "test-empty-dir/"
 
 	// create bucket.
-	err := obj.MakeBucketWithLocation(context.Background(), bucketName, "", false)
+	err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{})
 	// Stop the test if creation of the bucket fails.
 	if err != nil {
 		t.Fatalf("%s : %s", instanceType, err.Error())
 	}
@@ -113,7 +113,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
 		{"a", "obj", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Bucket name invalid: a")},
 		// Test case - 5.
 		// Case with invalid object names.
- {bucketName, "", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Object name invalid: "+bucketName+"#")}, + {bucketName, "", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Object name invalid: "+bucketName+"/")}, // Test case - 6. {bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[0], NewEOFWriter(buffers[0], 100), false, []byte{}, io.EOF}, // Test case with start offset set to 0 and length set to size of the object. @@ -194,7 +194,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [ // Setup for the tests. bucketName := getRandomBucketName() // create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) // Stop the test if creation of the bucket fails. if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) @@ -292,19 +292,19 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [ } -// Wrapper for calling GetObject tests for both XL multiple disks and single node setup. +// Wrapper for calling GetObject tests for both Erasure multiple disks and single node setup. func TestGetObjectDiskNotFound(t *testing.T) { ExecObjectLayerDiskAlteredTest(t, testGetObjectDiskNotFound) } // ObjectLayer.GetObject is called with series of cases for valid and erroneous inputs and the result is validated. -// Before the Get Object call XL disks are moved so that the quorum just holds. +// Before the Get Object call Erasure disks are moved so that the quorum just holds. func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []string, t *testing.T) { // Setup for the tests. bucketName := getRandomBucketName() objectName := "test-object" // create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) // Stop the test if creation of the bucket fails. if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) @@ -376,7 +376,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str {"a", "obj", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Bucket name invalid: a")}, // Test case - 5. // Case with invalid object names. - {bucketName, "", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Object name invalid: "+bucketName+"#")}, + {bucketName, "", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Object name invalid: "+bucketName+"/")}, // Test case - 7. {bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[0], NewEOFWriter(buffers[0], 100), false, []byte{}, io.EOF}, // Test case with start offset set to 0 and length set to size of the object. @@ -446,16 +446,16 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str // Benchmarks for ObjectLayer.GetObject(). // The intent is to benchmark GetObject for various sizes ranging from few bytes to 100MB. -// Also each of these Benchmarks are run both XL and FS backends. +// Also each of these Benchmarks are run both Erasure and FS backends. // BenchmarkGetObjectVerySmallFS - Benchmark FS.GetObject() for object size of 10 bytes. func BenchmarkGetObjectVerySmallFS(b *testing.B) { benchmarkGetObject(b, "FS", 10) } -// BenchmarkGetObjectVerySmallXL - Benchmark XL.GetObject() for object size of 10 bytes. 
-func BenchmarkGetObjectVerySmallXL(b *testing.B) { - benchmarkGetObject(b, "XL", 10) +// BenchmarkGetObjectVerySmallErasure - Benchmark Erasure.GetObject() for object size of 10 bytes. +func BenchmarkGetObjectVerySmallErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 10) } // BenchmarkGetObject10KbFS - Benchmark FS.GetObject() for object size of 10KB. @@ -463,9 +463,9 @@ func BenchmarkGetObject10KbFS(b *testing.B) { benchmarkGetObject(b, "FS", 10*humanize.KiByte) } -// BenchmarkGetObject10KbXL - Benchmark XL.GetObject() for object size of 10KB. -func BenchmarkGetObject10KbXL(b *testing.B) { - benchmarkGetObject(b, "XL", 10*humanize.KiByte) +// BenchmarkGetObject10KbErasure - Benchmark Erasure.GetObject() for object size of 10KB. +func BenchmarkGetObject10KbErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 10*humanize.KiByte) } // BenchmarkGetObject100KbFS - Benchmark FS.GetObject() for object size of 100KB. @@ -473,9 +473,9 @@ func BenchmarkGetObject100KbFS(b *testing.B) { benchmarkGetObject(b, "FS", 100*humanize.KiByte) } -// BenchmarkGetObject100KbXL - Benchmark XL.GetObject() for object size of 100KB. -func BenchmarkGetObject100KbXL(b *testing.B) { - benchmarkGetObject(b, "XL", 100*humanize.KiByte) +// BenchmarkGetObject100KbErasure - Benchmark Erasure.GetObject() for object size of 100KB. +func BenchmarkGetObject100KbErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 100*humanize.KiByte) } // BenchmarkGetObject1MbFS - Benchmark FS.GetObject() for object size of 1MB. @@ -483,9 +483,9 @@ func BenchmarkGetObject1MbFS(b *testing.B) { benchmarkGetObject(b, "FS", 1*humanize.MiByte) } -// BenchmarkGetObject1MbXL - Benchmark XL.GetObject() for object size of 1MB. -func BenchmarkGetObject1MbXL(b *testing.B) { - benchmarkGetObject(b, "XL", 1*humanize.MiByte) +// BenchmarkGetObject1MbErasure - Benchmark Erasure.GetObject() for object size of 1MB. +func BenchmarkGetObject1MbErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 1*humanize.MiByte) } // BenchmarkGetObject5MbFS - Benchmark FS.GetObject() for object size of 5MB. @@ -493,9 +493,9 @@ func BenchmarkGetObject5MbFS(b *testing.B) { benchmarkGetObject(b, "FS", 5*humanize.MiByte) } -// BenchmarkGetObject5MbXL - Benchmark XL.GetObject() for object size of 5MB. -func BenchmarkGetObject5MbXL(b *testing.B) { - benchmarkGetObject(b, "XL", 5*humanize.MiByte) +// BenchmarkGetObject5MbErasure - Benchmark Erasure.GetObject() for object size of 5MB. +func BenchmarkGetObject5MbErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 5*humanize.MiByte) } // BenchmarkGetObject10MbFS - Benchmark FS.GetObject() for object size of 10MB. @@ -503,9 +503,9 @@ func BenchmarkGetObject10MbFS(b *testing.B) { benchmarkGetObject(b, "FS", 10*humanize.MiByte) } -// BenchmarkGetObject10MbXL - Benchmark XL.GetObject() for object size of 10MB. -func BenchmarkGetObject10MbXL(b *testing.B) { - benchmarkGetObject(b, "XL", 10*humanize.MiByte) +// BenchmarkGetObject10MbErasure - Benchmark Erasure.GetObject() for object size of 10MB. +func BenchmarkGetObject10MbErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 10*humanize.MiByte) } // BenchmarkGetObject25MbFS - Benchmark FS.GetObject() for object size of 25MB. @@ -514,9 +514,9 @@ func BenchmarkGetObject25MbFS(b *testing.B) { } -// BenchmarkGetObject25MbXL - Benchmark XL.GetObject() for object size of 25MB. 
-func BenchmarkGetObject25MbXL(b *testing.B) { - benchmarkGetObject(b, "XL", 25*humanize.MiByte) +// BenchmarkGetObject25MbErasure - Benchmark Erasure.GetObject() for object size of 25MB. +func BenchmarkGetObject25MbErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 25*humanize.MiByte) } // BenchmarkGetObject50MbFS - Benchmark FS.GetObject() for object size of 50MB. @@ -524,9 +524,9 @@ func BenchmarkGetObject50MbFS(b *testing.B) { benchmarkGetObject(b, "FS", 50*humanize.MiByte) } -// BenchmarkGetObject50MbXL - Benchmark XL.GetObject() for object size of 50MB. -func BenchmarkGetObject50MbXL(b *testing.B) { - benchmarkGetObject(b, "XL", 50*humanize.MiByte) +// BenchmarkGetObject50MbErasure - Benchmark Erasure.GetObject() for object size of 50MB. +func BenchmarkGetObject50MbErasure(b *testing.B) { + benchmarkGetObject(b, "Erasure", 50*humanize.MiByte) } // parallel benchmarks for ObjectLayer.GetObject() . @@ -536,9 +536,9 @@ func BenchmarkGetObjectParallelVerySmallFS(b *testing.B) { benchmarkGetObjectParallel(b, "FS", 10) } -// BenchmarkGetObjectParallelVerySmallXL - Benchmark XL.GetObject() for object size of 10 bytes. -func BenchmarkGetObjectParallelVerySmallXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 10) +// BenchmarkGetObjectParallelVerySmallErasure - Benchmark Erasure.GetObject() for object size of 10 bytes. +func BenchmarkGetObjectParallelVerySmallErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 10) } // BenchmarkGetObjectParallel10KbFS - Benchmark FS.GetObject() for object size of 10KB. @@ -546,9 +546,9 @@ func BenchmarkGetObjectParallel10KbFS(b *testing.B) { benchmarkGetObjectParallel(b, "FS", 10*humanize.KiByte) } -// BenchmarkGetObjectParallel10KbXL - Benchmark XL.GetObject() for object size of 10KB. -func BenchmarkGetObjectParallel10KbXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 10*humanize.KiByte) +// BenchmarkGetObjectParallel10KbErasure - Benchmark Erasure.GetObject() for object size of 10KB. +func BenchmarkGetObjectParallel10KbErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 10*humanize.KiByte) } // BenchmarkGetObjectParallel100KbFS - Benchmark FS.GetObject() for object size of 100KB. @@ -556,9 +556,9 @@ func BenchmarkGetObjectParallel100KbFS(b *testing.B) { benchmarkGetObjectParallel(b, "FS", 100*humanize.KiByte) } -// BenchmarkGetObjectParallel100KbXL - Benchmark XL.GetObject() for object size of 100KB. -func BenchmarkGetObjectParallel100KbXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 100*humanize.KiByte) +// BenchmarkGetObjectParallel100KbErasure - Benchmark Erasure.GetObject() for object size of 100KB. +func BenchmarkGetObjectParallel100KbErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 100*humanize.KiByte) } // BenchmarkGetObjectParallel1MbFS - Benchmark FS.GetObject() for object size of 1MB. @@ -566,9 +566,9 @@ func BenchmarkGetObjectParallel1MbFS(b *testing.B) { benchmarkGetObjectParallel(b, "FS", 1*humanize.MiByte) } -// BenchmarkGetObjectParallel1MbXL - Benchmark XL.GetObject() for object size of 1MB. -func BenchmarkGetObjectParallel1MbXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 1*humanize.MiByte) +// BenchmarkGetObjectParallel1MbErasure - Benchmark Erasure.GetObject() for object size of 1MB. +func BenchmarkGetObjectParallel1MbErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 1*humanize.MiByte) } // BenchmarkGetObjectParallel5MbFS - Benchmark FS.GetObject() for object size of 5MB. 
@@ -576,9 +576,9 @@ func BenchmarkGetObjectParallel5MbFS(b *testing.B) { benchmarkGetObjectParallel(b, "FS", 5*humanize.MiByte) } -// BenchmarkGetObjectParallel5MbXL - Benchmark XL.GetObject() for object size of 5MB. -func BenchmarkGetObjectParallel5MbXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 5*humanize.MiByte) +// BenchmarkGetObjectParallel5MbErasure - Benchmark Erasure.GetObject() for object size of 5MB. +func BenchmarkGetObjectParallel5MbErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 5*humanize.MiByte) } // BenchmarkGetObjectParallel10MbFS - Benchmark FS.GetObject() for object size of 10MB. @@ -586,9 +586,9 @@ func BenchmarkGetObjectParallel10MbFS(b *testing.B) { benchmarkGetObjectParallel(b, "FS", 10*humanize.MiByte) } -// BenchmarkGetObjectParallel10MbXL - Benchmark XL.GetObject() for object size of 10MB. -func BenchmarkGetObjectParallel10MbXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 10*humanize.MiByte) +// BenchmarkGetObjectParallel10MbErasure - Benchmark Erasure.GetObject() for object size of 10MB. +func BenchmarkGetObjectParallel10MbErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 10*humanize.MiByte) } // BenchmarkGetObjectParallel25MbFS - Benchmark FS.GetObject() for object size of 25MB. @@ -597,9 +597,9 @@ func BenchmarkGetObjectParallel25MbFS(b *testing.B) { } -// BenchmarkGetObjectParallel25MbXL - Benchmark XL.GetObject() for object size of 25MB. -func BenchmarkGetObjectParallel25MbXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 25*humanize.MiByte) +// BenchmarkGetObjectParallel25MbErasure - Benchmark Erasure.GetObject() for object size of 25MB. +func BenchmarkGetObjectParallel25MbErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 25*humanize.MiByte) } // BenchmarkGetObjectParallel50MbFS - Benchmark FS.GetObject() for object size of 50MB. @@ -607,7 +607,7 @@ func BenchmarkGetObjectParallel50MbFS(b *testing.B) { benchmarkGetObjectParallel(b, "FS", 50*humanize.MiByte) } -// BenchmarkGetObjectParallel50MbXL - Benchmark XL.GetObject() for object size of 50MB. -func BenchmarkGetObjectParallel50MbXL(b *testing.B) { - benchmarkGetObjectParallel(b, "XL", 50*humanize.MiByte) +// BenchmarkGetObjectParallel50MbErasure - Benchmark Erasure.GetObject() for object size of 50MB. +func BenchmarkGetObjectParallel50MbErasure(b *testing.B) { + benchmarkGetObjectParallel(b, "Erasure", 50*humanize.MiByte) } diff --git a/cmd/object-api-getobjectinfo_test.go b/cmd/object-api-getobjectinfo_test.go index ba44d9d1f..73c656162 100644 --- a/cmd/object-api-getobjectinfo_test.go +++ b/cmd/object-api-getobjectinfo_test.go @@ -22,7 +22,7 @@ import ( "testing" ) -// Wrapper for calling GetObjectInfo tests for both XL multiple disks and single node setup. +// Wrapper for calling GetObjectInfo tests for both Erasure multiple disks and single node setup. func TestGetObjectInfo(t *testing.T) { ExecObjectLayerTest(t, testGetObjectInfo) } @@ -30,7 +30,7 @@ func TestGetObjectInfo(t *testing.T) { // Testing GetObjectInfo(). func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) { // This bucket is used for testing getObjectInfo operations. 
- err := obj.MakeBucketWithLocation(context.Background(), "test-getobjectinfo", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "test-getobjectinfo", BucketOptions{}) if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go index 2be7f223d..52f79eca2 100644 --- a/cmd/object-api-interface.go +++ b/cmd/object-api-interface.go @@ -23,7 +23,6 @@ import ( "github.com/minio/minio-go/v6/pkg/encrypt" "github.com/minio/minio-go/v6/pkg/tags" - "github.com/minio/minio/pkg/bucket/policy" "github.com/minio/minio/pkg/madmin" ) @@ -32,16 +31,25 @@ import ( type CheckCopyPreconditionFn func(o ObjectInfo, encETag string) bool // GetObjectInfoFn is the signature of GetObjectInfo function. -type GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) +type GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) -// ObjectOptions represents object options for ObjectLayer operations +// ObjectOptions represents object options for ObjectLayer object operations type ObjectOptions struct { ServerSideEncryption encrypt.ServerSide + Versioned bool + VersionID string UserDefined map[string]string PartNumber int CheckCopyPrecondFn CheckCopyPreconditionFn } +// BucketOptions represents bucket options for ObjectLayer bucket operations +type BucketOptions struct { + Location string + LockEnabled bool + VersioningEnabled bool +} + // LockType represents required locking for ObjectLayer operations type LockType int @@ -62,12 +70,14 @@ type ObjectLayer interface { StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) // local queries only local disks // Bucket operations. - MakeBucketWithLocation(ctx context.Context, bucket string, location string, lockEnabled bool) error + MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) + ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (result ListObjectVersionsInfo, err error) + // Walk lists all objects including versions, delete markers. Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error // Object operations. 
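The practical effect of the new BucketOptions/ObjectOptions split is that versioning state travels inside one options struct instead of as extra positional arguments. A caller-side sketch using only the signatures introduced above; the bucket and prefix names are illustrative:

    // objAPI is any ObjectLayer implementation.
    func demoVersionedListing(ctx context.Context, objAPI ObjectLayer) error {
        // Location, lock and versioning flags now ride in one struct.
        if err := objAPI.MakeBucketWithLocation(ctx, "demo-bucket", BucketOptions{
            VersioningEnabled: true,
        }); err != nil {
            return err
        }

        // Page through all versions and delete markers under a prefix.
        marker, versionMarker := "", ""
        for {
            res, err := objAPI.ListObjectVersions(ctx, "demo-bucket", "photos/", marker, versionMarker, "", 1000)
            if err != nil {
                return err
            }
            for _, oi := range res.Objects {
                _ = oi.VersionID // each entry carries its version ID and IsLatest flag
            }
            if !res.IsTruncated {
                return nil
            }
            // Resume from both markers on the next page.
            marker, versionMarker = res.NextMarker, res.NextVersionIDMarker
        }
    }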
@@ -83,8 +93,8 @@ type ObjectLayer interface { GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) - DeleteObject(ctx context.Context, bucket, object string) error - DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) + DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) + DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) // Multipart operations. ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) @@ -101,8 +111,8 @@ type ObjectLayer interface { ReloadFormat(ctx context.Context, dryRun bool) error HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem, error) - HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error) - HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn healObjectFn) error + HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) + HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn HealObjectFn) error ListBucketsHeal(ctx context.Context) (buckets []BucketInfo, err error) // Policy operations @@ -124,7 +134,7 @@ type ObjectLayer interface { IsReady(ctx context.Context) bool // ObjectTagging operations - PutObjectTags(context.Context, string, string, string) error - GetObjectTags(context.Context, string, string) (*tags.Tags, error) - DeleteObjectTags(context.Context, string, string) error + PutObjectTags(context.Context, string, string, string, ObjectOptions) error + GetObjectTags(context.Context, string, string, ObjectOptions) (*tags.Tags, error) + DeleteObjectTags(context.Context, string, string, ObjectOptions) error } diff --git a/cmd/object-api-listobjects_test.go b/cmd/object-api-listobjects_test.go index 46623206b..c60966249 100644 --- a/cmd/object-api-listobjects_test.go +++ b/cmd/object-api-listobjects_test.go @@ -29,7 +29,7 @@ import ( "testing" ) -// Wrapper for calling ListObjects tests for both XL multiple disks and single node setup. +// Wrapper for calling ListObjects tests for both Erasure multiple disks and single node setup. func TestListObjects(t *testing.T) { ExecObjectLayerTest(t, testListObjects) } @@ -49,7 +49,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) { "test-bucket-single-object", } for _, bucket := range testBuckets { - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } @@ -669,7 +669,7 @@ func BenchmarkListObjects(b *testing.B) { bucket := "ls-benchmark-bucket" // Create a bucket. 
- err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { b.Fatal(err) } diff --git a/cmd/object-api-multipart_test.go b/cmd/object-api-multipart_test.go index 351a5c944..f621a4cba 100644 --- a/cmd/object-api-multipart_test.go +++ b/cmd/object-api-multipart_test.go @@ -28,7 +28,7 @@ import ( "github.com/minio/minio/pkg/hash" ) -// Wrapper for calling NewMultipartUpload tests for both XL multiple disks and single node setup. +// Wrapper for calling NewMultipartUpload tests for both Erasure multiple disks and single node setup. func TestObjectNewMultipartUpload(t *testing.T) { ExecObjectLayerTest(t, testObjectNewMultipartUpload) } @@ -55,7 +55,7 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr } // Create bucket before intiating NewMultipartUpload. - err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -77,7 +77,7 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr } } -// Wrapper for calling AbortMultipartUpload tests for both XL multiple disks and single node setup. +// Wrapper for calling AbortMultipartUpload tests for both Erasure multiple disks and single node setup. func TestObjectAbortMultipartUpload(t *testing.T) { ExecObjectLayerTest(t, testObjectAbortMultipartUpload) } @@ -89,7 +89,7 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test object := "minio-object" opts := ObjectOptions{} // Create bucket before intiating NewMultipartUpload. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -124,7 +124,7 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test } } -// Wrapper for calling isUploadIDExists tests for both XL multiple disks and single node setup. +// Wrapper for calling isUploadIDExists tests for both Erasure multiple disks and single node setup. func TestObjectAPIIsUploadIDExists(t *testing.T) { ExecObjectLayerTest(t, testObjectAPIIsUploadIDExists) } @@ -135,7 +135,7 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t TestE object := "minio-object" // Create bucket before intiating NewMultipartUpload. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -154,7 +154,7 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t TestE } } -// Wrapper for calling PutObjectPart tests for both XL multiple disks and single node setup. +// Wrapper for calling PutObjectPart tests for both Erasure multiple disks and single node setup. func TestObjectAPIPutObjectPart(t *testing.T) { ExecObjectLayerTest(t, testObjectAPIPutObjectPart) } @@ -166,7 +166,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH object := "minio-object" opts := ObjectOptions{} // Create bucket before intiating NewMultipartUpload. 
- err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -178,7 +178,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH t.Fatalf("%s : %s", instanceType, err.Error()) } // Creating a dummy bucket for tests. - err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -210,7 +210,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH {"a", "obj", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: a")}, // Test case - 5. // Case with invalid object names. - {bucket, "", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Object name invalid: minio-bucket#")}, + {bucket, "", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Object name invalid: minio-bucket/")}, // Test case - 6. // Valid object and bucket names but non-existent bucket. {"abc", "def", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: abc")}, @@ -286,7 +286,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH } } -// Wrapper for calling TestListMultipartUploads tests for both XL multiple disks and single node setup. +// Wrapper for calling TestListMultipartUploads tests for both Erasure multiple disks and single node setup. func TestListMultipartUploads(t *testing.T) { ExecObjectLayerTest(t, testListMultipartUploads) } @@ -302,7 +302,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan // objectNames[0]. // uploadIds [0]. // Create bucket before initiating NewMultipartUpload. - err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -320,7 +320,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan // objectNames[0]. // uploadIds [1-3]. // Bucket to test for mutiple upload Id's for a given object. - err = obj.MakeBucketWithLocation(context.Background(), bucketNames[1], "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketNames[1], BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -341,7 +341,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan // bucketnames[2]. // objectNames[0-2]. // uploadIds [4-9]. - err = obj.MakeBucketWithLocation(context.Background(), bucketNames[2], "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketNames[2], BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -1150,7 +1150,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan } } -// Wrapper for calling TestListObjectPartsDiskNotFound tests for both XL multiple disks and single node setup. +// Wrapper for calling TestListObjectPartsDiskNotFound tests for both Erasure multiple disks and single node setup. 
func TestListObjectPartsDiskNotFound(t *testing.T) { ExecObjectLayerDiskAlteredTest(t, testListObjectPartsDiskNotFound) } @@ -1166,7 +1166,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks // objectNames[0]. // uploadIds [0]. // Create bucket before intiating NewMultipartUpload. - err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -1395,7 +1395,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks } } -// Wrapper for calling TestListObjectParts tests for both XL multiple disks and single node setup. +// Wrapper for calling TestListObjectParts tests for both Erasure multiple disks and single node setup. func TestListObjectParts(t *testing.T) { ExecObjectLayerTest(t, testListObjectParts) } @@ -1411,7 +1411,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler) // objectNames[0]. // uploadIds [0]. // Create bucket before intiating NewMultipartUpload. - err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -1657,7 +1657,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T // objectNames[0]. // uploadIds [0]. // Create bucket before intiating NewMultipartUpload. - err = obj.MakeBucketWithLocation(context.Background(), bucketNames[0], "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketNames[0], BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err) @@ -1796,7 +1796,6 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T // the case above successfully completes CompleteMultipartUpload, the remaining Parts will be flushed. // Expecting to fail with Invalid UploadID. {bucketNames[0], objectNames[0], uploadIDs[0], inputParts[4].parts, "", InvalidUploadID{UploadID: uploadIDs[0]}, false}, - // Expecting to fail due to bad } for _, testCase := range testCases { @@ -1829,16 +1828,16 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T // Benchmarks for ObjectLayer.PutObjectPart(). // The intent is to benchmark PutObjectPart for various sizes ranging from few bytes to 100MB. -// Also each of these Benchmarks are run both XL and FS backends. +// Also each of these Benchmarks are run both Erasure and FS backends. // BenchmarkPutObjectPart5MbFS - Benchmark FS.PutObjectPart() for object size of 5MB. func BenchmarkPutObjectPart5MbFS(b *testing.B) { benchmarkPutObjectPart(b, "FS", 5*humanize.MiByte) } -// BenchmarkPutObjectPart5MbXL - Benchmark XL.PutObjectPart() for object size of 5MB. -func BenchmarkPutObjectPart5MbXL(b *testing.B) { - benchmarkPutObjectPart(b, "XL", 5*humanize.MiByte) +// BenchmarkPutObjectPart5MbErasure - Benchmark Erasure.PutObjectPart() for object size of 5MB. +func BenchmarkPutObjectPart5MbErasure(b *testing.B) { + benchmarkPutObjectPart(b, "Erasure", 5*humanize.MiByte) } // BenchmarkPutObjectPart10MbFS - Benchmark FS.PutObjectPart() for object size of 10MB. 
@@ -1846,9 +1845,9 @@ func BenchmarkPutObjectPart10MbFS(b *testing.B) { benchmarkPutObjectPart(b, "FS", 10*humanize.MiByte) } -// BenchmarkPutObjectPart10MbXL - Benchmark XL.PutObjectPart() for object size of 10MB. -func BenchmarkPutObjectPart10MbXL(b *testing.B) { - benchmarkPutObjectPart(b, "XL", 10*humanize.MiByte) +// BenchmarkPutObjectPart10MbErasure - Benchmark Erasure.PutObjectPart() for object size of 10MB. +func BenchmarkPutObjectPart10MbErasure(b *testing.B) { + benchmarkPutObjectPart(b, "Erasure", 10*humanize.MiByte) } // BenchmarkPutObjectPart25MbFS - Benchmark FS.PutObjectPart() for object size of 25MB. @@ -1857,9 +1856,9 @@ func BenchmarkPutObjectPart25MbFS(b *testing.B) { } -// BenchmarkPutObjectPart25MbXL - Benchmark XL.PutObjectPart() for object size of 25MB. -func BenchmarkPutObjectPart25MbXL(b *testing.B) { - benchmarkPutObjectPart(b, "XL", 25*humanize.MiByte) +// BenchmarkPutObjectPart25MbErasure - Benchmark Erasure.PutObjectPart() for object size of 25MB. +func BenchmarkPutObjectPart25MbErasure(b *testing.B) { + benchmarkPutObjectPart(b, "Erasure", 25*humanize.MiByte) } // BenchmarkPutObjectPart50MbFS - Benchmark FS.PutObjectPart() for object size of 50MB. @@ -1867,7 +1866,7 @@ func BenchmarkPutObjectPart50MbFS(b *testing.B) { benchmarkPutObjectPart(b, "FS", 50*humanize.MiByte) } -// BenchmarkPutObjectPart50MbXL - Benchmark XL.PutObjectPart() for object size of 50MB. -func BenchmarkPutObjectPart50MbXL(b *testing.B) { - benchmarkPutObjectPart(b, "XL", 50*humanize.MiByte) +// BenchmarkPutObjectPart50MbErasure - Benchmark Erasure.PutObjectPart() for object size of 50MB. +func BenchmarkPutObjectPart50MbErasure(b *testing.B) { + benchmarkPutObjectPart(b, "Erasure", 50*humanize.MiByte) } diff --git a/cmd/object-api-putobject_test.go b/cmd/object-api-putobject_test.go index 9886d31f0..1c6a8aac9 100644 --- a/cmd/object-api-putobject_test.go +++ b/cmd/object-api-putobject_test.go @@ -34,7 +34,7 @@ func md5Header(data []byte) map[string]string { return map[string]string{"etag": getMD5Hash([]byte(data))} } -// Wrapper for calling PutObject tests for both XL multiple disks and single node setup. +// Wrapper for calling PutObject tests for both Erasure multiple disks and single node setup. func TestObjectAPIPutObjectSingle(t *testing.T) { ExecObjectLayerTest(t, testObjectAPIPutObject) } @@ -46,14 +46,14 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl object := "minio-object" // Create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) } // Creating a dummy bucket for tests. - err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -198,7 +198,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl } } -// Wrapper for calling PutObject tests for both XL multiple disks case +// Wrapper for calling PutObject tests for both Erasure multiple disks case // when quorum is not available. 
func TestObjectAPIPutObjectDiskNotFound(t *testing.T) { ExecObjectLayerDiskAlteredTest(t, testObjectAPIPutObjectDiskNotFound) @@ -211,14 +211,14 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di object := "minio-object" // Create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) } // Creating a dummy bucket for tests. - err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -311,7 +311,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di } } -// Wrapper for calling PutObject tests for both XL multiple disks and single node setup. +// Wrapper for calling PutObject tests for both Erasure multiple disks and single node setup. func TestObjectAPIPutObjectStaleFiles(t *testing.T) { ExecObjectLayerStaleFilesTest(t, testObjectAPIPutObjectStaleFiles) } @@ -323,7 +323,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk object := "minio-object" // Create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -345,7 +345,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk } } -// Wrapper for calling Multipart PutObject tests for both XL multiple disks and single node setup. +// Wrapper for calling Multipart PutObject tests for both Erasure multiple disks and single node setup. func TestObjectAPIMultipartPutObjectStaleFiles(t *testing.T) { ExecObjectLayerStaleFilesTest(t, testObjectAPIMultipartPutObjectStaleFiles) } @@ -357,7 +357,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str object := "minio-object" // Create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -425,16 +425,16 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str // Benchmarks for ObjectLayer.PutObject(). // The intent is to benchmark PutObject for various sizes ranging from few bytes to 100MB. -// Also each of these Benchmarks are run both XL and FS backends. +// Also each of these Benchmarks are run both Erasure and FS backends. // BenchmarkPutObjectVerySmallFS - Benchmark FS.PutObject() for object size of 10 bytes. func BenchmarkPutObjectVerySmallFS(b *testing.B) { benchmarkPutObject(b, "FS", 10) } -// BenchmarkPutObjectVerySmallXL - Benchmark XL.PutObject() for object size of 10 bytes. -func BenchmarkPutObjectVerySmallXL(b *testing.B) { - benchmarkPutObject(b, "XL", 10) +// BenchmarkPutObjectVerySmallErasure - Benchmark Erasure.PutObject() for object size of 10 bytes. +func BenchmarkPutObjectVerySmallErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 10) } // BenchmarkPutObject10KbFS - Benchmark FS.PutObject() for object size of 10KB. 
@@ -442,9 +442,9 @@ func BenchmarkPutObject10KbFS(b *testing.B) { benchmarkPutObject(b, "FS", 10*humanize.KiByte) } -// BenchmarkPutObject10KbXL - Benchmark XL.PutObject() for object size of 10KB. -func BenchmarkPutObject10KbXL(b *testing.B) { - benchmarkPutObject(b, "XL", 10*humanize.KiByte) +// BenchmarkPutObject10KbErasure - Benchmark Erasure.PutObject() for object size of 10KB. +func BenchmarkPutObject10KbErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 10*humanize.KiByte) } // BenchmarkPutObject100KbFS - Benchmark FS.PutObject() for object size of 100KB. @@ -452,9 +452,9 @@ func BenchmarkPutObject100KbFS(b *testing.B) { benchmarkPutObject(b, "FS", 100*humanize.KiByte) } -// BenchmarkPutObject100KbXL - Benchmark XL.PutObject() for object size of 100KB. -func BenchmarkPutObject100KbXL(b *testing.B) { - benchmarkPutObject(b, "XL", 100*humanize.KiByte) +// BenchmarkPutObject100KbErasure - Benchmark Erasure.PutObject() for object size of 100KB. +func BenchmarkPutObject100KbErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 100*humanize.KiByte) } // BenchmarkPutObject1MbFS - Benchmark FS.PutObject() for object size of 1MB. @@ -462,9 +462,9 @@ func BenchmarkPutObject1MbFS(b *testing.B) { benchmarkPutObject(b, "FS", 1*humanize.MiByte) } -// BenchmarkPutObject1MbXL - Benchmark XL.PutObject() for object size of 1MB. -func BenchmarkPutObject1MbXL(b *testing.B) { - benchmarkPutObject(b, "XL", 1*humanize.MiByte) +// BenchmarkPutObject1MbErasure - Benchmark Erasure.PutObject() for object size of 1MB. +func BenchmarkPutObject1MbErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 1*humanize.MiByte) } // BenchmarkPutObject5MbFS - Benchmark FS.PutObject() for object size of 5MB. @@ -472,9 +472,9 @@ func BenchmarkPutObject5MbFS(b *testing.B) { benchmarkPutObject(b, "FS", 5*humanize.MiByte) } -// BenchmarkPutObject5MbXL - Benchmark XL.PutObject() for object size of 5MB. -func BenchmarkPutObject5MbXL(b *testing.B) { - benchmarkPutObject(b, "XL", 5*humanize.MiByte) +// BenchmarkPutObject5MbErasure - Benchmark Erasure.PutObject() for object size of 5MB. +func BenchmarkPutObject5MbErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 5*humanize.MiByte) } // BenchmarkPutObject10MbFS - Benchmark FS.PutObject() for object size of 10MB. @@ -482,9 +482,9 @@ func BenchmarkPutObject10MbFS(b *testing.B) { benchmarkPutObject(b, "FS", 10*humanize.MiByte) } -// BenchmarkPutObject10MbXL - Benchmark XL.PutObject() for object size of 10MB. -func BenchmarkPutObject10MbXL(b *testing.B) { - benchmarkPutObject(b, "XL", 10*humanize.MiByte) +// BenchmarkPutObject10MbErasure - Benchmark Erasure.PutObject() for object size of 10MB. +func BenchmarkPutObject10MbErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 10*humanize.MiByte) } // BenchmarkPutObject25MbFS - Benchmark FS.PutObject() for object size of 25MB. @@ -493,9 +493,9 @@ func BenchmarkPutObject25MbFS(b *testing.B) { } -// BenchmarkPutObject25MbXL - Benchmark XL.PutObject() for object size of 25MB. -func BenchmarkPutObject25MbXL(b *testing.B) { - benchmarkPutObject(b, "XL", 25*humanize.MiByte) +// BenchmarkPutObject25MbErasure - Benchmark Erasure.PutObject() for object size of 25MB. +func BenchmarkPutObject25MbErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 25*humanize.MiByte) } // BenchmarkPutObject50MbFS - Benchmark FS.PutObject() for object size of 50MB. 
@@ -503,9 +503,9 @@ func BenchmarkPutObject50MbFS(b *testing.B) { benchmarkPutObject(b, "FS", 50*humanize.MiByte) } -// BenchmarkPutObject50MbXL - Benchmark XL.PutObject() for object size of 50MB. -func BenchmarkPutObject50MbXL(b *testing.B) { - benchmarkPutObject(b, "XL", 50*humanize.MiByte) +// BenchmarkPutObject50MbErasure - Benchmark Erasure.PutObject() for object size of 50MB. +func BenchmarkPutObject50MbErasure(b *testing.B) { + benchmarkPutObject(b, "Erasure", 50*humanize.MiByte) } // parallel benchmarks for ObjectLayer.PutObject() . @@ -515,9 +515,9 @@ func BenchmarkParallelPutObjectVerySmallFS(b *testing.B) { benchmarkPutObjectParallel(b, "FS", 10) } -// BenchmarkParallelPutObjectVerySmallXL - BenchmarkParallel XL.PutObject() for object size of 10 bytes. -func BenchmarkParallelPutObjectVerySmallXL(b *testing.B) { - benchmarkPutObjectParallel(b, "XL", 10) +// BenchmarkParallelPutObjectVerySmallErasure - BenchmarkParallel Erasure.PutObject() for object size of 10 bytes. +func BenchmarkParallelPutObjectVerySmallErasure(b *testing.B) { + benchmarkPutObjectParallel(b, "Erasure", 10) } // BenchmarkParallelPutObject10KbFS - BenchmarkParallel FS.PutObject() for object size of 10KB. @@ -525,9 +525,9 @@ func BenchmarkParallelPutObject10KbFS(b *testing.B) { benchmarkPutObjectParallel(b, "FS", 10*humanize.KiByte) } -// BenchmarkParallelPutObject10KbXL - BenchmarkParallel XL.PutObject() for object size of 10KB. -func BenchmarkParallelPutObject10KbXL(b *testing.B) { - benchmarkPutObjectParallel(b, "XL", 10*humanize.KiByte) +// BenchmarkParallelPutObject10KbErasure - BenchmarkParallel Erasure.PutObject() for object size of 10KB. +func BenchmarkParallelPutObject10KbErasure(b *testing.B) { + benchmarkPutObjectParallel(b, "Erasure", 10*humanize.KiByte) } // BenchmarkParallelPutObject100KbFS - BenchmarkParallel FS.PutObject() for object size of 100KB. @@ -535,9 +535,9 @@ func BenchmarkParallelPutObject100KbFS(b *testing.B) { benchmarkPutObjectParallel(b, "FS", 100*humanize.KiByte) } -// BenchmarkParallelPutObject100KbXL - BenchmarkParallel XL.PutObject() for object size of 100KB. -func BenchmarkParallelPutObject100KbXL(b *testing.B) { - benchmarkPutObjectParallel(b, "XL", 100*humanize.KiByte) +// BenchmarkParallelPutObject100KbErasure - BenchmarkParallel Erasure.PutObject() for object size of 100KB. +func BenchmarkParallelPutObject100KbErasure(b *testing.B) { + benchmarkPutObjectParallel(b, "Erasure", 100*humanize.KiByte) } // BenchmarkParallelPutObject1MbFS - BenchmarkParallel FS.PutObject() for object size of 1MB. @@ -545,9 +545,9 @@ func BenchmarkParallelPutObject1MbFS(b *testing.B) { benchmarkPutObjectParallel(b, "FS", 1*humanize.MiByte) } -// BenchmarkParallelPutObject1MbXL - BenchmarkParallel XL.PutObject() for object size of 1MB. -func BenchmarkParallelPutObject1MbXL(b *testing.B) { - benchmarkPutObjectParallel(b, "XL", 1*humanize.MiByte) +// BenchmarkParallelPutObject1MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 1MB. +func BenchmarkParallelPutObject1MbErasure(b *testing.B) { + benchmarkPutObjectParallel(b, "Erasure", 1*humanize.MiByte) } // BenchmarkParallelPutObject5MbFS - BenchmarkParallel FS.PutObject() for object size of 5MB. @@ -555,9 +555,9 @@ func BenchmarkParallelPutObject5MbFS(b *testing.B) { benchmarkPutObjectParallel(b, "FS", 5*humanize.MiByte) } -// BenchmarkParallelPutObject5MbXL - BenchmarkParallel XL.PutObject() for object size of 5MB. 
-func BenchmarkParallelPutObject5MbXL(b *testing.B) { - benchmarkPutObjectParallel(b, "XL", 5*humanize.MiByte) +// BenchmarkParallelPutObject5MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 5MB. +func BenchmarkParallelPutObject5MbErasure(b *testing.B) { + benchmarkPutObjectParallel(b, "Erasure", 5*humanize.MiByte) } // BenchmarkParallelPutObject10MbFS - BenchmarkParallel FS.PutObject() for object size of 10MB. @@ -565,9 +565,9 @@ func BenchmarkParallelPutObject10MbFS(b *testing.B) { benchmarkPutObjectParallel(b, "FS", 10*humanize.MiByte) } -// BenchmarkParallelPutObject10MbXL - BenchmarkParallel XL.PutObject() for object size of 10MB. -func BenchmarkParallelPutObject10MbXL(b *testing.B) { - benchmarkPutObjectParallel(b, "XL", 10*humanize.MiByte) +// BenchmarkParallelPutObject10MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 10MB. +func BenchmarkParallelPutObject10MbErasure(b *testing.B) { + benchmarkPutObjectParallel(b, "Erasure", 10*humanize.MiByte) } // BenchmarkParallelPutObject25MbFS - BenchmarkParallel FS.PutObject() for object size of 25MB. @@ -576,7 +576,7 @@ func BenchmarkParallelPutObject25MbFS(b *testing.B) { } -// BenchmarkParallelPutObject25MbXL - BenchmarkParallel XL.PutObject() for object size of 25MB. -func BenchmarkParallelPutObject25MbXL(b *testing.B) { - benchmarkPutObjectParallel(b, "XL", 25*humanize.MiByte) +// BenchmarkParallelPutObject25MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 25MB. +func BenchmarkParallelPutObject25MbErasure(b *testing.B) { + benchmarkPutObjectParallel(b, "Erasure", 25*humanize.MiByte) } diff --git a/cmd/object-handlers-common.go b/cmd/object-handlers-common.go index 9d7ecb1b7..73cc3396b 100644 --- a/cmd/object-handlers-common.go +++ b/cmd/object-handlers-common.go @@ -21,10 +21,12 @@ import ( "fmt" "net/http" "regexp" + "strconv" "time" "github.com/minio/minio/cmd/crypto" xhttp "github.com/minio/minio/cmd/http" + "github.com/minio/minio/pkg/bucket/lifecycle" "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/handlers" ) @@ -253,11 +255,35 @@ func isETagEqual(left, right string) bool { return canonicalizeETag(left) == canonicalizeETag(right) } -// setAmzExpirationHeader sets x-amz-expiration header with expiry time -// after analyzing the current bucket lifecycle rules if any. -func setAmzExpirationHeader(w http.ResponseWriter, bucket string, objInfo ObjectInfo) { - if lc, err := globalLifecycleSys.Get(bucket); err == nil { - ruleID, expiryTime := lc.PredictExpiryTime(objInfo.Name, objInfo.ModTime, objInfo.UserTags) +// setPutObjHeaders sets all the necessary headers returned back +// upon a successful Put/Copy/CompleteMultipart/Delete request; +// to emit only the delete-related headers, set delete to true. +func setPutObjHeaders(w http.ResponseWriter, objInfo ObjectInfo, delete bool) { + // We must not use the http.Header().Set method here because some (broken) + // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). + // Therefore, we have to set the ETag directly as map entry. + if objInfo.ETag != "" && !delete { + w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`} + } + + // Set the relevant version ID as part of the response header.
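The comment carried into setPutObjHeaders above is worth demonstrating: net/http canonicalizes any key passed through Header.Set, which turns "ETag" into "Etag" on the wire and trips clients that match header names case-sensitively. A self-contained check:

    package main

    import (
        "fmt"
        "net/http"
        "net/textproto"
    )

    func main() {
        h := http.Header{}
        h.Set("ETag", `"abc"`) // Set canonicalizes the key first
        fmt.Println(textproto.CanonicalMIMEHeaderKey("ETag")) // prints: Etag

        for k := range h {
            fmt.Println(k) // prints: Etag, not what strict clients expect
        }

        h = http.Header{"ETag": {`"abc"`}} // a direct map entry keeps the exact key
        for k := range h {
            fmt.Println(k) // prints: ETag
        }
    }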
+ if objInfo.VersionID != "" { + w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID} + // If the version is a delete marker, set this header as well + if objInfo.DeleteMarker && delete { // only returned during delete object + w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)} + } + } + + if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil && !delete { + ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{ + Name: objInfo.Name, + UserTags: objInfo.UserTags, + VersionID: objInfo.VersionID, + ModTime: objInfo.ModTime, + IsLatest: objInfo.IsLatest, + DeleteMarker: objInfo.DeleteMarker, + }) if !expiryTime.IsZero() { w.Header()[xhttp.AmzExpiration] = []string{ fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID), @@ -269,27 +295,37 @@ func isETagEqual(left, right string) bool { // deleteObject is a convenient wrapper to delete an object, this // is a common function to be called from object handlers and // web handlers. -func deleteObject(ctx context.Context, obj ObjectLayer, cache CacheObjectLayer, bucket, object string, r *http.Request) (err error) { +func deleteObject(ctx context.Context, obj ObjectLayer, cache CacheObjectLayer, bucket, object string, r *http.Request, opts ObjectOptions) (objInfo ObjectInfo, err error) { deleteObject := obj.DeleteObject if cache != nil { deleteObject = cache.DeleteObject } // Proceed to delete the object. - if err = deleteObject(ctx, bucket, object); err != nil { - return err + if objInfo, err = deleteObject(ctx, bucket, object, opts); err != nil { + return objInfo, err } - // Notify object deleted event. - sendEvent(eventArgs{ - EventName: event.ObjectRemovedDelete, - BucketName: bucket, - Object: ObjectInfo{ - Name: object, - }, - ReqParams: extractReqParams(r), - UserAgent: r.UserAgent(), - Host: handlers.GetSourceIP(r), - }) - - return nil + // The delete resulted in a delete marker rather than a permanent removal. + if objInfo.DeleteMarker { + // Notify delete marker created event. + sendEvent(eventArgs{ + EventName: event.ObjectRemovedDeleteMarkerCreated, + BucketName: bucket, + Object: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: handlers.GetSourceIP(r), + }) + } else { + // Notify object deleted event.
+ sendEvent(eventArgs{ + EventName: event.ObjectRemovedDelete, + BucketName: bucket, + Object: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: handlers.GetSourceIP(r), + }) + } + return objInfo, nil } diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index d8d7404e9..c6a09bc4f 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -31,6 +31,7 @@ import ( "time" + "github.com/google/uuid" "github.com/gorilla/mux" miniogo "github.com/minio/minio-go/v6" "github.com/minio/minio-go/v6/pkg/encrypt" @@ -309,11 +310,6 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req return } - if vid := r.URL.Query().Get("versionId"); vid != "" && vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } - // get gateway encryption options opts, err := getOpts(ctx, r, bucket, object) if err != nil { @@ -432,7 +428,6 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req } setHeadGetRespHeaders(w, r.URL.Query()) - setAmzExpirationHeader(w, bucket, objInfo) statusCodeWritten := false httpWriter := ioutil.WriteOnClose(w) @@ -496,11 +491,6 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re return } - if vid := r.URL.Query().Get("versionId"); vid != "" && vid != "null" { - writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchVersion)) - return - } - getObjectInfo := objectAPI.GetObjectInfo if api.CacheAPI() != nil { getObjectInfo = api.CacheAPI().GetObjectInfo @@ -617,9 +607,6 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re // Set any additional requested response headers. setHeadGetRespHeaders(w, r.URL.Query()) - // Set the expiration header - setAmzExpirationHeader(w, bucket, objInfo) - // Successful response. if rs != nil { w.WriteHeader(http.StatusPartialContent) @@ -773,35 +760,16 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re return } - // TODO: Reject requests where body/payload is present, for now we don't even read it. - // Read escaped copy source path to check for parameters. cpSrcPath := r.Header.Get(xhttp.AmzCopySource) - - // Check https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html - // Regardless of whether you have enabled versioning, each object in your bucket - // has a version ID. If you have not enabled versioning, Amazon S3 sets the value - // of the version ID to null. If you have enabled versioning, Amazon S3 assigns a - // unique version ID value for the object. + var vid string if u, err := url.Parse(cpSrcPath); err == nil { - // Check if versionId query param was added, if yes then check if - // its non "null" value, we should error out since we do not support - // any versions other than "null". - if vid := u.Query().Get("versionId"); vid != "" && vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } + vid = strings.TrimSpace(u.Query().Get("versionId")) // Note that url.Parse does the unescaping cpSrcPath = u.Path } - if vid := r.Header.Get(xhttp.AmzCopySourceVersionID); vid != "" { - // Check if versionId header was added, if yes then check if - // its non "null" value, we should error out since we do not support - // any versions other than "null". 
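The deleteObject rework just above returns the post-delete ObjectInfo precisely so callers can tell a delete-marker creation apart from a version removal; stripped of plumbing, the event selection reduces to the following (all names are taken from this patch):

    // A versioned delete without an explicit versionId only hides the object
    // behind a delete marker; pick the notification name accordingly.
    name := event.ObjectRemovedDelete
    if objInfo.DeleteMarker {
        name = event.ObjectRemovedDeleteMarkerCreated
    }
    sendEvent(eventArgs{
        EventName:  name,
        BucketName: bucket,
        Object:     objInfo,
        ReqParams:  extractReqParams(r),
        UserAgent:  r.UserAgent(),
        Host:       handlers.GetSourceIP(r),
    })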
- if vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } + if vid == "" { + vid = strings.TrimSpace(r.Header.Get(xhttp.AmzCopySourceVersionID)) } srcBucket, srcObject := path2BucketObject(cpSrcPath) @@ -811,6 +779,18 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re return } + if vid != "" && vid != nullVersionID { + _, err := uuid.Parse(vid) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, VersionNotFound{ + Bucket: srcBucket, + Object: srcObject, + VersionID: vid, + }), r.URL, guessIsBrowserReq(r)) + return + } + } + if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, srcBucket, srcObject); s3Error != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r)) return @@ -849,12 +829,15 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } + srcOpts.VersionID = vid + // convert copy src encryption options for GET calls - var getOpts = ObjectOptions{} + var getOpts = ObjectOptions{VersionID: srcOpts.VersionID, Versioned: srcOpts.Versioned} getSSE := encrypt.SSE(srcOpts.ServerSideEncryption) if getSSE != srcOpts.ServerSideEncryption { getOpts.ServerSideEncryption = getSSE } + dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, nil) if err != nil { logger.LogIf(ctx, err) @@ -1193,11 +1176,12 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re } } - setAmzExpirationHeader(w, dstBucket, objInfo) - - response := generateCopyObjectResponse(getDecryptedETag(r.Header, objInfo, false), objInfo.ModTime) + objInfo.ETag = getDecryptedETag(r.Header, objInfo, false) + response := generateCopyObjectResponse(objInfo.ETag, objInfo.ModTime) encodedSuccessResponse := encodeResponse(response) + setPutObjHeaders(w, objInfo, false) + // Write success response. writeSuccessResponseXML(w, encodedSuccessResponse) @@ -1376,6 +1360,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req sha256hex = getContentSha256Cksum(r, serviceS3) } } + if err := enforceBucketQuota(ctx, bucket, size); err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return @@ -1487,39 +1472,30 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req return } - etag := objInfo.ETag switch { case objInfo.IsCompressed(): if !strings.HasSuffix(objInfo.ETag, "-1") { - etag = objInfo.ETag + "-1" + objInfo.ETag = objInfo.ETag + "-1" } case crypto.IsEncrypted(objInfo.UserDefined): switch { case crypto.S3.IsEncrypted(objInfo.UserDefined): w.Header().Set(crypto.SSEHeader, crypto.SSEAlgorithmAES256) - etag, _ = DecryptETag(objectEncryptionKey, ObjectInfo{ETag: etag}) + objInfo.ETag, _ = DecryptETag(objectEncryptionKey, ObjectInfo{ETag: objInfo.ETag}) case crypto.SSEC.IsEncrypted(objInfo.UserDefined): w.Header().Set(crypto.SSECAlgorithm, r.Header.Get(crypto.SSECAlgorithm)) w.Header().Set(crypto.SSECKeyMD5, r.Header.Get(crypto.SSECKeyMD5)) - if len(etag) >= 32 && strings.Count(etag, "-") != 1 { - etag = etag[len(etag)-32:] + if len(objInfo.ETag) >= 32 && strings.Count(objInfo.ETag, "-") != 1 { + objInfo.ETag = objInfo.ETag[len(objInfo.ETag)-32:] } } } - // We must not use the http.Header().Set method here because some (broken) - // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). 
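The uuid.Parse guard above encodes the only two shapes a version ID may take here: the literal null version and a server-minted UUID. As a standalone predicate (isValidVersionID is a hypothetical helper name; nullVersionID is the constant this patch compares against):

    // isValidVersionID reports whether vid names the null version or a
    // well-formed UUID; anything else is rejected before any object lookup.
    func isValidVersionID(vid string) bool {
        if vid == "" || vid == nullVersionID {
            return true
        }
        _, err := uuid.Parse(vid)
        return err == nil
    }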
- // Therefore, we have to set the ETag directly as map entry. - w.Header()[xhttp.ETag] = []string{`"` + etag + `"`} - - setAmzExpirationHeader(w, bucket, objInfo) + setPutObjHeaders(w, objInfo, false) writeSuccessResponseHeadersOnly(w) - // Set the etag sent to the client as part of the event. - objInfo.ETag = etag - // Notify object created event. sendEvent(eventArgs{ EventName: event.ObjectCreatedPut, @@ -1697,31 +1673,14 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt // Read escaped copy source path to check for parameters. cpSrcPath := r.Header.Get(xhttp.AmzCopySource) - - // Check https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html - // Regardless of whether you have enabled versioning, each object in your bucket - // has a version ID. If you have not enabled versioning, Amazon S3 sets the value - // of the version ID to null. If you have enabled versioning, Amazon S3 assigns a - // unique version ID value for the object. + var vid string if u, err := url.Parse(cpSrcPath); err == nil { - // Check if versionId query param was added, if yes then check if - // its non "null" value, we should error out since we do not support - // any versions other than "null". - if vid := u.Query().Get("versionId"); vid != "" && vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } + vid = strings.TrimSpace(u.Query().Get("versionId")) // Note that url.Parse does the unescaping cpSrcPath = u.Path } - if vid := r.Header.Get(xhttp.AmzCopySourceVersionID); vid != "" { - // Check if X-Amz-Copy-Source-Version-Id header was added, if yes then check if - // its non "null" value, we should error out since we do not support - // any versions other than "null". - if vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } + if vid == "" { + vid = strings.TrimSpace(r.Header.Get(xhttp.AmzCopySourceVersionID)) } srcBucket, srcObject := path2BucketObject(cpSrcPath) @@ -1731,6 +1690,18 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt return } + if vid != "" && vid != nullVersionID { + _, err := uuid.Parse(vid) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, VersionNotFound{ + Bucket: srcBucket, + Object: srcObject, + VersionID: vid, + }), r.URL, guessIsBrowserReq(r)) + return + } + } + if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, srcBucket, srcObject); s3Error != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r)) return @@ -1757,8 +1728,10 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } + srcOpts.VersionID = vid + // convert copy src and dst encryption options for GET/PUT calls - var getOpts = ObjectOptions{} + var getOpts = ObjectOptions{VersionID: srcOpts.VersionID} if srcOpts.ServerSideEncryption != nil { getOpts.ServerSideEncryption = encrypt.SSE(srcOpts.ServerSideEncryption) } @@ -1779,8 +1752,6 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt if rangeHeader != "" { var parseRangeErr error if rs, parseRangeErr = parseCopyPartRangeSpec(rangeHeader); parseRangeErr != nil { - // Handle only errInvalidRange - // Ignore other parse error and treat it as regular Get request like Amazon S3. 
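CopyObjectPart now reads its source at a pinned version: the parsed vid is stored on srcOpts and copied into the GET options so even the decrypting read path targets the same version. Condensed from the hunk above:

    // Pin the copy source to the requested version for the GET path as well.
    srcOpts.VersionID = vid
    getOpts := ObjectOptions{VersionID: srcOpts.VersionID}
    if srcOpts.ServerSideEncryption != nil {
        getOpts.ServerSideEncryption = encrypt.SSE(srcOpts.ServerSideEncryption)
    }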
logger.GetReqInfo(ctx).AppendTags("rangeHeader", rangeHeader) logger.LogIf(ctx, parseRangeErr) writeCopyPartErr(ctx, w, parseRangeErr, r.URL, guessIsBrowserReq(r)) @@ -2098,7 +2069,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http // get encryption options var opts ObjectOptions if crypto.SSEC.IsRequested(r.Header) { - opts, err = putOpts(ctx, r, bucket, object, nil) + opts, err = getOpts(ctx, r, bucket, object) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return @@ -2204,6 +2175,10 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http if isEncrypted { etag = tryDecryptETag(objectEncryptionKey[:], partInfo.ETag, crypto.SSEC.IsRequested(r.Header)) } + + // We must not use the http.Header().Set method here because some (broken) + // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). + // Therefore, we have to set the ETag directly as map entry. w.Header()[xhttp.ETag] = []string{"\"" + etag + "\""} writeSuccessResponseHeadersOnly(w) @@ -2445,8 +2420,8 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite } var objectEncryptionKey []byte - var opts ObjectOptions var isEncrypted, ssec bool + var opts ObjectOptions if objectAPI.IsEncryptionSupported() { mi, err := objectAPI.GetMultipartInfo(ctx, bucket, object, uploadID, opts) if err != nil { @@ -2507,7 +2482,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite writeErrorResponseWithoutXMLHeader := func(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) { switch err.Code { case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum": - // Set retry-after header to indicate user-agents to retry request after 120secs. + // Set retry-after header to indicate user-agents to retry request after 120secs. // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After w.Header().Set(xhttp.RetryAfter, "120") } @@ -2552,10 +2527,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite } } - // Set etag. - w.Header()[xhttp.ETag] = []string{"\"" + objInfo.ETag + "\""} - - setAmzExpirationHeader(w, bucket, objInfo) + setPutObjHeaders(w, objInfo, false) // Write success response. writeSuccessResponseXML(w, encodedSuccessResponse) @@ -2599,11 +2571,6 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http. return } - if vid := r.URL.Query().Get("versionId"); vid != "" && vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } - getObjectInfo := objectAPI.GetObjectInfo if api.CacheAPI() != nil { getObjectInfo = api.CacheAPI().GetObjectInfo @@ -2617,18 +2584,30 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
} } + opts, err := getOpts(ctx, r, bucket, object) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + apiErr := ErrNone if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled { - apiErr = enforceRetentionBypassForDelete(ctx, r, bucket, object, getObjectInfo) - if apiErr != ErrNone && apiErr != ErrNoSuchKey { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErr), r.URL, guessIsBrowserReq(r)) - return + if opts.VersionID != "" { + apiErr = enforceRetentionBypassForDelete(ctx, r, bucket, ObjectToDelete{ + ObjectName: object, + VersionID: opts.VersionID, + }, getObjectInfo) + if apiErr != ErrNone && apiErr != ErrNoSuchKey { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErr), r.URL, guessIsBrowserReq(r)) + return + } } } if apiErr == ErrNone { // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html - if err := deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r); err != nil { + objInfo, err := deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r, opts) + if err != nil { switch err.(type) { case BucketNotFound: // When bucket doesn't exist specially handle it. @@ -2637,6 +2616,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http. } // Ignore delete object errors while replying to client, since we are suppposed to reply only 204. } + setPutObjHeaders(w, objInfo, true) } writeSuccessNoContent(w) @@ -2656,11 +2636,6 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r return } - if vid := r.URL.Query().Get("versionId"); vid != "" && vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } - objectAPI := api.ObjectAPI() if objectAPI == nil { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r)) @@ -2697,6 +2672,7 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r if api.CacheAPI() != nil { getObjectInfo = api.CacheAPI().GetObjectInfo } + opts, err := getOpts(ctx, r, bucket, object) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) @@ -2713,7 +2689,9 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags } objInfo.metadataOnly = true - if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{}, ObjectOptions{}); err != nil { + if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{ + VersionID: opts.VersionID, + }, ObjectOptions{}); err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } @@ -2746,11 +2724,6 @@ func (api objectAPIHandlers) GetObjectLegalHoldHandler(w http.ResponseWriter, r return } - if vid := r.URL.Query().Get("versionId"); vid != "" && vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } - objectAPI := api.ObjectAPI() if objectAPI == nil { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r)) @@ -2816,11 +2789,6 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r return } - if vid := r.URL.Query().Get("versionId"); vid != "" && vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } - 
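DeleteObjectHandler above settles into a fixed order: resolve ObjectOptions first, enforce the retention-bypass check only when a specific version is being removed (a delete without a versionId merely adds a marker, so there is nothing to bypass), then report the outcome through response headers while still returning 204. A condensed sketch of the tail of that flow:

    objInfo, err := deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r, opts)
    if err == nil {
        // delete=true: no ETag; x-amz-version-id names the removed version or
        // the new delete marker, x-amz-delete-marker is set when the affected
        // version is a delete marker.
        setPutObjHeaders(w, objInfo, true)
    }
    writeSuccessNoContent(w) // S3 semantics: 204 even when the key never existed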
objectAPI := api.ObjectAPI() if objectAPI == nil { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r)) @@ -2856,6 +2824,12 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r return } + opts, err := getOpts(ctx, r, bucket, object) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + getObjectInfo := objectAPI.GetObjectInfo if api.CacheAPI() != nil { getObjectInfo = api.CacheAPI().GetObjectInfo @@ -2873,7 +2847,9 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r objInfo.UserDefined[xhttp.AmzObjectTagging] = objInfo.UserTags } objInfo.metadataOnly = true // Perform only metadata updates. - if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{}, ObjectOptions{}); err != nil { + if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{ + VersionID: opts.VersionID, + }, ObjectOptions{}); err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } @@ -2904,11 +2880,6 @@ func (api objectAPIHandlers) GetObjectRetentionHandler(w http.ResponseWriter, r return } - if vid := r.URL.Query().Get("versionId"); vid != "" && vid != "null" { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchVersion), r.URL, guessIsBrowserReq(r)) - return - } - objectAPI := api.ObjectAPI() if objectAPI == nil { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r)) @@ -2980,8 +2951,14 @@ func (api objectAPIHandlers) GetObjectTaggingHandler(w http.ResponseWriter, r *h return } + opts, err := getOpts(ctx, r, bucket, object) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + // Get object tags - tags, err := objAPI.GetObjectTags(ctx, bucket, object) + tags, err := objAPI.GetObjectTags(ctx, bucket, object, opts) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return @@ -3025,8 +3002,14 @@ func (api objectAPIHandlers) PutObjectTaggingHandler(w http.ResponseWriter, r *h return } + opts, err := getOpts(ctx, r, bucket, object) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + // Put object tags - err = objAPI.PutObjectTags(ctx, bucket, object, tags.String()) + err = objAPI.PutObjectTags(ctx, bucket, object, tags.String(), opts) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return @@ -3064,8 +3047,14 @@ func (api objectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r return } + opts, err := getOpts(ctx, r, bucket, object) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + // Delete object tags - if err = objAPI.DeleteObjectTags(ctx, bucket, object); err != nil && err != errConfigNotFound { + if err = objAPI.DeleteObjectTags(ctx, bucket, object, opts); err != nil && err != errConfigNotFound { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go index 592029553..3b7c2d71d 100644 --- a/cmd/object-handlers_test.go +++ b/cmd/object-handlers_test.go @@ -56,7 +56,7 @@ const ( MissingUploadID ) -// Wrapper for calling HeadObject API handler tests for both XL multiple disks and FS single drive setup. 
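The tagging handlers above all gain the same preamble: resolve ObjectOptions, then thread it through, which is what makes Get/Put/DeleteObjectTags version-aware. The whole pattern in one place, condensed from the hunks above:

    // ?versionId= flows through getOpts into the tag lookup.
    opts, err := getOpts(ctx, r, bucket, object)
    if err != nil {
        writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
        return
    }
    tags, err := objAPI.GetObjectTags(ctx, bucket, object, opts)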
+// Wrapper for calling HeadObject API handler tests for both Erasure multiple disks and FS single drive setup. func TestAPIHeadObjectHandler(t *testing.T) { ExecObjectLayerAPITest(t, testAPIHeadObjectHandler, []string{"HeadObject"}) } @@ -322,7 +322,7 @@ func testAPIHeadObjectHandlerWithEncryption(obj ObjectLayer, instanceType, bucke } } -// Wrapper for calling GetObject API handler tests for both XL multiple disks and FS single drive setup. +// Wrapper for calling GetObject API handler tests for both Erasure multiple disks and FS single drive setup. func TestAPIGetObjectHandler(t *testing.T) { globalPolicySys = NewPolicySys() defer func() { globalPolicySys = nil }() @@ -646,7 +646,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq) } -// Wrapper for calling GetObject API handler tests for both XL multiple disks and FS single drive setup. +// Wrapper for calling GetObject API handler tests for both Erasure multiple disks and FS single drive setup. func TestAPIGetObjectWithMPHandler(t *testing.T) { globalPolicySys = NewPolicySys() defer func() { globalPolicySys = nil }() @@ -844,7 +844,7 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str } -// Wrapper for calling PutObject API handler tests using streaming signature v4 for both XL multiple disks and FS single drive setup. +// Wrapper for calling PutObject API handler tests using streaming signature v4 for both Erasure multiple disks and FS single drive setup. func TestAPIPutObjectStreamSigV4Handler(t *testing.T) { defer DetectTestLeak(t)() ExecObjectLayerAPITest(t, testAPIPutObjectStreamSigV4Handler, []string{"PutObject"}) @@ -1162,7 +1162,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam } } -// Wrapper for calling PutObject API handler tests for both XL multiple disks and FS single drive setup. +// Wrapper for calling PutObject API handler tests for both Erasure multiple disks and FS single drive setup. func TestAPIPutObjectHandler(t *testing.T) { defer DetectTestLeak(t)() ExecObjectLayerAPITest(t, testAPIPutObjectHandler, []string{"PutObject"}) @@ -1522,7 +1522,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam } } -// Wrapper for calling Copy Object Part API handler tests for both XL multiple disks and single node setup. +// Wrapper for calling Copy Object Part API handler tests for both Erasure multiple disks and single node setup. 
func TestAPICopyObjectPartHandler(t *testing.T) { defer DetectTestLeak(t)() ExecObjectLayerAPITest(t, testAPICopyObjectPartHandler, []string{"CopyObjectPart"}) @@ -1766,7 +1766,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=17", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - expectedRespStatus: http.StatusNotFound, + expectedRespStatus: http.StatusBadRequest, }, // Test case - 16, copy part 1 from from newObject1 with null X-Amz-Copy-Source-Version-Id { @@ -1783,10 +1783,10 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri bucketName: bucketName, uploadID: uploadID, copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), - copySourceVersionID: "17", + copySourceVersionID: "17", // invalid id accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - expectedRespStatus: http.StatusNotFound, + expectedRespStatus: http.StatusBadRequest, }, } @@ -1816,6 +1816,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri if testCase.copySourceRange != "" { req.Header.Set("X-Amz-Copy-Source-Range", testCase.copySourceRange) } + // Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler. // Call the ServeHTTP to execute the handler, `func (api objectAPIHandlers) CopyObjectHandler` handles the request. apiRouter.ServeHTTP(rec, req) @@ -1861,7 +1862,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri } -// Wrapper for calling Copy Object API handler tests for both XL multiple disks and single node setup. +// Wrapper for calling Copy Object API handler tests for both Erasure multiple disks and single node setup. func TestAPICopyObjectHandler(t *testing.T) { defer DetectTestLeak(t)() ExecObjectLayerAPITest(t, testAPICopyObjectHandler, []string{"CopyObject"}) @@ -2159,7 +2160,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=17", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - expectedRespStatus: http.StatusNotFound, + expectedRespStatus: http.StatusBadRequest, }, // Test case - 19, copy metadata from newObject1 with null X-Amz-Copy-Source-Version-Id { @@ -2179,7 +2180,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, copySourceVersionID: "17", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - expectedRespStatus: http.StatusNotFound, + expectedRespStatus: http.StatusBadRequest, }, } @@ -2319,7 +2320,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, } -// Wrapper for calling NewMultipartUpload tests for both XL multiple disks and single node setup. +// Wrapper for calling NewMultipartUpload tests for both Erasure multiple disks and single node setup. // First register the HTTP handler for NewMutlipartUpload, then a HTTP request for NewMultipart upload is made. // The UploadID from the response body is parsed and its existence is asserted with an attempt to ListParts using it. 
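The flipped expectations in these test cases (StatusNotFound becomes StatusBadRequest) follow directly from the new guard: a malformed ID such as "17" now fails UUID validation before any object lookup could produce a 404. A quick standalone check:

    package main

    import (
        "fmt"

        "github.com/google/uuid"
    )

    func main() {
        if _, err := uuid.Parse("17"); err != nil {
            fmt.Println(err) // invalid UUID length: 2
        }
    }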
func TestAPINewMultipartHandler(t *testing.T) { @@ -2465,7 +2466,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string } -// Wrapper for calling NewMultipartUploadParallel tests for both XL multiple disks and single node setup. +// Wrapper for calling NewMultipartUploadParallel tests for both Erasure multiple disks and single node setup. // The objective of the test is to initialte multipart upload on the same object 10 times concurrently, // The UploadID from the response body is parsed and its existence is asserted with an attempt to ListParts using it. func TestAPINewMultipartHandlerParallel(t *testing.T) { @@ -3064,7 +3065,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq) } -// Wrapper for calling Delete Object API handler tests for both XL multiple disks and FS single drive setup. +// Wrapper for calling Delete Object API handler tests for both Erasure multiple disks and FS single drive setup. func TestAPIDeleteObjectHandler(t *testing.T) { defer DetectTestLeak(t)() ExecObjectLayerAPITest(t, testAPIDeleteObjectHandler, []string{"DeleteObject"}) diff --git a/cmd/object_api_suite_test.go b/cmd/object_api_suite_test.go index cd8764397..45939478a 100644 --- a/cmd/object_api_suite_test.go +++ b/cmd/object_api_suite_test.go @@ -70,20 +70,20 @@ func (r *testOneByteReadNoEOF) Read(p []byte) (n int, err error) { type ObjectLayerAPISuite struct{} -// Wrapper for calling testMakeBucket for both XL and FS. +// Wrapper for calling testMakeBucket for both Erasure and FS. func (s *ObjectLayerAPISuite) TestMakeBucket(t *testing.T) { ExecObjectLayerTest(t, testMakeBucket) } // Tests validate bucket creation. func testMakeBucket(obj ObjectLayer, instanceType string, t TestErrHandler) { - err := obj.MakeBucketWithLocation(context.Background(), "bucket-unknown", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket-unknown", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } } -// Wrapper for calling testMultipartObjectCreation for both XL and FS. +// Wrapper for calling testMultipartObjectCreation for both Erasure and FS. func (s *ObjectLayerAPISuite) TestMultipartObjectCreation(t *testing.T) { ExecObjectLayerTest(t, testMultipartObjectCreation) } @@ -91,7 +91,7 @@ func (s *ObjectLayerAPISuite) TestMultipartObjectCreation(t *testing.T) { // Tests validate creation of part files during Multipart operation. func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) { var opts ObjectOptions - err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -127,7 +127,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErr } } -// Wrapper for calling testMultipartObjectAbort for both XL and FS. +// Wrapper for calling testMultipartObjectAbort for both Erasure and FS. func (s *ObjectLayerAPISuite) TestMultipartObjectAbort(t *testing.T) { ExecObjectLayerTest(t, testMultipartObjectAbort) } @@ -135,7 +135,7 @@ func (s *ObjectLayerAPISuite) TestMultipartObjectAbort(t *testing.T) { // Tests validate abortion of Multipart operation. 
func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHandler) { var opts ObjectOptions - err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -172,7 +172,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan } } -// Wrapper for calling testMultipleObjectCreation for both XL and FS. +// Wrapper for calling testMultipleObjectCreation for both Erasure and FS. func (s *ObjectLayerAPISuite) TestMultipleObjectCreation(t *testing.T) { ExecObjectLayerTest(t, testMultipleObjectCreation) } @@ -181,7 +181,7 @@ func (s *ObjectLayerAPISuite) TestMultipleObjectCreation(t *testing.T) { func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) { objects := make(map[string][]byte) var opts ObjectOptions - err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -229,14 +229,14 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH } } -// Wrapper for calling TestPaging for both XL and FS. +// Wrapper for calling TestPaging for both Erasure and FS. func (s *ObjectLayerAPISuite) TestPaging(t *testing.T) { ExecObjectLayerTest(t, testPaging) } // Tests validate creation of objects and the order of listing using various filters for ListObjects operation. func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) { - obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) result, err := obj.ListObjects(context.Background(), "bucket", "", "", "", 0) if err != nil { t.Fatalf("%s: %s", instanceType, err) @@ -433,14 +433,14 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) { } } -// Wrapper for calling testObjectOverwriteWorks for both XL and FS. +// Wrapper for calling testObjectOverwriteWorks for both Erasure and FS. func (s *ObjectLayerAPISuite) TestObjectOverwriteWorks(t *testing.T) { ExecObjectLayerTest(t, testObjectOverwriteWorks) } // Tests validate overwriting of an existing object. func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHandler) { - err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -470,7 +470,7 @@ func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHan } } -// Wrapper for calling testNonExistantBucketOperations for both XL and FS. +// Wrapper for calling testNonExistantBucketOperations for both Erasure and FS. func (s *ObjectLayerAPISuite) TestNonExistantBucketOperations(t *testing.T) { ExecObjectLayerTest(t, testNonExistantBucketOperations) } @@ -487,18 +487,18 @@ func testNonExistantBucketOperations(obj ObjectLayer, instanceType string, t Tes } } -// Wrapper for calling testBucketRecreateFails for both XL and FS. +// Wrapper for calling testBucketRecreateFails for both Erasure and FS. func (s *ObjectLayerAPISuite) TestBucketRecreateFails(t *testing.T) { ExecObjectLayerTest(t, testBucketRecreateFails) } // Tests validate that recreation of the bucket fails. 
func testBucketRecreateFails(obj ObjectLayer, instanceType string, t TestErrHandler) { - err := obj.MakeBucketWithLocation(context.Background(), "string", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "string", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } - err = obj.MakeBucketWithLocation(context.Background(), "string", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "string", BucketOptions{}) if err == nil { t.Fatalf("%s: Expected error but found nil.", instanceType) } @@ -508,7 +508,7 @@ func testBucketRecreateFails(obj ObjectLayer, instanceType string, t TestErrHand } } -// Wrapper for calling testPutObject for both XL and FS. +// Wrapper for calling testPutObject for both Erasure and FS. func (s *ObjectLayerAPISuite) TestPutObject(t *testing.T) { ExecObjectLayerTest(t, testPutObject) } @@ -519,7 +519,7 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) { length := int64(len(content)) readerEOF := newTestReaderEOF(content) readerNoEOF := newTestReaderNoEOF(content) - err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -552,14 +552,14 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) { } } -// Wrapper for calling testPutObjectInSubdir for both XL and FS. +// Wrapper for calling testPutObjectInSubdir for both Erasure and FS. func (s *ObjectLayerAPISuite) TestPutObjectInSubdir(t *testing.T) { ExecObjectLayerTest(t, testPutObjectInSubdir) } // Tests validate PutObject with subdirectory prefix. func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandler) { - err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -584,7 +584,7 @@ func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandle } } -// Wrapper for calling testListBuckets for both XL and FS. +// Wrapper for calling testListBuckets for both Erasure and FS. func (s *ObjectLayerAPISuite) TestListBuckets(t *testing.T) { ExecObjectLayerTest(t, testListBuckets) } @@ -601,7 +601,7 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) { } // add one and test exists. - err = obj.MakeBucketWithLocation(context.Background(), "bucket1", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "bucket1", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -615,7 +615,7 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) { } // add two and test exists. - err = obj.MakeBucketWithLocation(context.Background(), "bucket2", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "bucket2", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -629,7 +629,7 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) { } // add three and test exists + prefix. 
- err = obj.MakeBucketWithLocation(context.Background(), "bucket22", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "bucket22", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -643,7 +643,7 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) { } } -// Wrapper for calling testListBucketsOrder for both XL and FS. +// Wrapper for calling testListBucketsOrder for both Erasure and FS. func (s *ObjectLayerAPISuite) TestListBucketsOrder(t *testing.T) { ExecObjectLayerTest(t, testListBucketsOrder) } @@ -653,11 +653,11 @@ func testListBucketsOrder(obj ObjectLayer, instanceType string, t TestErrHandler // if implementation contains a map, order of map keys will vary. // this ensures they return in the same order each time. // add one and test exists. - err := obj.MakeBucketWithLocation(context.Background(), "bucket1", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket1", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } - err = obj.MakeBucketWithLocation(context.Background(), "bucket2", "", false) + err = obj.MakeBucketWithLocation(context.Background(), "bucket2", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -677,7 +677,7 @@ func testListBucketsOrder(obj ObjectLayer, instanceType string, t TestErrHandler } } -// Wrapper for calling testListObjectsTestsForNonExistantBucket for both XL and FS. +// Wrapper for calling testListObjectsTestsForNonExistantBucket for both Erasure and FS. func (s *ObjectLayerAPISuite) TestListObjectsTestsForNonExistantBucket(t *testing.T) { ExecObjectLayerTest(t, testListObjectsTestsForNonExistantBucket) } @@ -699,14 +699,14 @@ func testListObjectsTestsForNonExistantBucket(obj ObjectLayer, instanceType stri } } -// Wrapper for calling testNonExistantObjectInBucket for both XL and FS. +// Wrapper for calling testNonExistantObjectInBucket for both Erasure and FS. func (s *ObjectLayerAPISuite) TestNonExistantObjectInBucket(t *testing.T) { ExecObjectLayerTest(t, testNonExistantObjectInBucket) } // Tests validate that GetObject fails on a non-existent bucket as expected. func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, t TestErrHandler) { - err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -726,7 +726,7 @@ func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, t TestE } } -// Wrapper for calling testGetDirectoryReturnsObjectNotFound for both XL and FS. +// Wrapper for calling testGetDirectoryReturnsObjectNotFound for both Erasure and FS. func (s *ObjectLayerAPISuite) TestGetDirectoryReturnsObjectNotFound(t *testing.T) { ExecObjectLayerTest(t, testGetDirectoryReturnsObjectNotFound) } @@ -734,7 +734,7 @@ func (s *ObjectLayerAPISuite) TestGetDirectoryReturnsObjectNotFound(t *testing.T // Tests validate that GetObject on an existing directory fails as expected. 
func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string, t TestErrHandler) { bucketName := "bucket" - err := obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -769,14 +769,14 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string, } } -// Wrapper for calling testContentType for both XL and FS. +// Wrapper for calling testContentType for both Erasure and FS. func (s *ObjectLayerAPISuite) TestContentType(t *testing.T) { ExecObjectLayerTest(t, testContentType) } // Test content-type. func testContentType(obj ObjectLayer, instanceType string, t TestErrHandler) { - err := obj.MakeBucketWithLocation(context.Background(), "bucket", "", false) + err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) if err != nil { t.Fatalf("%s: %s", instanceType, err) } diff --git a/cmd/os-readdir_other.go b/cmd/os-readdir_other.go index febd16f48..a7fd75ee2 100644 --- a/cmd/os-readdir_other.go +++ b/cmd/os-readdir_other.go @@ -21,7 +21,7 @@ package cmd import ( "io" "os" - "strings" + "syscall" ) // Return all the entries at the directory dirPath. @@ -34,16 +34,7 @@ func readDir(dirPath string) (entries []string, err error) { func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) error) error { d, err := os.Open(dirPath) if err != nil { - // File is really not found. - if os.IsNotExist(err) { - return errFileNotFound - } - - // File path cannot be verified since one of the parents is a file. - if strings.Contains(err.Error(), "not a directory") { - return errFileNotFound - } - return err + return osErrToFileErr(err) } defer d.Close() @@ -55,7 +46,7 @@ func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) e if err == io.EOF { break } - return err + return osErrToFileErr(err) } for _, fi := range fis { if err = filter(fi.Name(), fi.Mode()); err == errDoneForNow { @@ -71,16 +62,7 @@ func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) e func readDirN(dirPath string, count int) (entries []string, err error) { d, err := os.Open(dirPath) if err != nil { - // File is really not found. - if os.IsNotExist(err) { - return nil, errFileNotFound - } - - // File path cannot be verified since one of the parents is a file. - if strings.Contains(err.Error(), "not a directory") { - return nil, errFileNotFound - } - return nil, err + return nil, osErrToFileErr(err) } defer d.Close() @@ -99,7 +81,7 @@ func readDirN(dirPath string, count int) (entries []string, err error) { if err == io.EOF { break } - return nil, err + return nil, osErrToFileErr(err) } if count > 0 { if remaining <= len(fis) { @@ -125,3 +107,8 @@ func readDirN(dirPath string, count int) (entries []string, err error) { } return entries, nil } + +func globalSync() { + // no-op not sure about plan9/solaris support for syscall support + syscall.Sync() +} diff --git a/cmd/os-readdir_test.go b/cmd/os-readdir_test.go index 2c6afb2b5..64ef1ea3b 100644 --- a/cmd/os-readdir_test.go +++ b/cmd/os-readdir_test.go @@ -68,7 +68,7 @@ type result struct { func mustSetupDir(t *testing.T) string { // Create unique test directory. 
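The readDir rewrites in this and the neighbouring os-readdir files replace hand-rolled errno checks with a single osErrToFileErr translation. A condensed sketch of the mapping being centralized, limited to the cases the deleted code handled (the authoritative version lives in the storage layer):

    func osErrToFileErrSketch(err error) error {
        switch {
        case err == nil:
            return nil
        case os.IsNotExist(err):
            return errFileNotFound // path truly absent
        case isSysErrNotDir(err):
            return errFileNotFound // a parent component is a regular file
        case os.IsPermission(err):
            return errFileAccessDenied
        default:
            return err
        }
    }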
- dir, err := ioutil.TempDir(globalTestTmpDir, "minio-posix-list-dir") + dir, err := ioutil.TempDir(globalTestTmpDir, "minio-list-dir") if err != nil { t.Fatalf("Unable to setup directory, %s", err) } diff --git a/cmd/os-readdir_unix.go b/cmd/os-readdir_unix.go index 09494f799..ee318ce3f 100644 --- a/cmd/os-readdir_unix.go +++ b/cmd/os-readdir_unix.go @@ -87,17 +87,11 @@ func readDir(dirPath string) (entries []string, err error) { // readDir applies the filter function on each entries at dirPath, doesn't recurse into // the directory itself. func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) error) error { - fd, err := syscall.Open(dirPath, 0, 0) + f, err := os.Open(dirPath) if err != nil { - if os.IsNotExist(err) || isSysErrNotDir(err) { - return errFileNotFound - } - if os.IsPermission(err) { - return errFileAccessDenied - } - return err + return osErrToFileErr(err) } - defer syscall.Close(fd) + defer f.Close() buf := make([]byte, blockSize) boff := 0 // starting read position in buf @@ -106,7 +100,7 @@ func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) e for { if boff >= nbuf { boff = 0 - nbuf, err = syscall.ReadDirent(fd, buf) + nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) if err != nil { if isSysErrNotDir(err) { return errFileNotFound @@ -140,17 +134,11 @@ func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) e // Return count entries at the directory dirPath and all entries // if count is set to -1 func readDirN(dirPath string, count int) (entries []string, err error) { - fd, err := syscall.Open(dirPath, 0, 0) + f, err := os.Open(dirPath) if err != nil { - if os.IsNotExist(err) || isSysErrNotDir(err) { - return nil, errFileNotFound - } - if os.IsPermission(err) { - return nil, errFileAccessDenied - } - return nil, err + return nil, osErrToFileErr(err) } - defer syscall.Close(fd) + defer f.Close() bufp := direntPool.Get().(*[]byte) defer direntPool.Put(bufp) @@ -161,7 +149,7 @@ func readDirN(dirPath string, count int) (entries []string, err error) { for count != 0 { if boff >= nbuf { boff = 0 - nbuf, err = syscall.ReadDirent(fd, *bufp) + nbuf, err = syscall.ReadDirent(int(f.Fd()), *bufp) if err != nil { if isSysErrNotDir(err) { return nil, errFileNotFound @@ -209,3 +197,7 @@ func readDirN(dirPath string, count int) (entries []string, err error) { } return } + +func globalSync() { + syscall.Sync() +} diff --git a/cmd/os-readdir_windows.go b/cmd/os-readdir_windows.go index 377bf38f6..191ad6a24 100644 --- a/cmd/os-readdir_windows.go +++ b/cmd/os-readdir_windows.go @@ -20,7 +20,6 @@ package cmd import ( "os" - "strings" "syscall" ) @@ -32,43 +31,24 @@ func readDir(dirPath string) (entries []string, err error) { // readDir applies the filter function on each entries at dirPath, doesn't recurse into // the directory itself. func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) error) error { - d, err := os.Open(dirPath) + f, err := os.Open(dirPath) if err != nil { - // File is really not found. - if os.IsNotExist(err) { - return errFileNotFound - } - - // File path cannot be verified since one of the parents is a file. - if strings.Contains(err.Error(), "not a directory") { - return errFileNotFound - } - return err - } - defer d.Close() - - st, err := d.Stat() - if err != nil { - return err - } - // Not a directory return error. 
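Another detail of the unix rewrite above: the raw syscall.Open/syscall.Close pair becomes os.Open plus syscall.ReadDirent on the file's descriptor. That keeps the low-level dirent batching while yielding *os.PathError values that osErrToFileErr can classify, and a Close that can be deferred. The pattern in isolation, as it appears inside readDirN:

    f, err := os.Open(dirPath)
    if err != nil {
        return nil, osErrToFileErr(err)
    }
    defer f.Close()

    buf := make([]byte, 8<<10) // stand-in for the pooled dirent buffer
    nbuf, err := syscall.ReadDirent(int(f.Fd()), buf)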
- if !st.IsDir() { - return errFileAccessDenied + return osErrToFileErr(err) } + defer f.Close() data := &syscall.Win32finddata{} for { - e := syscall.FindNextFile(syscall.Handle(d.Fd()), data) + e := syscall.FindNextFile(syscall.Handle(f.Fd()), data) if e != nil { if e == syscall.ERROR_NO_MORE_FILES { break } else { - err = &os.PathError{ + return osErrToFileErr(&os.PathError{ Op: "FindNextFile", Path: dirPath, Err: e, - } - return err + }) } } name := syscall.UTF16ToString(data.FileName[0:]) @@ -82,7 +62,7 @@ func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) e if data.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { typ = os.ModeDir } - if err = filter(name, typ); err == errDoneForNow { + if e = filter(name, typ); e == errDoneForNow { // filtering requested to return by caller. return nil } @@ -93,43 +73,25 @@ func readDirFilterFn(dirPath string, filter func(name string, typ os.FileMode) e // Return N entries at the directory dirPath. If count is -1, return all entries func readDirN(dirPath string, count int) (entries []string, err error) { - d, err := os.Open(dirPath) + f, err := os.Open(dirPath) if err != nil { - // File is really not found. - if os.IsNotExist(err) { - return nil, errFileNotFound - } - - // File path cannot be verified since one of the parents is a file. - if strings.Contains(err.Error(), "not a directory") { - return nil, errFileNotFound - } - return nil, err - } - defer d.Close() - - st, err := d.Stat() - if err != nil { - return nil, err - } - // Not a directory return error. - if !st.IsDir() { - return nil, errFileNotFound + return nil, osErrToFileErr(err) } + defer f.Close() data := &syscall.Win32finddata{} for count != 0 { - err = syscall.FindNextFile(syscall.Handle(d.Fd()), data) - if err != nil { - if err == syscall.ERROR_NO_MORE_FILES { + e := syscall.FindNextFile(syscall.Handle(f.Fd()), data) + if e != nil { + if e == syscall.ERROR_NO_MORE_FILES { break } else { - return nil, &os.PathError{ + return nil, osErrToFileErr(&os.PathError{ Op: "FindNextFile", Path: dirPath, - Err: err, - } + Err: e, + }) } } @@ -147,5 +109,10 @@ func readDirN(dirPath string, count int) (entries []string, err error) { } count-- } + return entries, nil } + +func globalSync() { + // no-op on windows +} diff --git a/cmd/os-reliable_test.go b/cmd/os-reliable_test.go index ba1b1a256..c2cee0d27 100644 --- a/cmd/os-reliable_test.go +++ b/cmd/os-reliable_test.go @@ -23,10 +23,10 @@ import ( // Tests - mkdirAll() func TestOSMkdirAll(t *testing.T) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -45,10 +45,10 @@ func TestOSMkdirAll(t *testing.T) { // Tests - renameAll() func TestOSRenameAll(t *testing.T) { - // create posix test setup - _, path, err := newPosixTestSetup() + // create xlStorage test setup + _, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) diff --git a/cmd/peer-rest-server.go b/cmd/peer-rest-server.go index 4799e5ca0..8bd1807fc 100644 --- a/cmd/peer-rest-server.go +++ b/cmd/peer-rest-server.go @@ -667,8 +667,8 @@ func (s *peerRESTServer) CycleServerBloomFilterHandler(w http.ResponseWriter, r s.writeErrorResponse(w, err) return } + 
logger.LogIf(ctx, gob.NewEncoder(w).Encode(bf)) - w.(http.Flusher).Flush() } // PutBucketNotificationHandler - Set bucket policy. @@ -702,7 +702,7 @@ func (s *peerRESTServer) PutBucketNotificationHandler(w http.ResponseWriter, r * } // Return disk IDs of all the local disks. -func getLocalDiskIDs(z *xlZones) []string { +func getLocalDiskIDs(z *erasureZones) []string { var ids []string for zoneIdx := range z.zones { @@ -746,7 +746,7 @@ func (s *peerRESTServer) GetLocalDiskIDs(w http.ResponseWriter, r *http.Request) return } - z, ok := objLayer.(*xlZones) + z, ok := objLayer.(*erasureZones) if !ok { s.writeErrorResponse(w, errServerNotInitialized) return diff --git a/cmd/posix-diskid-check.go b/cmd/posix-diskid-check.go deleted file mode 100644 index abe5bd5f1..000000000 --- a/cmd/posix-diskid-check.go +++ /dev/null @@ -1,224 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "context" - "io" -) - -// Detects change in underlying disk. -type posixDiskIDCheck struct { - storage *posix - diskID string -} - -func (p *posixDiskIDCheck) String() string { - return p.storage.String() -} - -func (p *posixDiskIDCheck) IsOnline() bool { - storedDiskID, err := p.storage.GetDiskID() - if err != nil { - return false - } - return storedDiskID == p.diskID -} - -func (p *posixDiskIDCheck) IsLocal() bool { - return p.storage.IsLocal() -} - -func (p *posixDiskIDCheck) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) (dataUsageCache, error) { - return p.storage.CrawlAndGetDataUsage(ctx, cache) -} - -func (p *posixDiskIDCheck) Hostname() string { - return p.storage.Hostname() -} - -func (p *posixDiskIDCheck) Close() error { - return p.storage.Close() -} - -func (p *posixDiskIDCheck) GetDiskID() (string, error) { - return p.storage.GetDiskID() -} - -func (p *posixDiskIDCheck) SetDiskID(id string) { - p.diskID = id -} - -func (p *posixDiskIDCheck) isDiskStale() bool { - if p.diskID == "" { - // For empty disk-id we allow the call as the server might be coming up and trying to read format.json - // or create format.json - return false - } - storedDiskID, err := p.storage.GetDiskID() - if err == nil && p.diskID == storedDiskID { - return false - } - return true -} - -func (p *posixDiskIDCheck) DiskInfo() (info DiskInfo, err error) { - if p.isDiskStale() { - return info, errDiskNotFound - } - return p.storage.DiskInfo() -} - -func (p *posixDiskIDCheck) MakeVolBulk(volumes ...string) (err error) { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.MakeVolBulk(volumes...) 
-} - -func (p *posixDiskIDCheck) MakeVol(volume string) (err error) { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.MakeVol(volume) -} - -func (p *posixDiskIDCheck) ListVols() ([]VolInfo, error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.ListVols() -} - -func (p *posixDiskIDCheck) StatVol(volume string) (vol VolInfo, err error) { - if p.isDiskStale() { - return vol, errDiskNotFound - } - return p.storage.StatVol(volume) -} - -func (p *posixDiskIDCheck) DeleteVol(volume string, forceDelete bool) (err error) { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.DeleteVol(volume, forceDelete) -} - -func (p *posixDiskIDCheck) Walk(volume, dirPath string, marker string, recursive bool, leafFile string, readMetadataFn readMetadataFunc, endWalkCh <-chan struct{}) (chan FileInfo, error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.Walk(volume, dirPath, marker, recursive, leafFile, readMetadataFn, endWalkCh) -} - -func (p *posixDiskIDCheck) WalkSplunk(volume, dirPath string, marker string, endWalkCh <-chan struct{}) (chan FileInfo, error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.WalkSplunk(volume, dirPath, marker, endWalkCh) -} - -func (p *posixDiskIDCheck) ListDir(volume, dirPath string, count int, leafFile string) ([]string, error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.ListDir(volume, dirPath, count, leafFile) -} - -func (p *posixDiskIDCheck) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) { - if p.isDiskStale() { - return 0, errDiskNotFound - } - return p.storage.ReadFile(volume, path, offset, buf, verifier) -} - -func (p *posixDiskIDCheck) AppendFile(volume string, path string, buf []byte) (err error) { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.AppendFile(volume, path, buf) -} - -func (p *posixDiskIDCheck) CreateFile(volume, path string, size int64, reader io.Reader) error { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.CreateFile(volume, path, size, reader) -} - -func (p *posixDiskIDCheck) ReadFileStream(volume, path string, offset, length int64) (io.ReadCloser, error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.ReadFileStream(volume, path, offset, length) -} - -func (p *posixDiskIDCheck) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.RenameFile(srcVolume, srcPath, dstVolume, dstPath) -} - -func (p *posixDiskIDCheck) StatFile(volume string, path string) (file FileInfo, err error) { - if p.isDiskStale() { - return file, errDiskNotFound - } - return p.storage.StatFile(volume, path) -} - -func (p *posixDiskIDCheck) DeleteFile(volume string, path string) (err error) { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.DeleteFile(volume, path) -} - -func (p *posixDiskIDCheck) DeleteFileBulk(volume string, paths []string) (errs []error, err error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.DeleteFileBulk(volume, paths) -} - -func (p *posixDiskIDCheck) DeletePrefixes(volume string, paths []string) (errs []error, err error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.DeletePrefixes(volume, paths) -} - -func (p *posixDiskIDCheck) VerifyFile(volume, path string, size int64, algo BitrotAlgorithm, 
sum []byte, shardSize int64) error { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.VerifyFile(volume, path, size, algo, sum, shardSize) -} - -func (p *posixDiskIDCheck) WriteAll(volume string, path string, reader io.Reader) (err error) { - if p.isDiskStale() { - return errDiskNotFound - } - return p.storage.WriteAll(volume, path, reader) -} - -func (p *posixDiskIDCheck) ReadAll(volume string, path string) (buf []byte, err error) { - if p.isDiskStale() { - return nil, errDiskNotFound - } - return p.storage.ReadAll(volume, path) -} diff --git a/cmd/post-policy_test.go b/cmd/post-policy_test.go index 005b22a0a..08baaffb4 100644 --- a/cmd/post-policy_test.go +++ b/cmd/post-policy_test.go @@ -113,7 +113,7 @@ func newPostPolicyBytesV2(bucketName, objectKey string, expiration time.Time) [] return []byte(retStr) } -// Wrapper for calling TestPostPolicyBucketHandler tests for both XL multiple disks and single node setup. +// Wrapper for calling TestPostPolicyBucketHandler tests for both Erasure multiple disks and single node setup. func TestPostPolicyBucketHandler(t *testing.T) { ExecObjectLayerTest(t, testPostPolicyBucketHandler) } @@ -128,7 +128,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr bucketName := getRandomBucketName() var opts ObjectOptions - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestAPIEndPoints(obj, []string{"PostPolicy"}) credentials := globalActiveCred @@ -140,7 +140,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr // objectNames[0]. // uploadIds [0]. // Create bucket before initiating NewMultipartUpload. - err := obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) @@ -414,7 +414,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr } -// Wrapper for calling TestPostPolicyBucketHandlerRedirect tests for both XL multiple disks and single node setup. +// Wrapper for calling TestPostPolicyBucketHandlerRedirect tests for both Erasure multiple disks and single node setup. func TestPostPolicyBucketHandlerRedirect(t *testing.T) { ExecObjectLayerTest(t, testPostPolicyBucketHandlerRedirect) } @@ -442,7 +442,7 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t t.Fatal(err) } - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestAPIEndPoints(obj, []string{"PostPolicy"}) credentials := globalActiveCred @@ -450,7 +450,7 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t curTime := UTCNow() curTimePlus5Min := curTime.Add(time.Minute * 5) - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) diff --git a/cmd/prepare-storage.go b/cmd/prepare-storage.go index 9c813c96e..d29c58687 100644 --- a/cmd/prepare-storage.go +++ b/cmd/prepare-storage.go @@ -60,7 +60,7 @@ var printEndpointError = func() func(Endpoint, error) { }() // Migrates backend format of local disks. 
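// The renamed helper below fans the migration out with one goroutine per
// local endpoint via errgroup.WithNErrs. A minimal sketch of the same
// pattern (migrate() is a hypothetical stand-in for the per-disk work):
//
//	g := errgroup.WithNErrs(len(endpoints))
//	for index := range endpoints {
//		index := index
//		g.Go(func() error {
//			return migrate(endpoints[index])
//		}, index)
//	}
//	for _, err := range g.Wait() {
//		if err != nil {
//			return err
//		}
//	}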
-func formatXLMigrateLocalEndpoints(endpoints Endpoints) error { +func formatErasureMigrateLocalEndpoints(endpoints Endpoints) error { g := errgroup.WithNErrs(len(endpoints)) for index, endpoint := range endpoints { if !endpoint.IsLocal { @@ -76,7 +76,7 @@ func formatXLMigrateLocalEndpoints(endpoints Endpoints) error { } return fmt.Errorf("unable to access (%s) %w", formatPath, err) } - return formatXLMigrate(epPath) + return formatErasureMigrate(epPath) }, index) } for _, err := range g.Wait() { @@ -88,7 +88,7 @@ func formatXLMigrateLocalEndpoints(endpoints Endpoints) error { } // Cleans up tmp directory of local disks. -func formatXLCleanupTmpLocalEndpoints(endpoints Endpoints) error { +func formatErasureCleanupTmpLocalEndpoints(endpoints Endpoints) error { g := errgroup.WithNErrs(len(endpoints)) for index, endpoint := range endpoints { if !endpoint.IsLocal { @@ -157,7 +157,7 @@ func formatXLCleanupTmpLocalEndpoints(endpoints Endpoints) error { // the disk UUID association. Below error message is returned when // we see this situation in format.json, for more info refer // https://github.com/minio/minio/issues/5667 -var errXLV3ThisEmpty = fmt.Errorf("XL format version 3 has This field empty") +var errErasureV3ThisEmpty = fmt.Errorf("Erasure format version 3 has This field empty") // IsServerResolvable - checks if the endpoint is resolvable // by sending a naked HTTP request with liveness checks. @@ -199,10 +199,10 @@ func IsServerResolvable(endpoint Endpoint) error { return nil } -// connect to list of endpoints and load all XL disk formats, validate the formats are correct +// connect to list of endpoints and load all Erasure disk formats, validate the formats are correct // and are in quorum, if no formats are found attempt to initialize all of them for the first // time. additionally make sure to close all the disks used in this attempt. -func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, zoneCount, setCount, drivesPerSet int, deploymentID string) (storageDisks []StorageAPI, format *formatXLV3, err error) { +func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, zoneCount, setCount, drivesPerSet int, deploymentID string) (storageDisks []StorageAPI, format *formatErasureV3, err error) { // Initialize all storage disks storageDisks, errs := initStorageDisksWithErrors(endpoints) @@ -224,7 +224,7 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, } // Attempt to load all `format.json` from all disks. - formatConfigs, sErrs := loadFormatXLAll(storageDisks, false) + formatConfigs, sErrs := loadFormatErasureAll(storageDisks, false) // Check if we have for i, sErr := range sErrs { if _, ok := formatCriticalErrors[sErr]; ok { @@ -241,19 +241,19 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, // Pre-emptively check if one of the formatted disks // is invalid. This function returns success for the // most part unless one of the formats is not consistent - // with expected XL format. For example if a user is - // trying to pool FS backend into an XL set. - if err = checkFormatXLValues(formatConfigs, drivesPerSet); err != nil { + // with expected Erasure format. For example if a user is + // trying to pool FS backend into an Erasure set. + if err = checkFormatErasureValues(formatConfigs, drivesPerSet); err != nil { return nil, nil, err } // All disks report unformatted we should initialized everyone. 
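// Note: only the first server (firstDisk) performs the initial format; the
// other servers fall through to the retry loop in waitForFormatErasure
// below until a quorum of formatted disks becomes visible.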
- if shouldInitXLDisks(sErrs) && firstDisk { + if shouldInitErasureDisks(sErrs) && firstDisk { logger.Info("Formatting %s zone, %v set(s), %v drives per set.", humanize.Ordinal(zoneCount), setCount, drivesPerSet) // Initialize erasure code format on disks - format, err = initFormatXL(GlobalContext, storageDisks, setCount, drivesPerSet, deploymentID) + format, err = initFormatErasure(GlobalContext, storageDisks, setCount, drivesPerSet, deploymentID) if err != nil { return nil, nil, err } @@ -281,16 +281,16 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, // This migration failed to capture '.This' field properly which indicates // the disk UUID association. Below function is called to handle and fix // this regression, for more info refer https://github.com/minio/minio/issues/5667 - if err = fixFormatXLV3(storageDisks, endpoints, formatConfigs); err != nil { + if err = fixFormatErasureV3(storageDisks, endpoints, formatConfigs); err != nil { return nil, nil, err } // If any of the .This field is still empty, we return error. - if formatXLV3ThisEmpty(formatConfigs) { - return nil, nil, errXLV3ThisEmpty + if formatErasureV3ThisEmpty(formatConfigs) { + return nil, nil, errErasureV3ThisEmpty } - format, err = getFormatXLInQuorum(formatConfigs) + format, err = getFormatErasureInQuorum(formatConfigs) if err != nil { return nil, nil, err } @@ -300,35 +300,35 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, if !firstDisk { return nil, nil, errNotFirstDisk } - if err = formatXLFixDeploymentID(endpoints, storageDisks, format); err != nil { + if err = formatErasureFixDeploymentID(endpoints, storageDisks, format); err != nil { return nil, nil, err } } globalDeploymentID = format.ID - if err = formatXLFixLocalDeploymentID(endpoints, storageDisks, format); err != nil { + if err = formatErasureFixLocalDeploymentID(endpoints, storageDisks, format); err != nil { return nil, nil, err } // The will always recreate some directories inside .minio.sys of // the local disk such as tmp, multipart and background-ops - initXLMetaVolumesInLocalDisks(storageDisks, formatConfigs) + initErasureMetaVolumesInLocalDisks(storageDisks, formatConfigs) return storageDisks, format, nil } // Format disks before initialization of object layer. -func waitForFormatXL(firstDisk bool, endpoints Endpoints, zoneCount, setCount, drivesPerSet int, deploymentID string) ([]StorageAPI, *formatXLV3, error) { +func waitForFormatErasure(firstDisk bool, endpoints Endpoints, zoneCount, setCount, drivesPerSet int, deploymentID string) ([]StorageAPI, *formatErasureV3, error) { if len(endpoints) == 0 || setCount == 0 || drivesPerSet == 0 { return nil, nil, errInvalidArgument } - if err := formatXLMigrateLocalEndpoints(endpoints); err != nil { + if err := formatErasureMigrateLocalEndpoints(endpoints); err != nil { return nil, nil, err } - if err := formatXLCleanupTmpLocalEndpoints(endpoints); err != nil { + if err := formatErasureCleanupTmpLocalEndpoints(endpoints); err != nil { return nil, nil, err } @@ -358,11 +358,11 @@ func waitForFormatXL(firstDisk bool, endpoints Endpoints, zoneCount, setCount, d // Fresh setup, wait for other servers to come up. logger.Info("Waiting for all other servers to be online to format the disks.") continue - case errXLReadQuorum: + case errErasureReadQuorum: // no quorum available continue to wait for minimum number of servers. 
logger.Info("Waiting for a minimum of %d disks to come online (elapsed %s)\n", len(endpoints)/2, getElapsedTime()) continue - case errXLV3ThisEmpty: + case errErasureV3ThisEmpty: // need to wait for this error to be healed, so continue. continue default: diff --git a/cmd/routers.go b/cmd/routers.go index e78e72628..85a137cdc 100644 --- a/cmd/routers.go +++ b/cmd/routers.go @@ -22,8 +22,8 @@ import ( "github.com/gorilla/mux" ) -// Composed function registering routers for only distributed XL setup. -func registerDistXLRouters(router *mux.Router, endpointZones EndpointZones) { +// Composed function registering routers for only distributed Erasure setup. +func registerDistErasureRouters(router *mux.Router, endpointZones EndpointZones) { // Register storage REST router only if its a distributed setup. registerStorageRESTHandlers(router, endpointZones) @@ -87,8 +87,8 @@ func configureServerHandler(endpointZones EndpointZones) (http.Handler, error) { router := mux.NewRouter().SkipClean(true).UseEncodedPath() // Initialize distributed NS lock. - if globalIsDistXL { - registerDistXLRouters(router, endpointZones) + if globalIsDistErasure { + registerDistErasureRouters(router, endpointZones) } // Add STS router always. diff --git a/cmd/server-main.go b/cmd/server-main.go index f02de2528..21eb9af8a 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -116,9 +116,9 @@ func serverHandleCmdArgs(ctx *cli.Context) { globalMinioHost, globalMinioPort = mustSplitHostPort(globalMinioAddr) endpoints := strings.Fields(env.Get(config.EnvEndpoints, "")) if len(endpoints) > 0 { - globalEndpoints, globalXLSetDriveCount, setupType, err = createServerEndpoints(globalCLIContext.Addr, endpoints...) + globalEndpoints, globalErasureSetDriveCount, setupType, err = createServerEndpoints(globalCLIContext.Addr, endpoints...) } else { - globalEndpoints, globalXLSetDriveCount, setupType, err = createServerEndpoints(globalCLIContext.Addr, ctx.Args()...) + globalEndpoints, globalErasureSetDriveCount, setupType, err = createServerEndpoints(globalCLIContext.Addr, ctx.Args()...) } logger.FatalIf(err, "Invalid command line arguments") @@ -128,10 +128,10 @@ func serverHandleCmdArgs(ctx *cli.Context) { // To avoid this error situation we check for port availability. logger.FatalIf(checkPortAvailability(globalMinioHost, globalMinioPort), "Unable to start the server") - globalIsXL = (setupType == XLSetupType) - globalIsDistXL = (setupType == DistXLSetupType) - if globalIsDistXL { - globalIsXL = true + globalIsErasure = (setupType == ErasureSetupType) + globalIsDistErasure = (setupType == DistErasureSetupType) + if globalIsDistErasure { + globalIsErasure = true } } @@ -167,6 +167,9 @@ func newAllSubsystems() { // Create new bucket quota subsystem globalBucketQuotaSys = NewBucketQuotaSys() + + // Create new bucket versioning subsystem + globalBucketVersioningSys = NewBucketVersioningSys() } func initSafeMode(ctx context.Context, newObject ObjectLayer) (err error) { @@ -225,7 +228,7 @@ func initSafeMode(ctx context.Context, newObject ObjectLayer) (err error) { } // These messages only meant primarily for distributed setup, so only log during distributed setup. - if globalIsDistXL { + if globalIsDistErasure { logger.Info("Waiting for all MinIO sub-systems to be initialized.. 
lock acquired") } @@ -237,7 +240,7 @@ func initSafeMode(ctx context.Context, newObject ObjectLayer) (err error) { // if all sub-systems initialized successfully return right away if err = initAllSubsystems(retryCtx, newObject); err == nil { // All successful return. - if globalIsDistXL { + if globalIsDistErasure { // These messages only meant primarily for distributed setup, so only log during distributed setup. logger.Info("All MinIO sub-systems initialized successfully") } @@ -278,7 +281,7 @@ func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) { // you want to add extra context to your error. This // ensures top level retry works accordingly. var buckets []BucketInfo - if globalIsDistXL || globalIsXL { + if globalIsDistErasure || globalIsErasure { // List buckets to heal, and be re-used for loading configs. buckets, err = newObject.ListBucketsHeal(ctx) if err != nil { @@ -289,7 +292,7 @@ func initAllSubsystems(ctx context.Context, newObject ObjectLayer) (err error) { wquorum := &InsufficientWriteQuorum{} rquorum := &InsufficientReadQuorum{} for _, bucket := range buckets { - if err = newObject.MakeBucketWithLocation(ctx, bucket.Name, "", false); err != nil { + if err = newObject.MakeBucketWithLocation(ctx, bucket.Name, BucketOptions{}); err != nil { if errors.As(err, &wquorum) || errors.As(err, &rquorum) { // Return the error upwards for the caller to retry. return fmt.Errorf("Unable to heal bucket: %w", err) @@ -346,7 +349,7 @@ func startBackgroundOps(ctx context.Context, objAPI ObjectLayer) { // No unlock for "leader" lock. } - if globalIsXL { + if globalIsErasure { initGlobalHeal(ctx, objAPI) } @@ -396,7 +399,7 @@ func serverMain(ctx *cli.Context) { }() // Is distributed setup, error out if no certificates are found for HTTPS endpoints. - if globalIsDistXL { + if globalIsDistErasure { if globalEndpoints.HTTPS() && !globalIsSSL { logger.Fatal(config.ErrNoCertsAndHTTPSEndpoints(nil), "Unable to start the server") } @@ -410,7 +413,7 @@ func serverMain(ctx *cli.Context) { checkUpdate(getMinioMode()) } - if !globalActiveCred.IsValid() && globalIsDistXL { + if !globalActiveCred.IsValid() && globalIsDistErasure { logger.Fatal(config.ErrEnvCredentialsMissingDistributed(nil), "Unable to initialize the server in distributed mode") } @@ -418,7 +421,7 @@ func serverMain(ctx *cli.Context) { // Set system resources to maximum. setMaxResources() - if globalIsXL { + if globalIsErasure { // Init global heal state globalAllHealState = initHealState() globalBackgroundHealState = initHealState() @@ -467,7 +470,7 @@ func serverMain(ctx *cli.Context) { globalHTTPServer = httpServer globalObjLayerMutex.Unlock() - if globalIsDistXL && globalEndpoints.FirstLocal() { + if globalIsDistErasure && globalEndpoints.FirstLocal() { for { // Additionally in distributed setup, validate the setup and configuration. 
err := verifyServerSystemConfig(globalEndpoints) @@ -502,7 +505,7 @@ func serverMain(ctx *cli.Context) { newAllSubsystems() // Enable healing to heal drives if possible - if globalIsXL { + if globalIsErasure { initBackgroundHealing(GlobalContext, newObject) initLocalDisksAutoHeal(GlobalContext, newObject) } @@ -549,5 +552,5 @@ func newObjectLayer(ctx context.Context, endpointZones EndpointZones) (newObject return NewFSObjectLayer(endpointZones[0].Endpoints[0].Path) } - return newXLZones(ctx, endpointZones) + return newErasureZones(ctx, endpointZones) } diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go index e05bbdcde..07340bc87 100644 --- a/cmd/server-main_test.go +++ b/cmd/server-main_test.go @@ -43,7 +43,7 @@ func TestNewObjectLayer(t *testing.T) { t.Fatal("Unexpected object layer detected", reflect.TypeOf(obj)) } - // Tests for XL object layer initialization. + // Tests for Erasure object layer initialization. // Create temporary backend for the test server. nDisks = 16 @@ -58,7 +58,7 @@ func TestNewObjectLayer(t *testing.T) { t.Fatal("Unexpected object layer initialization error", err) } - _, ok = obj.(*xlZones) + _, ok = obj.(*erasureZones) if !ok { t.Fatal("Unexpected object layer detected", reflect.TypeOf(obj)) } diff --git a/cmd/server_test.go b/cmd/server_test.go index df59bf654..8cf86e7e8 100644 --- a/cmd/server_test.go +++ b/cmd/server_test.go @@ -36,7 +36,7 @@ import ( "github.com/minio/minio/pkg/bucket/policy" ) -// API suite container common to both FS and XL. +// API suite container common to both FS and Erasure. type TestSuiteCommon struct { serverType string testServer TestServer @@ -124,10 +124,10 @@ func TestServerSuite(t *testing.T) { {serverType: "FS", signer: signerV2}, // Init and run test on FS backend, with tls enabled. {serverType: "FS", signer: signerV4, secure: true}, - // Init and run test on XL backend. - {serverType: "XL", signer: signerV4}, - // Init and run test on XLSet backend. - {serverType: "XLSet", signer: signerV4}, + // Init and run test on Erasure backend. + {serverType: "Erasure", signer: signerV4}, + // Init and run test on ErasureSet backend. + {serverType: "ErasureSet", signer: signerV4}, } for i, testCase := range testCases { t.Run(fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.serverType), func(t *testing.T) { @@ -516,7 +516,7 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) { // assert the status of http response. c.Assert(response.StatusCode, http.StatusOK) // Append all objects. 
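// With versioning, the multi-delete request body now carries ObjectToDelete
// entries (object name plus an optional version ID) instead of the plain
// ObjectIdentifier values, and responses echo DeletedObject records, as the
// hunks below show.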
- delObjReq.Objects = append(delObjReq.Objects, ObjectIdentifier{ + delObjReq.Objects = append(delObjReq.Objects, ObjectToDelete{ ObjectName: objName, }) } @@ -539,7 +539,10 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) { c.Assert(err, nil) for i := 0; i < 10; i++ { // All the objects should be under deleted list (including non-existent object) - c.Assert(deleteResp.DeletedObjects[i], delObjReq.Objects[i]) + c.Assert(deleteResp.DeletedObjects[i], DeletedObject{ + ObjectName: delObjReq.Objects[i].ObjectName, + VersionID: delObjReq.Objects[i].VersionID, + }) } c.Assert(len(deleteResp.Errors), 0) @@ -559,7 +562,10 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) { c.Assert(err, nil) c.Assert(len(deleteResp.DeletedObjects), len(delObjReq.Objects)) for i := 0; i < 10; i++ { - c.Assert(deleteResp.DeletedObjects[i], delObjReq.Objects[i]) + c.Assert(deleteResp.DeletedObjects[i], DeletedObject{ + ObjectName: delObjReq.Objects[i].ObjectName, + VersionID: delObjReq.Objects[i].VersionID, + }) } c.Assert(len(deleteResp.Errors), 0) } diff --git a/cmd/setup-type.go b/cmd/setup-type.go index 516e58213..df91868dd 100644 --- a/cmd/setup-type.go +++ b/cmd/setup-type.go @@ -26,11 +26,11 @@ const ( // FSSetupType - FS setup type enum. FSSetupType - // XLSetupType - XL setup type enum. - XLSetupType + // ErasureSetupType - Erasure setup type enum. + ErasureSetupType - // DistXLSetupType - Distributed XL setup type enum. - DistXLSetupType + // DistErasureSetupType - Distributed Erasure setup type enum. + DistErasureSetupType // GatewaySetupType - gateway setup type enum. GatewaySetupType @@ -40,10 +40,10 @@ func (setupType SetupType) String() string { switch setupType { case FSSetupType: return globalMinioModeFS - case XLSetupType: - return globalMinioModeXL - case DistXLSetupType: - return globalMinioModeDistXL + case ErasureSetupType: + return globalMinioModeErasure + case DistErasureSetupType: + return globalMinioModeDistErasure case GatewaySetupType: return globalMinioModeGatewayPrefix } diff --git a/cmd/storage-datatypes.go b/cmd/storage-datatypes.go index 12f3ed194..775ffecb7 100644 --- a/cmd/storage-datatypes.go +++ b/cmd/storage-datatypes.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,8 +19,6 @@ package cmd import ( "os" "time" - - xhttp "github.com/minio/minio/cmd/http" ) // VolInfo - represents volume stat information. @@ -39,6 +37,29 @@ type FilesInfo struct { IsTruncated bool } +// FilesInfoVersions represents a list of file versions, +// additionally indicates if the list is last. +type FilesInfoVersions struct { + FilesVersions []FileInfoVersions + IsTruncated bool +} + +// FileInfoVersions represent a list of versions for a given file. +type FileInfoVersions struct { + // Name of the volume. + Volume string + + // Name of the file. + Name string + + // Represents the latest mod time of the + // latest version. + LatestModTime time.Time + + Versions []FileInfo + Deleted []FileInfo +} + // FileInfo - represents file stat information. type FileInfo struct { // Name of the volume. @@ -47,7 +68,21 @@ type FileInfo struct { // Name of the file. Name string - // Date and time when the file was last modified. + // Version of the file. 
+	VersionID string
+
+	// Indicates if the version is the latest
+	IsLatest bool
+
+	// Deleted is set when this FileInfo represents
+	// a delete marker for a versioned bucket.
+	Deleted bool
+
+	// DataDir of the file
+	DataDir string
+
+	// Date and time when the file was last modified, if Deleted
+	// is 'true' this value represents when the file was deleted.
 	ModTime time.Time
 
 	// Total file size.
@@ -62,49 +97,18 @@ type FileInfo struct {
 	// All the parts per object.
 	Parts []ObjectPartInfo
 
-	Quorum int
+	// Erasure info for all objects.
+	Erasure ErasureInfo
 }
 
-// ToObjectInfo converts FileInfo into objectInfo.
-func (entry FileInfo) ToObjectInfo() ObjectInfo {
-	var objInfo ObjectInfo
-	if HasSuffix(entry.Name, SlashSeparator) {
-		objInfo = ObjectInfo{
-			Bucket: entry.Volume,
-			Name:   entry.Name,
-			IsDir:  true,
-		}
-	} else {
-		objInfo = ObjectInfo{
-			IsDir:           false,
-			Bucket:          entry.Volume,
-			Name:            entry.Name,
-			ModTime:         entry.ModTime,
-			Size:            entry.Size,
-			ContentType:     entry.Metadata["content-type"],
-			ContentEncoding: entry.Metadata["content-encoding"],
-		}
-
-		// Extract object tagging information
-		objInfo.UserTags = entry.Metadata[xhttp.AmzObjectTagging]
-
-		// Extract etag from metadata.
-		objInfo.ETag = extractETag(entry.Metadata)
-
-		// All the parts per object.
-		objInfo.Parts = entry.Parts
-
-		// etag/md5Sum has already been extracted. We need to
-		// remove to avoid it from appearing as part of
-		// response headers. e.g, X-Minio-* or X-Amz-*.
-		objInfo.UserDefined = cleanMetadata(entry.Metadata)
-
-		// Update storage class
-		if sc, ok := entry.Metadata[xhttp.AmzStorageClass]; ok {
-			objInfo.StorageClass = sc
-		} else {
-			objInfo.StorageClass = globalMinioDefaultStorageClass
-		}
+// newFileInfo - initializes new FileInfo, allocates a fresh erasure info.
+func newFileInfo(object string, dataBlocks, parityBlocks int) (fi FileInfo) {
+	fi.Erasure = ErasureInfo{
+		Algorithm:    erasureAlgorithm,
+		DataBlocks:   dataBlocks,
+		ParityBlocks: parityBlocks,
+		BlockSize:    blockSizeV1,
+		Distribution: hashOrder(object, dataBlocks+parityBlocks),
 	}
-	return objInfo
+	return fi
 }
diff --git a/cmd/storage-errors.go b/cmd/storage-errors.go
index 6a674e1cc..bcd38f4b0 100644
--- a/cmd/storage-errors.go
+++ b/cmd/storage-errors.go
@@ -1,5 +1,5 @@
 /*
- * MinIO Cloud Storage, (C) 2015, 2016 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2015-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,6 +16,8 @@
 package cmd
 
+import "os"
+
 // errUnexpected - unexpected error, requires manual intervention.
 var errUnexpected = StorageErr("Unexpected error, please report this issue at https://github.com/minio/minio/issues")
 
@@ -31,6 +33,9 @@ var errUnsupportedDisk = StorageErr("disk does not support O_DIRECT")
 // errDiskFull - cannot create volume or files when disk is full.
 var errDiskFull = StorageErr("disk path full")
 
+// errDiskNotDir - cannot use storage disk if it's not a directory
+var errDiskNotDir = StorageErr("disk is not directory or mountpoint")
+
 // errDiskNotFound - cannot find the underlying configured disk anymore.
 var errDiskNotFound = StorageErr("disk not found")
 
@@ -46,6 +51,9 @@ var errDiskAccessDenied = StorageErr("disk access denied")
 // errFileNotFound - cannot find the file.
 var errFileNotFound = StorageErr("file not found")
 
+// errFileVersionNotFound - cannot find requested file version.
+var errFileVersionNotFound = StorageErr("file version not found")
+
 // errTooManyOpenFiles - too many open files.
var errTooManyOpenFiles = StorageErr("too many open files") @@ -92,7 +100,7 @@ var errLessData = StorageErr("less data available than what was requested") // errMoreData = returned when more data was sent by the caller than what it was supposed to. var errMoreData = StorageErr("more data was sent than what was advertised") -// StorageErr represents error generated by posix call. +// StorageErr represents error generated by xlStorage call. type StorageErr string func (h StorageErr) Error() string { @@ -107,3 +115,32 @@ var baseErrs = []error{ } var baseIgnoredErrs = baseErrs + +// Is a one place function which converts all os.PathError +// into a more FS object layer friendly form, converts +// known errors into their typed form for top level +// interpretation. +func osErrToFileErr(err error) error { + if err == nil { + return nil + } + if os.IsNotExist(err) { + return errFileNotFound + } + if os.IsPermission(err) { + return errFileAccessDenied + } + if isSysErrNotDir(err) { + return errFileNotFound + } + if isSysErrPathNotFound(err) { + return errFileNotFound + } + if isSysErrTooManyFiles(err) { + return errTooManyOpenFiles + } + if isSysErrHandleInvalid(err) { + return errFileNotFound + } + return err +} diff --git a/cmd/storage-interface.go b/cmd/storage-interface.go index 7cfe37f8d..104256cd2 100644 --- a/cmd/storage-interface.go +++ b/cmd/storage-interface.go @@ -44,24 +44,31 @@ type StorageAPI interface { StatVol(volume string) (vol VolInfo, err error) DeleteVol(volume string, forceDelete bool) (err error) + // WalkVersions in sorted order directly on disk. + WalkVersions(volume, dirPath string, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfoVersions, error) // Walk in sorted order directly on disk. - Walk(volume, dirPath string, marker string, recursive bool, leafFile string, - readMetadataFn readMetadataFunc, endWalkCh <-chan struct{}) (chan FileInfo, error) + Walk(volume, dirPath string, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfo, error) // Walk in sorted order directly on disk. WalkSplunk(volume, dirPath string, marker string, endWalkCh <-chan struct{}) (chan FileInfo, error) + // Metadata operations + DeleteVersion(volume, path string, fi FileInfo) error + DeleteVersions(volume string, versions []FileInfo) []error + WriteMetadata(volume, path string, fi FileInfo) error + ReadVersion(volume, path, versionID string) (FileInfo, error) + RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath string) error + // File operations. 
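	// A hedged sketch of how the metadata operations just above compose
	// into a versioned write; disk is any StorageAPI implementation, and
	// tmpObj plus the UUID helper usage are illustrative, not prescribed
	// by this patch:
	//
	//	fi := newFileInfo(object, dataBlocks, parityBlocks)
	//	fi.VersionID = mustGetUUID()
	//	fi.DataDir = mustGetUUID()
	//	// ... write erasure-coded parts under a tmp volume ...
	//	disk.WriteMetadata(minioMetaTmpBucket, tmpObj, fi)
	//	disk.RenameData(minioMetaTmpBucket, tmpObj, fi.DataDir, bucket, object)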
- ListDir(volume, dirPath string, count int, leafFile string) ([]string, error) + ListDir(volume, dirPath string, count int) ([]string, error) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) AppendFile(volume string, path string, buf []byte) (err error) CreateFile(volume, path string, size int64, reader io.Reader) error ReadFileStream(volume, path string, offset, length int64) (io.ReadCloser, error) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error - StatFile(volume string, path string) (file FileInfo, err error) + CheckParts(volume string, path string, fi FileInfo) error + CheckFile(volume string, path string) (err error) DeleteFile(volume string, path string) (err error) - DeleteFileBulk(volume string, paths []string) (errs []error, err error) - DeletePrefixes(volume string, paths []string) (errs []error, err error) - VerifyFile(volume, path string, size int64, algo BitrotAlgorithm, sum []byte, shardSize int64) error + VerifyFile(volume, path string, fi FileInfo) error // Write all data, syncs the data to disk. WriteAll(volume string, path string, reader io.Reader) (err error) diff --git a/cmd/storage-rest-client.go b/cmd/storage-rest-client.go index d21934f0c..c683d35bf 100644 --- a/cmd/storage-rest-client.go +++ b/cmd/storage-rest-client.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc. + * MinIO Cloud Storage, (C) 2018-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -270,6 +270,33 @@ func (client *storageRESTClient) CreateFile(volume, path string, length int64, r return err } +func (client *storageRESTClient) WriteMetadata(volume, path string, fi FileInfo) error { + values := make(url.Values) + values.Set(storageRESTVolume, volume) + values.Set(storageRESTFilePath, path) + + var reader bytes.Buffer + if err := gob.NewEncoder(&reader).Encode(fi); err != nil { + return err + } + + respBody, err := client.call(storageRESTMethodWriteMetadata, values, &reader, -1) + defer http.DrainBody(respBody) + return err +} + +func (client *storageRESTClient) DeleteVersion(volume, path string, fi FileInfo) error { + values := make(url.Values) + values.Set(storageRESTVolume, volume) + values.Set(storageRESTFilePath, path) + values.Set(storageRESTVersionID, fi.VersionID) + values.Set(storageRESTDeleteMarker, strconv.FormatBool(fi.Deleted)) + + respBody, err := client.call(storageRESTMethodDeleteVersion, values, nil, -1) + defer http.DrainBody(respBody) + return err +} + // WriteAll - write all data to a file. func (client *storageRESTClient) WriteAll(volume, path string, reader io.Reader) error { values := make(url.Values) @@ -280,18 +307,60 @@ func (client *storageRESTClient) WriteAll(volume, path string, reader io.Reader) return err } -// StatFile - stat a file. -func (client *storageRESTClient) StatFile(volume, path string) (info FileInfo, err error) { +// CheckFile - stat a file metadata. +func (client *storageRESTClient) CheckFile(volume, path string) error { values := make(url.Values) values.Set(storageRESTVolume, volume) values.Set(storageRESTFilePath, path) - respBody, err := client.call(storageRESTMethodStatFile, values, nil, -1) + respBody, err := client.call(storageRESTMethodCheckFile, values, nil, -1) + defer http.DrainBody(respBody) + return err +} + +// CheckParts - stat all file parts. 
+func (client *storageRESTClient) CheckParts(volume, path string, fi FileInfo) error {
+	values := make(url.Values)
+	values.Set(storageRESTVolume, volume)
+	values.Set(storageRESTFilePath, path)
+
+	var reader bytes.Buffer
+	if err := gob.NewEncoder(&reader).Encode(fi); err != nil {
+		return err
+	}
+
+	respBody, err := client.call(storageRESTMethodCheckParts, values, &reader, -1)
+	defer http.DrainBody(respBody)
+	return err
+}
+
+// RenameData - rename source path to destination path atomically, metadata and data file.
+func (client *storageRESTClient) RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath string) (err error) {
+	values := make(url.Values)
+	values.Set(storageRESTSrcVolume, srcVolume)
+	values.Set(storageRESTSrcPath, srcPath)
+	values.Set(storageRESTDataDir, dataDir)
+	values.Set(storageRESTDstVolume, dstVolume)
+	values.Set(storageRESTDstPath, dstPath)
+	respBody, err := client.call(storageRESTMethodRenameData, values, nil, -1)
+	defer http.DrainBody(respBody)
+
+	return err
+}
+
+func (client *storageRESTClient) ReadVersion(volume, path, versionID string) (fi FileInfo, err error) {
+	values := make(url.Values)
+	values.Set(storageRESTVolume, volume)
+	values.Set(storageRESTFilePath, path)
+	values.Set(storageRESTVersionID, versionID)
+
+	respBody, err := client.call(storageRESTMethodReadVersion, values, nil, -1)
 	if err != nil {
-		return info, err
+		return fi, err
 	}
 	defer http.DrainBody(respBody)
-	err = gob.NewDecoder(respBody).Decode(&info)
-	return info, err
+
+	err = gob.NewDecoder(respBody).Decode(&fi)
+	return fi, err
 }
 
 // ReadAll - reads all contents of a file.
@@ -378,14 +447,47 @@ func (client *storageRESTClient) WalkSplunk(volume, dirPath, marker string, endW
 	return ch, nil
 }
 
-func (client *storageRESTClient) Walk(volume, dirPath, marker string, recursive bool, leafFile string,
-	readMetadataFn readMetadataFunc, endWalkCh <-chan struct{}) (chan FileInfo, error) {
+func (client *storageRESTClient) WalkVersions(volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfoVersions, error) {
+	values := make(url.Values)
+	values.Set(storageRESTVolume, volume)
+	values.Set(storageRESTDirPath, dirPath)
+	values.Set(storageRESTMarkerPath, marker)
+	values.Set(storageRESTRecursive, strconv.FormatBool(recursive))
+	respBody, err := client.call(storageRESTMethodWalkVersions, values, nil, -1)
+	if err != nil {
+		return nil, err
+	}
+
+	ch := make(chan FileInfoVersions)
+	go func() {
+		defer close(ch)
+		defer http.DrainBody(respBody)
+
+		decoder := gob.NewDecoder(respBody)
+		for {
+			var fi FileInfoVersions
+			if gerr := decoder.Decode(&fi); gerr != nil {
+				// Upon error return
+				return
+			}
+			select {
+			case ch <- fi:
+			case <-endWalkCh:
+				return
+			}
+
+		}
+	}()
+
+	return ch, nil
+}
+
+func (client *storageRESTClient) Walk(volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfo, error) {
 	values := make(url.Values)
 	values.Set(storageRESTVolume, volume)
 	values.Set(storageRESTDirPath, dirPath)
 	values.Set(storageRESTMarkerPath, marker)
 	values.Set(storageRESTRecursive, strconv.FormatBool(recursive))
-	values.Set(storageRESTLeafFile, leafFile)
 	respBody, err := client.call(storageRESTMethodWalk, values, nil, -1)
 	if err != nil {
 		return nil, err
@@ -416,12 +518,11 @@ func (client *storageRESTClient) Walk(volume, dirPath, marker string, recursive
 }
 
 // ListDir - lists a directory.
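// Consuming the gob event-stream walkers above is a plain channel drain; a
// sketch for WalkVersions (a decode error ends the stream by design, so the
// channel simply closes):
//
//	ch, err := client.WalkVersions(volume, prefix, "", true, doneCh)
//	if err != nil {
//		return err
//	}
//	for fiv := range ch {
//		for _, fi := range fiv.Versions {
//			_ = fi.VersionID // process each version of fiv.Name
//		}
//	}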
-func (client *storageRESTClient) ListDir(volume, dirPath string, count int, leafFile string) (entries []string, err error) { +func (client *storageRESTClient) ListDir(volume, dirPath string, count int) (entries []string, err error) { values := make(url.Values) values.Set(storageRESTVolume, volume) values.Set(storageRESTDirPath, dirPath) values.Set(storageRESTCount, strconv.Itoa(count)) - values.Set(storageRESTLeafFile, leafFile) respBody, err := client.call(storageRESTMethodListDir, values, nil, -1) if err != nil { return nil, err @@ -441,78 +542,54 @@ func (client *storageRESTClient) DeleteFile(volume, path string) error { return err } -// DeleteFileBulk - deletes files in bulk. -func (client *storageRESTClient) DeleteFileBulk(volume string, paths []string) (errs []error, err error) { - if len(paths) == 0 { - return errs, err +// DeleteVersions - deletes list of specified versions if present +func (client *storageRESTClient) DeleteVersions(volume string, versions []FileInfo) (errs []error) { + if len(versions) == 0 { + return errs } + values := make(url.Values) values.Set(storageRESTVolume, volume) + values.Set(storageRESTTotalVersions, strconv.Itoa(len(versions))) var buffer bytes.Buffer - for _, path := range paths { - buffer.WriteString(path) - buffer.WriteString("\n") + encoder := gob.NewEncoder(&buffer) + for _, version := range versions { + encoder.Encode(&version) } - respBody, err := client.call(storageRESTMethodDeleteFileBulk, values, &buffer, -1) + errs = make([]error, len(versions)) + + respBody, err := client.call(storageRESTMethodDeleteVersions, values, &buffer, -1) defer http.DrainBody(respBody) if err != nil { - return nil, err + for i := range errs { + errs[i] = err + } + return errs } reader, err := waitForHTTPResponse(respBody) if err != nil { - return nil, err + for i := range errs { + errs[i] = err + } + return errs } - dErrResp := &DeleteFileBulkErrsResp{} + dErrResp := &DeleteVersionsErrsResp{} if err = gob.NewDecoder(reader).Decode(dErrResp); err != nil { - return nil, err + for i := range errs { + errs[i] = err + } + return errs } - for _, dErr := range dErrResp.Errs { - errs = append(errs, toStorageErr(dErr)) + for i, dErr := range dErrResp.Errs { + errs[i] = toStorageErr(dErr) } - return errs, nil -} - -// DeletePrefixes - deletes prefixes in bulk. -func (client *storageRESTClient) DeletePrefixes(volume string, paths []string) (errs []error, err error) { - if len(paths) == 0 { - return errs, err - } - values := make(url.Values) - values.Set(storageRESTVolume, volume) - - var buffer bytes.Buffer - for _, path := range paths { - buffer.WriteString(path) - buffer.WriteString("\n") - } - - respBody, err := client.call(storageRESTMethodDeletePrefixes, values, &buffer, -1) - defer http.DrainBody(respBody) - if err != nil { - return nil, err - } - - reader, err := waitForHTTPResponse(respBody) - if err != nil { - return nil, err - } - - dErrResp := &DeletePrefixesErrsResp{} - if err = gob.NewDecoder(reader).Decode(dErrResp); err != nil { - return nil, err - } - - for _, dErr := range dErrResp.Errs { - errs = append(errs, toStorageErr(dErr)) - } - - return errs, nil + return errs } // RenameFile - renames a file. 
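// DeleteVersions above reports one error slot per requested version; a
// transport or decode failure is fanned out to every slot, so callers can
// always index errs by version:
//
//	errs := client.DeleteVersions(volume, versions)
//	for i, err := range errs {
//		if err != nil {
//			logger.LogIf(ctx, err) // versions[i] was not deleted
//		}
//	}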
@@ -527,28 +604,32 @@ func (client *storageRESTClient) RenameFile(srcVolume, srcPath, dstVolume, dstPa return err } -func (client *storageRESTClient) VerifyFile(volume, path string, size int64, algo BitrotAlgorithm, sum []byte, shardSize int64) error { +func (client *storageRESTClient) VerifyFile(volume, path string, fi FileInfo) error { values := make(url.Values) values.Set(storageRESTVolume, volume) values.Set(storageRESTFilePath, path) - values.Set(storageRESTBitrotAlgo, algo.String()) - values.Set(storageRESTLength, strconv.FormatInt(size, 10)) - values.Set(storageRESTShardSize, strconv.Itoa(int(shardSize))) - values.Set(storageRESTBitrotHash, hex.EncodeToString(sum)) - respBody, err := client.call(storageRESTMethodVerifyFile, values, nil, -1) + var reader bytes.Buffer + if err := gob.NewEncoder(&reader).Encode(fi); err != nil { + return err + } + + respBody, err := client.call(storageRESTMethodVerifyFile, values, &reader, -1) defer http.DrainBody(respBody) if err != nil { return err } - reader, err := waitForHTTPResponse(respBody) + + respReader, err := waitForHTTPResponse(respBody) if err != nil { return err } + verifyResp := &VerifyFileResp{} - if err = gob.NewDecoder(reader).Decode(verifyResp); err != nil { + if err = gob.NewDecoder(respReader).Decode(verifyResp); err != nil { return err } + return toStorageErr(verifyResp.Err) } diff --git a/cmd/storage-rest-common.go b/cmd/storage-rest-common.go index e9d8ed5c7..37e02c337 100644 --- a/cmd/storage-rest-common.go +++ b/cmd/storage-rest-common.go @@ -17,7 +17,7 @@ package cmd const ( - storageRESTVersion = "v17" // RemoveBucket API change + storageRESTVersion = "v20" // Re-implementation of storage layer storageRESTVersionPrefix = SlashSeparator + storageRESTVersion storageRESTPrefix = minioReservedBucketPath + "/storage" ) @@ -34,38 +34,45 @@ const ( storageRESTMethodAppendFile = "/appendfile" storageRESTMethodCreateFile = "/createfile" storageRESTMethodWriteAll = "/writeall" - storageRESTMethodStatFile = "/statfile" + storageRESTMethodWriteMetadata = "/writemetadata" + storageRESTMethodDeleteVersion = "/deleteversion" + storageRESTMethodReadVersion = "/readversion" + storageRESTMethodRenameData = "/renamedata" + storageRESTMethodCheckParts = "/checkparts" + storageRESTMethodCheckFile = "/checkfile" storageRESTMethodReadAll = "/readall" storageRESTMethodReadFile = "/readfile" storageRESTMethodReadFileStream = "/readfilestream" storageRESTMethodListDir = "/listdir" storageRESTMethodWalk = "/walk" + storageRESTMethodWalkVersions = "/walkversions" storageRESTMethodWalkSplunk = "/walksplunk" storageRESTMethodDeleteFile = "/deletefile" - storageRESTMethodDeleteFileBulk = "/deletefilebulk" - storageRESTMethodDeletePrefixes = "/deleteprefixes" + storageRESTMethodDeleteVersions = "/deleteverions" storageRESTMethodRenameFile = "/renamefile" storageRESTMethodVerifyFile = "/verifyfile" ) const ( - storageRESTVolume = "volume" - storageRESTVolumes = "volumes" - storageRESTDirPath = "dir-path" - storageRESTFilePath = "file-path" - storageRESTSrcVolume = "source-volume" - storageRESTSrcPath = "source-path" - storageRESTDstVolume = "destination-volume" - storageRESTDstPath = "destination-path" - storageRESTOffset = "offset" - storageRESTLength = "length" - storageRESTShardSize = "shard-size" - storageRESTCount = "count" - storageRESTMarkerPath = "marker" - storageRESTLeafFile = "leaf-file" - storageRESTRecursive = "recursive" - storageRESTBitrotAlgo = "bitrot-algo" - storageRESTBitrotHash = "bitrot-hash" - storageRESTDiskID = "disk-id" - 
storageRESTForceDelete = "force-delete"
+	storageRESTVolume        = "volume"
+	storageRESTVolumes       = "volumes"
+	storageRESTDirPath       = "dir-path"
+	storageRESTFilePath      = "file-path"
+	storageRESTVersionID     = "version-id"
+	storageRESTTotalVersions = "total-versions"
+	storageRESTDeleteMarker  = "delete-marker"
+	storageRESTSrcVolume     = "source-volume"
+	storageRESTSrcPath       = "source-path"
+	storageRESTDataDir       = "data-dir"
+	storageRESTDstVolume     = "destination-volume"
+	storageRESTDstPath       = "destination-path"
+	storageRESTOffset        = "offset"
+	storageRESTLength        = "length"
+	storageRESTCount         = "count"
+	storageRESTMarkerPath    = "marker"
+	storageRESTRecursive     = "recursive"
+	storageRESTBitrotAlgo    = "bitrot-algo"
+	storageRESTBitrotHash    = "bitrot-hash"
+	storageRESTDiskID        = "disk-id"
+	storageRESTForceDelete   = "force-delete"
 )
diff --git a/cmd/storage-rest-server.go b/cmd/storage-rest-server.go
index 9411c96c5..d4404c7b7 100644
--- a/cmd/storage-rest-server.go
+++ b/cmd/storage-rest-server.go
@@ -43,7 +43,7 @@ var errDiskStale = errors.New("disk stale")
 
 // To abstract a disk over network.
 type storageRESTServer struct {
-	storage *posix
+	storage *xlStorage
 }
 
 func (s *storageRESTServer) writeErrorResponse(w http.ResponseWriter, err error) {
@@ -269,6 +269,70 @@ func (s *storageRESTServer) CreateFileHandler(w http.ResponseWriter, r *http.Req
 	}
 }
 
+// DeleteVersionHandler - delete a version of a file, or write its delete marker.
+func (s *storageRESTServer) DeleteVersionHandler(w http.ResponseWriter, r *http.Request) {
+	if !s.IsValid(w, r) {
+		return
+	}
+	vars := mux.Vars(r)
+	volume := vars[storageRESTVolume]
+	filePath := vars[storageRESTFilePath]
+	versionID := vars[storageRESTVersionID]
+	deleteMarker := vars[storageRESTDeleteMarker] == "true"
+
+	err := s.storage.DeleteVersion(volume, filePath, FileInfo{VersionID: versionID, Deleted: deleteMarker})
+	if err != nil {
+		s.writeErrorResponse(w, err)
+	}
+}
+
+// ReadVersionHandler - read metadata of the requested version.
+func (s *storageRESTServer) ReadVersionHandler(w http.ResponseWriter, r *http.Request) {
+	if !s.IsValid(w, r) {
+		return
+	}
+	vars := mux.Vars(r)
+	volume := vars[storageRESTVolume]
+	filePath := vars[storageRESTFilePath]
+	versionID := vars[storageRESTVersionID]
+
+	fi, err := s.storage.ReadVersion(volume, filePath, versionID)
+	if err != nil {
+		s.writeErrorResponse(w, err)
+		return
+	}
+
+	gob.NewEncoder(w).Encode(fi)
+	w.(http.Flusher).Flush()
+}
+
+// WriteMetadataHandler - write new or updated file metadata.
+func (s *storageRESTServer) WriteMetadataHandler(w http.ResponseWriter, r *http.Request) {
+	if !s.IsValid(w, r) {
+		return
+	}
+	vars := mux.Vars(r)
+	volume := vars[storageRESTVolume]
+	filePath := vars[storageRESTFilePath]
+
+	if r.ContentLength < 0 {
+		s.writeErrorResponse(w, errInvalidArgument)
+		return
+	}
+
+	var fi FileInfo
+	err := gob.NewDecoder(r.Body).Decode(&fi)
+	if err != nil {
+		s.writeErrorResponse(w, err)
+		return
+	}
+
+	err = s.storage.WriteMetadata(volume, filePath, fi)
+	if err != nil {
+		s.writeErrorResponse(w, err)
+	}
+}
+
 // WriteAllHandler - write to file all content.
 func (s *storageRESTServer) WriteAllHandler(w http.ResponseWriter, r *http.Request) {
 	if !s.IsValid(w, r) {
@@ -289,8 +353,8 @@ func (s *storageRESTServer) WriteAllHandler(w http.ResponseWriter, r *http.Reque
 	}
 }
 
-// StatFileHandler - stat a file.
+// CheckPartsHandler - check if all parts of a file exist.
+func (s *storageRESTServer) CheckPartsHandler(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { return } @@ -298,13 +362,34 @@ func (s *storageRESTServer) StatFileHandler(w http.ResponseWriter, r *http.Reque volume := vars[storageRESTVolume] filePath := vars[storageRESTFilePath] - info, err := s.storage.StatFile(volume, filePath) - if err != nil { + if r.ContentLength < 0 { + s.writeErrorResponse(w, errInvalidArgument) + return + } + + var fi FileInfo + if err := gob.NewDecoder(r.Body).Decode(&fi); err != nil { s.writeErrorResponse(w, err) return } - gob.NewEncoder(w).Encode(info) - w.(http.Flusher).Flush() + + if err := s.storage.CheckParts(volume, filePath, fi); err != nil { + s.writeErrorResponse(w, err) + } +} + +// CheckFileHandler - check if a file metadata exists. +func (s *storageRESTServer) CheckFileHandler(w http.ResponseWriter, r *http.Request) { + if !s.IsValid(w, r) { + return + } + vars := mux.Vars(r) + volume := vars[storageRESTVolume] + filePath := vars[storageRESTFilePath] + + if err := s.storage.CheckFile(volume, filePath); err != nil { + s.writeErrorResponse(w, err) + } } // ReadAllHandler - read all the contents of a file. @@ -400,26 +485,6 @@ func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http io.Copy(w, rc) w.(http.Flusher).Flush() - -} - -// readMetadata func provides the function types for reading leaf metadata. -type readMetadataFunc func(buf []byte, volume, entry string) FileInfo - -func readMetadata(buf []byte, volume, entry string) FileInfo { - m, err := xlMetaV1UnmarshalJSON(GlobalContext, buf) - if err != nil { - return FileInfo{} - } - return FileInfo{ - Volume: volume, - Name: entry, - ModTime: m.Stat.ModTime, - Size: m.Stat.Size, - Metadata: m.Meta, - Parts: m.Parts, - Quorum: m.Erasure.DataBlocks, - } } // WalkHandler - remote caller to start walking at a requested directory path. @@ -446,6 +511,35 @@ func (s *storageRESTServer) WalkSplunkHandler(w http.ResponseWriter, r *http.Req w.(http.Flusher).Flush() } +// WalkVersionsHandler - remote caller to start walking at a requested directory path. +func (s *storageRESTServer) WalkVersionsHandler(w http.ResponseWriter, r *http.Request) { + if !s.IsValid(w, r) { + return + } + vars := mux.Vars(r) + volume := vars[storageRESTVolume] + dirPath := vars[storageRESTDirPath] + markerPath := vars[storageRESTMarkerPath] + recursive, err := strconv.ParseBool(vars[storageRESTRecursive]) + if err != nil { + s.writeErrorResponse(w, err) + return + } + + w.Header().Set(xhttp.ContentType, "text/event-stream") + encoder := gob.NewEncoder(w) + + fch, err := s.storage.WalkVersions(volume, dirPath, markerPath, recursive, r.Context().Done()) + if err != nil { + s.writeErrorResponse(w, err) + return + } + for fi := range fch { + encoder.Encode(&fi) + } + w.(http.Flusher).Flush() +} + // WalkHandler - remote caller to start walking at a requested directory path. 
func (s *storageRESTServer) WalkHandler(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { @@ -460,12 +554,11 @@ func (s *storageRESTServer) WalkHandler(w http.ResponseWriter, r *http.Request) s.writeErrorResponse(w, err) return } - leafFile := vars[storageRESTLeafFile] w.Header().Set(xhttp.ContentType, "text/event-stream") encoder := gob.NewEncoder(w) - fch, err := s.storage.Walk(volume, dirPath, markerPath, recursive, leafFile, readMetadata, r.Context().Done()) + fch, err := s.storage.Walk(volume, dirPath, markerPath, recursive, r.Context().Done()) if err != nil { s.writeErrorResponse(w, err) return @@ -484,14 +577,13 @@ func (s *storageRESTServer) ListDirHandler(w http.ResponseWriter, r *http.Reques vars := mux.Vars(r) volume := vars[storageRESTVolume] dirPath := vars[storageRESTDirPath] - leafFile := vars[storageRESTLeafFile] count, err := strconv.Atoi(vars[storageRESTCount]) if err != nil { s.writeErrorResponse(w, err) return } - entries, err := s.storage.ListDir(volume, dirPath, count, leafFile) + entries, err := s.storage.ListDir(volume, dirPath, count) if err != nil { s.writeErrorResponse(w, err) return @@ -515,96 +607,67 @@ func (s *storageRESTServer) DeleteFileHandler(w http.ResponseWriter, r *http.Req } } -// DeleteFileBulkErrsResp - collection of deleteFile errors -// for bulk deletes -type DeleteFileBulkErrsResp struct { +// DeleteVersionsErrsResp - collection of delete errors +// for bulk version deletes +type DeleteVersionsErrsResp struct { Errs []error } -// DeleteFileBulkHandler - delete a file. -func (s *storageRESTServer) DeleteFileBulkHandler(w http.ResponseWriter, r *http.Request) { +// DeleteVersionsHandler - delete a set of a versions. +func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { return } + vars := r.URL.Query() volume := vars.Get(storageRESTVolume) - bio := bufio.NewScanner(r.Body) - var filePaths []string - for bio.Scan() { - filePaths = append(filePaths, bio.Text()) - } - - if err := bio.Err(); err != nil { + totalVersions, err := strconv.Atoi(vars.Get(storageRESTTotalVersions)) + if err != nil { s.writeErrorResponse(w, err) return } - dErrsResp := &DeleteFileBulkErrsResp{Errs: make([]error, len(filePaths))} + versions := make([]FileInfo, totalVersions) + decoder := gob.NewDecoder(r.Body) + for i := 0; i < totalVersions; i++ { + if err := decoder.Decode(&versions[i]); err != nil { + s.writeErrorResponse(w, err) + return + } + } + + dErrsResp := &DeleteVersionsErrsResp{Errs: make([]error, totalVersions)} w.Header().Set(xhttp.ContentType, "text/event-stream") encoder := gob.NewEncoder(w) done := keepHTTPResponseAlive(w) - errs, err := s.storage.DeleteFileBulk(volume, filePaths) + errs := s.storage.DeleteVersions(volume, versions) done(nil) - - for idx := range filePaths { - if err != nil { - dErrsResp.Errs[idx] = StorageErr(err.Error()) - } else { - if errs[idx] != nil { - dErrsResp.Errs[idx] = StorageErr(errs[idx].Error()) - } + for idx := range versions { + if errs[idx] != nil { + dErrsResp.Errs[idx] = StorageErr(errs[idx].Error()) } } - encoder.Encode(dErrsResp) w.(http.Flusher).Flush() } -// DeletePrefixesErrsResp - collection of delete errors -// for bulk prefixes deletes -type DeletePrefixesErrsResp struct { - Errs []error -} - -// DeletePrefixesHandler - delete a set of a prefixes. -func (s *storageRESTServer) DeletePrefixesHandler(w http.ResponseWriter, r *http.Request) { +// RenameDataHandler - renames a meta object and data dir to destination. 
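// DeleteVersionsHandler above holds the connection open while the bulk
// delete runs; the call shape (done must fire exactly once, before the real
// body is streamed):
//
//	done := keepHTTPResponseAlive(w)
//	errs := s.storage.DeleteVersions(volume, versions)
//	done(nil)                       // stop the keep-alive filler bytes
//	gob.NewEncoder(w).Encode(resp)  // then send the actual response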
+func (s *storageRESTServer) RenameDataHandler(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { return } - vars := r.URL.Query() - volume := vars.Get(storageRESTVolume) - - bio := bufio.NewScanner(r.Body) - var prefixes []string - for bio.Scan() { - prefixes = append(prefixes, bio.Text()) - } - - if err := bio.Err(); err != nil { + vars := mux.Vars(r) + srcVolume := vars[storageRESTSrcVolume] + srcFilePath := vars[storageRESTSrcPath] + dataDir := vars[storageRESTDataDir] + dstVolume := vars[storageRESTDstVolume] + dstFilePath := vars[storageRESTDstPath] + err := s.storage.RenameData(srcVolume, srcFilePath, dataDir, dstVolume, dstFilePath) + if err != nil { s.writeErrorResponse(w, err) - return } - - dErrsResp := &DeletePrefixesErrsResp{Errs: make([]error, len(prefixes))} - - w.Header().Set(xhttp.ContentType, "text/event-stream") - encoder := gob.NewEncoder(w) - done := keepHTTPResponseAlive(w) - errs, err := s.storage.DeletePrefixes(volume, prefixes) - done(nil) - for idx := range prefixes { - if err != nil { - dErrsResp.Errs[idx] = StorageErr(err.Error()) - } else { - if errs[idx] != nil { - dErrsResp.Errs[idx] = StorageErr(errs[idx].Error()) - } - } - } - encoder.Encode(dErrsResp) - w.(http.Flusher).Flush() } // RenameFileHandler - rename a file. @@ -701,42 +764,31 @@ type VerifyFileResp struct { Err error } -// VerifyFile - Verify the file for bitrot errors. -func (s *storageRESTServer) VerifyFile(w http.ResponseWriter, r *http.Request) { +// VerifyFileHandler - Verify all part of file for bitrot errors. +func (s *storageRESTServer) VerifyFileHandler(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { return } vars := mux.Vars(r) volume := vars[storageRESTVolume] filePath := vars[storageRESTFilePath] - size, err := strconv.ParseInt(vars[storageRESTLength], 10, 0) - if err != nil { - s.writeErrorResponse(w, err) - return - } - shardSize, err := strconv.Atoi(vars[storageRESTShardSize]) - if err != nil { - s.writeErrorResponse(w, err) - return - } - hashStr := vars[storageRESTBitrotHash] - var hash []byte - if hashStr != "" { - hash, err = hex.DecodeString(hashStr) - if err != nil { - s.writeErrorResponse(w, err) - return - } - } - algoStr := vars[storageRESTBitrotAlgo] - if algoStr == "" { + + if r.ContentLength < 0 { s.writeErrorResponse(w, errInvalidArgument) return } + + var fi FileInfo + err := gob.NewDecoder(r.Body).Decode(&fi) + if err != nil { + s.writeErrorResponse(w, err) + return + } + w.Header().Set(xhttp.ContentType, "text/event-stream") encoder := gob.NewEncoder(w) done := keepHTTPResponseAlive(w) - err = s.storage.VerifyFile(volume, filePath, size, BitrotAlgorithmFromString(algoStr), hash, int64(shardSize)) + err = s.storage.VerifyFile(volume, filePath, fi) done(nil) vresp := &VerifyFileResp{} if err != nil { @@ -753,7 +805,7 @@ func registerStorageRESTHandlers(router *mux.Router, endpointZones EndpointZones if !endpoint.IsLocal { continue } - storage, err := newPosix(endpoint.Path, endpoint.Host) + storage, err := newXLStorage(endpoint.Path, endpoint.Host) if err != nil { if err == errMinDiskSize { logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(err.Error()), "Unable to initialize backend") @@ -768,10 +820,8 @@ func registerStorageRESTHandlers(router *mux.Router, endpointZones EndpointZones } else { username = "" } - hint := fmt.Sprintf("Run the following command to add the convenient permissions: `sudo chown %s %s && sudo chmod u+rxw %s`", - username, endpoint.Path, endpoint.Path) - 
logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(hint), - "Unable to initialize posix backend") + hint := fmt.Sprintf("Run the following command to add the convenient permissions: `sudo chown %s %s && sudo chmod u+rxw %s`", username, endpoint.Path, endpoint.Path) + logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(hint), "Unable to initialize posix backend") } server := &storageRESTServer{storage: storage} @@ -790,11 +840,23 @@ func registerStorageRESTHandlers(router *mux.Router, endpointZones EndpointZones Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWriteAll).HandlerFunc(httpTraceHdrs(server.WriteAllHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWriteMetadata).HandlerFunc(httpTraceHdrs(server.WriteMetadataHandler)). + Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteVersion).HandlerFunc(httpTraceHdrs(server.DeleteVersionHandler)). + Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTVersionID, storageRESTDeleteMarker)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadVersion).HandlerFunc(httpTraceHdrs(server.ReadVersionHandler)). + Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTVersionID)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodRenameData).HandlerFunc(httpTraceHdrs(server.RenameDataHandler)). + Queries(restQueries(storageRESTSrcVolume, storageRESTSrcPath, storageRESTDataDir, + storageRESTDstVolume, storageRESTDstPath)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCreateFile).HandlerFunc(httpTraceHdrs(server.CreateFileHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTLength)...) - subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodStatFile).HandlerFunc(httpTraceHdrs(server.StatFileHandler)). + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCheckFile).HandlerFunc(httpTraceHdrs(server.CheckFileHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCheckParts).HandlerFunc(httpTraceHdrs(server.CheckPartsHandler)). + Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadAll).HandlerFunc(httpTraceHdrs(server.ReadAllHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadFile).HandlerFunc(httpTraceHdrs(server.ReadFileHandler)). @@ -802,22 +864,23 @@ func registerStorageRESTHandlers(router *mux.Router, endpointZones EndpointZones subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadFileStream).HandlerFunc(httpTraceHdrs(server.ReadFileStreamHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTOffset, storageRESTLength)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodListDir).HandlerFunc(httpTraceHdrs(server.ListDirHandler)). 
- Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTCount, storageRESTLeafFile)...) + Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTCount)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWalk).HandlerFunc(httpTraceHdrs(server.WalkHandler)). - Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTMarkerPath, storageRESTRecursive, storageRESTLeafFile)...) + Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTMarkerPath, storageRESTRecursive)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWalkSplunk).HandlerFunc(httpTraceHdrs(server.WalkSplunkHandler)). Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTMarkerPath)...) - subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeletePrefixes).HandlerFunc(httpTraceHdrs(server.DeletePrefixesHandler)). - Queries(restQueries(storageRESTVolume)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWalkVersions).HandlerFunc(httpTraceHdrs(server.WalkVersionsHandler)). + Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTMarkerPath)...) + + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteVersions).HandlerFunc(httpTraceHdrs(server.DeleteVersionsHandler)). + Queries(restQueries(storageRESTVolume, storageRESTTotalVersions)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteFile).HandlerFunc(httpTraceHdrs(server.DeleteFileHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) - subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteFileBulk).HandlerFunc(httpTraceHdrs(server.DeleteFileBulkHandler)). - Queries(restQueries(storageRESTVolume)...) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodRenameFile).HandlerFunc(httpTraceHdrs(server.RenameFileHandler)). Queries(restQueries(storageRESTSrcVolume, storageRESTSrcPath, storageRESTDstVolume, storageRESTDstPath)...) - subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodVerifyFile).HandlerFunc(httpTraceHdrs(server.VerifyFile)). - Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTBitrotAlgo, storageRESTBitrotHash, storageRESTLength, storageRESTShardSize)...) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodVerifyFile).HandlerFunc(httpTraceHdrs(server.VerifyFileHandler)). + Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) 
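
Every storage RPC registered above is a plain POST route that gorilla/mux matches on query parameters, with restQueries expanding each key into a key/value matcher pair so handlers can read the values back through mux.Vars(r). A hedged reconstruction of that helper with a stub route (the path and parameter names below are illustrative, not the real constants):

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

// restQueries turns each key into the key={key:.*} pair that
// mux.Route.Queries expects; a plausible reading of the helper used
// in the registrations above.
func restQueries(keys ...string) []string {
	var accumulator []string
	for _, key := range keys {
		accumulator = append(accumulator, key, "{"+key+":.*}")
	}
	return accumulator
}

func main() {
	router := mux.NewRouter()
	router.Methods(http.MethodPost).
		Path("/minio/storage/v1/checkfile"). // illustrative path
		HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// Query-matched parameters land in mux.Vars alongside
			// any path variables.
			vars := mux.Vars(r)
			fmt.Fprintf(w, "volume=%s path=%s\n", vars["volume"], vars["file-path"])
		}).
		Queries(restQueries("volume", "file-path")...)
	http.ListenAndServe(":9000", router)
}
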
} } } diff --git a/cmd/storage-rest_test.go b/cmd/storage-rest_test.go index 525e50a5c..6c5b9d7cc 100644 --- a/cmd/storage-rest_test.go +++ b/cmd/storage-rest_test.go @@ -191,7 +191,7 @@ func testStorageAPIDeleteVol(t *testing.T, storage StorageAPI) { } } -func testStorageAPIStatFile(t *testing.T, storage StorageAPI) { +func testStorageAPICheckFile(t *testing.T, storage StorageAPI) { tmpGlobalServerConfig := globalServerConfig defer func() { globalServerConfig = tmpGlobalServerConfig @@ -202,7 +202,7 @@ func testStorageAPIStatFile(t *testing.T, storage StorageAPI) { if err != nil { t.Fatalf("unexpected error %v", err) } - err = storage.AppendFile("foo", "myobject", []byte("foo")) + err = storage.AppendFile("foo", pathJoin("myobject", xlStorageFormatFile), []byte("foo")) if err != nil { t.Fatalf("unexpected error %v", err) } @@ -218,18 +218,12 @@ func testStorageAPIStatFile(t *testing.T, storage StorageAPI) { } for i, testCase := range testCases { - result, err := storage.StatFile(testCase.volumeName, testCase.objectName) + err := storage.CheckFile(testCase.volumeName, testCase.objectName) expectErr := (err != nil) if expectErr != testCase.expectErr { t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) } - - if !testCase.expectErr { - if result.Name != testCase.objectName { - t.Fatalf("case %v: result: expected: %+v, got: %+v", i+1, testCase.objectName, result.Name) - } - } } } @@ -261,7 +255,7 @@ func testStorageAPIListDir(t *testing.T, storage StorageAPI) { } for i, testCase := range testCases { - result, err := storage.ListDir(testCase.volumeName, testCase.prefix, -1, "") + result, err := storage.ListDir(testCase.volumeName, testCase.prefix, -1) expectErr := (err != nil) if expectErr != testCase.expectErr { @@ -586,7 +580,7 @@ func TestStorageRESTClientDeleteVol(t *testing.T) { testStorageAPIDeleteVol(t, restClient) } -func TestStorageRESTClientStatFile(t *testing.T) { +func TestStorageRESTClientCheckFile(t *testing.T) { httpServer, restClient, prevGlobalServerConfig, endpointPath := newStorageRESTHTTPServerClient(t) defer httpServer.Close() defer func() { @@ -594,7 +588,7 @@ func TestStorageRESTClientStatFile(t *testing.T) { }() defer os.RemoveAll(endpointPath) - testStorageAPIStatFile(t, restClient) + testStorageAPICheckFile(t, restClient) } func TestStorageRESTClientListDir(t *testing.T) { diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 55213ab2e..2da3b7f92 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -79,7 +79,7 @@ func init() { } // Set as non-distributed. - globalIsDistXL = false + globalIsDistErasure = false // Disable printing console messages during tests. color.Output = ioutil.Discard @@ -93,6 +93,8 @@ func init() { logger.Disable = true initHelp() + + resetTestGlobals() // Uncomment the following line to see trace logs during unit tests. 
	// logger.AddTarget(console.New())
}
@@ -173,11 +175,11 @@ func prepareFS() (ObjectLayer, string, error) {
 	return obj, fsDirs[0], nil
 }

-func prepareXLSets32(ctx context.Context) (ObjectLayer, []string, error) {
-	return prepareXL(ctx, 32)
+func prepareErasureSets32(ctx context.Context) (ObjectLayer, []string, error) {
+	return prepareErasure(ctx, 32)
 }

-func prepareXL(ctx context.Context, nDisks int) (ObjectLayer, []string, error) {
+func prepareErasure(ctx context.Context, nDisks int) (ObjectLayer, []string, error) {
 	fsDirs, err := getRandomDisks(nDisks)
 	if err != nil {
 		return nil, nil, err
@@ -190,8 +192,8 @@ func prepareXL(ctx context.Context, nDisks int) (ObjectLayer, []string, error) {
 	return obj, fsDirs, nil
 }

-func prepareXL16(ctx context.Context) (ObjectLayer, []string, error) {
-	return prepareXL(ctx, 16)
+func prepareErasure16(ctx context.Context) (ObjectLayer, []string, error) {
+	return prepareErasure(ctx, 16)
 }

 // Initialize FS objects.
@@ -205,9 +207,10 @@ func initFSObjects(disk string, t *testing.T) (obj ObjectLayer) {
 	return obj
 }

-// TestErrHandler - Golang Testing.T and Testing.B, and gocheck.C satisfy this interface.
+// TestErrHandler - Go testing.T satisfies this interface.
 // This makes it easy to run the TestServer from any of the tests.
-// Using this interface, functionalities to be used in tests can be made generalized, and can be integrated in benchmarks/unit tests/go check suite tests.
+// Using this interface, functionalities to be used in tests can be
+// made generalized, and can be integrated in benchmarks/unit tests/go check suite tests.
 type TestErrHandler interface {
 	Log(args ...interface{})
 	Logf(format string, args ...interface{})
@@ -222,11 +225,11 @@ const (
 	// FSTestStr is the string which is used as notation for Single node ObjectLayer in the unit tests.
 	FSTestStr string = "FS"

-	// XLTestStr is the string which is used as notation for XL ObjectLayer in the unit tests.
-	XLTestStr string = "XL"
+	// ErasureTestStr is the string which is used as notation for Erasure ObjectLayer in the unit tests.
+	ErasureTestStr string = "Erasure"

-	// XLSetsTestStr is the string which is used as notation for XL sets object layer in the unit tests.
-	XLSetsTestStr string = "XLSet"
+	// ErasureSetsTestStr is the string which is used as notation for Erasure sets object layer in the unit tests.
+	ErasureSetsTestStr string = "ErasureSet"
 )

 const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
@@ -272,7 +275,7 @@ func isSameType(obj1, obj2 interface{}) bool {

 // TestServer encapsulates an instantiation of a MinIO instance with a temporary backend.
 // Example usage:
-//   s := StartTestServer(t,"XL")
+//   s := StartTestServer(t,"Erasure")
 //   defer s.Stop()
 type TestServer struct {
 	Root string
@@ -284,14 +287,14 @@ type TestServer struct {
 	cancel context.CancelFunc
 }

-// UnstartedTestServer - Configures a temp FS/XL backend,
+// UnstartedTestServer - Configures a temp FS/Erasure backend,
 // initializes the endpoints and configures the test server.
 // The server should be started using the Start() method.
 func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
 	ctx, cancel := context.WithCancel(context.Background())
 	// create an instance of TestServer.
 	testServer := TestServer{cancel: cancel}
-	// return FS/XL object layer and temp backend.
+	// return FS/Erasure object layer and temp backend.
objLayer, disks, err := prepareTestBackend(ctx, instanceType) if err != nil { t.Fatal(err) @@ -396,8 +399,8 @@ func resetGlobalEndpoints() { globalEndpoints = EndpointZones{} } -func resetGlobalIsXL() { - globalIsXL = false +func resetGlobalIsErasure() { + globalIsErasure = false } // reset global heal state @@ -445,8 +448,8 @@ func resetTestGlobals() { resetGlobalConfig() // Reset global endpoints. resetGlobalEndpoints() - // Reset global isXL flag. - resetGlobalIsXL() + // Reset global isErasure flag. + resetGlobalIsErasure() // Reset global heal state resetGlobalHealState() // Reset globalIAMSys to `nil` @@ -1549,7 +1552,7 @@ func newTestObjectLayer(ctx context.Context, endpointZones EndpointZones) (newOb return NewFSObjectLayer(endpointZones[0].Endpoints[0].Path) } - z, err := newXLZones(ctx, endpointZones) + z, err := newErasureZones(ctx, endpointZones) if err != nil { return nil, err } @@ -1570,7 +1573,7 @@ func initObjectLayer(ctx context.Context, endpointZones EndpointZones) (ObjectLa var formattedDisks []StorageAPI // Should use the object layer tests for validating cache. - if z, ok := objLayer.(*xlZones); ok { + if z, ok := objLayer.(*erasureZones); ok { formattedDisks = z.zones[0].GetDisks(0)() } @@ -1608,12 +1611,12 @@ func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (string, http.Handl bucketName := getRandomBucketName() // Create bucket. - err := obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create newbucket, return err. return "", nil, err } - // Register the API end points with XL object layer. + // Register the API end points with Erasure object layer. // Registering only the GetObject handler. apiRouter := initTestAPIEndPoints(obj, endpoints) f := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -1624,16 +1627,16 @@ func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (string, http.Handl } // prepare test backend. -// create FS/XL/XLSet backend. +// create FS/Erasure/ErasureSet backend. // return object layer, backend disks. func prepareTestBackend(ctx context.Context, instanceType string) (ObjectLayer, []string, error) { switch instanceType { - // Total number of disks for XL sets backend is set to 32. - case XLSetsTestStr: - return prepareXLSets32(ctx) - // Total number of disks for XL backend is set to 16. - case XLTestStr: - return prepareXL16(ctx) + // Total number of disks for Erasure sets backend is set to 32. + case ErasureSetsTestStr: + return prepareErasureSets32(ctx) + // Total number of disks for Erasure backend is set to 16. + case ErasureTestStr: + return prepareErasure16(ctx) default: // return FS backend by default. obj, disk, err := prepareFS() @@ -1801,7 +1804,7 @@ func ExecObjectLayerAPINilTest(t TestErrHandler, bucketName, objectName, instanc } // ExecObjectLayerAPITest - executes object layer API tests. -// Creates single node and XL ObjectLayer instance, registers the specified API end points and runs test for both the layers. +// Creates single node and Erasure ObjectLayer instance, registers the specified API end points and runs test for both the layers. 
func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints []string) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1831,19 +1834,20 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints [ // Executing the object layer tests for single node setup. objAPITest(objLayer, FSTestStr, bucketFS, fsAPIRouter, credentials, t) - objLayer, xlDisks, err := prepareXL16(ctx) + objLayer, erasureDisks, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Initialization of object layer failed for XL setup: %s", err) + t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } - bucketXL, xlAPIRouter, err := initAPIHandlerTest(objLayer, endpoints) + bucketErasure, erAPIRouter, err := initAPIHandlerTest(objLayer, endpoints) if err != nil { t.Fatalf("Initialzation of API handler tests failed: %s", err) } - // Executing the object layer tests for XL. - objAPITest(objLayer, XLTestStr, bucketXL, xlAPIRouter, credentials, t) + // Executing the object layer tests for Erasure. + objAPITest(objLayer, ErasureTestStr, bucketErasure, erAPIRouter, credentials, t) + // clean up the temporary test backend. - removeRoots(append(xlDisks, fsDir)) + removeRoots(append(erasureDisks, fsDir)) } // function to be passed to ExecObjectLayerAPITest, for executing object layr API handler tests. @@ -1860,7 +1864,7 @@ type objTestTypeWithDirs func(obj ObjectLayer, instanceType string, dirs []strin type objTestDiskNotFoundType func(obj ObjectLayer, instanceType string, dirs []string, t *testing.T) // ExecObjectLayerTest - executes object layer tests. -// Creates single node and XL ObjectLayer instance and runs test for both the layers. +// Creates single node and Erasure ObjectLayer instance and runs test for both the layers. func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1885,27 +1889,27 @@ func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) { newAllSubsystems() - objLayer, fsDirs, err := prepareXLSets32(ctx) + objLayer, fsDirs, err := prepareErasureSets32(ctx) if err != nil { - t.Fatalf("Initialization of object layer failed for XL setup: %s", err) + t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } initAllSubsystems(ctx, objLayer) defer removeRoots(append(fsDirs, fsDir)) - // Executing the object layer tests for XL. - objTest(objLayer, XLTestStr, t) + // Executing the object layer tests for Erasure. + objTest(objLayer, ErasureTestStr, t) } // ExecObjectLayerTestWithDirs - executes object layer tests. -// Creates single node and XL ObjectLayer instance and runs test for both the layers. +// Creates single node and Erasure ObjectLayer instance and runs test for both the layers. func ExecObjectLayerTestWithDirs(t TestErrHandler, objTest objTestTypeWithDirs) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - objLayer, fsDirs, err := prepareXL16(ctx) + objLayer, fsDirs, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Initialization of object layer failed for XL setup: %s", err) + t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } // initialize the server and obtain the credentials and root. @@ -1914,28 +1918,28 @@ func ExecObjectLayerTestWithDirs(t TestErrHandler, objTest objTestTypeWithDirs) t.Fatal("Unexpected error", err) } - // Executing the object layer tests for XL. 
- objTest(objLayer, XLTestStr, fsDirs, t) + // Executing the object layer tests for Erasure. + objTest(objLayer, ErasureTestStr, fsDirs, t) defer removeRoots(fsDirs) } // ExecObjectLayerDiskAlteredTest - executes object layer tests while altering -// disks in between tests. Creates XL ObjectLayer instance and runs test for XL layer. +// disks in between tests. Creates Erasure ObjectLayer instance and runs test for Erasure layer. func ExecObjectLayerDiskAlteredTest(t *testing.T, objTest objTestDiskNotFoundType) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - objLayer, fsDirs, err := prepareXL16(ctx) + objLayer, fsDirs, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Initialization of object layer failed for XL setup: %s", err) + t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil { t.Fatal("Failed to create config directory", err) } - // Executing the object layer tests for XL. - objTest(objLayer, XLTestStr, fsDirs, t) + // Executing the object layer tests for Erasure. + objTest(objLayer, ErasureTestStr, fsDirs, t) defer removeRoots(fsDirs) } @@ -1943,7 +1947,7 @@ func ExecObjectLayerDiskAlteredTest(t *testing.T, objTest objTestDiskNotFoundTyp type objTestStaleFilesType func(obj ObjectLayer, instanceType string, dirs []string, t *testing.T) // ExecObjectLayerStaleFilesTest - executes object layer tests those leaves stale -// files/directories under .minio/tmp. Creates XL ObjectLayer instance and runs test for XL layer. +// files/directories under .minio/tmp. Creates Erasure ObjectLayer instance and runs test for Erasure layer. func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1951,18 +1955,18 @@ func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType) nDisks := 16 erasureDisks, err := getRandomDisks(nDisks) if err != nil { - t.Fatalf("Initialization of disks for XL setup: %s", err) + t.Fatalf("Initialization of disks for Erasure setup: %s", err) } objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(erasureDisks...)) if err != nil { - t.Fatalf("Initialization of object layer failed for XL setup: %s", err) + t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil { t.Fatal("Failed to create config directory", err) } - // Executing the object layer tests for XL. - objTest(objLayer, XLTestStr, erasureDisks, t) + // Executing the object layer tests for Erasure. + objTest(objLayer, ErasureTestStr, erasureDisks, t) defer removeRoots(erasureDisks) } @@ -2088,7 +2092,7 @@ func registerAPIFunctions(muxRouter *mux.Router, objLayer ObjectLayer, apiFuncti registerBucketLevelFunc(bucketRouter, api, apiFunctions...) } -// Takes in XL object layer, and the list of API end points to be tested/required, registers the API end points and returns the HTTP handler. +// Takes in Erasure object layer, and the list of API end points to be tested/required, registers the API end points and returns the HTTP handler. // Need isolated registration of API end points while writing unit tests for end points. // All the API end points are registered only for the default case. 
func initTestAPIEndPoints(objLayer ObjectLayer, apiFunctions []string) http.Handler { diff --git a/cmd/tree-walk_test.go b/cmd/tree-walk_test.go index 0ea5e02d1..3dd5be28f 100644 --- a/cmd/tree-walk_test.go +++ b/cmd/tree-walk_test.go @@ -27,24 +27,6 @@ import ( "time" ) -// Returns function "listDir" of the type listDirFunc. -// disks - used for doing disk.ListDir() -func listDirFactory(ctx context.Context, disk StorageAPI) ListDirFunc { - // Returns sorted merged entries from all the disks. - listDir := func(volume, dirPath, dirEntry string) (bool, []string) { - entries, err := disk.ListDir(volume, dirPath, -1, xlMetaJSONFile) - if err != nil { - return false, nil - } - if len(entries) == 0 { - return true, nil - } - sort.Strings(entries) - return false, filterMatchingPrefix(entries, dirEntry) - } - return listDir -} - // Fixed volume name that could be used across tests const volume = "testvolume" @@ -101,6 +83,22 @@ func createNamespace(disk StorageAPI, volume string, files []string) error { return err } +// Returns function "listDir" of the type listDirFunc. +// disks - used for doing disk.ListDir() +func listDirFactory(ctx context.Context, disk StorageAPI) ListDirFunc { + return func(volume, dirPath, dirEntry string) (emptyDir bool, entries []string) { + entries, err := disk.ListDir(volume, dirPath, -1) + if err != nil { + return false, nil + } + if len(entries) == 0 { + return true, nil + } + sort.Strings(entries) + return false, filterMatchingPrefix(entries, dirEntry) + } +} + // Test if tree walker returns entries matching prefix alone are received // when a non empty prefix is supplied. func testTreeWalkPrefix(t *testing.T, listDir ListDirFunc) { @@ -237,66 +235,6 @@ func TestTreeWalkTimeout(t *testing.T) { } } -// Test ListDir - listDir is expected to only list one disk. -func TestListDir(t *testing.T) { - file1 := "file1" - file2 := "file2" - // Create two backend directories fsDir1 and fsDir2. - fsDir1, err := ioutil.TempDir(globalTestTmpDir, "minio-") - if err != nil { - t.Errorf("Unable to create tmp directory: %s", err) - } - fsDir2, err := ioutil.TempDir(globalTestTmpDir, "minio-") - if err != nil { - t.Errorf("Unable to create tmp directory: %s", err) - } - - // Create two StorageAPIs disk1 and disk2. - endpoints := mustGetNewEndpoints(fsDir1) - disk1, err := newStorageAPI(endpoints[0]) - if err != nil { - t.Errorf("Unable to create StorageAPI: %s", err) - } - - endpoints = mustGetNewEndpoints(fsDir2) - disk2, err := newStorageAPI(endpoints[0]) - if err != nil { - t.Errorf("Unable to create StorageAPI: %s", err) - } - - // create listDir function. - listDir1 := listDirFactory(context.Background(), disk1) - listDir2 := listDirFactory(context.Background(), disk2) - - // Create file1 in fsDir1 and file2 in fsDir2. - disks := []StorageAPI{disk1, disk2} - for i, disk := range disks { - err = createNamespace(disk, volume, []string{fmt.Sprintf("file%d", i+1)}) - if err != nil { - t.Fatal(err) - } - } - - // Should list "file1" from fsDir1. - _, entries := listDir1(volume, "", "") - if len(entries) != 1 { - t.Fatal("Expected the number of entries to be 1") - } - - if entries[0] != file1 { - t.Fatal("Expected the entry to be file1") - } - - _, entries = listDir2(volume, "", "") - if len(entries) != 1 { - t.Fatal("Expected the number of entries to be 1") - } - - if entries[0] != file2 { - t.Fatal("Expected the entry to be file2") - } -} - // TestRecursiveWalk - tests if treeWalk returns entries correctly with and // without recursively traversing prefixes. 
func TestRecursiveTreeWalk(t *testing.T) {
diff --git a/cmd/update_test.go b/cmd/update_test.go
index 5a4b73afd..d1bd75e2d 100644
--- a/cmd/update_test.go
+++ b/cmd/update_test.go
@@ -136,14 +136,14 @@ func TestUserAgent(t *testing.T) {
 		{
 			envName:  "MESOS_CONTAINER_NAME",
 			envValue: "mesos-11111",
-			mode:     globalMinioModeXL,
-			expectedStr: fmt.Sprintf("MinIO (%s; %s; %s; %s; source) MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET MinIO/universe-%s", runtime.GOOS, runtime.GOARCH, globalMinioModeXL, "dcos", "mesos-1111"),
+			mode:     globalMinioModeErasure,
+			expectedStr: fmt.Sprintf("MinIO (%s; %s; %s; %s; source) MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET MinIO/universe-%s", runtime.GOOS, runtime.GOARCH, globalMinioModeErasure, "dcos", "mesos-1111"),
 		},
 		{
 			envName:  "KUBERNETES_SERVICE_HOST",
 			envValue: "10.11.148.5",
-			mode:     globalMinioModeXL,
-			expectedStr: fmt.Sprintf("MinIO (%s; %s; %s; %s; source) MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET", runtime.GOOS, runtime.GOARCH, globalMinioModeXL, "kubernetes"),
+			mode:     globalMinioModeErasure,
+			expectedStr: fmt.Sprintf("MinIO (%s; %s; %s; %s; source) MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET MinIO/DEVELOPMENT.GOGET", runtime.GOOS, runtime.GOARCH, globalMinioModeErasure, "kubernetes"),
 		},
 	}
diff --git a/cmd/utils.go b/cmd/utils.go
index 1b28ba915..0a32e4b88 100644
--- a/cmd/utils.go
+++ b/cmd/utils.go
@@ -614,10 +614,10 @@ func lcp(l []string) string {
 // Returns the mode in which MinIO is running
 func getMinioMode() string {
 	mode := globalMinioModeFS
-	if globalIsDistXL {
-		mode = globalMinioModeDistXL
-	} else if globalIsXL {
-		mode = globalMinioModeXL
+	if globalIsDistErasure {
+		mode = globalMinioModeDistErasure
+	} else if globalIsErasure {
+		mode = globalMinioModeErasure
 	} else if globalIsGateway {
 		mode = globalMinioModeGatewayPrefix + globalGatewayName
 	}
diff --git a/cmd/utils_test.go b/cmd/utils_test.go
index 39ed623d7..087f9842c 100644
--- a/cmd/utils_test.go
+++ b/cmd/utils_test.go
@@ -366,7 +366,7 @@ func TestJSONSave(t *testing.T) {
 		t.Fatal(err)
 	}
 	if fi1.Size() != fi2.Size() {
-		t.Fatal("Size should not differ after jsonSave()", fi1.Size(), fi2.Size(), f.Name())
+		t.Fatal("Sizes should not differ after jsonSave()", fi1.Size(), fi2.Size(), f.Name())
 	}
 }

@@ -473,14 +473,14 @@ func TestGetMinioMode(t *testing.T) {
 			t.Fatalf("Expected %s got %s", expected, mode)
 		}
 	}
-	globalIsDistXL = true
-	testMinioMode(globalMinioModeDistXL)
+	globalIsDistErasure = true
+	testMinioMode(globalMinioModeDistErasure)

-	globalIsDistXL = false
-	globalIsXL = true
-	testMinioMode(globalMinioModeXL)
+	globalIsDistErasure = false
+	globalIsErasure = true
+	testMinioMode(globalMinioModeErasure)

-	globalIsDistXL, globalIsXL = false, false
+	globalIsDistErasure, globalIsErasure = false, false
 	testMinioMode(globalMinioModeFS)

 	globalIsGateway, globalGatewayName = true, "azure"
diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go
index 8f6f0c968..95528a0fa 100644
--- a/cmd/web-handlers.go
+++ b/cmd/web-handlers.go
@@ -169,11 +169,16 @@ func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, rep
 		return toJSONError(ctx, errInvalidBucketName)
 	}

+	opts := BucketOptions{
+		Location:    globalServerRegion,
+		LockEnabled: false,
+	}
+
 	if globalDNSConfig != nil {
 		if _, err := globalDNSConfig.Get(args.BucketName); err != nil {
 			if err == dns.ErrNoEntriesFound {
 				// Proceed to creating a bucket.
- if err = objectAPI.MakeBucketWithLocation(ctx, args.BucketName, globalServerRegion, false); err != nil { + if err = objectAPI.MakeBucketWithLocation(ctx, args.BucketName, opts); err != nil { return toJSONError(ctx, err) } if err = globalDNSConfig.Put(args.BucketName); err != nil { @@ -189,7 +194,7 @@ func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, rep return toJSONError(ctx, errBucketAlreadyExists) } - if err := objectAPI.MakeBucketWithLocation(ctx, args.BucketName, globalServerRegion, false); err != nil { + if err := objectAPI.MakeBucketWithLocation(ctx, args.BucketName, opts); err != nil { return toJSONError(ctx, err, args.BucketName) } @@ -259,16 +264,15 @@ func (web *webAPIHandlers) DeleteBucket(r *http.Request, args *RemoveBucketArgs, return toJSONError(ctx, err, args.BucketName) } + globalNotificationSys.DeleteBucketMetadata(ctx, args.BucketName) + if globalDNSConfig != nil { if err := globalDNSConfig.Delete(args.BucketName); err != nil { - // Deleting DNS entry failed, attempt to create the bucket again. - objectAPI.MakeBucketWithLocation(ctx, args.BucketName, "", false) + logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually using etcdctl", err)) return toJSONError(ctx, err) } } - globalNotificationSys.DeleteBucketMetadata(ctx, args.BucketName) - return nil } @@ -583,11 +587,6 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs, return toJSONError(ctx, errServerNotInitialized) } - getObjectInfo := objectAPI.GetObjectInfo - if web.CacheAPI() != nil { - getObjectInfo = web.CacheAPI().GetObjectInfo - } - deleteObjects := objectAPI.DeleteObjects if web.CacheAPI() != nil { deleteObjects = web.CacheAPI().DeleteObjects @@ -656,13 +655,14 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs, return nil } + versioned := globalBucketVersioningSys.Enabled(args.BucketName) + var err error next: for _, objectName := range args.Objects { // If not a directory, remove the object. if !HasSuffix(objectName, SlashSeparator) && objectName != "" { // Check permissions for non-anonymous user. 
- govBypassPerms := false if authErr != errNoAuthToken { if !globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, @@ -675,17 +675,6 @@ next: }) { return toJSONError(ctx, errAccessDenied) } - if globalIAMSys.IsAllowed(iampolicy.Args{ - AccountName: claims.AccessKey, - Action: iampolicy.BypassGovernanceRetentionAction, - BucketName: args.BucketName, - ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), - IsOwner: owner, - ObjectName: objectName, - Claims: claims.Map(), - }) { - govBypassPerms = true - } } if authErr == errNoAuthToken { @@ -699,32 +688,10 @@ next: }) { return toJSONError(ctx, errAccessDenied) } - - // Check if object is allowed to be deleted anonymously - if globalPolicySys.IsAllowed(policy.Args{ - Action: policy.BypassGovernanceRetentionAction, - BucketName: args.BucketName, - ConditionValues: getConditionValues(r, "", "", nil), - IsOwner: false, - ObjectName: objectName, - }) { - govBypassPerms = true - } } - apiErr := enforceRetentionBypassForDeleteWeb(ctx, r, args.BucketName, objectName, getObjectInfo, govBypassPerms) - if apiErr == ErrObjectLocked { - return toJSONError(ctx, errLockedObject) - } - if apiErr != ErrNone && apiErr != ErrNoSuchKey { - return toJSONError(ctx, errAccessDenied) - } - if apiErr == ErrNone { - if err = deleteObject(ctx, objectAPI, web.CacheAPI(), args.BucketName, objectName, r); err != nil { - break next - } - } - continue + _, err = deleteObject(ctx, objectAPI, web.CacheAPI(), args.BucketName, objectName, r, ObjectOptions{}) + logger.LogIf(ctx, err) } if authErr == errNoAuthToken { @@ -761,13 +728,16 @@ next: } for { - var objects []string + var objects []ObjectToDelete for obj := range objInfoCh { if len(objects) == maxDeleteList { // Reached maximum delete requests, attempt a delete for now. break } - objects = append(objects, obj.Name) + objects = append(objects, ObjectToDelete{ + ObjectName: obj.Name, + VersionID: obj.VersionID, + }) } // Nothing to do. @@ -776,10 +746,12 @@ next: } // Deletes a list of objects. - _, err = deleteObjects(ctx, args.BucketName, objects) - if err != nil { - logger.LogIf(ctx, err) - break next + _, errs := deleteObjects(ctx, args.BucketName, objects, ObjectOptions{Versioned: versioned}) + for _, err := range errs { + if err != nil { + logger.LogIf(ctx, err) + break next + } } } } @@ -1084,6 +1056,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) { // get gateway encryption options var opts ObjectOptions opts, err = putOpts(ctx, r, bucket, object, metadata) + if err != nil { writeErrorResponseHeadersOnly(w, toAPIError(ctx, err)) return diff --git a/cmd/web-handlers_test.go b/cmd/web-handlers_test.go index 676410195..e1e10e250 100644 --- a/cmd/web-handlers_test.go +++ b/cmd/web-handlers_test.go @@ -136,7 +136,7 @@ func TestWebHandlerLogin(t *testing.T) { // testLoginWebHandler - Test login web handler func testLoginWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -176,7 +176,7 @@ func TestWebHandlerStorageInfo(t *testing.T) { // testStorageInfoWebHandler - Test StorageInfo web handler func testStorageInfoWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { // get random bucket name. - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. 
apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -209,7 +209,7 @@ func TestWebHandlerServerInfo(t *testing.T) { // testServerInfoWebHandler - Test ServerInfo web handler func testServerInfoWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -251,7 +251,7 @@ func TestWebHandlerMakeBucket(t *testing.T) { // testMakeBucketWebHandler - Test MakeBucket web handler func testMakeBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -315,7 +315,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH bucketName := getRandomBucketName() var opts ObjectOptions - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { t.Fatalf("failed to create bucket: %s (%s)", err.Error(), instanceType) } @@ -381,7 +381,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH // If we created the bucket with an object, now delete the object to cleanup. if test.initWithObject { - err = obj.DeleteObject(context.Background(), test.bucketName, "object") + _, err = obj.DeleteObject(context.Background(), test.bucketName, "object", ObjectOptions{}) if err != nil { t.Fatalf("could not delete object, %s", err.Error()) } @@ -393,7 +393,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH continue } - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create new bucket, abort. t.Fatalf("failed to create new bucket (%s): %s", instanceType, err.Error()) @@ -408,7 +408,7 @@ func TestWebHandlerListBuckets(t *testing.T) { // testListBucketsHandler - Test ListBuckets web handler func testListBucketsWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -421,7 +421,7 @@ func testListBucketsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa bucketName := getRandomBucketName() // Create bucket. - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err) @@ -456,7 +456,7 @@ func TestWebHandlerListObjects(t *testing.T) { // testListObjectsHandler - Test ListObjects web handler func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -472,7 +472,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa objectSize := 1 * humanize.KiByte // Create bucket. 
- err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err) @@ -537,7 +537,7 @@ func TestWebHandlerRemoveObject(t *testing.T) { // testRemoveObjectWebHandler - Test RemoveObjectObject web handler func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -552,7 +552,7 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrH objectSize := 1 * humanize.KiByte // Create bucket. - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err) @@ -628,7 +628,7 @@ func TestWebHandlerGenerateAuth(t *testing.T) { // testGenerateAuthWebHandler - Test GenerateAuth web handler func testGenerateAuthWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -727,7 +727,7 @@ func TestWebHandlerUpload(t *testing.T) { // testUploadWebHandler - Test Upload web handler func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -766,7 +766,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler return rec.Code } // Create bucket. - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err) @@ -809,7 +809,7 @@ func TestWebHandlerDownload(t *testing.T) { // testDownloadWebHandler - Test Download web handler func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -841,7 +841,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl } // Create bucket. - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err) @@ -919,7 +919,7 @@ func testWebHandlerDownloadZip(obj ObjectLayer, instanceType string, t TestErrHa fileThree := "cccccccccccccc" // Create bucket. - err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) if err != nil { // failed to create newbucket, abort. 
t.Fatalf("%s : %s", instanceType, err) @@ -990,7 +990,7 @@ func TestWebHandlerPresignedGetHandler(t *testing.T) { } func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrHandler) { - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) credentials := globalActiveCred @@ -1006,7 +1006,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH objectSize := 1 * humanize.KiByte // Create bucket. - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { // failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err) @@ -1039,7 +1039,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH t.Fatalf("Failed, %v", err) } - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter = initTestAPIEndPoints(obj, []string{"GetObject"}) // Initialize a new api recorder. @@ -1062,7 +1062,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH t.Fatal("Read data is not equal was what was expected") } - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter = initTestWebRPCEndPoint(obj) presignGetReq = PresignedGetArgs{ @@ -1093,15 +1093,15 @@ func TestWebCheckAuthorization(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - // Prepare XL backend - obj, fsDirs, err := prepareXL16(ctx) + // Prepare Erasure backend + obj, fsDirs, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Initialization of object layer failed for XL setup: %s", err) + t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } - // Executing the object layer tests for XL. + // Executing the object layer tests for Erasure. defer removeRoots(fsDirs) - // Register the API end points with XL/FS object layer. + // Register the API end points with Erasure/FS object layer. apiRouter := initTestWebRPCEndPoint(obj) // initialize the server and obtain the credentials and root. @@ -1185,12 +1185,12 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - // Prepare XL backend - obj, fsDirs, err := prepareXL16(ctx) + // Prepare Erasure backend + obj, fsDirs, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Initialization of object layer failed for XL setup: %s", err) + t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } - // Executing the object layer tests for XL. + // Executing the object layer tests for Erasure. defer removeRoots(fsDirs) // initialize the server and obtain the credentials and root. 
@@ -1201,23 +1201,23 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) { } bucketName := "mybucket" - err = obj.MakeBucketWithLocation(context.Background(), bucketName, "", false) + err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{}) if err != nil { t.Fatal("Cannot make bucket:", err) } - // Set faulty disks to XL backend - z := obj.(*xlZones) + // Set faulty disks to Erasure backend + z := obj.(*erasureZones) xl := z.zones[0].sets[0] - xlDisks := xl.getDisks() - z.zones[0].xlDisksMu.Lock() + erasureDisks := xl.getDisks() + z.zones[0].erasureDisksMu.Lock() xl.getDisks = func() []StorageAPI { - for i, d := range xlDisks { - xlDisks[i] = newNaughtyDisk(d, nil, errFaultyDisk) + for i, d := range erasureDisks { + erasureDisks[i] = newNaughtyDisk(d, nil, errFaultyDisk) } - return xlDisks + return erasureDisks } - z.zones[0].xlDisksMu.Unlock() + z.zones[0].erasureDisksMu.Unlock() // Initialize web rpc endpoint. apiRouter := initTestWebRPCEndPoint(obj) diff --git a/cmd/xl-sets_test.go b/cmd/xl-sets_test.go deleted file mode 100644 index 91d2f9522..000000000 --- a/cmd/xl-sets_test.go +++ /dev/null @@ -1,153 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "context" - "os" - "path/filepath" - "testing" -) - -// TestCrcHashMod - test crc hash. -func TestCrcHashMod(t *testing.T) { - testCases := []struct { - objectName string - crcHash int - }{ - // cases which should pass the test. - // passing in valid object name. - {"object", 12}, - {"The Shining Script .pdf", 14}, - {"Cost Benefit Analysis (2009-2010).pptx", 13}, - {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", 1}, - {"SHØRT", 9}, - {"There are far too many object names, and far too few bucket names!", 13}, - {"a/b/c/", 1}, - {"/a/b/c", 4}, - {string([]byte{0xff, 0xfe, 0xfd}), 13}, - } - - // Tests hashing order to be consistent. - for i, testCase := range testCases { - if crcHashElement := hashKey("CRCMOD", testCase.objectName, 16); crcHashElement != testCase.crcHash { - t.Errorf("Test case %d: Expected \"%v\" but failed \"%v\"", i+1, testCase.crcHash, crcHashElement) - } - } - - if crcHashElement := hashKey("CRCMOD", "This will fail", -1); crcHashElement != -1 { - t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) - } - - if crcHashElement := hashKey("CRCMOD", "This will fail", 0); crcHashElement != -1 { - t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) - } - - if crcHashElement := hashKey("UNKNOWN", "This will fail", 0); crcHashElement != -1 { - t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement) - } -} - -// TestNewXL - tests initialization of all input disks -// and constructs a valid `XL` object -func TestNewXLSets(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var nDisks = 16 // Maximum disks. 
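
TestCrcHashMod in the file being deleted here pinned down the CRC-based placement hash ("CRCMOD"), which existing deployments keep for backward-compatible object placement; per the commit message, new deployments place objects with SipHash instead. A sketch of the two-algorithm dispatch, assuming hash/crc32 and github.com/dchest/siphash; the SipHash key constants below are placeholders, not values MinIO derives:

package main

import (
	"fmt"
	"hash/crc32"

	"github.com/dchest/siphash"
)

// hashKey maps an object name onto one of cardinality erasure sets.
// The algorithm tag keeps old deployments on CRC while new ones get
// SipHash-based placement.
func hashKey(algo, key string, cardinality int) int {
	if cardinality <= 0 {
		return -1
	}
	switch algo {
	case "CRCMOD":
		return int(crc32.ChecksumIEEE([]byte(key)) % uint32(cardinality))
	case "SIPMOD":
		// A fixed 128-bit key (placeholder values here) so that every
		// node computes the same placement for the same object name.
		sum := siphash.Hash(0x0123456789abcdef, 0xfedcba9876543210, []byte(key))
		return int(sum % uint64(cardinality))
	default:
		return -1
	}
}

func main() {
	fmt.Println(hashKey("CRCMOD", "object", 16)) // the deleted test pinned this input to 12
	fmt.Println(hashKey("SIPMOD", "object", 16))
}
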
- var erasureDisks []string - for i := 0; i < nDisks; i++ { - // Do not attempt to create this path, the test validates - // so that newXLSets initializes non existing paths - // and successfully returns initialized object layer. - disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - erasureDisks = append(erasureDisks, disk) - defer os.RemoveAll(disk) - } - - endpoints := mustGetNewEndpoints(erasureDisks...) - _, _, err := waitForFormatXL(true, endpoints, 1, 0, 16, "") - if err != errInvalidArgument { - t.Fatalf("Expecting error, got %s", err) - } - - _, _, err = waitForFormatXL(true, nil, 1, 1, 16, "") - if err != errInvalidArgument { - t.Fatalf("Expecting error, got %s", err) - } - - // Initializes all erasure disks - storageDisks, format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "") - if err != nil { - t.Fatalf("Unable to format disks for erasure, %s", err) - } - - if _, err := newXLSets(ctx, endpoints, storageDisks, format); err != nil { - t.Fatalf("Unable to initialize erasure") - } -} - -// TestHashedLayer - tests the hashed layer which will be returned -// consistently for a given object name. -func TestHashedLayer(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var objs []*xlObjects - - for i := 0; i < 16; i++ { - obj, fsDirs, err := prepareXL16(ctx) - if err != nil { - t.Fatal("Unable to initialize 'XL' object layer.", err) - } - - // Remove all dirs. - for _, dir := range fsDirs { - defer os.RemoveAll(dir) - } - - z := obj.(*xlZones) - objs = append(objs, z.zones[0].sets[0]) - } - - sets := &xlSets{sets: objs, distributionAlgo: "CRCMOD"} - - testCases := []struct { - objectName string - expectedObj *xlObjects - }{ - // cases which should pass the test. - // passing in valid object name. - {"object", objs[12]}, - {"The Shining Script .pdf", objs[14]}, - {"Cost Benefit Analysis (2009-2010).pptx", objs[13]}, - {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", objs[1]}, - {"SHØRT", objs[9]}, - {"There are far too many object names, and far too few bucket names!", objs[13]}, - {"a/b/c/", objs[1]}, - {"/a/b/c", objs[4]}, - {string([]byte{0xff, 0xfe, 0xfd}), objs[13]}, - } - - // Tests hashing order to be consistent. - for i, testCase := range testCases { - gotObj := sets.getHashedSet(testCase.objectName) - if gotObj != testCase.expectedObj { - t.Errorf("Test case %d: Expected \"%#v\" but failed \"%#v\"", i+1, testCase.expectedObj, gotObj) - } - } -} diff --git a/cmd/xl-storage-disk-id-check.go b/cmd/xl-storage-disk-id-check.go new file mode 100644 index 000000000..b3a2894e8 --- /dev/null +++ b/cmd/xl-storage-disk-id-check.go @@ -0,0 +1,266 @@ +/* + * MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "context" + "io" +) + +// Detects change in underlying disk. 
+type xlStorageDiskIDCheck struct {
+	storage *xlStorage
+	diskID  string
+}
+
+func (p *xlStorageDiskIDCheck) String() string {
+	return p.storage.String()
+}
+
+func (p *xlStorageDiskIDCheck) IsOnline() bool {
+	storedDiskID, err := p.storage.GetDiskID()
+	if err != nil {
+		return false
+	}
+	return storedDiskID == p.diskID
+}
+
+func (p *xlStorageDiskIDCheck) IsLocal() bool {
+	return p.storage.IsLocal()
+}
+
+func (p *xlStorageDiskIDCheck) Hostname() string {
+	return p.storage.Hostname()
+}
+
+func (p *xlStorageDiskIDCheck) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) (dataUsageCache, error) {
+	if p.isDiskStale() {
+		return dataUsageCache{}, errDiskNotFound
+	}
+	return p.storage.CrawlAndGetDataUsage(ctx, cache)
+}
+
+func (p *xlStorageDiskIDCheck) Close() error {
+	return p.storage.Close()
+}
+
+func (p *xlStorageDiskIDCheck) GetDiskID() (string, error) {
+	return p.storage.GetDiskID()
+}
+
+func (p *xlStorageDiskIDCheck) SetDiskID(id string) {
+	p.diskID = id
+}
+
+func (p *xlStorageDiskIDCheck) isDiskStale() bool {
+	if p.diskID == "" {
+		// For an empty disk ID we allow the call, as the server might still be
+		// coming up and trying to read or create format.json.
+		return false
+	}
+	storedDiskID, err := p.storage.GetDiskID()
+	if err == nil && p.diskID == storedDiskID {
+		return false
+	}
+	return true
+}
+
+func (p *xlStorageDiskIDCheck) DiskInfo() (info DiskInfo, err error) {
+	if p.isDiskStale() {
+		return info, errDiskNotFound
+	}
+	return p.storage.DiskInfo()
+}
+
+func (p *xlStorageDiskIDCheck) MakeVolBulk(volumes ...string) (err error) {
+	if p.isDiskStale() {
+		return errDiskNotFound
+	}
+	return p.storage.MakeVolBulk(volumes...)
+}
+
+func (p *xlStorageDiskIDCheck) MakeVol(volume string) (err error) {
+	if p.isDiskStale() {
+		return errDiskNotFound
+	}
+	return p.storage.MakeVol(volume)
+}
+
+func (p *xlStorageDiskIDCheck) ListVols() ([]VolInfo, error) {
+	if p.isDiskStale() {
+		return nil, errDiskNotFound
+	}
+	return p.storage.ListVols()
+}
+
+func (p *xlStorageDiskIDCheck) StatVol(volume string) (vol VolInfo, err error) {
+	if p.isDiskStale() {
+		return vol, errDiskNotFound
+	}
+	return p.storage.StatVol(volume)
+}
+
+func (p *xlStorageDiskIDCheck) DeleteVol(volume string, forceDelete bool) (err error) {
+	if p.isDiskStale() {
+		return errDiskNotFound
+	}
+	return p.storage.DeleteVol(volume, forceDelete)
+}
+
+func (p *xlStorageDiskIDCheck) WalkVersions(volume, dirPath string, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfoVersions, error) {
+	if p.isDiskStale() {
+		return nil, errDiskNotFound
+	}
+	return p.storage.WalkVersions(volume, dirPath, marker, recursive, endWalkCh)
+}
+
+func (p *xlStorageDiskIDCheck) Walk(volume, dirPath string, marker string, recursive bool, endWalkCh <-chan struct{}) (chan FileInfo, error) {
+	if p.isDiskStale() {
+		return nil, errDiskNotFound
+	}
+	return p.storage.Walk(volume, dirPath, marker, recursive, endWalkCh)
+}
+
+func (p *xlStorageDiskIDCheck) WalkSplunk(volume, dirPath string, marker string, endWalkCh <-chan struct{}) (chan FileInfo, error) {
+	if p.isDiskStale() {
+		return nil, errDiskNotFound
+	}
+	return p.storage.WalkSplunk(volume, dirPath, marker, endWalkCh)
+}
+
+func (p *xlStorageDiskIDCheck) ListDir(volume, dirPath string, count int) ([]string, error) {
+	if p.isDiskStale() {
+		return nil, errDiskNotFound
+	}
+	return p.storage.ListDir(volume, dirPath, count)
+}
+
+func (p *xlStorageDiskIDCheck) ReadFile(volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) {
+	if p.isDiskStale() {
+		return 0, errDiskNotFound
+	}
+	return p.storage.ReadFile(volume, path, offset, buf, verifier)
+}
+
+func (p *xlStorageDiskIDCheck) AppendFile(volume string, path string, buf []byte) (err error) {
+	if p.isDiskStale() {
+		return errDiskNotFound
+	}
+	return p.storage.AppendFile(volume, path, buf)
+}
+
+func (p *xlStorageDiskIDCheck) CreateFile(volume, path string, size int64, reader io.Reader) error {
+	if p.isDiskStale() {
+		return errDiskNotFound
+	}
+	return p.storage.CreateFile(volume, path, size, reader)
+}
+
+func (p *xlStorageDiskIDCheck) ReadFileStream(volume, path string, offset, length int64) (io.ReadCloser, error) {
+	if p.isDiskStale() {
+		return nil, errDiskNotFound
+	}
+	return p.storage.ReadFileStream(volume, path, offset, length)
+}
+
+func (p *xlStorageDiskIDCheck) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) error {
+	if p.isDiskStale() {
+		return errDiskNotFound
+	}
+	return p.storage.RenameFile(srcVolume, srcPath, dstVolume, dstPath)
+}
+
+func (p *xlStorageDiskIDCheck) RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath string) error {
+	if p.isDiskStale() {
+		return errDiskNotFound
+	}
+	return p.storage.RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath)
+}
+
+func (p *xlStorageDiskIDCheck) CheckParts(volume string, path string, fi FileInfo) (err error) {
+	if p.isDiskStale() {
+		return errDiskNotFound
+	}
+	return p.storage.CheckParts(volume, path, fi)
+}
+
+func (p *xlStorageDiskIDCheck) CheckFile(volume string, path string) (err error) {
+	if p.isDiskStale() {
+		return errDiskNotFound
+	}
+	return p.storage.CheckFile(volume, path)
+}
+
+func (p *xlStorageDiskIDCheck) DeleteFile(volume string, path string) (err error) {
+	if p.isDiskStale() {
+		return errDiskNotFound
+	}
+	return p.storage.DeleteFile(volume, path)
+}
+
+func (p *xlStorageDiskIDCheck) DeleteVersions(volume string, versions []FileInfo) (errs []error) {
+	if p.isDiskStale() {
+		errs = make([]error, len(versions))
+		for i := range errs {
+			errs[i] = errDiskNotFound
+		}
+		return errs
+	}
+	return p.storage.DeleteVersions(volume, versions)
+}
+
+func (p *xlStorageDiskIDCheck) VerifyFile(volume, path string, fi FileInfo) error {
+	if p.isDiskStale() {
+		return errDiskNotFound
+	}
+	return p.storage.VerifyFile(volume, path, fi)
+}
+
+func (p *xlStorageDiskIDCheck) WriteAll(volume string, path string, reader io.Reader) (err error) {
+	if p.isDiskStale() {
+		return errDiskNotFound
+	}
+	return p.storage.WriteAll(volume, path, reader)
+}
+
+func (p *xlStorageDiskIDCheck) DeleteVersion(volume, path string, fi FileInfo) (err error) {
+	if p.isDiskStale() {
+		return errDiskNotFound
+	}
+	return p.storage.DeleteVersion(volume, path, fi)
+}
+
+func (p *xlStorageDiskIDCheck) WriteMetadata(volume, path string, fi FileInfo) (err error) {
+	if p.isDiskStale() {
+		return errDiskNotFound
+	}
+	return p.storage.WriteMetadata(volume, path, fi)
+}
+
+func (p *xlStorageDiskIDCheck) ReadVersion(volume, path, versionID string) (fi FileInfo, err error) {
+	if p.isDiskStale() {
+		return fi, errDiskNotFound
+	}
+	return p.storage.ReadVersion(volume, path, versionID)
+}
+
+func (p *xlStorageDiskIDCheck) ReadAll(volume string, path string) (buf []byte, err error) {
+	if p.isDiskStale() {
+		return nil, errDiskNotFound
+	}
+	return p.storage.ReadAll(volume, path)
+}
diff --git a/cmd/posix-errors.go b/cmd/xl-storage-errors.go
similarity index 98%
rename from cmd/posix-errors.go
rename to cmd/xl-storage-errors.go
index 652333260..5819c241d
100644
--- a/cmd/posix-errors.go
+++ b/cmd/xl-storage-errors.go
@@ -1,5 +1,5 @@
 /*
- * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/cmd/posix-errors_test.go b/cmd/xl-storage-errors_test.go
similarity index 96%
rename from cmd/posix-errors_test.go
rename to cmd/xl-storage-errors_test.go
index da4624be3..0e0db6cd3 100644
--- a/cmd/posix-errors_test.go
+++ b/cmd/xl-storage-errors_test.go
@@ -1,5 +1,5 @@
 /*
- * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/cmd/xl-storage-format-utils.go b/cmd/xl-storage-format-utils.go
new file mode 100644
index 000000000..90a848bc2
--- /dev/null
+++ b/cmd/xl-storage-format-utils.go
@@ -0,0 +1,81 @@
+/*
+ * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cmd
+
+import (
+	jsoniter "github.com/json-iterator/go"
+)
+
+func getFileInfoVersions(xlMetaBuf []byte, volume, path string) (FileInfoVersions, error) {
+	if isXL2V1Format(xlMetaBuf) {
+		var xlMeta xlMetaV2
+		if err := xlMeta.Load(xlMetaBuf); err != nil {
+			return FileInfoVersions{}, err
+		}
+		versions, deletedVersions, latestModTime, err := xlMeta.ListVersions(volume, path)
+		if err != nil {
+			return FileInfoVersions{}, err
+		}
+		return FileInfoVersions{
+			Volume:        volume,
+			Name:          path,
+			Versions:      versions,
+			Deleted:       deletedVersions,
+			LatestModTime: latestModTime,
+		}, nil
+	}
+
+	xlMeta := &xlMetaV1Object{}
+	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	if err := json.Unmarshal(xlMetaBuf, xlMeta); err != nil {
+		return FileInfoVersions{}, errFileCorrupt
+	}
+
+	fi, err := xlMeta.ToFileInfo(volume, path)
+	if err != nil {
+		return FileInfoVersions{}, err
+	}
+
+	fi.IsLatest = true // No versions, so the current version is latest.
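+	// The legacy object therefore maps to exactly one FileInfo entry; the
+	// Deleted list stays empty because xl.json could not record delete markers.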
+	return FileInfoVersions{
+		Volume:        volume,
+		Name:          path,
+		Versions:      []FileInfo{fi},
+		LatestModTime: fi.ModTime,
+	}, nil
+}
+
+func getFileInfo(xlMetaBuf []byte, volume, path, versionID string) (FileInfo, error) {
+	if isXL2V1Format(xlMetaBuf) {
+		var xlMeta xlMetaV2
+		if err := xlMeta.Load(xlMetaBuf); err != nil {
+			return FileInfo{}, err
+		}
+		return xlMeta.ToFileInfo(volume, path, versionID)
+	}
+
+	xlMeta := &xlMetaV1Object{}
+	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	if err := json.Unmarshal(xlMetaBuf, xlMeta); err != nil {
+		return FileInfo{}, errFileCorrupt
+	}
+	fi, err := xlMeta.ToFileInfo(volume, path)
+	if err == errFileNotFound && versionID != "" {
+		return fi, errFileVersionNotFound
+	}
+	return fi, err
+}
diff --git a/cmd/xl-storage-format-v1.go b/cmd/xl-storage-format-v1.go
new file mode 100644
index 000000000..af5ca731b
--- /dev/null
+++ b/cmd/xl-storage-format-v1.go
@@ -0,0 +1,208 @@
+/*
+ * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cmd
+
+import (
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	jsoniter "github.com/json-iterator/go"
+	"github.com/minio/minio/cmd/logger"
+)
+
+// XL constants.
+const (
+	// XL metadata file carries per object metadata.
+	xlStorageFormatFileV1 = "xl.json"
+)
+
+// Valid - tells us if the format is sane by validating
+// format version and erasure coding information.
+func (m *xlMetaV1Object) valid() bool {
+	return isXLMetaFormatValid(m.Version, m.Format) &&
+		isXLMetaErasureInfoValid(m.Erasure.DataBlocks, m.Erasure.ParityBlocks)
+}
+
+// Verifies if the backend format metadata is sane by validating
+// the version string and format style.
+func isXLMetaFormatValid(version, format string) bool {
+	return ((version == xlMetaVersion101 ||
+		version == xlMetaVersion100) &&
+		format == xlMetaFormat)
+}
+
+// Verifies if the backend format metadata is sane by validating
+// the ErasureInfo, i.e. data and parity blocks.
+func isXLMetaErasureInfoValid(data, parity int) bool {
+	return ((data >= parity) && (data != 0) && (parity != 0))
+}
+
+//go:generate msgp -file=$GOFILE -unexported
+
+// An xlMetaV1Object represents the `xl.meta` metadata header.
+type xlMetaV1Object struct {
+	Version string   `json:"version"` // Version of the current `xl.meta`.
+	Format  string   `json:"format"`  // Format of the current `xl.meta`.
+	Stat    StatInfo `json:"stat"`    // Stat of the current object `xl.meta`.
+	// Erasure coded info for the current object `xl.meta`.
+	Erasure ErasureInfo `json:"erasure"`
+	// MinIO release tag for current object `xl.meta`.
+	Minio struct {
+		Release string `json:"release"`
+	} `json:"minio"`
+	// Metadata map for current object `xl.meta`.
+	Meta map[string]string `json:"meta,omitempty"`
+	// Captures all the individual object parts in `xl.meta`.
+	Parts []ObjectPartInfo `json:"parts,omitempty"`
+
+	// Dummy values used for legacy use cases.
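+	// They let objects written before versioning existed flow through the
+	// same FileInfo plumbing as natively versioned objects.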
+	VersionID string `json:"versionId,omitempty"`
+	DataDir   string `json:"dataDir,omitempty"` // always points to "legacy"
+}
+
+// StatInfo - carries stat information of the object.
+type StatInfo struct {
+	Size    int64     `json:"size"`    // Size of the object `xl.meta`.
+	ModTime time.Time `json:"modTime"` // ModTime of the object `xl.meta`.
+}
+
+// ErasureInfo holds erasure coding and bitrot related information.
+type ErasureInfo struct {
+	// Algorithm is the string representation of erasure-coding-algorithm
+	Algorithm string `json:"algorithm"`
+	// DataBlocks is the number of data blocks for erasure-coding
+	DataBlocks int `json:"data"`
+	// ParityBlocks is the number of parity blocks for erasure-coding
+	ParityBlocks int `json:"parity"`
+	// BlockSize is the size of one erasure-coded block
+	BlockSize int64 `json:"blockSize"`
+	// Index is the index of the current disk
+	Index int `json:"index"`
+	// Distribution is the distribution of the data and parity blocks
+	Distribution []int `json:"distribution"`
+	// Checksums holds all bitrot checksums of all erasure encoded blocks
+	Checksums []ChecksumInfo `json:"checksum,omitempty"`
+}
+
+// BitrotAlgorithm specifies an algorithm used for bitrot protection.
+type BitrotAlgorithm uint
+
+const (
+	// SHA256 represents the SHA-256 hash function
+	SHA256 BitrotAlgorithm = 1 + iota
+	// HighwayHash256 represents the HighwayHash-256 hash function
+	HighwayHash256
+	// HighwayHash256S represents the Streaming HighwayHash-256 hash function
+	HighwayHash256S
+	// BLAKE2b512 represents the BLAKE2b-512 hash function
+	BLAKE2b512
+)
+
+// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection.
+const (
+	DefaultBitrotAlgorithm = HighwayHash256S
+)
+
+// ObjectPartInfo - Info of each part kept in the multipart metadata
+// file after CompleteMultipartUpload() is called.
+type ObjectPartInfo struct {
+	ETag       string `json:"etag,omitempty"`
+	Number     int    `json:"number"`
+	Size       int64  `json:"size"`
+	ActualSize int64  `json:"actualSize"`
+}
+
+// ChecksumInfo - carries checksums of individual scattered parts per disk.
+type ChecksumInfo struct {
+	PartNumber int
+	Algorithm  BitrotAlgorithm
+	Hash       []byte
+}
+
+type checksumInfoJSON struct {
+	Name      string `json:"name"`
+	Algorithm string `json:"algorithm"`
+	Hash      string `json:"hash,omitempty"`
+}
+
+// MarshalJSON marshals the ChecksumInfo struct.
+func (c ChecksumInfo) MarshalJSON() ([]byte, error) {
+	info := checksumInfoJSON{
+		Name:      fmt.Sprintf("part.%d", c.PartNumber),
+		Algorithm: c.Algorithm.String(),
+		Hash:      hex.EncodeToString(c.Hash),
+	}
+	return json.Marshal(info)
+}
+
+// UnmarshalJSON - custom checksum info unmarshaller
+func (c *ChecksumInfo) UnmarshalJSON(data []byte) error {
+	var info checksumInfoJSON
+	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	if err := json.Unmarshal(data, &info); err != nil {
+		return err
+	}
+	sum, err := hex.DecodeString(info.Hash)
+	if err != nil {
+		return err
+	}
+	c.Algorithm = BitrotAlgorithmFromString(info.Algorithm)
+	c.Hash = sum
+	if _, err = fmt.Sscanf(info.Name, "part.%d", &c.PartNumber); err != nil {
+		return err
+	}
+
+	if !c.Algorithm.Available() {
+		logger.LogIf(GlobalContext, errBitrotHashAlgoInvalid)
+		return errBitrotHashAlgoInvalid
+	}
+	return nil
+}
+
+// legacyDataDir is a constant and shouldn't be changed.
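+// It names the synthetic data directory under which pre-versioning objects
+// are addressed once they are read through the versioned code path (the
+// DataDir field above always points to it).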
+const legacyDataDir = "legacy" + +func (m *xlMetaV1Object) ToFileInfo(volume, path string) (FileInfo, error) { + if !m.valid() { + return FileInfo{}, errFileCorrupt + } + return FileInfo{ + Volume: volume, + Name: path, + ModTime: m.Stat.ModTime, + Size: m.Stat.Size, + Metadata: m.Meta, + Parts: m.Parts, + Erasure: m.Erasure, + VersionID: m.VersionID, + DataDir: m.DataDir, + }, nil +} + +// XL metadata constants. +const ( + // XL meta version. + xlMetaVersion101 = "1.0.1" + + // XL meta version. + xlMetaVersion100 = "1.0.0" + + // XL meta format string. + xlMetaFormat = "xl" +) diff --git a/cmd/xl-storage-format-v1_gen.go b/cmd/xl-storage-format-v1_gen.go new file mode 100644 index 000000000..6c88e0daf --- /dev/null +++ b/cmd/xl-storage-format-v1_gen.go @@ -0,0 +1,1568 @@ +package cmd + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *BitrotAlgorithm) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 uint + zb0001, err = dc.ReadUint() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = BitrotAlgorithm(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z BitrotAlgorithm) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteUint(uint(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z BitrotAlgorithm) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendUint(o, uint(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BitrotAlgorithm) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 uint + zb0001, bts, err = msgp.ReadUintBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = BitrotAlgorithm(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z BitrotAlgorithm) Msgsize() (s int) { + s = msgp.UintSize + return +} + +// DecodeMsg implements msgp.Decodable +func (z *ChecksumInfo) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "PartNumber": + z.PartNumber, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "PartNumber") + return + } + case "Algorithm": + { + var zb0002 uint + zb0002, err = dc.ReadUint() + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + z.Algorithm = BitrotAlgorithm(zb0002) + } + case "Hash": + z.Hash, err = dc.ReadBytes(z.Hash) + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *ChecksumInfo) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 3 + // write "PartNumber" + err = en.Append(0x83, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) + if err != nil { + return + } + err = en.WriteInt(z.PartNumber) + if err != nil { + err = msgp.WrapError(err, "PartNumber") + return + } + // write "Algorithm" + err = en.Append(0xa9, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) + if err != nil { + return + } + err = 
en.WriteUint(uint(z.Algorithm)) + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + // write "Hash" + err = en.Append(0xa4, 0x48, 0x61, 0x73, 0x68) + if err != nil { + return + } + err = en.WriteBytes(z.Hash) + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ChecksumInfo) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 3 + // string "PartNumber" + o = append(o, 0x83, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) + o = msgp.AppendInt(o, z.PartNumber) + // string "Algorithm" + o = append(o, 0xa9, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) + o = msgp.AppendUint(o, uint(z.Algorithm)) + // string "Hash" + o = append(o, 0xa4, 0x48, 0x61, 0x73, 0x68) + o = msgp.AppendBytes(o, z.Hash) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ChecksumInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "PartNumber": + z.PartNumber, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartNumber") + return + } + case "Algorithm": + { + var zb0002 uint + zb0002, bts, err = msgp.ReadUintBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + z.Algorithm = BitrotAlgorithm(zb0002) + } + case "Hash": + z.Hash, bts, err = msgp.ReadBytesBytes(bts, z.Hash) + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ChecksumInfo) Msgsize() (s int) { + s = 1 + 11 + msgp.IntSize + 10 + msgp.UintSize + 5 + msgp.BytesPrefixSize + len(z.Hash) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *ErasureInfo) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Algorithm": + z.Algorithm, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + case "DataBlocks": + z.DataBlocks, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "DataBlocks") + return + } + case "ParityBlocks": + z.ParityBlocks, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "ParityBlocks") + return + } + case "BlockSize": + z.BlockSize, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "BlockSize") + return + } + case "Index": + z.Index, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "Index") + return + } + case "Distribution": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Distribution") + return + } + if cap(z.Distribution) >= int(zb0002) { + z.Distribution = (z.Distribution)[:zb0002] + } else { + z.Distribution = make([]int, zb0002) + } + for za0001 := 
range z.Distribution { + z.Distribution[za0001], err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "Distribution", za0001) + return + } + } + case "Checksums": + var zb0003 uint32 + zb0003, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Checksums") + return + } + if cap(z.Checksums) >= int(zb0003) { + z.Checksums = (z.Checksums)[:zb0003] + } else { + z.Checksums = make([]ChecksumInfo, zb0003) + } + for za0002 := range z.Checksums { + err = z.Checksums[za0002].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Checksums", za0002) + return + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *ErasureInfo) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 7 + // write "Algorithm" + err = en.Append(0x87, 0xa9, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) + if err != nil { + return + } + err = en.WriteString(z.Algorithm) + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + // write "DataBlocks" + err = en.Append(0xaa, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73) + if err != nil { + return + } + err = en.WriteInt(z.DataBlocks) + if err != nil { + err = msgp.WrapError(err, "DataBlocks") + return + } + // write "ParityBlocks" + err = en.Append(0xac, 0x50, 0x61, 0x72, 0x69, 0x74, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73) + if err != nil { + return + } + err = en.WriteInt(z.ParityBlocks) + if err != nil { + err = msgp.WrapError(err, "ParityBlocks") + return + } + // write "BlockSize" + err = en.Append(0xa9, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.BlockSize) + if err != nil { + err = msgp.WrapError(err, "BlockSize") + return + } + // write "Index" + err = en.Append(0xa5, 0x49, 0x6e, 0x64, 0x65, 0x78) + if err != nil { + return + } + err = en.WriteInt(z.Index) + if err != nil { + err = msgp.WrapError(err, "Index") + return + } + // write "Distribution" + err = en.Append(0xac, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Distribution))) + if err != nil { + err = msgp.WrapError(err, "Distribution") + return + } + for za0001 := range z.Distribution { + err = en.WriteInt(z.Distribution[za0001]) + if err != nil { + err = msgp.WrapError(err, "Distribution", za0001) + return + } + } + // write "Checksums" + err = en.Append(0xa9, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Checksums))) + if err != nil { + err = msgp.WrapError(err, "Checksums") + return + } + for za0002 := range z.Checksums { + err = z.Checksums[za0002].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Checksums", za0002) + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ErasureInfo) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 7 + // string "Algorithm" + o = append(o, 0x87, 0xa9, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) + o = msgp.AppendString(o, z.Algorithm) + // string "DataBlocks" + o = append(o, 0xaa, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73) + o = msgp.AppendInt(o, z.DataBlocks) + // string "ParityBlocks" + o = append(o, 0xac, 0x50, 0x61, 0x72, 0x69, 0x74, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73) + o = msgp.AppendInt(o, z.ParityBlocks) + // 
string "BlockSize" + o = append(o, 0xa9, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.BlockSize) + // string "Index" + o = append(o, 0xa5, 0x49, 0x6e, 0x64, 0x65, 0x78) + o = msgp.AppendInt(o, z.Index) + // string "Distribution" + o = append(o, 0xac, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e) + o = msgp.AppendArrayHeader(o, uint32(len(z.Distribution))) + for za0001 := range z.Distribution { + o = msgp.AppendInt(o, z.Distribution[za0001]) + } + // string "Checksums" + o = append(o, 0xa9, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Checksums))) + for za0002 := range z.Checksums { + o, err = z.Checksums[za0002].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Checksums", za0002) + return + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ErasureInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Algorithm": + z.Algorithm, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + case "DataBlocks": + z.DataBlocks, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DataBlocks") + return + } + case "ParityBlocks": + z.ParityBlocks, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ParityBlocks") + return + } + case "BlockSize": + z.BlockSize, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "BlockSize") + return + } + case "Index": + z.Index, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Index") + return + } + case "Distribution": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Distribution") + return + } + if cap(z.Distribution) >= int(zb0002) { + z.Distribution = (z.Distribution)[:zb0002] + } else { + z.Distribution = make([]int, zb0002) + } + for za0001 := range z.Distribution { + z.Distribution[za0001], bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Distribution", za0001) + return + } + } + case "Checksums": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Checksums") + return + } + if cap(z.Checksums) >= int(zb0003) { + z.Checksums = (z.Checksums)[:zb0003] + } else { + z.Checksums = make([]ChecksumInfo, zb0003) + } + for za0002 := range z.Checksums { + bts, err = z.Checksums[za0002].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Checksums", za0002) + return + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ErasureInfo) Msgsize() (s int) { + s = 1 + 10 + msgp.StringPrefixSize + len(z.Algorithm) + 11 + msgp.IntSize + 13 + msgp.IntSize + 10 + msgp.Int64Size + 6 + msgp.IntSize + 13 + msgp.ArrayHeaderSize + (len(z.Distribution) * (msgp.IntSize)) + 10 + msgp.ArrayHeaderSize + for za0002 := range z.Checksums { + s += 
z.Checksums[za0002].Msgsize() + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ETag": + z.ETag, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "ETag") + return + } + case "Number": + z.Number, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "Number") + return + } + case "Size": + z.Size, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "Size") + return + } + case "ActualSize": + z.ActualSize, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "ActualSize") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 4 + // write "ETag" + err = en.Append(0x84, 0xa4, 0x45, 0x54, 0x61, 0x67) + if err != nil { + return + } + err = en.WriteString(z.ETag) + if err != nil { + err = msgp.WrapError(err, "ETag") + return + } + // write "Number" + err = en.Append(0xa6, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) + if err != nil { + return + } + err = en.WriteInt(z.Number) + if err != nil { + err = msgp.WrapError(err, "Number") + return + } + // write "Size" + err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.Size) + if err != nil { + err = msgp.WrapError(err, "Size") + return + } + // write "ActualSize" + err = en.Append(0xaa, 0x41, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.ActualSize) + if err != nil { + err = msgp.WrapError(err, "ActualSize") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ObjectPartInfo) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 4 + // string "ETag" + o = append(o, 0x84, 0xa4, 0x45, 0x54, 0x61, 0x67) + o = msgp.AppendString(o, z.ETag) + // string "Number" + o = append(o, 0xa6, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) + o = msgp.AppendInt(o, z.Number) + // string "Size" + o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.Size) + // string "ActualSize" + o = append(o, 0xaa, 0x41, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.ActualSize) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ETag": + z.ETag, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ETag") + return + } + case "Number": + z.Number, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Number") + return + } + case "Size": + z.Size, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Size") + return + } + case "ActualSize": + 
z.ActualSize, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ActualSize") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ObjectPartInfo) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(z.ETag) + 7 + msgp.IntSize + 5 + msgp.Int64Size + 11 + msgp.Int64Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *StatInfo) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Size": + z.Size, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "Size") + return + } + case "ModTime": + z.ModTime, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z StatInfo) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 2 + // write "Size" + err = en.Append(0x82, 0xa4, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.Size) + if err != nil { + err = msgp.WrapError(err, "Size") + return + } + // write "ModTime" + err = en.Append(0xa7, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteTime(z.ModTime) + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z StatInfo) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "Size" + o = append(o, 0x82, 0xa4, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.Size) + // string "ModTime" + o = append(o, 0xa7, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65) + o = msgp.AppendTime(o, z.ModTime) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *StatInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Size": + z.Size, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Size") + return + } + case "ModTime": + z.ModTime, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z StatInfo) Msgsize() (s int) { + s = 1 + 5 + msgp.Int64Size + 8 + msgp.TimeSize + return +} + +// DecodeMsg implements msgp.Decodable +func (z *checksumInfoJSON) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + 
zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Name": + z.Name, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "Algorithm": + z.Algorithm, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + case "Hash": + z.Hash, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z checksumInfoJSON) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 3 + // write "Name" + err = en.Append(0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Name) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + // write "Algorithm" + err = en.Append(0xa9, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) + if err != nil { + return + } + err = en.WriteString(z.Algorithm) + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + // write "Hash" + err = en.Append(0xa4, 0x48, 0x61, 0x73, 0x68) + if err != nil { + return + } + err = en.WriteString(z.Hash) + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z checksumInfoJSON) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 3 + // string "Name" + o = append(o, 0x83, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Name) + // string "Algorithm" + o = append(o, 0xa9, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) + o = msgp.AppendString(o, z.Algorithm) + // string "Hash" + o = append(o, 0xa4, 0x48, 0x61, 0x73, 0x68) + o = msgp.AppendString(o, z.Hash) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *checksumInfoJSON) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Name": + z.Name, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "Algorithm": + z.Algorithm, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + case "Hash": + z.Hash, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z checksumInfoJSON) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 10 + msgp.StringPrefixSize + len(z.Algorithm) + 5 + msgp.StringPrefixSize + len(z.Hash) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *xlMetaV1Object) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = 
msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.Version, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "Format": + z.Format, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Format") + return + } + case "Stat": + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Stat") + return + } + for zb0002 > 0 { + zb0002-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "Stat") + return + } + switch msgp.UnsafeString(field) { + case "Size": + z.Stat.Size, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "Stat", "Size") + return + } + case "ModTime": + z.Stat.ModTime, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "Stat", "ModTime") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "Stat") + return + } + } + } + case "Erasure": + err = z.Erasure.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Erasure") + return + } + case "Minio": + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Minio") + return + } + for zb0003 > 0 { + zb0003-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "Minio") + return + } + switch msgp.UnsafeString(field) { + case "Release": + z.Minio.Release, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Minio", "Release") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "Minio") + return + } + } + } + case "Meta": + var zb0004 uint32 + zb0004, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + if z.Meta == nil { + z.Meta = make(map[string]string, zb0004) + } else if len(z.Meta) > 0 { + for key := range z.Meta { + delete(z.Meta, key) + } + } + for zb0004 > 0 { + zb0004-- + var za0001 string + var za0002 string + za0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + za0002, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Meta", za0001) + return + } + z.Meta[za0001] = za0002 + } + case "Parts": + var zb0005 uint32 + zb0005, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Parts") + return + } + if cap(z.Parts) >= int(zb0005) { + z.Parts = (z.Parts)[:zb0005] + } else { + z.Parts = make([]ObjectPartInfo, zb0005) + } + for za0003 := range z.Parts { + err = z.Parts[za0003].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Parts", za0003) + return + } + } + case "VersionID": + z.VersionID, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + case "DataDir": + z.DataDir, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "DataDir") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *xlMetaV1Object) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 9 + // write "Version" + err = en.Append(0x89, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.Version) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + // write "Format" + err = en.Append(0xa6, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74) + if err != nil { + return + } + err = en.WriteString(z.Format) + if err != nil { + err = 
msgp.WrapError(err, "Format") + return + } + // write "Stat" + err = en.Append(0xa4, 0x53, 0x74, 0x61, 0x74) + if err != nil { + return + } + // map header, size 2 + // write "Size" + err = en.Append(0x82, 0xa4, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.Stat.Size) + if err != nil { + err = msgp.WrapError(err, "Stat", "Size") + return + } + // write "ModTime" + err = en.Append(0xa7, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteTime(z.Stat.ModTime) + if err != nil { + err = msgp.WrapError(err, "Stat", "ModTime") + return + } + // write "Erasure" + err = en.Append(0xa7, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65) + if err != nil { + return + } + err = z.Erasure.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Erasure") + return + } + // write "Minio" + err = en.Append(0xa5, 0x4d, 0x69, 0x6e, 0x69, 0x6f) + if err != nil { + return + } + // map header, size 1 + // write "Release" + err = en.Append(0x81, 0xa7, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65) + if err != nil { + return + } + err = en.WriteString(z.Minio.Release) + if err != nil { + err = msgp.WrapError(err, "Minio", "Release") + return + } + // write "Meta" + err = en.Append(0xa4, 0x4d, 0x65, 0x74, 0x61) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.Meta))) + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + for za0001, za0002 := range z.Meta { + err = en.WriteString(za0001) + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + err = en.WriteString(za0002) + if err != nil { + err = msgp.WrapError(err, "Meta", za0001) + return + } + } + // write "Parts" + err = en.Append(0xa5, 0x50, 0x61, 0x72, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Parts))) + if err != nil { + err = msgp.WrapError(err, "Parts") + return + } + for za0003 := range z.Parts { + err = z.Parts[za0003].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Parts", za0003) + return + } + } + // write "VersionID" + err = en.Append(0xa9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44) + if err != nil { + return + } + err = en.WriteString(z.VersionID) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + // write "DataDir" + err = en.Append(0xa7, 0x44, 0x61, 0x74, 0x61, 0x44, 0x69, 0x72) + if err != nil { + return + } + err = en.WriteString(z.DataDir) + if err != nil { + err = msgp.WrapError(err, "DataDir") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *xlMetaV1Object) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 9 + // string "Version" + o = append(o, 0x89, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.Version) + // string "Format" + o = append(o, 0xa6, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74) + o = msgp.AppendString(o, z.Format) + // string "Stat" + o = append(o, 0xa4, 0x53, 0x74, 0x61, 0x74) + // map header, size 2 + // string "Size" + o = append(o, 0x82, 0xa4, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.Stat.Size) + // string "ModTime" + o = append(o, 0xa7, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65) + o = msgp.AppendTime(o, z.Stat.ModTime) + // string "Erasure" + o = append(o, 0xa7, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65) + o, err = z.Erasure.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Erasure") + return + } + // string "Minio" + o = append(o, 0xa5, 0x4d, 0x69, 0x6e, 0x69, 0x6f) + // map header, size 1 + // string 
"Release" + o = append(o, 0x81, 0xa7, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65) + o = msgp.AppendString(o, z.Minio.Release) + // string "Meta" + o = append(o, 0xa4, 0x4d, 0x65, 0x74, 0x61) + o = msgp.AppendMapHeader(o, uint32(len(z.Meta))) + for za0001, za0002 := range z.Meta { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } + // string "Parts" + o = append(o, 0xa5, 0x50, 0x61, 0x72, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Parts))) + for za0003 := range z.Parts { + o, err = z.Parts[za0003].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Parts", za0003) + return + } + } + // string "VersionID" + o = append(o, 0xa9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44) + o = msgp.AppendString(o, z.VersionID) + // string "DataDir" + o = append(o, 0xa7, 0x44, 0x61, 0x74, 0x61, 0x44, 0x69, 0x72) + o = msgp.AppendString(o, z.DataDir) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *xlMetaV1Object) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.Version, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "Format": + z.Format, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Format") + return + } + case "Stat": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Stat") + return + } + for zb0002 > 0 { + zb0002-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Stat") + return + } + switch msgp.UnsafeString(field) { + case "Size": + z.Stat.Size, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Stat", "Size") + return + } + case "ModTime": + z.Stat.ModTime, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Stat", "ModTime") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Stat") + return + } + } + } + case "Erasure": + bts, err = z.Erasure.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Erasure") + return + } + case "Minio": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Minio") + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Minio") + return + } + switch msgp.UnsafeString(field) { + case "Release": + z.Minio.Release, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Minio", "Release") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Minio") + return + } + } + } + case "Meta": + var zb0004 uint32 + zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + if z.Meta == nil { + z.Meta = make(map[string]string, zb0004) + } else if len(z.Meta) > 0 { + for key := range z.Meta { + delete(z.Meta, key) + } + } + for zb0004 > 0 { + var za0001 string + var za0002 string + zb0004-- + za0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = 
msgp.WrapError(err, "Meta") + return + } + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Meta", za0001) + return + } + z.Meta[za0001] = za0002 + } + case "Parts": + var zb0005 uint32 + zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Parts") + return + } + if cap(z.Parts) >= int(zb0005) { + z.Parts = (z.Parts)[:zb0005] + } else { + z.Parts = make([]ObjectPartInfo, zb0005) + } + for za0003 := range z.Parts { + bts, err = z.Parts[za0003].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Parts", za0003) + return + } + } + case "VersionID": + z.VersionID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + case "DataDir": + z.DataDir, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DataDir") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *xlMetaV1Object) Msgsize() (s int) { + s = 1 + 8 + msgp.StringPrefixSize + len(z.Version) + 7 + msgp.StringPrefixSize + len(z.Format) + 5 + 1 + 5 + msgp.Int64Size + 8 + msgp.TimeSize + 8 + z.Erasure.Msgsize() + 6 + 1 + 8 + msgp.StringPrefixSize + len(z.Minio.Release) + 5 + msgp.MapHeaderSize + if z.Meta != nil { + for za0001, za0002 := range z.Meta { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += 6 + msgp.ArrayHeaderSize + for za0003 := range z.Parts { + s += z.Parts[za0003].Msgsize() + } + s += 10 + msgp.StringPrefixSize + len(z.VersionID) + 8 + msgp.StringPrefixSize + len(z.DataDir) + return +} diff --git a/cmd/xl-storage-format-v1_gen_test.go b/cmd/xl-storage-format-v1_gen_test.go new file mode 100644 index 000000000..0b66c8938 --- /dev/null +++ b/cmd/xl-storage-format-v1_gen_test.go @@ -0,0 +1,688 @@ +package cmd + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "bytes" + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalChecksumInfo(t *testing.T) { + v := ChecksumInfo{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgChecksumInfo(b *testing.B) { + v := ChecksumInfo{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgChecksumInfo(b *testing.B) { + v := ChecksumInfo{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalChecksumInfo(b *testing.B) { + v := ChecksumInfo{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeChecksumInfo(t *testing.T) { + v := ChecksumInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeChecksumInfo Msgsize() is inaccurate") + } + + vn := ChecksumInfo{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeChecksumInfo(b *testing.B) { + v := ChecksumInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeChecksumInfo(b *testing.B) { + v := ChecksumInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalErasureInfo(t *testing.T) { + v := ErasureInfo{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgErasureInfo(b *testing.B) { + v := ErasureInfo{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgErasureInfo(b *testing.B) { + v := ErasureInfo{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalErasureInfo(b *testing.B) { + v := ErasureInfo{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) 
+ if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeErasureInfo(t *testing.T) { + v := ErasureInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeErasureInfo Msgsize() is inaccurate") + } + + vn := ErasureInfo{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeErasureInfo(b *testing.B) { + v := ErasureInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeErasureInfo(b *testing.B) { + v := ErasureInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalObjectPartInfo(t *testing.T) { + v := ObjectPartInfo{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgObjectPartInfo(b *testing.B) { + v := ObjectPartInfo{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgObjectPartInfo(b *testing.B) { + v := ObjectPartInfo{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalObjectPartInfo(b *testing.B) { + v := ObjectPartInfo{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeObjectPartInfo(t *testing.T) { + v := ObjectPartInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeObjectPartInfo Msgsize() is inaccurate") + } + + vn := ObjectPartInfo{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeObjectPartInfo(b *testing.B) { + v := ObjectPartInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeObjectPartInfo(b *testing.B) { + v := ObjectPartInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalStatInfo(t *testing.T) { + v := 
StatInfo{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgStatInfo(b *testing.B) { + v := StatInfo{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgStatInfo(b *testing.B) { + v := StatInfo{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalStatInfo(b *testing.B) { + v := StatInfo{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeStatInfo(t *testing.T) { + v := StatInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeStatInfo Msgsize() is inaccurate") + } + + vn := StatInfo{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeStatInfo(b *testing.B) { + v := StatInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeStatInfo(b *testing.B) { + v := StatInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalchecksumInfoJSON(t *testing.T) { + v := checksumInfoJSON{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgchecksumInfoJSON(b *testing.B) { + v := checksumInfoJSON{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgchecksumInfoJSON(b *testing.B) { + v := checksumInfoJSON{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalchecksumInfoJSON(b *testing.B) { + v := checksumInfoJSON{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodechecksumInfoJSON(t *testing.T) { + v := checksumInfoJSON{} + var buf bytes.Buffer + 
msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodechecksumInfoJSON Msgsize() is inaccurate") + } + + vn := checksumInfoJSON{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodechecksumInfoJSON(b *testing.B) { + v := checksumInfoJSON{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodechecksumInfoJSON(b *testing.B) { + v := checksumInfoJSON{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalxlMetaV1Object(t *testing.T) { + v := xlMetaV1Object{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgxlMetaV1Object(b *testing.B) { + v := xlMetaV1Object{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgxlMetaV1Object(b *testing.B) { + v := xlMetaV1Object{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalxlMetaV1Object(b *testing.B) { + v := xlMetaV1Object{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodexlMetaV1Object(t *testing.T) { + v := xlMetaV1Object{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodexlMetaV1Object Msgsize() is inaccurate") + } + + vn := xlMetaV1Object{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodexlMetaV1Object(b *testing.B) { + v := xlMetaV1Object{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodexlMetaV1Object(b *testing.B) { + v := xlMetaV1Object{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/cmd/xl-storage-format-v2.go b/cmd/xl-storage-format-v2.go new file mode 100644 index 000000000..f8c09eec1 --- /dev/null +++ b/cmd/xl-storage-format-v2.go @@ -0,0 
+1,601 @@
+/*
+ * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cmd
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/minio/minio/cmd/logger"
+)
+
+var (
+	// XL header specifies the format
+	xlHeader = [4]byte{'X', 'L', '2', ' '}
+
+	// XLv2 version 1
+	xlVersionV1 = [4]byte{'1', ' ', ' ', ' '}
+)
+
+func checkXL2V1(buf []byte) error {
+	if len(buf) <= 8 {
+		return fmt.Errorf("xlMeta: no data")
+	}
+
+	if !bytes.Equal(buf[:4], xlHeader[:]) {
+		return fmt.Errorf("xlMeta: unknown XLv2 header %s", buf[:4])
+	}
+
+	if !bytes.Equal(buf[4:8], xlVersionV1[:]) {
+		return fmt.Errorf("xlMeta: unknown XLv2 version %s", buf[4:8])
+	}
+
+	return nil
+}
+
+func isXL2V1Format(buf []byte) bool {
+	return checkXL2V1(buf) == nil
+}
+
+// The []journal contains all the different versions of the object.
+//
+// This array can have 3 kinds of entries:
+//
+// ``object``: If the object is uploaded the usual way: putobject, multipart-put, copyobject
+//
+// ``delete``: This is the delete-marker
+//
+// ``legacyObject``: This is the legacy object in xlV1 format, preserved until it is overwritten
+//
+// The most recently updated element in the array is considered the latest version.
+
+// Backend directory tree structure:
+// disk1/
+// └── bucket
+//     └── object
+//         ├── a192c1d5-9bd5-41fd-9a90-ab10e165398d
+//         │   └── part.1
+//         ├── c06e0436-f813-447e-ae5e-f2564df9dfd4
+//         │   └── part.1
+//         ├── df433928-2dcf-47b1-a786-43efa0f6b424
+//         │   └── part.1
+//         ├── legacy
+//         │   └── part.1
+//         └── xl.meta
+
+//go:generate msgp -file=$GOFILE -unexported
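A minimal sketch (not part of this patch) of how a reader can use the helpers above; `loadXLMetaV2` is a hypothetical wrapper, and it assumes the `xlMetaV2` type defined later in this file:

```go
// Sketch: read an xl.meta file, verify the 8-byte "XL2 1   " header,
// then decode the msgp payload that follows it.
func loadXLMetaV2(path string) (*xlMetaV2, error) {
	buf, err := ioutil.ReadFile(path) // import "io/ioutil"
	if err != nil {
		return nil, err
	}
	if !isXL2V1Format(buf) {
		// Not XLv2; the caller may fall back to the legacy xl.json path.
		return nil, errFileCorrupt
	}
	var meta xlMetaV2
	if err = meta.Load(buf); err != nil { // Load re-checks the header, then skips it
		return nil, err
	}
	return &meta, nil
}
```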
+// VersionType defines the journal type of the current entry.
+type VersionType uint8
+
+// List of the different journal types
+const (
+	invalidVersionType VersionType = 0
+	ObjectType         VersionType = 1
+	DeleteType         VersionType = 2
+	LegacyType         VersionType = 3
+	lastVersionType    VersionType = 4
+)
+
+func (e VersionType) valid() bool {
+	return e > invalidVersionType && e < lastVersionType
+}
+
+// ErasureAlgo defines a common type for the different erasure algorithms
+type ErasureAlgo uint8
+
+// List of currently supported erasure coding algorithms
+const (
+	invalidErasureAlgo ErasureAlgo = 0
+	ReedSolomon        ErasureAlgo = 1
+	lastErasureAlgo    ErasureAlgo = 2
+)
+
+func (e ErasureAlgo) valid() bool {
+	return e > invalidErasureAlgo && e < lastErasureAlgo
+}
+
+func (e ErasureAlgo) String() string {
+	switch e {
+	case ReedSolomon:
+		return "reedsolomon"
+	}
+	return ""
+}
+
+// ChecksumAlgo defines a common type for the different checksum algorithms
+type ChecksumAlgo uint8
+
+// List of currently supported checksum algorithms
+const (
+	invalidChecksumAlgo ChecksumAlgo = 0
+	HighwayHash         ChecksumAlgo = 1
+	lastChecksumAlgo    ChecksumAlgo = 2
+)
+
+func (e ChecksumAlgo) valid() bool {
+	return e > invalidChecksumAlgo && e < lastChecksumAlgo
+}
+
+// xlMetaV2DeleteMarker defines the data struct for the delete marker journal type
+type xlMetaV2DeleteMarker struct {
+	VersionID [16]byte `json:"ID" msg:"ID"`       // Version ID for delete marker
+	ModTime   int64    `json:"MTime" msg:"MTime"` // Object delete marker modified time
+}
+
+// xlMetaV2Object defines the data struct for the object journal type
+type xlMetaV2Object struct {
+	VersionID          [16]byte          `json:"ID" msg:"ID"`                                     // Version ID
+	DataDir            [16]byte          `json:"DDir" msg:"DDir"`                                 // Data dir ID
+	ErasureAlgorithm   ErasureAlgo       `json:"EcAlgo" msg:"EcAlgo"`                             // Erasure coding algorithm
+	ErasureM           int               `json:"EcM" msg:"EcM"`                                   // Erasure data blocks
+	ErasureN           int               `json:"EcN" msg:"EcN"`                                   // Erasure parity blocks
+	ErasureBlockSize   int64             `json:"EcBSize" msg:"EcBSize"`                           // Erasure block size
+	ErasureIndex       int               `json:"EcIndex" msg:"EcIndex"`                           // Erasure disk index
+	ErasureDist        []uint8           `json:"EcDist" msg:"EcDist"`                             // Erasure distribution
+	BitrotChecksumAlgo ChecksumAlgo      `json:"CSumAlgo" msg:"CSumAlgo"`                         // Bitrot checksum algo
+	PartNumbers        []int             `json:"PartNums" msg:"PartNums"`                         // Part Numbers
+	PartETags          []string          `json:"PartETags" msg:"PartETags"`                       // Part ETags
+	PartSizes          []int64           `json:"PartSizes" msg:"PartSizes"`                       // Part Sizes
+	PartActualSizes    []int64           `json:"PartASizes,omitempty" msg:"PartASizes,omitempty"` // Part ActualSizes (compression)
+	StatSize           int64             `json:"Size" msg:"Size"`                                 // Object version size
+	StatModTime        int64             `json:"MTime" msg:"MTime"`                               // Object version modified time
+	MetaSys            map[string][]byte `json:"MetaSys,omitempty" msg:"MetaSys,omitempty"`       // Object version internal metadata
+	MetaUser           map[string]string `json:"MetaUsr,omitempty" msg:"MetaUsr,omitempty"`       // Object version metadata set by user
+}
+
+// xlMetaV2Version describes a journal entry. Type defines the current
+// journal entry type; the other fields may be nil depending on what
+// Type carries, so it is imperative for the caller to verify the
+// journal type before accessing the rest of the fields.
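Per the comment above, a small illustrative helper (not part of the patch) showing the mandated access pattern against the struct defined just below:

```go
// Sketch: switch on Type before touching the per-type pointers;
// exactly one of them is expected to be non-nil for a valid entry.
func versionModTime(v xlMetaV2Version) (unix int64, ok bool) {
	switch v.Type {
	case ObjectType:
		return v.ObjectV2.StatModTime, true
	case DeleteType:
		return v.DeleteMarker.ModTime, true
	case LegacyType:
		return v.ObjectV1.Stat.ModTime.Unix(), true
	}
	return 0, false
}
```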
+type xlMetaV2Version struct {
+	Type         VersionType           `json:"Type" msg:"Type"`
+	ObjectV1     *xlMetaV1Object       `json:"V1Obj,omitempty" msg:"V1Obj,omitempty"`
+	ObjectV2     *xlMetaV2Object       `json:"V2Obj,omitempty" msg:"V2Obj,omitempty"`
+	DeleteMarker *xlMetaV2DeleteMarker `json:"DelObj,omitempty" msg:"DelObj,omitempty"`
+}
+
+// Valid tells us whether this xlMetaV2Version is a valid journal entry.
+func (j xlMetaV2Version) Valid() bool {
+	switch j.Type {
+	case LegacyType:
+		return j.ObjectV1 != nil && j.ObjectV1.valid()
+	case ObjectType:
+		return j.ObjectV2 != nil &&
+			j.ObjectV2.ErasureAlgorithm.valid() &&
+			j.ObjectV2.BitrotChecksumAlgo.valid() &&
+			isXLMetaErasureInfoValid(j.ObjectV2.ErasureM, j.ObjectV2.ErasureN)
+	case DeleteType:
+		return j.DeleteMarker != nil && j.DeleteMarker.ModTime > 0
+	}
+	return false
+}
+
+// xlMetaV2 - object meta structure defines the format and list of
+// the journals for the object.
+type xlMetaV2 struct {
+	Versions []xlMetaV2Version `json:"Versions" msg:"Versions"`
+}
+
+// AddLegacy adds a legacy version; it is only called when no prior
+// versions exist, so it is safe to be used by exactly one function
+// in xl-storage (RenameData).
+func (z *xlMetaV2) AddLegacy(m *xlMetaV1Object) error {
+	if !m.valid() {
+		return errFileCorrupt
+	}
+	m.VersionID = nullVersionID
+	m.DataDir = legacyDataDir
+	z.Versions = []xlMetaV2Version{
+		{
+			Type:     LegacyType,
+			ObjectV1: m,
+		},
+	}
+	return nil
+}
+
+// Load unmarshals and loads the entire message pack.
+func (z *xlMetaV2) Load(buf []byte) error {
+	if err := checkXL2V1(buf); err != nil {
+		return err
+	}
+	_, err := z.UnmarshalMsg(buf[8:])
+	return err
+}
+
+// AddVersion adds a new version to the journal.
+func (z *xlMetaV2) AddVersion(fi FileInfo) error {
+	if fi.Deleted {
+		uv, err := uuid.Parse(fi.VersionID)
+		if err != nil {
+			return err
+		}
+		z.Versions = append(z.Versions, xlMetaV2Version{
+			Type: DeleteType,
+			DeleteMarker: &xlMetaV2DeleteMarker{
+				VersionID: uv,
+				ModTime:   fi.ModTime.Unix(),
+			},
+		})
+		return nil
+	}
+
+	var uv uuid.UUID
+	var err error
+	// A "null" version ID is stored as an empty version ID.
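+	// ("null" is what S3 exposes as the version ID of objects written
+	// while versioning is off or suspended; on disk it is represented
+	// by the empty string and the zero UUID.)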
+	if fi.VersionID == nullVersionID {
+		fi.VersionID = ""
+	}
+
+	if fi.VersionID != "" {
+		uv, err = uuid.Parse(fi.VersionID)
+		if err != nil {
+			return err
+		}
+	}
+
+	dd, err := uuid.Parse(fi.DataDir)
+	if err != nil {
+		return err
+	}
+
+	ventry := xlMetaV2Version{
+		Type: ObjectType,
+		ObjectV2: &xlMetaV2Object{
+			VersionID:          uv,
+			DataDir:            dd,
+			StatSize:           fi.Size,
+			StatModTime:        fi.ModTime.Unix(),
+			ErasureAlgorithm:   ReedSolomon,
+			ErasureM:           fi.Erasure.DataBlocks,
+			ErasureN:           fi.Erasure.ParityBlocks,
+			ErasureBlockSize:   fi.Erasure.BlockSize,
+			ErasureIndex:       fi.Erasure.Index,
+			BitrotChecksumAlgo: HighwayHash,
+			ErasureDist:        make([]uint8, len(fi.Erasure.Distribution)),
+			PartNumbers:        make([]int, len(fi.Parts)),
+			PartETags:          make([]string, len(fi.Parts)),
+			PartSizes:          make([]int64, len(fi.Parts)),
+			PartActualSizes:    make([]int64, len(fi.Parts)),
+			MetaSys:            make(map[string][]byte),
+			MetaUser:           make(map[string]string),
+		},
+	}
+
+	for i := range fi.Erasure.Distribution {
+		ventry.ObjectV2.ErasureDist[i] = uint8(fi.Erasure.Distribution[i])
+	}
+
+	for i := range fi.Parts {
+		ventry.ObjectV2.PartSizes[i] = fi.Parts[i].Size
+		if fi.Parts[i].ETag != "" {
+			ventry.ObjectV2.PartETags[i] = fi.Parts[i].ETag
+		}
+		ventry.ObjectV2.PartNumbers[i] = fi.Parts[i].Number
+		ventry.ObjectV2.PartActualSizes[i] = fi.Parts[i].ActualSize
+	}
+
+	for k, v := range fi.Metadata {
+		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+			ventry.ObjectV2.MetaSys[k] = []byte(v)
+		} else {
+			ventry.ObjectV2.MetaUser[k] = v
+		}
+	}
+
+	for i, version := range z.Versions {
+		if !version.Valid() {
+			return errFileCorrupt
+		}
+		switch version.Type {
+		case LegacyType:
+			// This converts the legacy entry into the new ObjectType,
+			// which means we are effectively purging the `null`
+			// version of the object.
+			if version.ObjectV1.VersionID == fi.VersionID {
+				z.Versions[i] = ventry
+				return nil
+			}
+		case ObjectType:
+			if bytes.Equal(version.ObjectV2.VersionID[:], uv[:]) {
+				z.Versions[i] = ventry
+				return nil
+			}
+		case DeleteType:
+			// Allow a delete marker to be replaced with a proper
+			// object data type as well; this is not S3-compliant
+			// behavior, but it is kept here for future flexibility.
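+			// (In S3 a PUT never reuses an existing version ID, so a
+			// delete marker is never overwritten in place.)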
+			if bytes.Equal(version.DeleteMarker.VersionID[:], uv[:]) {
+				z.Versions[i] = ventry
+				return nil
+			}
+		}
+	}
+
+	z.Versions = append(z.Versions, ventry)
+	return nil
+}
+
+func newXLMetaV2(fi FileInfo) (xlMetaV2, error) {
+	xlMeta := xlMetaV2{}
+	return xlMeta, xlMeta.AddVersion(fi)
+}
+
+func (j xlMetaV2DeleteMarker) ToFileInfo(volume, path string) (FileInfo, error) {
+	fi := FileInfo{
+		Volume:    volume,
+		Name:      path,
+		ModTime:   time.Unix(j.ModTime, 0).UTC(),
+		VersionID: uuid.UUID(j.VersionID).String(),
+		Deleted:   true,
+	}
+	return fi, nil
+}
+
+func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
+	versionID := ""
+	var uv uuid.UUID
+	// Set versionID only if this is not the "null" (zero UUID) version.
+	if !bytes.Equal(j.VersionID[:], uv[:]) {
+		versionID = uuid.UUID(j.VersionID).String()
+	}
+	fi := FileInfo{
+		Volume:    volume,
+		Name:      path,
+		Size:      j.StatSize,
+		ModTime:   time.Unix(j.StatModTime, 0).UTC(),
+		VersionID: versionID,
+	}
+	fi.Parts = make([]ObjectPartInfo, len(j.PartNumbers))
+	for i := range fi.Parts {
+		fi.Parts[i].Number = j.PartNumbers[i]
+		fi.Parts[i].Size = j.PartSizes[i]
+		fi.Parts[i].ETag = j.PartETags[i]
+		fi.Parts[i].ActualSize = j.PartActualSizes[i]
+	}
+	fi.Erasure.Checksums = make([]ChecksumInfo, len(j.PartSizes))
+	for i := range fi.Parts {
+		fi.Erasure.Checksums[i].PartNumber = fi.Parts[i].Number
+		switch j.BitrotChecksumAlgo {
+		case HighwayHash:
+			fi.Erasure.Checksums[i].Algorithm = HighwayHash256S
+			fi.Erasure.Checksums[i].Hash = []byte{}
+		default:
+			return FileInfo{}, fmt.Errorf("unknown BitrotChecksumAlgo: %v", j.BitrotChecksumAlgo)
+		}
+	}
+	fi.Metadata = make(map[string]string, len(j.MetaUser)+len(j.MetaSys))
+	for k, v := range j.MetaUser {
+		fi.Metadata[k] = v
+	}
+	for k, v := range j.MetaSys {
+		if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+			fi.Metadata[k] = string(v)
+		}
+	}
+	fi.Erasure.Algorithm = j.ErasureAlgorithm.String()
+	fi.Erasure.Index = j.ErasureIndex
+	fi.Erasure.BlockSize = j.ErasureBlockSize
+	fi.Erasure.DataBlocks = j.ErasureM
+	fi.Erasure.ParityBlocks = j.ErasureN
+	fi.Erasure.Distribution = make([]int, len(j.ErasureDist))
+	for i := range j.ErasureDist {
+		fi.Erasure.Distribution[i] = int(j.ErasureDist[i])
+	}
+	fi.DataDir = uuid.UUID(j.DataDir).String()
+	return fi, nil
+}
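A minimal sketch (not part of the patch, hypothetical values) of the journal round trip these helpers enable; `DeleteVersion`, defined next, hands the caller the dataDir to remove:

```go
// Sketch: add a version, then delete it again (assumes this file's types).
func exampleJournalRoundTrip() error {
	fi := FileInfo{
		Volume:    "bucket",
		Name:      "object",
		VersionID: "df433928-2dcf-47b1-a786-43efa0f6b424", // must be a parseable UUID
		DataDir:   "a192c1d5-9bd5-41fd-9a90-ab10e165398d", // likewise
		ModTime:   time.Now(),
		// Valid() requires sane erasure parameters on the stored entry.
		Erasure: ErasureInfo{DataBlocks: 2, ParityBlocks: 2},
	}
	meta, err := newXLMetaV2(fi) // wraps AddVersion
	if err != nil {
		return err
	}
	dataDir, lastVersion, err := meta.DeleteVersion(fi)
	if err != nil {
		return err
	}
	_ = dataDir     // == fi.DataDir: this directory is now safe to remove
	_ = lastVersion // true: no versions remain, xl.meta itself can go
	return nil
}
```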
+// DeleteVersion deletes the version specified by its version ID.
+// It returns the dataDir the caller should delete, and also reports
+// whether this was the last remaining version.
+func (z *xlMetaV2) DeleteVersion(fi FileInfo) (string, bool, error) {
+	// Here the version ID may be explicitly specified as "null"; since
+	// we never store the "null" string, it is treated as empty. Note
+	// that an empty version ID likewise means the matching version
+	// will be purged.
+	if fi.VersionID == nullVersionID {
+		fi.VersionID = ""
+	}
+	var uv uuid.UUID
+	if fi.VersionID != "" {
+		uv, _ = uuid.Parse(fi.VersionID)
+	}
+	for i, version := range z.Versions {
+		if !version.Valid() {
+			return "", false, errFileCorrupt
+		}
+		switch version.Type {
+		case LegacyType:
+			if version.ObjectV1.VersionID == fi.VersionID {
+				z.Versions = append(z.Versions[:i], z.Versions[i+1:]...)
+				return version.ObjectV1.DataDir, len(z.Versions) == 0, nil
+			}
+		case ObjectType:
+			if bytes.Equal(version.ObjectV2.VersionID[:], uv[:]) {
+				z.Versions = append(z.Versions[:i], z.Versions[i+1:]...)
+				return uuid.UUID(version.ObjectV2.DataDir).String(), len(z.Versions) == 0, nil
+			}
+		case DeleteType:
+			if bytes.Equal(version.DeleteMarker.VersionID[:], uv[:]) {
+				z.Versions = append(z.Versions[:i], z.Versions[i+1:]...)
+				return "", len(z.Versions) == 0, nil
+			}
+		}
+	}
+	return "", false, errFileVersionNotFound
+}
+
+// TotalSize returns the total size of all versions.
+func (z xlMetaV2) TotalSize() int64 {
+	var total int64
+	for i := range z.Versions {
+		switch z.Versions[i].Type {
+		case ObjectType:
+			total += z.Versions[i].ObjectV2.StatSize
+		case LegacyType:
+			total += z.Versions[i].ObjectV1.Stat.Size
+		}
+	}
+	return total
+}
+
+// ListVersions lists the current versions and the current delete
+// markers; it returns an error for unexpected entries.
+func (z xlMetaV2) ListVersions(volume, path string) (versions []FileInfo, deleted []FileInfo, modTime time.Time, err error) {
+	var latestModTime time.Time
+	var latestVersionID string
+	for _, version := range z.Versions {
+		if !version.Valid() {
+			return nil, nil, latestModTime, errFileCorrupt
+		}
+		var fi FileInfo
+		switch version.Type {
+		case ObjectType:
+			fi, err = version.ObjectV2.ToFileInfo(volume, path)
+		case DeleteType:
+			fi, err = version.DeleteMarker.ToFileInfo(volume, path)
+		case LegacyType:
+			fi, err = version.ObjectV1.ToFileInfo(volume, path)
+		default:
+			continue
+		}
+		if err != nil {
+			return nil, nil, latestModTime, err
+		}
+		if fi.ModTime.After(latestModTime) {
+			latestModTime = fi.ModTime
+			latestVersionID = fi.VersionID
+		}
+		switch version.Type {
+		case LegacyType, ObjectType:
+			versions = append(versions, fi)
+		case DeleteType:
+			deleted = append(deleted, fi)
+		}
+	}
+
+	// Since we can never have duplicate versions, if the latest version ID
+	// matches one of the delete markers we know none of the actual data
+	// versions can be the latest, so we can return early once we find the
+	// version among the delete markers.
+	for i := range deleted {
+		if deleted[i].VersionID == latestVersionID {
+			deleted[i].IsLatest = true
+			return versions, deleted, latestModTime, nil
+		}
+	}
+	// We didn't find the version among the delete markers, so the latest
+	// version is indeed one of the actual versions of the object with data.
+	for i := range versions {
+		if versions[i].VersionID != latestVersionID {
+			continue
+		}
+		versions[i].IsLatest = true
+		break
+	}
+	return versions, deleted, latestModTime, nil
+}
+
+// ToFileInfo converts xlMetaV2 into a common FileInfo data structure
+// for consumption across callers.
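+// When versionID is empty, the version with the newest modification
+// time is returned (which may be a delete marker); a caller-supplied
+// "null" fails uuid.Parse and therefore matches the zero-UUID entries,
+// i.e. the null version.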
+func (z xlMetaV2) ToFileInfo(volume, path, versionID string) (FileInfo, error) { + var uv uuid.UUID + if versionID != "" { + uv, _ = uuid.Parse(versionID) + } + + if versionID == "" { + var latestModTime time.Time + var latestIndex int + for i, version := range z.Versions { + if !version.Valid() { + logger.LogIf(GlobalContext, fmt.Errorf("invalid version detected %#v", version)) + return FileInfo{}, errFileNotFound + } + var modTime time.Time + switch version.Type { + case ObjectType: + modTime = time.Unix(version.ObjectV2.StatModTime, 0) + case DeleteType: + modTime = time.Unix(version.DeleteMarker.ModTime, 0) + case LegacyType: + modTime = version.ObjectV1.Stat.ModTime + default: + continue + } + if modTime.After(latestModTime) { + latestModTime = modTime + latestIndex = i + } + } + if len(z.Versions) >= 1 { + switch z.Versions[latestIndex].Type { + case ObjectType: + return z.Versions[latestIndex].ObjectV2.ToFileInfo(volume, path) + case DeleteType: + return z.Versions[latestIndex].DeleteMarker.ToFileInfo(volume, path) + case LegacyType: + return z.Versions[latestIndex].ObjectV1.ToFileInfo(volume, path) + } + } + return FileInfo{}, errFileNotFound + } + + for _, version := range z.Versions { + if !version.Valid() { + logger.LogIf(GlobalContext, fmt.Errorf("invalid version detected %#v", version)) + if versionID == "" { + return FileInfo{}, errFileNotFound + } + return FileInfo{}, errFileVersionNotFound + } + switch version.Type { + case ObjectType: + if bytes.Equal(version.ObjectV2.VersionID[:], uv[:]) { + return version.ObjectV2.ToFileInfo(volume, path) + } + case LegacyType: + if version.ObjectV1.VersionID == versionID { + return version.ObjectV1.ToFileInfo(volume, path) + } + case DeleteType: + if bytes.Equal(version.DeleteMarker.VersionID[:], uv[:]) { + return version.DeleteMarker.ToFileInfo(volume, path) + } + default: + logger.LogIf(GlobalContext, fmt.Errorf("unknown version type: %v", version.Type)) + if versionID == "" { + return FileInfo{}, errFileNotFound + } + + return FileInfo{}, errFileVersionNotFound + } + } + + if versionID == "" { + return FileInfo{}, errFileNotFound + } + + return FileInfo{}, errFileVersionNotFound +} diff --git a/cmd/xl-storage-format-v2_gen.go b/cmd/xl-storage-format-v2_gen.go new file mode 100644 index 000000000..da111d395 --- /dev/null +++ b/cmd/xl-storage-format-v2_gen.go @@ -0,0 +1,1780 @@ +package cmd + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
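+// (These codecs are produced by the `//go:generate msgp` directive in
+// xl-storage-format-v2.go; re-run `go generate` after editing that file.)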
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *ChecksumAlgo) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 uint8 + zb0001, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = ChecksumAlgo(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z ChecksumAlgo) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteUint8(uint8(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z ChecksumAlgo) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendUint8(o, uint8(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ChecksumAlgo) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 uint8 + zb0001, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = ChecksumAlgo(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z ChecksumAlgo) Msgsize() (s int) { + s = msgp.Uint8Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *ErasureAlgo) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 uint8 + zb0001, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = ErasureAlgo(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z ErasureAlgo) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteUint8(uint8(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z ErasureAlgo) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendUint8(o, uint8(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ErasureAlgo) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 uint8 + zb0001, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = ErasureAlgo(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z ErasureAlgo) Msgsize() (s int) { + s = msgp.Uint8Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *VersionType) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 uint8 + zb0001, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = VersionType(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z VersionType) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteUint8(uint8(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z VersionType) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendUint8(o, uint8(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *VersionType) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 uint8 + zb0001, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = VersionType(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z VersionType) Msgsize() (s int) { + s = msgp.Uint8Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *xlMetaV2) 
DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Versions": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Versions") + return + } + if cap(z.Versions) >= int(zb0002) { + z.Versions = (z.Versions)[:zb0002] + } else { + z.Versions = make([]xlMetaV2Version, zb0002) + } + for za0001 := range z.Versions { + err = z.Versions[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Versions", za0001) + return + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *xlMetaV2) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 1 + // write "Versions" + err = en.Append(0x81, 0xa8, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Versions))) + if err != nil { + err = msgp.WrapError(err, "Versions") + return + } + for za0001 := range z.Versions { + err = z.Versions[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Versions", za0001) + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *xlMetaV2) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 1 + // string "Versions" + o = append(o, 0x81, 0xa8, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Versions))) + for za0001 := range z.Versions { + o, err = z.Versions[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Versions", za0001) + return + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *xlMetaV2) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Versions": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Versions") + return + } + if cap(z.Versions) >= int(zb0002) { + z.Versions = (z.Versions)[:zb0002] + } else { + z.Versions = make([]xlMetaV2Version, zb0002) + } + for za0001 := range z.Versions { + bts, err = z.Versions[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Versions", za0001) + return + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *xlMetaV2) Msgsize() (s int) { + s = 1 + 9 + msgp.ArrayHeaderSize + for za0001 := range z.Versions { + s += z.Versions[za0001].Msgsize() + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *xlMetaV2DeleteMarker) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err 
= dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ID": + err = dc.ReadExactBytes((z.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + case "MTime": + z.ModTime, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *xlMetaV2DeleteMarker) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 2 + // write "ID" + err = en.Append(0x82, 0xa2, 0x49, 0x44) + if err != nil { + return + } + err = en.WriteBytes((z.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + // write "MTime" + err = en.Append(0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.ModTime) + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *xlMetaV2DeleteMarker) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "ID" + o = append(o, 0x82, 0xa2, 0x49, 0x44) + o = msgp.AppendBytes(o, (z.VersionID)[:]) + // string "MTime" + o = append(o, 0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + o = msgp.AppendInt64(o, z.ModTime) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *xlMetaV2DeleteMarker) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ID": + bts, err = msgp.ReadExactBytes(bts, (z.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + case "MTime": + z.ModTime, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *xlMetaV2DeleteMarker) Msgsize() (s int) { + s = 1 + 3 + msgp.ArrayHeaderSize + (16 * (msgp.ByteSize)) + 6 + msgp.Int64Size + return +} + +// DecodeMsg implements msgp.Decodable +func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ID": + err = dc.ReadExactBytes((z.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + case "DDir": + err = dc.ReadExactBytes((z.DataDir)[:]) + if err != nil { + err = msgp.WrapError(err, "DataDir") + return + } + case "EcAlgo": + { + var zb0002 uint8 + zb0002, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err, "ErasureAlgorithm") + return + } + z.ErasureAlgorithm = ErasureAlgo(zb0002) + } + case "EcM": + z.ErasureM, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "ErasureM") + return + } + case "EcN": + z.ErasureN, 
err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "ErasureN") + return + } + case "EcBSize": + z.ErasureBlockSize, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "ErasureBlockSize") + return + } + case "EcIndex": + z.ErasureIndex, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "ErasureIndex") + return + } + case "EcDist": + var zb0003 uint32 + zb0003, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "ErasureDist") + return + } + if cap(z.ErasureDist) >= int(zb0003) { + z.ErasureDist = (z.ErasureDist)[:zb0003] + } else { + z.ErasureDist = make([]uint8, zb0003) + } + for za0003 := range z.ErasureDist { + z.ErasureDist[za0003], err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err, "ErasureDist", za0003) + return + } + } + case "CSumAlgo": + { + var zb0004 uint8 + zb0004, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err, "BitrotChecksumAlgo") + return + } + z.BitrotChecksumAlgo = ChecksumAlgo(zb0004) + } + case "PartNums": + var zb0005 uint32 + zb0005, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "PartNumbers") + return + } + if cap(z.PartNumbers) >= int(zb0005) { + z.PartNumbers = (z.PartNumbers)[:zb0005] + } else { + z.PartNumbers = make([]int, zb0005) + } + for za0004 := range z.PartNumbers { + z.PartNumbers[za0004], err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "PartNumbers", za0004) + return + } + } + case "PartETags": + var zb0006 uint32 + zb0006, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "PartETags") + return + } + if cap(z.PartETags) >= int(zb0006) { + z.PartETags = (z.PartETags)[:zb0006] + } else { + z.PartETags = make([]string, zb0006) + } + for za0005 := range z.PartETags { + z.PartETags[za0005], err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "PartETags", za0005) + return + } + } + case "PartSizes": + var zb0007 uint32 + zb0007, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "PartSizes") + return + } + if cap(z.PartSizes) >= int(zb0007) { + z.PartSizes = (z.PartSizes)[:zb0007] + } else { + z.PartSizes = make([]int64, zb0007) + } + for za0006 := range z.PartSizes { + z.PartSizes[za0006], err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "PartSizes", za0006) + return + } + } + case "PartASizes": + var zb0008 uint32 + zb0008, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "PartActualSizes") + return + } + if cap(z.PartActualSizes) >= int(zb0008) { + z.PartActualSizes = (z.PartActualSizes)[:zb0008] + } else { + z.PartActualSizes = make([]int64, zb0008) + } + for za0007 := range z.PartActualSizes { + z.PartActualSizes[za0007], err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "PartActualSizes", za0007) + return + } + } + case "Size": + z.StatSize, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "StatSize") + return + } + case "MTime": + z.StatModTime, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "StatModTime") + return + } + case "MetaSys": + var zb0009 uint32 + zb0009, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "MetaSys") + return + } + if z.MetaSys == nil { + z.MetaSys = make(map[string][]byte, zb0009) + } else if len(z.MetaSys) > 0 { + for key := range z.MetaSys { + delete(z.MetaSys, key) + } + } + for zb0009 > 0 { + zb0009-- + var za0008 string + var za0009 []byte + za0008, err = dc.ReadString() + if err != nil { + err = 
msgp.WrapError(err, "MetaSys") + return + } + za0009, err = dc.ReadBytes(za0009) + if err != nil { + err = msgp.WrapError(err, "MetaSys", za0008) + return + } + z.MetaSys[za0008] = za0009 + } + case "MetaUsr": + var zb0010 uint32 + zb0010, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "MetaUser") + return + } + if z.MetaUser == nil { + z.MetaUser = make(map[string]string, zb0010) + } else if len(z.MetaUser) > 0 { + for key := range z.MetaUser { + delete(z.MetaUser, key) + } + } + for zb0010 > 0 { + zb0010-- + var za0010 string + var za0011 string + za0010, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "MetaUser") + return + } + za0011, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "MetaUser", za0010) + return + } + z.MetaUser[za0010] = za0011 + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) { + // omitempty: check for empty values + zb0001Len := uint32(17) + var zb0001Mask uint32 /* 17 bits */ + if z.PartActualSizes == nil { + zb0001Len-- + zb0001Mask |= 0x1000 + } + if z.MetaSys == nil { + zb0001Len-- + zb0001Mask |= 0x8000 + } + if z.MetaUser == nil { + zb0001Len-- + zb0001Mask |= 0x10000 + } + // variable map header, size zb0001Len + err = en.WriteMapHeader(zb0001Len) + if err != nil { + return + } + if zb0001Len == 0 { + return + } + // write "ID" + err = en.Append(0xa2, 0x49, 0x44) + if err != nil { + return + } + err = en.WriteBytes((z.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + // write "DDir" + err = en.Append(0xa4, 0x44, 0x44, 0x69, 0x72) + if err != nil { + return + } + err = en.WriteBytes((z.DataDir)[:]) + if err != nil { + err = msgp.WrapError(err, "DataDir") + return + } + // write "EcAlgo" + err = en.Append(0xa6, 0x45, 0x63, 0x41, 0x6c, 0x67, 0x6f) + if err != nil { + return + } + err = en.WriteUint8(uint8(z.ErasureAlgorithm)) + if err != nil { + err = msgp.WrapError(err, "ErasureAlgorithm") + return + } + // write "EcM" + err = en.Append(0xa3, 0x45, 0x63, 0x4d) + if err != nil { + return + } + err = en.WriteInt(z.ErasureM) + if err != nil { + err = msgp.WrapError(err, "ErasureM") + return + } + // write "EcN" + err = en.Append(0xa3, 0x45, 0x63, 0x4e) + if err != nil { + return + } + err = en.WriteInt(z.ErasureN) + if err != nil { + err = msgp.WrapError(err, "ErasureN") + return + } + // write "EcBSize" + err = en.Append(0xa7, 0x45, 0x63, 0x42, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.ErasureBlockSize) + if err != nil { + err = msgp.WrapError(err, "ErasureBlockSize") + return + } + // write "EcIndex" + err = en.Append(0xa7, 0x45, 0x63, 0x49, 0x6e, 0x64, 0x65, 0x78) + if err != nil { + return + } + err = en.WriteInt(z.ErasureIndex) + if err != nil { + err = msgp.WrapError(err, "ErasureIndex") + return + } + // write "EcDist" + err = en.Append(0xa6, 0x45, 0x63, 0x44, 0x69, 0x73, 0x74) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.ErasureDist))) + if err != nil { + err = msgp.WrapError(err, "ErasureDist") + return + } + for za0003 := range z.ErasureDist { + err = en.WriteUint8(z.ErasureDist[za0003]) + if err != nil { + err = msgp.WrapError(err, "ErasureDist", za0003) + return + } + } + // write "CSumAlgo" + err = en.Append(0xa8, 0x43, 0x53, 0x75, 0x6d, 0x41, 0x6c, 0x67, 0x6f) + if err != nil { + return + } + err = 
en.WriteUint8(uint8(z.BitrotChecksumAlgo)) + if err != nil { + err = msgp.WrapError(err, "BitrotChecksumAlgo") + return + } + // write "PartNums" + err = en.Append(0xa8, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PartNumbers))) + if err != nil { + err = msgp.WrapError(err, "PartNumbers") + return + } + for za0004 := range z.PartNumbers { + err = en.WriteInt(z.PartNumbers[za0004]) + if err != nil { + err = msgp.WrapError(err, "PartNumbers", za0004) + return + } + } + // write "PartETags" + err = en.Append(0xa9, 0x50, 0x61, 0x72, 0x74, 0x45, 0x54, 0x61, 0x67, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PartETags))) + if err != nil { + err = msgp.WrapError(err, "PartETags") + return + } + for za0005 := range z.PartETags { + err = en.WriteString(z.PartETags[za0005]) + if err != nil { + err = msgp.WrapError(err, "PartETags", za0005) + return + } + } + // write "PartSizes" + err = en.Append(0xa9, 0x50, 0x61, 0x72, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PartSizes))) + if err != nil { + err = msgp.WrapError(err, "PartSizes") + return + } + for za0006 := range z.PartSizes { + err = en.WriteInt64(z.PartSizes[za0006]) + if err != nil { + err = msgp.WrapError(err, "PartSizes", za0006) + return + } + } + if (zb0001Mask & 0x1000) == 0 { // if not empty + // write "PartASizes" + err = en.Append(0xaa, 0x50, 0x61, 0x72, 0x74, 0x41, 0x53, 0x69, 0x7a, 0x65, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PartActualSizes))) + if err != nil { + err = msgp.WrapError(err, "PartActualSizes") + return + } + for za0007 := range z.PartActualSizes { + err = en.WriteInt64(z.PartActualSizes[za0007]) + if err != nil { + err = msgp.WrapError(err, "PartActualSizes", za0007) + return + } + } + } + // write "Size" + err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.StatSize) + if err != nil { + err = msgp.WrapError(err, "StatSize") + return + } + // write "MTime" + err = en.Append(0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.StatModTime) + if err != nil { + err = msgp.WrapError(err, "StatModTime") + return + } + if (zb0001Mask & 0x8000) == 0 { // if not empty + // write "MetaSys" + err = en.Append(0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.MetaSys))) + if err != nil { + err = msgp.WrapError(err, "MetaSys") + return + } + for za0008, za0009 := range z.MetaSys { + err = en.WriteString(za0008) + if err != nil { + err = msgp.WrapError(err, "MetaSys") + return + } + err = en.WriteBytes(za0009) + if err != nil { + err = msgp.WrapError(err, "MetaSys", za0008) + return + } + } + } + if (zb0001Mask & 0x10000) == 0 { // if not empty + // write "MetaUsr" + err = en.Append(0xa7, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x73, 0x72) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.MetaUser))) + if err != nil { + err = msgp.WrapError(err, "MetaUser") + return + } + for za0010, za0011 := range z.MetaUser { + err = en.WriteString(za0010) + if err != nil { + err = msgp.WrapError(err, "MetaUser") + return + } + err = en.WriteString(za0011) + if err != nil { + err = msgp.WrapError(err, "MetaUser", za0010) + return + } + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) { + o = 
msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(17) + var zb0001Mask uint32 /* 17 bits */ + if z.PartActualSizes == nil { + zb0001Len-- + zb0001Mask |= 0x1000 + } + if z.MetaSys == nil { + zb0001Len-- + zb0001Mask |= 0x8000 + } + if z.MetaUser == nil { + zb0001Len-- + zb0001Mask |= 0x10000 + } + // variable map header, size zb0001Len + o = msgp.AppendMapHeader(o, zb0001Len) + if zb0001Len == 0 { + return + } + // string "ID" + o = append(o, 0xa2, 0x49, 0x44) + o = msgp.AppendBytes(o, (z.VersionID)[:]) + // string "DDir" + o = append(o, 0xa4, 0x44, 0x44, 0x69, 0x72) + o = msgp.AppendBytes(o, (z.DataDir)[:]) + // string "EcAlgo" + o = append(o, 0xa6, 0x45, 0x63, 0x41, 0x6c, 0x67, 0x6f) + o = msgp.AppendUint8(o, uint8(z.ErasureAlgorithm)) + // string "EcM" + o = append(o, 0xa3, 0x45, 0x63, 0x4d) + o = msgp.AppendInt(o, z.ErasureM) + // string "EcN" + o = append(o, 0xa3, 0x45, 0x63, 0x4e) + o = msgp.AppendInt(o, z.ErasureN) + // string "EcBSize" + o = append(o, 0xa7, 0x45, 0x63, 0x42, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.ErasureBlockSize) + // string "EcIndex" + o = append(o, 0xa7, 0x45, 0x63, 0x49, 0x6e, 0x64, 0x65, 0x78) + o = msgp.AppendInt(o, z.ErasureIndex) + // string "EcDist" + o = append(o, 0xa6, 0x45, 0x63, 0x44, 0x69, 0x73, 0x74) + o = msgp.AppendArrayHeader(o, uint32(len(z.ErasureDist))) + for za0003 := range z.ErasureDist { + o = msgp.AppendUint8(o, z.ErasureDist[za0003]) + } + // string "CSumAlgo" + o = append(o, 0xa8, 0x43, 0x53, 0x75, 0x6d, 0x41, 0x6c, 0x67, 0x6f) + o = msgp.AppendUint8(o, uint8(z.BitrotChecksumAlgo)) + // string "PartNums" + o = append(o, 0xa8, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PartNumbers))) + for za0004 := range z.PartNumbers { + o = msgp.AppendInt(o, z.PartNumbers[za0004]) + } + // string "PartETags" + o = append(o, 0xa9, 0x50, 0x61, 0x72, 0x74, 0x45, 0x54, 0x61, 0x67, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PartETags))) + for za0005 := range z.PartETags { + o = msgp.AppendString(o, z.PartETags[za0005]) + } + // string "PartSizes" + o = append(o, 0xa9, 0x50, 0x61, 0x72, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PartSizes))) + for za0006 := range z.PartSizes { + o = msgp.AppendInt64(o, z.PartSizes[za0006]) + } + if (zb0001Mask & 0x1000) == 0 { // if not empty + // string "PartASizes" + o = append(o, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x41, 0x53, 0x69, 0x7a, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PartActualSizes))) + for za0007 := range z.PartActualSizes { + o = msgp.AppendInt64(o, z.PartActualSizes[za0007]) + } + } + // string "Size" + o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.StatSize) + // string "MTime" + o = append(o, 0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + o = msgp.AppendInt64(o, z.StatModTime) + if (zb0001Mask & 0x8000) == 0 { // if not empty + // string "MetaSys" + o = append(o, 0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.MetaSys))) + for za0008, za0009 := range z.MetaSys { + o = msgp.AppendString(o, za0008) + o = msgp.AppendBytes(o, za0009) + } + } + if (zb0001Mask & 0x10000) == 0 { // if not empty + // string "MetaUsr" + o = append(o, 0xa7, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x73, 0x72) + o = msgp.AppendMapHeader(o, uint32(len(z.MetaUser))) + for za0010, za0011 := range z.MetaUser { + o = msgp.AppendString(o, za0010) + o = msgp.AppendString(o, za0011) + } + } + return +} + +// UnmarshalMsg implements 
msgp.Unmarshaler +func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ID": + bts, err = msgp.ReadExactBytes(bts, (z.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + case "DDir": + bts, err = msgp.ReadExactBytes(bts, (z.DataDir)[:]) + if err != nil { + err = msgp.WrapError(err, "DataDir") + return + } + case "EcAlgo": + { + var zb0002 uint8 + zb0002, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErasureAlgorithm") + return + } + z.ErasureAlgorithm = ErasureAlgo(zb0002) + } + case "EcM": + z.ErasureM, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErasureM") + return + } + case "EcN": + z.ErasureN, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErasureN") + return + } + case "EcBSize": + z.ErasureBlockSize, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErasureBlockSize") + return + } + case "EcIndex": + z.ErasureIndex, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErasureIndex") + return + } + case "EcDist": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErasureDist") + return + } + if cap(z.ErasureDist) >= int(zb0003) { + z.ErasureDist = (z.ErasureDist)[:zb0003] + } else { + z.ErasureDist = make([]uint8, zb0003) + } + for za0003 := range z.ErasureDist { + z.ErasureDist[za0003], bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErasureDist", za0003) + return + } + } + case "CSumAlgo": + { + var zb0004 uint8 + zb0004, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "BitrotChecksumAlgo") + return + } + z.BitrotChecksumAlgo = ChecksumAlgo(zb0004) + } + case "PartNums": + var zb0005 uint32 + zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartNumbers") + return + } + if cap(z.PartNumbers) >= int(zb0005) { + z.PartNumbers = (z.PartNumbers)[:zb0005] + } else { + z.PartNumbers = make([]int, zb0005) + } + for za0004 := range z.PartNumbers { + z.PartNumbers[za0004], bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartNumbers", za0004) + return + } + } + case "PartETags": + var zb0006 uint32 + zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartETags") + return + } + if cap(z.PartETags) >= int(zb0006) { + z.PartETags = (z.PartETags)[:zb0006] + } else { + z.PartETags = make([]string, zb0006) + } + for za0005 := range z.PartETags { + z.PartETags[za0005], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartETags", za0005) + return + } + } + case "PartSizes": + var zb0007 uint32 + zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartSizes") + return + } + if cap(z.PartSizes) >= int(zb0007) { + z.PartSizes = (z.PartSizes)[:zb0007] + } else { + z.PartSizes = make([]int64, zb0007) + } + for za0006 := range z.PartSizes { + z.PartSizes[za0006], bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + 
err = msgp.WrapError(err, "PartSizes", za0006) + return + } + } + case "PartASizes": + var zb0008 uint32 + zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartActualSizes") + return + } + if cap(z.PartActualSizes) >= int(zb0008) { + z.PartActualSizes = (z.PartActualSizes)[:zb0008] + } else { + z.PartActualSizes = make([]int64, zb0008) + } + for za0007 := range z.PartActualSizes { + z.PartActualSizes[za0007], bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "PartActualSizes", za0007) + return + } + } + case "Size": + z.StatSize, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "StatSize") + return + } + case "MTime": + z.StatModTime, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "StatModTime") + return + } + case "MetaSys": + var zb0009 uint32 + zb0009, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MetaSys") + return + } + if z.MetaSys == nil { + z.MetaSys = make(map[string][]byte, zb0009) + } else if len(z.MetaSys) > 0 { + for key := range z.MetaSys { + delete(z.MetaSys, key) + } + } + for zb0009 > 0 { + var za0008 string + var za0009 []byte + zb0009-- + za0008, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MetaSys") + return + } + za0009, bts, err = msgp.ReadBytesBytes(bts, za0009) + if err != nil { + err = msgp.WrapError(err, "MetaSys", za0008) + return + } + z.MetaSys[za0008] = za0009 + } + case "MetaUsr": + var zb0010 uint32 + zb0010, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MetaUser") + return + } + if z.MetaUser == nil { + z.MetaUser = make(map[string]string, zb0010) + } else if len(z.MetaUser) > 0 { + for key := range z.MetaUser { + delete(z.MetaUser, key) + } + } + for zb0010 > 0 { + var za0010 string + var za0011 string + zb0010-- + za0010, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MetaUser") + return + } + za0011, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MetaUser", za0010) + return + } + z.MetaUser[za0010] = za0011 + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *xlMetaV2Object) Msgsize() (s int) { + s = 3 + 3 + msgp.ArrayHeaderSize + (16 * (msgp.ByteSize)) + 5 + msgp.ArrayHeaderSize + (16 * (msgp.ByteSize)) + 7 + msgp.Uint8Size + 4 + msgp.IntSize + 4 + msgp.IntSize + 8 + msgp.Int64Size + 8 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.ErasureDist) * (msgp.Uint8Size)) + 9 + msgp.Uint8Size + 9 + msgp.ArrayHeaderSize + (len(z.PartNumbers) * (msgp.IntSize)) + 10 + msgp.ArrayHeaderSize + for za0005 := range z.PartETags { + s += msgp.StringPrefixSize + len(z.PartETags[za0005]) + } + s += 10 + msgp.ArrayHeaderSize + (len(z.PartSizes) * (msgp.Int64Size)) + 11 + msgp.ArrayHeaderSize + (len(z.PartActualSizes) * (msgp.Int64Size)) + 5 + msgp.Int64Size + 6 + msgp.Int64Size + 8 + msgp.MapHeaderSize + if z.MetaSys != nil { + for za0008, za0009 := range z.MetaSys { + _ = za0009 + s += msgp.StringPrefixSize + len(za0008) + msgp.BytesPrefixSize + len(za0009) + } + } + s += 8 + msgp.MapHeaderSize + if z.MetaUser != nil { + for za0010, za0011 := range z.MetaUser { + _ = za0011 + s += msgp.StringPrefixSize + len(za0010) + msgp.StringPrefixSize + 
len(za0011) + } + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *xlMetaV2Version) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Type": + { + var zb0002 uint8 + zb0002, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + z.Type = VersionType(zb0002) + } + case "V1Obj": + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "ObjectV1") + return + } + z.ObjectV1 = nil + } else { + if z.ObjectV1 == nil { + z.ObjectV1 = new(xlMetaV1Object) + } + err = z.ObjectV1.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "ObjectV1") + return + } + } + case "V2Obj": + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "ObjectV2") + return + } + z.ObjectV2 = nil + } else { + if z.ObjectV2 == nil { + z.ObjectV2 = new(xlMetaV2Object) + } + err = z.ObjectV2.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "ObjectV2") + return + } + } + case "DelObj": + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + z.DeleteMarker = nil + } else { + if z.DeleteMarker == nil { + z.DeleteMarker = new(xlMetaV2DeleteMarker) + } + var zb0003 uint32 + zb0003, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + for zb0003 > 0 { + zb0003-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + switch msgp.UnsafeString(field) { + case "ID": + err = dc.ReadExactBytes((z.DeleteMarker.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker", "VersionID") + return + } + case "MTime": + z.DeleteMarker.ModTime, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "DeleteMarker", "ModTime") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + } + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *xlMetaV2Version) EncodeMsg(en *msgp.Writer) (err error) { + // omitempty: check for empty values + zb0001Len := uint32(4) + var zb0001Mask uint8 /* 4 bits */ + if z.ObjectV1 == nil { + zb0001Len-- + zb0001Mask |= 0x2 + } + if z.ObjectV2 == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + if z.DeleteMarker == nil { + zb0001Len-- + zb0001Mask |= 0x8 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + if zb0001Len == 0 { + return + } + // write "Type" + err = en.Append(0xa4, 0x54, 0x79, 0x70, 0x65) + if err != nil { + return + } + err = en.WriteUint8(uint8(z.Type)) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + if (zb0001Mask & 0x2) == 0 { // if not empty + // write "V1Obj" + err = en.Append(0xa5, 0x56, 0x31, 0x4f, 0x62, 0x6a) + if err != nil { + return + } + if z.ObjectV1 == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.ObjectV1.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "ObjectV1") + return + } + } + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // write "V2Obj" + err = en.Append(0xa5, 0x56, 
0x32, 0x4f, 0x62, 0x6a) + if err != nil { + return + } + if z.ObjectV2 == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.ObjectV2.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "ObjectV2") + return + } + } + } + if (zb0001Mask & 0x8) == 0 { // if not empty + // write "DelObj" + err = en.Append(0xa6, 0x44, 0x65, 0x6c, 0x4f, 0x62, 0x6a) + if err != nil { + return + } + if z.DeleteMarker == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + // map header, size 2 + // write "ID" + err = en.Append(0x82, 0xa2, 0x49, 0x44) + if err != nil { + return + } + err = en.WriteBytes((z.DeleteMarker.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker", "VersionID") + return + } + // write "MTime" + err = en.Append(0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.DeleteMarker.ModTime) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker", "ModTime") + return + } + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *xlMetaV2Version) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(4) + var zb0001Mask uint8 /* 4 bits */ + if z.ObjectV1 == nil { + zb0001Len-- + zb0001Mask |= 0x2 + } + if z.ObjectV2 == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + if z.DeleteMarker == nil { + zb0001Len-- + zb0001Mask |= 0x8 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len == 0 { + return + } + // string "Type" + o = append(o, 0xa4, 0x54, 0x79, 0x70, 0x65) + o = msgp.AppendUint8(o, uint8(z.Type)) + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "V1Obj" + o = append(o, 0xa5, 0x56, 0x31, 0x4f, 0x62, 0x6a) + if z.ObjectV1 == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.ObjectV1.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "ObjectV1") + return + } + } + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "V2Obj" + o = append(o, 0xa5, 0x56, 0x32, 0x4f, 0x62, 0x6a) + if z.ObjectV2 == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.ObjectV2.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "ObjectV2") + return + } + } + } + if (zb0001Mask & 0x8) == 0 { // if not empty + // string "DelObj" + o = append(o, 0xa6, 0x44, 0x65, 0x6c, 0x4f, 0x62, 0x6a) + if z.DeleteMarker == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "ID" + o = append(o, 0x82, 0xa2, 0x49, 0x44) + o = msgp.AppendBytes(o, (z.DeleteMarker.VersionID)[:]) + // string "MTime" + o = append(o, 0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + o = msgp.AppendInt64(o, z.DeleteMarker.ModTime) + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *xlMetaV2Version) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Type": + { + var zb0002 uint8 + zb0002, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + z.Type = VersionType(zb0002) + } + case "V1Obj": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.ObjectV1 = nil + } else { + if z.ObjectV1 == nil { + z.ObjectV1 = 
new(xlMetaV1Object) + } + bts, err = z.ObjectV1.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "ObjectV1") + return + } + } + case "V2Obj": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.ObjectV2 = nil + } else { + if z.ObjectV2 == nil { + z.ObjectV2 = new(xlMetaV2Object) + } + bts, err = z.ObjectV2.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "ObjectV2") + return + } + } + case "DelObj": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.DeleteMarker = nil + } else { + if z.DeleteMarker == nil { + z.DeleteMarker = new(xlMetaV2DeleteMarker) + } + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + switch msgp.UnsafeString(field) { + case "ID": + bts, err = msgp.ReadExactBytes(bts, (z.DeleteMarker.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker", "VersionID") + return + } + case "MTime": + z.DeleteMarker.ModTime, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker", "ModTime") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *xlMetaV2Version) Msgsize() (s int) { + s = 1 + 5 + msgp.Uint8Size + 6 + if z.ObjectV1 == nil { + s += msgp.NilSize + } else { + s += z.ObjectV1.Msgsize() + } + s += 6 + if z.ObjectV2 == nil { + s += msgp.NilSize + } else { + s += z.ObjectV2.Msgsize() + } + s += 7 + if z.DeleteMarker == nil { + s += msgp.NilSize + } else { + s += 1 + 3 + msgp.ArrayHeaderSize + (16 * (msgp.ByteSize)) + 6 + msgp.Int64Size + } + return +} diff --git a/cmd/xl-storage-format-v2_gen_test.go b/cmd/xl-storage-format-v2_gen_test.go new file mode 100644 index 000000000..39f03c898 --- /dev/null +++ b/cmd/xl-storage-format-v2_gen_test.go @@ -0,0 +1,462 @@ +package cmd + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
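Before the imports and generated round-trip tests, one detail of the encoders above deserves a note: xlMetaV2Version is effectively a tagged union (Type plus at most one of V1Obj/V2Obj/DelObj), and the generated EncodeMsg/MarshalMsg shrink the map header by one entry per nil pointer, recording the skipped fields in the zb0001Mask bits. A minimal standalone sketch of that omitempty-mask pattern (hypothetical helper, not part of this patch):

package main

import "fmt"

// headerLen mimics the generated omitempty logic above: start from the full
// field count, subtract one per nil member, and set a bit per skipped member
// so the writer later knows which keys not to emit.
func headerLen(v1Nil, v2Nil, delNil bool) (n uint32, mask uint8) {
	n = 4 // "Type" + "V1Obj" + "V2Obj" + "DelObj"
	if v1Nil {
		n--
		mask |= 0x2
	}
	if v2Nil {
		n--
		mask |= 0x4
	}
	if delNil {
		n--
		mask |= 0x8
	}
	return n, mask
}

func main() {
	// A version holding only a V2 object: the map shrinks to {"Type", "V2Obj"}.
	n, mask := headerLen(true, false, true)
	fmt.Printf("map header size=%d skip mask=%#x\n", n, mask) // size=2 skip mask=0xa
}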
+ +import ( + "bytes" + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalxlMetaV2(t *testing.T) { + v := xlMetaV2{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgxlMetaV2(b *testing.B) { + v := xlMetaV2{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgxlMetaV2(b *testing.B) { + v := xlMetaV2{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalxlMetaV2(b *testing.B) { + v := xlMetaV2{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodexlMetaV2(t *testing.T) { + v := xlMetaV2{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodexlMetaV2 Msgsize() is inaccurate") + } + + vn := xlMetaV2{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodexlMetaV2(b *testing.B) { + v := xlMetaV2{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodexlMetaV2(b *testing.B) { + v := xlMetaV2{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalxlMetaV2DeleteMarker(t *testing.T) { + v := xlMetaV2DeleteMarker{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgxlMetaV2DeleteMarker(b *testing.B) { + v := xlMetaV2DeleteMarker{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgxlMetaV2DeleteMarker(b *testing.B) { + v := xlMetaV2DeleteMarker{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalxlMetaV2DeleteMarker(b *testing.B) { + v := xlMetaV2DeleteMarker{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := 
v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodexlMetaV2DeleteMarker(t *testing.T) { + v := xlMetaV2DeleteMarker{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodexlMetaV2DeleteMarker Msgsize() is inaccurate") + } + + vn := xlMetaV2DeleteMarker{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodexlMetaV2DeleteMarker(b *testing.B) { + v := xlMetaV2DeleteMarker{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodexlMetaV2DeleteMarker(b *testing.B) { + v := xlMetaV2DeleteMarker{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalxlMetaV2Object(t *testing.T) { + v := xlMetaV2Object{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgxlMetaV2Object(b *testing.B) { + v := xlMetaV2Object{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgxlMetaV2Object(b *testing.B) { + v := xlMetaV2Object{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalxlMetaV2Object(b *testing.B) { + v := xlMetaV2Object{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodexlMetaV2Object(t *testing.T) { + v := xlMetaV2Object{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodexlMetaV2Object Msgsize() is inaccurate") + } + + vn := xlMetaV2Object{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodexlMetaV2Object(b *testing.B) { + v := xlMetaV2Object{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodexlMetaV2Object(b *testing.B) { + v := xlMetaV2Object{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { 
+ b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalxlMetaV2Version(t *testing.T) { + v := xlMetaV2Version{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgxlMetaV2Version(b *testing.B) { + v := xlMetaV2Version{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgxlMetaV2Version(b *testing.B) { + v := xlMetaV2Version{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalxlMetaV2Version(b *testing.B) { + v := xlMetaV2Version{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodexlMetaV2Version(t *testing.T) { + v := xlMetaV2Version{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodexlMetaV2Version Msgsize() is inaccurate") + } + + vn := xlMetaV2Version{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodexlMetaV2Version(b *testing.B) { + v := xlMetaV2Version{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodexlMetaV2Version(b *testing.B) { + v := xlMetaV2Version{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/cmd/xl-v1-utils_test.go b/cmd/xl-storage-format_test.go similarity index 59% rename from cmd/xl-v1-utils_test.go rename to cmd/xl-storage-format_test.go index 7ab5e0cd1..aa612b6d4 100644 --- a/cmd/xl-v1-utils_test.go +++ b/cmd/xl-storage-format_test.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc. + * MinIO Cloud Storage, (C) 2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,130 +18,60 @@ package cmd import ( "bytes" - "context" "encoding/hex" "encoding/json" - "reflect" "testing" - humanize "github.com/dustin/go-humanize" + "github.com/dustin/go-humanize" + jsoniter "github.com/json-iterator/go" ) -// Tests caclculating disk count. 
-func TestDiskCount(t *testing.T) { - testCases := []struct { - disks []StorageAPI - diskCount int +func TestIsXLMetaFormatValid(t *testing.T) { + tests := []struct { + name int + version string + format string + want bool }{ - // Test case - 1 - { - disks: []StorageAPI{&posix{}, &posix{}, &posix{}, &posix{}}, - diskCount: 4, - }, - // Test case - 2 - { - disks: []StorageAPI{nil, &posix{}, &posix{}, &posix{}}, - diskCount: 3, - }, + {1, "123", "fs", false}, + {2, "123", xlMetaFormat, false}, + {3, xlMetaVersion100, "test", false}, + {4, xlMetaVersion101, "hello", false}, + {5, xlMetaVersion100, xlMetaFormat, true}, + {6, xlMetaVersion101, xlMetaFormat, true}, } - for i, testCase := range testCases { - cdiskCount := diskCount(testCase.disks) - if cdiskCount != testCase.diskCount { - t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.diskCount, cdiskCount) + for _, tt := range tests { + if got := isXLMetaFormatValid(tt.version, tt.format); got != tt.want { + t.Errorf("Test %d: Expected %v but received %v", tt.name, tt.want, got) } } } -// Test for reduceErrs, reduceErr reduces collection -// of errors into a single maximal error with in the list. -func TestReduceErrs(t *testing.T) { - // List all of all test cases to validate various cases of reduce errors. - testCases := []struct { - errs []error - ignoredErrs []error - err error +func TestIsXLMetaErasureInfoValid(t *testing.T) { + tests := []struct { + name int + data int + parity int + want bool }{ - // Validate if have reduced properly. - {[]error{ - errDiskNotFound, - errDiskNotFound, - errDiskFull, - }, []error{}, errXLReadQuorum}, - // Validate if have no consensus. - {[]error{ - errDiskFull, - errDiskNotFound, - nil, nil, - }, []error{}, errXLReadQuorum}, - // Validate if have consensus and errors ignored. - {[]error{ - errVolumeNotFound, - errVolumeNotFound, - errVolumeNotFound, - errVolumeNotFound, - errVolumeNotFound, - errDiskNotFound, - errDiskNotFound, - }, []error{errDiskNotFound}, errVolumeNotFound}, - {[]error{}, []error{}, errXLReadQuorum}, - {[]error{errFileNotFound, errFileNotFound, errFileNotFound, - errFileNotFound, errFileNotFound, nil, nil, nil, nil, nil}, - nil, nil}, + {1, 5, 6, false}, + {2, 5, 5, true}, + {3, 0, 5, false}, + {4, 5, 0, false}, + {5, 5, 0, false}, + {6, 5, 4, true}, } - // Validates list of all the testcases for returning valid errors. - for i, testCase := range testCases { - gotErr := reduceReadQuorumErrs(GlobalContext, testCase.errs, testCase.ignoredErrs, 5) - if gotErr != testCase.err { - t.Errorf("Test %d : expected %s, got %s", i+1, testCase.err, gotErr) - } - gotNewErr := reduceWriteQuorumErrs(GlobalContext, testCase.errs, testCase.ignoredErrs, 6) - if gotNewErr != errXLWriteQuorum { - t.Errorf("Test %d : expected %s, got %s", i+1, errXLWriteQuorum, gotErr) + for _, tt := range tests { + if got := isXLMetaErasureInfoValid(tt.data, tt.parity); got != tt.want { + t.Errorf("Test %d: Expected %v but received %v", tt.name, tt.want, got) } } } -// TestHashOrder - test order of ints in array -func TestHashOrder(t *testing.T) { - testCases := []struct { - objectName string - hashedOrder []int - }{ - // cases which should pass the test. - // passing in valid object name.
- {"object", []int{14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}}, - {"The Shining Script .pdf", []int{16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}, - {"Cost Benefit Analysis (2009-2010).pptx", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}}, - {"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}}, - {"SHØRT", []int{11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, - {"There are far too many object names, and far too few bucket names!", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}}, - {"a/b/c/", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}}, - {"/a/b/c", []int{6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5}}, - {string([]byte{0xff, 0xfe, 0xfd}), []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}}, - } - - // Tests hashing order to be consistent. - for i, testCase := range testCases { - hashedOrder := hashOrder(testCase.objectName, 16) - if !reflect.DeepEqual(testCase.hashedOrder, hashedOrder) { - t.Errorf("Test case %d: Expected \"%v\" but failed \"%v\"", i+1, testCase.hashedOrder, hashedOrder) - } - } - - // Tests hashing order to fail for when order is '-1'. - if hashedOrder := hashOrder("This will fail", -1); hashedOrder != nil { - t.Errorf("Test: Expect \"nil\" but failed \"%#v\"", hashedOrder) - } - - if hashedOrder := hashOrder("This will fail", 0); hashedOrder != nil { - t.Errorf("Test: Expect \"nil\" but failed \"%#v\"", hashedOrder) - } -} - -// newTestXLMetaV1 - initializes new xlMetaV1, adds version, allocates a fresh erasure info and metadata. -func newTestXLMetaV1() xlMetaV1 { - xlMeta := xlMetaV1{} - xlMeta.Version = xlMetaVersion +// newTestXLMetaV1 - initializes new xlMetaV1Object, adds version, allocates a fresh erasure info and metadata. +func newTestXLMetaV1() xlMetaV1Object { + xlMeta := xlMetaV1Object{} + xlMeta.Version = xlMetaVersion101 xlMeta.Format = xlMetaFormat xlMeta.Minio.Release = "test" xlMeta.Erasure = ErasureInfo{ @@ -152,7 +82,7 @@ func newTestXLMetaV1() xlMetaV1 { Index: 10, Distribution: []int{9, 10, 1, 2, 3, 4, 5, 6, 7, 8}, } - xlMeta.Stat = statInfo{ + xlMeta.Stat = StatInfo{ Size: int64(20), ModTime: UTCNow(), } @@ -163,7 +93,7 @@ func newTestXLMetaV1() xlMetaV1 { return xlMeta } -func (m *xlMetaV1) AddTestObjectCheckSum(partNumber int, algorithm BitrotAlgorithm, hash string) { +func (m *xlMetaV1Object) AddTestObjectCheckSum(partNumber int, algorithm BitrotAlgorithm, hash string) { checksum, err := hex.DecodeString(hash) if err != nil { panic(err) @@ -172,7 +102,7 @@ func (m *xlMetaV1) AddTestObjectCheckSum(partNumber int, algorithm BitrotAlgorit } // AddTestObjectPart - add a new object part in order. -func (m *xlMetaV1) AddTestObjectPart(partNumber int, partSize int64) { +func (m *xlMetaV1Object) AddTestObjectPart(partNumber int, partSize int64) { partInfo := ObjectPartInfo{ Number: partNumber, Size: partSize, @@ -182,7 +112,7 @@ func (m *xlMetaV1) AddTestObjectPart(partNumber int, partSize int64) { m.Parts[partNumber-1] = partInfo } -// Constructs xlMetaV1{} for given number of parts and converts it into bytes. +// Constructs xlMetaV1Object{} for given number of parts and converts it into bytes. func getXLMetaBytes(totalParts int) []byte { xlSampleMeta := getSampleXLMeta(totalParts) xlMetaBytes, err := json.Marshal(xlSampleMeta) @@ -192,15 +122,15 @@ func getXLMetaBytes(totalParts int) []byte { return xlMetaBytes } -// Returns sample xlMetaV1{} for number of parts. 
-func getSampleXLMeta(totalParts int) xlMetaV1 { +// Returns sample xlMetaV1Object{} for number of parts. +func getSampleXLMeta(totalParts int) xlMetaV1Object { xlMeta := newTestXLMetaV1() // Number of checksum info == total parts. xlMeta.Erasure.Checksums = make([]ChecksumInfo, totalParts) // total number of parts. xlMeta.Parts = make([]ObjectPartInfo, totalParts) for i := 0; i < totalParts; i++ { - // hard coding hash and algo value for the checksum, Since we are benchmarking the parsing of xl.json the magnitude doesn't affect the test, + // hard coding hash and algo value for the checksum, Since we are benchmarking the parsing of xl.meta the magnitude doesn't affect the test, // The magnitude doesn't make a difference, only the size does. xlMeta.AddTestObjectCheckSum(i+1, BLAKE2b512, "a23f5eff248c4372badd9f3b2455a285cd4ca86c3d9a570b091d3fc5cd7ca6d9484bbea3f8c5d8d4f84daae96874419eda578fd736455334afbac2c924b3915a") xlMeta.AddTestObjectPart(i+1, 67108864) @@ -209,8 +139,8 @@ func getSampleXLMeta(totalParts int) xlMetaV1 { } // Compare the unmarshaled XLMetaV1 with the one obtained from jsoniter parsing. -func compareXLMetaV1(t *testing.T, unMarshalXLMeta, jsoniterXLMeta xlMetaV1) { - // Start comparing the fields of xlMetaV1 obtained from jsoniter parsing with one parsed using json unmarshaling. +func compareXLMetaV1(t *testing.T, unMarshalXLMeta, jsoniterXLMeta xlMetaV1Object) { + // Start comparing the fields of xlMetaV1Object obtained from jsoniter parsing with one parsed using json unmarshaling. if unMarshalXLMeta.Version != jsoniterXLMeta.Version { t.Errorf("Expected the Version to be \"%s\", but got \"%s\".", unMarshalXLMeta.Version, jsoniterXLMeta.Version) } @@ -297,13 +227,14 @@ func compareXLMetaV1(t *testing.T, unMarshalXLMeta, jsoniterXLMeta xlMetaV1) { func TestGetXLMetaV1Jsoniter1(t *testing.T) { xlMetaJSON := getXLMetaBytes(1) - var unMarshalXLMeta xlMetaV1 + var unMarshalXLMeta xlMetaV1Object if err := json.Unmarshal(xlMetaJSON, &unMarshalXLMeta); err != nil { t.Errorf("Unmarshalling failed: %v", err) } - jsoniterXLMeta, err := xlMetaV1UnmarshalJSON(GlobalContext, xlMetaJSON) - if err != nil { + var jsoniterXLMeta xlMetaV1Object + var json = jsoniter.ConfigCompatibleWithStandardLibrary + if err := json.Unmarshal(xlMetaJSON, &jsoniterXLMeta); err != nil { t.Errorf("jsoniter parsing of XLMeta failed: %v", err) } compareXLMetaV1(t, unMarshalXLMeta, jsoniterXLMeta) @@ -315,14 +246,17 @@ func TestGetXLMetaV1Jsoniter10(t *testing.T) { xlMetaJSON := getXLMetaBytes(10) - var unMarshalXLMeta xlMetaV1 + var unMarshalXLMeta xlMetaV1Object if err := json.Unmarshal(xlMetaJSON, &unMarshalXLMeta); err != nil { t.Errorf("Unmarshalling failed: %v", err) } - jsoniterXLMeta, err := xlMetaV1UnmarshalJSON(GlobalContext, xlMetaJSON) - if err != nil { + + var jsoniterXLMeta xlMetaV1Object + var json = jsoniter.ConfigCompatibleWithStandardLibrary + if err := json.Unmarshal(xlMetaJSON, &jsoniterXLMeta); err != nil { t.Errorf("jsoniter parsing of XLMeta failed: %v", err) } + compareXLMetaV1(t, unMarshalXLMeta, jsoniterXLMeta) } @@ -382,70 +316,3 @@ func TestGetPartSizeFromIdx(t *testing.T) { } } } - -func TestShuffleDisks(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - nDisks := 16 - disks, err := getRandomDisks(nDisks) - if err != nil { - t.Fatal(err) - } - objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...)) - if err != nil { - removeRoots(disks) - t.Fatal(err) - } - defer removeRoots(disks) - z := objLayer.(*xlZones) - 
testShuffleDisks(t, z) -} - -// Test shuffleDisks which returns shuffled slice of disks for their actual distribution. -func testShuffleDisks(t *testing.T, z *xlZones) { - disks := z.zones[0].GetDisks(0)() - distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15} - shuffledDisks := shuffleDisks(disks, distribution) - // From the "distribution" above you can notice that: - // 1st data block is in the 9th disk (i.e distribution index 8) - // 2nd data block is in the 8th disk (i.e distribution index 7) and so on. - if shuffledDisks[0] != disks[8] || - shuffledDisks[1] != disks[7] || - shuffledDisks[2] != disks[9] || - shuffledDisks[3] != disks[6] || - shuffledDisks[4] != disks[10] || - shuffledDisks[5] != disks[5] || - shuffledDisks[6] != disks[11] || - shuffledDisks[7] != disks[4] || - shuffledDisks[8] != disks[12] || - shuffledDisks[9] != disks[3] || - shuffledDisks[10] != disks[13] || - shuffledDisks[11] != disks[2] || - shuffledDisks[12] != disks[14] || - shuffledDisks[13] != disks[1] || - shuffledDisks[14] != disks[15] || - shuffledDisks[15] != disks[0] { - t.Errorf("shuffleDisks returned incorrect order.") - } -} - -// TestEvalDisks tests the behavior of evalDisks -func TestEvalDisks(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - nDisks := 16 - disks, err := getRandomDisks(nDisks) - if err != nil { - t.Fatal(err) - } - objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...)) - if err != nil { - removeRoots(disks) - t.Fatal(err) - } - defer removeRoots(disks) - z := objLayer.(*xlZones) - testShuffleDisks(t, z) -} diff --git a/cmd/posix.go b/cmd/xl-storage.go similarity index 64% rename from cmd/posix.go rename to cmd/xl-storage.go index cd96bdd8a..42fe7c9a7 100644 --- a/cmd/posix.go +++ b/cmd/xl-storage.go @@ -23,6 +23,7 @@ import ( "crypto/rand" "encoding/hex" "errors" + "fmt" "io" "io/ioutil" "os" @@ -46,6 +47,7 @@ import ( ) const ( + nullVersionID = "null" diskMinFreeSpace = 900 * humanize.MiByte // Min 900MiB free space. diskMinTotalSpace = diskMinFreeSpace // Min 900MiB total space. readBlockSize = 4 * humanize.MiByte // Default read block size 4MiB. @@ -60,6 +62,9 @@ const ( // Wait interval to check if active IO count is low // to proceed crawling to compute data usage lowActiveIOWaitTick = 100 * time.Millisecond + + // XL metadata file carries per object metadata. + xlStorageFormatFile = "xl.meta" ) // isValidVolname verifies a volname name in accordance with object @@ -77,8 +82,8 @@ func isValidVolname(volname string) bool { return true } -// posix - implements StorageAPI interface. -type posix struct { +// xlStorage - implements StorageAPI interface. +type xlStorage struct { // Disk usage metrics totalUsed uint64 // ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG @@ -96,9 +101,8 @@ type posix struct { formatFileInfo os.FileInfo formatLastCheck time.Time - // Disk usage metrics - stopUsageCh chan struct{} + ctx context.Context sync.RWMutex } @@ -165,7 +169,7 @@ func getValidPath(path string, requireDirectIO bool) (string, error) { } } if fi != nil && !fi.IsDir() { - return path, syscall.ENOTDIR + return path, errDiskNotDir } di, err := getDiskInfo(path) @@ -206,7 +210,7 @@ func getValidPath(path string, requireDirectIO bool) (string, error) { // isDirEmpty - returns whether given directory is empty or not. 
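Before isDirEmpty continues below, a note on the xlStorageFormatFile constant introduced above: it pins the per-object metadata filename, so on disk every object becomes a directory holding xl.meta at its root, with each version's parts under that version's data directory. A runnable sketch of the implied path layout (bucket, object, and DataDir values are illustrative):

package main

import (
	"fmt"
	"path"
)

func main() {
	const xlStorageFormatFile = "xl.meta" // as defined in xl-storage.go above

	object := path.Join("disk1", "mybucket", "myobject")
	dataDir := "9dd7d884-121a-41e9-9a4e-1f53b447a6c3" // illustrative DataDir UUID

	fmt.Println(path.Join(object, xlStorageFormatFile)) // versioned metadata
	for n := 1; n <= 2; n++ {
		fmt.Println(path.Join(object, dataDir, fmt.Sprintf("part.%d", n))) // erasure parts
	}
}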
func isDirEmpty(dirname string) bool { - f, err := os.Open((dirname)) + f, err := os.Open(dirname) if err != nil { if !os.IsNotExist(err) { logger.LogIf(GlobalContext, err) @@ -216,12 +220,10 @@ func isDirEmpty(dirname string) bool { } defer f.Close() // List one entry. - _, err = f.Readdirnames(1) - if err != io.EOF { + if _, err = f.Readdirnames(1); err != io.EOF { if !os.IsNotExist(err) { logger.LogIf(GlobalContext, err) } - return false } // Returns true if we have reached EOF, directory is indeed empty. @@ -229,16 +231,13 @@ func isDirEmpty(dirname string) bool { } // Initialize a new storage disk. -func newPosix(path string, hostname string) (*posix, error) { +func newXLStorage(path string, hostname string) (*xlStorage, error) { var err error if path, err = getValidPath(path, true); err != nil { return nil, err } - _, err = os.Stat(path) - if err != nil { - return nil, err - } - p := &posix{ + + p := &xlStorage{ diskPath: path, hostname: hostname, pool: sync.Pool{ @@ -247,13 +246,13 @@ func newPosix(path string, hostname string) (*posix, error) { return &b }, }, - stopUsageCh: make(chan struct{}), - diskMount: mountinfo.IsLikelyMountPoint(path), + diskMount: mountinfo.IsLikelyMountPoint(path), // Allow disk usage crawler to run with up to 2 concurrent // I/O ops, if and when activeIOCount reaches this // value disk usage routine suspends the crawler // and waits until activeIOCount reaches below this threshold. maxActiveIOCount: 3, + ctx: GlobalContext, } // Success. @@ -319,7 +318,7 @@ func checkDiskFree(diskPath string, neededSpace int64) (err error) { } var di disk.Info - di, err = getDiskInfo((diskPath)) + di, err = getDiskInfo(diskPath) if err != nil { return err } @@ -337,34 +336,33 @@ func checkDiskFree(diskPath string, neededSpace int64) (err error) { } // Implements stringer compatible interface. -func (s *posix) String() string { +func (s *xlStorage) String() string { return s.diskPath } -func (s *posix) Hostname() string { +func (s *xlStorage) Hostname() string { return s.hostname } -func (s *posix) Close() error { - close(s.stopUsageCh) +func (*xlStorage) Close() error { return nil } -func (s *posix) IsOnline() bool { +func (s *xlStorage) IsOnline() bool { return true } -func (s *posix) IsLocal() bool { +func (s *xlStorage) IsLocal() bool { return true } -func (s *posix) waitForLowActiveIO() { +func (s *xlStorage) waitForLowActiveIO() { for atomic.LoadInt32(&s.activeIOCount) >= s.maxActiveIOCount { time.Sleep(lowActiveIOWaitTick) } } -func (s *posix) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) (dataUsageCache, error) { +func (s *xlStorage) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) (dataUsageCache, error) { // Check if the current bucket has a configured lifecycle policy lc, err := globalLifecycleSys.Get(cache.Info.Name) if err == nil && lc.HasActiveRules("", true) { @@ -374,21 +372,18 @@ func (s *posix) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) // Get object api objAPI := newObjectLayerWithoutSafeModeFn() if objAPI == nil { - return cache, errors.New("object layer not initialized") + return cache, errServerNotInitialized } + dataUsageInfo, err := crawlDataFolder(ctx, s.diskPath, cache, s.waitForLowActiveIO, func(item crawlItem) (int64, error) { - // Look for `xl.json' at the leaf. - if !strings.HasSuffix(item.Path, SlashSeparator+xlMetaJSONFile) { - // if no xl.json found, skip the file. + // Look for `xl.meta/xl.json' at the leaf. 
+ if !strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFile) && + !strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFileV1) { + // if no xl.meta/xl.json found, skip the file. return 0, errSkipFile } - xlMetaBuf, err := ioutil.ReadFile(item.Path) - if err != nil { - return 0, errSkipFile - } - - meta, err := xlMetaV1UnmarshalJSON(ctx, xlMetaBuf) + buf, err := ioutil.ReadFile(item.Path) if err != nil { return 0, errSkipFile } @@ -396,11 +391,25 @@ func (s *posix) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) // Remove filename which is the meta file. item.transformMetaDir() - return item.applyActions(ctx, objAPI, - actionMeta{oi: meta.ToObjectInfo(item.bucket, item.objectPath()), - meta: meta.Meta, - }), nil + fivs, err := getFileInfoVersions(buf, item.bucket, item.objectPath()) + if err != nil { + return 0, errSkipFile + } + + var totalSize int64 + for _, version := range fivs.Versions { + size := item.applyActions(ctx, objAPI, actionMeta{oi: version.ToObjectInfo(item.bucket, item.objectPath())}) + totalSize += size + } + + // Delete markers have no size, nothing to do here. + for _, deleted := range fivs.Deleted { + item.applyActions(ctx, objAPI, actionMeta{oi: deleted.ToObjectInfo(item.bucket, item.objectPath())}) + } + + return totalSize, nil }) + if err != nil { return dataUsageInfo, err } @@ -411,6 +420,7 @@ func (s *posix) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCache) total = &dataUsageEntry{} } atomic.StoreUint64(&s.totalUsed, uint64(total.Size)) + return dataUsageInfo, nil } @@ -427,7 +437,7 @@ type DiskInfo struct { // DiskInfo provides current information about disk space usage, // total free inodes and underlying filesystem. -func (s *posix) DiskInfo() (info DiskInfo, err error) { +func (s *xlStorage) DiskInfo() (info DiskInfo, err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -461,7 +471,7 @@ func (s *posix) DiskInfo() (info DiskInfo, err error) { // corresponding valid volume names on the backend in a platform // compatible way for all operating systems. If volume is not found // an error is generated. -func (s *posix) getVolDir(volume string) (string, error) { +func (s *xlStorage) getVolDir(volume string) (string, error) { if volume == "" || volume == "." || volume == ".." { return "", errVolumeNotFound } @@ -470,7 +480,7 @@ func (s *posix) getVolDir(volume string) (string, error) { } // GetDiskID - returns the cached disk uuid -func (s *posix) GetDiskID() (string, error) { +func (s *xlStorage) GetDiskID() (string, error) { s.RLock() diskID := s.diskID fileInfo := s.formatFileInfo @@ -510,24 +520,24 @@ func (s *posix) GetDiskID() (string, error) { if err != nil { return "", errCorruptedFormat } - format := &formatXLV3{} + format := &formatErasureV3{} var json = jsoniter.ConfigCompatibleWithStandardLibrary if err = json.Unmarshal(b, &format); err != nil { return "", errCorruptedFormat } - s.diskID = format.XL.This + s.diskID = format.Erasure.This s.formatFileInfo = fi s.formatLastCheck = time.Now() return s.diskID, nil } // Make a volume entry. -func (s *posix) SetDiskID(id string) { - // NO-OP for posix as it is handled either by posixDiskIDCheck{} for local disks or +func (s *xlStorage) SetDiskID(id string) { + // NO-OP for xlStorage as it is handled either by xlStorageDiskIDCheck{} for local disks or // storage rest server for remote disks. 
} -func (s *posix) MakeVolBulk(volumes ...string) (err error) { +func (s *xlStorage) MakeVolBulk(volumes ...string) (err error) { for _, volume := range volumes { if err = s.MakeVol(volume); err != nil { if os.IsPermission(err) { @@ -539,7 +549,7 @@ func (s *posix) MakeVolBulk(volumes ...string) (err error) { } // Make a volume entry. -func (s *posix) MakeVol(volume string) (err error) { +func (s *xlStorage) MakeVol(volume string) (err error) { if !isValidVolname(volume) { return errInvalidArgument } @@ -573,7 +583,7 @@ func (s *posix) MakeVol(volume string) (err error) { } // ListVols - list volumes. -func (s *posix) ListVols() (volsInfo []VolInfo, err error) { +func (s *xlStorage) ListVols() (volsInfo []VolInfo, err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -633,7 +643,7 @@ func listVols(dirPath string) ([]VolInfo, error) { } // StatVol - get volume info. -func (s *posix) StatVol(volume string) (volInfo VolInfo, err error) { +func (s *xlStorage) StatVol(volume string) (volInfo VolInfo, err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -665,7 +675,7 @@ func (s *posix) StatVol(volume string) (volInfo VolInfo, err error) { } // DeleteVol - delete a volume. -func (s *posix) DeleteVol(volume string, forceDelete bool) (err error) { +func (s *xlStorage) DeleteVol(volume string, forceDelete bool) (err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -704,7 +714,7 @@ const guidSplunk = "guidSplunk" // ListDirSplunk - return all the entries at the given directory path. // If an entry is a directory it will be returned with a trailing SlashSeparator. -func (s *posix) ListDirSplunk(volume, dirPath string, count int) (entries []string, err error) { +func (s *xlStorage) ListDirSplunk(volume, dirPath string, count int) (entries []string, err error) { guidIndex := strings.Index(dirPath, guidSplunk) if guidIndex != -1 { return nil, nil @@ -746,8 +756,16 @@ func (s *posix) ListDirSplunk(volume, dirPath string, count int) (entries []stri if entry != receiptJSON { continue } - if _, serr := os.Stat(pathJoin(dirPath, entry, xlMetaJSONFile)); serr == nil { + _, err = os.Stat(pathJoin(dirPath, entry, xlStorageFormatFile)) + if err == nil { entries[i] = strings.TrimSuffix(entry, SlashSeparator) + continue + } + if os.IsNotExist(err) { + if err = s.renameLegacyMetadata(volume, entry); err == nil { + // Rename was successful means we found old `xl.json` + entries[i] = strings.TrimSuffix(entry, SlashSeparator) + } } } @@ -758,7 +776,7 @@ func (s *posix) ListDirSplunk(volume, dirPath string, count int) (entries []stri // sorted order, additionally along with metadata about each of those entries. // Implemented specifically for Splunk backend structure and List call with // delimiter as "guidSplunk" -func (s *posix) WalkSplunk(volume, dirPath, marker string, endWalkCh <-chan struct{}) (ch chan FileInfo, err error) { +func (s *xlStorage) WalkSplunk(volume, dirPath, marker string, endWalkCh <-chan struct{}) (ch chan FileInfo, err error) { // Verify if volume is valid and it exists. 
volumeDir, err := s.getVolDir(volume) if err != nil { @@ -805,11 +823,20 @@ func (s *posix) WalkSplunk(volume, dirPath, marker string, endWalkCh <-chan stru Mode: os.ModeDir, } } else { - xlMetaBuf, err := ioutil.ReadFile(pathJoin(volumeDir, walkResult.entry, xlMetaJSONFile)) + var err error + var xlMetaBuf []byte + xlMetaBuf, err = ioutil.ReadFile(pathJoin(volumeDir, walkResult.entry, xlStorageFormatFile)) if err != nil { continue } - fi = readMetadata(xlMetaBuf, volume, walkResult.entry) + fi, err = getFileInfo(xlMetaBuf, volume, walkResult.entry, "") + if err != nil { + continue + } + if fi.Deleted { + // Ignore delete markers. + continue + } } select { case ch <- fi: @@ -822,11 +849,89 @@ func (s *posix) WalkSplunk(volume, dirPath, marker string, endWalkCh <-chan stru return ch, nil } +// WalkVersions - is a sorted walker which returns file entries in lexically sorted order, +// additionally along with metadata version info about each of those entries. +func (s *xlStorage) WalkVersions(volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (ch chan FileInfoVersions, err error) { + atomic.AddInt32(&s.activeIOCount, 1) + defer func() { + atomic.AddInt32(&s.activeIOCount, -1) + }() + + // Verify if volume is valid and it exists. + volumeDir, err := s.getVolDir(volume) + if err != nil { + return nil, err + } + + // Stat a volume entry. + _, err = os.Stat(volumeDir) + if err != nil { + if os.IsNotExist(err) { + return nil, errVolumeNotFound + } else if isSysErrIO(err) { + return nil, errFaultyDisk + } + return nil, err + } + + // buffer channel matches the S3 ListObjects implementation + ch = make(chan FileInfoVersions, maxObjectList) + go func() { + defer close(ch) + listDir := func(volume, dirPath, dirEntry string) (emptyDir bool, entries []string) { + entries, err := s.ListDir(volume, dirPath, -1) + if err != nil { + return false, nil + } + if len(entries) == 0 { + return true, nil + } + sort.Strings(entries) + return false, filterMatchingPrefix(entries, dirEntry) + } + + walkResultCh := startTreeWalk(GlobalContext, volume, dirPath, marker, recursive, listDir, endWalkCh) + for { + walkResult, ok := <-walkResultCh + if !ok { + return + } + var fiv FileInfoVersions + if HasSuffix(walkResult.entry, SlashSeparator) { + fiv = FileInfoVersions{ + Versions: []FileInfo{ + { + Volume: volume, + Name: walkResult.entry, + Mode: os.ModeDir, + }, + }, + } + } else { + xlMetaBuf, err := ioutil.ReadFile(pathJoin(volumeDir, walkResult.entry, xlStorageFormatFile)) + if err != nil { + continue + } + + fiv, err = getFileInfoVersions(xlMetaBuf, volume, walkResult.entry) + if err != nil { + continue + } + } + select { + case ch <- fiv: + case <-endWalkCh: + return + } + } + }() + + return ch, nil +} + // Walk - is a sorted walker which returns file entries in lexically // sorted order, additionally along with metadata about each of those entries. 
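Before the plain Walk variant below: WalkVersions above streams FileInfoVersions entries over a buffered channel and shuts down as soon as endWalkCh closes. A hedged consumer-side sketch (same package assumed; bucket and prefix are illustrative):

// sketch: drain every version under a prefix, closing endCh on return so
// the background tree walk started by WalkVersions stops early if needed.
func listAllVersions(s *xlStorage, bucket, prefix string) error {
	endCh := make(chan struct{})
	defer close(endCh)

	ch, err := s.WalkVersions(bucket, prefix, "", true, endCh)
	if err != nil {
		return err
	}
	for fiv := range ch {
		for _, v := range fiv.Versions {
			fmt.Println(v.Name, v.Size, v.Deleted)
		}
	}
	return nil
}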
-func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile string, - readMetadataFn readMetadataFunc, endWalkCh <-chan struct{}) (ch chan FileInfo, err error) { - +func (s *xlStorage) Walk(volume, dirPath, marker string, recursive bool, endWalkCh <-chan struct{}) (ch chan FileInfo, err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -853,8 +958,8 @@ func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile st ch = make(chan FileInfo, maxObjectList) go func() { defer close(ch) - listDir := func(volume, dirPath, dirEntry string) (bool, []string) { - entries, err := s.ListDir(volume, dirPath, -1, leafFile) + listDir := func(volume, dirPath, dirEntry string) (emptyDir bool, entries []string) { + entries, err := s.ListDir(volume, dirPath, -1) if err != nil { return false, nil } @@ -879,11 +984,20 @@ func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile st Mode: os.ModeDir, } } else { - xlMetaBuf, err := ioutil.ReadFile(pathJoin(volumeDir, walkResult.entry, leafFile)) + var err error + var xlMetaBuf []byte + xlMetaBuf, err = ioutil.ReadFile(pathJoin(volumeDir, walkResult.entry, xlStorageFormatFile)) if err != nil { continue } - fi = readMetadataFn(xlMetaBuf, volume, walkResult.entry) + fi, err = getFileInfo(xlMetaBuf, volume, walkResult.entry, "") + if err != nil { + continue + } + if fi.Deleted { + // Ignore delete markers. + continue + } } select { case ch <- fi: @@ -898,7 +1012,7 @@ func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile st // ListDir - return all the entries at the given directory path. // If an entry is a directory it will be returned with a trailing SlashSeparator. -func (s *posix) ListDir(volume, dirPath string, count int, leafFile string) (entries []string, err error) { +func (s *xlStorage) ListDir(volume, dirPath string, count int) (entries []string, err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -929,16 +1043,241 @@ func (s *posix) ListDir(volume, dirPath string, count int, leafFile string) (ent return nil, err } - // If leaf file is specified, filter out the entries. - if leafFile != "" { - for i, entry := range entries { - if _, serr := os.Stat(pathJoin(dirPath, entry, leafFile)); serr == nil { + for i, entry := range entries { + _, err = os.Stat(pathJoin(dirPath, entry, xlStorageFormatFile)) + if err == nil { + entries[i] = strings.TrimSuffix(entry, SlashSeparator) + continue + } + if os.IsNotExist(err) { + if err = s.renameLegacyMetadata(volume, entry); err == nil { + // if rename was successful, means we did find old `xl.json` entries[i] = strings.TrimSuffix(entry, SlashSeparator) + continue } } } - return entries, err + return entries, nil +} + +// DeleteVersions deletes slice of versions, it can be same object +// or multiple objects. 
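The implementation follows below; first, a hedged caller-side sketch of the bulk API (same package assumed, volume and FileInfo values illustrative):

// sketch: issue a bulk delete of several versions and surface the first
// failure; DeleteVersions reports one error slot per requested version.
func deleteAll(s *xlStorage, volume string, versions []FileInfo) error {
	for _, err := range s.DeleteVersions(volume, versions) {
		if err != nil {
			return err
		}
	}
	return nil
}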
+func (s *xlStorage) DeleteVersions(volume string, versions []FileInfo) []error { + errs := make([]error, len(versions)) + for i, version := range versions { + if err := s.DeleteVersion(volume, version.Name, version); err != nil { + errs[i] = err + } + } + + return errs +} + +// DeleteVersion - deletes FileInfo metadata for path at `xl.meta` +func (s *xlStorage) DeleteVersion(volume, path string, fi FileInfo) error { + if HasSuffix(path, SlashSeparator) { + return s.DeleteFile(volume, path) + } + + buf, err := s.ReadAll(volume, pathJoin(path, xlStorageFormatFile)) + if err != nil { + return err + } + + if len(buf) == 0 { + return errFileNotFound + } + + atomic.AddInt32(&s.activeIOCount, 1) + defer func() { + atomic.AddInt32(&s.activeIOCount, -1) + }() + + volumeDir, err := s.getVolDir(volume) + if err != nil { + return err + } + + if !isXL2V1Format(buf) { + // Delete the meta file, if there are no more versions the + // top level parent is automatically removed. + filePath := pathJoin(volumeDir, path, xlStorageFormatFile) + if err = checkPathLength(filePath); err != nil { + return err + } + + return deleteFile(volumeDir, filePath, false) + } + + var xlMeta xlMetaV2 + if err = xlMeta.Load(buf); err != nil { + return err + } + + if fi.Deleted { + if err = xlMeta.AddVersion(fi); err != nil { + return err + } + buf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...)) + if err != nil { + return err + } + return s.WriteAll(volume, pathJoin(path, xlStorageFormatFile), bytes.NewReader(buf)) + } + + dataDir, lastVersion, err := xlMeta.DeleteVersion(fi) + if err != nil { + return err + } + + buf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...)) + if err != nil { + return err + } + + // when data-dir is specified. + if dataDir != "" { + filePath := pathJoin(volumeDir, path, dataDir) + if err = checkPathLength(filePath); err != nil { + return err + } + + if err = removeAll(filePath); err != nil { + return err + } + } + + if !lastVersion { + return s.WriteAll(volume, pathJoin(path, xlStorageFormatFile), bytes.NewReader(buf)) + } + + // Delete the meta file, if there are no more versions the + // top level parent is automatically removed. 
+ filePath := pathJoin(volumeDir, path, xlStorageFormatFile) + if err = checkPathLength(filePath); err != nil { + return err + } + + return deleteFile(volumeDir, filePath, false) +} + +// WriteMetadata - writes FileInfo metadata for path at `xl.meta` +func (s *xlStorage) WriteMetadata(volume, path string, fi FileInfo) error { + buf, err := s.ReadAll(volume, pathJoin(path, xlStorageFormatFile)) + if err != nil && err != errFileNotFound { + return err + } + + atomic.AddInt32(&s.activeIOCount, 1) + defer func() { + atomic.AddInt32(&s.activeIOCount, -1) + }() + + var xlMeta xlMetaV2 + if !isXL2V1Format(buf) { + xlMeta, err = newXLMetaV2(fi) + if err != nil { + return err + } + buf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...)) + if err != nil { + return err + } + } else { + if err = xlMeta.Load(buf); err != nil { + return err + } + if err = xlMeta.AddVersion(fi); err != nil { + return err + } + buf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...)) + if err != nil { + return err + } + } + + return s.WriteAll(volume, pathJoin(path, xlStorageFormatFile), bytes.NewReader(buf)) +} + +func (s *xlStorage) renameLegacyMetadata(volume, path string) error { + atomic.AddInt32(&s.activeIOCount, 1) + defer func() { + atomic.AddInt32(&s.activeIOCount, -1) + }() + + volumeDir, err := s.getVolDir(volume) + if err != nil { + return err + } + + // Stat a volume entry. + _, err = os.Stat(volumeDir) + if err != nil { + if os.IsNotExist(err) { + return errVolumeNotFound + } else if isSysErrIO(err) { + return errFaultyDisk + } else if isSysErrTooManyFiles(err) { + return errTooManyOpenFiles + } + return err + } + + // Validate file path length, before reading. + filePath := pathJoin(volumeDir, path) + if err = checkPathLength(filePath); err != nil { + return err + } + + srcFilePath := pathJoin(filePath, xlStorageFormatFileV1) + dstFilePath := pathJoin(filePath, xlStorageFormatFile) + if err = os.Rename(srcFilePath, dstFilePath); err != nil { + switch { + case isSysErrNotDir(err): + return errFileNotFound + case isSysErrPathNotFound(err): + return errFileNotFound + case isSysErrCrossDevice(err): + return fmt.Errorf("%w (%s)->(%s)", errCrossDeviceLink, srcFilePath, dstFilePath) + case os.IsNotExist(err): + return errFileNotFound + case os.IsExist(err): + // This is returned only when destination is a directory and we + // are attempting a rename from file to directory. + return errIsNotRegular + default: + return err + } + } + return nil +} + +// ReadVersion - reads metadata and returns FileInfo at path `xl.meta` +func (s *xlStorage) ReadVersion(volume, path, versionID string) (fi FileInfo, err error) { + buf, err := s.ReadAll(volume, pathJoin(path, xlStorageFormatFile)) + if err != nil { + if err == errFileNotFound { + if err = s.renameLegacyMetadata(volume, path); err != nil { + return fi, err + } + buf, err = s.ReadAll(volume, pathJoin(path, xlStorageFormatFile)) + if err != nil { + return fi, err + } + } else { + return fi, err + } + } + + if len(buf) == 0 { + if versionID != "" { + return fi, errFileVersionNotFound + } + return fi, errFileNotFound + } + + return getFileInfo(buf, volume, path, versionID) } // ReadAll reads from r until an error or EOF and returns the data it read. @@ -947,7 +1286,7 @@ func (s *posix) ListDir(volume, dirPath string, count int, leafFile string) (ent // as an error to be reported. // This API is meant to be used on files which have small memory footprint, do // not use this on large files as it would cause server to crash.
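Before ReadAll: WriteMetadata above is a read-modify-write cycle on xl.meta. A condensed same-package sketch of that cycle, using only helpers that appear in this patch (xlHeader and xlVersionV1 are the format header constants from xl-storage-format-v2.go):

// condensed update cycle as in WriteMetadata: load the existing xl.meta if
// any, fold the new version in, then rewrite the file with its header.
func upsertVersion(s *xlStorage, volume, object string, fi FileInfo) error {
	metaPath := pathJoin(object, xlStorageFormatFile)
	buf, err := s.ReadAll(volume, metaPath)
	if err != nil && err != errFileNotFound {
		return err
	}
	var xlMeta xlMetaV2
	if isXL2V1Format(buf) {
		if err = xlMeta.Load(buf); err != nil {
			return err
		}
		if err = xlMeta.AddVersion(fi); err != nil {
			return err
		}
	} else if xlMeta, err = newXLMetaV2(fi); err != nil { // fresh object
		return err
	}
	if buf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...)); err != nil {
		return err
	}
	return s.WriteAll(volume, metaPath, bytes.NewReader(buf))
}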
-func (s *posix) ReadAll(volume, path string) (buf []byte, err error) { +func (s *xlStorage) ReadAll(volume, path string) (buf []byte, err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -957,8 +1296,9 @@ func (s *posix) ReadAll(volume, path string) (buf []byte, err error) { if err != nil { return nil, err } + // Stat a volume entry. - _, err = os.Stat((volumeDir)) + _, err = os.Stat(volumeDir) if err != nil { if os.IsNotExist(err) { return nil, errVolumeNotFound @@ -972,12 +1312,12 @@ func (s *posix) ReadAll(volume, path string) (buf []byte, err error) { // Validate file path length, before reading. filePath := pathJoin(volumeDir, path) - if err = checkPathLength((filePath)); err != nil { + if err = checkPathLength(filePath); err != nil { return nil, err } // Open the file for reading. - buf, err = ioutil.ReadFile((filePath)) + buf, err = ioutil.ReadFile(filePath) if err != nil { if os.IsNotExist(err) { return nil, errFileNotFound @@ -1009,7 +1349,7 @@ func (s *posix) ReadAll(volume, path string) (buf []byte, err error) { // // Additionally ReadFile also starts reading from an offset. ReadFile // semantics are same as io.ReadFull. -func (s *posix) ReadFile(volume, path string, offset int64, buffer []byte, verifier *BitrotVerifier) (int64, error) { +func (s *xlStorage) ReadFile(volume, path string, offset int64, buffer []byte, verifier *BitrotVerifier) (int64, error) { if offset < 0 { return 0, errInvalidArgument } @@ -1027,7 +1367,7 @@ func (s *posix) ReadFile(volume, path string, offset int64, buffer []byte, verif var n int // Stat a volume entry. - _, err = os.Stat((volumeDir)) + _, err = os.Stat(volumeDir) if err != nil { if os.IsNotExist(err) { return 0, errVolumeNotFound @@ -1039,12 +1379,12 @@ func (s *posix) ReadFile(volume, path string, offset int64, buffer []byte, verif // Validate effective path length before reading. filePath := pathJoin(volumeDir, path) - if err = checkPathLength((filePath)); err != nil { + if err = checkPathLength(filePath); err != nil { return 0, err } // Open the file for reading. - file, err := os.Open((filePath)) + file, err := os.Open(filePath) if err != nil { switch { case os.IsNotExist(err): @@ -1108,13 +1448,13 @@ func (s *posix) ReadFile(volume, path string, offset int64, buffer []byte, verif return int64(len(buffer)), nil } -func (s *posix) openFile(volume, path string, mode int) (f *os.File, err error) { +func (s *xlStorage) openFile(volume, path string, mode int) (f *os.File, err error) { volumeDir, err := s.getVolDir(volume) if err != nil { return nil, err } // Stat a volume entry. - _, err = os.Stat((volumeDir)) + _, err = os.Stat(volumeDir) if err != nil { if os.IsNotExist(err) { return nil, errVolumeNotFound @@ -1125,7 +1465,7 @@ func (s *posix) openFile(volume, path string, mode int) (f *os.File, err error) } filePath := pathJoin(volumeDir, path) - if err = checkPathLength((filePath)); err != nil { + if err = checkPathLength(filePath); err != nil { return nil, err } @@ -1164,7 +1504,7 @@ func (s *posix) openFile(volume, path string, mode int) (f *os.File, err error) } // ReadFileStream - Returns the read stream of the file. 
-func (s *posix) ReadFileStream(volume, path string, offset, length int64) (io.ReadCloser, error) {
+func (s *xlStorage) ReadFileStream(volume, path string, offset, length int64) (io.ReadCloser, error) {
 	if offset < 0 {
 		return nil, errInvalidArgument
 	}
@@ -1174,7 +1514,7 @@ func (s *posix) ReadFileStream(volume, path string, offset, length int64) (io.Re
 		return nil, err
 	}
 	// Stat a volume entry.
-	_, err = os.Stat((volumeDir))
+	_, err = os.Stat(volumeDir)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return nil, errVolumeNotFound
@@ -1186,12 +1526,12 @@ func (s *posix) ReadFileStream(volume, path string, offset, length int64) (io.Re
 
 	// Validate effective path length before reading.
 	filePath := pathJoin(volumeDir, path)
-	if err = checkPathLength((filePath)); err != nil {
+	if err = checkPathLength(filePath); err != nil {
 		return nil, err
 	}
 
 	// Open the file for reading.
-	file, err := os.Open((filePath))
+	file, err := os.Open(filePath)
 	if err != nil {
 		switch {
 		case os.IsNotExist(err):
@@ -1257,7 +1597,7 @@ func (c closeWrapper) Close() error {
 }
 
 // CreateFile - creates the file.
-func (s *posix) CreateFile(volume, path string, fileSize int64, r io.Reader) (err error) {
+func (s *xlStorage) CreateFile(volume, path string, fileSize int64, r io.Reader) (err error) {
 	if fileSize < -1 {
 		return errInvalidArgument
 	}
@@ -1280,7 +1620,7 @@ func (s *posix) CreateFile(volume, path string, fileSize int64, r io.Reader) (er
 		return err
 	}
 	// Stat a volume entry.
-	_, err = os.Stat((volumeDir))
+	_, err = os.Stat(volumeDir)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return errVolumeNotFound
@@ -1291,7 +1631,7 @@ func (s *posix) CreateFile(volume, path string, fileSize int64, r io.Reader) (er
 	}
 
 	filePath := pathJoin(volumeDir, path)
-	if err = checkPathLength((filePath)); err != nil {
+	if err = checkPathLength(filePath); err != nil {
 		return err
 	}
 
@@ -1360,15 +1700,13 @@ func (s *posix) CreateFile(volume, path string, fileSize int64, r io.Reader) (er
 	return nil
 }
 
-func (s *posix) WriteAll(volume, path string, reader io.Reader) (err error) {
+func (s *xlStorage) WriteAll(volume, path string, reader io.Reader) (err error) {
 	atomic.AddInt32(&s.activeIOCount, 1)
 	defer func() {
 		atomic.AddInt32(&s.activeIOCount, -1)
 	}()
 
-	// Create file if not found. Note that it is created with os.O_EXCL flag as the file
-	// always is supposed to be created in the tmp directory with a unique file name.
-	w, err := s.openFile(volume, path, os.O_CREATE|os.O_SYNC|os.O_WRONLY|os.O_EXCL)
+	w, err := s.openFile(volume, path, os.O_CREATE|os.O_SYNC|os.O_WRONLY)
 	if err != nil {
 		return err
 	}
@@ -1384,7 +1722,7 @@ func (s *posix) WriteAll(volume, path string, reader io.Reader) (err error) {
 
 // AppendFile - append a byte array at path, if file doesn't exist at
 // path this call explicitly creates it.
-func (s *posix) AppendFile(volume, path string, buf []byte) (err error) {
+func (s *xlStorage) AppendFile(volume, path string, buf []byte) (err error) {
 	atomic.AddInt32(&s.activeIOCount, 1)
 	defer func() {
 		atomic.AddInt32(&s.activeIOCount, -1)
 	}()
@@ -1405,8 +1743,8 @@ func (s *posix) AppendFile(volume, path string, buf []byte) (err error) {
 	return w.Close()
 }
 
-// StatFile - get file info.
-func (s *posix) StatFile(volume, path string) (file FileInfo, err error) {
+// CheckParts checks if the given path has all the necessary parts available.
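+// Parts are expected at <path>/<fi.DataDir>/part.N for each entry in fi.Parts.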
+func (s *xlStorage) CheckParts(volume, path string, fi FileInfo) error { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -1414,48 +1752,83 @@ func (s *posix) StatFile(volume, path string) (file FileInfo, err error) { volumeDir, err := s.getVolDir(volume) if err != nil { - return FileInfo{}, err - } - // Stat a volume entry. - _, err = os.Stat((volumeDir)) - if err != nil { - if os.IsNotExist(err) { - return FileInfo{}, errVolumeNotFound - } - return FileInfo{}, err + return err } - filePath := slashpath.Join(volumeDir, path) - if err = checkPathLength((filePath)); err != nil { - return FileInfo{}, err + // Stat a volume entry. + if _, err = os.Stat(volumeDir); err != nil { + if os.IsNotExist(err) { + return errVolumeNotFound + } + return err } - st, err := os.Stat((filePath)) - if err != nil { - switch { - case os.IsNotExist(err): - // File is really not found. - return FileInfo{}, errFileNotFound - case isSysErrIO(err): - return FileInfo{}, errFaultyDisk - case isSysErrNotDir(err): - // File path cannot be verified since one of the parents is a file. - return FileInfo{}, errFileNotFound - default: - // Return all errors here. - return FileInfo{}, err + + for _, part := range fi.Parts { + partPath := pathJoin(path, fi.DataDir, fmt.Sprintf("part.%d", part.Number)) + filePath := pathJoin(volumeDir, partPath) + if err = checkPathLength(filePath); err != nil { + return err + } + st, err := os.Stat(filePath) + if err != nil { + return osErrToFileErr(err) + } + if st.Mode().IsDir() { + return errFileNotFound } } + + return nil +} + +// CheckFile check if path has necessary metadata. +func (s *xlStorage) CheckFile(volume, path string) error { + atomic.AddInt32(&s.activeIOCount, 1) + defer func() { + atomic.AddInt32(&s.activeIOCount, -1) + }() + + volumeDir, err := s.getVolDir(volume) + if err != nil { + return err + } + + // Stat a volume entry. + _, err = os.Stat(volumeDir) + if err != nil { + if os.IsNotExist(err) { + return errVolumeNotFound + } + return err + } + + filePath := pathJoin(volumeDir, path, xlStorageFormatFile) + if err = checkPathLength(filePath); err != nil { + return err + } + + filePathOld := pathJoin(volumeDir, path, xlStorageFormatFileV1) + if err = checkPathLength(filePathOld); err != nil { + return err + } + + st, err := os.Stat(filePath) + if err != nil && !os.IsNotExist(err) { + return osErrToFileErr(err) + } + if st == nil { + st, err = os.Stat(filePathOld) + if err != nil { + return osErrToFileErr(err) + } + } + // If its a directory its not a regular file. if st.Mode().IsDir() { - return FileInfo{}, errFileNotFound + return errFileNotFound } - return FileInfo{ - Volume: volume, - Name: path, - ModTime: st.ModTime(), - Size: st.Size(), - Mode: st.Mode(), - }, nil + + return nil } // deleteFile deletes a file or a directory if its empty unless recursive @@ -1467,6 +1840,7 @@ func deleteFile(basePath, deletePath string, recursive bool) error { if basePath == "" || deletePath == "" { return nil } + isObjectDir := HasSuffix(deletePath, SlashSeparator) basePath = filepath.Clean(basePath) deletePath = filepath.Clean(deletePath) if !strings.HasPrefix(deletePath, basePath) || deletePath == basePath { @@ -1482,6 +1856,11 @@ func deleteFile(basePath, deletePath string, recursive bool) error { if err != nil { switch { case isSysErrNotEmpty(err): + // if object is a directory, but if its not empty + // return FileNotFound to indicate its an empty prefix. 
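+			// (i.e. the directory still has entries beneath it, so the
+			// directory object itself is treated as absent)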
+ if isObjectDir { + return errFileNotFound + } // Ignore errors if the directory is not empty. The server relies on // this functionality, and sometimes uses recursion that should not // error on parent directories. @@ -1506,53 +1885,8 @@ func deleteFile(basePath, deletePath string, recursive bool) error { return nil } -// DeletePrefixes forcibly deletes all the contents of a set of specified paths. -// Parent directories are automatically removed if they become empty. err can -// bil nil while errs can contain some errors for corresponding objects. No error -// is set if a specified prefix path does not exist. -func (s *posix) DeletePrefixes(volume string, paths []string) (errs []error, err error) { - atomic.AddInt32(&s.activeIOCount, 1) - defer func() { - atomic.AddInt32(&s.activeIOCount, -1) - }() - - volumeDir, err := s.getVolDir(volume) - if err != nil { - return nil, err - } - - // Stat a volume entry. - _, err = os.Stat(volumeDir) - if err != nil { - if os.IsNotExist(err) { - return nil, errVolumeNotFound - } else if os.IsPermission(err) { - return nil, errVolumeAccessDenied - } else if isSysErrIO(err) { - return nil, errFaultyDisk - } - return nil, err - } - - errs = make([]error, len(paths)) - // Following code is needed so that we retain SlashSeparator - // suffix if any in path argument. - for idx, path := range paths { - filePath := pathJoin(volumeDir, path) - errs[idx] = checkPathLength(filePath) - if errs[idx] != nil { - continue - } - // Delete file or a directory recursively, delete parent - // directory as well if its empty. - errs[idx] = deleteFile(volumeDir, filePath, true) - } - - return -} - // DeleteFile - delete a file at path. -func (s *posix) DeleteFile(volume, path string) (err error) { +func (s *xlStorage) DeleteFile(volume, path string) (err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -1587,7 +1921,7 @@ func (s *posix) DeleteFile(volume, path string) (err error) { return deleteFile(volumeDir, filePath, false) } -func (s *posix) DeleteFileBulk(volume string, paths []string) (errs []error, err error) { +func (s *xlStorage) DeleteFileBulk(volume string, paths []string) (errs []error, err error) { atomic.AddInt32(&s.activeIOCount, 1) defer func() { atomic.AddInt32(&s.activeIOCount, -1) @@ -1626,8 +1960,215 @@ func (s *posix) DeleteFileBulk(volume string, paths []string) (errs []error, err return } +// RenameData - rename source path to destination path atomically, metadata and data directory. +func (s *xlStorage) RenameData(srcVolume, srcPath, dataDir, dstVolume, dstPath string) (err error) { + atomic.AddInt32(&s.activeIOCount, 1) + defer func() { + atomic.AddInt32(&s.activeIOCount, -1) + }() + + srcVolumeDir, err := s.getVolDir(srcVolume) + if err != nil { + return err + } + + dstVolumeDir, err := s.getVolDir(dstVolume) + if err != nil { + return err + } + + // Stat a volume entry. 
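+	// Both the source and the destination volumes must exist.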
+	_, err = os.Stat(srcVolumeDir)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return errVolumeNotFound
+		} else if isSysErrIO(err) {
+			return errFaultyDisk
+		}
+		return err
+	}
+	_, err = os.Stat(dstVolumeDir)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return errVolumeNotFound
+		} else if isSysErrIO(err) {
+			return errFaultyDisk
+		}
+		return err
+	}
+
+	srcFilePath := slashpath.Join(srcVolumeDir, pathJoin(srcPath, xlStorageFormatFile))
+	dstFilePath := slashpath.Join(dstVolumeDir, pathJoin(dstPath, xlStorageFormatFile))
+
+	var srcDataPath string
+	var dstDataPath string
+	if dataDir != "" {
+		srcDataPath = retainSlash(pathJoin(srcVolumeDir, srcPath, dataDir))
+		// Make sure to always use path.Join here; do not use pathJoin as
+		// it would add a trailing `/`, which gets in the way of renameAll()
+		// and parent directory creation.
+		dstDataPath = slashpath.Join(dstVolumeDir, dstPath, dataDir)
+	}
+
+	if err = checkPathLength(srcFilePath); err != nil {
+		return err
+	}
+
+	if err = checkPathLength(dstFilePath); err != nil {
+		return err
+	}
+
+	srcBuf, err := ioutil.ReadFile(srcFilePath)
+	if err != nil {
+		return osErrToFileErr(err)
+	}
+
+	fi, err := getFileInfo(srcBuf, dstVolume, dstPath, "")
+	if err != nil {
+		return err
+	}
+
+	dstBuf, err := ioutil.ReadFile(dstFilePath)
+	if err != nil && !os.IsNotExist(err) {
+		return osErrToFileErr(err)
+	}
+
+	var xlMeta xlMetaV2
+	var legacyPreserved bool
+	if len(dstBuf) > 0 {
+		if isXL2V1Format(dstBuf) {
+			if err = xlMeta.Load(dstBuf); err != nil {
+				logger.LogIf(s.ctx, err)
+				return errFileCorrupt
+			}
+		} else {
+			// This code-path is to preserve the legacy data.
+			xlMetaLegacy := &xlMetaV1Object{}
+			var json = jsoniter.ConfigCompatibleWithStandardLibrary
+			if err := json.Unmarshal(dstBuf, xlMetaLegacy); err != nil {
+				logger.LogIf(s.ctx, err)
+				return errFileCorrupt
+			}
+			if err = xlMeta.AddLegacy(xlMetaLegacy); err != nil {
+				logger.LogIf(s.ctx, err)
+				return errFileCorrupt
+			}
+			legacyPreserved = true
+		}
+	} else {
+		// It is possible that some drives may not have the `xl.meta` file;
+		// in such scenarios check if at least the `part.1` file exists,
+		// which indicates a legacy version.
+		currentDataPath := pathJoin(dstVolumeDir, dstPath)
+		entries, err := readDirN(currentDataPath, 1)
+		if err != nil && err != errFileNotFound {
+			return osErrToFileErr(err)
+		}
+		for _, entry := range entries {
+			if entry == xlStorageFormatFile {
+				continue
+			}
+			if strings.HasSuffix(entry, slashSeparator) {
+				continue
+			}
+			if strings.HasPrefix(entry, "part.") {
+				legacyPreserved = true
+				break
+			}
+		}
+	}
+
+	if legacyPreserved {
+		// Preserve all the legacy data; this could be slow, but at most
+		// there can be 10,000 parts.
+		currentDataPath := pathJoin(dstVolumeDir, dstPath)
+		entries, err := readDir(currentDataPath)
+		if err != nil {
+			return osErrToFileErr(err)
+		}
+		legacyDataPath := pathJoin(dstVolumeDir, dstPath, legacyDataDir)
+		// The legacy data dir holds old content; honor the system umask.
+		if err = os.Mkdir(legacyDataPath, 0777); err != nil {
+			if isSysErrIO(err) {
+				return errFaultyDisk
+			}
+			return osErrToFileErr(err)
+		}
+
+		for _, entry := range entries {
+			if entry == xlStorageFormatFile {
+				continue
+			}
+
+			if err = os.Rename(pathJoin(currentDataPath, entry), pathJoin(legacyDataPath, entry)); err != nil {
+				if isSysErrIO(err) {
+					return errFaultyDisk
+				}
+				return osErrToFileErr(err)
+			}
+
+			// Sync all the metadata operations once renames are done.
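+			// (best effort: the sync result is not checked and does not
+			// fail the rename)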
+			globalSync()
+		}
+	}
+
+	var oldDstDataPath string
+	if fi.VersionID == "" {
+		// Return the latest "null" versionID info.
+		ofi, err := xlMeta.ToFileInfo(dstVolume, dstPath, nullVersionID)
+		if err == nil {
+			// Purge the destination path: nothing needs to be preserved
+			// since a versioned object was not requested.
+			oldDstDataPath = pathJoin(dstVolumeDir, dstPath, ofi.DataDir)
+		}
+	}
+
+	if err = xlMeta.AddVersion(fi); err != nil {
+		return err
+	}
+
+	dstBuf, err = xlMeta.MarshalMsg(append(xlHeader[:], xlVersionV1[:]...))
+	if err != nil {
+		return errFileCorrupt
+	}
+
+	if err = s.WriteAll(srcVolume, pathJoin(srcPath, xlStorageFormatFile), bytes.NewReader(dstBuf)); err != nil {
+		return err
+	}
+
+	if err = renameAll(srcFilePath, dstFilePath); err != nil {
+		if isSysErrIO(err) {
+			return errFaultyDisk
+		}
+		return err
+	}
+
+	if srcDataPath != "" {
+		removeAll(oldDstDataPath)
+		removeAll(dstDataPath)
+		if err = renameAll(srcDataPath, dstDataPath); err != nil {
+			if isSysErrIO(err) {
+				return errFaultyDisk
+			}
+			return err
+		}
+	}
+
+	// Remove the parent dir of the source file if it is empty.
+	if parentDir := slashpath.Dir(srcFilePath); isDirEmpty(parentDir) {
+		deleteFile(srcVolumeDir, parentDir, false)
+	}
+
+	if srcDataPath != "" {
+		if parentDir := slashpath.Dir(srcDataPath); isDirEmpty(parentDir) {
+			deleteFile(srcVolumeDir, parentDir, false)
+		}
+	}
+
+	return nil
+}
+
 // RenameFile - rename source path to destination path atomically.
-func (s *posix) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err error) {
+func (s *xlStorage) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err error) {
 	atomic.AddInt32(&s.activeIOCount, 1)
 	defer func() {
 		atomic.AddInt32(&s.activeIOCount, -1)
@@ -1715,40 +2256,11 @@ func (s *posix) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err e
 	return nil
 }
 
-func (s *posix) VerifyFile(volume, path string, fileSize int64, algo BitrotAlgorithm, sum []byte, shardSize int64) (err error) {
-	atomic.AddInt32(&s.activeIOCount, 1)
-	defer func() {
-		atomic.AddInt32(&s.activeIOCount, -1)
-	}()
-
-	volumeDir, err := s.getVolDir(volume)
-	if err != nil {
-		return err
-	}
-
-	// Stat a volume entry.
-	_, err = os.Stat(volumeDir)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return errVolumeNotFound
-		} else if isSysErrIO(err) {
-			return errFaultyDisk
-		} else if os.IsPermission(err) {
-			return errVolumeAccessDenied
-		}
-		return err
-	}
-
-	// Validate effective path length before reading.
-	filePath := pathJoin(volumeDir, path)
-	if err = checkPathLength(filePath); err != nil {
-		return err
-	}
-
+func (s *xlStorage) bitrotVerify(partPath string, partSize int64, algo BitrotAlgorithm, sum []byte, shardSize int64) error {
 	// Open the file for reading.
-	file, err := os.Open(filePath)
+	file, err := os.Open(partPath)
 	if err != nil {
-		return osErrToFSFileErr(err)
+		return osErrToFileErr(err)
 	}
 
 	// Close the file descriptor.
@@ -1783,7 +2295,7 @@ func (s *posix) VerifyFile(volume, path string, fileSize int64, algo BitrotAlgor
 	// Calculate the size of the bitrot file and compare
 	// it with the actual file size.
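+	// A size mismatch indicates a truncated or corrupted part.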
- if size != bitrotShardFileSize(fileSize, shardSize, algo) { + if size != bitrotShardFileSize(partSize, shardSize, algo) { return errFileCorrupt } @@ -1814,3 +2326,50 @@ func (s *posix) VerifyFile(volume, path string, fileSize int64, algo BitrotAlgor } } } + +func (s *xlStorage) VerifyFile(volume, path string, fi FileInfo) (err error) { + atomic.AddInt32(&s.activeIOCount, 1) + defer func() { + atomic.AddInt32(&s.activeIOCount, -1) + }() + + volumeDir, err := s.getVolDir(volume) + if err != nil { + return err + } + + // Stat a volume entry. + _, err = os.Stat(volumeDir) + if err != nil { + if os.IsNotExist(err) { + return errVolumeNotFound + } else if isSysErrIO(err) { + return errFaultyDisk + } else if os.IsPermission(err) { + return errVolumeAccessDenied + } + return err + } + + erasure := fi.Erasure + for _, part := range fi.Parts { + checksumInfo := erasure.GetChecksumInfo(part.Number) + partPath := pathJoin(volumeDir, path, fi.DataDir, fmt.Sprintf("part.%d", part.Number)) + if err := s.bitrotVerify(partPath, + erasure.ShardFileSize(part.Size), + checksumInfo.Algorithm, + checksumInfo.Hash, erasure.ShardSize()); err != nil { + if !IsErr(err, []error{ + errFileNotFound, + errVolumeNotFound, + errFileCorrupt, + }...) { + logger.GetReqInfo(s.ctx).AppendTags("disk", s.String()) + logger.LogIf(s.ctx, err) + } + return err + } + } + + return nil +} diff --git a/cmd/posix_test.go b/cmd/xl-storage_test.go similarity index 69% rename from cmd/posix_test.go rename to cmd/xl-storage_test.go index 0c3cb23f4..dfbaa5ab8 100644 --- a/cmd/posix_test.go +++ b/cmd/xl-storage_test.go @@ -113,15 +113,16 @@ func TestIsValidVolname(t *testing.T) { } } -// creates a temp dir and sets up posix layer. -// returns posix layer, temp dir path to be used for the purpose of tests. -func newPosixTestSetup() (StorageAPI, string, error) { +// creates a temp dir and sets up xlStorage layer. +// returns xlStorage layer, temp dir path to be used for the purpose of tests. +func newXLStorageTestSetup() (*xlStorageDiskIDCheck, string, error) { diskPath, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { return nil, "", err } - // Initialize a new posix layer. - storage, err := newPosix(diskPath, "") + + // Initialize a new xlStorage layer. + storage, err := newXLStorage(diskPath, "") if err != nil { return nil, "", err } @@ -134,7 +135,7 @@ func newPosixTestSetup() (StorageAPI, string, error) { if err != nil { return nil, "", err } - return &posixDiskIDCheck{storage: storage, diskID: "da017d62-70e3-45f1-8a1a-587707e69ad1"}, diskPath, nil + return &xlStorageDiskIDCheck{storage: storage, diskID: "da017d62-70e3-45f1-8a1a-587707e69ad1"}, diskPath, nil } // createPermDeniedFile - creates temporary directory and file with path '/mybucket/myobject' @@ -190,8 +191,8 @@ func removePermDeniedFile(permDeniedDir string) { } } -// TestPosixs posix.getDiskInfo() -func TestPosixGetDiskInfo(t *testing.T) { +// TestXLStorages xlStorage.getDiskInfo() +func TestXLStorageGetDiskInfo(t *testing.T) { path, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatalf("Unable to create a temporary directory, %s", err) @@ -214,7 +215,7 @@ func TestPosixGetDiskInfo(t *testing.T) { } } -func TestPosixIsDirEmpty(t *testing.T) { +func TestXLStorageIsDirEmpty(t *testing.T) { tmp, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { t.Fatal(err) @@ -250,51 +251,51 @@ func TestPosixIsDirEmpty(t *testing.T) { } } -// TestPosixReadAll - TestPosixs the functionality implemented by posix ReadAll storage API. 
-func TestPosixReadAll(t *testing.T) {
-	// create posix test setup
-	posixStorage, path, err := newPosixTestSetup()
+// TestXLStorageReadAll - tests the functionality implemented by the xlStorage ReadAll storage API.
+func TestXLStorageReadAll(t *testing.T) {
+	// create xlStorage test setup
+	xlStorage, path, err := newXLStorageTestSetup()
 	if err != nil {
-		t.Fatalf("Unable to create posix test setup, %s", err)
+		t.Fatalf("Unable to create xlStorage test setup, %s", err)
 	}
 	defer os.RemoveAll(path)
 
 	// Create files for the test cases.
-	if err = posixStorage.MakeVol("exists"); err != nil {
+	if err = xlStorage.MakeVol("exists"); err != nil {
 		t.Fatalf("Unable to create a volume \"exists\", %s", err)
 	}
-	if err = posixStorage.AppendFile("exists", "as-directory/as-file", []byte("Hello, World")); err != nil {
+	if err = xlStorage.AppendFile("exists", "as-directory/as-file", []byte("Hello, World")); err != nil {
 		t.Fatalf("Unable to create a file \"as-directory/as-file\", %s", err)
 	}
-	if err = posixStorage.AppendFile("exists", "as-file", []byte("Hello, World")); err != nil {
+	if err = xlStorage.AppendFile("exists", "as-file", []byte("Hello, World")); err != nil {
 		t.Fatalf("Unable to create a file \"as-file\", %s", err)
 	}
-	if err = posixStorage.AppendFile("exists", "as-file-parent", []byte("Hello, World")); err != nil {
+	if err = xlStorage.AppendFile("exists", "as-file-parent", []byte("Hello, World")); err != nil {
 		t.Fatalf("Unable to create a file \"as-file-parent\", %s", err)
 	}
 
-	// TestPosixcases to validate different conditions for ReadAll API.
+	// Test cases to validate different conditions for the ReadAll API.
 	testCases := []struct {
 		volume string
 		path   string
 		err    error
 	}{
-		// TestPosix case - 1.
+		// TestXLStorage case - 1.
 		// Validate volume does not exist.
 		{
 			volume: "i-dont-exist",
 			path:   "",
 			err:    errVolumeNotFound,
 		},
-		// TestPosix case - 2.
+		// TestXLStorage case - 2.
 		// Validate bad condition file does not exist.
 		{
 			volume: "exists",
 			path:   "as-file-not-found",
 			err:    errFileNotFound,
 		},
-		// TestPosix case - 3.
+		// TestXLStorage case - 3.
 		// Validate bad condition file exists as prefix/directory and
 		// we are attempting to read it.
 		{
@@ -302,21 +303,21 @@ func TestPosixReadAll(t *testing.T) {
 			path: "as-directory",
 			err:  errFileNotFound,
 		},
-		// TestPosix case - 4.
+		// TestXLStorage case - 4.
 		{
 			volume: "exists",
 			path:   "as-file-parent/as-file",
 			err:    errFileNotFound,
 		},
-		// TestPosix case - 5.
+		// TestXLStorage case - 5.
 		// Validate the good condition file exists and we are able to read it.
 		{
 			volume: "exists",
 			path:   "as-file",
 			err:    nil,
 		},
-		// TestPosix case - 6.
-		// TestPosix case with invalid volume name.
+		// TestXLStorage case - 6.
+		// TestXLStorage case with invalid volume name.
 		{
 			volume: "ab",
 			path:   "as-file",
@@ -327,20 +328,20 @@ func TestPosixReadAll(t *testing.T) {
 	var dataRead []byte
 
 	// Run through all the test cases and validate for ReadAll.
for i, testCase := range testCases { - dataRead, err = posixStorage.ReadAll(testCase.volume, testCase.path) + dataRead, err = xlStorage.ReadAll(testCase.volume, testCase.path) if err != testCase.err { - t.Fatalf("TestPosix %d: Expected err \"%s\", got err \"%s\"", i+1, testCase.err, err) + t.Fatalf("TestXLStorage %d: Expected err \"%s\", got err \"%s\"", i+1, testCase.err, err) } if err == nil { if string(dataRead) != string([]byte("Hello, World")) { - t.Errorf("TestPosix %d: Expected the data read to be \"%s\", but instead got \"%s\"", i+1, "Hello, World", string(dataRead)) + t.Errorf("TestXLStorage %d: Expected the data read to be \"%s\", but instead got \"%s\"", i+1, "Hello, World", string(dataRead)) } } } } -// TestPosixNewPosix all the cases handled in posix storage layer initialization. -func TestPosixNewPosix(t *testing.T) { +// TestNewXLStorage all the cases handled in xlStorage storage layer initialization. +func TestNewXLStorage(t *testing.T) { // Temporary dir name. tmpDirName := globalTestTmpDir + SlashSeparator + "minio-" + nextSuffix() // Temporary file name. @@ -349,7 +350,7 @@ func TestPosixNewPosix(t *testing.T) { f.Close() defer os.Remove(tmpFileName) - // List of all tests for posix initialization. + // List of all tests for xlStorage initialization. testCases := []struct { name string err error @@ -369,27 +370,27 @@ func TestPosixNewPosix(t *testing.T) { // not a directory. { tmpFileName, - syscall.ENOTDIR, + errDiskNotDir, }, } // Validate all test cases. for i, testCase := range testCases { - // Initialize a new posix layer. - _, err := newPosix(testCase.name, "") + // Initialize a new xlStorage layer. + _, err := newXLStorage(testCase.name, "") if err != testCase.err { - t.Fatalf("TestPosix %d failed wanted: %s, got: %s", i+1, err, testCase.err) + t.Fatalf("TestXLStorage %d failed wanted: %s, got: %s", i+1, err, testCase.err) } } } -// TestPosixMakeVol - TestPosix validate the logic for creation of new posix volume. +// TestXLStorageMakeVol - TestXLStorage validate the logic for creation of new xlStorage volume. // Asserts the failures too against the expected failures. -func TestPosixMakeVol(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +func TestXLStorageMakeVol(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) @@ -407,25 +408,25 @@ func TestPosixMakeVol(t *testing.T) { volName string expectedErr error }{ - // TestPosix case - 1. + // TestXLStorage case - 1. // A valid case, volume creation is expected to succeed. { volName: "success-vol", expectedErr: nil, }, - // TestPosix case - 2. + // TestXLStorage case - 2. // Case where a file exists by the name of the volume to be created. { volName: "vol-as-file", expectedErr: errVolumeExists, }, - // TestPosix case - 3. + // TestXLStorage case - 3. { volName: "existing-vol", expectedErr: errVolumeExists, }, - // TestPosix case - 5. - // TestPosix case with invalid volume name. + // TestXLStorage case - 5. + // TestXLStorage case with invalid volume name. 
{ volName: "ab", expectedErr: errInvalidArgument, @@ -433,15 +434,12 @@ func TestPosixMakeVol(t *testing.T) { } for i, testCase := range testCases { - if _, ok := posixStorage.(*posixDiskIDCheck); !ok { - t.Errorf("Expected the StorageAPI to be of type *posix") - } - if err := posixStorage.MakeVol(testCase.volName); err != testCase.expectedErr { - t.Fatalf("TestPosix %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + if err := xlStorage.MakeVol(testCase.volName); err != testCase.expectedErr { + t.Fatalf("TestXLStorage %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } } - // TestPosix for permission denied. + // TestXLStorage for permission denied. if runtime.GOOS != globalWindowsOSName { permDeniedDir, err := ioutil.TempDir(globalTestTmpDir, "minio-") if err != nil { @@ -452,19 +450,19 @@ func TestPosixMakeVol(t *testing.T) { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - // Initialize posix storage layer for permission denied error. - _, err = newPosix(permDeniedDir, "") + // Initialize xlStorage storage layer for permission denied error. + _, err = newXLStorage(permDeniedDir, "") if err != nil && !os.IsPermission(err) { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } if err = os.Chmod(permDeniedDir, 0755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - posixStorage, err = newPosix(permDeniedDir, "") + xlStorageNew, err := newXLStorage(permDeniedDir, "") if err != nil { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } // change backend permissions for MakeVol error. @@ -472,27 +470,27 @@ func TestPosixMakeVol(t *testing.T) { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - if err := posixStorage.MakeVol("test-vol"); err != errDiskAccessDenied { + if err := xlStorageNew.MakeVol("test-vol"); err != errDiskAccessDenied { t.Fatalf("expected: %s, got: %s", errDiskAccessDenied, err) } } } -// TestPosixDeleteVol - Validates the expected behavior of posix.DeleteVol for various cases. -func TestPosixDeleteVol(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageDeleteVol - Validates the expected behavior of xlStorage.DeleteVol for various cases. +func TestXLStorageDeleteVol(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) // Setup test environment. - if err = posixStorage.MakeVol("success-vol"); err != nil { + if err = xlStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - // TestPosix failure cases. + // TestXLStorage failure cases. vol := slashpath.Join(path, "nonempty-vol") if err = os.Mkdir(vol, 0777); err != nil { t.Fatalf("Unable to create directory, %s", err) @@ -505,25 +503,25 @@ func TestPosixDeleteVol(t *testing.T) { volName string expectedErr error }{ - // TestPosix case - 1. + // TestXLStorage case - 1. // A valida case. Empty vol, should be possible to delete. { volName: "success-vol", expectedErr: nil, }, - // TestPosix case - 2. + // TestXLStorage case - 2. // volume is non-existent. { volName: "nonexistent-vol", expectedErr: errVolumeNotFound, }, - // TestPosix case - 3. 
+ // TestXLStorage case - 3. // It shouldn't be possible to delete an non-empty volume, validating the same. { volName: "nonempty-vol", expectedErr: errVolumeNotEmpty, }, - // TestPosix case - 5. + // TestXLStorage case - 5. // Invalid volume name. { volName: "ab", @@ -532,15 +530,12 @@ func TestPosixDeleteVol(t *testing.T) { } for i, testCase := range testCases { - if _, ok := posixStorage.(*posixDiskIDCheck); !ok { - t.Errorf("Expected the StorageAPI to be of type *posixDiskIDCheck") - } - if err = posixStorage.DeleteVol(testCase.volName, false); err != testCase.expectedErr { - t.Fatalf("TestPosix: %d, expected: %s, got: %s", i+1, testCase.expectedErr, err) + if err = xlStorage.DeleteVol(testCase.volName, false); err != testCase.expectedErr { + t.Fatalf("TestXLStorage: %d, expected: %s, got: %s", i+1, testCase.expectedErr, err) } } - // TestPosix for permission denied. + // TestXLStorage for permission denied. if runtime.GOOS != globalWindowsOSName { var permDeniedDir string if permDeniedDir, err = ioutil.TempDir(globalTestTmpDir, "minio-"); err != nil { @@ -554,19 +549,19 @@ func TestPosixDeleteVol(t *testing.T) { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - // Initialize posix storage layer for permission denied error. - _, err = newPosix(permDeniedDir, "") + // Initialize xlStorage storage layer for permission denied error. + _, err = newXLStorage(permDeniedDir, "") if err != nil && !os.IsPermission(err) { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } if err = os.Chmod(permDeniedDir, 0755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - posixStorage, err = newPosix(permDeniedDir, "") + xlStorageNew, err := newXLStorage(permDeniedDir, "") if err != nil { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } // change backend permissions for MakeVol error. @@ -574,37 +569,37 @@ func TestPosixDeleteVol(t *testing.T) { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - if err = posixStorage.DeleteVol("mybucket", false); err != errDiskAccessDenied { + if err = xlStorageNew.DeleteVol("mybucket", false); err != errDiskAccessDenied { t.Fatalf("expected: Permission error, got: %s", err) } } - posixDeletedStorage, diskPath, err := newPosixTestSetup() + xlStorageDeletedStorage, diskPath, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } // removing the disk, used to recreate disk not found error. os.RemoveAll(diskPath) - // TestPosix for delete on an removed disk. + // TestXLStorage for delete on an removed disk. // should fail with disk not found. - err = posixDeletedStorage.DeleteVol("Del-Vol", false) + err = xlStorageDeletedStorage.DeleteVol("Del-Vol", false) if err != errDiskNotFound { t.Errorf("Expected: \"Disk not found\", got \"%s\"", err) } } -// TestPosixStatVol - TestPosixs validate the volume info returned by posix.StatVol() for various inputs. -func TestPosixStatVol(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageStatVol - TestXLStorages validate the volume info returned by xlStorage.StatVol() for various inputs. 
+func TestXLStorageStatVol(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) // Setup test environment. - if err = posixStorage.MakeVol("success-vol"); err != nil { + if err = xlStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } @@ -612,17 +607,17 @@ func TestPosixStatVol(t *testing.T) { volName string expectedErr error }{ - // TestPosix case - 1. + // TestXLStorage case - 1. { volName: "success-vol", expectedErr: nil, }, - // TestPosix case - 2. + // TestXLStorage case - 2. { volName: "nonexistent-vol", expectedErr: errVolumeNotFound, }, - // TestPosix case - 3. + // TestXLStorage case - 3. { volName: "ab", expectedErr: errVolumeNotFound, @@ -630,58 +625,57 @@ func TestPosixStatVol(t *testing.T) { } for i, testCase := range testCases { - if _, ok := posixStorage.(*posixDiskIDCheck); !ok { - t.Errorf("Expected the StorageAPI to be of type *posix") - } - volInfo, err := posixStorage.StatVol(testCase.volName) + var volInfo VolInfo + volInfo, err = xlStorage.StatVol(testCase.volName) if err != testCase.expectedErr { - t.Fatalf("TestPosix case : %d, Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + t.Fatalf("TestXLStorage case : %d, Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } if err == nil { if volInfo.Name != testCase.volName { - t.Errorf("TestPosix case %d: Expected the volume name to be \"%s\", instead found \"%s\"", i+1, volInfo.Name, testCase.volName) + t.Errorf("TestXLStorage case %d: Expected the volume name to be \"%s\", instead found \"%s\"", + i+1, volInfo.Name, testCase.volName) } } } - posixDeletedStorage, diskPath, err := newPosixTestSetup() + xlStorageDeletedStorage, diskPath, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } // removing the disk, used to recreate disk not found error. os.RemoveAll(diskPath) - // TestPosix for delete on an removed disk. + // TestXLStorage for delete on an removed disk. // should fail with disk not found. - _, err = posixDeletedStorage.StatVol("Stat vol") + _, err = xlStorageDeletedStorage.StatVol("Stat vol") if err != errDiskNotFound { t.Errorf("Expected: \"Disk not found\", got \"%s\"", err) } } -// TestPosixListVols - Validates the result and the error output for posix volume listing functionality posix.ListVols(). -func TestPosixListVols(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageListVols - Validates the result and the error output for xlStorage volume listing functionality xlStorage.ListVols(). +func TestXLStorageListVols(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } var volInfos []VolInfo - // TestPosix empty list vols. - if volInfos, err = posixStorage.ListVols(); err != nil { + // TestXLStorage empty list vols. + if volInfos, err = xlStorage.ListVols(); err != nil { t.Fatalf("expected: , got: %s", err) } else if len(volInfos) != 1 { t.Fatalf("expected: one entry, got: %s", volInfos) } - // TestPosix non-empty list vols. 
- if err = posixStorage.MakeVol("success-vol"); err != nil { + // TestXLStorage non-empty list vols. + if err = xlStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - volInfos, err = posixStorage.ListVols() + volInfos, err = xlStorage.ListVols() if err != nil { t.Fatalf("expected: , got: %s", err) } @@ -702,35 +696,35 @@ func TestPosixListVols(t *testing.T) { // removing the path and simulating disk failure os.RemoveAll(path) // should fail with errDiskNotFound. - if _, err = posixStorage.ListVols(); err != errDiskNotFound { + if _, err = xlStorage.ListVols(); err != errDiskNotFound { t.Errorf("Expected to fail with \"%s\", but instead failed with \"%s\"", errDiskNotFound, err) } } -// TestPosixPosixListDir - TestPosixs validate the directory listing functionality provided by posix.ListDir . -func TestPosixPosixListDir(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageXlStorageListDir - TestXLStorages validate the directory listing functionality provided by xlStorage.ListDir . +func TestXLStorageXlStorageListDir(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) - // create posix test setup. - posixDeletedStorage, diskPath, err := newPosixTestSetup() + // create xlStorage test setup. + xlStorageDeletedStorage, diskPath, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } // removing the disk, used to recreate disk not found error. os.RemoveAll(diskPath) // Setup test environment. - if err = posixStorage.MakeVol("success-vol"); err != nil { + if err = xlStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err = posixStorage.AppendFile("success-vol", "abc/def/ghi/success-file", []byte("Hello, world")); err != nil { + if err = xlStorage.AppendFile("success-vol", "abc/def/ghi/success-file", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err = posixStorage.AppendFile("success-vol", "abc/xyz/ghi/success-file", []byte("Hello, world")); err != nil { + if err = xlStorage.AppendFile("success-vol", "abc/xyz/ghi/success-file", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } @@ -741,7 +735,7 @@ func TestPosixPosixListDir(t *testing.T) { expectedListDir []string expectedErr error }{ - // TestPosix case - 1. + // TestXLStorage case - 1. // valid case with existing volume and file to delete. { srcVol: "success-vol", @@ -749,7 +743,7 @@ func TestPosixPosixListDir(t *testing.T) { expectedListDir: []string{"def/", "xyz/"}, expectedErr: nil, }, - // TestPosix case - 1. + // TestXLStorage case - 1. // valid case with existing volume and file to delete. { srcVol: "success-vol", @@ -757,7 +751,7 @@ func TestPosixPosixListDir(t *testing.T) { expectedListDir: []string{"ghi/"}, expectedErr: nil, }, - // TestPosix case - 1. + // TestXLStorage case - 1. // valid case with existing volume and file to delete. { srcVol: "success-vol", @@ -765,21 +759,21 @@ func TestPosixPosixListDir(t *testing.T) { expectedListDir: []string{"success-file"}, expectedErr: nil, }, - // TestPosix case - 2. + // TestXLStorage case - 2. 
{ srcVol: "success-vol", srcPath: "abcdef", expectedErr: errFileNotFound, }, - // TestPosix case - 3. - // TestPosix case with invalid volume name. + // TestXLStorage case - 3. + // TestXLStorage case with invalid volume name. { srcVol: "ab", srcPath: "success-file", expectedErr: errVolumeNotFound, }, - // TestPosix case - 4. - // TestPosix case with non existent volume. + // TestXLStorage case - 4. + // TestXLStorage case with non existent volume. { srcVol: "non-existent-vol", srcPath: "success-file", @@ -789,83 +783,80 @@ func TestPosixPosixListDir(t *testing.T) { for i, testCase := range testCases { var dirList []string - if _, ok := posixStorage.(*posixDiskIDCheck); !ok { - t.Errorf("Expected the StorageAPI to be of type *posix") - } - dirList, err = posixStorage.ListDir(testCase.srcVol, testCase.srcPath, -1, "") + dirList, err = xlStorage.ListDir(testCase.srcVol, testCase.srcPath, -1) if err != testCase.expectedErr { - t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + t.Fatalf("TestXLStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } if err == nil { for _, expected := range testCase.expectedListDir { if !strings.Contains(strings.Join(dirList, ","), expected) { - t.Errorf("TestPosix case %d: Expected the directory listing to be \"%v\", but got \"%v\"", i+1, testCase.expectedListDir, dirList) + t.Errorf("TestXLStorage case %d: Expected the directory listing to be \"%v\", but got \"%v\"", i+1, testCase.expectedListDir, dirList) } } } } - // TestPosix for permission denied. + // TestXLStorage for permission denied. if runtime.GOOS != globalWindowsOSName { permDeniedDir := createPermDeniedFile(t) defer removePermDeniedFile(permDeniedDir) - // Initialize posix storage layer for permission denied error. - _, err = newPosix(permDeniedDir, "") + // Initialize xlStorage storage layer for permission denied error. + _, err = newXLStorage(permDeniedDir, "") if err != nil && !os.IsPermission(err) { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } if err = os.Chmod(permDeniedDir, 0755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - posixStorage, err = newPosix(permDeniedDir, "") + xlStorageNew, err := newXLStorage(permDeniedDir, "") if err != nil { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = posixStorage.DeleteFile("mybucket", "myobject"); err != errFileAccessDenied { + if err = xlStorageNew.DeleteFile("mybucket", "myobject"); err != errFileAccessDenied { t.Errorf("expected: %s, got: %s", errFileAccessDenied, err) } } - // TestPosix for delete on an removed disk. + // TestXLStorage for delete on an removed disk. // should fail with disk not found. - err = posixDeletedStorage.DeleteFile("del-vol", "my-file") + err = xlStorageDeletedStorage.DeleteFile("del-vol", "my-file") if err != errDiskNotFound { t.Errorf("Expected: \"Disk not found\", got \"%s\"", err) } } -// TestPosixDeleteFile - Series of test cases construct valid and invalid input data and validates the result and the error response. -func TestPosixDeleteFile(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageDeleteFile - Series of test cases construct valid and invalid input data and validates the result and the error response. 
+func TestXLStorageDeleteFile(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) - // create posix test setup - posixDeletedStorage, diskPath, err := newPosixTestSetup() + // create xlStorage test setup + xlStorageDeletedStorage, diskPath, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } // removing the disk, used to recreate disk not found error. os.RemoveAll(diskPath) // Setup test environment. - if err = posixStorage.MakeVol("success-vol"); err != nil { + if err = xlStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err = posixStorage.AppendFile("success-vol", "success-file", []byte("Hello, world")); err != nil { + if err = xlStorage.AppendFile("success-vol", "success-file", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err = posixStorage.MakeVol("no-permissions"); err != nil { + if err = xlStorage.MakeVol("no-permissions"); err != nil { t.Fatalf("Unable to create volume, %s", err.Error()) } - if err = posixStorage.AppendFile("no-permissions", "dir/file", []byte("Hello, world")); err != nil { + if err = xlStorage.AppendFile("no-permissions", "dir/file", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err.Error()) } // Parent directory must have write permissions, this is read + execute. @@ -878,43 +869,43 @@ func TestPosixDeleteFile(t *testing.T) { srcPath string expectedErr error }{ - // TestPosix case - 1. + // TestXLStorage case - 1. // valid case with existing volume and file to delete. { srcVol: "success-vol", srcPath: "success-file", expectedErr: nil, }, - // TestPosix case - 2. + // TestXLStorage case - 2. // The file was deleted in the last case, so DeleteFile should fail. { srcVol: "success-vol", srcPath: "success-file", expectedErr: errFileNotFound, }, - // TestPosix case - 3. - // TestPosix case with segment of the volume name > 255. + // TestXLStorage case - 3. + // TestXLStorage case with segment of the volume name > 255. { srcVol: "my", srcPath: "success-file", expectedErr: errVolumeNotFound, }, - // TestPosix case - 4. - // TestPosix case with non-existent volume. + // TestXLStorage case - 4. + // TestXLStorage case with non-existent volume. { srcVol: "non-existent-vol", srcPath: "success-file", expectedErr: errVolumeNotFound, }, - // TestPosix case - 5. - // TestPosix case with src path segment > 255. + // TestXLStorage case - 5. + // TestXLStorage case with src path segment > 255. { srcVol: "success-vol", srcPath: "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", expectedErr: errFileNameTooLong, }, - // TestPosix case - 6. - // TestPosix case with undeletable parent directory. + // TestXLStorage case - 6. + // TestXLStorage case with undeletable parent directory. // File can delete, dir cannot delete because no-permissions doesn't have write perms. 
{ srcVol: "no-permissions", @@ -924,59 +915,56 @@ func TestPosixDeleteFile(t *testing.T) { } for i, testCase := range testCases { - if _, ok := posixStorage.(*posixDiskIDCheck); !ok { - t.Errorf("Expected the StorageAPI to be of type *posix") - } - if err = posixStorage.DeleteFile(testCase.srcVol, testCase.srcPath); err != testCase.expectedErr { - t.Errorf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + if err = xlStorage.DeleteFile(testCase.srcVol, testCase.srcPath); err != testCase.expectedErr { + t.Errorf("TestXLStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } } - // TestPosix for permission denied. + // TestXLStorage for permission denied. if runtime.GOOS != globalWindowsOSName { permDeniedDir := createPermDeniedFile(t) defer removePermDeniedFile(permDeniedDir) - // Initialize posix storage layer for permission denied error. - _, err = newPosix(permDeniedDir, "") + // Initialize xlStorage storage layer for permission denied error. + _, err = newXLStorage(permDeniedDir, "") if err != nil && !os.IsPermission(err) { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } if err = os.Chmod(permDeniedDir, 0755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - posixStorage, err = newPosix(permDeniedDir, "") + xlStorageNew, err := newXLStorage(permDeniedDir, "") if err != nil { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = posixStorage.DeleteFile("mybucket", "myobject"); err != errFileAccessDenied { + if err = xlStorageNew.DeleteFile("mybucket", "myobject"); err != errFileAccessDenied { t.Errorf("expected: %s, got: %s", errFileAccessDenied, err) } } - // TestPosix for delete on an removed disk. + // TestXLStorage for delete on an removed disk. // should fail with disk not found. - err = posixDeletedStorage.DeleteFile("del-vol", "my-file") + err = xlStorageDeletedStorage.DeleteFile("del-vol", "my-file") if err != errDiskNotFound { t.Errorf("Expected: \"Disk not found\", got \"%s\"", err) } } -// TestPosixReadFile - TestPosixs posix.ReadFile with wide range of cases and asserts the result and error response. -func TestPosixReadFile(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageReadFile - TestXLStorages xlStorage.ReadFile with wide range of cases and asserts the result and error response. +func TestXLStorageReadFile(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) volume := "success-vol" // Setup test environment. - if err = posixStorage.MakeVol(volume); err != nil { + if err = xlStorage.MakeVol(volume); err != nil { t.Fatalf("Unable to create volume, %s", err) } @@ -1060,7 +1048,7 @@ func TestPosixReadFile(t *testing.T) { v := NewBitrotVerifier(SHA256, getSHA256Sum([]byte("hello, world"))) // Create test files for further reading. 
for i, appendFile := range appendFiles { - err = posixStorage.AppendFile(volume, appendFile.fileName, []byte("hello, world")) + err = xlStorage.AppendFile(volume, appendFile.fileName, []byte("hello, world")) if err != appendFile.expectedErr { t.Fatalf("Creating file failed: %d %#v, expected: %s, got: %s", i+1, appendFile, appendFile.expectedErr, err) } @@ -1069,7 +1057,7 @@ func TestPosixReadFile(t *testing.T) { { buf := make([]byte, 5) // Test for negative offset. - if _, err = posixStorage.ReadFile(volume, "myobject", -1, buf, v); err == nil { + if _, err = xlStorage.ReadFile(volume, "myobject", -1, buf, v); err == nil { t.Fatalf("expected: error, got: ") } } @@ -1079,7 +1067,7 @@ func TestPosixReadFile(t *testing.T) { var n int64 // Common read buffer. var buf = make([]byte, testCase.bufSize) - n, err = posixStorage.ReadFile(testCase.volume, testCase.fileName, testCase.offset, buf, v) + n, err = xlStorage.ReadFile(testCase.volume, testCase.fileName, testCase.offset, buf, v) if err != nil && testCase.expectedErr != nil { // Validate if the type string of the errors are an exact match. if err.Error() != testCase.expectedErr.Error() { @@ -1130,35 +1118,35 @@ func TestPosixReadFile(t *testing.T) { } } - // TestPosix for permission denied. + // TestXLStorage for permission denied. if runtime.GOOS != globalWindowsOSName { permDeniedDir := createPermDeniedFile(t) defer removePermDeniedFile(permDeniedDir) - // Initialize posix storage layer for permission denied error. - _, err = newPosix(permDeniedDir, "") + // Initialize xlStorage storage layer for permission denied error. + _, err = newXLStorage(permDeniedDir, "") if err != nil && !os.IsPermission(err) { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } if err = os.Chmod(permDeniedDir, 0755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - posixPermStorage, err := newPosix(permDeniedDir, "") + xlStoragePermStorage, err := newXLStorage(permDeniedDir, "") if err != nil { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } // Common read buffer. var buf = make([]byte, 10) - if _, err = posixPermStorage.ReadFile("mybucket", "myobject", 0, buf, v); err != errFileAccessDenied { + if _, err = xlStoragePermStorage.ReadFile("mybucket", "myobject", 0, buf, v); err != errFileAccessDenied { t.Errorf("expected: %s, got: %s", errFileAccessDenied, err) } } } -var posixReadFileWithVerifyTests = []struct { +var xlStorageReadFileWithVerifyTests = []struct { file string offset int length int @@ -1183,18 +1171,18 @@ var posixReadFileWithVerifyTests = []struct { {file: "myobject", offset: 1000, length: 1001, algorithm: BLAKE2b512, expError: nil}, // 15 } -// TestPosixReadFile with bitrot verification - tests the posix level +// TestXLStorageReadFile with bitrot verification - tests the xlStorage level // ReadFile API with a BitrotVerifier. Only tests hashing related // functionality. Other functionality is tested with -// TestPosixReadFile. -func TestPosixReadFileWithVerify(t *testing.T) { +// TestXLStorageReadFile. 
+func TestXLStorageReadFileWithVerify(t *testing.T) { volume, object := "test-vol", "myobject" - posixStorage, path, err := newPosixTestSetup() + xlStorage, path, err := newXLStorageTestSetup() if err != nil { os.RemoveAll(path) - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } - if err = posixStorage.MakeVol(volume); err != nil { + if err = xlStorage.MakeVol(volume); err != nil { os.RemoveAll(path) t.Fatalf("Unable to create volume %s: %v", volume, err) } @@ -1203,12 +1191,12 @@ func TestPosixReadFileWithVerify(t *testing.T) { os.RemoveAll(path) t.Fatalf("Unable to create generate random data: %v", err) } - if err = posixStorage.AppendFile(volume, object, data); err != nil { + if err = xlStorage.AppendFile(volume, object, data); err != nil { os.RemoveAll(path) t.Fatalf("Unable to create object: %v", err) } - for i, test := range posixReadFileWithVerifyTests { + for i, test := range xlStorageReadFileWithVerifyTests { h := test.algorithm.New() h.Write(data) if test.expError != nil { @@ -1216,7 +1204,7 @@ func TestPosixReadFileWithVerify(t *testing.T) { } buffer := make([]byte, test.length) - n, err := posixStorage.ReadFile(volume, test.file, int64(test.offset), buffer, NewBitrotVerifier(test.algorithm, h.Sum(nil))) + n, err := xlStorage.ReadFile(volume, test.file, int64(test.offset), buffer, NewBitrotVerifier(test.algorithm, h.Sum(nil))) switch { case err == nil && test.expError != nil: @@ -1231,40 +1219,40 @@ func TestPosixReadFileWithVerify(t *testing.T) { } } -// TestPosixFormatFileChange - to test if changing the diskID makes the calls fail. -func TestPosixFormatFileChange(t *testing.T) { - posixStorage, path, err := newPosixTestSetup() +// TestXLStorageFormatFileChange - to test if changing the diskID makes the calls fail. +func TestXLStorageFormatFileChange(t *testing.T) { + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) - if err = posixStorage.MakeVol(volume); err != nil { + if err = xlStorage.MakeVol(volume); err != nil { t.Fatalf("MakeVol failed with %s", err) } // Change the format.json such that "this" is changed to "randomid". 
- if err = ioutil.WriteFile(pathJoin(posixStorage.String(), minioMetaBucket, formatConfigFile), []byte(`{"version":"1","format":"xl","id":"592a41c2-b7cc-4130-b883-c4b5cb15965b","xl":{"version":"3","this":"randomid","sets":[["e07285a6-8c73-4962-89c6-047fb939f803","33b8d431-482d-4376-b63c-626d229f0a29","cff6513a-4439-4dc1-bcaa-56c9e880c352","randomid","9c9f21d5-1f15-4737-bce6-835faa0d9626","0a59b346-1424-4fc2-9fa2-a2e80541d0c1","7924a3dc-b69a-4971-9a2e-014966d6aebb","4d2b8dd9-4e48-444b-bdca-c89194b26042"]],"distributionAlgo":"CRCMOD"}}`), 0644); err != nil { + if err = ioutil.WriteFile(pathJoin(xlStorage.String(), minioMetaBucket, formatConfigFile), []byte(`{"version":"1","format":"xl","id":"592a41c2-b7cc-4130-b883-c4b5cb15965b","xl":{"version":"3","this":"randomid","sets":[["e07285a6-8c73-4962-89c6-047fb939f803","33b8d431-482d-4376-b63c-626d229f0a29","cff6513a-4439-4dc1-bcaa-56c9e880c352","randomid","9c9f21d5-1f15-4737-bce6-835faa0d9626","0a59b346-1424-4fc2-9fa2-a2e80541d0c1","7924a3dc-b69a-4971-9a2e-014966d6aebb","4d2b8dd9-4e48-444b-bdca-c89194b26042"]],"distributionAlgo":"CRCMOD"}}`), 0644); err != nil { t.Fatalf("ioutil.WriteFile failed with %s", err) } - err = posixStorage.MakeVol(volume) + err = xlStorage.MakeVol(volume) if err != errVolumeExists { t.Fatalf("MakeVol expected to fail with errDiskNotFound but failed with %s", err) } } -// TestPosix posix.AppendFile() -func TestPosixAppendFile(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorage xlStorage.AppendFile() +func TestXLStorageAppendFile(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) // Setup test environment. - if err = posixStorage.MakeVol("success-vol"); err != nil { + if err = xlStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } @@ -1279,11 +1267,11 @@ func TestPosixAppendFile(t *testing.T) { }{ {"myobject", nil}, {"path/to/my/object", nil}, - // TestPosix to append to previously created file. + // TestXLStorage to append to previously created file. {"myobject", nil}, - // TestPosix to use same path of previously created file. + // TestXLStorage to use same path of previously created file. {"path/to/my/testobject", nil}, - // TestPosix to use object is a directory now. + // TestXLStorage to use object is a directory now. {"object-as-dir", errIsNotRegular}, // path segment uses previously uploaded object. {"myobject/testobject", errFileAccessDenied}, @@ -1294,81 +1282,81 @@ func TestPosixAppendFile(t *testing.T) { } for i, testCase := range testCases { - if err = posixStorage.AppendFile("success-vol", testCase.fileName, []byte("hello, world")); err != testCase.expectedErr { + if err = xlStorage.AppendFile("success-vol", testCase.fileName, []byte("hello, world")); err != testCase.expectedErr { t.Errorf("Case: %d, expected: %s, got: %s", i+1, testCase.expectedErr, err) } } - // TestPosix for permission denied. + // TestXLStorage for permission denied. if runtime.GOOS != globalWindowsOSName { permDeniedDir := createPermDeniedFile(t) defer removePermDeniedFile(permDeniedDir) - var posixPermStorage StorageAPI - // Initialize posix storage layer for permission denied error. - _, err = newPosix(permDeniedDir, "") + var xlStoragePermStorage StorageAPI + // Initialize xlStorage storage layer for permission denied error. 
+ _, err = newXLStorage(permDeniedDir, "") if err != nil && !os.IsPermission(err) { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } if err = os.Chmod(permDeniedDir, 0755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - posixPermStorage, err = newPosix(permDeniedDir, "") + xlStoragePermStorage, err = newXLStorage(permDeniedDir, "") if err != nil { - t.Fatalf("Unable to initialize posix, %s", err) + t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = posixPermStorage.AppendFile("mybucket", "myobject", []byte("hello, world")); err != errFileAccessDenied { + if err = xlStoragePermStorage.AppendFile("mybucket", "myobject", []byte("hello, world")); err != errFileAccessDenied { t.Fatalf("expected: Permission error, got: %s", err) } } - // TestPosix case with invalid volume name. + // TestXLStorage case with invalid volume name. // A valid volume name should be atleast of size 3. - err = posixStorage.AppendFile("bn", "yes", []byte("hello, world")) + err = xlStorage.AppendFile("bn", "yes", []byte("hello, world")) if err != errVolumeNotFound { t.Fatalf("expected: \"Invalid argument error\", got: \"%s\"", err) } } -// TestPosix posix.RenameFile() -func TestPosixRenameFile(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorage xlStorage.RenameFile() +func TestXLStorageRenameFile(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) // Setup test environment. - if err := posixStorage.MakeVol("src-vol"); err != nil { + if err := xlStorage.MakeVol("src-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err := posixStorage.MakeVol("dest-vol"); err != nil { + if err := xlStorage.MakeVol("dest-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err := posixStorage.AppendFile("src-vol", "file1", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("src-vol", "file1", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := posixStorage.AppendFile("src-vol", "file2", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("src-vol", "file2", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := posixStorage.AppendFile("src-vol", "file3", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("src-vol", "file3", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := posixStorage.AppendFile("src-vol", "file4", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("src-vol", "file4", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := posixStorage.AppendFile("src-vol", "file5", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("src-vol", "file5", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := posixStorage.AppendFile("src-vol", "path/to/file1", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("src-vol", "path/to/file1", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } @@ -1379,7 +1367,7 @@ func TestPosixRenameFile(t *testing.T) { destPath 
string expectedErr error }{ - // TestPosix case - 1. + // TestXLStorage case - 1. { srcVol: "src-vol", destVol: "dest-vol", @@ -1387,7 +1375,7 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-one", expectedErr: nil, }, - // TestPosix case - 2. + // TestXLStorage case - 2. { srcVol: "src-vol", destVol: "dest-vol", @@ -1395,8 +1383,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: nil, }, - // TestPosix case - 3. - // TestPosix to overwrite destination file. + // TestXLStorage case - 3. + // TestXLStorage to overwrite destination file. { srcVol: "src-vol", destVol: "dest-vol", @@ -1404,8 +1392,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-one", expectedErr: nil, }, - // TestPosix case - 4. - // TestPosix case with io error count set to 1. + // TestXLStorage case - 4. + // TestXLStorage case with io error count set to 1. // expected not to fail. { srcVol: "src-vol", @@ -1414,8 +1402,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-two", expectedErr: nil, }, - // TestPosix case - 5. - // TestPosix case with io error count set to maximum allowed count. + // TestXLStorage case - 5. + // TestXLStorage case with io error count set to maximum allowed count. // expected not to fail. { srcVol: "src-vol", @@ -1424,8 +1412,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-three", expectedErr: nil, }, - // TestPosix case - 6. - // TestPosix case with non-existent source file. + // TestXLStorage case - 6. + // TestXLStorage case with non-existent source file. { srcVol: "src-vol", destVol: "dest-vol", @@ -1433,8 +1421,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-three", expectedErr: errFileNotFound, }, - // TestPosix case - 7. - // TestPosix to check failure of source and destination are not same type. + // TestXLStorage case - 7. + // TestXLStorage to check failure of source and destination are not same type. { srcVol: "src-vol", destVol: "dest-vol", @@ -1442,8 +1430,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-one", expectedErr: errFileAccessDenied, }, - // TestPosix case - 8. - // TestPosix to check failure of destination directory exists. + // TestXLStorage case - 8. + // TestXLStorage to check failure of destination directory exists. { srcVol: "src-vol", destVol: "dest-vol", @@ -1451,8 +1439,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: errFileAccessDenied, }, - // TestPosix case - 9. - // TestPosix case with source being a file and destination being a directory. + // TestXLStorage case - 9. + // TestXLStorage case with source being a file and destination being a directory. // Either both have to be files or directories. // Expecting to fail with `errFileAccessDenied`. { @@ -1462,8 +1450,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: errFileAccessDenied, }, - // TestPosix case - 10. - // TestPosix case with non-existent source volume. + // TestXLStorage case - 10. + // TestXLStorage case with non-existent source volume. // Expecting to fail with `errVolumeNotFound`. { srcVol: "src-vol-non-existent", @@ -1472,8 +1460,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: errVolumeNotFound, }, - // TestPosix case - 11. - // TestPosix case with non-existent destination volume. + // TestXLStorage case - 11. + // TestXLStorage case with non-existent destination volume. // Expecting to fail with `errVolumeNotFound`. 
{ srcVol: "src-vol", @@ -1482,8 +1470,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: errVolumeNotFound, }, - // TestPosix case - 12. - // TestPosix case with invalid src volume name. Length should be atleast 3. + // TestXLStorage case - 12. + // TestXLStorage case with invalid src volume name. Length should be atleast 3. // Expecting to fail with `errInvalidArgument`. { srcVol: "ab", @@ -1492,8 +1480,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: errVolumeNotFound, }, - // TestPosix case - 13. - // TestPosix case with invalid destination volume name. Length should be atleast 3. + // TestXLStorage case - 13. + // TestXLStorage case with invalid destination volume name. Length should be atleast 3. // Expecting to fail with `errInvalidArgument`. { srcVol: "abcd", @@ -1502,8 +1490,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: errVolumeNotFound, }, - // TestPosix case - 14. - // TestPosix case with invalid destination volume name. Length should be atleast 3. + // TestXLStorage case - 14. + // TestXLStorage case with invalid destination volume name. Length should be atleast 3. // Expecting to fail with `errInvalidArgument`. { srcVol: "abcd", @@ -1512,8 +1500,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "new-path/", expectedErr: errVolumeNotFound, }, - // TestPosix case - 15. - // TestPosix case with the parent of the destination being a file. + // TestXLStorage case - 15. + // TestXLStorage case with the parent of the destination being a file. // expected to fail with `errFileAccessDenied`. { srcVol: "src-vol", @@ -1522,8 +1510,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-one/parent-is-file", expectedErr: errFileAccessDenied, }, - // TestPosix case - 16. - // TestPosix case with segment of source file name more than 255. + // TestXLStorage case - 16. + // TestXLStorage case with segment of source file name more than 255. // expected not to fail. { srcVol: "src-vol", @@ -1532,8 +1520,8 @@ func TestPosixRenameFile(t *testing.T) { destPath: "file-six", expectedErr: errFileNameTooLong, }, - // TestPosix case - 17. - // TestPosix case with segment of destination file name more than 255. + // TestXLStorage case - 17. + // TestXLStorage case with segment of destination file name more than 255. // expected not to fail. 
{ srcVol: "src-vol", @@ -1545,35 +1533,31 @@ func TestPosixRenameFile(t *testing.T) { } for i, testCase := range testCases { - if _, ok := posixStorage.(*posixDiskIDCheck); !ok { - t.Fatalf("Expected the StorageAPI to be of type *posix") - } - - if err := posixStorage.RenameFile(testCase.srcVol, testCase.srcPath, testCase.destVol, testCase.destPath); err != testCase.expectedErr { - t.Fatalf("TestPosix %d: Expected the error to be : \"%v\", got: \"%v\".", i+1, testCase.expectedErr, err) + if err := xlStorage.RenameFile(testCase.srcVol, testCase.srcPath, testCase.destVol, testCase.destPath); err != testCase.expectedErr { + t.Fatalf("TestXLStorage %d: Expected the error to be : \"%v\", got: \"%v\".", i+1, testCase.expectedErr, err) } } } -// TestPosix posix.StatFile() -func TestPosixStatFile(t *testing.T) { - // create posix test setup - posixStorage, path, err := newPosixTestSetup() +// TestXLStorage xlStorage.CheckFile() +func TestXLStorageCheckFile(t *testing.T) { + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) // Setup test environment. - if err := posixStorage.MakeVol("success-vol"); err != nil { + if err := xlStorage.MakeVol("success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err := posixStorage.AppendFile("success-vol", "success-file", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("success-vol", pathJoin("success-file", xlStorageFormatFile), []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := posixStorage.AppendFile("success-vol", "path/to/success-file", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile("success-vol", pathJoin("path/to/success-file", xlStorageFormatFile), []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } @@ -1582,43 +1566,43 @@ func TestPosixStatFile(t *testing.T) { srcPath string expectedErr error }{ - // TestPosix case - 1. - // TestPosix case with valid inputs, expected to pass. + // TestXLStorage case - 1. + // TestXLStorage case with valid inputs, expected to pass. { srcVol: "success-vol", srcPath: "success-file", expectedErr: nil, }, - // TestPosix case - 2. - // TestPosix case with valid inputs, expected to pass. + // TestXLStorage case - 2. + // TestXLStorage case with valid inputs, expected to pass. { srcVol: "success-vol", srcPath: "path/to/success-file", expectedErr: nil, }, - // TestPosix case - 3. - // TestPosix case with non-existent file. + // TestXLStorage case - 3. + // TestXLStorage case with non-existent file. { srcVol: "success-vol", srcPath: "nonexistent-file", expectedErr: errFileNotFound, }, - // TestPosix case - 4. - // TestPosix case with non-existent file path. + // TestXLStorage case - 4. + // TestXLStorage case with non-existent file path. { srcVol: "success-vol", srcPath: "path/2/success-file", expectedErr: errFileNotFound, }, - // TestPosix case - 5. - // TestPosix case with path being a directory. + // TestXLStorage case - 5. + // TestXLStorage case with path being a directory. { srcVol: "success-vol", srcPath: "path", expectedErr: errFileNotFound, }, - // TestPosix case - 6. - // TestPosix case with non existent volume. + // TestXLStorage case - 6. + // TestXLStorage case with non existent volume. 
{ srcVol: "non-existent-vol", srcPath: "success-file", @@ -1627,33 +1611,30 @@ func TestPosixStatFile(t *testing.T) { } for i, testCase := range testCases { - if _, ok := posixStorage.(*posixDiskIDCheck); !ok { - t.Errorf("Expected the StorageAPI to be of type *posix") - } - if _, err := posixStorage.StatFile(testCase.srcVol, testCase.srcPath); err != testCase.expectedErr { - t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) + if err := xlStorage.CheckFile(testCase.srcVol, testCase.srcPath); err != testCase.expectedErr { + t.Fatalf("TestXLStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } } } -// Test posix.VerifyFile() -func TestPosixVerifyFile(t *testing.T) { +// Test xlStorage.VerifyFile() +func TestXLStorageVerifyFile(t *testing.T) { // We test 4 cases: // 1) Whole-file bitrot check on proper file // 2) Whole-file bitrot check on corrupted file // 3) Streaming bitrot check on proper file // 4) Streaming bitrot check on corrupted file - // create posix test setup - posixStorage, path, err := newPosixTestSetup() + // create xlStorage test setup + xlStorage, path, err := newXLStorageTestSetup() if err != nil { - t.Fatalf("Unable to create posix test setup, %s", err) + t.Fatalf("Unable to create xlStorage test setup, %s", err) } defer os.RemoveAll(path) volName := "testvol" fileName := "testfile" - if err := posixStorage.MakeVol(volName); err != nil { + if err := xlStorage.MakeVol(volName); err != nil { t.Fatal(err) } @@ -1667,29 +1648,29 @@ func TestPosixVerifyFile(t *testing.T) { h := algo.New() h.Write(data) hashBytes := h.Sum(nil) - if err := posixStorage.WriteAll(volName, fileName, bytes.NewBuffer(data)); err != nil { + if err := xlStorage.WriteAll(volName, fileName, bytes.NewBuffer(data)); err != nil { t.Fatal(err) } - if err := posixStorage.VerifyFile(volName, fileName, size, algo, hashBytes, 0); err != nil { + if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err != nil { t.Fatal(err) } // 2) Whole-file bitrot check on corrupted file - if err := posixStorage.AppendFile(volName, fileName, []byte("a")); err != nil { + if err := xlStorage.AppendFile(volName, fileName, []byte("a")); err != nil { t.Fatal(err) } // Check if VerifyFile reports the incorrect file length (the correct length is `size+1`) - if err := posixStorage.VerifyFile(volName, fileName, size, algo, hashBytes, 0); err == nil { + if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err == nil { t.Fatal("expected to fail bitrot check") } // Check if bitrot fails - if err := posixStorage.VerifyFile(volName, fileName, size+1, algo, hashBytes, 0); err == nil { + if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size+1, algo, hashBytes, 0); err == nil { t.Fatal("expected to fail bitrot check") } - if err := posixStorage.DeleteFile(volName, fileName); err != nil { + if err := xlStorage.DeleteFile(volName, fileName); err != nil { t.Fatal(err) } @@ -1697,7 +1678,7 @@ func TestPosixVerifyFile(t *testing.T) { algo = HighwayHash256S shardSize := int64(1024 * 1024) shard := make([]byte, shardSize) - w := newStreamingBitrotWriter(posixStorage, volName, fileName, size, algo, shardSize) + w := newStreamingBitrotWriter(xlStorage, volName, fileName, size, algo, shardSize) reader := bytes.NewReader(data) for { // Using io.CopyBuffer instead of this loop will not work for us as io.CopyBuffer @@ -1713,12 +1694,12 @@ func 
TestPosixVerifyFile(t *testing.T) { t.Fatal(err) } w.Close() - if err := posixStorage.VerifyFile(volName, fileName, size, algo, nil, shardSize); err != nil { + if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size, algo, nil, shardSize); err != nil { t.Fatal(err) } // 4) Streaming bitrot check on corrupted file - filePath := pathJoin(posixStorage.String(), volName, fileName) + filePath := pathJoin(xlStorage.String(), volName, fileName) f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0644) if err != nil { t.Fatal(err) @@ -1727,10 +1708,10 @@ func TestPosixVerifyFile(t *testing.T) { t.Fatal(err) } f.Close() - if err := posixStorage.VerifyFile(volName, fileName, size, algo, nil, shardSize); err == nil { + if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size, algo, nil, shardSize); err == nil { t.Fatal("expected to fail bitrot check") } - if err := posixStorage.VerifyFile(volName, fileName, size+1, algo, nil, shardSize); err == nil { + if err := xlStorage.storage.bitrotVerify(pathJoin(path, volName, fileName), size+1, algo, nil, shardSize); err == nil { t.Fatal("expected to fail bitrot check") } } diff --git a/cmd/posix_unix_test.go b/cmd/xl-storage_unix_test.go similarity index 79% rename from cmd/posix_unix_test.go rename to cmd/xl-storage_unix_test.go index a6dedd847..8dc76b02b 100644 --- a/cmd/posix_unix_test.go +++ b/cmd/xl-storage_unix_test.go @@ -1,7 +1,7 @@ // +build linux darwin dragonfly freebsd netbsd openbsd /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -48,10 +48,10 @@ func TestIsValidUmaskVol(t *testing.T) { } testCase := testCases[0] - // Initialize a new posix layer. - disk, err := newPosix(tmpPath, "") + // Initialize a new xlStorage layer. + disk, err := newXLStorage(tmpPath, "") if err != nil { - t.Fatalf("Initializing posix failed with %s.", err) + t.Fatalf("Initializing xlStorage failed with %s.", err) } // Attempt to create a volume to verify the permissions later. @@ -90,10 +90,10 @@ func TestIsValidUmaskFile(t *testing.T) { } testCase := testCases[0] - // Initialize a new posix layer. - disk, err := newPosix(tmpPath, "") + // Initialize a new xlStorage layer. + disk, err := newXLStorage(tmpPath, "") if err != nil { - t.Fatalf("Initializing posix failed with %s.", err) + t.Fatalf("Initializing xlStorage failed with %s.", err) } // Attempt to create a volume to verify the permissions later. @@ -106,21 +106,12 @@ func TestIsValidUmaskFile(t *testing.T) { // Attempt to create a file to verify the permissions later. // AppendFile creates file with 0666 perms. - if err = disk.AppendFile(testCase.volName, "hello-world.txt", []byte("Hello World")); err != nil { + if err = disk.AppendFile(testCase.volName, pathJoin("hello-world.txt", xlStorageFormatFile), []byte("Hello World")); err != nil { t.Fatalf("Create a file `test` failed with %s expected to pass.", err) } - // StatFile - stat the file. - fi, err := disk.StatFile(testCase.volName, "hello-world.txt") - if err != nil { + // CheckFile - stat the file. + if err := disk.CheckFile(testCase.volName, "hello-world.txt"); err != nil { t.Fatalf("Stat failed with %s expected to pass.", err) } - - // Get umask of the bits stored. - currentUmask := 0666 - uint32(fi.Mode.Perm()) - - // Verify if umask is correct. 
- if int(currentUmask) != testCase.expectedUmask { - t.Fatalf("Umask check failed expected %d, got %d", testCase.expectedUmask, currentUmask) - } } diff --git a/cmd/posix_windows_test.go b/cmd/xl-storage_windows_test.go similarity index 92% rename from cmd/posix_windows_test.go rename to cmd/xl-storage_windows_test.go index 2ae38d8c7..38e82db51 100644 --- a/cmd/posix_windows_test.go +++ b/cmd/xl-storage_windows_test.go @@ -1,7 +1,7 @@ // +build windows /* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. + * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -48,7 +48,7 @@ func TestUNCPaths(t *testing.T) { // Instantiate posix object to manage a disk var fs StorageAPI - fs, err = newPosix(dir, "") + fs, err = newXLStorage(dir, "") if err != nil { t.Fatal(err) } @@ -72,7 +72,7 @@ func TestUNCPaths(t *testing.T) { } } -// Test to validate posix behavior on windows when a non-final path component is a file. +// Test to validate xlStorage behavior on windows when a non-final path component is a file. func TestUNCPathENOTDIR(t *testing.T) { // Instantiate posix object to manage a disk dir, err := ioutil.TempDir("", "testdisk-") @@ -83,7 +83,7 @@ func TestUNCPathENOTDIR(t *testing.T) { defer os.RemoveAll(dir) var fs StorageAPI - fs, err = newPosix(dir, "") + fs, err = newXLStorage(dir, "") if err != nil { t.Fatal(err) } diff --git a/cmd/xl-v1-list-objects-heal.go b/cmd/xl-v1-list-objects-heal.go deleted file mode 100644 index 4974b7721..000000000 --- a/cmd/xl-v1-list-objects-heal.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "context" - - "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/madmin" -) - -// This is not implemented/needed anymore, look for xl-sets.ListBucketHeal() -func (xl xlObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { - logger.LogIf(ctx, NotImplemented{}) - return nil, NotImplemented{} -} - -// This is not implemented/needed anymore, look for xl-sets.HealObjects() -func (xl xlObjects) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn healObjectFn) error { - logger.LogIf(ctx, NotImplemented{}) - return NotImplemented{} -} - -// this is not implemented/needed anymore, look for xl-sets.Walk() -func (xl xlObjects) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error { - logger.LogIf(ctx, NotImplemented{}) - return NotImplemented{} -} diff --git a/cmd/xl-v1-list-objects.go b/cmd/xl-v1-list-objects.go deleted file mode 100644 index 16e267293..000000000 --- a/cmd/xl-v1-list-objects.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "context" -) - -// ListObjects - list all objects at prefix, delimited by '/', shouldn't be called. -func (xl xlObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { - // Shouldn't be called - return loi, NotImplemented{} -} diff --git a/cmd/xl-v1-metadata.go b/cmd/xl-v1-metadata.go deleted file mode 100644 index 4941d2685..000000000 --- a/cmd/xl-v1-metadata.go +++ /dev/null @@ -1,459 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "bytes" - "context" - "encoding/hex" - "encoding/json" - "fmt" - "net/http" - "path" - "sort" - "time" - - jsoniter "github.com/json-iterator/go" - xhttp "github.com/minio/minio/cmd/http" - "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/sync/errgroup" - "github.com/minio/sha256-simd" -) - -const erasureAlgorithmKlauspost = "klauspost/reedsolomon/vandermonde" - -// ObjectPartInfo Info of each part kept in the multipart metadata -// file after CompleteMultipartUpload() is called. -type ObjectPartInfo struct { - ETag string `json:"etag,omitempty"` - Number int `json:"number"` - Size int64 `json:"size"` - ActualSize int64 `json:"actualSize"` -} - -// byObjectPartNumber is a collection satisfying sort.Interface. -type byObjectPartNumber []ObjectPartInfo - -func (t byObjectPartNumber) Len() int { return len(t) } -func (t byObjectPartNumber) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t byObjectPartNumber) Less(i, j int) bool { return t[i].Number < t[j].Number } - -// ChecksumInfo - carries checksums of individual scattered parts per disk. -type ChecksumInfo struct { - PartNumber int - Algorithm BitrotAlgorithm - Hash []byte -} - -type checksumInfoJSON struct { - Name string `json:"name"` - Algorithm string `json:"algorithm"` - Hash string `json:"hash,omitempty"` -} - -// MarshalJSON marshals the ChecksumInfo struct -func (c ChecksumInfo) MarshalJSON() ([]byte, error) { - info := checksumInfoJSON{ - Name: fmt.Sprintf("part.%d", c.PartNumber), - Algorithm: c.Algorithm.String(), - Hash: hex.EncodeToString(c.Hash), - } - return json.Marshal(info) -} - -// UnmarshalJSON - should never be called, instead xlMetaV1UnmarshalJSON() should be used. 
-func (c *ChecksumInfo) UnmarshalJSON(data []byte) error { - var info checksumInfoJSON - var json = jsoniter.ConfigCompatibleWithStandardLibrary - if err := json.Unmarshal(data, &info); err != nil { - return err - } - sum, err := hex.DecodeString(info.Hash) - if err != nil { - return err - } - c.Algorithm = BitrotAlgorithmFromString(info.Algorithm) - c.Hash = sum - if _, err = fmt.Sscanf(info.Name, "part.%d", &c.PartNumber); err != nil { - return err - } - - if !c.Algorithm.Available() { - logger.LogIf(GlobalContext, errBitrotHashAlgoInvalid) - return errBitrotHashAlgoInvalid - } - return nil -} - -// ErasureInfo holds erasure coding and bitrot related information. -type ErasureInfo struct { - // Algorithm is the string representation of erasure-coding-algorithm - Algorithm string `json:"algorithm"` - // DataBlocks is the number of data blocks for erasure-coding - DataBlocks int `json:"data"` - // ParityBlocks is the number of parity blocks for erasure-coding - ParityBlocks int `json:"parity"` - // BlockSize is the size of one erasure-coded block - BlockSize int64 `json:"blockSize"` - // Index is the index of the current disk - Index int `json:"index"` - // Distribution is the distribution of the data and parity blocks - Distribution []int `json:"distribution"` - // Checksums holds all bitrot checksums of all erasure encoded blocks - Checksums []ChecksumInfo `json:"checksum,omitempty"` -} - -// AddChecksumInfo adds a checksum of a part. -func (e *ErasureInfo) AddChecksumInfo(ckSumInfo ChecksumInfo) { - for i, sum := range e.Checksums { - if sum.PartNumber == ckSumInfo.PartNumber { - e.Checksums[i] = ckSumInfo - return - } - } - e.Checksums = append(e.Checksums, ckSumInfo) -} - -// GetChecksumInfo - get checksum of a part. -func (e ErasureInfo) GetChecksumInfo(partNumber int) (ckSum ChecksumInfo) { - for _, sum := range e.Checksums { - if sum.PartNumber == partNumber { - // Return the checksum - return sum - } - } - return ChecksumInfo{} -} - -// ShardFileSize - returns final erasure size from original size. -func (e ErasureInfo) ShardFileSize(totalLength int64) int64 { - if totalLength == 0 { - return 0 - } - if totalLength == -1 { - return -1 - } - numShards := totalLength / e.BlockSize - lastBlockSize := totalLength % e.BlockSize - lastShardSize := ceilFrac(lastBlockSize, int64(e.DataBlocks)) - return numShards*e.ShardSize() + lastShardSize -} - -// ShardSize - returns actual shared size from erasure blockSize. -func (e ErasureInfo) ShardSize() int64 { - return ceilFrac(e.BlockSize, int64(e.DataBlocks)) -} - -// statInfo - carries stat information of the object. -type statInfo struct { - Size int64 `json:"size"` // Size of the object `xl.json`. - ModTime time.Time `json:"modTime"` // ModTime of the object `xl.json`. -} - -// A xlMetaV1 represents `xl.json` metadata header. -type xlMetaV1 struct { - Version string `json:"version"` // Version of the current `xl.json`. - Format string `json:"format"` // Format of the current `xl.json`. - Stat statInfo `json:"stat"` // Stat of the current object `xl.json`. - // Erasure coded info for the current object `xl.json`. - Erasure ErasureInfo `json:"erasure"` - // MinIO release tag for current object `xl.json`. - Minio struct { - Release string `json:"release"` - } `json:"minio"` - // Metadata map for current object `xl.json`. - Meta map[string]string `json:"meta,omitempty"` - // Captures all the individual object `xl.json`. - Parts []ObjectPartInfo `json:"parts,omitempty"` -} - -// XL metadata constants. -const ( - // XL meta version. 
- xlMetaVersion = "1.0.1" - - // XL meta version. - xlMetaVersion100 = "1.0.0" - - // XL meta format string. - xlMetaFormat = "xl" - - // Add new constants here. -) - -// newXLMetaV1 - initializes new xlMetaV1, adds version, allocates a fresh erasure info. -func newXLMetaV1(object string, dataBlocks, parityBlocks int) (xlMeta xlMetaV1) { - xlMeta = xlMetaV1{} - xlMeta.Version = xlMetaVersion - xlMeta.Format = xlMetaFormat - xlMeta.Minio.Release = ReleaseTag - xlMeta.Erasure = ErasureInfo{ - Algorithm: erasureAlgorithmKlauspost, - DataBlocks: dataBlocks, - ParityBlocks: parityBlocks, - BlockSize: blockSizeV1, - Distribution: hashOrder(object, dataBlocks+parityBlocks), - } - return xlMeta -} - -// Return a new xlMetaV1 initialized using the given xlMetaV1. Used in healing to make sure that we do not copy -// over any part's checksum info which will differ for different disks. -func newXLMetaFromXLMeta(meta xlMetaV1) xlMetaV1 { - xlMeta := meta - xlMeta.Erasure.Checksums = nil - xlMeta.Parts = nil - return xlMeta -} - -// IsValid - tells if the format is sane by validating the version -// string, format and erasure info fields. -func (m xlMetaV1) IsValid() bool { - return isXLMetaFormatValid(m.Version, m.Format) && - isXLMetaErasureInfoValid(m.Erasure.DataBlocks, m.Erasure.ParityBlocks) -} - -// Verifies if the backend format metadata is sane by validating -// the version string and format style. -func isXLMetaFormatValid(version, format string) bool { - return ((version == xlMetaVersion || version == xlMetaVersion100) && - format == xlMetaFormat) -} - -// Verifies if the backend format metadata is sane by validating -// the ErasureInfo, i.e. data and parity blocks. -func isXLMetaErasureInfoValid(data, parity int) bool { - return ((data >= parity) && (data != 0) && (parity != 0)) -} - -// Converts metadata to object info. -func (m xlMetaV1) ToObjectInfo(bucket, object string) ObjectInfo { - objInfo := ObjectInfo{ - IsDir: false, - Bucket: bucket, - Name: object, - Size: m.Stat.Size, - ModTime: m.Stat.ModTime, - ContentType: m.Meta["content-type"], - ContentEncoding: m.Meta["content-encoding"], - } - // Update expires - var ( - t time.Time - e error - ) - if exp, ok := m.Meta["expires"]; ok { - if t, e = time.Parse(http.TimeFormat, exp); e == nil { - objInfo.Expires = t.UTC() - } - } - objInfo.backendType = BackendErasure - - // Extract etag from metadata. - objInfo.ETag = extractETag(m.Meta) - - // Add user tags to the object info - objInfo.UserTags = m.Meta[xhttp.AmzObjectTagging] - - // etag/md5Sum has already been extracted. We need to - // remove to avoid it from appearing as part of - // response headers. e.g, X-Minio-* or X-Amz-*. - // Tags have also been extracted, we remove that as well. - objInfo.UserDefined = cleanMetadata(m.Meta) - - // All the parts per object. - objInfo.Parts = m.Parts - - // Update storage class - if sc, ok := m.Meta[xhttp.AmzStorageClass]; ok { - objInfo.StorageClass = sc - } else { - objInfo.StorageClass = globalMinioDefaultStorageClass - } - - // Success. - return objInfo -} - -// objectPartIndex - returns the index of matching object part number. -func objectPartIndex(parts []ObjectPartInfo, partNumber int) int { - for i, part := range parts { - if partNumber == part.Number { - return i - } - } - return -1 -} - -// AddObjectPart - add a new object part in order. 
-func (m *xlMetaV1) AddObjectPart(partNumber int, partETag string, partSize int64, actualSize int64) { - partInfo := ObjectPartInfo{ - Number: partNumber, - ETag: partETag, - Size: partSize, - ActualSize: actualSize, - } - - // Update part info if it already exists. - for i, part := range m.Parts { - if partNumber == part.Number { - m.Parts[i] = partInfo - return - } - } - - // Proceed to include new part info. - m.Parts = append(m.Parts, partInfo) - - // Parts in xlMeta should be in sorted order by part number. - sort.Sort(byObjectPartNumber(m.Parts)) -} - -// ObjectToPartOffset - translate offset of an object to offset of its individual part. -func (m xlMetaV1) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) { - if offset == 0 { - // Special case - if offset is 0, then partIndex and partOffset are always 0. - return 0, 0, nil - } - partOffset = offset - // Seek until object offset maps to a particular part offset. - for i, part := range m.Parts { - partIndex = i - // Offset is smaller than size we have reached the proper part offset. - if partOffset < part.Size { - return partIndex, partOffset, nil - } - // Continue to towards the next part. - partOffset -= part.Size - } - logger.LogIf(ctx, InvalidRange{}) - // Offset beyond the size of the object return InvalidRange. - return 0, 0, InvalidRange{} -} - -func getXLMetaInQuorum(ctx context.Context, metaArr []xlMetaV1, modTime time.Time, quorum int) (xmv xlMetaV1, e error) { - metaHashes := make([]string, len(metaArr)) - for i, meta := range metaArr { - if meta.IsValid() && meta.Stat.ModTime.Equal(modTime) { - h := sha256.New() - for _, part := range meta.Parts { - h.Write([]byte(fmt.Sprintf("part.%d", part.Number))) - } - metaHashes[i] = hex.EncodeToString(h.Sum(nil)) - } - } - - metaHashCountMap := make(map[string]int) - for _, hash := range metaHashes { - if hash == "" { - continue - } - metaHashCountMap[hash]++ - } - - maxHash := "" - maxCount := 0 - for hash, count := range metaHashCountMap { - if count > maxCount { - maxCount = count - maxHash = hash - } - } - - if maxCount < quorum { - return xlMetaV1{}, errXLReadQuorum - } - - for i, hash := range metaHashes { - if hash == maxHash { - return metaArr[i], nil - } - } - - return xlMetaV1{}, errXLReadQuorum -} - -// pickValidXLMeta - picks one valid xlMeta content and returns from a -// slice of xlmeta content. -func pickValidXLMeta(ctx context.Context, metaArr []xlMetaV1, modTime time.Time, quorum int) (xmv xlMetaV1, e error) { - return getXLMetaInQuorum(ctx, metaArr, modTime, quorum) -} - -// writeXLMetadata - writes `xl.json` to a single disk. -func writeXLMetadata(ctx context.Context, disk StorageAPI, bucket, prefix string, xlMeta xlMetaV1) error { - jsonFile := path.Join(prefix, xlMetaJSONFile) - - // Marshal json. - metadataBytes, err := json.Marshal(&xlMeta) - if err != nil { - logger.LogIf(ctx, err) - return err - } - - // Persist marshaled data. - err = disk.WriteAll(bucket, jsonFile, bytes.NewReader(metadataBytes)) - logger.LogIf(ctx, err) - return err -} - -// Rename `xl.json` content to destination location for each disk in order. 
-func renameXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, quorum int) ([]StorageAPI, error) { - isDir := false - srcXLJSON := path.Join(srcEntry, xlMetaJSONFile) - dstXLJSON := path.Join(dstEntry, xlMetaJSONFile) - return rename(ctx, disks, srcBucket, srcXLJSON, dstBucket, dstXLJSON, isDir, quorum, []error{errFileNotFound}) -} - -// writeUniqueXLMetadata - writes unique `xl.json` content for each disk in order. -func writeUniqueXLMetadata(ctx context.Context, disks []StorageAPI, bucket, prefix string, xlMetas []xlMetaV1, quorum int) ([]StorageAPI, error) { - g := errgroup.WithNErrs(len(disks)) - - // Start writing `xl.json` to all disks in parallel. - for index := range disks { - index := index - g.Go(func() error { - if disks[index] == nil { - return errDiskNotFound - } - // Pick one xlMeta for a disk at index. - xlMetas[index].Erasure.Index = index + 1 - return writeXLMetadata(ctx, disks[index], bucket, prefix, xlMetas[index]) - }, index) - } - - // Wait for all the routines. - mErrs := g.Wait() - - err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum) - return evalDisks(disks, mErrs), err -} - -// Returns per object readQuorum and writeQuorum -// readQuorum is the min required disks to read data. -// writeQuorum is the min required disks to write data. -func objectQuorumFromMeta(ctx context.Context, xl xlObjects, partsMetaData []xlMetaV1, errs []error) (objectReadQuorum, objectWriteQuorum int, err error) { - // get the latest updated Metadata and a count of all the latest updated xlMeta(s) - latestXLMeta, err := getLatestXLMeta(ctx, partsMetaData, errs) - - if err != nil { - return 0, 0, err - } - - // Since all the valid erasure code meta updated at the same time are equivalent, pass dataBlocks - // from latestXLMeta to get the quorum - return latestXLMeta.Erasure.DataBlocks, latestXLMeta.Erasure.DataBlocks + 1, nil -} diff --git a/cmd/xl-v1-metadata_test.go b/cmd/xl-v1-metadata_test.go deleted file mode 100644 index fad2cd3cb..000000000 --- a/cmd/xl-v1-metadata_test.go +++ /dev/null @@ -1,249 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "testing" - "time" - - humanize "github.com/dustin/go-humanize" -) - -const ActualSize = 1000 - -// Test xlMetaV1.AddObjectPart() -func TestAddObjectPart(t *testing.T) { - testCases := []struct { - partNum int - expectedIndex int - }{ - {1, 0}, - {2, 1}, - {4, 2}, - {5, 3}, - {7, 4}, - // Insert part. - {3, 2}, - // Replace existing part. - {4, 3}, - // Missing part. - {6, -1}, - } - - // Setup. - xlMeta := newXLMetaV1("test-object", 8, 8) - if !xlMeta.IsValid() { - t.Fatalf("unable to get xl meta") - } - - // Test them. 
- for _, testCase := range testCases { - if testCase.expectedIndex > -1 { - xlMeta.AddObjectPart(testCase.partNum, "", int64(testCase.partNum+humanize.MiByte), ActualSize) - } - - if index := objectPartIndex(xlMeta.Parts, testCase.partNum); index != testCase.expectedIndex { - t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index) - } - } -} - -// Test objectPartIndex(). -// generates a sample xlMeta data and asserts the output of objectPartIndex() with the expected value. -func TestObjectPartIndex(t *testing.T) { - testCases := []struct { - partNum int - expectedIndex int - }{ - {2, 1}, - {1, 0}, - {5, 3}, - {4, 2}, - {7, 4}, - } - - // Setup. - xlMeta := newXLMetaV1("test-object", 8, 8) - if !xlMeta.IsValid() { - t.Fatalf("unable to get xl meta") - } - - // Add some parts for testing. - for _, testCase := range testCases { - xlMeta.AddObjectPart(testCase.partNum, "", int64(testCase.partNum+humanize.MiByte), ActualSize) - } - - // Add failure test case. - testCases = append(testCases, struct { - partNum int - expectedIndex int - }{6, -1}) - - // Test them. - for _, testCase := range testCases { - if index := objectPartIndex(xlMeta.Parts, testCase.partNum); index != testCase.expectedIndex { - t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index) - } - } -} - -// Test xlMetaV1.ObjectToPartOffset(). -func TestObjectToPartOffset(t *testing.T) { - // Setup. - xlMeta := newXLMetaV1("test-object", 8, 8) - if !xlMeta.IsValid() { - t.Fatalf("unable to get xl meta") - } - - // Add some parts for testing. - // Total size of all parts is 5,242,899 bytes. - for _, partNum := range []int{1, 2, 4, 5, 7} { - xlMeta.AddObjectPart(partNum, "", int64(partNum+humanize.MiByte), ActualSize) - } - - testCases := []struct { - offset int64 - expectedIndex int - expectedOffset int64 - expectedErr error - }{ - {0, 0, 0, nil}, - {1 * humanize.MiByte, 0, 1 * humanize.MiByte, nil}, - {1 + humanize.MiByte, 1, 0, nil}, - {2 + humanize.MiByte, 1, 1, nil}, - // Its valid for zero sized object. - {-1, 0, -1, nil}, - // Max fffset is always (size - 1). - {(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte) - 1, 4, 1048582, nil}, - // Error if offset is size. - {(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte), 0, 0, InvalidRange{}}, - } - - // Test them. - for _, testCase := range testCases { - index, offset, err := xlMeta.ObjectToPartOffset(GlobalContext, testCase.offset) - if err != testCase.expectedErr { - t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err) - } - if index != testCase.expectedIndex { - t.Fatalf("%+v: index: expected = %d, got: %d", testCase, testCase.expectedIndex, index) - } - if offset != testCase.expectedOffset { - t.Fatalf("%+v: offset: expected = %d, got: %d", testCase, testCase.expectedOffset, offset) - } - } -} - -// Helper function to check if two xlMetaV1 values are similar. 
-func isXLMetaSimilar(m, n xlMetaV1) bool { - if m.Version != n.Version { - return false - } - if m.Format != n.Format { - return false - } - if len(m.Parts) != len(n.Parts) { - return false - } - return true -} - -func TestPickValidXLMeta(t *testing.T) { - obj := "object" - x1 := newXLMetaV1(obj, 4, 4) - now := UTCNow() - x1.Stat.ModTime = now - invalidX1 := x1 - invalidX1.Version = "invalid-version" - xs := []xlMetaV1{x1, x1, x1, x1} - invalidXS := []xlMetaV1{invalidX1, invalidX1, invalidX1, invalidX1} - testCases := []struct { - metaArr []xlMetaV1 - modTime time.Time - xlMeta xlMetaV1 - expectedErr error - }{ - { - metaArr: xs, - modTime: now, - xlMeta: x1, - expectedErr: nil, - }, - { - metaArr: invalidXS, - modTime: now, - xlMeta: invalidX1, - expectedErr: errXLReadQuorum, - }, - } - for i, test := range testCases { - xlMeta, err := pickValidXLMeta(GlobalContext, test.metaArr, test.modTime, len(test.metaArr)/2) - if test.expectedErr != nil { - if err.Error() != test.expectedErr.Error() { - t.Errorf("Test %d: Expected to fail with %v but received %v", - i+1, test.expectedErr, err) - } - } else { - if !isXLMetaSimilar(xlMeta, test.xlMeta) { - t.Errorf("Test %d: Expected %v but received %v", - i+1, test.xlMeta, xlMeta) - } - } - } -} - -func TestIsXLMetaFormatValid(t *testing.T) { - tests := []struct { - name int - version string - format string - want bool - }{ - {1, "123", "fs", false}, - {2, "123", xlMetaFormat, false}, - {3, xlMetaVersion, "test", false}, - {4, xlMetaVersion100, "hello", false}, - {5, xlMetaVersion, xlMetaFormat, true}, - {6, xlMetaVersion100, xlMetaFormat, true}, - } - for _, tt := range tests { - if got := isXLMetaFormatValid(tt.version, tt.format); got != tt.want { - t.Errorf("Test %d: Expected %v but received %v", tt.name, got, tt.want) - } - } -} - -func TestIsXLMetaErasureInfoValid(t *testing.T) { - tests := []struct { - name int - data int - parity int - want bool - }{ - {1, 5, 6, false}, - {2, 5, 5, true}, - {3, 0, 5, false}, - {4, 5, 0, false}, - {5, 5, 0, false}, - {6, 5, 4, true}, - } - for _, tt := range tests { - if got := isXLMetaErasureInfoValid(tt.data, tt.parity); got != tt.want { - t.Errorf("Test %d: Expected %v but received %v", tt.name, got, tt.want) - } - } -} diff --git a/cmd/xl-v1-multipart_test.go b/cmd/xl-v1-multipart_test.go deleted file mode 100644 index f2f14ed96..000000000 --- a/cmd/xl-v1-multipart_test.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2014, 2015, 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "context" - "sync" - "testing" - "time" -) - -// Tests cleanup multipart uploads for erasure coded backend. 
-func TestXLCleanupStaleMultipartUploads(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Create an instance of xl backend - obj, fsDirs, err := prepareXL16(ctx) - if err != nil { - t.Fatal(err) - } - // Defer cleanup of backend directories - defer removeRoots(fsDirs) - - z := obj.(*xlZones) - xl := z.zones[0].sets[0] - - bucketName := "bucket" - objectName := "object" - var opts ObjectOptions - - obj.MakeBucketWithLocation(ctx, bucketName, "", false) - uploadID, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, opts) - if err != nil { - t.Fatal("Unexpected err: ", err) - } - - var cleanupWg sync.WaitGroup - cleanupWg.Add(1) - go func() { - defer cleanupWg.Done() - xl.cleanupStaleMultipartUploads(GlobalContext, time.Millisecond, 0, ctx.Done()) - }() - - // Wait for 100ms such that - we have given enough time for cleanup routine to kick in. - // Flaky on slow systems :/ - time.Sleep(100 * time.Millisecond) - - // Exit cleanup.. - cancel() - cleanupWg.Wait() - - // Check if upload id was already purged. - if err = obj.AbortMultipartUpload(context.Background(), bucketName, objectName, uploadID); err != nil { - if _, ok := err.(InvalidUploadID); !ok { - t.Fatal("Unexpected err: ", err) - } - } else { - t.Error("Item was not cleaned up.") - } -} diff --git a/cmd/xl-v1.go b/cmd/xl-v1.go deleted file mode 100644 index b5c154390..000000000 --- a/cmd/xl-v1.go +++ /dev/null @@ -1,391 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "context" - "errors" - "fmt" - "sort" - "sync" - "time" - - "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/bpool" - "github.com/minio/minio/pkg/color" - "github.com/minio/minio/pkg/dsync" - "github.com/minio/minio/pkg/madmin" - "github.com/minio/minio/pkg/sync/errgroup" -) - -// XL constants. -const ( - // XL metadata file carries per object metadata. - xlMetaJSONFile = "xl.json" -) - -// OfflineDisk represents an unavailable disk. -var OfflineDisk StorageAPI // zero value is nil - -// partialUpload is a successful upload of an object -// but not written in all disks (having quorum) -type partialUpload struct { - bucket string - object string - failedSet int -} - -// xlObjects - Implements XL object layer. -type xlObjects struct { - GatewayUnsupported - - // getDisks returns list of storageAPIs. - getDisks func() []StorageAPI - - // getLockers returns list of remote and local lockers. - getLockers func() []dsync.NetLocker - - // getEndpoints returns list of endpoint strings belonging this set. - // some may be local and some remote. - getEndpoints func() []string - - // Locker mutex map. - nsMutex *nsLockMap - - // Byte pools used for temporary i/o buffers. - bp *bpool.BytePoolCap - - mrfUploadCh chan partialUpload -} - -// NewNSLock - initialize a new namespace RWLocker instance. 
-func (xl xlObjects) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker { - return xl.nsMutex.NewNSLock(ctx, xl.getLockers, bucket, objects...) -} - -// Shutdown function for object storage interface. -func (xl xlObjects) Shutdown(ctx context.Context) error { - // Add any object layer shutdown activities here. - closeStorageDisks(xl.getDisks()) - return nil -} - -// byDiskTotal is a collection satisfying sort.Interface. -type byDiskTotal []DiskInfo - -func (d byDiskTotal) Len() int { return len(d) } -func (d byDiskTotal) Swap(i, j int) { d[i], d[j] = d[j], d[i] } -func (d byDiskTotal) Less(i, j int) bool { - return d[i].Total < d[j].Total -} - -// getDisksInfo - fetch disks info across all other storage API. -func getDisksInfo(disks []StorageAPI, endpoints []string) (disksInfo []DiskInfo, errs []error, onlineDisks, offlineDisks madmin.BackendDisks) { - disksInfo = make([]DiskInfo, len(disks)) - onlineDisks = make(madmin.BackendDisks) - offlineDisks = make(madmin.BackendDisks) - - for _, ep := range endpoints { - if _, ok := offlineDisks[ep]; !ok { - offlineDisks[ep] = 0 - } - if _, ok := onlineDisks[ep]; !ok { - onlineDisks[ep] = 0 - } - } - - g := errgroup.WithNErrs(len(disks)) - for index := range disks { - index := index - g.Go(func() error { - if disks[index] == OfflineDisk { - // Storage disk is empty, perhaps ignored disk or not available. - return errDiskNotFound - } - info, err := disks[index].DiskInfo() - if err != nil { - if !IsErr(err, baseErrs...) { - reqInfo := (&logger.ReqInfo{}).AppendTags("disk", disks[index].String()) - ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogIf(ctx, err) - } - return err - } - disksInfo[index] = info - return nil - }, index) - } - - errs = g.Wait() - // Wait for the routines. - for i, diskInfoErr := range errs { - if disks[i] == OfflineDisk { - continue - } - ep := endpoints[i] - if diskInfoErr != nil { - offlineDisks[ep]++ - continue - } - onlineDisks[ep]++ - } - - // Success. - return disksInfo, errs, onlineDisks, offlineDisks -} - -// Get an aggregated storage info across all disks. -func getStorageInfo(disks []StorageAPI, endpoints []string) (StorageInfo, []error) { - disksInfo, errs, onlineDisks, offlineDisks := getDisksInfo(disks, endpoints) - - // Sort so that the first element is the smallest. - sort.Sort(byDiskTotal(disksInfo)) - - // Combine all disks to get total usage - usedList := make([]uint64, len(disksInfo)) - totalList := make([]uint64, len(disksInfo)) - availableList := make([]uint64, len(disksInfo)) - mountPaths := make([]string, len(disksInfo)) - - for i, di := range disksInfo { - usedList[i] = di.Used - totalList[i] = di.Total - availableList[i] = di.Free - mountPaths[i] = di.MountPath - } - - storageInfo := StorageInfo{ - Used: usedList, - Total: totalList, - Available: availableList, - MountPaths: mountPaths, - } - - storageInfo.Backend.Type = BackendErasure - storageInfo.Backend.OnlineDisks = onlineDisks - storageInfo.Backend.OfflineDisks = offlineDisks - - return storageInfo, errs -} - -// StorageInfo - returns underlying storage statistics. 
-func (xl xlObjects) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { - - disks := xl.getDisks() - endpoints := xl.getEndpoints() - if local { - var localDisks []StorageAPI - var localEndpoints []string - for i, disk := range disks { - if disk != nil { - if disk.IsLocal() { - // Append this local disk since local flag is true - localDisks = append(localDisks, disk) - localEndpoints = append(localEndpoints, endpoints[i]) - } - } - } - disks = localDisks - endpoints = localEndpoints - } - return getStorageInfo(disks, endpoints) -} - -// GetMetrics - is not implemented and shouldn't be called. -func (xl xlObjects) GetMetrics(ctx context.Context) (*Metrics, error) { - logger.LogIf(ctx, NotImplemented{}) - return &Metrics{}, NotImplemented{} -} - -// CrawlAndGetDataUsage will start crawling buckets and send updated totals as they are traversed. -// Updates are sent on a regular basis and the caller *must* consume them. -func (xl xlObjects) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error { - // This should only be called from runDataCrawler and this setup should not happen (zones). - return errors.New("xlObjects CrawlAndGetDataUsage not implemented") -} - -// CrawlAndGetDataUsage will start crawling buckets and send updated totals as they are traversed. -// Updates are sent on a regular basis and the caller *must* consume them. -func (xl xlObjects) crawlAndGetDataUsage(ctx context.Context, buckets []BucketInfo, bf *bloomFilter, updates chan<- dataUsageCache) error { - var disks []StorageAPI - - for _, d := range xl.getLoadBalancedDisks() { - if d == nil || !d.IsOnline() { - continue - } - disks = append(disks, d) - } - if len(disks) == 0 || len(buckets) == 0 { - return nil - } - - // Load bucket totals - oldCache := dataUsageCache{} - err := oldCache.load(ctx, xl, dataUsageCacheName) - if err != nil { - return err - } - - // New cache.. - cache := dataUsageCache{ - Info: dataUsageCacheInfo{ - Name: dataUsageRoot, - NextCycle: oldCache.Info.NextCycle, - }, - Cache: make(map[string]dataUsageEntry, len(oldCache.Cache)), - } - - // Put all buckets into channel. - bucketCh := make(chan BucketInfo, len(buckets)) - // Add new buckets first - for _, b := range buckets { - if oldCache.find(b.Name) == nil { - bucketCh <- b - } - } - // Add existing buckets. - for _, b := range buckets { - e := oldCache.find(b.Name) - if e != nil { - if bf == nil || bf.containsDir(b.Name) { - bucketCh <- b - cache.replace(b.Name, dataUsageRoot, *e) - } else { - if intDataUpdateTracker.debug { - logger.Info(color.Green("crawlAndGetDataUsage:")+" Skipping bucket %v, not updated", b.Name) - } - } - } - } - - close(bucketCh) - bucketResults := make(chan dataUsageEntryInfo, len(disks)) - - // Start async collector/saver. - // This goroutine owns the cache. - var saverWg sync.WaitGroup - saverWg.Add(1) - go func() { - const updateTime = 30 * time.Second - t := time.NewTicker(updateTime) - defer t.Stop() - defer saverWg.Done() - var lastSave time.Time - - saveLoop: - for { - select { - case <-ctx.Done(): - // Return without saving. - return - case <-t.C: - if cache.Info.LastUpdate.Equal(lastSave) { - continue - } - logger.LogIf(ctx, cache.save(ctx, xl, dataUsageCacheName)) - updates <- cache.clone() - lastSave = cache.Info.LastUpdate - case v, ok := <-bucketResults: - if !ok { - break saveLoop - } - cache.replace(v.Name, v.Parent, v.Entry) - cache.Info.LastUpdate = time.Now() - } - } - // Save final state... 
- cache.Info.NextCycle++ - cache.Info.LastUpdate = time.Now() - logger.LogIf(ctx, cache.save(ctx, xl, dataUsageCacheName)) - if intDataUpdateTracker.debug { - logger.Info(color.Green("crawlAndGetDataUsage:")+" Cache saved, Next Cycle: %d", cache.Info.NextCycle) - } - updates <- cache - }() - - // Start one crawler per disk - var wg sync.WaitGroup - wg.Add(len(disks)) - for i := range disks { - go func(i int) { - defer wg.Done() - disk := disks[i] - - for bucket := range bucketCh { - select { - case <-ctx.Done(): - return - default: - } - - // Load cache for bucket - cacheName := pathJoin(bucket.Name, dataUsageCacheName) - cache := dataUsageCache{} - logger.LogIf(ctx, cache.load(ctx, xl, cacheName)) - if cache.Info.Name == "" { - cache.Info.Name = bucket.Name - } - if cache.Info.Name != bucket.Name { - logger.LogIf(ctx, fmt.Errorf("cache name mismatch: %s != %s", cache.Info.Name, bucket.Name)) - cache.Info = dataUsageCacheInfo{ - Name: bucket.Name, - LastUpdate: time.Time{}, - NextCycle: 0, - } - } - - // Calc usage - before := cache.Info.LastUpdate - if bf != nil { - cache.Info.BloomFilter = bf.bytes() - } - cache, err = disk.CrawlAndGetDataUsage(ctx, cache) - cache.Info.BloomFilter = nil - if err != nil { - logger.LogIf(ctx, err) - if cache.Info.LastUpdate.After(before) { - logger.LogIf(ctx, cache.save(ctx, xl, cacheName)) - } - continue - } - - var root dataUsageEntry - if r := cache.root(); r != nil { - root = cache.flatten(*r) - } - bucketResults <- dataUsageEntryInfo{ - Name: cache.Info.Name, - Parent: dataUsageRoot, - Entry: root, - } - // Save cache - logger.LogIf(ctx, cache.save(ctx, xl, cacheName)) - } - }(i) - } - wg.Wait() - close(bucketResults) - saverWg.Wait() - - return nil -} - -// IsReady - shouldn't be called will panic. -func (xl xlObjects) IsReady(ctx context.Context) bool { - logger.CriticalIf(ctx, NotImplemented{}) - return true -} diff --git a/docs/bucket/versioning/DESIGN.md b/docs/bucket/versioning/DESIGN.md new file mode 100644 index 000000000..4593df048 --- /dev/null +++ b/docs/bucket/versioning/DESIGN.md @@ -0,0 +1,100 @@ +# Bucket Versioning Design Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/) + +Example of a version enabled bucket `engineering` +``` +/mnt/data02/engineering/backup.tar.gz +├─ 0379f361-695c-4509-b0b8-a4842d414579 +│ └─ part.1 +├─ 804fea2c-c21e-490b-98ff-cdb647047cb6 +│ └─ part.1 +├─ e063d138-4d6e-4e68-8576-12d1b4509cc3 +│ └─ part.1 +├─ e675c1f6-476d-4b46-be31-281c887aea7b +│ └─ part.1 +└─ xl.meta + +/mnt/data03/engineering/backup.tar.gz +├─ 0379f361-695c-4509-b0b8-a4842d414579 +│ └─ part.1 +├─ 804fea2c-c21e-490b-98ff-cdb647047cb6 +│ └─ part.1 +├─ e063d138-4d6e-4e68-8576-12d1b4509cc3 +│ └─ part.1 +├─ e675c1f6-476d-4b46-be31-281c887aea7b +│ └─ part.1 +└─ xl.meta + +/mnt/data04/engineering/backup.tar.gz +├─ 0379f361-695c-4509-b0b8-a4842d414579 +│ └─ part.1 +├─ 804fea2c-c21e-490b-98ff-cdb647047cb6 +│ └─ part.1 +├─ e063d138-4d6e-4e68-8576-12d1b4509cc3 +│ └─ part.1 +├─ e675c1f6-476d-4b46-be31-281c887aea7b +│ └─ part.1 +└─ xl.meta + +/mnt/data05/engineering/backup.tar.gz +├─ 0379f361-695c-4509-b0b8-a4842d414579 +│ └─ part.1 +├─ 804fea2c-c21e-490b-98ff-cdb647047cb6 +│ └─ part.1 +├─ e063d138-4d6e-4e68-8576-12d1b4509cc3 +│ └─ part.1 +├─ e675c1f6-476d-4b46-be31-281c887aea7b +│ └─ part.1 +└─ xl.meta +``` + +`xl.meta` is a msgpack file with following data structure, this is converted from binary format to 
JSON for convenience.
```json
{
  "Versions": [
    {
      "Type": 1,
      "V2Obj": {
        "ID": "KWUs8S+8RZq4Vp5TWy6KFg==",
        "DDir": "X3pDAFu8Rjyft7QD6t7W5g==",
        "EcAlgo": 1,
        "EcM": 2,
        "EcN": 2,
        "EcBSize": 10485760,
        "EcIndex": 3,
        "EcDist": [
          3,
          4,
          1,
          2
        ],
        "CSumAlgo": 1,
        "PartNums": [
          1
        ],
        "PartETags": [
          ""
        ],
        "PartSizes": [
          314
        ],
        "PartASizes": [
          282
        ],
        "Size": 314,
        "MTime": 1591820730,
        "MetaSys": {
          "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "bXktbWluaW8ta2V5",
          "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "ZXlKaFpXRmtJam9pUVVWVExUSTFOaTFIUTAwdFNFMUJReTFUU0VFdE1qVTJJaXdpYVhZaU9pSkJMMVZzZFVnelZYVjZSR2N6UkhGWUwycEViRmRCUFQwaUxDSnViMjVqWlNJNklpdE9lbkJXVWtseFlWSlNVa2t2UVhNaUxDSmllWFJsY3lJNklrNDBabVZsZG5WU1NWVnRLMFoyUWpBMVlYTk9aMU41YVhoU1RrNUpkMDlhTkdKa2RuaGpLMjFuVDNnMFFYbFJhbE15V0hkU1pEZzNRMk54ZUN0SFFuSWlmUT09",
          "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "REFSRXYyLUhNQUMtU0hBMjU2",
          "X-Minio-Internal-Server-Side-Encryption-Iv": "bW5YRDhRUGczMVhkc2pJT1V1UVlnbWJBcndIQVhpTUN1dnVBS0QwNUVpaz0=",
          "X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "SUFBZkFPeUo5ZHVVSEkxYXFLU0NSRkJTTnM0QkVJNk9JWU1QcFVTSXFhK2dHVThXeE9oSHJCZWwwdnRvTldUNE8zS1BtcWluR0cydmlNNFRWa0N0Mmc9PQ=="
        },
        "MetaUsr": {
          "content-type": "application/octet-stream",
          "etag": "20000f00f58c508b40720270929bd90e9f07b9bd78fb605e5432a67635fc34722e4fc53b1d5fab9ff8400eb9ded4fba2"
        }
      }
    }
  ]
}
```
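For ad-hoc inspection, the msgpack payload can be dumped with any generic msgpack decoder. Below is a minimal sketch, not part of MinIO itself: it assumes the msgpack document starts right after an 8-byte header (the 4-byte `XL2 ` magic plus a 4-byte version) and uses the third-party `github.com/vmihailenco/msgpack/v5` package; both the offset and the decoder choice are illustration-only assumptions, not a stable on-disk contract.

```go
// xlmeta-dump: pretty-print an xl.meta file as JSON.
// Assumption: the msgpack body begins after an 8-byte header
// (4-byte "XL2 " magic + 4-byte version).
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/vmihailenco/msgpack/v5" // third-party decoder, assumed
)

func main() {
	raw, err := os.ReadFile("xl.meta")
	if err != nil {
		log.Fatal(err)
	}
	if len(raw) < 8 || string(raw[:4]) != "XL2 " {
		log.Fatal("not an xl.meta v2 file")
	}
	// Decode the msgpack body into a generic map; []byte values
	// (IDs, sealed keys) come out base64-encoded in the JSON dump,
	// matching the example above.
	var doc map[string]interface{}
	if err := msgpack.Unmarshal(raw[8:], &doc); err != nil {
		log.Fatal(err)
	}
	out, err := json.MarshalIndent(doc, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```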
diff --git a/docs/bucket/versioning/README.md b/docs/bucket/versioning/README.md
new file mode 100644
index 000000000..40b6d2229
--- /dev/null
+++ b/docs/bucket/versioning/README.md
@@ -0,0 +1,37 @@
# Bucket Versioning Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)

MinIO versioning is designed to keep multiple versions of an object in one bucket. For example, you could store `spark.csv` (version `ede336f2`) and `spark.csv` (version `fae684da`) in a single bucket. Versioning protects you from unintended overwrites and deletions, and lets you apply retention policies and archive your objects.

To control data retention and storage usage, use object versioning together with object lifecycle management. If you have an object expiration lifecycle policy on a non-versioned bucket and want to keep the same permanent-delete behavior on a versioning-enabled bucket, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle policy manages the deletion of noncurrent object versions in the versioning-enabled bucket. (A versioning-enabled bucket maintains one current and zero or more noncurrent object versions.)

Versioning must be explicitly enabled on a bucket; it is not enabled by default. Buckets with object locking enabled have versioning enabled automatically. Enabling and suspending versioning is done at the bucket level.

Only MinIO generates version IDs, and they cannot be edited. Version IDs are simply DCE 1.1 v4 UUIDs (random-data based): 128-bit numbers intended to have a high likelihood of uniqueness over space and time, and computationally difficult to guess. They are globally unique identifiers that can be generated locally without contacting a global registration authority, and are suited both to mass-tagging objects with extremely short lifetimes and to reliably identifying very persistent objects across a network.

When you PUT an object in a versioning-enabled bucket, the noncurrent versions are not overwritten. The following figure shows that when a new version of `spark.csv` is PUT into a bucket that already contains an object with the same name, the original object (ID = `ede336f2`) remains in the bucket, and MinIO generates a new version (ID = `fae684da`) and adds it to the bucket.

![put](versioning_PUT_versionEnabled.png)

This protects objects from accidental overwrites and deletes, and allows a previous version of an object to be retrieved.
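To make the PUT behavior concrete, here is a hedged sketch using the minio-go v7 SDK (a client release with versioning support); the endpoint, credentials, and bucket name are placeholders. Each PutObject to the same key comes back with its own version ID:

```go
package main

import (
	"context"
	"log"
	"strings"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Versioning is off by default; enable it first.
	if err := client.EnableVersioning(ctx, "engineering"); err != nil {
		log.Fatal(err)
	}

	// Two PUTs to the same key: nothing is overwritten, and each
	// upload is stored as a distinct version.
	for _, body := range []string{"first contents", "second contents"} {
		info, err := client.PutObject(ctx, "engineering", "spark.csv",
			strings.NewReader(body), int64(len(body)), minio.PutObjectOptions{})
		if err != nil {
			log.Fatal(err)
		}
		log.Println("stored version:", info.VersionID)
	}
}
```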
When you DELETE an object, all versions remain in the bucket and MinIO adds a delete marker, as shown below:

![delete](versioning_DELETE_versionEnabled.png)

The delete marker then becomes the current version of the object. GET requests retrieve the latest stored version by default, so a simple GET object request made while the current version is a delete marker returns `404` `The specified key does not exist`, as shown below:

![get](versioning_GET_versionEnabled.png)

By specifying a version ID in a GET request, as shown below, you can retrieve the specific object version `fae684da`.

![get_version_id](versioning_GET_versionEnabled_id.png)

To permanently delete an object you need to specify the version you want to delete, and only a user with the appropriate permissions can permanently delete a version. As shown below, a DELETE request with a specific version ID permanently deletes an object from a bucket; no delete marker is added for DELETE requests with a version ID.

![delete_version_id](versioning_DELETE_versionEnabled_id.png)
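The two version-aware requests above can be issued from the same SDK. Here is a hedged sketch with minio-go v7 (endpoint, credentials, and the version ID are placeholders; use an ID returned by PutObject) that first GETs a specific version and then permanently deletes it:

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	versionID := "fae684da-0000-0000-0000-000000000000" // placeholder

	// GET with an explicit version ID returns that version even if
	// the current version is a delete marker.
	obj, err := client.GetObject(ctx, "engineering", "spark.csv",
		minio.GetObjectOptions{VersionID: versionID})
	if err != nil {
		log.Fatal(err)
	}
	defer obj.Close()
	if _, err := io.Copy(os.Stdout, obj); err != nil {
		log.Fatal(err)
	}

	// DELETE with an explicit version ID permanently removes that
	// version; no delete marker is added.
	if err := client.RemoveObject(ctx, "engineering", "spark.csv",
		minio.RemoveObjectOptions{VersionID: versionID}); err != nil {
		log.Fatal(err)
	}
}
```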
## Features
- All buckets on MinIO are always in one of the following states: unversioned (the default), versioning-enabled, or versioning-suspended.
- The versioning state applies to all of the objects in a versioning-enabled bucket. The first time you enable versioning on a bucket, objects in it are thereafter always versioned and given a unique version ID.
- Buckets, existing or new, can have versioning enabled and later suspended; existing object versions stay as they are and can still be accessed using their version IDs (see the listing sketch below).
- All versions, including delete markers, must be deleted before the bucket itself can be deleted.
zsXHa3GORbanEXuCb3Sjto`GJ|{`G5z;i3UO`qQ|JqAlg;p{;j~NmO#27{1A|A+c?a zE{D-kFPXUSk4fyNf_o9OREjPUBgZ#Qv05sNO6*rwHKlf80l59tmpP?d61;;&+wrUp zVTH;3j;kJ823)pB@+Jl}>Cq~AUSEog?n0_ zoK(MiUhF;PlS-Qbu4f7-bz~s_^kr`^-8e(Mr3N+Q{Tr*b2-PFzxtKpCDd!~)wGM?J z9~Lw(hhjVaLGg37ky}dILe?B7f%9@csb`uw$HmBx7{H>OiuehXNSmIC@~3p+QMDiD z2gMCq?G?9(3-QXT^ct3JWuAM1*<&?MZm~&)}IeJEv1ti2KBzi&cp^s(h zmxE5+c`s>KwSPx=Gh>>izicg|J$zjJ;();tblwth9DX zsz%WuhNwnIX4{Dak~-Q6zGTTkkAHsh^En+@fA%cx8#9piizSaB}MAu!-{BvZk*X$%4@v{ z^2XqVMTXB-|;Q{w7ZI{8HUpwK-KAxY;o`&!;XUtivgpL zh^4Ql?n;4oNU@wwF_RIU*$;xWohSg_d1rsUYwKBEoF&wdymw%kZha*RQ4HqyhW$|9 zp!@6sce$&@wM5#PG(lfSGAMEPQ~>*;fLoMs+*6ex^n(OO;&HxfWA4B4al6Rbk2MWf zae^`CsB}1Pje{D(Jo4H*Ie(L92pO-Nt*$I^G}O90U}Yc7f!OfpS+SwX$|P#4`cOyE||H3+u%Lsr8mk~D?Fqa zZ&?-;;{4jq*2&SeG4>O7nxPww6xt z6E2g8wkKH^s~clLEvlqfKc6?*WQZNVn@V~3B%PNdAl-mE*`k9w=5dSu)_h$aPAQLT zk6-HtqeT=@1@ym;xx6Tb&#*d-e3{HeHPBR}yLS_AV#O$uU%GD&3M_|e!yg@dw834A z323{No1TKy!)cpJll;P4;YYpHT;6OS>#m-mB!J6ApbhWpJ_$0J*rp$JMnI+5`OEZo zDG9__cHMD=5Y>mJoxQ~KXJ)M<7AAO$n`k!1;Jv9qmqT0L5BM!^+;`kjo2QS7#?6ZB ztuDa9p&Wvk?{91GlZ_HD{0C1>4D2Oo?=OQhtXM)l#M6FY(+m#O^lQYkoi>4&4Mx9qQw$dvXZe@)+8oP znWIBLWjOT=tAn>AV_)#&9jTkY?=f^Fcw1xFk?3%{Q8s-puGX02q+C3ZZ=BG+kPyEk zI}#$sB3=!~4;#;c(P@t)ZEYIp!!0z!&yu{pw!x)<6Sz)Y*qZnVe`+((EwYE^kQdjW zodmE;j1h#JV&!X{TbvxOwS61AI8Hxu__4DVc6qV*E6SwUV=u}#7EQF$gE9eS=U3O8 z{zUSNB>Ya1f%ilqoRYav4v5QpWhljuX!En=9@RXgvFVbuJ)lsT*N8|>mlM1#c^ioN z7W^$W%wq-7XTouoJ@_@^e3_RY$rS&^>uK)hak6!@*)9nExmf&)p-@wLnOddUMb3aO z@_}*8v1s&#I?4PEer>EMfgE(#mz7Uj18jFjNK!z{<~D^61SP_4_ksbd?;ugd*O?ll z2r}ONg*^M?5nnNaS=Q)KRZ_8;jJ9t&RHQ9V8fm81pr$6Gz|GF)eLc!J ziBa%S;F|p!AJ4ojo@C0MBV)&a@G&D}Jgcp?77;ErEgs6aeH-uoQ=!Z?RFFt7$Ak+TYVui%7 zlM{|wp{sL-P)L+Rt*EmY!Rd51bfY@nkNr7Yc6b1XW{DFr==}tf4Kf2?oOH}BawxR@depr20qdaab&So9F{eaB5}p5a*TovxsRwz2gIOpG>363{ zz8+eB()P5r3}92Y$s!OM88wuR4rHq&+Qw4oz3*zuE+2TQUHn@1Mj{a5$a@~O7boR=ssm{=@*UEj!sE<@Sg4MT<4+BHED>sfL|M# zTze7SJmy#&`I|(@i9(j@6@D5mZObHODJsag@`uExF{B!vW{1j{8>3yR&Kt`hq8Bbw z#D@9pUox);_W_%*2)!AHvKsr;nAOW!XR3HS42oPL)s zZ%N>cfM3;(>VN$>Cp)y0HF*x6@9z;2qw>rB)J_c0Vwd~@q6jy|dC{OR!Id>j0p(2>GGW>z!R&IA zHCzqjp(dU|$P?~Jq#!Xgw_lUf#8v1`F>1l3uY}Kma=K$aB4RBp2-l10?vv;Hu;yr{J zV`CwK3=R}`vW4vi5qFI;mWR4#%MmtZL`Nj&8es$P1z$Jyg2L7nW6oW(9NEKTp5nwM-F%rL?+CG^NuDw-L*R4?#5}6Dw z=;e*q<={trINZ9BNHK5GdO!M=d*wN3EzC6fD`jVs7STCJ!TR!CixFCIwEjQ<1SfI` zP3~o=X*g?UU@+w}^0zop_N)Gk=l8xh=cNnoBbBd&i8{##-LE&VzaRb$2e ST@51$c&R9BC{-(1h5aAeZ94w| literal 0 HcmV?d00001 diff --git a/docs/bucket/versioning/versioning_DELETE_versionEnabled_id.png b/docs/bucket/versioning/versioning_DELETE_versionEnabled_id.png new file mode 100644 index 0000000000000000000000000000000000000000..c3beeffd984fefcfcc9d8c43b39be91dc5d35070 GIT binary patch literal 39988 zcmeFZbyQVv*EYK8?(P(k1}W+82I=kw>F#b+L|Q;Vq@__hrIGGZLApD>d;5F7=Y7u@ z=lt>f@r`lL8Se(Ru-97mx?|pRUh|sQGFDAR76X+O6#{`^$jeD-Kp?P&5D3g1G9vio z{WR|;_$SU!OV>lg%$wTP&BfZz(Tdu`*VT&J%E!(c0`XaRnY4`2&4C=Tno&S!+HseF z_ab%*Lm=ptyu~m7kVzB@r2ehIE{7i}VlB;4ObocVbw#c$#5Fh#&F+M=zu(2Z!%%v{ zy`(miZ}^Zmy5pDm-flKOoGSxbaAty=4Rt&<6xEcvGd}h6+xvIa=J*jfKSoDMzm zU#CMa7ErVEv2xUvvU9R>b_ZP$rsd_}{@d7p9o749N4fd9{vL(4Bp~l(W@DveXX#=4 zUzh#Y0XZu>8(T1v|H~z+Zgy5+4xpnIrsbq%ht4GXzortb4Ew*A20B0g=U@MGlmBRr z|7W`XXS)8Q8TgM*{vYf5pXvIKX5c?M`G2hI|69|A`d{GA${8R!ZvfpYX66e3?1Oig zkk>*6uK;Ar7zl(KA}=Mb<+HH2?CYaxwIXu#bv~;?K8xG#N1~WhsKh*uHymL=~l840nzx-v(k%2@#-(P{zp$M@i|Lr?Q@!MA(vU{WZ z@a<4H*~MTbbV>-R%_=wCrFYfh!T+l2X+hx=b3E(`oEwag!Kr(4$S+6`%o;AF9qCAn zYU~wkQ&IAti$dLGlQYhW=jW${kiqa`6O<3-$)2v19>H)SHaEJ~Pac0sNlAHp8}45B z|6mEx{z%z=mC~Qv%_poX>Tw;-#_k)j`(U)|O3j0XGr>t?iifQ(iVSo0A|w#<(DR#` 
z)_je&z=EhH7l{M41D+QNXM&GEDn90BGHGh825A|orBBmnq5m!|df1!(#^1LPvvzoz zzoo`nN($XjYO3lkx|CJG<2;CBXQdEX?!NWgeCcAoiuD>TEg~B&soclXyc)^kSy8=QqRR63OGCx~^x@&D#B6g7DU73;J2uWaONx}#y}IH< zRAuPaZ1szn7~-aw?QPG0l60`09&3X%}*I~?w!vN=9`f^}iFa~QaIB2jvMqmMLVMhVx# z_Kj6=*rNXLJ-}`LPrN3u(mENa3ks>>OqG_$kf>o(;KY0fxFO>d-1lo+fto4sBn;wF zVk@eKxTW-^GoQk6GjvNwHDsO>d}&gnf1MGZ zV6@ImCuzzyI`q(xHnGE3L$Tc zgOYF+d!JfT7t)i)W=2SeD;23Dob_c_mky+uvGK|)Ny|7@$|L1)BH34qPa>=O^^zVaG=%ez$ZB=D@C$NB9y5Vd|93 zfNn95rJr!`(M`kTXfqk{QHSXARE;uo$ho7>bG$!Sb9m`k zFOEN`pS7@N>s8P^nrkaGzNBa46vMse@*4h5JXMRKzHjq*Bwx(dfhpo;{%-XX98Oo9 z39OGW926Zw&0Oxz^!Oy~xbg4^?*blCHf61o7+JO6Ce%}}nww)yb6}&$PWXI4Nkr1G zcDW246<)kV+`Zr##ErESV<9*|K`P9Pg3b4YG;l=xZFc=TB%6^CZ45{8NlCBy@i)9d z*_-0woD@%q>j5Q2I~@PAPefIVNW0EtKuxT;ymFq{Yrso^`3sM=s8pI~iz+{DNU&%AP>8hG> zv5_1PIqqFxjgcv!ZGZ%wIlM4dhh0|a-ZMAjt`b(WZ^NP@wVI7r)a?) zl_S$l4@-j8n?p>z$P||!&aX3-ucq}HyLK-&+DZ;h3$ZDX$U#Q}p6KlR5{S*lKGV5d z^{<|u+W6R8>w1z=YsSR=C35>c_{4m95M!>%3yoozH2RcHP`I^~m9jPB5D9A8lFwZ| zPg??Y?|%z88r~9obEaY#EA6QBeZyg`@vJIYPW1iiR)13B%efQ1Cti+A2;q9k^Jq4v z4=}XHRIsQd+2$^0@+W5C4U{SD+nvQWLm7S&kiL>=M%XM) z6#sLDOa9%HpX8q&daem*(UVJWhF0<|2spsmS720Y&r_$yscUJ3gyC4fk4qTf*-h0k z4e2c3swY9BX`F;BS&p3f&C8$J;ylAdM(Q{M34x7Ac{kC^$o5R^&y~6?Maw^J6F0RX zf$g)nA}|%W9P-;wFxyM#CQ+Z3pj*o_qO&|&I~^g121&qz*A0FC&#gy*=cCI4#p+!i z)IE6Abx%woLI#-kZt^lv6FCS-Qe8OnZM8(|1eDvWuz>f4e$T3(+};_%l+VY`6oU@w z`@?gA{#B^J)oVwxRG=c(QUVWWUqPyGMEI5o;V09JIspuN4bJ{l3R+_1&fSYnI~Q3- z;;6+xp^NjOYSi#0trQf_R3KiED?4qUA+B9s=RXg^^QsrX@aQAoMxP(x`D4n&Mfpap zw?q1Db7MX-Hi|z|0VYx*wBU7E_vmKbS8Vi%H7~z4yF)P!0>bi_4aR1jcHZKQxF03O zy&=1pmZ)j4sP*KKl%kiC4C%090!=U>YK$SH?8e3owjh6(On)WZCMFb!X<;w8=O$7l`KX#GO# zWZJ*cKxdg|?H02xDWcF-P#BUT0#BG2@Vr8>&ZDXIQ@s~8Y0}yI%e5^kmCJAUG@5L? zrv`ok7MLJh2)t*~6jx_S^PmXgv6jq0O~EaN&&)3O@JEU43c`qy>gkY{n(FS1CiIO- zjoJQs{6ho)Jm`-3=bd|?BFLS>%cX+kUDf?ia>E+&kGF_98i{R_KPuPv9`?yL$#{!F*BafB0OgOMTdwpTdUS0KRH5nuscx zVs_OZAwt6Jrg`Y`jC}NJzKgEq-D6s-#(E-1enDDP_>K=0sZ6(iFULd`g1u97)^a+zh3$EgdEJ0xSlu#5-Q(e} zAc%@ryfi!5(5rq+us->c>^MFvI5Q}Sp1fBxH~>*+wHX~XEsTuxUrS{6^V^V$YII(` zOI82*r7=v6RM^vpBW~JtM(6=cfdTC2l(;?`EC{IdK|;>e+yt|vFZf~2^$rmY43NQ1Vj3IQNKz5X>w~=7Uj2w-TH4gG~~(^{QF^=DLS^> z1b!=l$8e6iwJ4XufwFl{r~M98QKRp}$qw5|Q(f_oeaJza@-}*a1YPzh4={6d?4MEN zCea2*kT9DVCu*|ulN3u>v`_#CgKhv_a_;O3u++NVi>|Aa-XHxdR{SnRoBnh#+1faJBHFn_Kve=a+mc4+I+)?^cjg1r5nH0t4}Knvcd?HUARalZ ziuoF60%NmRb{0?{KY8l7svBAF5*$}{xToI^oI9QHVHcWeXW7u|&hgzO8-LN5r(UBQ zoObI44W-{YR-iF*OYx7md;guIj@tI!w;X>3)hex;4pc zGt@eP+-QnfiAf7}=}xr0YgtD);J@4~wJCNg#V|MupKXe#OXz^5dYKulRUoFN*XkzPBRi7L0X|4c< zNl+aEyU+tEZ_pWmv)_?^LXII~tX9IZfdjyLFiKf`vC(+3Am-ZgXMOb{B+PzJ#FM5i zbUz;7^pML8fmjhdu`PG{GmSUxG|`JzeWyB>-(lXTI+`=PCG_sghDi8aqq@UOe*bye zm?Cq4I1B___eDOII1bqes%flDJ6KhGSuaMaQogi5pDvq8|L)G@dhtctmVZ1!*4ZER zlC7PUq6fHcu3uaUqz<7@s;qY+k7jW5z(Qr$Kfx}2`{}%NW;%O@ur=CULggr>TtqPB z{O$ceZUD#iG<eNlmgSZDm2yoEd^FxG1aC*RxT(Ens z?+Cu5IA2};76goF2Ebnlq1=woIjok~?4({hT_xojsx=~r_ONrn>G+LDqT5$8dJooI2qo0ddV1ByxA}3$X{=za{ zM<}7jQ3W8{;v|;9=TGb`hqW6?gaiz4@D~g`o3xU2-xHGBSDTU0P-l9whw5x9)%`X_ zSax;Qjd!z!e(Z(K%giMdk-Bn2{}c^V|(1!K$h-u@tfspL0ne+_?j}f*1!6z8huZM z-mg!~2FZARiDA0562_r0z)Bz?VDCxfzwT}*!CY4({-rr+N|_oRA{rBQpXgfe$Kj{( z4D1jb_^`@J^1-x>0j&X!!-RPbqAiSp#FJl@-Ic*V59B>qc3E&JRKLIUH>Pl7-&v@2 z?zL*a*SlGa>*psn{o_gGs<1Lt#jAYs5N2ClY2fAd}HDR0yu}(J2f=vfGN{x@5iP8A6Ny5h8GNJ5KXnahN z5^%(V$wz0Xt9^&3-MbeQ7q39vY+tFrll8554ESlX+TtATCMZ1DT3pbs(Hg0Iz{A2)Q_l9A^|euYki6If<(s$h9i&3p=?|L1CBczMfd9Q2mFPR(@|B3^K@HeD;VQlpL=I^f z$*cF=V4r-cV*;b)@IGcSyuBTQco@Ao$syA2)UuQkB8)C9#b^}ugX>czdt==9d3I*w z*V9$8&|df&tpqDR!)%ezU1(^=w?}YpdbEdhD5`!-daS%6Mj*snQo+J!LO3uATJ)}b 
z`ga1UIh_uqglcadYiKYFrtMre?{!MxUN8RXoQ@BUe>0Yy_$3baZ4y;viH>ll2;F_~ zLn|T_RJ?1yM24(QV)1S)6X~oU6JSZ$3?J0rWhyW`2cIFnM@@u;+~E{c4t>2`h=!tw zkL+>E(5U8t4*YWUM#klj^wmEXKdByLp}PtIQNgzIuB@@jh*h!CEPKP@LR45WP9Qb0)$QhZQaZ7M|=Cj*;jy5 z6#XMBOsOMgA0BwY!mRimhz`6Ra-9Ue?&ov=26>RQR0JAky@qB=pQuQN{lIORz1T7{ zg4|bI>k+d==i_z{n_;0d_vTV49Dou;{Z>S1pM_psPBz_7kU=1a-;Q8_i_7-D&KM`D z1N~}(F37HbRZq+p+NK=Jh_H2!!;z@ztRJDf-~As^sut5R{nh;~JIGV`siI8gP)=Lw}@S|Qz@y5s?Ho@hYXh8g4nQ58MgFf+LnrMe~p zc0J{Alh$YyhNqgdVvz|_!9%uS2$8WTFEp&q9a|9`G@XguZQl2%JWF4{?l7hV2nMl9maa+zFTpCjLr&Li-}gPQxB1N%zMwoRw={BKzrZsFMnP{m?Glgx#eC9F5C9P97Zp<`>H>Z1^3wu8`fs|OUy}D0=%A7e%(@#`?ztodDwOZ z^KP^mQE|%raYM01YX_|})&mC0o*xI?CWr|p9*xjg^@qv_nryQec^T+Lv$;@4H zWJC#&aL^DVpYspv?Wjs^_Vo@jb zAg=*WqG^2siDluy8mYYK7cgRsxNEO>**jt+-vw?rF$4Vfy_Wgtjr*2)_l|O_A0o)q zq1*53FfQ2)Y!pB;NEB_?kcaE7ZtV(iJKmpHwr}SCY6Q?`15*r|$Mz3}fs5~NO+Fra zlL`X`B0$YEmpK`2 z2=f7$?yZA;`{nBH!Szo3>Q*%PmM8+|A1%ccRX_Odz|28t{H}r~&Mf|KT$g&vni*+B z`9KsQK?8h7mg`w9$WkfNSE2!W2JrrF1>(q4zv|~)Ci-}RqV%2zQz9QC$g6F3k{xG! zD)-Uc@M1A{WeKm43)ib41XNb;$wi1L6vdN1r3uDWyG#mKqW-TF>6htmpuz)dHSFr|T?+#v-HhT&h9 z((~}g?(Lzn1|u%*?8p9|j7`4;Sq-aaoQDZ`6Ei=O0;lU!tQ9yNI<Zj%8aTa7m#KK;QOC8D4bH(!fcOurRwY1oO?SfK=7B@6fdWGH@4P@< z{j5dTwOvFxD-9)4UoJPddXC>)UDG?_g%u>m$yiT*(H)nqnjk8x5JJS#)8PPYz}%~d zR-^BLMua6rRn@AjW_~P!=Y8g57gadh=o=;N0#ur+nR7P&2m;|?Ms9_^|H*&srrP;c zvH}Sab-Sb&@banADARMh_-*$q!|l{XBGEwJg&o*>YtSbjCRnUS6+@8 zLL~)a)YwOcK4*n75rsZCW9&Q?e!sc}@@9iYLXZt2x98SpLt>LeF+%(!B-TlYi~?1L zUOSBDGQE?tbR-m@1Znz;zny0{h-dgncb)BUSI{8@7z4@1TuVR~ z?<>u8^fcXBlFuh8QTk5CS3i6t$(A9BuKUu!p4NbGuiE9o@@vQL@sonoZGXrL%n^>k zUl<;CTA!w%e_kI)K9V$aM|+}JS<7zZkYzq`N9h%@ASDYZ5mVZ00-UH99xii^y+Q}d^9 zlT-8*;@yUO88S)-AQv$h!p4nQ7!3?l`wmjev*(>~Gf}366?)rthXb#z6?Qj{uab1> z8z?$3Az2-gkx*>(+)2* zJ0;N-iqLQnp3F$lcLGBUVTAcrQg!lF%9vyfMnPznOMmf79=L>YZo4*XA^~ge?O*oBlUym+p`wIwNpf?R5hu& z^P9Jn)ERHTTVc|_SwYYLz={(;dn+U&OOIQo99eRqmGULL#F83pNm_x3O2O^adU`R!RsT9qFZ>g8zr- z*fG+ZBB65QOB#pUa*@TDkQyyl*bseZN+=!t|L>QiBx06n2N!G##hE6ISh}B*?|5LM z6zz=S^CAsMOrN}sG5vowj)6R#;lQ}$Cc#Du(fj}03mxH zPo|sm6(ja=Lhpu#Uh#065(6!4D0NIiLW30T1U_a|mWFf@?|H58ODR%K>8Kc8gLZGH zH0I}U=CS+}Mg}k)hK~3c4GTM$2;aU-aZN1zAjOIi~CBeau|L0)Pqwwi4@R4`JEqiwolyP71GF=5>fT^2`t@6K=r3i zV5Hh<{6J#zesXdpRHce4sgE~#=ueW5;mW7U6}GeHK-hM6#`u17iG1Ck-^FAsXj5(D zBIqiR2om%m{#ZBka#E6Y4c2xqg&6I#n;d0z*a3jMZXHufOeCrn-Nk} znpgOd%g#1xM0e*dFu318t(o0M{N&%sAf(BlfKt4E4kB*p`wAcQ z7nuvt3eqdDj_Kckid$?=l98<|ok?{SF#HiB`T^`=j94ji6MFJe#Ah32oVoAx3*;2nG#_F_@EDD5(}9vn2RenK8meIdv=H8fIZ`-?k2^I^oIKc zQ~m6`_6@7-RC4`GdM2W{$UyDR&>k3$Ewi>1OSeo$s? z3B<4!7{{$#d8j|!HM)IW!KwoGmtBmsg`h>vT$EP+Tv(;}C#f@v`j@n6*~Ia^gSrNK z23ht71=EXd2gC>Up8W7^Bq7ZZ>lmp5Um#zI(oUaN*XCs647zFB`*BQH1UN}R*&a|# zN#@kUBqC+jSZuOwfjY|ZJHKcdmef5A#4swjEBFpS2xS94hGuz%hgN;P+oZGVuk@Ge z&~s?h0w|7{N91KW?RV`r*_yCAE(ZYDcem%@LFII}P65x6dZ#OGXCBo|h^Ogr<3v$4QM~GPm7$M0Bc;0j%#flz#%<|)hF#NuOCjxaf7h+N?)6nLBc?w$QOcYh{r7^v99;URGVB;< z4eMu#r(?qMt_uhyKSAAf$^1`~J9oX=K49%<+D`4NYYS^fR!IVoJBHTyJZ-CIXWW0U z)uC-==^6a#udE0%`OrGK8|=z4Gm{{0h+g?d?g`(!B&t|ums?*m`DW>Xk=KZ*poVSi zcjxInJ(Fq5h~36VnJn7BCLLbX5cgz*Y>-LD3R+3{4LI4i)q1vK(=+WA+jFf&4A^JU z2P8`Mo*amkDCRZWCT~>AQ%td9dedzt<>=hYW$`7)#(z(8?Z zxnEse4V$dG|B{>0Cf%+-pO-bkbbd6~k&GVlzcBq&5zgvrRIc`&>bCoF zatuNPKCO)^@B3CwdcwPJ_09Jb4ai{j&Q+ZP&ypU`6+adzBKkQ|D1|!Y8a@j?e-!Z< z=E!Cr6ARY?L|uFPMyOf`QRe#PE%Q7rx2b&@?P=QM?V(9xc?I`-hcb
rIQxGba{ zKg`Tms<;_HSKIyX_@2D`Vh+Q{PMQF?QMuy6xqY9rh43qbV9&>+E&eaUPpRWJqh(gL}UUS6)#*ACj% zBn#6uh>bFHqR76bLE#xi6KGWmvC*e-3h53o~ zX}Si<;o^MnEEkqyeeM#G);w>Ih9+J5Ksge&ak9H284(}!X^YAqpxhHTB%OQ>G%Mv5Qr%Szm^&TQtdz+XvpW9O5BKcV$AwiO4-5kYt6A5u+mU&h z2g{3nw!JGiqJ|*t*qp2~&Q~-1*#W;DmEP-N=2suOEIfC`0lhgYtB|C1rC|AoYni+I z&bv3)ga&G)c-BeXfc#a{=ZupMONyJ_mppH1ooj9;UF16m0}SHNc{~m)6$FNn$}3}G z2)ZG3W$x^EQ_u{f-M}W?jZ)aFs51vu=k2p=PA?4-K)Dz-))0QGXL8=3Qslsv04X70 zAtXU@2`n-gGGiHIC%7Spne_{hcqjQZ|829p0y^014$mfhBTc(rP#^{sB1i~e5aJY3 zLHSkY0U7Uvl_5!om;o^LfMfy$R7XPzVMC@qs1&T#eN6#U43Gi?O7Q;TS#D@N)amLO zM4LEKc!id+13*5rj5KcR5LQ1J1&jqW0IJN4)$hS&-CDLGrQ7_Cy~_Kf zzq4;!>iwa41qK>?cJ@F`4ZdUH1Y3ksJKS+UH=4%D(q)q?ailixN2QLfY ze^v}k?%DZuTYE`R^8^AAoU>>s_~rwt0tl!e(47Sa#R-alYC0gGvoE}^gmSizuGZ=r zy$M1DK+pC8sSS7;Gmfn;)0x6gA0fpOa3`|wLA_h3a*G_`=^1V47a|*BB;6dg!Op%b zx8THUCgPQ;zHKkR@%0ND(j*B`nu(8)s0h(c2ci>F)HL!JQI3q$Zh+DStV*u!QA!v& zdW7HpwzCb=H)od?Q}3~ona>}l?NV~=T|ne-E)C0Z*tz)`(B#rU?gCw*LGUGTXI~bR zwTeaeSBi6#V^pZ}70o!A$XM0Juh5IT9*CVlSunqQKO7|9fAb+#A|y~;R%&Ls*JMcp zS(C#3zRQ8lpp}|k8_E03qL<=c110STR^$~vLYO7Xo^$J`x@>#_N5|r28fAR#1mM}mi0H|j?`u4Prha~`M5AYN~SKA3Mz|0{HW2#D(SCm)u zbez*|-$3kzhZ4dh$?U4QkX8K6hCZhpvKKse5y>68Rcp zrSF1w>%v8i>^(_A;q_-GU@G0I@lii$J)_q7KA9);6^#Z5Y$_kgElbKX?bCAa4?YmqGiZG zwg+n8*%udQn@PnDXMq6ONC{y=V*33@y^n;Br-Wn`fqX}kO>l4XFAQwzKQu2TmdM8z zu<)`cu^-?nfjZ8d-p>F4j;MU7WGF)^b5Nu|NI z2kss04M0)5*07SOEtu7hNJY$!e~8B$|gbX3rdq`s6No_SxPcy z@*r$-B5B6@DcEd4ht`yntJUH5kCmz-0UkCFQ>ocn7zj^5t~|hLNvBvAro^^8fyS+i zud8(5HY^Ee>)>u6&@`=epolbs*@y+kkS~S|WL!X-gwON>#KwPYB;~IpO*L8@n)DXH z&=Vu)7p)|{^FzwqrLe86Ei77rVp#wS0OEJkbom)ImG#xi0|#o(q%WBu9_LfzsX!qN zah0O^4wPmVi{G)ks}n}v9P+(Z}9FlvH) zm|>)OqnHZ+f$tY}Ed9^VN{m_=atyfR87;gsESd~&Nquiz2yKWzOkCbcrHaAETH->e zN8XBMq+N#_CyP;sK}i0r7N!z^+iyENB!Q(#fh<)}7`4;ErK+LlP*R1U*B#o0k8R2# z!XqPfA2r{LA&$>XVOm1=z4*X5F@Ctd)NIC@VKSI?x(tH9Hv{Wboxmp3iHuh~k`+;) zj?jIQ==Ev0vNw8>7r6RQi?akR-p-YPz>Wg6^adWPnl>jY2o;PJCR{%o6BESiD*u+n zIhZGB`QfVi)7w_pFS-uV#2C^giqs`s24#ww@Z2KtU-;?|XlV>gbJ=<*3{2qy*)29Q z!uZe6(f6afo}RRXNi=I{bfhtzlRJ|1#F93$5Rr7MR8#nUOe_K6AmHbb5xWLRr8dzC zla0CNE`&)JlA0O>Ky7xn%zkgdFmc)7vS4ycR!H(#7(x< z(=%Y2oXD7JtR$a!1=OH{Bmv->&WManfD=%QCFl%NlODPo7Nd8Z{A=9ZdAz{f&TjSs zdYi-jK}#vq=r~9AIdK!B&)SZEwm2SO!Oj>cJQ!HdemmNpPK^_5jgw9dj1Gkf7<_NF zsNNs0RPiCt+4ewOb3Eyo|0$v9M7D?F6jR(!@;>>R9}ba?6uq8@GI_I1wZWo0Q+e)W z9y1|FD=Gu6*mP-%^Uj)-f(SU5uWbwABYBS9K#5|uQU2#)pn+ zhivibAb2>M_i-4E{4`k2k@MpOwx*1u>-nu_409wB28Co!p3ZYv{Og|_`a^}!W1=id zUm-rZ=N%0RjYo8II{!y^Tkdx*`S#^oQy`{WE%OfD){ToOgqBukS}#eUN~sJ*)$Wb> zt;?$~1aPy-#7&r#Hfr_th&jfhGKtJGL9sZ+8Wbmr=;+#KIpXDZd2k~pg6dztqSX77 z;>7o;X}{`9p|BC_e(oyWr4Dq5M&*J2!i4PCD1_PWxb2*Be`NwYd^2X#4pbZ2R`IWZ z;0IKg(GB~byHW^^n%)u#;>7o=@%qv$Fc8tU4?_Y)kY4?m*xf_^;H87-n3j0uXTS=K zQ7MxrXdvPFhjpSRsH}jpdH}Gn&>TriA z3!Ki_E5yZDUGQjwHpyKv$JUlx$CD%!{>C~_FAi-xauboNx0sn3M#+<3pxK}=yh7x@ zgG~q63R)(&c$!|rV>^BwFv1{oxy=lAw4sg7+RkCewD9QKz0PchhH(r^$=i*H(q3#u zAA8Q5V<|e$fYy4#MbV}3z%)<(-Pe=VR~rc7_SrBP>=|*xKNA69e?vH8X)*qx9;h3t zf7MdV)i-B`mbJs`T=Iuwt=_=9k7mWmm%KYB3+JZxHOiSg%gdQNI?&Gky62JY{%1p+ zb2KC>Js~0CCy<8U&M?biV|V~gGMRZoD z7SRc+`|+fpYJbeD>!lXk@%iKLly_THc^wmLs zrtP$ia`-nTy#vyq0%le7xdL&Au5T0XgJcj+WR5YjuyAI8-=0eW#1#dCLhkR?qqsW3?!yvYIul0g8(j|SQwVp`L*4TBFT zuFk>};U13AF-$ z7e%}s$w_CiIDB^}DC%IVD{N?7t6$cojAa}T)KS_G{ut0SINR+vd3K!-1BkOrjN7t0 zHclNtI(c+FI0i~*mUj;>jp(1d*TU`5(?vwd+V4(#Iq?59{hIht8*Bq-{OyqfI4+Qu zktMyJ=h7?Jt!VQxaM(4Fysy9TOp*jo2|!hq7!JHZJ>5Tit;uQefDbn55tpckArKRR zT;j_9-m5(L5zY3TbXdewKt;rzcS`?d)wOT&br1_G-<=bQ@$C>(cEE(ku#g2TyaY_6 z9O)76F}z5i0~+tQS_0A5d-S_@6XCHA9^{>oxZ2S!c9ARU+!Bp@x4fbAv30^G%-?li zkAMVr?u!``kbmH(<2Vl9__GVuTVS0kX~CZ%-^sxT&S-IJJVlAUy_}!6}p^bdczb 
zNET~EnT)mNg*sqQWi6Cd^#vs(DL$O$y;q}d8e5t7eRRbzM2e3n{8U<3T&3wy@r4E` z@<421DU4U=@`4A+*Uh&<*ojU+l+HCSMgbWS=rxe=BLRpJTRX^U_M0DYV180t(H3hv zTOHFCy)$vDs|#VD*~{Pi?eHsQiXvV=k#X)kD|D!--zejGd1XJs_I8Y^^BY>pIY0Vn z2agXd5rq3DK=S36mhXUmXAxJERw7RZH)Y2i+w4+fB$F>%pWjfZ@bfCEU)zZbDojvw_O8q?Ov zKD@A$V8jy>RCrs)5+gye9$IeI9VS-Sj{(r+=AQNs7!bcJ#qi`#Z_+u>-03foso6T} zu>#reJ?7aQR`94pC&a*>xb^qv8Vf0Boa(Fh9mR5`laHUKnRP93*}6@y-Yn7l=uSI; zA!WOpU}U>rgA1;oY-YobH$tZF>F~4!1=3kE_rTe#R}PwFAm#kSXPvKx4DdWiQUxSc zZrT(I!=O(?l&t&IY_d=$ciVYG>*lK@A(Jb)508Kk2HrawXhQzc29yqf$1O;pPkS(P zcWBw}xrf}=baZ%z)2LXA4AfE+)0L^dcE2BFB8L(lESQEuCt zvfl8UR@P6;%Mmx3*4+9O1K%e9cv`?{pb_3*MfKUJrTC98C{@C9K4_v4{OQ{%BsQ1^w9_6(3 zMlhG;bsM>XnHU9Rwgz}sg*I2Lwt~QOdgN1;olKxI+5)N3_?At#+FLmHSe&S?>`F3V z%`Bc?@Z;tVl0%`q{5#p;%&5Xj3XNv?UW*Z8PF^Z$i{SxwPIC1Q^EFbxgS)-wvvIBd z1{SeB-ywUz9MTI*D`b=7XF+N`>)8%6Y(4@8&|zSQKwJ3M{skx_fHVX-vg?d*#iK9# z3<;XWcTEL?jG^Xd)(Urc^CbZ-NSA{I(LqTR)BoM$fpKxT?}8wBJPmwUcG_G<8a2C zim~AY(fYt!qGL-p&Jfau!D9=K-CK0AXgu(gi|4II{zv8rX}Z-2Tp$ZG{rmJxMJcE3 za;xAUEX(M-m0h?+Z=oRiza#CPamsAt zdtv@_Ff~>C?jeE`>q|mbac!)`_-)#nr~0%R<)-oIhJa%buqBsQqcdL<=!2IK*TK>W zT79aAhD7%#7f0uFNl!}!Vdp6J!afD^jf7kep4t{;{zTb_QFU6)XW;?{D3LOlcE!Y? zuOGQkrsm_OxKYR+##Iw2V6KE=e2jvw40DjZqSl=bBT7c%#cYs+RH>I&I!D!X$Ur-z z+G20YWE#}NQ#a8#&!=92fewsLR_Bzi23R$28kN0#E5}e};eZXEeBuNt-beqww#2gD z$-@5LRU9QG#P+8lqe)utZ4afmaiI}QDGmLH63Sb>43IhfartIS% z5wZPYrHC8IBOK%)Ccqg)76qoe=m=q8U zyL#Nq@io-cS$PzVz($V1?joJ<#Ah7X(7;d6uk5Xvsi_~@o~jhJu_qGT7s@GF^ym}G zIj*5FNS_gzsc&eg*Z-Wr=$ZeCF)WN4HgV6CS`3{A8~63mEgj#iV^f{2bo=3KJdx|H z`&jFiyXcnxsIY9wbZsgV#4+d!(Wg6SEW9xf0!yVD0~-;egdCcl{PbIZ&^p z>{;^$!o$Uq10Lcav^C;i$l} zJL`2W8>>gF-zWU#ZAYF<;c6*w7F!;HGke5G{pFxrlQ6PmG#Am(Za0U?t4u z+40e+@zIE%{|TJOcH_s6YhCNx)aXg3u9e1yCt=&@TOZx3kpcQ6QqYG6ii%7A_@i4s^OhYi0jM{ETH z{Yx5pDfDVra5eTncl*Ep0QB)+XSDpfI%;P1-m8Cf>Lm4p{A~bzHgQYC|B}LgU!ggU z67si+e_ip){X^<+7u+J>H2#`Kc-7Wh?4@fVt-Q+a(cnYb_7(8ogdO!}$YqkpmwO`7 zoZ*@76mHH3CI<&2CcY#(o^x<_Owu!%=sIvwwv<3#@@Z&nC@x14)V3@%aS3Voip42! 
[base85-encoded GIT binary patch data omitted — binary payload for the PNG screenshots added by this patch; not human-readable]
zTup5Irzrtmm$hcC+au*U4!pQJfndYgb+?+08N1a`8ld$luCLnIA6Py&IGf|&b=~BW ztjPKms{6kDKH9{c=S#a;o(B&+6uZRA#mCrKLfT$rW0EXV*?pim=DqtNlkGBMk%#v{ zaTr&B;HKj$1=XtfDRgmNVAzH)aBB-+6zJ>HZKiC0H_csMci`JN0jn z|9<}gj*<~w=pci;DM@*Hny|U;a7*CAxkp&NuD`7S1**EPID^^ZtmMtRO?_-B#RV#M z&KR-b;+FRY?XkvL&u);qPG7@mS1@SNe4}QtWd64o!1}X}pz+dtGn3IV8yf+|vst%~ zu%IJ0(IKdYD^KLVUp*9|)o1zF#?Yj$*^))pz2xZfOhs#jDDKv6QttWaM7-{*;ghIX z5;;jF-*i;$f_z5VT6?Zj2_CEvLr`;`3X+;Ud`rC za;@Mm$jl#Es4s^h9T8sXq13%D`aN>XnuA?lQMKPUH~!4Lgewr4*>H=w>C&D^r0c|} zd&UE^ky(nQhxhOCvXr&TmB^p5-OsOe#Z%;F3-2=N&AX|&oTUXM-p@E4f3bdnS)7_l zLsuDM?WgocLXF1?J`siRe)0nu713|$v?SPncKZ4-jc*cjeKc0i-%^th1FPhi-?* zuT07(d8}1ZJxQm$6w)2BE{~WOT;0(ju}TPQ;+af0B5w_RB2s>*sJcAqIa0YkD#SIP zyYqXyTRSXrWj;3{Q>mcl^qfUdw&n{XX+bEa{94IM$=u);irUAB&?k>oB^*p%G`??k z_#)OVhT3p4yf7_VP*32Y;9PwruV&*v*LyAgK~m{9*Ub){Qe$~E&dnJKLSl(cMu=xC z*$id#n5$JL{3%N^S37d39tKS83fmW2w&nc0UtfK2T~4oZd1<76)$#!8RR!t$@Aqp~ zt&~ybtTy}^;+q;!A)#Hy)z_}(wLrsl8kzNbZTp618(hdDa0AvOm6?cet#n&l9XZ*1 zn;`}ux_od2Y@c*TX5(@Dv$^+u6LIw}*4O5pd`EMCTxcC`s`)um!=p2eCNNx6CH!>aOjz%4Boos$giQUcDhQKHBFp+K95tr>V{4pind_1*c z+{_uLDz=-|x*|&|-5cT8aJ6wClcw<#~>kI;U zZp|y_EHx>X3$B-XyB^D^&Bkx*qjPZ*>F!3Q%_q;M)uT$q$?&XqPDbi%_}^l8l?|O=THrr2E*A5Z49XJblQU5(!zQDhqa}d1s!2Mle!$OrQ(-P{70}~k3 zps~Z(zK80t54J{UV(OgaGotgH)IXLt1&?D?KRc<_>2=KBnvOcSnm*iQY&JDWrLTK( z6iS0eKHs zrLNmXB=Lp;g1^&HCHtZ6a~xanl60yP3H#UbX+L?qB+wei{_411$KvM-nh8Woh}|j3 z)5|_lk2ZSeYow)6PWxJx{%mVZ-3eM|gq@c_tfmQ#p!+pjwt&~FC4(X-66#7$xy86< zy4&6*|BtWP$Py=t#mYo^PwhnMq_lVrAaSJZyw+cPa6boQHHSy}@NgdEbzc~*;ETY` z8z*$8g)=JY;CHYaOz8^;JPmVLpM{?ty-lmz#VS5+@625x)oeV`0EiI45052M8uL=9 zlcQ5rG{It8;7Qk(!Hw%Qv=pS|*h)wq3UMKGJ{UR!Ofo67db~XsGd!r6r#Q#HE}7PN zCi5Qk2`&XuZ_tf;uo6+*ey|_0*ZAwcV}(P2WF2NPgs@3dE;gFaD0*Ne1Zj!^@Pjco zndC%9DT2@RP+QQ&;zhM%-aZWkV~Qq>*Yo0(J*`QHk%XuB->}Sje6vO;kRO)B#{f(3 z)N~BGu3~xxEoVFqQS$9@8hL#E14G zkz@OxBN{6E?W+_&b60!t# z!uXPH9m`YEvlXgE9z$#<3)efz(?jp1kb_Ce1Sb*Fr>-LYVfg2cFmClBLtl2`J7&Jt zJjo_zf6q1aCf)Wvc5FPZB%iYT2*@pX+aT||PBr_fx|&2XBkO>buNO=SY5_jbgd}Dc z8XM3Hx!Zfvxc*}?Z10izB1eYKXV{uC$0SSGW8{>4st5alXa0sAMwh33 zLl2Oia>R(4gO)xz7%Afw3>-29wE4_EdKjfA2{!B-v4TbmO*j{%Ay67NJjquX!m#K| zs6`oepfPr7Z~}=1;JC1ZXu-P8Ql7k`(sT9X#vz~V^JhABxUSiqr^cz(gQ1N>!lD(s zVJS@1;C4MgY<2BFtWMe6+C=wrHXWlD8tiF5tQ)3$e?S+fx~lx(;g2KI`XsBdGhRNN zL!b9Ex98o~FVGamJ7bOisS!PVtF~@-&%OwuUpDi6G>#Zl@PAr_{7egf3bU3w5M{z8 zT36yat)q`3`3||RaR_3d_15vGYUx91qWlemFM24v)sMs%Eqo~ARG*SQL-41xLd}|h z!;*q;3@moT^pjFS2#sZg19@P@=mOqrdscjxst7Fyqz9~#u-+}a7a&6^PEIKfrMS3(>#{^sp#T#=#eb*E<7L?|v|Q`@BQ zd}xEqf6;wzpcM{QbL-JYOcRfO86@7%eAYvsGqT_>sM@_Bj9l*#2&2Gr$+(ENQAu@{ z)$!MEjp;Y8;7T3wIP9CpfP-IA6;X>8=q$8{p4px;ere2F`|H%;*S^(3Ock8n*EQCh z8rrx!D>jS}o|}t9ohDXo#8jJX>)!y2R|CfR8}>QZ7|xK_{0P&Ums6LsS*fQ0W7e+E zkk54Q`98e7!@aGM%P!gE+G^N$Ov#&hp%2T@<0h1D?H3PUk(%n+3{?;dXX}5exz&|| zSOo7Yi?GQ-vN~)tHMjvP;MKUskuPZ$nc#{2m&Jdg@#HB^vm03?V;yo4qdzE+zo)GC zq90f>E!R$4oiR*2M>FERpTu*Evplu*;}1*PjoEGTI{|l|8$8=_z=nlNcf9~;+pA8W zUHv+;AX+0lTjbQCV>h~GDUg?d51Z{bH+urmx1N zT~3v(ZY*IhB+Dy-@XD%7<>Y*(3|kfQga0rdX;D|M`cc>g&4sb{%oovXo{JYbi+1R) znGr(6={tX$W>+5prUISYuX1s@IC!f5H}|>VNDB6V0pa5e2g&BH+|KR0v zw1!P>*BS%>Jqb*;plXx)+~k;O`QP^&RBL{z@VFkL0kgn*EQOw@bR8Eckr+sN9W(u3 z8<>b|{-A8bzK5QnuSNoTA(JgcP&np+l8Vjfkb|UZ8t{w>w%BUoqX#td@iM* zP>;^|l6x*5rk}FoR}v@<)v`;lID=3xHEcChFeSiFjvh@lXhJYEs-pVD)o zV`EuEJc5g5odVNGZbutxRk~5o9TfUUiFeB4T4@q_?y#j zr0dNxSoiO^8Z&}uh+nOD-k6^S86Bs0BFXl=dm+KgyI!+SmghJ*lEJY3(~flG@~h@$ zkJ<)rNEDGxUUy+6Fh_D4C#Y|(Tv6EF&DX$#DE-)?S(aT=_=tsCmS=BNd7|`R9EbWz zk3t_t!H28#!OP7eHRdj|*ozI7hXdfL>S8zMBza0#Z5i*CcL~b-=oRaR`&eD}mMTNKts4?hVhN-g#WwU}Q@T$$X z7t9_QJmW79f)#=h`WNCEtU&xKG1IxD2&_DP%A4M}{FpyJ(-hbGj9if`oC<7mC~5Ti 
zgv?95qwb{2_TWfw=E%0w5?HLL!Q28E9x$;uL+M2uGHS__dkvKY5~mwCMZb5IOK3T3 zX~EyLyScmt&ej_i#{mO}FktNn?)mTNuM;QhMN*PyHy;G@wouPHxN0I+KbJrZ{u#9T zG!ZV0{~DRExSAep8xyCn7j?*Ss-0_cj;vlo@4=%P1Vd|0s4NyFPav!(q*Mf&cAG)zO%3cRMwze)HzP@ z9l<*}yV~YA`LX`h%LDI9B3lpMhL*GgS_+YwzIfbTYq4XCHGT)DO_3c*1!t33_a7+W zSV3uc+|*|K_r{sl%%%d z?1wJo6!-OYO^{J?VCjL=5vSDzYzs;$@Prh7$8`eao#<1z6l^415b9f_s^^$*l)7%9 z$QWzRB!A4+MfO1C?n9dApW&$|kQ2bhBV|5EDg}n4AMsi-gA7cP12)@f8${F4x->~f zgKYoq;F4dw_eq(P9(p_DzFXvxUDgGNHW)HD6Cy7ht0=*bVBtNePiZT^pq03%-SLMIMpyK~Ppa%WeU z{#J9HS9IqlA(c){u{=FhQUV&NTU16rX}~dibzW*`I>jSz`+$W3vX({Q&_$GHVy)1db5y-Bm^FFr!Gh|Q6#g~iX)W8KHT==0h$)Kc254>=7=N;qb zyKCT(cF>$fjOA#)^BnpXciSNZLzWa_=OK*VyX(VB5wkpGdf=KO;vZqI zBO!}t)1N1Sg9!IFRbKW)d+WpcdVmWKlOc?Wmi-jUJV|UH@rbZi^0pURJR*Zq&F&%U zAOyn^2u<<%y_0_U&_gqORK;wL0lqY|xoJBT?T;gRC1(33eD59p-F15n5)uGk(4uHw z;h9O~Knl@5vBqYiHP3QWZ^+%E0_9;#o6`!8iF)@46Ui{%O~CgiiowYxs-r6N?wCd;vE zU3b}W|8-fX-%IwE8y^K5N%-@Zn28oB0{D|vsF-abAB(N+JLb|?@I5r6k32nU58j;x zyqRg-V_mttHE*Z~C8mK7e8>a1nY0}AJ_aQv)SNu3F2=)Pz5 z%LoZ~VYufk77$4az|tL%o>j%q9&XPy32jiGW6J2Pe=dpJFNXs6=1_R^%Z_9>waY^$qN8jYx~O(my=pMzn?rw|?@(LdF1`z|Gp=oq#73p?#`l*y# zV{qCv-rwPTO1hb3tPf~STjZ09bq~y`N9#EIcFwmj`Ezle&SKLobG39moh80Rrba=w z9h+_}>yQYQjkP3rjSL1DGVc+%#^0edK_#&DqoqQ)ktN}S`(sIpF7XSymkhjq6D7Z1 zb~`w7_=89=F69LuVX4sSkUm+xCOft!%NPkYe5b#u=x(f?+ib%h^}if>AA+HA#^{BS z!>Io5N^8U4j`xNOLet&tHe)f$-+%QQ0Jj8#0ct+1%h$d0P|#iWUF9;2FImehx;fs(gt;5T+>LDubH`1v*JtMV%TC0K1G` z@xNI{5JGRbQoBJiO3nOo^n1w5_^E&@@zAeW2;!T>MU3BJo-E7o>033Or%&mZVhg__ zD2?+)*;9VbtEbZF?_fXfneIU~z^Iten(U1NI$Rt<-~+0>W*A9oCRB|4eljQHzB zRG`K9#ezXLV}XC?imbkJ1?ow%|1RZ?Gr|-JOM{vXVp%_(cV@B~^Qap>Qpr?0m$&+9 z11x(VvhAB*$rku23-yFi+B+je8uSYoC$D5Lg2)Ts^->mlbl!0=IVh)#khZI!g zvCj%HuP|nf*RlHKPj-V0M}<`8U#HyEdJWo6k;cgg@e0QGVr~BdSHna190ZR zgr>PE@L1i%E-3)vabMfk^LuVn+Gm^1-%ueZlqU&k9{$-p=fl6V^lycBtR=#xp`K$H zf@VgF&56{gKB7#cdj)b9oagDx2wvr8;TyO+#q7$B@Fl^#0gBIaJmdM`j%7B!8R>iZ z*_3UT$JFE@3pM8#!mE-ipm;dn6e&~}MuapbnYIfZG%5>4)Y|PI{gVtmIBS?^!~vBz z#~8V2_iEh&E*i`#mQvcxJMn8%^6l%l)!TQ2QSZT914@W~W_Fj&UfWbMtUXCNFu&HP z4Nbc$LGG&BU{NH_cY}R3=nGy8rh#<-1Kj zApOdgiG1b-1Q{5O&e$l$ljGp0>zCL6_?l`G9$uzQZIeInaPX!uyH}=0N(>@dT$+4? 
zZRBW(#ZGo|F$u7r7W!n2zdXGB6qbZADr&~>N z&pZpV1Q^AmJsEw<0p@(A|U3M>g07&p)6;Ls`oK zK?omVY(5MLTzb9<_wd?*S8B;bAYRLQ1%hR^-q^c8d1r$P)E}8q`f18pjRqxXKu0NL z$5427P8#(%CsYDd^39?44Gu)MQARWv$wNt>mdiJiq1pk{{U2BJG7E9Q@a|4R_ogkU zyqZpS)gcQG}SJu0vpSsJb?S>}wgHI~+M^*W6qA61d^ z9oHSV*6E2EyQM(Y_=TVA(em3&q1eX}6t%pqYYjsI8HfE0&L1eU~qKqdVBQJ@M2lX_@Y#RXygK+ z8h%Uz-pAY^tF*b!z2~kA*<>rffkBpkr(F*r!X!;kRv_i7lsIi5=3j zNA8U*DG2-c^Fy$g`ds*4)deP^8T_tjp`ERr9X#AD5|qCD6p; z-M!lSVbR`QtNMEhu$7qdD~HTR^y+>)b#^l7MW9Q|ju3pGrzqMjmhrv-$Ot<67&%oI zHB8!l%DhhPkcSM975ZYpYy5~B0*$=HY;7mPh$gF@lS57iN+_H$d!r`HyX=&5fXCMf zEpwUjZb2v6+fDO2Y_oD_0#1af#XGKgHHS|1M!U074)!Tav#ne-uQr8b5>cT}82h}< z2#E+x=>N_Q!qgTSfBuv4krTpsh9$hS^dO9B6a~1?f4-oq{ofbxcURc|wjcc;tw{g> z56FE)zitubh8WC{Dg~54hWp%xFZS&uL(l)W7l0nH#U1@Wv^$yLZ`1#CFbA|O4?q`E z*~HKOgqQzkpVg`7oi{`&s^pxsTAr#crv>7`ZCiH^OLrTKz{k**WM>6G6O+Z&vG~Q4 z(JWeMocvx=TNnW`pThQZ{cd{m-?RS~jY+sHxPMS$5A){Y0K*Ib#@g2+N;5znAUSV2 zrkZbVu64vQLCR-Z8S<6L5ZYS^ydl9LIX7x@zL*zoUNT9!*#7FdK6LSmeyG+S%{6mq zZw8DD^K*S#s3_|-_&{ySZ{eHXMSi1uV|Hv6CsX5lJz}>`YtXgJ2|CiX~-5lr;^k|ACJS6-jsiAoF#; zdf$m6g8Ur(x|{|=^5MMC6bjSg1~*`OYD+SoQ=A0MF0OscZY{YA?9Pq;6!&Y46KdsP zNrW}i5k>a^YuHAHs8 z^m|0i38Y!Mmom!TV}%T*0tA4C*+hHZJ_Z5v$-#2bJ5U-jbjkb$C4Mna5R|L!zI8}L z0vk;KqrV+;JLhuO9jULmSiDHdredgdMog{txV1l44^b;0f)<$nb{mPnI;{ybfMb&e zBSmVs_-Ct*H3;3`cYsY3?Oo#BoOEHqY9NRsSP}~4rqGt5EvT~YG*?Zf)z;c(+I-x< zSMy`Iwo-QiHDAiC=GT^;@lnNl=#o+yyZ(k~aynH0vzB2d0kvOVm%a7BFnrsSf@T3r zbLhe{HU#kkHiYxuUVkw2763wcWlq}g;`D3qacz{t9*$<6)Tg1yy~u=iC=@mc6n8=A z<25c4n}dg4T5+?*)jFi$ORZ!QDcf{v;E(v5a4YC`&8}sSkM()27b_QIY42nd5#LTx zMuebj5|=4(d=fSXVzAAS-^O~uQ-7S)(coE%tO4La4R_rSlo>^J*X#Qh3-^C8W< zaY7^S<+Nw_E`Op25xDKpni3Vsm0SKpy6%9#6Db62XC^kSrSED?P{EU&*ueZQZbZJw zo5UnE9{@}KSQ35*QMVBz=A}B>>5h;3S)yD`S?Lg3Wea^GeGeCHJfM!zII0_q_O|<` zHs-s_d0_28jTI~8btoSLm}16%gT&TmA_3iS)s$L@f#u9|ydzmWF@LtyYjEICj`Fnw6csz5G9if?P&<&peOmV&99G!ywO^CX=Ajl!A`;_~b{7fC zORuajbHOcvwqz4!5D`9Q9-@dQ=?2bK z!q^}*9XT8hvFox;;uoSnb~@BXbW8o@W7Oa4x(`Z1FO2JyqPd~;Gof68mK&7wp|LQ% zivF588bywg+N;yDOPXS$m)nH)5w((wMHttY$O+&>uUs9iAhFIWdkijp=Y??u^)m=w z?CN&amd*grE z@MiWg4G0wk-2)*pu$1T7T})R5;^U&Sn?~Zo;I?#~4mm>RJ)P|*KDn8#KdE`P?T+({ zYE7^msok0+aP}duZ^qof+Xj8p&y*7bgQHtNEiKH{HZ4nze@XZ}VaA%5AR)!BD1Q{tgoiIUpkt3=&P`FFxvS@*fAqnK zAH46jZag04N;*o%>^U*@XjytZ;Yg&bWoRu%eGz<`o$gKJ&z?eesnmZPHI{_it<36?au|moKHL5ugd`^qk}<(C=ri?{$d` zGj;?UVx-kx5U^un=JhAV_xL%egSAAT ziLZ`j{SD>Qx789T%6i{DThu`Id;aRrhx_ig3q=OTNctLra;e?Aj2kXvoT3tSF%v7i zT*b7s9Hq&{9ck?1(bpL87f#17rkejUBc?oQOk4+Q+KAKCzT!0YgpVuNpfF_k?A=>! 
zptm|&9UV0)q)ud8vk_O1nT)--`9%eek66ah0#tqQUl1a_RJfd+Um?@FzEiy*B&Lq^@PzNfTQkLv zVPnhA9<+TA;G=eMw}OwJBfAL4a6IxxjEYHeeSZC zw2$lh;D%Xl%95OymG_pL7v0giR*bXcWLmY-Ple`OZywnAy1~PRQ^;6Zm@rBNYOzt} zJEG{lHNHW7yv-*-!yfxmJ$p;ajO(|!yFf#o_6|`Ly*}EA*}GFsmfwS@PjX0gc1l|p zc3vJ#ux{wtf1@k*%Pt~o9~<6l`0Dy%_hQh!(9@OrdV3G&6%j><)_|H%EzW}f8`uMf z7SqnxOI&4FMFOv7<-85FIXEI`Lt)lLyEc~`+-z!2(P#z+A4DdzaUqi_9WPR+s5*py zy#1!6&Fp+_$mHTr2QxE;~@5q2q}6 zo$BcRzE4upy*7zaraf)Gfu%#_@nSypYCWH^*l&{lS8@7~n2P?##0~!Aw#KRI%==n9 zRxOEds98>Y^3Xor5w=1T@qZTtA!Tg!r|ooFFPfi*@}14nk1clVnTs#uCQ;0z&J!nT zkS(vWkS!DLj-cZi=krFC$-l=X{Tjf?`#{fj@Z05YH0(Wr+s%uSKgnVqp?NA0j%R%T zMko-kIp=PK{oYENFtQ{PecS7@IK}MyV=7|Def9#<$LE~gEfsGJpDEaOB3t+zLK5F$AztsiMGB#3P44)OoV0X~lfU*C zsIC9~TN0wIBMHtcLpraft=WbrEIE~;k3T*7_$H(+Rc^pXnlXS*C3xlEomda0@SOZ* zW}_tL-FTL_LEp3?gKQta!o9pT`}|+`9A#L~Yk#F&VSSil(x<%1FJ4P?{8o{nANW8c z+PVGpp3-AGQ@X9b&(_An${ThyIzz0&WI4jq#n;6JjquOyjBtz*=JMFw~DlWX^dr~(go9Joe3_rRY;6`xs##;JW> zK|_YLmfMdWXKV*H&iZH;AX60%bpPCoSjVM%3{T-Mdai!+^mBfs-r=Q9Vq$`W*ii4p zv&WC?NEL`CziuqQ2b9v?MV2q=;R(v_6fRQyz8~CVz&#MQ!KzEd_dkyxUv2k*r!*k` z^kQNemQM|vwHHfeS5@Gx)|)Z*mcdM^H0zQrcvxc0kjbf!vfA*>`%yaa%$GXx73Z^p zUbh-rK5jEC(<=M>LAB_{7I_^bSdmO?!Kcz|F0AfQFX0dA&V0n9J&Zf))^GaqYpZrX z#oZdI^tLl(ESD+S^?zT~($;O&_@rB{)}CXH-+JuBdNcWR3sL1{MRmqA-P=F<46K&p z-Dri02yQ!k+{%ccJ-uW8p%SB0t^W(wSc}eH``C6u2*Jf{^X$iE3BIor`GC6&o^)0} z?k6IZdUkS>)8!L*9oxp=f<&IucBJ*Y#|QOtUlq}BN3mqVObB|Kw&EhyyU1}V!~m3dgQ z;Q8lz%U0SiKt1|3>-IjG8#e+N9GlNYb}P-{mqt6>*xembpA9tP6u<^-2<#&)RIelT zI$g)VRnSm5;RHFGN7<8FmlauoU{ZJ={dGS_Xru$D+S}R_w44_W?;5@eDkxxNtNH}H ze9&bQwzD8haU_a0MY%UgM7==nw;4+FCBFvyE=GZNH7~BtPn}VVX#gw)j#Vl>J4|LWAffy9h-VGCrMX5WhK(WeXl~Y*+A6HO_psXy%S7^&v?2HYu0(#R zamMmzUi}`hA(mRhGQ}Q58jojXxY_UayPqUKOq<;qx=0%y;aAztxxqoz)Bx`|>T3u5 z3t~Vv3&L9a^W@b1Rm)#sKyQ^)P=Xq9Br9)^5iW2)OT@*~VEUY|HsV@A{bhAw)n0FX zhKc#&x);#=8$*8QwXY{(%Cgi+>y&NjS>Lvt8wgu{B1ZVwAz(j&PW1Us;uCOCr>yTznmMe#mII=^5 zT8aOVkd#jCJSHhk(A=4YicD!u5135MdL)D$j^5wx0HY#nE$5XYpDbg3B<*2|i{bM$ zuk!uuN6kY|;v}`DBLdYQwSQ#5#|*|p^6qNy#v`CY&~&tMvx9{62I{8a{NKEVnoW3{ z^6#UP69P2IBunk)iIC2NJf3EEwyxt_!?uO7{jzcE{2QJwK!{>?f}s4eZd9DGqu2=L zFl%z0(x&}uaiqnZ6?(p~p8oyBg_l36JCn+@U(@KDQR2S6u<#b8Yi+u`=YD#KtuIR!a8TZ*J66C~#?*ZKv z`m_uHTa*Ab)&y`iH(LroEom)&yx=@>X8}zX!!*I5gG$=Tuf>Jvi1|rWXPXeH7USnc zGIz~JGE1BH&#K^-Zq(yfHJ#n}@|xpcxxjV5t;Fa-);NzXt-(x**HtQ9c4g1Re;D~l z?^hl%d{$ujEhOg0{r*`aSLp90*WI!)!m|4 ze&_ZDkr468grBmaW9Gb$-7DU-I_00xVCNU z;y~NA5+OQD()+o4r?W5Hs-44H>uV@>>v8q*9U3+XKD@%QheoofVZ9?Nwf09*f0crZ z6(eRAozZ}DnzFfV>BnDCmX~SGFc%CZuVx$N|CS#M%_L^9g6+f^!^I)}7c6+&#@Tj*__3rr=Zy z>BD}Eyb6Vd7!$h`ht2>ucvhSr^@0XJ(2xssGZ)$ovhk!sl+U3d>q~xUaZ9pSt2*@p zJrS@+5L#rdrsNSgOO^V(YN7W5>*X>RmG*fCIuJ2mP^Qg*FJV2BzS)BzSoD7nPWw+p zfMdqmv@>rnym9vIrRyPJl?HlVzL!oOWlYC$Pk;EeU-mffH%y@Y9h1rB(%+cO?l}+U zEpQ@%T=JHiaL0D5rhfPqk-osD|1^eg{pHL1x%EPQH(0jglb;B{Jh^mFBmT>8$yC)` zd6#Q|lzyS+#&teZmq|q4f&K|Ay7($4)yGhg?iK)TW2>z;1gtjVZs8J4@Rz^g=k)e= z`C8Y5d%1bn3e%+lV#j%26JS=Z@TOk1{+-qW`^vW0;pASb@|piurS5GzS4`=Cj&#n9 zcK=4IgpBglC-x&>PghbOOV`s&{MiX1(`jK^D{294wk+EJ=36D*<(1Liqaku+Na{yk z3xQ8>>d|0(12?nTV_I7pr=1k0i+lL|1$Jbkb&2!B%&0iK0;e9xo776o-SbWL16RIe zIaJKvp|og1+ z>pn(C3b6|dx3J=8*>ElcIX7-dad=dTEFfycyEw=`OD6a0WvdP()1^O~7{=2kpwa+9 z5OgHwyL`cSKoZ~KNCT#M+W6)AVG9HlvfPX$f$;Pj=8Q!sx7O`rL?$?B?Q>^P;lx0| zx&k1r6_u`hNFm5;FJOlkfn9L#7$)yGwEPLwsSeygZwq`y9j7; zY-S}|x0|h|c3)_(>udANIL&w?%gBlYe(h(ob#etse3;r_?*K$2tnD>*dF(&M$x*&1C9sW|^SH~>4D zA0!JB3rU+k4xEzy@za?+BGbptE9`vWRft6S1)=(BCj;(if7CQNP%9wtz`b0!(%hnz z{<{8sT}_tYs&Lk^!!Dg24dfWed*`-SM7@Q!&{xmx{XCPJ|M4@V6rYlVtU1kUYcQf2 zTRH7@>YR3Ht0jJlAz9eFo88eMF#@+PuwY}_DHgAK?Sy#Y0ySnxle;()t}L0Gq5$Xy 
z{<9?vGP&nQ<}!cd0$?I!5Wdjw_N3FlU~D3@+M5K_giA27m#1B#aWU$@^k z@*4r&Gn{;0TFyrltQ#d2)t{r2@Y9yzKe#?|v$*HQtK(_Cs>(8w_6$frR+7&fTB*xC zdx$dU&E}!X7^Sx}`$Bb_`RYX-QdAz|_6J-JE}nHEogEEM1?BbepaZ2vXy;FR@uw2Z z^k{N4E~mm03fVk4Z1lFZ2a1_f(cyyy+Vg<2Y z=M5kP9}MEAj^F&PU1e{Yzt`I2R==5gArODOA>;jc|DX+J!|MBS5C>=m|EGGEwo#V$ zI7EfH!_gupXrnAJg*bGX674!P{55((t}aOHaWM$ZB*=3OPX0t7_pzlSfI8==nA4kz zsY$Nzt*wHzr-euGlEUMyUsUt!M=OO(*!MTO2K?^{2zJD#3vZN81+q$9g*E(ec9zp# zxbMLBDsBw|X#k7gr<`@%Y-qLHcad7{Mm~%1(5&mz{*ica3`mKA3GU$N-GgDLi+i8; zy_#c_3B^G2XfkKYG^_X7_LnWyfk?NqnaO1r1KJ($Db5r+00iyg)UiEF?*c#pZu;CE z^1#mcZjZKsxmoZJHrez2#b!Nj3by+1n+|djMC*RW;ltm@CtG2A8N3X#$Gu@~vc!)d zy~LJly;MUjzC)KF`xUa~wsc1Gl7?I4hi(PD3)8n*Q}KDY#Z+y%sIIY`T<%PYvzcz~ zz>vUqRIGjW_Z)zl{Sx|c`fB|tn4c?6_OT$A2m-|nc;VzdV;axQqtpy@rx$!(Ybqw6 z8=!g#ipsdu#CKYmN|4=-N_Cc`&fNV3T1cNB;=!NnV`iE*Gi5+J-XyZ=-Lo?}$gC5K%4(J^bwGjUY z&0*RkKn7*+#a!jCa@V4fjCT{f(^Iu}k{UmEb@MTDp$m2VHVK~mCaT<*y3gMI!JuvB z1P5GROccF0$(z6U>QW~RErJM=n07^Tn}$=>f%5G&dRrp~ zCt7RuW#*{Um+?Sayj6UmzP~s8B_~65XmL>;;h_XH<*g#-8J}Lx%ND#{d8+qXTm!+k zM+)u8LrOBEyZrw^)P{d3AXGbw@35b48NfBmN7c(L1j6)aU+J%!oH(nt7HIkQs$vDO z`YCe17en4PdR=Pt!UxIh{N$w66%n5e<{Tg3>-!KYvj}m)C-cF^ zr9Ym&S?iNGrIMoP5s;VV8Qppg`Dhvwm2wlNAFt*8o@KegZ3yYVFur9Nc@dG|fLuaf zKP{^H-Br#2=0KdP>ctq0$?g_xee*42j@~g~5 zw12fC4HAGkzV{4m3I;>mt@d02IZm&#$$RW4<8dz=TO3h3`8DvLSxQSu11Qtt2df(h!~IZjb}j4Ur^_j+w0zSYyrB zrzrra{4Zi7fq-P@r<31LgfE$TamEPfGJ?q03hoxnFi6@ajzVQ)u099x!b)#fq<|il zKKeBiaac>15aKzz=&xJE>}1Ft9n&ULDqE&woJ_gPpF@Mc-Mqa=i4a4mv*fp)^5O_e zE-n3mKbw`Lnc=!AL#wj$jl&Z!Z2}9+iv1Y?X`|7w{N=ALIo6XPTw_qMdG^QiXg4cU z=aFJnz+~ek*|3IFMEo)&VvB-+EMpvw?o`jR-9L@(quizF-HLUF6}pQ<7b|bS_H^^U zUTw)3wFQTc;C$+gi~>OGvoSjvu`wXNigm;g;$ZXApzI#ifr>-`%4%e<2fKFtlSNl8 zBplk@n<|*^%?w_hvHEZEgW|z%l3o`=lyCwDJhPwkW|h-w&$nzDsNeknJRx{`i>Lq{ zVPknjZ@a{avp9TU7#lJZn9)zmjdf4AkF(pv$Wj3oy@*P$L;Zc9qj|6Lm<9Znjv_Zw zR11+I3GHF6IU9}ve;8iss%`5u;wrljD=e3Qrh7Xtw$VYo%5L|n)f^d04 z=hQEUUw3ZmS(S}0kIw~>AOB{U`aEO5Nq9HOtM-1Y5;8~b^Q1@%VJQjK6G=rE`gIEr zV^Y3+^!Kpp{A#8Q))T||p(%I7q^6>AVtnk|T0rn=E2-(QY$B}L31?AvYx|HrX0>i) z{g-PCQYvPYoE51xo35NDg5;+8#I z>d7dRQhTZHmoJNr@TvKZutNO{6b$SZe|W@|*8d}Z4Ef~sT}FdZ%>^a`N-#Rb27(Rx zWvPRK#NbqBQ{-{ykLOyzIMPT3)F;O^9QB4A=jGezgpSfwm|ZOMQBmcm2;yjFN@r%W ztAHB|rAV7mN&O79o%?Z+#@UrQlaeP~i=AxN;b5h3-{;*>F*9@b_9np6rGDTB<~}Dp ziv;}^dq&AK3(a~Sbeq?l0e+1Yc5KV}>4Cil?-0Mu#miAutY(Lz#78Xs;VrFvcoS$2 zcP;1I`DusIFKjh6YUoh9h{2)x-|fz#7{c>skziqpH6^mwR(GGp#*QiXXX)yViWkJe zlZ}!h!DM~XkxC3U3V2kG<8pS!&(>KpM%}(Z0op^1X;a;YMd%8CZ&3)d7aW~kxq|3i zvuhh!bgphIFpp`cZqK3z0OqjlfE{pTg2%huf-*W_&z1Jg%PamoEID&R8&(+iBnHp6 zuJFAtp;m`SXf2qgBKBkwBd3l3^sBYtmwkhuF;#yba6m3P`wGLjI-A#op@6mq@>FhZ z+e|l@>(w=CZ&Txm%Cn)Iq%qa$o)chGkFzNsNR=qGR!j0OoLHide>SFqjM4t$h^5an z+4&9%Tf&lRJTo8GSDkAo0sdnl4Qkj!L*Fj+p1W2_Q%Sj{jAc(fPV&&f5>Etgo=K1y zV@}O2*QJ$(>gSNT@>W?A8NKpU_)8sYJHLdMT6y8c;@0cigyhl#9~$IB9Vdl~ttWj> z_}vVTzUCUDbe?G?jo4EIQSTi+5LOxpByqs=er~Z?xELzG$2urEm9uq1A za~S#Z!KV&r$fHa-Pyx(~r;U`omR1y)CFu&Pp0hcsRo3fO0ecHh=>7+;wfol7_0iak zSxJa}w9T*(PR}jp(;NB9Z+Gc)U2Wu-}{C^T&mMDdt))r7QqCM{|@YI@T#PP(_xapDcjcO2yuf(f<8F#iMi!d#y>RBEM zz?hA!|2b;%o8(c1JSTgbi!v3;h!GL0e*!lyr{fdE>ZVe);0b>Y97!W6O9{Q4$ECsj zDxHAz|GiC+V?!=PcQj)?XYJQ8eD{bLz9vqJr);ydMU5018eMQ+cm*rrK z;DqQxV0v}(8Z7yog18<7zRC%apfE|i<zTS7ww{h|a_t2Lme8x= zA=yP7;GY-!UL>3NLN$!A5fF03Bb(PXO;`Di<42V;8$5x7coxeuyA^=IEP7a@chTdi}+x-PD}O??qRSh62XnKMZAhown5 zh1;2Bs@$Tr29>ZUA{sS}c2%q27%6zKpeLr*_NOWm1waNFMdq4{ZsB@FEOe6#EnlVa z?FL1;l=M##{6n+Beg#a$Jr#PIpo8LQB8$-DeSJk-(N9=JBTpV9GA>4~M;*SDbz|MH zUYL^m&^r}Zwx;}g@5?ULATTZ(yF=CVRVp%l=5Fv(65cbP7{ z>AuSFXjEmxyKJ%#(9;d>(n3sZ>;M-9&doW^ru^CN@rlhxTlP!TPg*Y>AkTF)s|8Z} 
zL^}%n4KG_?;}mb+%Fq+3V8*m>_=bi)718K|G*7#_--n3^{r~lUo`HISGluIh!Z&@D RI1vcAbhV8TZ#3^e`5$aMEP4O{ literal 0 HcmV?d00001 diff --git a/docs/bucket/versioning/versioning_PUT_versionEnabled.png b/docs/bucket/versioning/versioning_PUT_versionEnabled.png new file mode 100644 index 0000000000000000000000000000000000000000..388ab4924cd14a1ac694a903380e30683ec6a92a GIT binary patch literal 26946 zcmeFZWmFx}wl27l;7$Srf+x6paJS$Nf#B{=@C1io!QCx51a}XvAtbm23GNQPcFwu? z+}C|yk9SAE{?R?|9y_F{s=aE}TyuW&o8PQWn394NItn2Q1Oh>qkrr2hKwzf8ziA`{ zuqTc`B@X--=BcLTqGISy=HO&+W@%$e=JMXbl+4t_(hLIe_@x)Mhz`e!6tJ98jBZhlHgbpp1B1z%NssIiiN>=GXO@OUwHh0h z=dhFMw#2{A9@Qjzes4yF6StT&r+?{4i7qK}s>G(>h$%T+-PJSrdIc+w$ne3HP&xww zK|!ztlhTrx<2AOoWim9eH!@}Nuyp{lg+Ta)JRA&-txa9Xj7-ff?F1+eTRJGnEKLL` zG&$v2FJEi9!yolI3d71WG9t&LxsPzVX4@O$ur0c=fO49PrfZS0(RJp?HJj>`+S zp}UzW$o`(&mq8X0u&Z5 zE)KlR%V{2xzuu{8UioDMzm zuhXF)=2f!vFtyPVx3o32a|Tlopy1%*`a9Uaj%xo8N4ftVg^t84V{2$`s%dHBV)3t! z{p*0VsinCExDyVp|L_q-CreXc1L)lfP_R-kLoLbtPg8=GVgC1}f!gQ){nvkAZLa^41^!DX|F?Jjf6H7b{~~v$c7V~j1L{_*G;#=NAH1`u zj2aSnc_W#GLLg)i8F3Lck6(L>?>*GMuL>S<*B;)!j1z!Ac|%5^A%XQQ;fs=N`E0q4 zt=^YeE9>$)SIfG3otCDCbL#>ly(+s#5eYRc6I>JSF5z!ngK}o0d$)HF$%CBahS;X4 zhgCDYg%r9?V+NNE$%Zi)RGfxm*&uy;QVHV^ zFPtJ-q=EnR=~IX>TP#i7py|oOe6t)C{Tr&!RI$=857G{IKPwcevIUK(L-IpB1yDU= zgYY3LJ1)G0;anG9KheqJ{nB9#4lh&Tgl(eVhlIgLiYPLnUuAgD=^ zOHgA{r8Yt?m41mckn5<*mG&&6Z!KP@H2i5Oia0_X6J5e^J|vsls+O=#2L&RVAZo|N zCUc8tR-HnQ7Xg9r=3$@|cvtF=Sg;CHCHd~trz;e#x_KbVD^h(vYP#<}6~Bwv3e%c8 zJiY206t5vmknUShsN~5Qdz$~b;hSfdmhMBn_4b z1CCfFbEp7n%#e0XmWWg4u{;-bn4vgIxjuZ-|fCNdNX ze^IV#Sqr)>Lej6R_Dzu4rNg#&%jIWbil4Bc2q);0S0dI>u2qYt=#-*tqx}jjnLx6x z)>hGdmrut~H*|);h=wQd2@me(|y(8i|)aO&_4V=1oUV>Y+So z^7xiMU<1eXB+P{lY`xikYmp}nH^+<~AR%9_4*b674BlbYK%kvGy6~4RZ9cl;t9+WpYVP^0CoW37*!OBSeB?B`14pGq z9kZ^%WvPekRjzTL5TU{g_G8S)_S2ULG{ly`R1Lb`=KV&scg|ZEiA2EiE)3G`SI4Tf zZ8F0FSuSc<-n$BSC$}AMsMnAO9dwwnAHRzCByL=CKdQ%uQfuS5^cGB8GQ78ajdR>v zWldrjgM)6INP&CMwyJ2cG~3;sO=&irYTLM97fmn#zTW3U#jt&y~Wxa@86?&%I!6(HPrQLgzNoXy5&-C(zQyvnBsgPlrm`;(5LAObBmajnQNeIA1T!te7{j^|u9%M|QPRZ)hz&FvmQzrT-jb?gNXwSDn$Mc-@`vYBidICM>rodlupxH7 znC*+Vgm0^zZ|%Nv)aBMS!2uh4=HZ!A$|}Fp``E2QFH2`j!%&@FJTnv{fYDvkvKUt3 zM&11D#rDEtPz4vFQ7pmGmp4j!nnAI7rAqTGC<&au5&2&b2gHX4o5mc-cjAjnMG$9{ zFK*j8XXh25>g{8I$z`)!DXZxqy@^G}OG8eiVE?26HxeYNWwR;9HB0ToX%~Vo*gy*7 zhXsCu`3Sl*doy($@xMUJE1n5oLm>*6EvG4|$|-AxPskhUI5C7k4Wud3+=1?-*#hUQ zXh-?t2oZqaikru(>lW8gwKSS!1PoreAyyv+)r|8md>-3&h+>E!pTib-FPB+mil&>c zVX*@P3?W!Ug^&FATl>~#4Rdm$?{_}w(5RuP&BlwBs0dq6o7%d0jIy?nB?B&t`g zE>^B8Qm&elKZPLi(EKK^4^kpWx5sn#oh}UEKF7-f+1Se0+S&vzGsfeR;^N|HDu5e+ zRlvpu3m*ziasS!AvxGX}WCbPXjk=c=Ziw~v7HiQ4$%bGA7p-z%KLJQ~bH$&=x0mf7 zDtD>BABS`jLPxBY1er$njVitO!8^HBK@7->^X>6u?)Okh5J|n0#aU`07B7RNlgj#@ zFQ*D-Q(nni*LUe!h`x6n6k%T8x>#R-wL7tk+G)gYJVm>1P@RInAyoOc9-FP{skyiH zjsMtMh8sRt*#a5}sFOb`=}s3i-Iuo>gF0aAY1!L$Y!F6(lqaO2(UX8>}HSMMa?5|1l|1VW?^9)wzNPY@xH zNCi%VpCH;gnorzghD^m0M{|=B*AA>7LmkLInNZMrS9;l9;c9d|B7^WlLZrMpDo;?d z{_e3+K|MJ+#kq3&csD$dqc(gkW*iBEKQAhgY2k4?#BrcU2si3^`%!P{)vW2(TDSEk zEJX5qbf*CK9rBj0146s&&nAz{-y-9m`ugs~EVF`pkJWr&$;NA4$*Np0y1Yk%I9#qbpiGxq(>p$nlXRoI-#c`g zpJf&KF?N-b+xj^4}M29SvM}h>XttfvL3BsY8GCyj&F#o6g|!B1~*+ z!&&aQrhs&Y*q8U7IV%L~lx{{xa;i`e+5!Jtcz=HAILgdS>aaF?fiAyoQ@wso{jTd& zvGpV8VWm_je&GBna6{fFX&Uy=`myvL#^}7)`PhTeM%r1GZkK}zJ8I4Lq_-n=?(ROf z98a!&m+p;&dsj)M$xbFttw_q9J@{k)Xf3a=^!N_9a;2w11LNjyy5qI&x0u%TFG4E| zhnY=3s21xtF=*7nMaWSg`1jlMjr%9dYju7d0tQ&eDO(xAm`l3k{Q5UHPS6~3SN%8X z>wt;O%tZI{F}HDVbLRDt1?p?NBKtmcqBnI_)LF?1nWG4LTfc&BC*vw#3TJP39m>As zwzccWG+*M_zE~-b#(3WBm;Okk>(q07wKDqbYIP(HK{vC;oKYi30|_pu^1YvcUCUjA 
z10R3!b3Po`b!!BB1Bn3c>e8eNDRueROI|+L*((Qeq0}K|B5<9T_U%ZQck4+Gzw{b! z(z90g@e?5sz-C0Kl8kPHsW@!yBq?G$ui^$y2HXg$ER#YpTAGrw0uC2FV0?WC7d@`^ z?NQ{LK!o)r)RQZY!1!z9kN2)$@}t)&M8=uPy{P$FW2cM$&~{eMeD0&b>jMk@hu8M? zDKq~u-2=;^0))U}1RIjtP?z1Jcz39a+RIc&K3jj(%-SPwj|@LG8f8Oa03TAa8teXd^F=+vUa=wlHx z>S;B2ulvWy6-{gL_)Y$pvf26CQe`>H1u{B#pUvq_JgFLp63aM5=BrS_z;2& z3$j`gJ-hma8=UOfp#K~cXhx}QjxR*AX z%U2}C9wnOJGEfdfkN3{?V!RpW*d$a!L`sJ@qhILZ=#yrFx&ZsaY$P!o7P>wPR_kLis@CTW@V)o$M z*nSJ~uBOeO#illi1GgJlwiLOyTxs`MosST2w3Wu!T;srQkO>F@rh&?XFM~f_i5wwI zvTM{yY(eawU$TRbe1`eWHHEEJ`BNISe+w)ghL(9^3+n?Hz-xot{FV~;=DxqW^zw`> zQIpLaH4zf4NO8mUuY=UXgc4W&?gMPtry5cWr-W*t_HaxC$$N?8SSeX_u|3ipQ{J0O zN36_1N5}X%LH(y%-?Kgh=h#WAxHsA#rJgtRpk~cuX0>onO#eo4`8sDjrl2{se};F( z?`t_h%2udq>B^P1h;BgbKYQnc>%R}-)eZ-2>Bckuw^$vLKO2(^qe|dJcFnxkd z8(m#@(QL`OXI@2i)y!wL-I?6aHKHlA*^Co!&v;8m88qmgapip{GrJ<;DOwD2;Y9~U zt2qCTt7{Y6hou;hJjmvVWUq96;qOE&E2(KBBq@G*Fj&&k(11ez=Ycnw8?3{bm(4gL zbl)AzNNKvxFdv~?9lVAj6qlm}I z7PN(FQS3Nh_d>9uU$q|CMRR1x;J?Hvk)9wGfB7`-)moSZTP6*tc2mXqpGO4d_Pfv} z$oIz0+Hlb$_`@oTcyYzaZ}cz+E`$jj+^x^vwNXDd+VeuwTJrRUbz|_Ie1_jr(U;}* z_V+i4?xb%XYTM_w!%(0Tr->-zj~mm%{EiaZXN2glMinOm%wXf|WEJ}Bopr~+!yUR; zU?!Y!tG-hI$uuCY{~KRXPEJ8K-2QtHc654LiQi5Id?rYP(s*c;XQWe;C*HYWDmXytMe41;x+OXVP(e0d&v%h z3FiUdnFZs032EQK7Ri?p?I_@jy@6ywDue^`_v%7y|CtP?d##*-B$$UJ1k z{1^+mM9T`22Q>+T1)W$ z{`z^1FQ-oKI*uY2MehA~UE5ov>2}=s*_vW#UBNMT=e>U};qB<8k`)JYhuu``tyxpN z?a^1LNAU|E(cd1%gIL(PJ(v*I0EUI@@s5vy3OfGrMhf%mM_C9bgbBj(yy=ZO7rA@W z2;bbLjO;MXLcoXnJsfoUCk(K;m3nT<>mTpOIK0kY-I2Z|MDY0ZZxzj7>;{AVbGiOj zIsXg${x0kP>Ae5azQ4uQzpnf5ZbI>Yw=W8-Npu#5Z2qy3duIyq?o7J*IyoZclf$6ZyVNT?^3t8%4W%V7~ zaCx!$Yv55sJ;%@!VwkE>bR3dK>Qv$3E^kehagE?lf=Bsp2MHs49mZWTJpuo$C(n=_ zf3jkJ3=NBINH+%{`xo@S_;XEd=Zk7|uH^m%5s^^qBVrYT@*EUUV;0eioIW`l##KgtVUUi>sVJ^gcPjh8$lkwt= z2n-F~9uBc=C{gl*iTL>c7zMUHR68lM4a?B{;r(I#8HME0J5_X*^O=P|yvA1Cj<+fh z&55U{SPiAk82V=7l%h+4{U&cRnJPt^7K&Qpbg}0?7W}D7y~fB-AfbIvjGjLAE_lpy zq09JltNAnxg^jR{mu1F#P@FAul%iCL?ju{u%S`D)oS$;M`6{KYB_?#w_RxdWbP_1j z2B~;;_TE8!A#K+UMGzN^<75CcE-wOes7M1nOVq5mbk2=ms+2IrDI`U_!qMZ$;|`Oc zkl=aB87lrH*I%d@4pOI)phjbw0vjt`He1ZD6Cn8#=b3cA6RjLxKotatA+cKUkO?AJ zrQ!_;q(By?T%={3xro~;M~N0=F#LAG_vPB4$TVdal4w`e5&sG*1z{Mv2g$IIJ6~() z0M0Q(jkadP+s~sWQ(+!oK<@f1^K$L0Y#z#+{Iz5?CiGD8+?ZnF4ZJrJ4<0WkUnPVI zWRyp7LF^!g5DW+_r2I}BhgEuK8|fPxqyQnV;$4Y`hDq(uDLG!%Oxc3SFb`28l)yTx za=H?w!CJ{$@=_(*{GeBOv04nGh9%F8stJJn#MXp{P)h=o8KBPqamcDX^;K&}ssl8$ zqC!A{0)a$NM}R~N(ya+UGSCR#*^?2W1+J46X+A|Cz}iC}jJGn?Dv7_8ea5B;V=!)3 zlNBO1sh1;MjP5T$+FW*y0~i&36@=i4fr)b1`OBwIKgm$5CRA&66QTs7$tF;kR&BFy zipJ$Pwknx?B>?(_;n!_LmR&=zQJn%+i$6<|gUbRz{2$UZ5W>Ox%%w>c!%MMd;DG7q z{T*AK&m>gTI%QYe zf<|wXu(F!M7eKF5*3g6fP0nj4-4g)>@cA(Xm-;zFd)NH5dX9l+9b%OP$sL?GeOZC` zIhT2pSF(_KEw5Jyw8RlFA%&VV@p^=)662A5xIyQ*yS?YQ)&<{z?5HaAae!hc2F%jH zU6qKCo6{UfOW|J+Yw#`3Q_EKkHhw#>w9@EX1B9BI95moeH>2fv=>ER{IuEeX>#!ol z_)Ld2f_TlODu?Y*P?1|$TJ_DkW|<1=wDN8^dG@G;T?u89EUSJa{yRjhCU?c=oq_#P zbq)5bS!X65RbIy%9+KhwJe5xT-anYL>vcuT2k0S`Z`%ag(;zlRgIEFaQBMbpSt)1V z+H6(}wVrz!&GPSP)%b|vl5}iY`c*buKJVamKz$R4!Ts*7a51rdt!?+9@xa5`<&Us4 zNk-JbC|cmRBh6O~64RR*)MqXiG}AjHAq>xqBI7X*m*Tb3?+=@c-WM_wPw$oMHTsS* z8|}DA4o+qF$n;WVyYNO{;`{b~%r^eT$IK+bo3Xk?M(l=z2Lsc2B2hZ=eY4Y4sL4O+ z>jZ0`rTglRkPu>uZa>h%KgzejLw+{Y3q)fs>x5};wM8PUDuU`ZJ@`*ET_PHB0ua6G zeWyYtP?rLx54sC}WJSk~r+$J`8@EURGbSHhl-FS)O}po_-c=;S%i2L>i*7)|BZ*TW z4q?T$evg(AMFU*BJd49E*#r`aQjpPs!Su_>iHK2S0yEgyMGJ@*k zj^`R$;Uhrxi-4-%%9>%HQHb6|VT6H-59lI$b2Ye7y>7U9JbU&FtKl!LPg$O@3OtS@ zF+X@fb4Wy1NLCkHNG zp=k2w6qvOh@U7f}>SyJn#Gsi*psg8{TK&D4NHYN31?ILpC?9}}eivyj0`&}-1*kc4 zY70<_TlOt1ZNzvpBxQooB?>g+)qW{ySxIQCvodh80~L*7e_akL8SRL;x8MTlO>bX; 
zXJ25ux&;W=%DRSP?_H6+e*7sKoWLp>!N3C2j~_b=2u4r+;3N%{aEF5R5skd zge$38NjUmamp0~ZTMO}bk`+*XNa~pQp^${x4en5^oyo>QpzZpYPGDQ>GKIy>bh*E= zcm^dy8P(KRj3Hsz;Olk{_n&N~K>$_cIAMW0Oo*^TJgWu-Il}Nj(g4j8Uqxn|WB3IO zX5cHHdrbM$&l`zJUarXi$rA_&-!@dv(7#o%Z#--QDCRUSg`yr4N0$-%@k>s?Cwvwe z>K72){LkR$H5`K4+X(~{?$#5cq zOzG9bVL0_8HP90@&ikA*tiD-WU3xF)Al3OCH{F9Ehxq?(G(sSv*JsOygZa}CvUqGr z&ce+B3>sZ+`&JNGFZHBui%Bu_8IhcL%(*Udz+H*^z`7y2%GEJj+#p5`q?GEx2=icx z&=5#+;E3@MJjW7z*FZXW=XDb@-roJ7q-_0tVp}bskX{Yt>=5_;_?A^Up%t_PAyeFT z8C0M~^^ZoV8Dl2kGaMX1n1g4PxH$iK@AgCezTqsyVs4 z51^#j@j&jec!>Z|Ly0WZfCWvn0rl7eha?2?*aH^KV@HkfT@k4;-hmw{MA%qKq#~Xi zRk%P|xFi-1lFx?x)hx0KEkGTyS_{6U9un*Xw;@Xl5w3_ADc9UTy|p}({gzN8R&xMj zPcSHMHF0J^6Kp|4#gBO@i5>kC0_k!S5^clo&1>3v$b_)%pg}sNL>H72v=|f+TvI2x5ZH6VOLV(G3S(Lce5?D|rcFfl|=< z+aa%I`olYYh+q<_T@gNvK9YzZQhVk*agrv)mVO0+ zRt<<;k96P0mNq5JrxU516Dcz+&xo?wEe(iGJHN%r=5SgXe6trm@BW3N4$4O=kQ}JOl;8h(pyXNA^Kq`EXtTd<_?Cg#S*8nSaq&OID7CSozb*ADo#_Bw^vP zrl4zzlA`OWG@K@IgJbstM2eJsj3gy1hwa0Qd2X(?Q`?q_R#R>hS=w+ZEkj23oW@__ zYuG_FUpPo>)`K}(JzDO!m-au_F2ZRR;a`|hr|*+!y^$kO7lN&$o-r7?(gA%8QlUZxvnk5 z`|^w1mY3OuIfS@Hkv;jA>!2mu{WsIJiQN3`C%#(A_^ox@B^e(@*P9$CWjJ$%@>E?% zOT|U}B`)n;T872aOz^k{CncWj+SyN4$NuzbB^5*C7{EuP7-F^J-y5I$p>X-tJUb#|mfW3`)&S*HjT|0a|Xl?~Lxrdt zhFKaL2Dzq7hktDPku{h~O|0r>yVIHG=STWPyV~!^#q1Vjp;slRN9VaAc1(qmH3Nw$ zZR5p<2!dRfk>@{^qOt@XpM1p&QYcB3(xL!jGx0+kZ8gnp(c@x2yX{W$?DT3EDbu4l zP;idWc%Ve5QjRyEE~24|Y&*IB}a&!2gO92J~5%p=;lF6hp94z`0sOCEG}w8`Rq_TC7^P5f{(n`%nT1! zwLTtfd8^IS3$h{bXTzZ~$7Bx;f%=NM4}GVCV6_8SRV9spsfz7rwpi78b77aT-0W<6 z>eWXr7VV@)17+oe>~XB9BMnw3u>0a7ksvkE<*i;5y}+vTnw)v>jfvdKmeB*$BVy5? z6@Out*eRya8s=$6BqkgbM_FWK`s!;Y<)wPEq#*ZEmeEwyi`Hhb@`v4+Og-nBUubG~ z+ZUWmBi^=hB;Q8<-JDZKFQV>4`-N!ee_$^j93Kww@b~yKnj#I;h%!bMz^6f&vEUOK z&~kR^mPr`~J&`QEh>5P2k;(#h^N_i)%?x5oH{%@$g(Du%)kEY?9Wznog69RCTu;@L zgG8_}5Wr;NNhk{fs5Aky(hvV~GbKW#VDHYn;08V@xc6)7LtO+{&lXDe{C-G|5~7%I zt8$(?qF4?FZyceg^}#z^rD_eYPw*xaxT2l-!5!4x7?zzxqToG&`*HkPUaKwHW$>rVkww0CznhE5a*?w1na0grCS~h$Q$^ zg(rwa*0I#!bSH)gP(pN1&X79ys=~_1kE#>3uJp%Z775RCm9v#lQxCs{dI2 z2XQ8B(MG>-;e8YYWlg5T>TJd5vWD+vBaJ#azsg3w_(EP$l2Bq2Qr5qKZ*_}v6dWs; z55Cl50h_a=Tj}BRn{uzJ5mvg0y=ff19}E4rAUBCT8MqIMlRmFY2KEeXxG*``+EM>p zSyUG)P7cp!CbG{*xO{xggJEPq2;`-f%sD6xsKG5^sT|Mvckb+y(&g~vb`h+_!c%k+ z+*SA=EoD=`Y%OBWzQn%>30KMUSK;IN_FMKj$Gsb5L-zShF!M*fgk^FTEmzTfsDr{& zR3k~2Cf0cXUNbr2LKTVCD=r>F=q~Xu&XWswvMoPuw7)hw@Vi;a{V`38|B&(l>EGy0 zZl1=$%}l4^+jkYXC>;*{hnXPNt)9dQNQkj|v^f8;!gxg@zju0mKd2}9l<#=A-usdEKOCR3SP65MkTS3Lt$}AiAWbUa)})O{B_1dp9ebo6D;aNt zc1h#Hp8DR8cC6L==|$$sMjWM7C@|~PyP@eM7R&_3qW=znwOrDx?A$^TEr{*;x!LvG zeN5v1D$ZSI?iv3rkQ@BPsk(^A-%$tFHZHrSWHI~<*T28jq2WY~o^B8i%XsV+PX8e_* z<@AsnOHPOrF&7xH%-5Ro;QenZ@)vnI1i+;L_X4Q~%MEXq`Dbt#`|k5x@!PIKJzhyM zjxfK48`#1uZW$y=$d${E+l*rAhlY7f`vy|=^MbhxM|Oz8p9U&}(Oh}`z=f8#jwgG^ z0fH@M{lVIr8C=`54p#dybu+X3Zr(Q!ZUFtHPl-v+Qdo??Vs-MyAy8*J!xH{DzuJEkB-@knHk>G8p{Atj*PLDpSO6qxfSpr4cUG>vD9rr4HnJ7$`!U7(JqjL ziPZ|ke5XsvP-_1xUVFW&Vp&EPYB<{3{^J2jkO0x84h3srfp07~F3`yB?xMOKxBiCP zWc)IdDM!vQH&*y|+ zBD$mT?Sy$Vx*F_xth4IQAKFOIV~1Wgo)40)wQXsxy5kFdxKXuYxx8V|VYg&4)qs2O z9-Y2AVBE@WDf9aw%D9jvo_ML-Y zsEb4M>Yk%kiw7#;w;bsjd?wDefCicAx!z>&@Wy$5J7lZLdBJ^m?&aX>dA#R?3Z74; z?avE?Ic?_k5uHACzU&B_X68{YR<6`*eQu4v|5%e5JZ;6g-W_+eE&nL;I~fOLc<|tb zK+@uXnoVb4M{*>fDPi1?*D#xl=P14xm!!~CB~$k-(%L=8ai1xD>#>w@0 zezKt%u*6RfEjsjllH!5@Aj`Pu`%1tQYlM;cN4Z}+%bB5(!-Um4J>x8<7ZW1DAWG~x zP7}7{#wA|9^>rhGGVB8wJYsIc;l!fZeR;?>IN9m7GT$NsSkKMj0m&YU z7$|kDZ-bskxnTl=ay2x(+-!un@wv7oldo_U*T5 zy=!>P`O<#oFS$)l5%p5>_0Iu~C7%)RpZ?tNO)69`{Pu_x%KfO%$GEs*msrp=_Os2F z@JYRibm9=4D>9Djyh%YaW@^E7DnK*kl<0OG+dA;WHA{mrf9J`@o0c)Loe*GE2T=U3 
zf^5htJdoY%mUr=x2)^ynaT-vXwL2LxAI`OGYIs0EKHPody#E36Wp?dPI~bD;hgJnLD5?oX3EyPxBfb@e>75?{NH!5b>sLVH5Q z)A59T{$t1(AwHa0IFmQ#TMqlz?Wb`mjq5*mS+;&X zzuI*@1@}sc7NjE{z*? zDZKf0z!E6`c9xh_cUnkPZI$-jzsCK8qmQmt_!+$!t|9)a4-cw10#NK&lQdd%)}U!H;t5ti-P+)@^4Kad1*qt7 zP3(MGGeYx4@!R(~2_x)^+v!(uOgUk}Mq1bXMWRInKq$y%E@>%1^lLj<{!2>G-8i21 zWH^&{+v6=DWoQTBlXw#`zU9rH`G$?&=LoWGUhxzKM~3h14g2vQ$~W0FbPhjGxebn6 zqpSYxM^+^guGZWJR&%9s*BgTT(s*4>57}VydVh1E8FWaI5a!soxP|;9`Qf5_fCa#~ zR*MBYhv%zi9o^*9Ja&g)*Kr}bN$sqH=o*Y`lP~+_o;UqCU#csBvukmmIQxS;6k_w| z?kZBTdVA+LsFQn4FIVRQ><*e|k(CX^HFUtswBJe2Bw=i`kdw%-G~F%-w*5pdkc{%+ z2-_uTC75NK&W<{ozP5wC9iv%duTl-0@y`h3jNrn2zgS*69RUq=Q0$O2I|_;$VITNh z7mEDIq2mm1ySRM=!i5*o_Z?cE8}9xLZ#DKh1u7SSi1QIdxWsAVI`iS8pn=?I4!`TbFwgE(94m+~NGBrd zEHuSK*&yY}baOebJHHn(81PqG^YKx1R@cny^^g~EE`SlP002f6`yv3Y;-z=!U~lrx zQT~n8GxXHipL?B;o0XmWR5c5K`z7Kplrf{37t1jpNQxQjg0J{_iRZ`%uU z9itq6goH!A*U^+d9(js1z*95;jxg3K$lOA1Pkg;rjjwj{mPu<~frEyCi|KmazIdRV zsOmG$chQo&ofY>!InY?W{w0y-=+r)a1sLW4i0GqBc))q`T-3028s>wtrKwbsy%tkM zt7th=?{ z)w*?)161hV=*28+fyL4sRV&F+VBn-FI4%Cyhu`08>fo&L?fZiZ=AnWVwiU<_Ym@Nx z%)5KdI!cWB^pUVstwA zR>Pe4n)O-6xjwBJ9W3>hq_y%LKdzyxNBCp!1;;&6Qr5>wFAn5ey$`yGXN&qX4he*> zXFQ+fb}e8?Jg(<@kqo#3%JUFWia3*dn`No1W{Dl;*bmU|Zv7f$uWp6@nb)|11EWOq zM_1#SJwBC>YFe{TxU(4dY(|>g5J(<>p56*@OJ&9eY(C;c%{4H~qwwJ8)ryohb^v7eQwwn{cHNmtF$UUd! zD8bN`6i4cONGdxknk;o#id^Q&Rv-iH-HJL|c-QUL|Ci@u1_6*wHH$C-G0i^}n?WW* zPci9K7+VsZ*+N+fxl9+@CSaU&=+WmP=2A6_>?y<)>M1mhgi944P$=!Hd8{DD!npmV zM2$e{3dOj}DBSnG5PyewFyv@`J;U!Wy$hT#5e!kYK4et`b~c>+nkRk%XN)jph# zQ7p=uSFPx1-Rx6;Atn)GnG{>BC{@M>$x2a2S8`BUP_@qbW%w@`EYrWcHTlN2P@5MB z1d{RD15sB&mrL(9z;`}}Jq@QD1r&#-l$Ou|d7|jhuYSDKEEES2fsk+8w3O8=%n@V> zGD_-?67sgQ>P4$5%k9Bl%Y-bvX~SxAyB?-?rTp+ za<{4q3N~0s^IP4kb3Z5J2;akt*B*a%QvWOB9B@fL~a(|YU$ zrz(akvB|QeZk{Fz@$jWyZPV8A_&eKI$Zh)E^XfptI04TNBi&WkAqdbd{fL5wJc&T#v6H7CUpR5Uw zg0m+}5y}*ouL|#nt14%AY)*w=;-M1;aKw#u{@j!-=0*X@=kuR#5zfK;gq*Rm57G?3 zcEfZa7{j4_FQVfK@;Uav<13(gM#nVx*MnLv`M&T*li9H%79<;- zVDCKy?X#}ECv^;oi9&k!AQ}p93<^;$P-P{%ezz2JYmjpJdg69_e6yT{@mdE12tpWX zX{0T%&{8nOm!H%Xluh~C=!oAfVJ25&WXva8OVH`@?fe_&qn#`GpFsh0dprdy!6ryE zbb~^Y<+#k#Mz0zUL@c~{3!-u1r(WQB%8N?v%Z|KZ7zO^^ff+>eRkrHv0JW9bb8x1| z(*l3VP-^kGh#z~>!(KQMTNZPF1mtb9wHlpo_At2XOW(I(6t(Y3xh`sxF~(LnKE%M& zfj?Q?k3?M*GI7}31jyzI?{8oiXYj-3o!rA5hW&_z3d>*qU6q7Np1Ef_p0H*F{qnsLHr*dtsBZ8l_yq1o4?q@> zZe2mQwlYodR>9oPcy3eDycQQ!V~K2(BYg;3vhpnWyUJRIA11#DFf8^?PQIEU`p`iQ zX;?(8%)_rxsGK0q2fu=(2k<3+c1}snQ1TdQSERv>MS4eig;j;u0u%{A7l7lCcgv#Z8Ad!y^YholB-w?+E-=UMkZfD z?ODbJ0$wPvGI)c!A&VXf6d+Co3=OL4W`X~^HOZ*o5)_6G`|VMiJHvn^h9VdvorCTa z71miWRFODDrT~t;euNr;2$}#O7Xe)ss%fTjvgp()?W#`@r~z53xR`!ypF!J+?+w2U zXw-}Avy0^%_)ESycDuzs3dW9p1;*Ln-t6+m3IQnu>p}_(y$>6?54sUe1=>Djv2Oyg z4oGW^s*w<^?S48Xpjox4Xl1J85jdcit5Akmg<4yg()}baWxWjg)edH1ST-rnLH? 
z-#O)DN0WxOU%$)s1qbjJg1C+eFG7G z)LSu$643f1ez5_TIn|dBEDD-c4}Mq0&>w^PkmU29|WR>g^zLU zNiP)z?~6uy@ExZAwWs!^f}j-|z`dwpSN;17Nl^0y2*E_^g+Rb}t9^SOCX!Io|GuB5v5RjUk3$&9EYs4b%gnbysD8do;uP3WGTWOcBbUKW z0_ER8U|)u^O2I8+}$n28qtwiP+$+G~ZDg~S2@t3)X zWb0rrGB8%_fO)GdmZShvFG+rXO}jle87XCdgXw#IXdQ%co?%Op?s0(({NLj=>(Cff zLV>1tj~560{9pd~-d*v<4Zb9g3)=e-6Bt84#(-=+msgDRB%k8D2A#&P=G(V*^9>R8 zf0AaIzG9ZUok}I-Dm|UaB|~}|=FjjkknwXeR?TODl$T{jN^h6^%cmvPyb1i?J26h5B^R72`}{T0#s;f6A8H?C`&Qg?b3TSK5j*%cR8)*HB_0Z<4qsD z7?`=kIqW{dXbklkayIU{6Q&Q%1lYr2EbD+REw0j5Yo&S*-VB*yZ4xJ=i+N z?>Ip7t_XJ;q8+aU-=et>d<(Ht!Q5Anji7$X_D!1)K{_LGWKu7SciPM2R;G}NfSNiL z3rD)|e0%1>J8EBWl=+?C$aC?xqqZ}e$#Z8(Kw+JfBkYw?g`Ghne zvu9rMWRCQ3xBj5elU=>fb#Vz31$}!Kp5Idvh5BOicyE!J&1cf1fsnW}J13Up17~8* zove8_f1+#Z-JX%$cll(i)MM_OW@aI$D#;jz^?Ob)uqd~j0W`wuGK{QY*o#)xA~}+q z0vr)rhkjvaX;Lif706=-sjTh{Z#7I~n@n)z&&|ftX?}m$I`|wXkA$4R<~gHlZnn7= z*4lL9Lvp>?W>+`l<;xQdLpT~s9xH-I#2%Q};)E22M8QEdS1!4!q&nM~V>{5&CS)@b zWA|YOc)|zs!`#BM%r<+{)g=8}BFy_gb}Idt-{l(Q1$18kZFRBxy(+3RX}W}x1>}8J z`tOO^b)N$-JyXgC#*F?n`sJ-#W}m#bReQsXmu4)czJww587uZeTH_;qxGA+Zyp&X! z>-rr(MI{V5-)B8#wbH5K*hL}#pr%_{$RKULwx4(1Ls?nnGb*?#$b;*bM~YpYGo#6c z!$+xe>O74fX?__j1|Rf1g`Zj8ImOU8m?zEemY#wuzJ;VCV&B7??~Zy-UpG21eHkzT`;MvrV4cc&Hf6_EwWqTahSgRG zI^d$f@nYlg2EJt3nvB)#Y`40~rL(6hy7|bCJ$T~@dPbyMh)R<^`@x>k5EYWRJQ{A_ zF1I#Ep8tt-2>9sF#uFbo&e2}9$m}&6bU_6B!VyJ8gWe588?9@>XF;3`zgfR+t8Dl< zC7#^T1?Tq~)M>qL1zzgUG(>AYPwyoy8%=%qOze6U!*<{ov^EclT;Mx5pQ;v_O5Kcie z66%*tdQ6F^s;Z;7a718)Ln-ruGjKc#PuctC1+gtLS{Bczl_TL>Dc5%#YFuli+fw}t z)mcKxUhkD1S_8Y|3czexA7<6eCj_gxqtp5^U$JobD`MhhPrpB|BOM2!h~yuu`kCgo z>~n3=&=Il0O2vTvz{=LTjon3e#D?SXqE)=|@oHdS;EM4?Tsp~kQ#aw76k501^hjs1 z!RM(ph;)bPOIt>~lc@B9gY>Yg*nqGXIFj#0o?ZX!i^o@ErD6SijcJ~HS&jdsKL%#o zg_mlXHTWC3;g5x|*8Kzl$oQb9j_xXj3{vyEh09YSGxMyD$5#j5XLu?_RDL=0+P8Nt zff6B&i)pm&TFV*jx>|WGfT)Pkcx;i#}lhsrmbb}H%Z-N!I|6McL!oKeA$HoVMR6&hHz&W&z zN0XT6Z@_Af0lcoeG`(RZQuf=^ZOQtWCn1-~mq<>_n&stVt`L7RZ(p_+z5K2RcuA80 zmiPF))YWy;V=wp>icu$E&KDlaC%t9*mSbmU65tv?D4y5RSI|557<+gwHdG9L-6Ezn z0jTn_Xu$8lRsE^PX>^v|N|*l9qOA|7>3QpIgWla*nf4!4T^mgvyk6_}1 zO^!34l>6_a|D%}e{AcU^`;?+ab(uA)p;fKDDmJZ9yGE_3QIt@tsYZlaRV5T{)u#5| zyRk=Ai4|hhj@W9(3is*vPxw8#pPUDIy^`x(pYa}_b8__rc$HZcW$Ebfd-w1Yr+&zf zZ8&vgJ@PtW`;-QKWbiHKw?JH@VrOUTs51z*(ESR_hT&53B?KWEnczBQDb7(c?qIyZ9n)1m+)<|-#^!pk`3HjRF+YOuL~I8#c*gP zVzpW3efe}7=NX(Bc~lVoEOIBiDO(8zN%;B$&}}=~r$(mOv^1;kzn9d_50_mFp12$l zCEexY`})Cs%4vus0QZ&@#DXa`l&ulRx_)G)e>;#^ih|RyX-9MxCu|3gF<0z=QT#`h z!q1b`&IWH=Lz{Yz&WC9k%=RnM)S(x}oW>b}7C+jJgCW;t{nkRclksuAQ?yRvp8YSU z{cR!9Cn?q2_NnGOcD3=Q=T0xI*i~&cG+ywFBR0)|6{~73fa;H{e<4xp2d6)gLCw3` zZIqcI<{C@q><22B%O~l=8SM!6u;ntdjq1sECl2v@gGyJ=Y8H%dm^R`j)a`>1C!yQ` z$N>UD7wV8X`T1Qgb4mt3iK}v}FTJU4@#`r1{>)usCN>%I_bY@aS+?4ya-z;{Mapqn zF@_ta>;*GrRddNwvkL0j?eYSMo#?H^yrt0jE#cnbO)|bhC<6FgV(ws&!f(HO_vlQx zxy$&^Vv#NSr>5N!^m_7I;XRm5e?@D@9SBURwbRhs_dE@GbimuB5#*Ap!=)IK%UY5J z6zR7|C!qczNyx+8eQmvHC#+@Iz24GQ8BY|lFqA5(@@RWCEJZ(XoB6MOr|fw~JsBvs zW2n8ZUUc`&a_?-Gpp)>>-H~M(&(n@ch|y=GexQcAuwb+}ent-V6ad<&jT(KKw6AhO zfTpl~w#6qn@}YS!dINSK?!EoU$@d?p7acD3`W=qi(y)^c2bFoelvX;Y@NJf&>$8*1 z=PtEQ)isHe_-47J+%Z(d2fP82Z$9%Mp%5hiJpFAh^XJINcZu%TZXvrQ=y4OB+J*cp z@h)u}nfT+KFrb~pXqaaKPJdt;*Y)g%>9BD?>i1*`;*JuHqBMc2UnNmOsnrcdI(^A= ze!Rgt|J3eV!!4+H&TS+3_2MNNrz`LxSsA&Hu|MdYZlSdzJ=}A)N8vZ;T$rc#0qUuR z+KKs$)e&L{hnqfZZh_pW&p$5&&WfLh8Ie=Q@)U{UcLG2 zX7kK1;2)+gIpqa)QIxCqX+V6 zpV=oYgq!edD+n`As1*4!Qf5xW!u*;4Q;^etiF#U$X+t zwd5ClfmD)I_ixRWXSc`;wdY!Z>iFFx8{8rWLe7BIpxKy&>dIk_ow~QIO7mxVk^p#} zqWu9;!C$`@t-aiKMsPj1%LNNRLrRqH^-3mKq$AU0MFiFfDd=&Ha{gBc4Nn z*|k3>|4gCBdHc`7`2H4vt;z}&iGZrF-)PFG4q?0nYwzB<_#Z!T0p0IFoQTd7DsiaL 
zW)ip~9-CNk?0=E%8IZ;W$J|)3bOKP%0NO4doCEL?q=sH}Vv~n?Z4|Tqqm~_vya<>K zNG~&C(}It%lKeVxyXrhgZo^Lki6dKe_wlHNh5x84Whm(Y~_ z#;nV&GaXaezafiDDz_-2e$V0TUVi0@LJL{#?h6XQHazMIEnhs@L;%IF2QXy0mf@q$ zFu5&lI9Dm4hvGS5!X};7uRyul-q@H=ocEto0OIfi76$02(=}Pkyh7K-ZMLROzkmY_ zN0FZ^(^~%qn=E@DnbygiV|25X>L$c|T za&mZSb6^ou7`voo=XeoP9Dpw!^H2Q~#2G@AgCqqWGxupJxKg?F)6ZK#TwI@5Mt{Df zeow*hiPo!7Zz)fUCAxzj@NYP5lP;imUcZ?T>?yWcn!iLH%6T-@F1x$Bqibfdq3XSM zpI5>`6$}L2-f^nqpQTa_j{!D6Xg1sdm>09-5w+WP>VFd5TK%J6&VS?UXAQ|`7&0fX zPyR-Fe65_pU;PcTQ{OoHv29Ot_i;?6a$4dJ4WRHF?qESI9-*m_)$HBdfbwnCGK_rf zJi5=kXFt^kx<@XGZM&WgI14Ryxt|>CKYsks)Z|`gElXTY>`}DRTygqx$i6qVORhhp zJq~0$WcN#!9>6cY{C=&iXFN^cT$Lp=M?jJknDBOGorT4703Iiu^ zCH;oPyqMXZ#y(ShYQa^xnfk%ghQgL@eBR^UA|h+v;>!qQC)5kMduH)a9g1C54H2!pGDdOZS!-?PWg)`W&|@1Y6V^JZqp6nEEsp4e;T&`1rId9VCZv2t6&2^4uw z)(VZDmaS}Ru#I2*SjYLIEXG6fq_8q`wf{@0u!6~(-NW(xX~3&M_sA(~eldU(6o9p7 zlzdO#SqcZFR;-vwscy1!W7RfA|V0mtQ>O2mOQExl#no0b%%yExh@zo;R z{1d_6(nT{V)M3f zaoAgw>SC<+zhv-qb}>amCvZ5q()u|+m%@7ChjPNx@_f=?@r!~$qsXOA-wuJfRempf zzZ3KXm;%Tp8I2lRj?S`N< zP~vJ|_$)z^U%*0kF^3(hV@EBn4hsLkz3g|8UDpIl0GP|=*G-^!GXZVF;#449P2~I$ z+#ZPoB9`{%%^MWJS(Pjg@5tTZ|3sf5JT*4v^VH1sZ{O(U$*ib@sF1HYqnhr#?$FnM zZ^zPYW19YX91Ys*oZRcldsbFAEr6*5z73|5qge0%7(C;Eo}Yv9eNqsJ>Mq>&=CJchBuu>aF%p3|3emXe zqjC9?bO;mYvRlBlClsw$RT(4c*0%?3=XYp(a=?%tuqSAY8^75DZk57ds&uGtTrEW>`hwq`Na zcJiM@XzN*OTT&WiS^wv#IJ0Q&0$<)@q+{k{7klD9z`R*8PJ@_Z!Hv3vuYcE`aeT$R zGlUqAR6P@(R`P`!LthpLJ9^TGGrk^uqn;_M^9Ycx*Ke#U!M_Kj z&p52Cxw%(+OlK)#%R$Ta6&6KOxFkw36IuJUDquX6**Wzdn3JiSWcE2$o9k5fO^-(P z_aCrEG+}VxX~hnYs+LiEth~=r)wR}RS-V5HH}G-#Vs0oC8m@b!A~5iQ%zC}P?-4M< zQL785IWO600{-R=90BJzMR8)_4VT|VU4}KLHM?I_u}sR= zDkUk=+7vh!0B7=iQE5CLU)nLE;{%N8|8gl|&-8`f8|GN;`?QfY2Ef zx}-$sx=1!-G#>!pTJ z<02#HH-y|8Nma7-;`TZ5_#x|{P+z+V=d>Z^^cRrsR=$+(7D(6E=BtH>Dz0wEsJkT& zXV3fF=#&n*Kn%7UW=MNk>r zOOv#5Iw`8Vp1gjs`AYKKFDdj2v3-Vw`OXsw^ z%#{83A0gD;K)?MH>n|m)EK*hsCRqPwoJ&1gC4b^Mq~X58PN)8cI{H^F!KD!@#4UQ@ z$;EYKO#dTCY4|bePBX5B(#De6d524^`U%;+^tcG5tN(HlCj(E`qgul?`Q*>6fJ*m- z(!e9WqmAqOLES`Nqh7gVuuq}Gv%zzvAvc$RN?Z-s$vzByy6T)_K^&v5wuYo`m=Homc!CxqS!8x>{fyDS8+Tt2bYFDJcjScG8&t54jkbc-vppsC$eI&v%QgxvfMW)Mw z3Yo1)4>eDL9==8_Mi_1cJSRVoB2O$`ok}xUOU{hBgH@BS3`6M-JuI0tZh?SGQ+M8; zUq!cMe0ipy@BOj?!qlkdhLu+2UcCUk@mGmEAv0xJQ2BEOe~}Os|FnujeBSbm$?}mC zNez5mHvz#e#N-?hInWVknrM43aVkv6mGf+_z`3jFpd-HMmVp9K+!kF~pGaufm+cGW_&if-!=K1AAtUZMtA zQPjW`R(0)RvMFVOXw)qGpXeWI7r*{NEt;SUw1(9^7)Xbf_w{|OAKmKP%7AC*o~~k# z_64ffQ!uWR(w{U68PYL4Y${P(>Szsry@5J9VB*skG!OQ;f^6=4Ix7(0hG8Q44;vP= z&<*A>g%qmXj6lxJ=J3?iG9%B|Evoy1$(ls!cr0>);AL3WOCTHJPtL5&p)M7u{A}d& zyZ96GslwT1!G zZpr&<1r%wgTjgvkL;Bp(8qD(6`ShbHY7cgkah!c_CMRNZ*P3aguqb@AgSzqGLh7X< zRJ!$h@FvOGPk(l22F%a@>ErnYSfGobO_;-K_33LZ#nV_sNLwPxn6S6dk^jpn$> zCMt#W3)91UN8kgy;H!dy_q~@Q>eG5LMF#}w*=H_|q`bDHC?zGBaj)`Z`+=f+ zkMo*SVkACkBfbEBBhNnRbqvBLo7BU`wbn?rfsBP>BR*LV&Ahql?6O8^BR&l_4(hHS zNlME@i8r3^{XY526Xk5ET+0XCAM7`HKl^!3ki`f35(^%}ZnRLKs`LZa6SmZ@)=Ics{|MRt<@r-aZg64r}@L{zMVGWF53Dh_0g4 zS?fNK^~{jxr#jID-hNKZ7*x9*LOrxLwYJ)umhVq0`jcQxxg$8C`?j1srKc%+QO8|6 z(5H;VBtkm;VR*4jV(J7^n)Om-3j4NPSlT`B2Q>WT0UkH-3@o;$)l`Goz{tZ3WnJs8w9M0;O&&-u6F@JTfakT;G) zyL6K}B2zxKRxC2(cnGghO>f!G?=1@o=}`@CsvP= z6D`8l3eAgiOM6JfJO`Mue@U0pg<47yr2<=A4sV{+dgpXv%c7{8!pGT z$?#{x@F$G$C5EAPVBK`mm+6F@l6q9tCIIclc}0;oxALIfsDg9TlNyc7Roz{Zw8)h9dL3FIa5M82H|eqL0QqaW2)v}kRJ2hT{2B=2|_PxZXjtO0YH50vC$ zBlK5t7K^9lb)dZa?%eRAlRNpB(}@4Lag)Xc;=6!68_#>r;PkJ$#?nm^vz9{KtY{Q8 z8s(U)x~lez{*>i}TvSqg-2&mzI*mQc2bXvjbh@O0MXhLcHjG!evZBuAs?j8?{)DE} z#{|#YVp_P>)NXQSwr-b3Y`k$U$Qr4K@y!p-j)PAt3mWs+49Bj;V&1) - rv=$? +# # execute the test +# out=$($function 2>&1) +# rv=$? 
-    if [ $rv -ne 0 ]; then
-        # if this functionality is not implemented return right away.
-        if echo "$out" | grep -q "NotImplemented"; then
-            ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
-            return 0
-        fi
-    fi
+#     if [ $rv -ne 0 ]; then
+#         # if this functionality is not implemented return right away.
+#         if echo "$out" | grep -q "NotImplemented"; then
+#             ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
+#             return 0
+#         fi
+#     fi
 
-    # if make bucket succeeds set object lock configuration
-    if [ $rv -eq 0 ]; then
-        function="${AWS} s3api put-object-lock-configuration --bucket ${bucket_name} --object-lock-configuration ObjectLockEnabled=Enabled"
-        out=$($function 2>&1)
-        rv=$?
-        if [ $rv -ne 0 ]; then
-            # if this functionality is not implemented return right away.
-            if echo "$out" | grep -q "NotImplemented"; then
-                ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
-                return 0
-            fi
-        fi
-    else
-        # if make bucket fails, $bucket_name has the error output
-        out="${bucket_name}"
-    fi
+#     # if make bucket succeeds set object lock configuration
+#     if [ $rv -eq 0 ]; then
+#         function="${AWS} s3api put-object-lock-configuration --bucket ${bucket_name} --object-lock-configuration ObjectLockEnabled=Enabled"
+#         out=$($function 2>&1)
+#         rv=$?
+#         if [ $rv -ne 0 ]; then
+#             # if this functionality is not implemented return right away.
+#             if echo "$out" | grep -q "NotImplemented"; then
+#                 ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
+#                 return 0
+#             fi
+#         fi
+#     else
+#         # if make bucket fails, $bucket_name has the error output
+#         out="${bucket_name}"
+#     fi
 
-    # if setting object lock configuration succeeds, upload a file first time
-    if [ $rv -eq 0 ]; then
-        function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB"
-        out=$($function 2>&1)
-        rv=$?
-    else
-        # if make bucket fails, $bucket_name has the error output
-        out="${bucket_name}"
-    fi
+#     # if setting object lock configuration succeeds, upload a file first time
+#     if [ $rv -eq 0 ]; then
+#         function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB"
+#         out=$($function 2>&1)
+#         rv=$?
+#     else
+#         # if make bucket fails, $bucket_name has the error output
+#         out="${bucket_name}"
+#     fi
 
-    # second time upload of same file should fail due to WORM setting
-    if [ $rv -eq 0 ]; then
-        function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB"
-        out=$($function 2>&1)
-        rv=$?
-    else
-        out="First time object upload failed"
-    fi
+#     # second time upload will succeed and there shall now be two versions of the object
+#     if [ $rv -eq 0 ]; then
+#         function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB"
+#         out=$($function 2>&1)
+#         rv=$?
+#     else
+#         out="First time object upload failed"
+#     fi
 
-    if [ $rv -eq 0 ]; then
-        log_success "$(get_duration "$start_time")" "${test_function}"
-    else
-        # cleanup is not possible due to one day validity of object lock configurataion
-        log_failure "$(get_duration "$start_time")" "${function}" "${out}"
-    fi
+#     if [ $rv -eq 0 ]; then
+#         log_success "$(get_duration "$start_time")" "${test_function}"
+#     else
+#         # cleanup is not possible due to the one-day validity of the object lock configuration
+#         log_failure "$(get_duration "$start_time")" "${function}" "${out}"
+#     fi
 
-    return $rv
-}
+#     return $rv
+# }
 
-# Tests creating and deleting an object with legal hold.
-function test_legal_hold() {
-    # log start time
-    start_time=$(get_time)
+# # Tests creating and deleting an object with legal hold.
+# function test_legal_hold() {
+#     # log start time
+#     start_time=$(get_time)
 
-    # Make bucket
-    bucket_name="awscli-mint-test-bucket-$RANDOM"
-    function="${AWS} s3api create-bucket --bucket ${bucket_name} --object-lock-enabled-for-bucket"
+#     # Make bucket
+#     bucket_name="awscli-mint-test-bucket-$RANDOM"
+#     function="${AWS} s3api create-bucket --bucket ${bucket_name} --object-lock-enabled-for-bucket"
 
-    # execute the test
-    out=$($function 2>&1)
-    rv=$?
+#     # execute the test
+#     out=$($function 2>&1)
+#     rv=$?
 
-    if [ $rv -ne 0 ]; then
-        # if this functionality is not implemented return right away.
-        if echo "$out" | grep -q "NotImplemented"; then
-            ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
-            return 0
-        fi
-    fi
+#     if [ $rv -ne 0 ]; then
+#         # if this functionality is not implemented return right away.
+#         if echo "$out" | grep -q "NotImplemented"; then
+#             ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
+#             return 0
+#         fi
+#     fi
 
-    # if make bucket succeeds upload a file
-    if [ $rv -eq 0 ]; then
-        function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB --object-lock-legal-hold-status ON"
-        out=$($function 2>&1)
-        errcnt=$(echo "$out" | sed -n '/Bucket is missing ObjectLockConfiguration/p' | wc -l)
-        # skip test for gateways
-        if [ "$errcnt" -eq 1 ]; then
-            return 0
-        fi
-        rv=$?
-    else
-        # if make bucket fails, $bucket_name has the error output
-        out="${bucket_name}"
-    fi
+#     # if make bucket succeeds upload a file
+#     if [ $rv -eq 0 ]; then
+#         function="${AWS} s3api put-object --body ${MINT_DATA_DIR}/datafile-1-kB --bucket ${bucket_name} --key datafile-1-kB --object-lock-legal-hold-status ON"
+#         out=$($function 2>&1)
+#         errcnt=$(echo "$out" | sed -n '/Bucket is missing ObjectLockConfiguration/p' | wc -l)
+#         # skip test for gateways
+#         if [ "$errcnt" -eq 1 ]; then
+#             return 0
+#         fi
+#         rv=$?
+#     else
+#         # if make bucket fails, $bucket_name has the error output
+#         out="${bucket_name}"
+#     fi
 
-    # if upload succeeds stat the file
-    if [ $rv -eq 0 ]; then
-        function="${AWS} s3api head-object --bucket ${bucket_name} --key datafile-1-kB"
-        # save the ref to function being tested, so it can be logged
-        test_function=${function}
-        out=$($function 2>&1)
-        lhold=$(echo "$out" | jq -r .ObjectLockLegalHoldStatus)
-        rv=$?
-    fi
+#     # if upload succeeds stat the file
+#     if [ $rv -eq 0 ]; then
+#         function="${AWS} s3api head-object --bucket ${bucket_name} --key datafile-1-kB"
+#         # save the ref to function being tested, so it can be logged
+#         test_function=${function}
+#         out=$($function 2>&1)
+#         lhold=$(echo "$out" | jq -r .ObjectLockLegalHoldStatus)
+#         rv=$?
+#     fi
 
-    # if head-object succeeds, verify metadata has legal hold status
-    if [ $rv -eq 0 ]; then
-        if [ "${lhold}" == "" ]; then
-            rv=1
-            out="Legal hold was not applied"
-        fi
-        if [ "${lhold}" == "OFF" ]; then
-            rv=1
-            out="Legal hold was not applied"
-        fi
-    fi
-    if [ $rv -eq 0 ]; then
-        function="${AWS} s3api put-object-legal-hold --bucket ${bucket_name} --key datafile-1-kB --legal-hold Status=OFF"
-        out=$($function 2>&1)
-        rv=$?
-    else
-        # if make bucket fails, $bucket_name has the error output
-        out="${bucket_name}"
-    fi
-    # if upload succeeds download the file
-    if [ $rv -eq 0 ]; then
-        function="${AWS} s3api get-object-legal-hold --bucket ${bucket_name} --key datafile-1-kB"
-        # save the ref to function being tested, so it can be logged
-        test_function=${function}
-        out=$($function 2>&1)
-        lhold=$(echo "$out" | jq -r .LegalHold.Status)
-        rv=$?
-    fi
+#     # if head-object succeeds, verify metadata has legal hold status
+#     if [ $rv -eq 0 ]; then
+#         if [ "${lhold}" == "" ]; then
+#             rv=1
+#             out="Legal hold was not applied"
+#         fi
+#         if [ "${lhold}" == "OFF" ]; then
+#             rv=1
+#             out="Legal hold was not applied"
+#         fi
+#     fi
+#     if [ $rv -eq 0 ]; then
+#         function="${AWS} s3api put-object-legal-hold --bucket ${bucket_name} --key datafile-1-kB --legal-hold Status=OFF"
+#         out=$($function 2>&1)
+#         rv=$?
+#     else
+#         # if make bucket fails, $bucket_name has the error output
+#         out="${bucket_name}"
+#     fi
+#     # if upload succeeds download the file
+#     if [ $rv -eq 0 ]; then
+#         function="${AWS} s3api get-object-legal-hold --bucket ${bucket_name} --key datafile-1-kB"
+#         # save the ref to function being tested, so it can be logged
+#         test_function=${function}
+#         out=$($function 2>&1)
+#         lhold=$(echo "$out" | jq -r .LegalHold.Status)
+#         rv=$?
+#     fi
 
-    # if head-object succeeds, verify metadata has legal hold status
-    if [ $rv -eq 0 ]; then
-        if [ "${lhold}" == "" ]; then
-            rv=1
-            out="Legal hold was not applied"
-        fi
-        if [ "${lhold}" == "ON" ]; then
-            rv=1
-            out="Legal hold status not turned off"
-        fi
-    fi
-    # Attempt a delete on prefix shouldn't delete the directory since we have an object inside it.
-    if [ $rv -eq 0 ]; then
-        function="${AWS} s3api delete-object --bucket ${bucket_name} --key datafile-1-kB"
-        # save the ref to function being tested, so it can be logged
-        test_function=${function}
-        out=$($function 2>&1)
-        rv=$?
-    fi
-    if [ $rv -eq 0 ]; then
-        log_success "$(get_duration "$start_time")" "${test_function}"
-    else
-        # clean up and log error
-        ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
-        log_failure "$(get_duration "$start_time")" "${function}" "${out}"
-    fi
+#     # if get-object-legal-hold succeeds, verify legal hold status was turned off
+#     if [ $rv -eq 0 ]; then
+#         if [ "${lhold}" == "" ]; then
+#             rv=1
+#             out="Legal hold was not applied"
+#         fi
+#         if [ "${lhold}" == "ON" ]; then
+#             rv=1
+#             out="Legal hold status not turned off"
+#         fi
+#     fi
+#     # delete the object; this should succeed now that the legal hold is off
+#     if [ $rv -eq 0 ]; then
+#         function="${AWS} s3api delete-object --bucket ${bucket_name} --key datafile-1-kB"
+#         # save the ref to function being tested, so it can be logged
+#         test_function=${function}
+#         out=$($function 2>&1)
+#         rv=$?
+#     fi
+#     if [ $rv -eq 0 ]; then
+#         log_success "$(get_duration "$start_time")" "${test_function}"
+#     else
+#         # clean up and log error
+#         ${AWS} s3 rb s3://"${bucket_name}" --force > /dev/null 2>&1
+#         log_failure "$(get_duration "$start_time")" "${function}" "${out}"
+#     fi
 
-    return $rv
-}
+#     return $rv
+# }
 
 # main handler for all the tests.
 main() {
@@ -1716,9 +1716,9 @@ main() {
     # Error tests
     test_list_objects_error && \
     test_put_object_error && \
-    test_serverside_encryption_error && \
-    test_worm_bucket && \
-    test_legal_hold
+    test_serverside_encryption_error
+    # test_worm_bucket && \
+    # test_legal_hold
 
     return $?
 }
diff --git a/pkg/bucket/lifecycle/expiration.go b/pkg/bucket/lifecycle/expiration.go
index 2748d7efd..f220e3fa8 100644
--- a/pkg/bucket/lifecycle/expiration.go
+++ b/pkg/bucket/lifecycle/expiration.go
@@ -22,10 +22,11 @@ import (
 )
 
 var (
-	errLifecycleInvalidDate       = Errorf("Date must be provided in ISO 8601 format")
-	errLifecycleInvalidDays       = Errorf("Days must be positive integer when used with Expiration")
-	errLifecycleInvalidExpiration = Errorf("At least one of Days or Date should be present inside Expiration")
-	errLifecycleDateNotMidnight   = Errorf("'Date' must be at midnight GMT")
+	errLifecycleInvalidDate         = Errorf("Date must be provided in ISO 8601 format")
+	errLifecycleInvalidDays         = Errorf("Days must be positive integer when used with Expiration")
+	errLifecycleInvalidExpiration   = Errorf("At least one of Days or Date should be present inside Expiration")
+	errLifecycleInvalidDeleteMarker = Errorf("Delete marker cannot be specified with Days or Date in a Lifecycle Expiration Policy")
+	errLifecycleDateNotMidnight     = Errorf("'Date' must be at midnight GMT")
 )
 
 // ExpirationDays is a type alias to unmarshal Days in Expiration
@@ -96,17 +97,49 @@ func (eDate *ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartEl
 	return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
 }
 
+// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element.
+type ExpireDeleteMarker bool
+
 // Expiration - expiration actions for a rule in lifecycle configuration.
 type Expiration struct {
-	XMLName xml.Name       `xml:"Expiration"`
-	Days    ExpirationDays `xml:"Days,omitempty"`
-	Date    ExpirationDate `xml:"Date,omitempty"`
+	XMLName      xml.Name           `xml:"Expiration"`
+	Days         ExpirationDays     `xml:"Days,omitempty"`
+	Date         ExpirationDate     `xml:"Date,omitempty"`
+	DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty"`
+}
+
+// UnmarshalXML parses delete marker and validates if it is set.
+func (b *ExpireDeleteMarker) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error {
+	var deleteMarker bool
+	err := d.DecodeElement(&deleteMarker, &startElement)
+	if err != nil {
+		return err
+	}
+	*b = ExpireDeleteMarker(deleteMarker)
+	return nil
+}
+
+// MarshalXML encodes delete marker boolean into an XML form.
+func (b *ExpireDeleteMarker) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+	if !*b {
+		return nil
+	}
+	return e.EncodeElement(*b, startElement)
+}
 
 // Validate - validates the "Expiration" element
 func (e Expiration) Validate() error {
+	// DeleteMarker cannot be specified if Days or Date is specified.
+	if (!e.IsDaysNull() || !e.IsDateNull()) && bool(e.DeleteMarker) {
+		return errLifecycleInvalidDeleteMarker
+	}
+
 	// Neither expiration days or date is specified
-	if e.IsDaysNull() && e.IsDateNull() {
+	// if delete marker is false one of them should be specified
+	if !bool(e.DeleteMarker) && e.IsDaysNull() && e.IsDateNull() {
 		return errLifecycleInvalidExpiration
 	}
 
@@ -114,6 +147,7 @@ func (e Expiration) Validate() error {
 	if !e.IsDaysNull() && !e.IsDateNull() {
 		return errLifecycleInvalidExpiration
 	}
+
 	return nil
 }
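
For orientation, a minimal self-contained sketch (not part of the patch) of the XML shape the new ExpiredObjectDeleteMarker element gives an Expiration action. The local stand-in struct below only mirrors the patched type so the snippet compiles without the MinIO tree; per the new Validate rule, the element is meaningful only when Days and Date are absent.

package main

import (
	"encoding/xml"
	"fmt"
)

// expiration is a local stand-in mirroring the patched Expiration type.
type expiration struct {
	XMLName      xml.Name `xml:"Expiration"`
	Days         int      `xml:"Days,omitempty"`
	DeleteMarker bool     `xml:"ExpiredObjectDeleteMarker,omitempty"`
}

func main() {
	// A valid delete-marker expiration carries neither Days nor Date.
	in := `<Expiration><ExpiredObjectDeleteMarker>true</ExpiredObjectDeleteMarker></Expiration>`
	var e expiration
	if err := xml.Unmarshal([]byte(in), &e); err != nil {
		panic(err)
	}
	fmt.Printf("days=%d expiredObjectDeleteMarker=%v\n", e.Days, e.DeleteMarker)
	// prints: days=0 expiredObjectDeleteMarker=true
}
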
+ if (!e.IsDateNull() || !e.IsDateNull()) && bool(e.DeleteMarker) { + return errLifecycleInvalidDeleteMarker + } + // Neither expiration days or date is specified - if e.IsDaysNull() && e.IsDateNull() { + // if delete marker is false one of them should be specified + if !bool(e.DeleteMarker) && e.IsDaysNull() && e.IsDateNull() { return errLifecycleInvalidExpiration } @@ -114,6 +147,7 @@ func (e Expiration) Validate() error { if !e.IsDaysNull() && !e.IsDateNull() { return errLifecycleInvalidExpiration } + return nil } diff --git a/pkg/bucket/lifecycle/lifecycle.go b/pkg/bucket/lifecycle/lifecycle.go index fd401d3c1..5e62c2752 100644 --- a/pkg/bucket/lifecycle/lifecycle.go +++ b/pkg/bucket/lifecycle/lifecycle.go @@ -132,8 +132,8 @@ func (lc Lifecycle) Validate() error { // FilterActionableRules returns the rules actions that need to be executed // after evaluating prefix/tag filtering -func (lc Lifecycle) FilterActionableRules(objName, objTags string) []Rule { - if objName == "" { +func (lc Lifecycle) FilterActionableRules(obj ObjectOpts) []Rule { + if obj.Name == "" { return nil } var rules []Rule @@ -141,30 +141,86 @@ func (lc Lifecycle) FilterActionableRules(objName, objTags string) []Rule { if rule.Status == Disabled { continue } - if !strings.HasPrefix(objName, rule.Prefix()) { + if !strings.HasPrefix(obj.Name, rule.Prefix()) { continue } - tags := strings.Split(objTags, "&") - if rule.Filter.TestTags(tags) { + // Indicates whether MinIO will remove a delete marker with no + // noncurrent versions. If set to true, the delete marker will + // be expired; if set to false the policy takes no action. This + // cannot be specified with Days or Date in a Lifecycle + // Expiration Policy. + if rule.Expiration.DeleteMarker { + rules = append(rules, rule) + continue + } + // The NoncurrentVersionExpiration action requests MinIO to expire + // noncurrent versions of objects 100 days after the objects become + // noncurrent. + if !rule.NoncurrentVersionExpiration.IsDaysNull() { + rules = append(rules, rule) + continue + } + if rule.Filter.TestTags(strings.Split(obj.UserTags, "&")) { rules = append(rules, rule) } } return rules } +// ObjectOpts provides information to deduce the lifecycle actions +// which can be triggered on the resultant object. +type ObjectOpts struct { + Name string + UserTags string + ModTime time.Time + VersionID string + IsLatest bool + DeleteMarker bool +} + // ComputeAction returns the action to perform by evaluating all lifecycle rules // against the object name and its modification time. -func (lc Lifecycle) ComputeAction(objName, objTags string, modTime time.Time) (action Action) { - action = NoneAction - if modTime.IsZero() { - return +func (lc Lifecycle) ComputeAction(obj ObjectOpts) Action { + var action = NoneAction + if obj.ModTime.IsZero() { + return action } - _, expiryTime := lc.PredictExpiryTime(objName, modTime, objTags) - if !expiryTime.IsZero() && time.Now().After(expiryTime) { - return DeleteAction + for _, rule := range lc.FilterActionableRules(obj) { + if obj.DeleteMarker && obj.IsLatest && bool(rule.Expiration.DeleteMarker) { + // Indicates whether MinIO will remove a delete marker with no noncurrent versions. + // Only latest marker is removed. If set to true, the delete marker will be expired; + // if set to false the policy takes no action. This cannot be specified with Days or + // Date in a Lifecycle Expiration Policy. 
+ return DeleteAction + } + + if !rule.NoncurrentVersionExpiration.IsDaysNull() { + if obj.VersionID != "" && !obj.IsLatest { + // Noncurrent versions should be deleted once they expire. + if time.Now().After(expectedExpiryTime(obj.ModTime, rule.NoncurrentVersionExpiration.NoncurrentDays)) { + return DeleteAction + } + return NoneAction + } + return NoneAction + } + + // All other expiration rules apply only to the latest version. + if obj.IsLatest { + switch { + case !rule.Expiration.IsDateNull(): + if time.Now().UTC().After(rule.Expiration.Date.Time) { + action = DeleteAction + } + case !rule.Expiration.IsDaysNull(): + if time.Now().UTC().After(expectedExpiryTime(obj.ModTime, rule.Expiration.Days)) { + action = DeleteAction + } + } + } + } - return + return action } // expectedExpiryTime calculates the expiry date/time based on a object modtime. @@ -179,13 +235,22 @@ func expectedExpiryTime(modTime time.Time, days ExpirationDays) time.Time { // PredictExpiryTime returns the expiry date/time of a given object // after evaluating the current lifecycle document. -func (lc Lifecycle) PredictExpiryTime(objName string, modTime time.Time, objTags string) (string, time.Time) { +func (lc Lifecycle) PredictExpiryTime(obj ObjectOpts) (string, time.Time) { + if obj.DeleteMarker { + // We don't need to send any x-amz-expiration for delete marker. + return "", time.Time{} + } + var finalExpiryDate time.Time var finalExpiryRuleID string // Iterate over all actionable rules and find the earliest // expiration date and its associated rule ID. - for _, rule := range lc.FilterActionableRules(objName, objTags) { + for _, rule := range lc.FilterActionableRules(obj) { + if !rule.NoncurrentVersionExpiration.IsDaysNull() && !obj.IsLatest && obj.VersionID != "" { + return rule.ID, expectedExpiryTime(time.Now(), ExpirationDays(rule.NoncurrentVersionExpiration.NoncurrentDays)) + } + if !rule.Expiration.IsDateNull() { if finalExpiryDate.IsZero() || finalExpiryDate.After(rule.Expiration.Date.Time) { finalExpiryRuleID = rule.ID @@ -193,7 +258,7 @@ } } if !rule.Expiration.IsDaysNull() { - expectedExpiry := expectedExpiryTime(modTime, rule.Expiration.Days) + expectedExpiry := expectedExpiryTime(obj.ModTime, rule.Expiration.Days) if finalExpiryDate.IsZero() || finalExpiryDate.After(expectedExpiry) { finalExpiryRuleID = rule.ID finalExpiryDate = expectedExpiry diff --git a/pkg/bucket/lifecycle/lifecycle_test.go b/pkg/bucket/lifecycle/lifecycle_test.go index 35b243841..2c27b897c 100644 --- a/pkg/bucket/lifecycle/lifecycle_test.go +++ b/pkg/bucket/lifecycle/lifecycle_test.go @@ -263,21 +263,21 @@ func TestComputeActions(t *testing.T) { }, // Too early to remove (test Date) { - inputConfig: `<LifecycleConfiguration><Rule><Filter><Prefix>foodir/</Prefix></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`, + inputConfig: `<LifecycleConfiguration><Rule><Filter><Prefix>foodir/</Prefix></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().UTC().Truncate(24*time.Hour).Add(24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`, objectName: "foodir/fooobject", objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago expectedAction: NoneAction, }, // Should remove (test Days) { - inputConfig: `<LifecycleConfiguration><Rule><Filter><Prefix>foodir/</Prefix></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`, + inputConfig: `<LifecycleConfiguration><Rule><Filter><Prefix>foodir/</Prefix></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().UTC().Truncate(24*time.Hour).Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`, objectName: "foodir/fooobject", objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago expectedAction: DeleteAction, }, // Should remove (Tags
match) { - inputConfig: `<LifecycleConfiguration><Rule><Filter><And><Prefix>foodir/</Prefix><Tag><Key>tag1</Key><Value>value1</Value></Tag></And></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`, + inputConfig: `<LifecycleConfiguration><Rule><Filter><And><Prefix>foodir/</Prefix><Tag><Key>tag1</Key><Value>value1</Value></Tag></And></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().UTC().Truncate(24*time.Hour).Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`, objectName: "foodir/fooobject", objectTags: "tag1=value1&tag2=value2", objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago @@ -310,7 +310,7 @@ func TestComputeActions(t *testing.T) { // Should not remove (Tags don't match) { - inputConfig: `<LifecycleConfiguration><Rule><Filter><And><Prefix>foodir/</Prefix><Tag><Key>tag</Key><Value>value1</Value></Tag></And></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`, + inputConfig: `<LifecycleConfiguration><Rule><Filter><And><Prefix>foodir/</Prefix><Tag><Key>tag</Key><Value>value1</Value></Tag></And></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().UTC().Truncate(24*time.Hour).Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`, objectName: "foodir/fooobject", objectTags: "tag1=value1", objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago @@ -333,14 +333,20 @@ func TestComputeActions(t *testing.T) { }, } - for i, tc := range testCases { - t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) { + for _, tc := range testCases { + tc := tc + t.Run("", func(t *testing.T) { lc, err := ParseLifecycleConfig(bytes.NewReader([]byte(tc.inputConfig))) if err != nil { - t.Fatalf("%d: Got unexpected error: %v", i+1, err) + t.Fatalf("Got unexpected error: %v", err) } - if resultAction := lc.ComputeAction(tc.objectName, tc.objectTags, tc.objectModTime); resultAction != tc.expectedAction { - t.Fatalf("%d: Expected action: `%v`, got: `%v`", i+1, tc.expectedAction, resultAction) + if resultAction := lc.ComputeAction(ObjectOpts{ + Name: tc.objectName, + UserTags: tc.objectTags, + ModTime: tc.objectModTime, + IsLatest: true, + }); resultAction != tc.expectedAction { + t.Fatalf("Expected action: `%v`, got: `%v`", tc.expectedAction, resultAction) } }) diff --git a/pkg/bucket/lifecycle/noncurrentversion.go b/pkg/bucket/lifecycle/noncurrentversion.go index d879c12c4..a1bfa28df 100644 --- a/pkg/bucket/lifecycle/noncurrentversion.go +++ b/pkg/bucket/lifecycle/noncurrentversion.go @@ -22,26 +22,31 @@ import ( // NoncurrentVersionExpiration - an action for lifecycle configuration rule. type NoncurrentVersionExpiration struct { - XMLName xml.Name `xml:"NoncurrentVersionExpiration"` - NoncurrentDays int `xml:"NoncurrentDays,omitempty"` + XMLName xml.Name `xml:"NoncurrentVersionExpiration"` + NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"` } // NoncurrentVersionTransition - an action for lifecycle configuration rule.
type NoncurrentVersionTransition struct { - NoncurrentDays int `xml:"NoncurrentDays"` - StorageClass string `xml:"StorageClass"` + NoncurrentDays ExpirationDays `xml:"NoncurrentDays"` + StorageClass string `xml:"StorageClass"` } var ( - errNoncurrentVersionExpirationUnsupported = Errorf("Specifying <NoncurrentVersionExpiration></NoncurrentVersionExpiration> is not supported") errNoncurrentVersionTransitionUnsupported = Errorf("Specifying <NoncurrentVersionTransition></NoncurrentVersionTransition> is not supported") ) -// UnmarshalXML is extended to indicate lack of support for -// NoncurrentVersionExpiration xml tag in object lifecycle -// configuration -func (n NoncurrentVersionExpiration) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error { - return errNoncurrentVersionExpirationUnsupported +// MarshalXML encodes NoncurrentVersionExpiration, emitting nothing when non-current days is unset +func (n *NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if n.NoncurrentDays == ExpirationDays(0) { + return nil + } + // Encode via a local wrapper type so the encoder does not recurse back into this method. + type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration + return e.EncodeElement(noncurrentVersionExpirationWrapper(*n), start) +} + +// IsDaysNull returns true if days field is null +func (n NoncurrentVersionExpiration) IsDaysNull() bool { + return n.NoncurrentDays == ExpirationDays(0) } // UnmarshalXML is extended to indicate lack of support for @@ -54,11 +59,8 @@ func (n NoncurrentVersionTransition) UnmarshalXML(d *xml.Decoder, startElement x // MarshalXML is extended to leave out <NoncurrentVersionTransition></NoncurrentVersionTransition> tags func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - return nil -} - -// MarshalXML is extended to leave out -// <NoncurrentVersionExpiration></NoncurrentVersionExpiration> tags -func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - return nil + if n.NoncurrentDays == ExpirationDays(0) { + return nil + } + // Encode via a local wrapper type so the encoder does not recurse back into this method. + type noncurrentVersionTransitionWrapper NoncurrentVersionTransition + return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start) } diff --git a/pkg/bucket/lifecycle/rule_test.go b/pkg/bucket/lifecycle/rule_test.go index 5a2da208d..7534fba17 100644 --- a/pkg/bucket/lifecycle/rule_test.go +++ b/pkg/bucket/lifecycle/rule_test.go @@ -25,8 +25,7 @@ import ( // TestUnsupportedRules checks if Rule xml with unsupported tags returns // appropriate errors on parsing func TestUnsupportedRules(t *testing.T) { - // NoncurrentVersionTransition, NoncurrentVersionExpiration - // and Transition tags aren't supported + // NoncurrentVersionTransition and Transition tags aren't supported unsupportedTestCases := []struct { inputXML string expectedErr error }{ { // Rule with unsupported NoncurrentVersionTransition inputXML: `<Rule> <NoncurrentVersionTransition></NoncurrentVersionTransition> </Rule>`, expectedErr: errNoncurrentVersionTransitionUnsupported, }, - { // Rule with unsupported NoncurrentVersionExpiration - - inputXML: `<Rule> - <NoncurrentVersionExpiration></NoncurrentVersionExpiration> - </Rule>`, - expectedErr: errNoncurrentVersionExpirationUnsupported, - }, { // Rule with unsupported Transition action inputXML: `<Rule> diff --git a/pkg/bucket/policy/action.go b/pkg/bucket/policy/action.go index 0e0a3a55e..43bebf18a 100644 --- a/pkg/bucket/policy/action.go +++ b/pkg/bucket/policy/action.go @@ -125,23 +125,48 @@ const ( PutBucketEncryptionAction = "s3:PutEncryptionConfiguration" // GetBucketEncryptionAction - GetBucketEncryption REST API action GetBucketEncryptionAction = "s3:GetEncryptionConfiguration" + + // PutBucketVersioningAction - PutBucketVersioning REST API action + PutBucketVersioningAction = "s3:PutBucketVersioning" + // GetBucketVersioningAction - GetBucketVersioning REST API action + GetBucketVersioningAction = "s3:GetBucketVersioning" + + // DeleteObjectVersionAction - DeleteObjectVersion Rest API action. + DeleteObjectVersionAction = "s3:DeleteObjectVersion" + + // DeleteObjectVersionTaggingAction - DeleteObjectVersionTagging Rest API action.
+ DeleteObjectVersionTaggingAction = "s3:DeleteObjectVersionTagging" + + // GetObjectVersionAction - GetObjectVersion Rest API action. + GetObjectVersionAction = "s3:GetObjectVersion" + + // GetObjectVersionTaggingAction - GetObjectVersionTagging Rest API action. + GetObjectVersionTaggingAction = "s3:GetObjectVersionTagging" + + // PutObjectVersionTaggingAction - PutObjectVersionTagging Rest API action. + PutObjectVersionTaggingAction = "s3:PutObjectVersionTagging" ) // List of all supported object actions. var supportedObjectActions = map[Action]struct{}{ - AbortMultipartUploadAction: {}, - DeleteObjectAction: {}, - GetObjectAction: {}, - ListMultipartUploadPartsAction: {}, - PutObjectAction: {}, - BypassGovernanceRetentionAction: {}, - PutObjectRetentionAction: {}, - GetObjectRetentionAction: {}, - PutObjectLegalHoldAction: {}, - GetObjectLegalHoldAction: {}, - GetObjectTaggingAction: {}, - PutObjectTaggingAction: {}, - DeleteObjectTaggingAction: {}, + AbortMultipartUploadAction: {}, + DeleteObjectAction: {}, + GetObjectAction: {}, + ListMultipartUploadPartsAction: {}, + PutObjectAction: {}, + BypassGovernanceRetentionAction: {}, + PutObjectRetentionAction: {}, + GetObjectRetentionAction: {}, + PutObjectLegalHoldAction: {}, + GetObjectLegalHoldAction: {}, + GetObjectTaggingAction: {}, + PutObjectTaggingAction: {}, + DeleteObjectTaggingAction: {}, + GetObjectVersionAction: {}, + GetObjectVersionTaggingAction: {}, + DeleteObjectVersionAction: {}, + DeleteObjectVersionTaggingAction: {}, + PutObjectVersionTaggingAction: {}, } // isObjectAction - returns whether action is object type or not. @@ -181,12 +206,19 @@ var supportedActions = map[Action]struct{}{ GetBucketObjectLockConfigurationAction: {}, PutBucketTaggingAction: {}, GetBucketTaggingAction: {}, + GetObjectVersionAction: {}, + GetObjectVersionTaggingAction: {}, + DeleteObjectVersionAction: {}, + DeleteObjectVersionTaggingAction: {}, + PutObjectVersionTaggingAction: {}, BypassGovernanceRetentionAction: {}, GetObjectTaggingAction: {}, PutObjectTaggingAction: {}, DeleteObjectTaggingAction: {}, PutBucketEncryptionAction: {}, GetBucketEncryptionAction: {}, + PutBucketVersioningAction: {}, + GetBucketVersioningAction: {}, } // IsValid - checks if action is valid or not.
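Note: to make the new policy vocabulary above concrete, here is a minimal standalone sketch (illustrative only, not part of this patch) that exercises the version-scoped actions through the exported API; it assumes the import paths github.com/minio/minio/pkg/bucket/policy and github.com/minio/minio/pkg/bucket/policy/condition at this revision, and uses only Action.IsValid as defined in this file:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/bucket/policy"
	"github.com/minio/minio/pkg/bucket/policy/condition"
)

func main() {
	// The version-scoped actions added above are plain string constants;
	// IsValid reports whether an action is registered as supported.
	for _, a := range []policy.Action{
		policy.GetObjectVersionAction,
		policy.DeleteObjectVersionAction,
		policy.PutBucketVersioningAction,
	} {
		fmt.Printf("%s valid=%v\n", a, a.IsValid())
	}
	// s3:versionid is the condition key that scopes the object-version
	// actions to a particular version in a policy statement.
	fmt.Println(condition.S3VersionID)
}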
@@ -246,7 +278,6 @@ var actionConditionKeyMap = map[Action]condition.KeySet{ append([]condition.Key{ condition.S3XAmzServerSideEncryption, condition.S3XAmzServerSideEncryptionCustomerAlgorithm, - condition.S3XAmzStorageClass, }, condition.CommonKeys...)...), HeadBucketAction: condition.NewKeySet(condition.CommonKeys...), @@ -308,4 +339,22 @@ var actionConditionKeyMap = map[Action]condition.KeySet{ PutObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), GetObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), DeleteObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), + + PutObjectVersionTaggingAction: condition.NewKeySet(condition.CommonKeys...), + GetObjectVersionAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), + GetObjectVersionTaggingAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), + DeleteObjectVersionAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), + DeleteObjectVersionTaggingAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), } diff --git a/pkg/bucket/policy/condition/key.go b/pkg/bucket/policy/condition/key.go index 08f490a9e..b6fc6f531 100644 --- a/pkg/bucket/policy/condition/key.go +++ b/pkg/bucket/policy/condition/key.go @@ -59,6 +59,10 @@ const ( // S3Delimiter - key representing delimiter query parameter of ListBucket API only. S3Delimiter Key = "s3:delimiter" + // S3VersionID - Enables you to limit the permission for version-specific + // object actions, such as s3:GetObjectVersion, to a specific object version. + S3VersionID Key = "s3:versionid" + // S3MaxKeys - key representing max-keys query parameter of ListBucket API only. S3MaxKeys Key = "s3:max-keys" diff --git a/pkg/bucket/versioning/error.go b/pkg/bucket/versioning/error.go new file mode 100644 index 000000000..e1f8dcc13 --- /dev/null +++ b/pkg/bucket/versioning/error.go @@ -0,0 +1,44 @@ +/* + * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package versioning + +import ( + "fmt" +) + +// Error is the generic type for any error happening during bucket +// versioning configuration parsing. +type Error struct { + err error +} + +// Errorf - formats according to a format specifier and returns +// the string as a value that satisfies error of type versioning.Error +func Errorf(format string, a ...interface{}) error { + return Error{err: fmt.Errorf(format, a...)} +} + +// Unwrap the internal error. +func (e Error) Unwrap() error { return e.err } + +// Error 'error' compatible method.
+func (e Error) Error() string { + if e.err == nil { + return "versioning: cause <nil>" + } + return e.err.Error() +} diff --git a/pkg/bucket/versioning/versioning.go b/pkg/bucket/versioning/versioning.go new file mode 100644 index 000000000..078525b4f --- /dev/null +++ b/pkg/bucket/versioning/versioning.go @@ -0,0 +1,79 @@ +/* + * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package versioning + +import ( + "encoding/xml" + "io" +) + +// State - enabled/disabled/suspended states +// for MFA delete and versioning status. +type State string + +// Various supported states +const ( + Enabled State = "Enabled" + // Disabled State = "Disabled" only used by MFA Delete not supported yet. + Suspended State = "Suspended" +) + +// Versioning - Configuration for bucket versioning. +type Versioning struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"VersioningConfiguration"` + // MFADelete State `xml:"MFADelete,omitempty"` // not supported yet. + Status State `xml:"Status,omitempty"` +} + +// Validate - validates the versioning configuration +func (v Versioning) Validate() error { + // Not supported yet + // switch v.MFADelete { + // case Enabled, Disabled: + // default: + // return Errorf("unsupported MFADelete state %s", v.MFADelete) + // } + switch v.Status { + case Enabled, Suspended: + default: + return Errorf("unsupported Versioning status %s", v.Status) + } + return nil } + +// Enabled - returns true if versioning is enabled +func (v Versioning) Enabled() bool { + return v.Status == Enabled } + +// Suspended - returns true if versioning is suspended +func (v Versioning) Suspended() bool { + return v.Status == Suspended } + +// ParseConfig - parses data in given reader to VersioningConfiguration. +func ParseConfig(reader io.Reader) (*Versioning, error) { + var v Versioning + if err := xml.NewDecoder(reader).Decode(&v); err != nil { + return nil, err + } + if err := v.Validate(); err != nil { + return nil, err + } + return &v, nil } diff --git a/pkg/event/name.go b/pkg/event/name.go index 03d000944..74ea8802f 100644 --- a/pkg/event/name.go +++ b/pkg/event/name.go @@ -41,6 +41,7 @@ const ( ObjectCreatedPutLegalHold ObjectRemovedAll ObjectRemovedDelete + ObjectRemovedDeleteMarkerCreated ) // Expand - returns expanded values of abbreviated event type.
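Note: a minimal standalone sketch (illustrative only, not part of this patch) of the new pkg/bucket/versioning API defined above; it assumes the import path github.com/minio/minio/pkg/bucket/versioning at this revision and uses only ParseConfig, Enabled, and Suspended from this file:

package main

import (
	"fmt"
	"strings"

	"github.com/minio/minio/pkg/bucket/versioning"
)

func main() {
	// A typical PutBucketVersioning request body; ParseConfig decodes and
	// validates it, rejecting any Status other than Enabled or Suspended.
	const body = `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">` +
		`<Status>Enabled</Status></VersioningConfiguration>`
	v, err := versioning.ParseConfig(strings.NewReader(body))
	if err != nil {
		fmt.Println("invalid configuration:", err)
		return
	}
	fmt.Println("enabled:", v.Enabled(), "suspended:", v.Suspended()) // enabled: true suspended: false
}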
@@ -88,6 +89,8 @@ func (name Name) String() string { return "s3:ObjectRemoved:*" case ObjectRemovedDelete: return "s3:ObjectRemoved:Delete" + case ObjectRemovedDeleteMarkerCreated: + return "s3:ObjectRemoved:DeleteMarkerCreated" } return "" @@ -166,6 +169,8 @@ func ParseName(s string) (Name, error) { return ObjectRemovedAll, nil case "s3:ObjectRemoved:Delete": return ObjectRemovedDelete, nil + case "s3:ObjectRemoved:DeleteMarkerCreated": + return ObjectRemovedDeleteMarkerCreated, nil default: return 0, &ErrInvalidEventName{s} } diff --git a/pkg/iam/policy/action.go b/pkg/iam/policy/action.go index d5022b386..1c911e160 100644 --- a/pkg/iam/policy/action.go +++ b/pkg/iam/policy/action.go @@ -92,6 +92,21 @@ const ( // PutObjectAction - PutObject Rest API action. PutObjectAction = "s3:PutObject" + // DeleteObjectVersionAction - DeleteObjectVersion Rest API action. + DeleteObjectVersionAction = "s3:DeleteObjectVersion" + + // DeleteObjectVersionTaggingAction - DeleteObjectVersionTagging Rest API action. + DeleteObjectVersionTaggingAction = "s3:DeleteObjectVersionTagging" + + // GetObjectVersionAction - GetObjectVersion Rest API action. + GetObjectVersionAction = "s3:GetObjectVersion" + + // GetObjectVersionTaggingAction - GetObjectVersionTagging Rest API action. + GetObjectVersionTaggingAction = "s3:GetObjectVersionTagging" + + // PutObjectVersionTaggingAction - PutObjectVersionTagging Rest API action. + PutObjectVersionTaggingAction = "s3:PutObjectVersionTagging" + // BypassGovernanceRetentionAction - bypass governance retention for PutObjectRetention, PutObject and DeleteObject Rest API action. BypassGovernanceRetentionAction = "s3:BypassGovernanceRetention" @@ -134,6 +149,12 @@ const ( // GetBucketEncryptionAction - GetBucketEncryption REST API action GetBucketEncryptionAction = "s3:GetEncryptionConfiguration" + // PutBucketVersioningAction - PutBucketVersioning REST API action + PutBucketVersioningAction = "s3:PutBucketVersioning" + + // GetBucketVersioningAction - GetBucketVersioning REST API action + GetBucketVersioningAction = "s3:GetBucketVersioning" + // AllActions - all API actions AllActions = "s3:*" ) @@ -170,30 +191,42 @@ var supportedActions = map[Action]struct{}{ PutBucketObjectLockConfigurationAction: {}, GetBucketTaggingAction: {}, PutBucketTaggingAction: {}, + GetObjectVersionAction: {}, + GetObjectVersionTaggingAction: {}, + DeleteObjectVersionAction: {}, + DeleteObjectVersionTaggingAction: {}, + PutObjectVersionTaggingAction: {}, GetObjectTaggingAction: {}, PutObjectTaggingAction: {}, DeleteObjectTaggingAction: {}, PutBucketEncryptionAction: {}, GetBucketEncryptionAction: {}, + PutBucketVersioningAction: {}, + GetBucketVersioningAction: {}, AllActions: {}, } // List of all supported object actions.
var supportedObjectActions = map[Action]struct{}{ - AllActions: {}, - AbortMultipartUploadAction: {}, - DeleteObjectAction: {}, - GetObjectAction: {}, - ListMultipartUploadPartsAction: {}, - PutObjectAction: {}, - BypassGovernanceRetentionAction: {}, - PutObjectRetentionAction: {}, - GetObjectRetentionAction: {}, - PutObjectLegalHoldAction: {}, - GetObjectLegalHoldAction: {}, - GetObjectTaggingAction: {}, - PutObjectTaggingAction: {}, - DeleteObjectTaggingAction: {}, + AllActions: {}, + AbortMultipartUploadAction: {}, + DeleteObjectAction: {}, + GetObjectAction: {}, + ListMultipartUploadPartsAction: {}, + PutObjectAction: {}, + BypassGovernanceRetentionAction: {}, + PutObjectRetentionAction: {}, + GetObjectRetentionAction: {}, + PutObjectLegalHoldAction: {}, + GetObjectLegalHoldAction: {}, + GetObjectTaggingAction: {}, + PutObjectTaggingAction: {}, + DeleteObjectTaggingAction: {}, + GetObjectVersionAction: {}, + GetObjectVersionTaggingAction: {}, + DeleteObjectVersionAction: {}, + DeleteObjectVersionTaggingAction: {}, + PutObjectVersionTaggingAction: {}, } // isObjectAction - returns whether action is object type or not. @@ -235,7 +268,6 @@ var actionConditionKeyMap = map[Action]condition.KeySet{ append([]condition.Key{ condition.S3XAmzServerSideEncryption, condition.S3XAmzServerSideEncryptionCustomerAlgorithm, - condition.S3XAmzStorageClass, }, condition.CommonKeys...)...), HeadBucketAction: condition.NewKeySet(condition.CommonKeys...), @@ -303,4 +335,22 @@ var actionConditionKeyMap = map[Action]condition.KeySet{ PutObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), GetObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), DeleteObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), + + PutObjectVersionTaggingAction: condition.NewKeySet(condition.CommonKeys...), + GetObjectVersionAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), + GetObjectVersionTaggingAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), + DeleteObjectVersionAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), + DeleteObjectVersionTaggingAction: condition.NewKeySet( + append([]condition.Key{ + condition.S3VersionID, + }, condition.CommonKeys...)...), } diff --git a/pkg/madmin/info-commands.go b/pkg/madmin/info-commands.go index b3a8e3a43..88a897471 100644 --- a/pkg/madmin/info-commands.go +++ b/pkg/madmin/info-commands.go @@ -254,8 +254,8 @@ type FSBackend struct { Type backendType `json:"backendType,omitempty"` } -// XLBackend contains specific erasure storage information -type XLBackend struct { +// ErasureBackend contains specific erasure storage information +type ErasureBackend struct { Type backendType `json:"backendType,omitempty"` OnlineDisks int `json:"onlineDisks,omitempty"` OfflineDisks int `json:"offlineDisks,omitempty"`