change directory objects to never create new versions (#17109)

Authored by Harshavardhana on 2023-05-02 16:09:33 -07:00, committed by GitHub
parent 0ec722bc54
commit b53376a3a4
9 changed files with 74 additions and 40 deletions

View File

@@ -1,5 +0,0 @@
# Config file for markdownlint-cli
MD033:
  allowed_elements:
    - details
    - summary

View File

@@ -1,30 +0,0 @@
name: Markdown Linter

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

permissions:
  contents: read

jobs:
  lint:
    name: Lint all docs
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3

      - name: Lint all docs
        run: |
          npm install -g markdownlint-cli
          markdownlint --fix '**/*.md' \
            --config /home/runner/work/minio/minio/.github/markdown-lint-cfg.yaml \
            --disable MD013 MD040 MD051

View File

@@ -657,13 +657,14 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
) {
	var err error
	contentBytes := []byte("hello")
	sha256sum := ""
	var objectNames []string
	for i := 0; i < 10; i++ {
		contentBytes := []byte("hello")
		objectName := "test-object-" + strconv.Itoa(i)
		if i == 0 {
			objectName += "/"
			contentBytes = []byte{}
		}
		// uploading the object.
		_, err = obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
@@ -676,6 +677,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
		objectNames = append(objectNames, objectName)
	}

	contentBytes := []byte("hello")
	for _, name := range []string{"private/object", "public/object"} {
		// Uploading the object with retention enabled
		_, err = obj.PutObject(GlobalContext, bucketName, name, mustGetPutObjReader(t, bytes.NewReader(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})

View File

@@ -81,7 +81,7 @@ func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket
		for id, usageInfo := range cache.flattenChildrens(*root) {
			prefix := decodeDirObject(strings.TrimPrefix(id, bucket+slashSeparator))
			// decodeDirObject to avoid any __XLDIR__ objects
			m[prefix] += uint64(usageInfo.Size)
		}
	}

View File

@@ -937,7 +937,15 @@ func (z *erasureServerPools) PutObject(ctx context.Context, bucket string, objec
		return ObjectInfo{}, err
	}

	origObject := object
	object = encodeDirObject(object)

	// Only directory objects skip creating new versions.
	if object != origObject && isDirObject(object) && data.Size() == 0 {
		// Treat all directory PUTs to behave as if they are performed
		// on an unversioned bucket.
		opts.Versioned = false
		opts.VersionSuspended = false
	}

	if z.SinglePool() {
		if !isMinioMetaBucketName(bucket) {
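
The comments in this hunk carry the core of the change: a zero-byte PUT of a directory key is written as if the bucket were unversioned. Below is a minimal standalone sketch of that rule; the names `writeOpts` and `forceUnversionedForDirs` are illustrative, and the check is simplified to the raw trailing slash rather than the encoded `__XLDIR__` form used above.

```
package main

import (
	"fmt"
	"strings"
)

// writeOpts is a simplified stand-in for the ObjectOptions fields touched above.
type writeOpts struct {
	Versioned        bool
	VersionSuspended bool
}

// forceUnversionedForDirs sketches the guard from the hunk above: a zero-byte
// PUT whose key names a directory is treated as a write to an unversioned
// bucket, so repeated directory PUTs never accumulate versions.
func forceUnversionedForDirs(object string, size int64, opts *writeOpts) {
	if strings.HasSuffix(object, "/") && size == 0 {
		opts.Versioned = false
		opts.VersionSuspended = false
	}
}

func main() {
	opts := writeOpts{Versioned: true}
	forceUnversionedForDirs("photos/", 0, &opts)
	fmt.Printf("directory PUT: %+v\n", opts) // versioning disabled for this write

	opts = writeOpts{Versioned: true}
	forceUnversionedForDirs("photos/cat.png", 1024, &opts)
	fmt.Printf("regular PUT:   %+v\n", opts) // options left untouched
}
```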

View File

@@ -55,7 +55,7 @@ func (e metaCacheEntry) isObject() bool {
	return len(e.metadata) > 0
}

// isObjectDir returns if the entry is representing an object/
func (e metaCacheEntry) isObjectDir() bool {
	return len(e.metadata) > 0 && strings.HasSuffix(e.name, slashSeparator)
}

View File

@@ -1031,6 +1031,10 @@ func decodeDirObject(object string) string {
	return object
}

func isDirObject(object string) bool {
	return HasSuffix(object, globalDirSuffix)
}

// Helper method to return total number of nodes in cluster
func totalNodeCount() uint64 {
	peers, _ := globalEndpoints.peers()
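
For reference, `isDirObject` keys off the same internal `__XLDIR__` suffix that `encodeDirObject`/`decodeDirObject` add and strip. The following self-contained round-trip sketch uses simplified stand-ins for the suffix constant and the encode/decode bodies rather than imports from the codebase.

```
package main

import (
	"fmt"
	"strings"
)

// globalDirSuffix mirrors the internal marker used for directory objects.
const globalDirSuffix = "__XLDIR__"

// encodeDirObject rewrites "photos/" into its internal form "photos__XLDIR__".
func encodeDirObject(object string) string {
	if strings.HasSuffix(object, "/") {
		return strings.TrimSuffix(object, "/") + globalDirSuffix
	}
	return object
}

// decodeDirObject restores the user-visible trailing-slash form.
func decodeDirObject(object string) string {
	if strings.HasSuffix(object, globalDirSuffix) {
		return strings.TrimSuffix(object, globalDirSuffix) + "/"
	}
	return object
}

// isDirObject matches the helper added in this hunk.
func isDirObject(object string) bool {
	return strings.HasSuffix(object, globalDirSuffix)
}

func main() {
	enc := encodeDirObject("photos/")
	fmt.Println(enc, isDirObject(enc)) // photos__XLDIR__ true
	fmt.Println(decodeDirObject(enc))  // photos/
}
```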

View File

@@ -132,4 +132,61 @@ if [ $ret -ne 0 ]; then
	exit 1
fi

./mc mb sitea/bucket-version/
./mc mb siteb/bucket-version

./mc version enable sitea/bucket-version/
./mc version enable siteb/bucket-version/

echo "adding replication rule for site a -> site b"
./mc replicate add sitea/bucket-version/ \
	--remote-bucket http://minio:minio123@127.0.0.1:9004/bucket-version

./mc mb sitea/bucket-version/directory/

sleep 2s

./mc ls -r --versions sitea/bucket-version/ > /tmp/sitea_dirs.txt
./mc ls -r --versions siteb/bucket-version/ > /tmp/siteb_dirs.txt

out=$(diff -qpruN /tmp/sitea_dirs.txt /tmp/siteb_dirs.txt)
ret=$?
if [ $ret -ne 0 ]; then
	echo "BUG: expected no 'diff' after replication: $out"
	exit 1
fi

./mc rm -r --versions --force sitea/bucket-version/

sleep 2s

./mc ls -r --versions sitea/bucket-version/ > /tmp/sitea_dirs.txt
./mc ls -r --versions siteb/bucket-version/ > /tmp/siteb_dirs.txt

out=$(diff -qpruN /tmp/sitea_dirs.txt /tmp/siteb_dirs.txt)
ret=$?
if [ $ret -ne 0 ]; then
	echo "BUG: expected no 'diff' after replication: $out"
	exit 1
fi

## check that we don't create delete markers on the directory objects, it's always a permanent delete.
./mc mb sitea/bucket-version/directory/

sleep 2s

./mc rm -r --force sitea/bucket-version/

sleep 2s

./mc ls -r --versions sitea/bucket-version/ > /tmp/sitea_dirs.txt
./mc ls -r --versions siteb/bucket-version/ > /tmp/siteb_dirs.txt

out=$(diff -qpruN /tmp/sitea_dirs.txt /tmp/siteb_dirs.txt)
ret=$?
if [ $ret -ne 0 ]; then
	echo "BUG: expected no 'diff' after replication: $out"
	exit 1
fi

catch
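
The last block of the script above checks that deleting a directory object on a versioned bucket never leaves a delete marker: the delete is always permanent. A tiny sketch of that rule, using a hypothetical helper that is not taken from the codebase:

```
package main

import (
	"fmt"
	"strings"
)

// leavesDeleteMarker expresses the rule the script verifies: a plain delete of
// a regular object on a versioned bucket leaves a delete marker, while a
// directory object ("prefix/") is removed permanently.
func leavesDeleteMarker(bucketVersioned bool, object string) bool {
	isDirObject := strings.HasSuffix(object, "/")
	return bucketVersioned && !isDirObject
}

func main() {
	fmt.Println(leavesDeleteMarker(true, "directory/")) // false: permanent delete
	fmt.Println(leavesDeleteMarker(true, "file.txt"))   // true: delete marker
}
```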

View File

@@ -78,8 +78,6 @@ To exclude objects under a list of prefix (glob) patterns from being versioned,
```
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Status>Enabled</Status>
  <ExcludeFolders>true</ExcludeFolders>
  <ExcludedPrefixes>
    <Prefix>*/_temporary</Prefix>
  </ExcludedPrefixes>
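
To show how a configuration like the one documented above can be produced or parsed programmatically, here is a small sketch using Go's encoding/xml; the struct layout is an assumption for illustration and is not MinIO's internal type.

```
package main

import (
	"encoding/xml"
	"fmt"
)

// VersioningConfiguration is an illustrative mapping of the XML shown above.
type VersioningConfiguration struct {
	XMLName          xml.Name `xml:"VersioningConfiguration"`
	Xmlns            string   `xml:"xmlns,attr"`
	Status           string   `xml:"Status"`
	ExcludedPrefixes []string `xml:"ExcludedPrefixes>Prefix"`
}

func main() {
	cfg := VersioningConfiguration{
		Xmlns:            "http://s3.amazonaws.com/doc/2006-03-01/",
		Status:           "Enabled",
		ExcludedPrefixes: []string{"*/_temporary"},
	}
	out, _ := xml.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(out))
}
```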