Mirror of https://github.com/minio/minio.git (synced 2025-10-30 00:05:02 -04:00)

Compare commits: RELEASE.20… → master (450 commits)
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 11 changes)

@@ -1,14 +1,19 @@
 ---
 name: Bug report
-about: Create a report to help us improve
+about: Report a bug in MinIO (community edition is source-only)
 title: ''
 labels: community, triage
 assignees: ''

 ---

-## NOTE
-If this case is urgent, please subscribe to [Subnet](https://min.io/pricing) so that our 24/7 support team may help you faster.
+## IMPORTANT NOTES
+
+**Community Edition**: MinIO community edition is now source-only. Install via `go install github.com/minio/minio@latest`
+
+**Feature Requests**: We are no longer accepting feature requests for the community edition. For feature requests and enterprise support, please subscribe to [MinIO Enterprise Support](https://min.io/pricing).
+
+**Urgent Issues**: If this case is urgent or affects production, please subscribe to [SUBNET](https://min.io/pricing) for 24/7 enterprise support.

 <!--- Provide a general summary of the issue in the Title above -->
.github/ISSUE_TEMPLATE/config.yml (vendored, 6 changes)

@@ -2,7 +2,7 @@ blank_issues_enabled: false
 contact_links:
   - name: MinIO Community Support
     url: https://slack.min.io
-    about: Join here for Community Support
-  - name: MinIO SUBNET Support
+    about: Community support via Slack - for questions and discussions
+  - name: MinIO Enterprise Support (SUBNET)
     url: https://min.io/pricing
-    about: Join here for Enterprise Support
+    about: Enterprise support with SLA - for production deployments and feature requests
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 20 changes, deleted)

@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: community, triage
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context or screenshots about the feature request here.
.github/workflows/go-cross.yml (vendored, 2 changes)

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
.github/workflows/go-fips.yml (vendored, 59 changes, deleted)

@@ -1,59 +0,0 @@
-name: FIPS Build Test
-
-on:
-  pull_request:
-    branches:
-      - master
-
-# This ensures that previous jobs for the PR are canceled when the PR is
-# updated.
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref }}
-  cancel-in-progress: true
-
-permissions:
-  contents: read
-
-jobs:
-  build:
-    name: Go BoringCrypto ${{ matrix.go-version }} on ${{ matrix.os }}
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        go-version: [1.22.x]
-        os: [ubuntu-latest]
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v5
-        with:
-          go-version: ${{ matrix.go-version }}
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-
-      - name: Setup dockerfile for build test
-        run: |
-          GO_VERSION=$(go version | cut -d ' ' -f 3 | sed 's/go//')
-          echo Detected go version $GO_VERSION
-          cat > Dockerfile.fips.test <<EOF
-          FROM golang:${GO_VERSION}
-          COPY . /minio
-          WORKDIR /minio
-          ENV GOEXPERIMENT=boringcrypto
-          RUN make
-          EOF
-
-      - name: Build
-        uses: docker/build-push-action@v3
-        with:
-          context: .
-          file: Dockerfile.fips.test
-          push: false
-          load: true
-          tags: minio/fips-test:latest
-
-      # This should fail if grep returns non-zero exit
-      - name: Test binary
-        run: |
-          docker run --rm minio/fips-test:latest ./minio --version
-          docker run --rm -i minio/fips-test:latest /bin/bash -c 'go tool nm ./minio | grep FIPS | grep -q FIPS'
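For reference, the check this deleted workflow automated can still be run by hand against a local tree. A minimal sketch, assuming a Go toolchain new enough to ship the BoringCrypto experiment:

```sh
# Build with the BoringCrypto-backed crypto stack, as the workflow's
# generated Dockerfile did via GOEXPERIMENT=boringcrypto.
GOEXPERIMENT=boringcrypto make

# Same assertion the deleted "Test binary" step made: the symbol table
# of the resulting binary should mention FIPS.
go tool nm ./minio | grep -q FIPS && echo "boringcrypto/FIPS symbols present"
```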
.github/workflows/go-healing.yml (vendored, 2 changes)

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
.github/workflows/go-lint.yml (vendored, 14 changes)

@@ -20,24 +20,14 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.22.x]
-        os: [ubuntu-latest, Windows]
+        go-version: [1.24.x]
+        os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
           check-latest: true
-      - name: Build on ${{ matrix.os }}
-        if: matrix.os == 'Windows'
-        env:
-          CGO_ENABLED: 0
-          GO111MODULE: on
-        run: |
-          Set-MpPreference -DisableRealtimeMonitoring $true
-          netsh int ipv4 set dynamicport tcp start=60000 num=61000
-          go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
-          go test -v --timeout 120m ./...
       - name: Build on ${{ matrix.os }}
         if: matrix.os == 'ubuntu-latest'
         env:
.github/workflows/go-resiliency.yml (vendored, 39 changes, new file)

@@ -0,0 +1,39 @@
+name: Resiliency Functional Tests
+
+on:
+  pull_request:
+    branches:
+      - master
+
+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  build:
+    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        go-version: [1.24.x]
+        os: [ubuntu-latest]
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - name: Build on ${{ matrix.os }}
+        if: matrix.os == 'ubuntu-latest'
+        env:
+          CGO_ENABLED: 0
+          GO111MODULE: on
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-resiliency
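Since the workflow is a thin wrapper around one Makefile target, the same run can be reproduced locally (Docker and the compose tooling the target expects must be installed); the commands are taken directly from the workflow steps above:

```sh
# Mirror the CI steps: re-enable IPv6, then run the resiliency suite.
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-resiliency
```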
.github/workflows/go.yml (vendored, 3 changes)

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
@@ -39,3 +39,4 @@ jobs:
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make verify
+          make test-timeout
.github/workflows/iam-integrations.yaml (vendored, 36 changes)

@@ -61,7 +61,7 @@ jobs:
       # are turned off - i.e. if ldap="", then ldap server is not enabled for
      # the tests.
      matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]
        ldap: ["", "localhost:389"]
        etcd: ["", "http://localhost:2379"]
        openid: ["", "http://127.0.0.1:5556/dex"]
@@ -125,3 +125,37 @@ jobs:
        if: matrix.openid == 'http://127.0.0.1:5556/dex'
        run: |
          make test-site-replication-oidc
+  iam-import-with-missing-entities:
+    name: Test IAM import in new cluster with missing entities
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - name: Checkout minio-iam-testing
+        uses: actions/checkout@v4
+        with:
+          repository: minio/minio-iam-testing
+          path: minio-iam-testing
+      - name: Test import of IAM artifacts when in fresh cluster there are missing groups etc
+        run: |
+          make test-iam-import-with-missing-entities
+  iam-import-with-openid:
+    name: Test IAM import in new cluster with opendid configurations
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - name: Checkout minio-iam-testing
+        uses: actions/checkout@v4
+        with:
+          repository: minio/minio-iam-testing
+          path: minio-iam-testing
+      - name: Test import of IAM artifacts when in fresh cluster with openid configurations
+        run: |
+          make test-iam-import-with-openid
.github/workflows/mint.yml (vendored, 2 changes)

@@ -29,7 +29,7 @@ jobs:
       - name: setup-go-step
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.x
+          go-version: 1.24.x

       - name: github sha short
         id: vars
.github/workflows/replication.yaml (vendored, 9 changes)

@@ -21,7 +21,7 @@ jobs:

     strategy:
       matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]

     steps:
       - uses: actions/checkout@v4
@@ -40,6 +40,7 @@ jobs:
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-ilm
+          make test-ilm-transition

       - name: Test PBAC
         run: |
@@ -70,3 +71,9 @@ jobs:
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-versioning
+
+      - name: Test Multipart upload with failures
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-multipart
.github/workflows/root-disable.yml (vendored, 2 changes)

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]

     steps:
.github/workflows/run-mint.sh (vendored, 3 changes)

@@ -15,6 +15,9 @@ docker volume rm $(docker volume ls -f dangling=true) || true
 ## change working directory
 cd .github/workflows/mint

+## always pull latest
+docker pull docker.io/minio/mint:edge
+
 docker-compose -f minio-${MODE}.yaml up -d
 sleep 1m
.github/workflows/upgrade-ci-cd.yaml (vendored, 2 changes)

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.22.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]

     steps:
.github/workflows/vulncheck.yml (vendored, 3 changes)

@@ -21,7 +21,8 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.22.5
+          go-version: 1.24.x
+          cached: false
       - name: Get official govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest
         shell: bash
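The scan is easy to reproduce outside CI. The install command is verbatim from the workflow; the `./...` invocation is the scanner's standard usage, assumed here:

```sh
# Install the official scanner, then check every package in the module.
go install golang.org/x/vuln/cmd/govulncheck@latest
govulncheck ./...
```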
.golangci.yml

@@ -1,36 +1,64 @@
-linters-settings:
-  gofumpt:
-    simplify: true
-
-  misspell:
-    locale: US
-
-  staticcheck:
-    checks: ['all', '-ST1005', '-ST1000', '-SA4000', '-SA9004', '-SA1019', '-SA1008', '-U1000', '-ST1016']
-
-linters:
-  disable-all: true
-  enable:
-    - durationcheck
-    - forcetypeassert
-    - gocritic
-    - gofumpt
-    - goimports
-    - gomodguard
-    - govet
-    - ineffassign
-    - misspell
-    - revive
-    - staticcheck
-    - tenv
-    - typecheck
-    - unconvert
-    - unused
-    - whitespace
-
-issues:
-  exclude-use-default: false
-  exclude:
-    - "empty-block:"
-    - "unused-parameter:"
-    - "dot-imports:"
-    - should have a package comment
-    - error strings should not be capitalized or end with punctuation or a newline
-  max-issues-per-linter: 100
-  max-same-issues: 100
+version: "2"
+linters:
+  default: none
+  enable:
+    - durationcheck
+    - forcetypeassert
+    - gocritic
+    - gomodguard
+    - govet
+    - ineffassign
+    - misspell
+    - revive
+    - staticcheck
+    - unconvert
+    - unused
+    - usetesting
+    - whitespace
+  settings:
+    misspell:
+      locale: US
+    staticcheck:
+      checks:
+        - all
+        - -SA1008
+        - -SA1019
+        - -SA4000
+        - -SA9004
+        - -ST1000
+        - -ST1005
+        - -ST1016
+        - -U1000
+  exclusions:
+    generated: lax
+    rules:
+      - linters:
+          - forcetypeassert
+        path: _test\.go
+      - path: (.+)\.go$
+        text: 'empty-block:'
+      - path: (.+)\.go$
+        text: 'unused-parameter:'
+      - path: (.+)\.go$
+        text: 'dot-imports:'
+      - path: (.+)\.go$
+        text: should have a package comment
+      - path: (.+)\.go$
+        text: error strings should not be capitalized or end with punctuation or a newline
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+issues:
+  max-issues-per-linter: 100
+  max-same-issues: 100
+formatters:
+  enable:
+    - gofumpt
+    - goimports
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
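Either schema is consumed by the same entry point, so after editing the config a quick local check might look like this sketch:

```sh
# golangci-lint v2 understands the `version: "2"` layout shown above;
# run the full suite against the repository config.
golangci-lint run --config .golangci.yml ./...
```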
.typos.toml (17 changes)

@@ -1,11 +1,5 @@
 [files]
-extend-exclude = [
-    ".git/",
-    "docs/",
-    "CREDITS",
-    "go.mod",
-    "go.sum",
-]
+extend-exclude = [".git/", "docs/", "CREDITS", "go.mod", "go.sum"]
 ignore-hidden = false

 [default]
@@ -20,6 +14,7 @@ extend-ignore-re = [
     'http\.Header\{"X-Amz-Server-Side-Encryptio":',
     "ZoEoZdLlzVbOlT9rbhD7ZN7TLyiYXSAlB79uGEge",
     "ERRO:",
+    "(?Rm)^.*(#|//)\\s*spellchecker:disable-line$", # ignore line
 ]

 [default.extend-words]
@@ -40,3 +35,11 @@
 "TestGetPartialObjectMisAligned" = "TestGetPartialObjectMisAligned"
 "thr" = "thr"
 "toi" = "toi"
+
+[type.go]
+extend-ignore-identifiers-re = [
+    # Variants of `typ` used to mean `type` in golang as it is otherwise a
+    # keyword - some of these (like typ1 -> type1) can be fixed, but probably
+    # not worth the effort.
+    "[tT]yp[0-9]*",
+]
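The newly added ignore pattern matches a trailing marker comment ending a line, so individual lines can opt out of spell checking. A hypothetical example, using shell-style `#` comment syntax (the regex also accepts Go's `//`):

```sh
echo "teh quick brown fox"  # spellchecker:disable-line
```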
CONTRIBUTING.md

@@ -12,8 +12,9 @@ Fork [MinIO upstream](https://github.com/minio/minio/fork) source repository to

 ```sh
 git clone https://github.com/minio/minio
 cd minio
 go install -v
-ls /go/bin/minio
+ls $(go env GOPATH)/bin/minio
 ```

 ### Set up git remote as ``upstream``
Dockerfile (10 changes)

@@ -1,6 +1,14 @@
 FROM minio/minio:latest

-COPY ./minio /usr/bin/minio
+ARG TARGETARCH
+ARG RELEASE
+
+RUN chmod -R 777 /usr/bin
+
+COPY ./minio-${TARGETARCH}.${RELEASE} /usr/bin/minio
+COPY ./minio-${TARGETARCH}.${RELEASE}.minisig /usr/bin/minio.minisig
+COPY ./minio-${TARGETARCH}.${RELEASE}.sha256sum /usr/bin/minio.sha256sum

 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh

 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
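With the new build args, a local image build against this Dockerfile might look like the sketch below; the `RELEASE` string and the placeholder signature file are illustrative assumptions, not repository conventions:

```sh
# Lay out the files the COPY lines above expect, then build.
make build
RELEASE=RELEASE.2025-01-01T00-00-00Z   # hypothetical release string
cp ./minio ./minio-amd64.${RELEASE}
sha256sum minio-amd64.${RELEASE} > minio-amd64.${RELEASE}.sha256sum
touch minio-amd64.${RELEASE}.minisig   # placeholder; real releases are minisign-signed

docker build --build-arg TARGETARCH=amd64 --build-arg RELEASE=${RELEASE} -t minio:dev .
```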
(deleted Dockerfile variant; filename not preserved in this view)

@@ -1,12 +0,0 @@
-FROM minio/minio:latest
-
-ENV PATH=/opt/bin:$PATH
-
-COPY ./minio /opt/bin/minio
-COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
-
-ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
-
-VOLUME ["/data"]
-
-CMD ["minio"]
(hotfix Dockerfile; filename not preserved in this view)

@@ -1,24 +1,26 @@
-FROM golang:1.21-alpine as build
+FROM golang:1.24-alpine as build

 ARG TARGETARCH
 ARG RELEASE

-ENV GOPATH /go
-ENV CGO_ENABLED 0
+ENV GOPATH=/go
+ENV CGO_ENABLED=0

 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
     apk add -U --no-cache curl && \
     go install aead.dev/minisign/cmd/minisign@v0.2.1

-# Download minio binary and signature file
+# Download minio binary and signature files
 RUN curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
     curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
     chmod +x /go/bin/minio

-# Download mc binary and signature file
+# Download mc binary and signature files
 RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
     curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
+    curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
     chmod +x /go/bin/mc

 RUN if [ "$TARGETARCH" = "amd64" ]; then \
@@ -51,9 +53,11 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_CONFIG_ENV_FILE=config.env \
     MC_CONFIG_DIR=/tmp/.mc

+RUN chmod -R 777 /usr/bin
+
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/mc /usr/bin/mc
+COPY --from=build /go/bin/minio* /usr/bin/
+COPY --from=build /go/bin/mc* /usr/bin/
+COPY --from=build /go/bin/cur* /usr/bin/

 COPY CREDITS /licenses/CREDITS
(release Dockerfile; filename not preserved in this view)

@@ -1,35 +1,39 @@
-FROM golang:1.21-alpine as build
+FROM golang:1.24-alpine AS build

 ARG TARGETARCH
 ARG RELEASE

-ENV GOPATH /go
-ENV CGO_ENABLED 0
+ENV GOPATH=/go
+ENV CGO_ENABLED=0
+
+WORKDIR /build

 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
     apk add -U --no-cache curl && \
+    apk add -U --no-cache bash && \
     go install aead.dev/minisign/cmd/minisign@v0.2.1

-# Download minio binary and signature file
+# Download minio binary and signature files
 RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
     curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
     chmod +x /go/bin/minio

-# Download mc binary and signature file
+# Download mc binary and signature files
 RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
     curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
+    curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
     chmod +x /go/bin/mc

-RUN if [ "$TARGETARCH" = "amd64" ]; then \
-       curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
-       chmod +x /go/bin/curl; \
-    fi
-
 # Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
 RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
     minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav

+COPY dockerscripts/download-static-curl.sh /build/download-static-curl
+RUN chmod +x /build/download-static-curl && \
+    /build/download-static-curl
+
 FROM registry.access.redhat.com/ubi9/ubi-micro:latest

 ARG RELEASE
@@ -51,10 +55,12 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_CONFIG_ENV_FILE=config.env \
     MC_CONFIG_DIR=/tmp/.mc

+RUN chmod -R 777 /usr/bin
+
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/mc /usr/bin/mc
-COPY --from=build /go/bin/cur* /usr/bin/
+COPY --from=build /go/bin/minio* /usr/bin/
+COPY --from=build /go/bin/mc* /usr/bin/
+COPY --from=build /go/bin/curl* /usr/bin/

 COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE
(deleted FIPS release Dockerfile; filename not preserved in this view)

@@ -1,59 +0,0 @@
-FROM golang:1.21-alpine as build
-
-ARG TARGETARCH
-ARG RELEASE
-
-ENV GOPATH /go
-ENV CGO_ENABLED 0
-
-# Install curl and minisign
-RUN apk add -U --no-cache ca-certificates && \
-    apk add -U --no-cache curl && \
-    go install aead.dev/minisign/cmd/minisign@v0.2.1
-
-# Download minio binary and signature file
-RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips -o /go/bin/minio && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /go/bin/minio.minisig && \
-    chmod +x /go/bin/minio
-
-RUN if [ "$TARGETARCH" = "amd64" ]; then \
-       curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
-       chmod +x /go/bin/curl; \
-    fi
-
-# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
-RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
-
-FROM registry.access.redhat.com/ubi9/ubi-micro:latest
-
-ARG RELEASE
-
-LABEL name="MinIO" \
-      vendor="MinIO Inc <dev@min.io>" \
-      maintainer="MinIO Inc <dev@min.io>" \
-      version="${RELEASE}" \
-      release="${RELEASE}" \
-      summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
-      description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."
-
-ENV MINIO_ACCESS_KEY_FILE=access_key \
-    MINIO_SECRET_KEY_FILE=secret_key \
-    MINIO_ROOT_USER_FILE=access_key \
-    MINIO_ROOT_PASSWORD_FILE=secret_key \
-    MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
-    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
-    MINIO_CONFIG_ENV_FILE=config.env
-
-COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/cur* /usr/bin/
-
-COPY CREDITS /licenses/CREDITS
-COPY LICENSE /licenses/LICENSE
-COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
-
-EXPOSE 9000
-VOLUME ["/data"]
-
-ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
-CMD ["minio"]
(second release Dockerfile variant; filename not preserved in this view)

@@ -1,24 +1,26 @@
-FROM golang:1.21-alpine as build
+FROM golang:1.24-alpine AS build

 ARG TARGETARCH
 ARG RELEASE

-ENV GOPATH /go
-ENV CGO_ENABLED 0
+ENV GOPATH=/go
+ENV CGO_ENABLED=0

 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
     apk add -U --no-cache curl && \
     go install aead.dev/minisign/cmd/minisign@v0.2.1

-# Download minio binary and signature file
+# Download minio binary and signature files
 RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
     curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
     chmod +x /go/bin/minio

-# Download mc binary and signature file
+# Download mc binary and signature files
 RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
     curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
+    curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
     chmod +x /go/bin/mc

 RUN if [ "$TARGETARCH" = "amd64" ]; then \
@@ -51,9 +53,11 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_CONFIG_ENV_FILE=config.env \
     MC_CONFIG_DIR=/tmp/.mc

+RUN chmod -R 777 /usr/bin
+
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/mc /usr/bin/mc
+COPY --from=build /go/bin/minio* /usr/bin/
+COPY --from=build /go/bin/mc* /usr/bin/
+COPY --from=build /go/bin/cur* /usr/bin/

 COPY CREDITS /licenses/CREDITS
Makefile (48 changes)

@@ -2,8 +2,8 @@ PWD := $(shell pwd)
 GOPATH := $(shell go env GOPATH)
 LDFLAGS := $(shell go run buildscripts/gen-ldflags.go)

-GOARCH := $(shell go env GOARCH)
-GOOS := $(shell go env GOOS)
+GOOS ?= $(shell go env GOOS)
+GOARCH ?= $(shell go env GOARCH)

 VERSION ?= $(shell git describe --tags)
 REPO ?= quay.io/minio
@@ -24,8 +24,6 @@ help: ## print this help
 getdeps: ## fetch necessary dependencies
	@mkdir -p ${GOPATH}/bin
	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOLANGCI_DIR)
-	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.10-0.20240227114326-6d6f813fff1b
-	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest

 crosscompile: ## cross compile minio
	@(env bash $(PWD)/buildscripts/cross-compile.sh)
@@ -60,6 +58,10 @@ test-ilm: install-race
	@echo "Running ILM tests"
	@env bash $(PWD)/docs/bucket/replication/setup_ilm_expiry_replication.sh

+test-ilm-transition: install-race
+	@echo "Running ILM tiering tests with healing"
+	@env bash $(PWD)/docs/bucket/lifecycle/setup_ilm_transition.sh
+
 test-pbac: install-race
	@echo "Running bucket policies tests"
	@env bash $(PWD)/docs/iam/policies/pbac-tests.sh
@@ -97,6 +99,14 @@ test-iam-ldap-upgrade-import: install-race ## verify IAM (external LDAP IDP)
	@echo "Running upgrade tests for IAM (LDAP backend)"
	@env bash $(PWD)/buildscripts/minio-iam-ldap-upgrade-import-test.sh

+test-iam-import-with-missing-entities: install-race ## test import of external iam config withg missing entities
+	@echo "Test IAM import configurations with missing entities"
+	@env bash $(PWD)/docs/distributed/iam-import-with-missing-entities.sh
+
+test-iam-import-with-openid: install-race
+	@echo "Test IAM import configurations with openid"
+	@env bash $(PWD)/docs/distributed/iam-import-with-openid.sh
+
 test-sio-error:
	@(env bash $(PWD)/docs/bucket/replication/sio-error.sh)
@@ -133,6 +143,14 @@ test-site-replication-minio: install-race ## verify automatic site replication
	@echo "Running tests for automatic site replication of SSE-C objects with compression enabled for site"
	@(env bash $(PWD)/docs/site-replication/run-ssec-object-replication-with-compression.sh)

+test-multipart: install-race ## test multipart
+	@echo "Test multipart behavior when part files are missing"
+	@(env bash $(PWD)/buildscripts/multipart-quorum-test.sh)
+
+test-timeout: install-race ## test multipart
+	@echo "Test server timeout"
+	@(env bash $(PWD)/buildscripts/test-timeout.sh)
+
 verify: install-race ## verify minio various setups
	@echo "Verifying build with race"
	@(env bash $(PWD)/buildscripts/verify-build.sh)
@@ -160,7 +178,7 @@ build-debugging:

 build: checks build-debugging ## builds minio to $(PWD)
	@echo "Building minio binary to './minio'"
-	@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null

 hotfix-vars:
	$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
@@ -168,9 +186,9 @@ hotfix-vars:
	$(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD))

 hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
-	@wget -q -c https://github.com/minio/pkger/releases/download/v2.3.1/pkger_2.3.1_linux_amd64.deb
-	@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.0.1/linux-systemd/distributed/minio.service
-	@sudo apt install ./pkger_2.3.1_linux_amd64.deb --yes
+	@wget -q -c https://github.com/minio/pkger/releases/download/v2.3.11/pkger_2.3.11_linux_amd64.deb
+	@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.1.1/linux-systemd/distributed/minio.service
+	@sudo apt install ./pkger_2.3.11_linux_amd64.deb --yes
	@mkdir -p minio-release/$(GOOS)-$(GOARCH)/archive
	@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio
	@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)
@@ -180,11 +198,11 @@ hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
	@pkger -r $(VERSION) --ignore

 hotfix-push: hotfix
-	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/
-	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive
-	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/
-	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive
-	@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.$(VERSION)"
+	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/
+	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/archive
+	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/
+	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/archive
+	@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-$(GOOS)/archive/minio.$(VERSION)"

 docker-hotfix-push: docker-hotfix
	@docker push -q $(TAG) && echo "Published new container $(TAG)"
@@ -197,6 +215,10 @@ docker: build ## builds minio docker container
	@echo "Building minio docker image '$(TAG)'"
	@docker build -q --no-cache -t $(TAG) . -f Dockerfile

+test-resiliency: build
+	@echo "Running resiliency tests"
+	@(DOCKER_COMPOSE_FILE=$(PWD)/docs/resiliency/docker-compose.yaml env bash $(PWD)/docs/resiliency/resiliency-tests.sh)
+
 install-race: checks build-debugging ## builds minio to $(PWD)
	@echo "Building minio binary with -race to './minio'"
	@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue,dev -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
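Because `GOOS` and `GOARCH` are now taken from the environment (`?=`) and threaded into `go build`, cross-compiling through the Makefile becomes a one-liner; a sketch:

```sh
# Build a linux/arm64 server binary from any host.
GOOS=linux GOARCH=arm64 make build
file ./minio   # should report an aarch64 ELF executable
```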
PULL_REQUESTS_ETIQUETTE.md (93 changes, new file)

@@ -0,0 +1,93 @@
+# MinIO Pull Request Guidelines
+
+These guidelines ensure high-quality commits in MinIO’s GitHub repositories, maintaining
+a clear, valuable commit history for our open-source projects. They apply to all contributors,
+fostering efficient reviews and robust code.
+
+## Why Pull Requests?
+
+Pull Requests (PRs) drive quality in MinIO’s codebase by:
+- Enabling peer review without pair programming.
+- Documenting changes for future reference.
+- Ensuring commits tell a clear story of development.
+
+**A poor commit lasts forever, even if code is refactored.**
+
+## Crafting a Quality PR
+
+A strong MinIO PR:
+- Delivers a complete, valuable change (feature, bug fix, or improvement).
+- Has a concise title (e.g., `[S3] Fix bucket policy parsing #1234`) and a summary with context, referencing issues (e.g., `#1234`).
+- Contains well-written, logical commits explaining *why* changes were made (e.g., “Add S3 bucket tagging support so that users can organize resources efficiently”).
+- Is small, focused, and easy to review—ideally one commit, unless multiple commits better narrate complex work.
+- Adheres to MinIO’s coding standards (e.g., Go style, error handling, testing).
+
+PRs must flow smoothly through review to reach production. Large PRs should be split into smaller, manageable ones.
+
+## Submitting PRs
+
+1. **Title and Summary**:
+   - Use a scannable title: `[Subsystem] Action Description #Issue` (e.g., `[IAM] Add role-based access control #567`).
+   - Include context in the summary: what changed, why, and any issue references.
+   - Use `[WIP]` for in-progress PRs to avoid premature merging or choose GitHub draft PRs.
+
+2. **Commits**:
+   - Write clear messages: what changed and why (e.g., “Refactor S3 API handler to reduce latency so that requests process 20% faster”).
+   - Rebase to tidy commits before submitting (e.g., `git rebase -i main` to squash typos or reword messages), unless multiple contributors worked on the branch.
+   - Keep PRs focused—one feature or fix. Split large changes into multiple PRs.
+
+3. **Testing**:
+   - Include unit tests for new functionality or bug fixes.
+   - Ensure existing tests pass (`make test`).
+   - Document testing steps in the PR summary if manual testing was performed.
+
+4. **Before Submitting**:
+   - Run `make verify` to check formatting, linting, and tests.
+   - Reference related issues (e.g., “Closes #1234”).
+   - Notify team members via GitHub `@mentions` if urgent or complex.
+
+## Reviewing PRs
+
+Reviewers ensure MinIO’s commit history remains a clear, reliable record. Responsibilities include:
+
+1. **Commit Quality**:
+   - Verify each commit explains *why* the change was made (e.g., “So that…”).
+   - Request rebasing if commits are unclear, redundant, or lack context (e.g., “Please squash typo fixes into the parent commit”).
+
+2. **Code Quality**:
+   - Check adherence to MinIO’s Go standards (e.g., error handling, documentation).
+   - Ensure tests cover new code and pass CI.
+   - Flag bugs or critical issues for immediate fixes; suggest non-blocking improvements as follow-up issues.
+
+3. **Flow**:
+   - Review promptly to avoid blocking progress.
+   - Balance quality and speed—minor issues can be addressed later via issues, not PR blocks.
+   - If unable to complete the review, tag another reviewer (e.g., `@username please take over`).
+
+4. **Shared Responsibility**:
+   - All MinIO contributors are reviewers. The first commenter on a PR owns the review unless they delegate.
+   - Multiple reviewers are encouraged for complex PRs.
+
+5. **No Self-Edits**:
+   - Don’t modify the PR directly (e.g., fixing bugs). Request changes from the submitter or create a follow-up PR.
+   - If you edit, you’re a collaborator, not a reviewer, and cannot merge.
+
+6. **Testing**:
+   - Assume the submitter tested the code. If testing is unclear, ask for details (e.g., “How was this tested?”).
+   - Reject untested PRs unless testing is infeasible, then assist with test setup.
+
+## Tips for Success
+
+- **Small PRs**: Easier to review, faster to merge. Split large changes logically.
+- **Clear Commits**: Use `git rebase -i` to refine history before submitting.
+- **Engage Early**: Discuss complex changes in issues or Slack (https://slack.min.io) before coding.
+- **Be Responsive**: Address reviewer feedback promptly to keep PRs moving.
+- **Learn from Reviews**: Use feedback to improve future contributions.
+
+## Resources
+
+- [MinIO Coding Standards](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
+- [Effective Commit Messages](https://mislav.net/2014/02/hidden-documentation/)
+- [GitHub PR Tips](https://github.com/blog/1943-how-to-write-the-perfect-pull-request)
+
+By following these guidelines, we ensure MinIO’s codebase remains high-quality, maintainable, and a joy to contribute to. Happy coding!
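The rebase workflow these guidelines describe can be condensed into a short sketch; the branch name below is hypothetical, and the remote layout follows the repo's `upstream` convention:

```sh
# Tidy local history before opening the PR: squash fixups, reword messages.
git fetch upstream
git rebase -i upstream/master

# After addressing review feedback, update the same PR branch.
git push --force-with-lease origin my-feature-branch   # hypothetical branch name
```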
(deleted FIPS documentation page; filename not preserved in this view)

@@ -1,7 +0,0 @@
-# MinIO FIPS Builds
-
-MinIO creates FIPS builds using a patched version of the Go compiler (that uses BoringCrypto, from BoringSSL, which is [FIPS 140-2 validated](https://csrc.nist.gov/csrc/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp2964.pdf)) published by the Golang Team [here](https://github.com/golang/go/tree/dev.boringcrypto/misc/boring).
-
-MinIO FIPS executables are available at <http://dl.min.io> - they are only published for `linux-amd64` architecture as binary files with the suffix `.fips`. We also publish corresponding container images to our official image repositories.
-
-We are not making any statements or representations about the suitability of this code or build in relation to the FIPS 140-2 standard. Interested users will have to evaluate for themselves whether this is useful for their own purposes.
268
README.md
268
README.md
@ -4,254 +4,154 @@
|
||||
|
||||
[](https://min.io)
|
||||
|
||||
MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
|
||||
MinIO is a high-performance, S3-compatible object storage solution released under the GNU AGPL v3.0 license.
|
||||
Designed for speed and scalability, it powers AI/ML, analytics, and data-intensive workloads with industry-leading performance.
|
||||
|
||||
This README provides quickstart instructions on running MinIO on bare metal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
|
||||
- S3 API Compatible – Seamless integration with existing S3 tools
|
||||
- Built for AI & Analytics – Optimized for large-scale data pipelines
|
||||
- High Performance – Ideal for demanding storage workloads.
|
||||
|
||||
## Container Installation
|
||||
This README provides instructions for building MinIO from source and deploying onto baremetal hardware.
|
||||
Use the [MinIO Documentation](https://github.com/minio/docs) project to build and host a local copy of the documentation.
|
||||
|
||||
Use the following commands to run a standalone MinIO server as a container.
|
||||
## MinIO is Open Source Software
|
||||
|
||||
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
|
||||
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
|
||||
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
|
||||
for more complete documentation.
|
||||
We designed MinIO as Open Source software for the Open Source software community. We encourage the community to remix, redesign, and reshare MinIO under the terms of the AGPLv3 license.
|
||||
|
||||
### Stable
|
||||
All usage of MinIO in your application stack requires validation against AGPLv3 obligations, which include but are not limited to the release of modified code to the community from which you have benefited. Any commercial/proprietary usage of the AGPLv3 software, including repackaging or reselling services/features, is done at your own risk.
|
||||
|
||||
Run the following command to run the latest stable image of MinIO as a container using an ephemeral data volume:
|
||||
The AGPLv3 provides no obligation by any party to support, maintain, or warranty the original or any modified work.
|
||||
All support is provided on a best-effort basis through Github and our [Slack](https//slack.min.io) channel, and any member of the community is welcome to contribute and assist others in their usage of the software.
|
||||
|
||||
```sh
|
||||
podman run -p 9000:9000 -p 9001:9001 \
|
||||
quay.io/minio/minio server /data --console-address ":9001"
|
||||
```
|
||||
MinIO [AIStor](https://www.min.io/product/aistor) includes enterprise-grade support and licensing for workloads which require commercial or proprietary usage and production-level SLA/SLO-backed support. For more information, [reach out for a quote](https://min.io/pricing).
|
||||
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded
|
||||
object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the
|
||||
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
|
||||
## Source-Only Distribution
|
||||
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
|
||||
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
|
||||
see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
|
||||
**Important:** The MinIO community edition is now distributed as source code only. We will no longer provide pre-compiled binary releases for the community version.
|
||||
|
||||
> NOTE: To deploy MinIO on with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.
|
||||
### Installing Latest MinIO Community Edition
|
||||
|
||||
## macOS
|
||||
To use MinIO community edition, you have two options:
|
||||
|
||||
Use the following commands to run a standalone MinIO server on macOS.
|
||||
1. **Install from source** using `go install github.com/minio/minio@latest` (recommended)
|
||||
2. **Build a Docker image** from the provided Dockerfile
|
||||
|
||||
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
|
||||
See the sections below for detailed instructions on each method.
|
||||
|
||||
### Homebrew (recommended)
|
||||
### Legacy Binary Releases
|
||||
|
||||
Run the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
|
||||
Historical pre-compiled binary releases remain available for reference but are no longer maintained:
|
||||
- GitHub Releases: https://github.com/minio/minio/releases
|
||||
- Direct downloads: https://dl.min.io/server/minio/release/
|
||||
|
||||
```sh
|
||||
brew install minio/stable/minio
|
||||
minio server /data
|
||||
```
|
||||
|
||||
> NOTE: If you previously installed minio using `brew install minio` then it is recommended that you reinstall minio from `minio/stable/minio` official repo instead.
|
||||
|
||||
```sh
|
||||
brew uninstall minio
|
||||
brew install minio/stable/minio
|
||||
```
|
||||
|
||||
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
|
||||
|
||||
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html/> to view MinIO SDKs for supported languages.
|
||||
|
||||
### Binary Download
Use the following command to download and run a standalone MinIO server on macOS. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.

```sh
wget https://dl.min.io/server/minio/release/darwin-amd64/minio
chmod +x minio
./minio server /data
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
## GNU/Linux
Use the following command to run a standalone MinIO server on Linux hosts running 64-bit Intel/AMD architectures. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.

```sh
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
./minio server /data
```

The following table lists supported architectures. Replace the `wget` URL with the architecture for your Linux host.

| Architecture | URL |
| -------- | ------ |
| 64-bit Intel/AMD | <https://dl.min.io/server/minio/release/linux-amd64/minio> |
| 64-bit ARM | <https://dl.min.io/server/minio/release/linux-arm64/minio> |
| 64-bit PowerPC LE (ppc64le) | <https://dl.min.io/server/minio/release/linux-ppc64le/minio> |
| IBM Z-Series (S390X) | <https://dl.min.io/server/minio/release/linux-s390x/minio> |
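If you script your installs, the following sketch picks the matching URL from the table above based on the host architecture (it covers only the two most common cases):

```sh
# Select the download URL matching `uname -m`, then fetch the binary
case "$(uname -m)" in
x86_64) url=https://dl.min.io/server/minio/release/linux-amd64/minio ;;
aarch64) url=https://dl.min.io/server/minio/release/linux-arm64/minio ;;
*) echo "see the table above for other architectures" && exit 1 ;;
esac
wget -O minio "$url" && chmod +x minio
```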
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
## Microsoft Windows
To run MinIO on 64-bit Windows hosts, download the MinIO executable from the following URL:

```sh
https://dl.min.io/server/minio/release/windows-amd64/minio.exe
```
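If you prefer downloading from the terminal, a PowerShell sketch (the output path is illustrative):

```sh
Invoke-WebRequest -Uri "https://dl.min.io/server/minio/release/windows-amd64/minio.exe" -OutFile "C:\minio\minio.exe"
```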
Use the following command to run a standalone MinIO server on the Windows host. Replace ``D:\`` with the path to the drive or directory in which you want MinIO to store data. You must change the terminal or PowerShell directory to the location of the ``minio.exe`` executable, *or* add the path to that directory to the system ``$PATH``:

```sh
minio.exe server D:\
```

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
## Install from Source
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users.
If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.24](https://golang.org/dl/#stable).

```sh
go install github.com/minio/minio@latest
```

You can alternatively run `go build` and use the `GOOS` and `GOARCH` environment variables to control the OS and architecture target.
For example:

```sh
env GOOS=linux GOARCH=arm64 go build
```

Start MinIO by running `minio server PATH`, where `PATH` is any empty folder on your local filesystem.

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`.
You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server.
Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials.
You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool:

```sh
mc alias set local http://localhost:9000 minioadmin minioadmin
mc admin info local
```

See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool.
For application developers, see <https://docs.min.io/enterprise/aistor-object-store/developers/sdk/> to view MinIO SDKs for supported languages.
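The server reads its root credentials from the environment at startup, so you can replace the defaults when testing a source build; a minimal sketch (the values are illustrative):

```sh
# Override the default minioadmin:minioadmin root credentials
export MINIO_ROOT_USER=myadmin
export MINIO_ROOT_PASSWORD=use-a-long-random-secret
minio server /data
```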
MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.

> [!NOTE]
> Production environments using compiled-from-source MinIO binaries do so at their own risk.
> The AGPLv3 license provides no warranties nor liabilities for any such usage.

## Build Docker Image

You can use the `docker build .` command to build a Docker image on your local host machine.
You must first [build MinIO](#install-from-source) and ensure the `minio` binary exists in the project root.

The following command builds the Docker image using the default `Dockerfile` in the root project directory with the repository and image tag `myminio:minio`:

```sh
docker build -t myminio:minio .
```

Use `docker image ls` to confirm the image exists in your local repository.
You can run the server using standard Docker invocation:

```sh
docker run -p 9000:9000 -p 9001:9001 myminio:minio server /tmp/minio --console-address :9001
```
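As with the Podman note earlier in this guide, map a host directory into the container if you want the data to persist; a sketch assuming host storage at `/mnt/data`:

```sh
# Persist object data on the host instead of the ephemeral /tmp/minio
docker run -p 9000:9000 -p 9001:9001 \
  -v /mnt/data:/data \
  myminio:minio server /data --console-address :9001
```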
Complete documentation for building Docker containers, managing custom images, or loading images into orchestration platforms is out of scope for this documentation.
You can modify the `Dockerfile` and `dockerscripts/docker-entrypoint.sh` as needed to reflect your specific image requirements.

See the [MinIO Container](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-as-a-container.html#deploy-minio-container) documentation for more guidance on running MinIO within a container image.

## Install using Helm Charts

There are two paths for installing MinIO onto Kubernetes infrastructure:

- Use the [MinIO Operator](https://github.com/minio/operator)
- Use the community-maintained [Helm charts](https://github.com/minio/minio/tree/master/helm/minio)

See the [MinIO Documentation](https://docs.min.io/community/minio-object-store/operations/deployments/kubernetes.html) for guidance on deploying using the Operator.
The Community Helm chart has instructions in the folder-level README.
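A minimal sketch of the community-chart path (the chart repository URL and release name are assumptions; confirm them against the chart's README):

```sh
# Add the community Helm repository and install a release named "minio"
helm repo add minio https://charts.min.io/
helm install minio minio/minio
```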
## Deployment Recommendations

### Allow port access for Firewalls

By default MinIO uses port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to it.

### ufw

For hosts with ufw enabled (Debian-based distros), you can use the `ufw` command to allow traffic to specific ports. Use the command below to allow access to port 9000:

```sh
ufw allow 9000
```

The command below enables all incoming traffic to ports ranging from 9000 to 9010:

```sh
ufw allow 9000:9010/tcp
```

### firewall-cmd

For hosts with firewall-cmd enabled (CentOS), you can use the `firewall-cmd` command to allow traffic to specific ports. Use the commands below to allow access to port 9000:

```sh
firewall-cmd --get-active-zones
```

This command gets the active zone(s). Now, apply port rules to the relevant zones returned above. For example, if the zone is `public`, use:

```sh
firewall-cmd --zone=public --add-port=9000/tcp --permanent
```

Note that `permanent` makes sure the rules are persistent across firewall start, restart, or reload. Finally, reload the firewall for the changes to take effect:

```sh
firewall-cmd --reload
```

### iptables

For hosts with iptables enabled (RHEL, CentOS, etc.), you can use the `iptables` command to enable all traffic coming to specific ports. Use the command below to allow access to port 9000:

```sh
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
service iptables restart
```

The command below enables all incoming traffic to ports ranging from 9000 to 9010:

```sh
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```
## Test MinIO Connectivity
### Test using MinIO Console

MinIO Server comes with an embedded web based object browser.
Point your web browser to <http://127.0.0.1:9000> to ensure your server has started successfully.

> [!NOTE]
> MinIO runs the console on a random port by default. If you wish to choose a specific port, use `--console-address` to pick a specific interface and port.

### Things to consider

MinIO redirects browser access requests to the configured server port (i.e. `127.0.0.1:9000`) to the configured Console port. MinIO uses the hostname or IP address specified in the request when building the redirect URL. The URL and port *must* be accessible by the client for the redirection to work.

For deployments behind a load balancer, proxy, or ingress rule where the MinIO host IP address or port is not public, use the `MINIO_BROWSER_REDIRECT_URL` environment variable to specify the external hostname for the redirect. The LB/Proxy must have rules for directing traffic to the Console port specifically.

For example, consider a MinIO deployment behind a proxy `https://minio.example.net`, `https://console.minio.example.net` with rules for forwarding traffic on ports :9000 and :9001 to MinIO and the MinIO Console respectively on the internal network. Set `MINIO_BROWSER_REDIRECT_URL` to `https://console.minio.example.net` to ensure the browser receives a valid reachable URL.
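A sketch of that configuration on the MinIO host (the hostnames follow the example above):

```sh
# Browser requests to the server port get redirected to this external URL
export MINIO_BROWSER_REDIRECT_URL=https://console.minio.example.net
minio server /data --console-address ":9001"
```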
| Dashboard | Creating a bucket |
| ------------- | ------------- |
|  |  |
### Test using MinIO Client `mc`

`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services.

The following commands set a local alias, validate the server information, create a bucket, copy data to that bucket, and list the contents of the bucket.

```sh
mc alias set local http://localhost:9000 minioadmin minioadmin
mc admin info local
mc mb local/data
mc cp ~/Downloads/mydata local/data/
mc ls local/data/
```

Follow the MinIO Client [Quickstart Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart) for further instructions.

## Upgrading MinIO

Upgrades require zero downtime in MinIO: all upgrades are non-disruptive and all transactions on MinIO are atomic. Upgrading all the servers simultaneously is therefore the recommended way to upgrade MinIO.

> NOTE: Upgrading requires internet access to update directly from <https://dl.min.io>; optionally, you can host any mirrors at <https://my-artifactory.example.com/minio/>.

- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-update.html):

```sh
mc admin update <minio alias, e.g., myminio>
```

- For deployments without external internet access (e.g. airgapped environments), download the binary from <https://dl.min.io>, replace the existing MinIO binary (for example at `/opt/bin/minio`), apply executable permissions with `chmod +x /opt/bin/minio`, and then run `mc admin service restart alias/`.

- For installations using the Systemd MinIO service, upgrade via RPM/DEB packages **in parallel** on all servers, or replace the binary (for example `/opt/bin/minio`) on all nodes, apply executable permissions with `chmod +x /opt/bin/minio`, and then run `mc admin service restart alias/` (see the sketch after this list).
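A sketch of that parallel flow for a hand-managed deployment (the host names, binary path, and `myminio` alias are all illustrative):

```sh
# Stage the new binary on every node first, then restart the deployment at once
for host in minio1 minio2 minio3 minio4; do
	scp ./minio "${host}:/tmp/minio.new"
	ssh "$host" 'chmod +x /tmp/minio.new && sudo mv /tmp/minio.new /opt/bin/minio'
done
mc admin service restart myminio/
```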
### Upgrade Checklist

- Test all upgrades in a lower environment (DEV, QA, UAT) before applying to production. Performing blind upgrades in production environments carries significant risk.
- Read the release notes for MinIO *before* performing any upgrade. There is no forced requirement to upgrade to the latest release upon every release; some releases may not be relevant to your setup, so avoid upgrading production environments unnecessarily.
- If you plan to use `mc admin update`, the MinIO process must have write access to the parent directory where the binary is present on the host system.
- `mc admin update` is not supported and should be avoided in Kubernetes/container environments; upgrade containers by upgrading the relevant container images instead.
- **We do not recommend upgrading one MinIO server at a time; the product is designed to support parallel upgrades. Please follow our recommended guidelines.**
## Explore Further
- [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html)
- [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html)
- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/enterprise/aistor-object-store/developers/sdk/go/)
- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html)
## Contribute to MinIO Project
Please follow the MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md) for guidance on making new contributions to the repository.
## License
@@ -74,11 +74,11 @@ check_minimum_version() {

assert_is_supported_arch() {
	case "${ARCH}" in
-	x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64)
+	x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64 | riscv64)
		return
		;;
	*)
-		echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64]"
+		echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64, riscv64]"
		exit 1
		;;
	esac
@@ -9,7 +9,7 @@ function _init() {
	export CGO_ENABLED=0

	## List of architectures and OS to test cross compilation.
-	SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64"
+	SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64 linux/riscv64"
}

function _build() {
@@ -8,7 +8,7 @@
#
# This script assumes that LDAP server is at:
#
-# `localhost:1389`
+# `localhost:389`
#
# if this is not the case, set the environment variable
# `_MINIO_LDAP_TEST_SERVER`.

@@ -41,7 +41,7 @@ __init__() {
	fi

	if [ -z "$_MINIO_LDAP_TEST_SERVER" ]; then
-		export _MINIO_LDAP_TEST_SERVER=localhost:1389
+		export _MINIO_LDAP_TEST_SERVER=localhost:389
		echo "Using default LDAP endpoint: $_MINIO_LDAP_TEST_SERVER"
	fi

@@ -58,7 +58,7 @@ create_iam_content_in_old_minio() {
	mc alias set old-minio http://localhost:9000 minioadmin minioadmin
	mc ready old-minio
	mc idp ldap add old-minio \
-		server_addr=localhost:1389 \
+		server_addr=localhost:389 \
		server_insecure=on \
		lookup_bind_dn=cn=admin,dc=min,dc=io \
		lookup_bind_password=admin \

@@ -69,8 +69,10 @@ __init__() {

	## this is needed because github actions don't have
	## docker-compose on all runners
-	go install github.com/docker/compose/v2/cmd@latest
-	mv -v /tmp/gopath/bin/cmd /tmp/gopath/bin/docker-compose
+	COMPOSE_VERSION=v2.35.1
+	mkdir -p /tmp/gopath/bin/
+	wget -O /tmp/gopath/bin/docker-compose https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-linux-x86_64
+	chmod +x /tmp/gopath/bin/docker-compose

	cleanup
buildscripts/multipart-quorum-test.sh (new file, 126 lines)

@@ -0,0 +1,126 @@
#!/bin/bash

if [ -n "$TEST_DEBUG" ]; then
	set -x
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)

if [ ! -x "$PWD/minio" ]; then
	echo "minio executable binary not found in current directory"
	exit 1
fi

trap 'catch $LINENO' ERR

function purge() {
	rm -rf "$1"
}

# shellcheck disable=SC2120
catch() {
	if [ $# -ne 0 ]; then
		echo "error on line $1"
	fi

	echo "Cleaning up instances of MinIO"
	pkill minio || true
	pkill -9 minio || true
	purge "$WORK_DIR"
	if [ $# -ne 0 ]; then
		exit $#
	fi
}

catch

function start_minio_10drive() {
	start_port=$1

	export MINIO_ROOT_USER=minio
	export MINIO_ROOT_PASSWORD=minio123
	export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
	unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
	export MINIO_CI_CD=1

	mkdir ${WORK_DIR}
	C_PWD=${PWD}
	if [ ! -x "$PWD/mc" ]; then
		MC_BUILD_DIR="mc-$RANDOM"
		if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
			echo "failed to download https://github.com/minio/mc"
			purge "${MC_BUILD_DIR}"
			exit 1
		fi

		(cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc")

		# remove mc source.
		purge "${MC_BUILD_DIR}"
	fi

	"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/disk{1...10}" >"${WORK_DIR}/server1.log" 2>&1 &
	pid=$!
	disown $pid
	sleep 5

	if ! ps -p ${pid} 1>&2 >/dev/null; then
		echo "server1 log:"
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
		purge "$WORK_DIR"
		exit 1
	fi

	"${PWD}/mc" mb --with-versioning minio/bucket

	export AWS_ACCESS_KEY_ID=minio
	export AWS_SECRET_ACCESS_KEY=minio123
	aws --endpoint-url http://localhost:"$start_port" s3api create-multipart-upload --bucket bucket --key obj-1 >upload-id.json
	uploadId=$(jq -r '.UploadId' upload-id.json)

	truncate -s 5MiB file-5mib
	for i in {1..2}; do
		aws --endpoint-url http://localhost:"$start_port" s3api upload-part \
			--upload-id "$uploadId" --bucket bucket --key obj-1 \
			--part-number "$i" --body ./file-5mib
	done
	for i in {1..6}; do
		find ${WORK_DIR}/disk${i}/.minio.sys/multipart/ -type f -name "part.1" -delete
	done
	cat <<EOF >parts.json
{
  "Parts": [
    {
      "PartNumber": 1,
      "ETag": "5f363e0e58a95f06cbe9bbc662c5dfb6"
    },
    {
      "PartNumber": 2,
      "ETag": "5f363e0e58a95f06cbe9bbc662c5dfb6"
    }
  ]
}
EOF
	err=$(aws --endpoint-url http://localhost:"$start_port" s3api complete-multipart-upload --upload-id "$uploadId" --bucket bucket --key obj-1 --multipart-upload file://./parts.json 2>&1)
	rv=$?
	if [ $rv -eq 0 ]; then
		echo "Failed to receive an error"
		exit 1
	fi
	echo "Received an error during complete-multipart as expected: $err"
}

function main() {
	start_port=$(shuf -i 10000-65000 -n 1)
	start_minio_10drive ${start_port}
}

main "$@"
buildscripts/test-timeout.sh (new file, 137 lines)

@@ -0,0 +1,137 @@
#!/bin/bash

if [ -n "$TEST_DEBUG" ]; then
	set -x
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)

if [ ! -x "$PWD/minio" ]; then
	echo "minio executable binary not found in current directory"
	exit 1
fi

trap 'catch $LINENO' ERR

function purge() {
	rm -rf "$1"
}

# shellcheck disable=SC2120
catch() {
	if [ $# -ne 0 ]; then
		echo "error on line $1"
	fi

	echo "Cleaning up instances of MinIO"
	pkill minio || true
	pkill -9 minio || true
	purge "$WORK_DIR"
	if [ $# -ne 0 ]; then
		exit $#
	fi
}

catch

function gen_put_request() {
	hdr_sleep=$1
	body_sleep=$2

	echo "PUT /testbucket/testobject HTTP/1.1"
	sleep $hdr_sleep
	echo "Host: foo-header"
	echo "User-Agent: curl/8.2.1"
	echo "Accept: */*"
	echo "Content-Length: 30"
	echo ""

	sleep $body_sleep
	echo "random line 0"
	echo "random line 1"
	echo ""
	echo ""
}

function send_put_object_request() {
	hdr_timeout=$1
	body_timeout=$2

	start=$(date +%s)
	timeout 5m bash -c "gen_put_request $hdr_timeout $body_timeout | netcat 127.0.0.1 $start_port | read" || return -1
	[ $(($(date +%s) - start)) -gt $((srv_hdr_timeout + srv_idle_timeout + 1)) ] && return -1
	return 0
}

function test_minio_with_timeout() {
	start_port=$1

	export MINIO_ROOT_USER=minio
	export MINIO_ROOT_PASSWORD=minio123
	export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
	export MINIO_CI_CD=1

	mkdir ${WORK_DIR}
	C_PWD=${PWD}
	if [ ! -x "$PWD/mc" ]; then
		MC_BUILD_DIR="mc-$RANDOM"
		if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
			echo "failed to download https://github.com/minio/mc"
			purge "${MC_BUILD_DIR}"
			exit 1
		fi

		(cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc")

		# remove mc source.
		purge "${MC_BUILD_DIR}"
	fi

	"${MINIO[@]}" --address ":$start_port" --read-header-timeout ${srv_hdr_timeout}s --idle-timeout ${srv_idle_timeout}s "${WORK_DIR}/disk/" >"${WORK_DIR}/server1.log" 2>&1 &
	pid=$!
	disown $pid
	sleep 1

	if ! ps -p ${pid} 1>&2 >/dev/null; then
		echo "server1 log:"
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
		purge "$WORK_DIR"
		exit 1
	fi

	set -e

	"${PWD}/mc" mb minio/testbucket
	"${PWD}/mc" anonymous set public minio/testbucket

	# slow header writing
	send_put_object_request 20 0 && exit -1
	"${PWD}/mc" stat minio/testbucket/testobject && exit -1

	# quick header write and slow body write
	send_put_object_request 0 40 && exit -1
	"${PWD}/mc" stat minio/testbucket/testobject && exit -1

	# quick header and body write
	send_put_object_request 1 1 || exit -1
	"${PWD}/mc" stat minio/testbucket/testobject || exit -1
}

function main() {
	export start_port=$(shuf -i 10000-65000 -n 1)
	export srv_hdr_timeout=5
	export srv_idle_timeout=5
	export -f gen_put_request

	test_minio_with_timeout ${start_port}
}

main "$@"
@ -38,6 +38,7 @@ import (
|
||||
objectlock "github.com/minio/minio/internal/bucket/object/lock"
|
||||
"github.com/minio/minio/internal/bucket/versioning"
|
||||
"github.com/minio/minio/internal/event"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/kms"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
@ -427,6 +428,21 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
|
||||
cfgPath := pathJoin(bi.Name, cfgFile)
|
||||
bucket := bi.Name
|
||||
switch cfgFile {
|
||||
case bucketPolicyConfig:
|
||||
config, _, err := globalBucketMetadataSys.GetBucketPolicy(bucket)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketPolicyNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
configData, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
|
||||
case bucketNotificationConfig:
|
||||
config, err := globalBucketMetadataSys.GetNotificationConfig(bucket)
|
||||
if err != nil {
|
||||
@ -796,11 +812,12 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
|
||||
}
|
||||
|
||||
bucketMap[bucket].NotificationConfigXML = configData
|
||||
bucketMap[bucket].NotificationConfigUpdatedAt = updatedAt
|
||||
rpt.SetStatus(bucket, fileName, nil)
|
||||
case bucketPolicyConfig:
|
||||
// Error out if Content-Length is beyond allowed size.
|
||||
if sz > maxBucketPolicySize {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyTooLarge.String()))
|
||||
rpt.SetStatus(bucket, fileName, errors.New(ErrPolicyTooLarge.String()))
|
||||
continue
|
||||
}
|
||||
|
||||
@ -818,7 +835,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
|
||||
|
||||
// Version in policy must not be empty
|
||||
if bucketPolicy.Version == "" {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyInvalidVersion.String()))
|
||||
rpt.SetStatus(bucket, fileName, errors.New(ErrPolicyInvalidVersion.String()))
|
||||
continue
|
||||
}
|
||||
|
||||
@ -964,7 +981,6 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
|
||||
rpt.SetStatus(bucket, "", err)
|
||||
continue
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
rptData, err := json.Marshal(rpt.BucketMetaImportErrs)
|
||||
@ -1023,7 +1039,7 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http.
|
||||
}
|
||||
if len(diffCh) == 0 {
|
||||
// Flush if nothing is queued
|
||||
w.(http.Flusher).Flush()
|
||||
xhttp.Flush(w)
|
||||
}
|
||||
case <-keepAliveTicker.C:
|
||||
if len(diffCh) > 0 {
|
||||
@ -1032,7 +1048,7 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http.
|
||||
if _, err := w.Write([]byte(" ")); err != nil {
|
||||
return
|
||||
}
|
||||
w.(http.Flusher).Flush()
|
||||
xhttp.Flush(w)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
@ -1082,7 +1098,7 @@ func (a adminAPIHandlers) ReplicationMRFHandler(w http.ResponseWriter, r *http.R
|
||||
}
|
||||
if len(mrfCh) == 0 {
|
||||
// Flush if nothing is queued
|
||||
w.(http.Flusher).Flush()
|
||||
xhttp.Flush(w)
|
||||
}
|
||||
case <-keepAliveTicker.C:
|
||||
if len(mrfCh) > 0 {
|
||||
@ -1091,7 +1107,7 @@ func (a adminAPIHandlers) ReplicationMRFHandler(w http.ResponseWriter, r *http.R
|
||||
if _, err := w.Write([]byte(" ")); err != nil {
|
||||
return
|
||||
}
|
||||
w.(http.Flusher).Flush()
|
||||
xhttp.Flush(w)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
@ -193,27 +193,27 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
|
||||
func setConfigKV(ctx context.Context, objectAPI ObjectLayer, kvBytes []byte) (result setConfigResult, err error) {
|
||||
result.Cfg, err = readServerConfig(ctx, objectAPI, nil)
|
||||
if err != nil {
|
||||
return
|
||||
return result, err
|
||||
}
|
||||
|
||||
result.Dynamic, err = result.Cfg.ReadConfig(bytes.NewReader(kvBytes))
|
||||
if err != nil {
|
||||
return
|
||||
return result, err
|
||||
}
|
||||
|
||||
result.SubSys, _, _, err = config.GetSubSys(string(kvBytes))
|
||||
if err != nil {
|
||||
return
|
||||
return result, err
|
||||
}
|
||||
|
||||
tgts, err := config.ParseConfigTargetID(bytes.NewReader(kvBytes))
|
||||
if err != nil {
|
||||
return
|
||||
return result, err
|
||||
}
|
||||
ctx = context.WithValue(ctx, config.ContextKeyForTargetFromConfig, tgts)
|
||||
if verr := validateConfig(ctx, result.Cfg, result.SubSys); verr != nil {
|
||||
err = badConfigErr{Err: verr}
|
||||
return
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Check if subnet proxy being set and if so set the same value to proxy of subnet
|
||||
@ -222,12 +222,12 @@ func setConfigKV(ctx context.Context, objectAPI ObjectLayer, kvBytes []byte) (re
|
||||
|
||||
// Update the actual server config on disk.
|
||||
if err = saveServerConfig(ctx, objectAPI, result.Cfg); err != nil {
|
||||
return
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Write the config input KV to history.
|
||||
err = saveServerConfigHistory(ctx, objectAPI, kvBytes)
|
||||
return
|
||||
return result, err
|
||||
}
|
||||
|
||||
// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}
|
||||
|
||||
@ -125,7 +125,6 @@ func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.R
|
||||
}
|
||||
|
||||
if err = validateConfig(ctx, cfg, subSys); err != nil {
|
||||
|
||||
var validationErr ldap.Validation
|
||||
if errors.As(err, &validationErr) {
|
||||
// If we got an LDAP validation error, we need to send appropriate
|
||||
@ -416,7 +415,6 @@ func (a adminAPIHandlers) DeleteIdentityProviderCfg(w http.ResponseWriter, r *ht
|
||||
return
|
||||
}
|
||||
if err = validateConfig(ctx, cfg, subSys); err != nil {
|
||||
|
||||
var validationErr ldap.Validation
|
||||
if errors.As(err, &validationErr) {
|
||||
// If we got an LDAP validation error, we need to send appropriate
|
||||
|
||||
@ -190,7 +190,7 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.
|
||||
//
|
||||
// PUT /minio/admin/v3/idp/ldap/add-service-account
|
||||
func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r)
|
||||
ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r, true)
|
||||
if APIError.Code != "" {
|
||||
writeErrorResponseJSON(ctx, w, APIError, r.URL)
|
||||
return
|
||||
@ -214,10 +214,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
|
||||
}
|
||||
|
||||
// Check if we are creating svc account for request sender.
|
||||
isSvcAccForRequestor := false
|
||||
if targetUser == requestorUser || targetUser == requestorParentUser {
|
||||
isSvcAccForRequestor = true
|
||||
}
|
||||
isSvcAccForRequestor := targetUser == requestorUser || targetUser == requestorParentUser
|
||||
|
||||
var (
|
||||
targetGroups []string
|
||||
@ -345,7 +342,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
|
||||
Name: newCred.Name,
|
||||
Description: newCred.Description,
|
||||
Claims: opts.claims,
|
||||
SessionPolicy: createReq.Policy,
|
||||
SessionPolicy: madmin.SRSessionPolicy(createReq.Policy),
|
||||
Status: auth.AccountOn,
|
||||
Expiration: createReq.Expiration,
|
||||
},
|
||||
@ -448,8 +445,10 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ
|
||||
for _, svc := range serviceAccounts {
|
||||
expiryTime := svc.Expiration
|
||||
serviceAccountList = append(serviceAccountList, madmin.ServiceAccountInfo{
|
||||
AccessKey: svc.AccessKey,
|
||||
Expiration: &expiryTime,
|
||||
AccessKey: svc.AccessKey,
|
||||
Expiration: &expiryTime,
|
||||
Name: svc.Name,
|
||||
Description: svc.Description,
|
||||
})
|
||||
}
|
||||
for _, sts := range stsKeys {
|
||||
@ -499,7 +498,7 @@ func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.
|
||||
|
||||
dnList := r.Form["userDNs"]
|
||||
isAll := r.Form.Get("all") == "true"
|
||||
onlySelf := !isAll && len(dnList) == 0
|
||||
selfOnly := !isAll && len(dnList) == 0
|
||||
|
||||
if isAll && len(dnList) > 0 {
|
||||
// This should be checked on client side, so return generic error
|
||||
@ -527,7 +526,7 @@ func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.
|
||||
dn = foundResult.NormDN
|
||||
}
|
||||
if dn == cred.ParentUser || dnList[0] == cred.ParentUser {
|
||||
onlySelf = true
|
||||
selfOnly = true
|
||||
}
|
||||
}
|
||||
|
||||
@ -538,13 +537,13 @@ func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.
|
||||
ConditionValues: getConditionValues(r, "", cred),
|
||||
IsOwner: owner,
|
||||
Claims: cred.Claims,
|
||||
DenyOnly: onlySelf,
|
||||
DenyOnly: selfOnly,
|
||||
}) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if onlySelf && len(dnList) == 0 {
|
||||
if selfOnly && len(dnList) == 0 {
|
||||
selfDN := cred.AccessKey
|
||||
if cred.ParentUser != "" {
|
||||
selfDN = cred.ParentUser
|
||||
@ -609,10 +608,9 @@ func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.
|
||||
return
|
||||
}
|
||||
for _, sts := range stsKeys {
|
||||
expiryTime := sts.Expiration
|
||||
accessKeys.STSKeys = append(accessKeys.STSKeys, madmin.ServiceAccountInfo{
|
||||
AccessKey: sts.AccessKey,
|
||||
Expiration: &expiryTime,
|
||||
Expiration: &sts.Expiration,
|
||||
})
|
||||
}
|
||||
// if only STS keys, skip if user has no STS keys
|
||||
@ -628,10 +626,11 @@ func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.
|
||||
return
|
||||
}
|
||||
for _, svc := range serviceAccounts {
|
||||
expiryTime := svc.Expiration
|
||||
accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{
|
||||
AccessKey: svc.AccessKey,
|
||||
Expiration: &expiryTime,
|
||||
AccessKey: svc.AccessKey,
|
||||
Expiration: &svc.Expiration,
|
||||
Name: svc.Name,
|
||||
Description: svc.Description,
|
||||
})
|
||||
}
|
||||
// if only service accounts, skip if user has no service accounts
|
||||
|
||||
cmd/admin-handlers-idp-openid.go (new file, 248 lines)

@@ -0,0 +1,248 @@
|
||||
// Copyright (c) 2015-2025 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
"sort"
|
||||
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio-go/v7/pkg/set"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
const dummyRoleARN = "dummy-internal"
|
||||
|
||||
// ListAccessKeysOpenIDBulk - GET /minio/admin/v3/idp/openid/list-access-keys-bulk
|
||||
func (a adminAPIHandlers) ListAccessKeysOpenIDBulk(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
// Get current object layer instance.
|
||||
objectAPI := newObjectLayerFn()
|
||||
if objectAPI == nil || globalNotificationSys == nil {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
cred, owner, s3Err := validateAdminSignature(ctx, r, "")
|
||||
if s3Err != ErrNone {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if !globalIAMSys.OpenIDConfig.Enabled {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminOpenIDNotEnabled), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
userList := r.Form["users"]
|
||||
isAll := r.Form.Get("all") == "true"
|
||||
selfOnly := !isAll && len(userList) == 0
|
||||
cfgName := r.Form.Get("configName")
|
||||
allConfigs := r.Form.Get("allConfigs") == "true"
|
||||
if cfgName == "" && !allConfigs {
|
||||
cfgName = madmin.Default
|
||||
}
|
||||
|
||||
if isAll && len(userList) > 0 {
|
||||
// This should be checked on client side, so return generic error
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Empty DN list and not self, list access keys for all users
|
||||
if isAll {
|
||||
if !globalIAMSys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: policy.ListUsersAdminAction,
|
||||
ConditionValues: getConditionValues(r, "", cred),
|
||||
IsOwner: owner,
|
||||
Claims: cred.Claims,
|
||||
}) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
|
||||
return
|
||||
}
|
||||
} else if len(userList) == 1 && userList[0] == cred.ParentUser {
|
||||
selfOnly = true
|
||||
}
|
||||
|
||||
if !globalIAMSys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: policy.ListServiceAccountsAdminAction,
|
||||
ConditionValues: getConditionValues(r, "", cred),
|
||||
IsOwner: owner,
|
||||
Claims: cred.Claims,
|
||||
DenyOnly: selfOnly,
|
||||
}) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if selfOnly && len(userList) == 0 {
|
||||
selfDN := cred.AccessKey
|
||||
if cred.ParentUser != "" {
|
||||
selfDN = cred.ParentUser
|
||||
}
|
||||
userList = append(userList, selfDN)
|
||||
}
|
||||
|
||||
listType := r.Form.Get("listType")
|
||||
var listSTSKeys, listServiceAccounts bool
|
||||
switch listType {
|
||||
case madmin.AccessKeyListUsersOnly:
|
||||
listSTSKeys = false
|
||||
listServiceAccounts = false
|
||||
case madmin.AccessKeyListSTSOnly:
|
||||
listSTSKeys = true
|
||||
listServiceAccounts = false
|
||||
case madmin.AccessKeyListSvcaccOnly:
|
||||
listSTSKeys = false
|
||||
listServiceAccounts = true
|
||||
case madmin.AccessKeyListAll:
|
||||
listSTSKeys = true
|
||||
listServiceAccounts = true
|
||||
default:
|
||||
err := errors.New("invalid list type")
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
s := globalServerConfig.Clone()
|
||||
roleArnMap := make(map[string]string)
|
||||
// Map of configs to a map of users to their access keys
|
||||
cfgToUsersMap := make(map[string]map[string]madmin.OpenIDUserAccessKeys)
|
||||
configs, err := globalIAMSys.OpenIDConfig.GetConfigList(s)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
for _, config := range configs {
|
||||
if !allConfigs && cfgName != config.Name {
|
||||
continue
|
||||
}
|
||||
arn := dummyRoleARN
|
||||
if config.RoleARN != "" {
|
||||
arn = config.RoleARN
|
||||
}
|
||||
roleArnMap[arn] = config.Name
|
||||
newResp := make(map[string]madmin.OpenIDUserAccessKeys)
|
||||
cfgToUsersMap[config.Name] = newResp
|
||||
}
|
||||
if len(roleArnMap) == 0 {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
userSet := set.CreateStringSet(userList...)
|
||||
accessKeys, err := globalIAMSys.ListAllAccessKeys(ctx)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
for _, accessKey := range accessKeys {
|
||||
// Filter out any disqualifying access keys
|
||||
_, ok := accessKey.Claims[subClaim]
|
||||
if !ok {
|
||||
continue // OpenID access keys must have a sub claim
|
||||
}
|
||||
if (!listSTSKeys && !accessKey.IsServiceAccount()) || (!listServiceAccounts && accessKey.IsServiceAccount()) {
|
||||
continue // skip if not the type we want
|
||||
}
|
||||
arn, ok := accessKey.Claims[roleArnClaim].(string)
|
||||
if !ok {
|
||||
if _, ok := accessKey.Claims[iamPolicyClaimNameOpenID()]; !ok {
|
||||
continue // skip if no roleArn and no policy claim
|
||||
}
|
||||
// claim-based provider is in the roleArnMap under dummy ARN
|
||||
arn = dummyRoleARN
|
||||
}
|
||||
matchingCfgName, ok := roleArnMap[arn]
|
||||
if !ok {
|
||||
continue // skip if not part of the target config
|
||||
}
|
||||
var id string
|
||||
if idClaim := globalIAMSys.OpenIDConfig.GetUserIDClaim(matchingCfgName); idClaim != "" {
|
||||
id, _ = accessKey.Claims[idClaim].(string)
|
||||
}
|
||||
if !userSet.IsEmpty() && !userSet.Contains(accessKey.ParentUser) && !userSet.Contains(id) {
|
||||
continue // skip if not in the user list
|
||||
}
|
||||
openIDUserAccessKeys, ok := cfgToUsersMap[matchingCfgName][accessKey.ParentUser]
|
||||
|
||||
// Add new user to map if not already present
|
||||
if !ok {
|
||||
var readableClaim string
|
||||
if rc := globalIAMSys.OpenIDConfig.GetUserReadableClaim(matchingCfgName); rc != "" {
|
||||
readableClaim, _ = accessKey.Claims[rc].(string)
|
||||
}
|
||||
openIDUserAccessKeys = madmin.OpenIDUserAccessKeys{
|
||||
MinioAccessKey: accessKey.ParentUser,
|
||||
ID: id,
|
||||
ReadableName: readableClaim,
|
||||
}
|
||||
}
|
||||
svcAccInfo := madmin.ServiceAccountInfo{
|
||||
AccessKey: accessKey.AccessKey,
|
||||
Expiration: &accessKey.Expiration,
|
||||
}
|
||||
if accessKey.IsServiceAccount() {
|
||||
openIDUserAccessKeys.ServiceAccounts = append(openIDUserAccessKeys.ServiceAccounts, svcAccInfo)
|
||||
} else {
|
||||
openIDUserAccessKeys.STSKeys = append(openIDUserAccessKeys.STSKeys, svcAccInfo)
|
||||
}
|
||||
cfgToUsersMap[matchingCfgName][accessKey.ParentUser] = openIDUserAccessKeys
|
||||
}
|
||||
|
||||
// Convert map to slice and sort
|
||||
resp := make([]madmin.ListAccessKeysOpenIDResp, 0, len(cfgToUsersMap))
|
||||
for cfgName, usersMap := range cfgToUsersMap {
|
||||
users := make([]madmin.OpenIDUserAccessKeys, 0, len(usersMap))
|
||||
for _, user := range usersMap {
|
||||
users = append(users, user)
|
||||
}
|
||||
sort.Slice(users, func(i, j int) bool {
|
||||
return users[i].MinioAccessKey < users[j].MinioAccessKey
|
||||
})
|
||||
resp = append(resp, madmin.ListAccessKeysOpenIDResp{
|
||||
ConfigName: cfgName,
|
||||
Users: users,
|
||||
})
|
||||
}
|
||||
sort.Slice(resp, func(i, j int) bool {
|
||||
return resp[i].ConfigName < resp[j].ConfigName
|
||||
})
|
||||
|
||||
data, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
writeSuccessResponseJSON(w, encryptedData)
|
||||
}
|
||||
@ -61,7 +61,7 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque
|
||||
return
|
||||
}
|
||||
|
||||
if z.IsRebalanceStarted() {
|
||||
if z.IsRebalanceStarted(ctx) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
|
||||
return
|
||||
}
|
||||
@ -258,8 +258,8 @@ func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request)
|
||||
// concurrent rebalance-start commands.
|
||||
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
|
||||
for nodeIdx, proxyEp := range globalProxyEndpoints {
|
||||
if proxyEp.Endpoint.Host == ep.Host {
|
||||
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
|
||||
if proxyEp.Host == ep.Host {
|
||||
if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -277,7 +277,7 @@ func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request)
|
||||
return
|
||||
}
|
||||
|
||||
if pools.IsRebalanceStarted() {
|
||||
if pools.IsRebalanceStarted(ctx) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
|
||||
return
|
||||
}
|
||||
@ -329,8 +329,8 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request
|
||||
// pools may temporarily have out of date info on the others.
|
||||
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
|
||||
for nodeIdx, proxyEp := range globalProxyEndpoints {
|
||||
if proxyEp.Endpoint.Host == ep.Host {
|
||||
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
|
||||
if proxyEp.Host == ep.Host {
|
||||
if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -380,14 +380,14 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request)
|
||||
func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) {
|
||||
host := env.Get("_MINIO_DECOM_ENDPOINT_HOST", defaultEndPoint.Host)
|
||||
if host == "" {
|
||||
return
|
||||
return proxy
|
||||
}
|
||||
for nodeIdx, proxyEp := range globalProxyEndpoints {
|
||||
if proxyEp.Endpoint.Host == host && !proxyEp.IsLocal {
|
||||
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
|
||||
if proxyEp.Host == host && !proxyEp.IsLocal {
|
||||
if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
return proxy
|
||||
}
|
||||
|
||||
@ -70,7 +70,7 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
|
||||
|
||||
func getSRAddOptions(r *http.Request) (opts madmin.SRAddOptions) {
|
||||
opts.ReplicateILMExpiry = r.Form.Get("replicateILMExpiry") == "true"
|
||||
return
|
||||
return opts
|
||||
}
|
||||
|
||||
// SRPeerJoin - PUT /minio/admin/v3/site-replication/join
|
||||
@ -304,7 +304,7 @@ func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Re
|
||||
}
|
||||
}
|
||||
|
||||
func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
|
||||
func parseJSONBody(ctx context.Context, body io.Reader, v any, encryptionKey string) error {
|
||||
data, err := io.ReadAll(body)
|
||||
if err != nil {
|
||||
return SRError{
|
||||
@ -422,7 +422,7 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
|
||||
func getSREditOptions(r *http.Request) (opts madmin.SREditOptions) {
|
||||
opts.DisableILMExpiryReplication = r.Form.Get("disableILMExpiryReplication") == "true"
|
||||
opts.EnableILMExpiryReplication = r.Form.Get("enableILMExpiryReplication") == "true"
|
||||
return
|
||||
return opts
|
||||
}
|
||||
|
||||
// SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit
|
||||
@ -484,7 +484,7 @@ func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
|
||||
opts.EntityValue = q.Get("entityvalue")
|
||||
opts.ShowDeleted = q.Get("showDeleted") == "true"
|
||||
opts.Metrics = q.Get("metrics") == "true"
|
||||
return
|
||||
return opts
|
||||
}
|
||||
|
||||
// SiteReplicationRemove - PUT /minio/admin/v3/site-replication/remove
|
||||
|
||||
@ -89,7 +89,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
|
||||
|
||||
// Create a policy policy
|
||||
policy := "mypolicy"
|
||||
policyBytes := []byte(fmt.Sprintf(`{
|
||||
policyBytes := fmt.Appendf(nil, `{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
@ -104,7 +104,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
|
||||
]
|
||||
}
|
||||
]
|
||||
}`, bucket))
|
||||
}`, bucket)
|
||||
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
|
||||
if err != nil {
|
||||
c.Fatalf("policy add error: %v", err)
|
||||
@ -113,7 +113,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
|
||||
userCount := 50
|
||||
accessKeys := make([]string, userCount)
|
||||
secretKeys := make([]string, userCount)
|
||||
for i := 0; i < userCount; i++ {
|
||||
for i := range userCount {
|
||||
accessKey, secretKey := mustGenerateCredentials(c)
|
||||
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
|
||||
if err != nil {
|
||||
@ -133,7 +133,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
|
||||
}
|
||||
|
||||
g := errgroup.Group{}
|
||||
for i := 0; i < userCount; i++ {
|
||||
for i := range userCount {
|
||||
g.Go(func(i int) func() error {
|
||||
return func() error {
|
||||
uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")
|
||||
|
||||
@ -24,18 +24,21 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"net/http"
|
||||
"os"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/klauspost/compress/zip"
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/auth"
|
||||
"github.com/minio/minio/internal/cachevalue"
|
||||
"github.com/minio/minio/internal/config/dns"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/mux"
|
||||
xldap "github.com/minio/pkg/v3/ldap"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
@ -64,6 +67,17 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// This API only supports removal of internal users not service accounts.
|
||||
ok, _, err = globalIAMSys.IsServiceAccount(accessKey)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if ok {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// When the user is root credential you are not allowed to
|
||||
// remove the root user. Also you cannot delete yourself.
|
||||
if accessKey == globalActiveCred.AccessKey || accessKey == cred.AccessKey {
|
||||
@ -144,9 +158,7 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
for k, v := range ldapUsers {
|
||||
allCredentials[k] = v
|
||||
}
|
||||
maps.Copy(allCredentials, ldapUsers)
|
||||

// Marshal the response
data, err := json.Marshal(allCredentials)
@@ -184,12 +196,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
return
}

checkDenyOnly := false
if name == cred.AccessKey {
// Check that there is no explicit deny - otherwise it's allowed
// to view one's own info.
checkDenyOnly = true
}
checkDenyOnly := name == cred.AccessKey

if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
@@ -480,12 +487,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
return
}

checkDenyOnly := false
if accessKey == cred.AccessKey {
// Check that there is no explicit deny - otherwise it's allowed
// to change one's own password.
checkDenyOnly = true
}
checkDenyOnly := accessKey == cred.AccessKey

if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
@@ -636,7 +638,7 @@ func (a adminAPIHandlers) TemporaryAccountInfo(w http.ResponseWriter, r *http.Re

// AddServiceAccount - PUT /minio/admin/v3/add-service-account
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r)
ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r, false)
if APIError.Code != "" {
writeErrorResponseJSON(ctx, w, APIError, r.URL)
return
@@ -676,10 +678,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
}

// Check if we are creating svc account for request sender.
isSvcAccForRequestor := false
if targetUser == requestorUser || targetUser == requestorParentUser {
isSvcAccForRequestor = true
}
isSvcAccForRequestor := targetUser == requestorUser || targetUser == requestorParentUser

// If we are creating svc account for request sender, ensure
// that targetUser is a real user (i.e. not derived
@@ -770,7 +769,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
Name: newCred.Name,
Description: newCred.Description,
Claims: opts.claims,
SessionPolicy: createReq.Policy,
SessionPolicy: madmin.SRSessionPolicy(createReq.Policy),
Status: auth.AccountOn,
Expiration: createReq.Expiration,
},
@@ -894,7 +893,7 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
Status: opts.status,
Name: opts.name,
Description: opts.description,
SessionPolicy: updateReq.NewPolicy,
SessionPolicy: madmin.SRSessionPolicy(updateReq.NewPolicy),
Expiration: updateReq.NewExpiration,
},
},
@@ -1166,6 +1165,172 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
writeSuccessNoContent(w)
}

// ListAccessKeysBulk - GET /minio/admin/v3/list-access-keys-bulk
func (a adminAPIHandlers) ListAccessKeysBulk(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

users := r.Form["users"]
isAll := r.Form.Get("all") == "true"
selfOnly := !isAll && len(users) == 0

if isAll && len(users) > 0 {
// This should be checked on client side, so return generic error
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}

// Empty user list and not self, list access keys for all users
if isAll {
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListUsersAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
} else if len(users) == 1 {
if users[0] == cred.AccessKey || users[0] == cred.ParentUser {
selfOnly = true
}
}

if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: selfOnly,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}

if selfOnly && len(users) == 0 {
selfUser := cred.AccessKey
if cred.ParentUser != "" {
selfUser = cred.ParentUser
}
users = append(users, selfUser)
}

var checkedUserList []string
if isAll {
users, err := globalIAMSys.ListUsers(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for user := range users {
checkedUserList = append(checkedUserList, user)
}
checkedUserList = append(checkedUserList, globalActiveCred.AccessKey)
} else {
for _, user := range users {
// Validate the user
_, ok := globalIAMSys.GetUser(ctx, user)
if !ok {
continue
}
checkedUserList = append(checkedUserList, user)
}
}

listType := r.Form.Get("listType")
var listSTSKeys, listServiceAccounts bool
switch listType {
case madmin.AccessKeyListUsersOnly:
listSTSKeys = false
listServiceAccounts = false
case madmin.AccessKeyListSTSOnly:
listSTSKeys = true
listServiceAccounts = false
case madmin.AccessKeyListSvcaccOnly:
listSTSKeys = false
listServiceAccounts = true
case madmin.AccessKeyListAll:
listSTSKeys = true
listServiceAccounts = true
default:
err := errors.New("invalid list type")
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
return
}

accessKeyMap := make(map[string]madmin.ListAccessKeysResp)
for _, user := range checkedUserList {
accessKeys := madmin.ListAccessKeysResp{}
if listSTSKeys {
stsKeys, err := globalIAMSys.ListSTSAccounts(ctx, user)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for _, sts := range stsKeys {
accessKeys.STSKeys = append(accessKeys.STSKeys, madmin.ServiceAccountInfo{
AccessKey: sts.AccessKey,
Expiration: &sts.Expiration,
})
}
// if only STS keys, skip if user has no STS keys
if !listServiceAccounts && len(stsKeys) == 0 {
continue
}
}

if listServiceAccounts {
serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, user)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for _, svc := range serviceAccounts {
accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{
AccessKey: svc.AccessKey,
Expiration: &svc.Expiration,
})
}
// if only service accounts, skip if user has no service accounts
if !listSTSKeys && len(serviceAccounts) == 0 {
continue
}
}
accessKeyMap[user] = accessKeys
}

data, err := json.Marshal(accessKeyMap)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, encryptedData)
}
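The handler above seals its JSON response with the requester's secret key via madmin.EncryptData, so a client must decrypt with the same secret. A rough round-trip sketch, assuming madmin-go's DecryptData counterpart takes the password plus an io.Reader (the key and payload here are placeholders):

package main

import (
	"bytes"
	"fmt"

	"github.com/minio/madmin-go/v3"
)

func main() {
	secretKey := "minioadmin"             // placeholder credential secret
	payload := []byte(`{"some-user":{}}`) // placeholder response body

	// Server side: seal the marshaled response for this caller only.
	sealed, err := madmin.EncryptData(secretKey, payload)
	if err != nil {
		panic(err)
	}
	// Client side: unseal with the same secret key.
	plain, err := madmin.DecryptData(secretKey, bytes.NewReader(sealed))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain))
}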

// AccountInfoHandler returns usage, permissions and other bucket metadata for incoming us
func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@@ -1235,18 +1400,6 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
return rd, wr
}

bucketStorageCache.InitOnce(10*time.Second,
cachevalue.Opts{ReturnLastGood: true},
func(ctx context.Context) (DataUsageInfo, error) {
ctx, done := context.WithTimeout(ctx, 2*time.Second)
defer done()

return loadDataUsageFromBackend(ctx, objectAPI)
},
)

dataUsageInfo, _ := bucketStorageCache.Get()

// If etcd, dns federation configured list buckets from etcd.
var err error
var buckets []BucketInfo
@@ -1287,7 +1440,12 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ

var buf []byte
switch {
case accountName == globalActiveCred.AccessKey:
case accountName == globalActiveCred.AccessKey || newGlobalAuthZPluginFn() != nil:
// For owner account and when plugin authZ is configured always set
// effective policy as `consoleAdmin`.
//
// In the latter case, we let the UI render everything, but individual
// actions would fail if not permitted by the external authZ service.
for _, policy := range policy.DefaultPolicies {
if policy.Name == "consoleAdmin" {
effectivePolicy = policy.Definition
@@ -1315,8 +1473,8 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
return
}
effectivePolicy = globalIAMSys.GetCombinedPolicy(policies...)

}

buf, err = json.MarshalIndent(effectivePolicy, "", " ")
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -1333,15 +1491,12 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
rd, wr := isAllowedAccess(bucket.Name)
if rd || wr {
// Fetch the data usage of the current bucket
var size uint64
var objectsCount uint64
var objectsHist, versionsHist map[string]uint64
if !dataUsageInfo.LastUpdate.IsZero() {
size = dataUsageInfo.BucketsUsage[bucket.Name].Size
objectsCount = dataUsageInfo.BucketsUsage[bucket.Name].ObjectsCount
objectsHist = dataUsageInfo.BucketsUsage[bucket.Name].ObjectSizesHistogram
versionsHist = dataUsageInfo.BucketsUsage[bucket.Name].ObjectVersionsHistogram
}
bui := globalBucketQuotaSys.GetBucketUsageInfo(ctx, bucket.Name)
size := bui.Size
objectsCount := bui.ObjectsCount
objectsHist := bui.ObjectSizesHistogram
versionsHist := bui.ObjectVersionsHistogram

// Fetch the prefix usage of the current bucket
var prefixUsage map[string]uint64
if enablePrefixUsage {
@@ -1411,6 +1566,7 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errTooManyPolicies), r.URL)
return
}
setReqInfoPolicyName(ctx, name)

policyDoc, err := globalIAMSys.InfoPolicy(name)
if err != nil {
@@ -1514,6 +1670,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ

vars := mux.Vars(r)
policyName := vars["name"]
setReqInfoPolicyName(ctx, policyName)

if err := globalIAMSys.DeletePolicy(ctx, policyName, true); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -1546,6 +1703,13 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
return
}
setReqInfoPolicyName(ctx, policyName)

// Reject policy names with commas.
if strings.Contains(policyName, ",") {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrPolicyInvalidName), r.URL)
return
}
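The comma check above appears to guard a representation detail: elsewhere in this handler file, policy lists travel as one comma-joined string (see the strings.Join and strings.Split calls in the attach/detach and import paths below), so a name containing a comma would later split into bogus entries. A sketch of the failure mode this prevents (names invented):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A mapping value as stored: policy names joined by commas.
	stored := strings.Join([]string{"readonly", "invalid,policy"}, ",")
	// Reading it back splits on every comma, corrupting the second name.
	fmt.Println(strings.Split(stored, ",")) // [readonly invalid policy]
}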

// Error out if Content-Length is missing.
if r.ContentLength <= 0 {
@@ -1611,6 +1775,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
policyName := vars["policyName"]
entityName := vars["userOrGroup"]
isGroup := vars["isGroup"] == "true"
setReqInfoPolicyName(ctx, policyName)

if !isGroup {
ok, _, err := globalIAMSys.IsTempUser(entityName)
@@ -1661,16 +1826,18 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
iamLogIf(ctx, err)
} else if foundGroupDN == nil || !underBaseDN {
err = errNoSuchGroup
} else {
entityName = foundGroupDN.NormDN
}
entityName = foundGroupDN.NormDN
} else {
var foundUserDN *xldap.DNSearchResult
if foundUserDN, err = globalIAMSys.LDAPConfig.GetValidatedDNForUsername(entityName); err != nil {
iamLogIf(ctx, err)
} else if foundUserDN == nil {
err = errNoSuchUser
} else {
entityName = foundUserDN.NormDN
}
entityName = foundUserDN.NormDN
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -1696,7 +1863,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
}))
}

// ListPolicyMappingEntities - GET /minio/admin/v3/idp/builtin/polciy-entities?policy=xxx&user=xxx&group=xxx
// ListPolicyMappingEntities - GET /minio/admin/v3/idp/builtin/policy-entities?policy=xxx&user=xxx&group=xxx
func (a adminAPIHandlers) ListPolicyMappingEntities(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

@@ -1798,6 +1965,7 @@ func (a adminAPIHandlers) AttachDetachPolicyBuiltin(w http.ResponseWriter, r *ht
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
setReqInfoPolicyName(ctx, strings.Join(addedOrRemoved, ","))

respBody := madmin.PolicyAssociationResp{
UpdatedAt: updatedAt,
@@ -1823,6 +1991,227 @@ func (a adminAPIHandlers) AttachDetachPolicyBuiltin(w http.ResponseWriter, r *ht
writeSuccessResponseJSON(w, encryptedData)
}

// RevokeTokens - POST /minio/admin/v3/revoke-tokens/{userProvider}
func (a adminAPIHandlers) RevokeTokens(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

userProvider := mux.Vars(r)["userProvider"]

user := r.Form.Get("user")
tokenRevokeType := r.Form.Get("tokenRevokeType")
fullRevoke := r.Form.Get("fullRevoke") == "true"
isTokenSelfRevoke := user == ""
if !isTokenSelfRevoke {
var err error
user, err = getUserWithProvider(ctx, userProvider, user, false)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}

if (user != "" && tokenRevokeType == "" && !fullRevoke) || (tokenRevokeType != "" && fullRevoke) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}

adminPrivilege := globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.RemoveServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
})

if !adminPrivilege || isTokenSelfRevoke {
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
if !isTokenSelfRevoke && user != parentUser {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
user = parentUser
}

// Infer token revoke type from the request if requestor is STS.
if isTokenSelfRevoke && tokenRevokeType == "" && !fullRevoke {
if cred.IsTemp() {
tokenRevokeType, _ = cred.Claims[tokenRevokeTypeClaim].(string)
}
if tokenRevokeType == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNoTokenRevokeType), r.URL)
return
}
}

err := globalIAMSys.RevokeTokens(ctx, user, tokenRevokeType)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessNoContent(w)
}

// InfoAccessKey - GET /minio/admin/v3/info-access-key?access-key=<access-key>
func (a adminAPIHandlers) InfoAccessKey(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}

cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

accessKey := mux.Vars(r)["accessKey"]
if accessKey == "" {
accessKey = cred.AccessKey
}

u, ok := globalIAMSys.GetUser(ctx, accessKey)
targetCred := u.Credentials

if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
// If requested user does not exist and requestor is not allowed to list service accounts, return access denied.
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}

requestUser := cred.AccessKey
if cred.ParentUser != "" {
requestUser = cred.ParentUser
}

if requestUser != targetCred.ParentUser {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}

if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchAccessKey), r.URL)
return
}

var (
sessionPolicy *policy.Policy
err error
userType string
)
switch {
case targetCred.IsTemp():
userType = "STS"
_, sessionPolicy, err = globalIAMSys.GetTemporaryAccount(ctx, accessKey)
if err == errNoSuchTempAccount {
err = errNoSuchAccessKey
}
case targetCred.IsServiceAccount():
userType = "Service Account"
_, sessionPolicy, err = globalIAMSys.GetServiceAccount(ctx, accessKey)
if err == errNoSuchServiceAccount {
err = errNoSuchAccessKey
}
default:
err = errNoSuchAccessKey
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

// if session policy is nil or empty, then it is implied policy
impliedPolicy := sessionPolicy == nil || (sessionPolicy.Version == "" && len(sessionPolicy.Statements) == 0)

var svcAccountPolicy policy.Policy

if !impliedPolicy {
svcAccountPolicy = *sessionPolicy
} else {
policiesNames, err := globalIAMSys.PolicyDBGet(targetCred.ParentUser, targetCred.Groups...)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
svcAccountPolicy = globalIAMSys.GetCombinedPolicy(policiesNames...)
}

policyJSON, err := json.MarshalIndent(svcAccountPolicy, "", " ")
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

var expiration *time.Time
if !targetCred.Expiration.IsZero() && !targetCred.Expiration.Equal(timeSentinel) {
expiration = &targetCred.Expiration
}

userProvider := guessUserProvider(targetCred)

infoResp := madmin.InfoAccessKeyResp{
AccessKey: accessKey,
InfoServiceAccountResp: madmin.InfoServiceAccountResp{
ParentUser: targetCred.ParentUser,
Name: targetCred.Name,
Description: targetCred.Description,
AccountStatus: targetCred.Status,
ImpliedPolicy: impliedPolicy,
Policy: string(policyJSON),
Expiration: expiration,
},

UserType: userType,
UserProvider: userProvider,
}

populateProviderInfoFromClaims(targetCred.Claims, userProvider, &infoResp)

data, err := json.Marshal(infoResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, encryptedData)
}

const (
allPoliciesFile = "policies.json"
allUsersFile = "users.json"
@@ -1852,6 +2241,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
// Get current object layer instance.
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ExportIAMAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// Initialize a zip writer which will provide a zipped content
@@ -1988,7 +2378,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
SecretKey: acc.Credentials.SecretKey,
Groups: acc.Credentials.Groups,
Claims: claims,
SessionPolicy: json.RawMessage(policyJSON),
SessionPolicy: policyJSON,
Status: acc.Credentials.Status,
Name: sa.Name,
Description: sa.Description,
@@ -2062,19 +2452,25 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {

// ImportIAM - imports all IAM info into MinIO
func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
a.importIAM(w, r, "")
}

// ImportIAMV2 - imports all IAM info into MinIO
func (a adminAPIHandlers) ImportIAMV2(w http.ResponseWriter, r *http.Request) {
a.importIAM(w, r, "v2")
}

// ImportIAM - imports all IAM info into MinIO
func (a adminAPIHandlers) importIAM(w http.ResponseWriter, r *http.Request, apiVer string) {
ctx := r.Context()

// Get current object layer instance.
objectAPI := newObjectLayerFn()
// Validate signature, permissions and get current object layer instance.
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ImportIAMAction)
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}

data, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
@@ -2086,9 +2482,12 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}

var skipped, removed, added madmin.IAMEntities
var failed madmin.IAMErrEntities

// import policies first
{

f, err := zr.Open(pathJoin(iamAssetsDir, allPoliciesFile))
switch {
case errors.Is(err, os.ErrNotExist):
@@ -2111,8 +2510,10 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
for policyName, policy := range allPolicies {
if policy.IsEmpty() {
err = globalIAMSys.DeletePolicy(ctx, policyName, true)
removed.Policies = append(removed.Policies, policyName)
} else {
_, err = globalIAMSys.SetPolicy(ctx, policyName, policy)
added.Policies = append(added.Policies, policyName)
}
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allPoliciesFile, policyName), r.URL)
@@ -2158,43 +2559,17 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
return
}

if (cred.IsTemp() || cred.IsServiceAccount()) && cred.ParentUser == accessKey {
// Incoming access key matches parent user then we should
// reject password change requests.
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAddUserInvalidArgument, err, allUsersFile, accessKey), r.URL)
return
}

// Check if accessKey has beginning and end space characters, this only applies to new users.
if !exists && hasSpaceBE(accessKey) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, err, allUsersFile, accessKey), r.URL)
return
}

checkDenyOnly := false
if accessKey == cred.AccessKey {
// Check that there is no explicit deny - otherwise it's allowed
// to change one's own password.
checkDenyOnly = true
}

if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: checkDenyOnly,
}) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAccessDenied, err, allUsersFile, accessKey), r.URL)
return
}
if _, err = globalIAMSys.CreateUser(ctx, accessKey, ureq); err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, toAdminAPIErrCode(ctx, err), err, allUsersFile, accessKey), r.URL)
return
failed.Users = append(failed.Users, madmin.IAMErrEntity{Name: accessKey, Error: err})
} else {
added.Users = append(added.Users, accessKey)
}

}
}
}
@@ -2230,8 +2605,9 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
}
}
if _, gerr := globalIAMSys.AddUsersToGroup(ctx, group, grpInfo.Members); gerr != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, gerr, allGroupsFile, group), r.URL)
return
failed.Groups = append(failed.Groups, madmin.IAMErrEntity{Name: group, Error: err})
} else {
added.Groups = append(added.Groups, group)
}
}
}
@@ -2260,7 +2636,8 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {

// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
err := globalIAMSys.NormalizeLDAPAccessKeypairs(ctx, serviceAcctReqs)
skippedAccessKeys, err := globalIAMSys.NormalizeLDAPAccessKeypairs(ctx, serviceAcctReqs)
skipped.ServiceAccounts = append(skipped.ServiceAccounts, skippedAccessKeys...)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, ""), r.URL)
return
@@ -2268,6 +2645,9 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
}

for user, svcAcctReq := range serviceAcctReqs {
if slices.Contains(skipped.ServiceAccounts, user) {
continue
}
var sp *policy.Policy
var err error
if len(svcAcctReq.SessionPolicy) > 0 {
@@ -2283,17 +2663,6 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
return
}
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.CreateServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAccessDenied, err, allSvcAcctsFile, user), r.URL)
return
}
updateReq := true
_, _, err = globalIAMSys.GetServiceAccount(ctx, svcAcctReq.AccessKey)
if err != nil {
@@ -2325,10 +2694,10 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
}

if _, _, err = globalIAMSys.NewServiceAccount(ctx, svcAcctReq.Parent, svcAcctReq.Groups, opts); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
return
failed.ServiceAccounts = append(failed.ServiceAccounts, madmin.IAMErrEntity{Name: user, Error: err})
} else {
added.ServiceAccounts = append(added.ServiceAccounts, user)
}

}
}
}
@@ -2365,8 +2734,15 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
return
}
if _, err := globalIAMSys.PolicyDBSet(ctx, u, pm.Policies, regUser, false); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, userPolicyMappingsFile, u), r.URL)
return
failed.UserPolicies = append(
failed.UserPolicies,
madmin.IAMErrPolicyEntity{
Name: u,
Policies: strings.Split(pm.Policies, ","),
Error: err,
})
} else {
added.UserPolicies = append(added.UserPolicies, map[string][]string{u: strings.Split(pm.Policies, ",")})
}
}
}
@@ -2396,7 +2772,8 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
isGroup := true
err := globalIAMSys.NormalizeLDAPMappingImport(ctx, isGroup, grpPolicyMap)
skippedDN, err := globalIAMSys.NormalizeLDAPMappingImport(ctx, isGroup, grpPolicyMap)
skipped.Groups = append(skipped.Groups, skippedDN...)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, ""), r.URL)
return
@@ -2404,9 +2781,19 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
}

for g, pm := range grpPolicyMap {
if slices.Contains(skipped.Groups, g) {
continue
}
if _, err := globalIAMSys.PolicyDBSet(ctx, g, pm.Policies, unknownIAMUserType, true); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, g), r.URL)
return
failed.GroupPolicies = append(
failed.GroupPolicies,
madmin.IAMErrPolicyEntity{
Name: g,
Policies: strings.Split(pm.Policies, ","),
Error: err,
})
} else {
added.GroupPolicies = append(added.GroupPolicies, map[string][]string{g: strings.Split(pm.Policies, ",")})
}
}
}
@@ -2436,13 +2823,17 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
isGroup := true
err := globalIAMSys.NormalizeLDAPMappingImport(ctx, !isGroup, userPolicyMap)
skippedDN, err := globalIAMSys.NormalizeLDAPMappingImport(ctx, !isGroup, userPolicyMap)
skipped.Users = append(skipped.Users, skippedDN...)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, ""), r.URL)
return
}
}
for u, pm := range userPolicyMap {
if slices.Contains(skipped.Users, u) {
continue
}
// disallow setting policy mapping if user is a temporary user
ok, _, err := globalIAMSys.IsTempUser(u)
if err != nil && err != errNoSuchUser {
@@ -2455,19 +2846,43 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
}

if _, err := globalIAMSys.PolicyDBSet(ctx, u, pm.Policies, stsUser, false); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, u), r.URL)
return
failed.STSPolicies = append(
failed.STSPolicies,
madmin.IAMErrPolicyEntity{
Name: u,
Policies: strings.Split(pm.Policies, ","),
Error: err,
})
} else {
added.STSPolicies = append(added.STSPolicies, map[string][]string{u: strings.Split(pm.Policies, ",")})
}
}
}
}

if apiVer == "v2" {
iamr := madmin.ImportIAMResult{
Skipped: skipped,
Removed: removed,
Added: added,
Failed: failed,
}

b, err := json.Marshal(iamr)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, b)
}
}

func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) error {
if exp == nil || exp.IsZero() || exp.Equal(timeSentinel) {
return nil
}
dur := exp.Sub(time.Now())
dur := time.Until(*exp)
if dur <= 0 {
return errors.New("unsupported expiration time")
}
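time.Until(*exp), adopted in the hunk above, is the idiomatic spelling of exp.Sub(time.Now()); both yield the signed duration from now until the expiry, and the rewrite is the form static analyzers such as staticcheck suggest. A minimal sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	exp := time.Now().Add(90 * time.Second)
	// Old and new spellings compute the same duration.
	fmt.Println(exp.Sub(time.Now()) > 0) // true while exp is in the future
	fmt.Println(time.Until(exp) > 0)     // same check, preferred form
}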
@@ -2475,7 +2890,7 @@ func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) e
return nil
}

func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials, newServiceAccountOpts, madmin.AddServiceAccountReq, string, APIError) {
func commonAddServiceAccount(r *http.Request, ldap bool) (context.Context, auth.Credentials, newServiceAccountOpts, madmin.AddServiceAccountReq, string, APIError) {
ctx := r.Context()

// Get current object layer instance.
@@ -2533,7 +2948,7 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
name: createReq.Name,
description: description,
expiration: createReq.Expiration,
claims: make(map[string]interface{}),
claims: make(map[string]any),
}

condValues := getConditionValues(r, "", cred)
@@ -2542,6 +2957,14 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", toAdminAPIErr(ctx, err)
}

denyOnly := (targetUser == cred.AccessKey || targetUser == cred.ParentUser)
if ldap && !denyOnly {
res, _ := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
if res != nil && res.NormDN == cred.ParentUser {
denyOnly = true
}
}

// Check if action is allowed if creating access key for another user
// Check if action is explicitly denied if for self
if !globalIAMSys.IsAllowed(policy.Args{
@@ -2551,7 +2974,7 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
ConditionValues: condValues,
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: (targetUser == cred.AccessKey || targetUser == cred.ParentUser),
DenyOnly: denyOnly,
}) {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(ErrAccessDenied)
}
@@ -2568,3 +2991,10 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials

return ctx, cred, opts, createReq, targetUser, APIError{}
}

// setReqInfoPolicyName will set the given policyName as a tag on the context's request info,
// so that it appears in audit logs.
func setReqInfoPolicyName(ctx context.Context, policyName string) {
reqInfo := logger.GetReqInfo(ctx)
reqInfo.SetTags("policyName", policyName)
}

@@ -28,6 +28,7 @@ import (
"net/http"
"net/url"
"runtime"
"slices"
"strings"
"testing"
"time"
@@ -159,7 +160,7 @@ func (s *TestSuiteIAM) SetUpSuite(c *check) {
}

func (s *TestSuiteIAM) RestartIAMSuite(c *check) {
s.TestSuiteCommon.RestartTestServer(c)
s.RestartTestServer(c)

s.iamSetup(c)
}
@@ -207,6 +208,8 @@ func TestIAMInternalIDPServerSuite(t *testing.T) {
suite.TestGroupAddRemove(c)
suite.TestServiceAccountOpsByAdmin(c)
suite.TestServiceAccountPrivilegeEscalationBug(c)
suite.TestServiceAccountPrivilegeEscalationBug2_2025_10_15(c, true)
suite.TestServiceAccountPrivilegeEscalationBug2_2025_10_15(c, false)
suite.TestServiceAccountOpsByUser(c)
suite.TestServiceAccountDurationSecondsCondition(c)
suite.TestAddServiceAccountPerms(c)
@@ -331,22 +334,30 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {

// 2.2 create and associate policy to user
policy := "mypolicy-test-user-update"
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
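The recurring test change here swaps []byte(fmt.Sprintf(...)) for fmt.Appendf(nil, ...), which formats straight into a fresh byte slice (fmt.Appendf exists since Go 1.19); note the updated policies also pass bucket twice to fill the extra %s verb. A small sketch with an illustrative format string:

package main

import "fmt"

func main() {
	bucket := "testbucket" // illustrative name
	// Old: format to a string, then copy into a []byte.
	a := []byte(fmt.Sprintf("arn:aws:s3:::%s/%s", bucket, "obj"))
	// New: format directly into a byte slice appended to nil.
	b := fmt.Appendf(nil, "arn:aws:s3:::%s/%s", bucket, "obj")
	fmt.Println(string(a) == string(b)) // true
}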
@@ -442,7 +453,7 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::testbucket/*"
"arn:aws:s3:::testbucket"
]
}
]
@@ -553,22 +564,30 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {

// 1. Create a policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -663,22 +682,30 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
c.Fatalf("bucket creat error: %v", err)
}

policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket)

// Check that default policies can be overwritten.
err = s.adm.AddCannedPolicy(ctx, "readwrite", policyBytes)
@@ -691,6 +718,12 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
c.Fatalf("policy info err: %v", err)
}

// Check that policy with comma is rejected.
err = s.adm.AddCannedPolicy(ctx, "invalid,policy", policyBytes)
if err == nil {
c.Fatalf("invalid policy created successfully")
}

infoStr := string(info)
if !strings.Contains(infoStr, `"s3:PutObject"`) || !strings.Contains(infoStr, ":"+bucket+"/") {
c.Fatalf("policy contains unexpected content!")
@@ -708,22 +741,30 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
}

policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -770,8 +811,9 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
if err != nil {
c.Fatalf("group list err: %v", err)
}
if !set.CreateStringSet(groups...).Contains(group) {
c.Fatalf("created group not present!")
expected := []string{group}
if !slices.Equal(groups, expected) {
c.Fatalf("expected group listing: %v, got: %v", expected, groups)
}
groupInfo, err := s.adm.GetGroupDescription(ctx, group)
if err != nil {
@@ -871,22 +913,30 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {

// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -947,7 +997,7 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {

// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@@ -961,16 +1011,24 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -1037,22 +1095,30 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {

// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject"
],
"Resource": [
"arn:aws:s3:::%s/*"
]
}
]
}`, bucket))
}`, bucket, bucket)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -1185,6 +1251,108 @@ func (s *TestSuiteIAM) TestServiceAccountPrivilegeEscalationBug(c *check) {
}
}

func (s *TestSuiteIAM) TestServiceAccountPrivilegeEscalationBug2_2025_10_15(c *check, forRoot bool) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()

for i := range 3 {
err := s.client.MakeBucket(ctx, fmt.Sprintf("bucket%d", i+1), minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
defer func(i int) {
_ = s.client.RemoveBucket(ctx, fmt.Sprintf("bucket%d", i+1))
}(i)
}

allow2BucketsPolicyBytes := []byte(`{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "ListBucket1AndBucket2",
"Effect": "Allow",
"Action": ["s3:ListBucket"],
"Resource": ["arn:aws:s3:::bucket1", "arn:aws:s3:::bucket2"]
},
{
"Sid": "ReadWriteBucket1AndBucket2Objects",
"Effect": "Allow",
"Action": [
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:GetObject",
"s3:GetObjectVersion",
"s3:PutObject"
],
"Resource": ["arn:aws:s3:::bucket1/*", "arn:aws:s3:::bucket2/*"]
}
]
}`)

if forRoot {
// Create a service account for the root user.
_, err := s.adm.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: allow2BucketsPolicyBytes,
AccessKey: "restricted",
SecretKey: "restricted123",
})
if err != nil {
c.Fatalf("could not create service account")
}
defer func() {
_ = s.adm.DeleteServiceAccount(ctx, "restricted")
}()
} else {
// Create a regular user and attach consoleAdmin policy
err := s.adm.AddUser(ctx, "foobar", "foobar123")
if err != nil {
c.Fatalf("could not create user")
}

_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{"consoleAdmin"},
User: "foobar",
})
if err != nil {
c.Fatalf("could not attach policy")
}

// Create a service account for the regular user.
_, err = s.adm.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: allow2BucketsPolicyBytes,
TargetUser: "foobar",
AccessKey: "restricted",
SecretKey: "restricted123",
})
if err != nil {
c.Fatalf("could not create service account: %v", err)
}
defer func() {
_ = s.adm.DeleteServiceAccount(ctx, "restricted")
_ = s.adm.RemoveUser(ctx, "foobar")
}()
}
restrictedClient := s.getUserClient(c, "restricted", "restricted123", "")

buckets, err := restrictedClient.ListBuckets(ctx)
if err != nil {
c.Fatalf("err fetching buckets %s", err)
}
if len(buckets) != 2 || buckets[0].Name != "bucket1" || buckets[1].Name != "bucket2" {
c.Fatalf("restricted service account should only have access to bucket1 and bucket2")
}

// Try to escalate privileges
restrictedAdmClient := s.getAdminClient(c, "restricted", "restricted123", "")
_, err = restrictedAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
AccessKey: "newroot",
SecretKey: "newroot123",
})
if err == nil {
c.Fatalf("restricted service account was able to create service account bypassing sub-policy!")
}
}

func (s *TestSuiteIAM) SetUpAccMgmtPlugin(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
@@ -1303,7 +1471,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
svcAK, svcSK := mustGenerateCredentials(c)

// This policy does not allow listing objects.
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@@ -1317,7 +1485,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
]
}
]
}`, bucket))
}`, bucket)
cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: policyBytes,
TargetUser: accessKey,
@@ -1494,7 +1662,7 @@ func (c *check) mustDownload(ctx context.Context, client *minio.Client, bucket s
func (c *check) mustUploadReturnVersions(ctx context.Context, client *minio.Client, bucket string) []string {
c.Helper()
versions := []string{}
for i := 0; i < 5; i++ {
for range 5 {
ui, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
if err != nil {
c.Fatalf("upload did not succeed got %#v", err)
@@ -1563,7 +1731,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
svcAK, svcSK := mustGenerateCredentials(c)

// This policy does not allow listing objects.
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@@ -1577,7 +1745,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
]
}
]
}`, bucket))
}`, bucket)
cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: policyBytes,
TargetUser: accessKey,
@@ -1591,7 +1759,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
c.mustNotListObjects(ctx, svcClient, bucket)

// This policy allows listing objects.
newPolicyBytes := []byte(fmt.Sprintf(`{
newPolicyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@@ -1600,11 +1768,11 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::%s/*"
"arn:aws:s3:::%s"
]
}
]
}`, bucket))
}`, bucket)
err = madmClient.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
NewPolicy: newPolicyBytes,
})

@@ -49,6 +49,7 @@ import (
"github.com/klauspost/compress/zip"
"github.com/minio/madmin-go/v3"
"github.com/minio/madmin-go/v3/estream"
"github.com/minio/madmin-go/v3/logger/log"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/dsync"
@@ -59,7 +60,6 @@ import (
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v3/logger/message/log"
xnet "github.com/minio/pkg/v3/net"
"github.com/minio/pkg/v3/policy"
"github.com/secure-io/sio-go"
@@ -93,11 +93,18 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
return
}

if globalInplaceUpdateDisabled || currentReleaseTime.IsZero() {
if globalInplaceUpdateDisabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}

if currentReleaseTime.IsZero() || currentReleaseTime.Equal(timeSentinel) {
apiErr := errorCodes.ToAPIErr(ErrMethodNotAllowed)
apiErr.Description = fmt.Sprintf("unable to perform in-place update, release time is unrecognized: %s", currentReleaseTime)
writeErrorResponseJSON(ctx, w, apiErr, r.URL)
return
}

vars := mux.Vars(r)
updateURL := vars["updateURL"]
dryRun := r.Form.Get("dry-run") == "true"
@@ -110,6 +117,11 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
}
}

local := globalLocalNodeName
if local == "" {
local = "127.0.0.1"
}

u, err := url.Parse(updateURL)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -128,6 +140,39 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
return
}

updateStatus := madmin.ServerUpdateStatusV2{
DryRun: dryRun,
Results: make([]madmin.ServerPeerUpdateStatus, 0, len(globalNotificationSys.allPeerClients)),
}
peerResults := make(map[string]madmin.ServerPeerUpdateStatus, len(globalNotificationSys.allPeerClients))
failedClients := make(map[int]bool, len(globalNotificationSys.allPeerClients))

if lrTime.Sub(currentReleaseTime) <= 0 {
updateStatus.Results = append(updateStatus.Results, madmin.ServerPeerUpdateStatus{
Host: local,
Err: fmt.Sprintf("server is running the latest version: %s", Version),
CurrentVersion: Version,
})

for _, client := range globalNotificationSys.peerClients {
updateStatus.Results = append(updateStatus.Results, madmin.ServerPeerUpdateStatus{
Host: client.String(),
Err: fmt.Sprintf("server is running the latest version: %s", Version),
CurrentVersion: Version,
})
}

// Marshal API response
jsonBytes, err := json.Marshal(updateStatus)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, jsonBytes)
return
}

u.Path = path.Dir(u.Path) + SlashSeparator + releaseInfo
// Download Binary Once
binC, bin, err := downloadBinary(u, mode)
@@ -137,16 +182,6 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
return
}

updateStatus := madmin.ServerUpdateStatusV2{DryRun: dryRun}
peerResults := make(map[string]madmin.ServerPeerUpdateStatus)

local := globalLocalNodeName
if local == "" {
local = "127.0.0.1"
}

failedClients := make(map[int]struct{})

if globalIsDistErasure {
// Push binary to other servers
for idx, nerr := range globalNotificationSys.VerifyBinary(ctx, u, sha256Sum, releaseInfo, binC) {
@@ -156,7 +191,7 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
Err: nerr.Err.Error(),
CurrentVersion: Version,
}
failedClients[idx] = struct{}{}
failedClients[idx] = true
} else {
peerResults[nerr.Host.String()] = madmin.ServerPeerUpdateStatus{
Host: nerr.Host.String(),
@@ -167,25 +202,17 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
}
}

if lrTime.Sub(currentReleaseTime) > 0 {
if err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin)); err != nil {
peerResults[local] = madmin.ServerPeerUpdateStatus{
Host: local,
Err: err.Error(),
CurrentVersion: Version,
}
} else {
peerResults[local] = madmin.ServerPeerUpdateStatus{
Host: local,
CurrentVersion: Version,
UpdatedVersion: lrTime.Format(MinioReleaseTagTimeLayout),
}
if err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin)); err != nil {
peerResults[local] = madmin.ServerPeerUpdateStatus{
Host: local,
Err: err.Error(),
CurrentVersion: Version,
}
} else {
peerResults[local] = madmin.ServerPeerUpdateStatus{
Host: local,
Err: fmt.Sprintf("server is already running the latest version: %s", Version),
CurrentVersion: Version,
UpdatedVersion: lrTime.Format(MinioReleaseTagTimeLayout),
}
}

@@ -193,8 +220,7 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
if globalIsDistErasure {
ng := WithNPeers(len(globalNotificationSys.peerClients))
for idx, client := range globalNotificationSys.peerClients {
_, ok := failedClients[idx]
if ok {
if failedClients[idx] {
continue
}
client := client
@@ -240,14 +266,14 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
startTime := time.Now().Add(restartUpdateDelay)
ng := WithNPeers(len(globalNotificationSys.peerClients))
for idx, client := range globalNotificationSys.peerClients {
_, ok := failedClients[idx]
if ok {
if failedClients[idx] {
continue
}
client := client
ng.Go(ctx, func() error {
prs, ok := peerResults[client.String()]
if ok && prs.CurrentVersion != prs.UpdatedVersion && prs.UpdatedVersion != "" {
// We restart only on success, not for any failures.
if ok && prs.Err == "" {
return client.SignalService(serviceRestart, "", dryRun, &startTime)
}
return nil
@@ -284,7 +310,9 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
writeSuccessResponseJSON(w, jsonBytes)

if !dryRun {
if lrTime.Sub(currentReleaseTime) > 0 {
prs, ok := peerResults[local]
// We restart only on success, not for any failures.
if ok && prs.Err == "" {
globalServiceSignalCh <- serviceRestart
}
}
@@ -801,7 +829,7 @@ func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request)
}

// Flush before waiting for next...
w.(http.Flusher).Flush()
xhttp.Flush(w)

select {
case <-ticker.C:
@@ -848,9 +876,10 @@ func (a adminAPIHandlers) DataUsageInfoHandler(w http.ResponseWriter, r *http.Re
}

func lriToLockEntry(l lockRequesterInfo, now time.Time, resource, server string) *madmin.LockEntry {
t := time.Unix(0, l.Timestamp)
entry := &madmin.LockEntry{
Timestamp: l.Timestamp,
Elapsed: now.Sub(l.Timestamp),
Timestamp: t,
Elapsed: now.Sub(t),
Resource: resource,
ServerList: []string{server},
Source: l.Source,
@ -925,7 +954,7 @@ func (a adminAPIHandlers) ForceUnlockHandler(w http.ResponseWriter, r *http.Requ
|
||||
|
||||
var args dsync.LockArgs
|
||||
var lockers []dsync.NetLocker
|
||||
for _, path := range strings.Split(vars["paths"], ",") {
|
||||
for path := range strings.SplitSeq(vars["paths"], ",") {
|
||||
if path == "" {
|
||||
continue
|
||||
}
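A minimal sketch (not part of this changeset) of the strings.SplitSeq pattern adopted above: in Go 1.24, SplitSeq returns an iterator over the split segments instead of allocating an intermediate slice, so the handler can range over lock paths directly.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Iterate comma-separated lock paths without building a slice first.
	for path := range strings.SplitSeq("bucket/a,bucket/b,,bucket/c", ",") {
		if path == "" {
			continue // skip empty segments, as the handler above does
		}
		fmt.Println(path)
	}
}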
@@ -1035,7 +1064,7 @@ func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.R
// Start profiling on remote servers.
var hostErrs []NotificationPeerErr
for _, profiler := range profiles {
hostErrs = append(hostErrs, globalNotificationSys.StartProfiling(profiler)...)
hostErrs = append(hostErrs, globalNotificationSys.StartProfiling(ctx, profiler)...)

// Start profiling locally as well.
prof, err := startProfiler(profiler)
@@ -1116,7 +1145,11 @@ func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request)

// Start profiling on remote servers.
for _, profiler := range profiles {
globalNotificationSys.StartProfiling(profiler)
// Limit start time to max 10s.
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
globalNotificationSys.StartProfiling(ctx, profiler)
// StartProfiling blocks, so we can cancel now.
cancel()

// Start profiling locally as well.
prof, err := startProfiler(profiler)
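A standalone sketch of the timeout pattern introduced in the ProfileHandler change above; doWork is a hypothetical stand-in for the blocking StartProfiling peer call.

package main

import (
	"context"
	"fmt"
	"time"
)

// doWork stands in for a blocking call such as
// globalNotificationSys.StartProfiling(ctx, profiler).
func doWork(ctx context.Context) error {
	select {
	case <-time.After(2 * time.Second):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// Bound the blocking call to at most 10s; because the call blocks
	// until done, cancel can be invoked immediately after it returns.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	err := doWork(ctx)
	cancel()
	fmt.Println("done:", err)
}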
@@ -1131,6 +1164,10 @@ func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request)
for {
select {
case <-ctx.Done():
// Stop remote profiles
go globalNotificationSys.DownloadProfilingData(GlobalContext, io.Discard)

// Stop local
globalProfilerMu.Lock()
defer globalProfilerMu.Unlock()
for k, v := range globalProfiler {
@@ -1156,7 +1193,7 @@ type dummyFileInfo struct {
mode os.FileMode
modTime time.Time
isDir bool
sys interface{}
sys any
}

func (f dummyFileInfo) Name() string { return f.name }
@@ -1164,7 +1201,7 @@ func (f dummyFileInfo) Size() int64 { return f.size }
func (f dummyFileInfo) Mode() os.FileMode { return f.mode }
func (f dummyFileInfo) ModTime() time.Time { return f.modTime }
func (f dummyFileInfo) IsDir() bool { return f.isDir }
func (f dummyFileInfo) Sys() interface{} { return f.sys }
func (f dummyFileInfo) Sys() any { return f.sys }

// DownloadProfilingHandler - POST /minio/admin/v3/profiling/download
// ----------
@@ -1206,17 +1243,17 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
if hip.objPrefix != "" {
// Bucket is required if object-prefix is given
err = ErrHealMissingBucket
return
return hip, err
}
} else if isReservedOrInvalidBucket(hip.bucket, false) {
err = ErrInvalidBucketName
return
return hip, err
}

// empty prefix is valid.
if !IsValidObjectPrefix(hip.objPrefix) {
err = ErrInvalidObjectName
return
return hip, err
}

if len(qParams[mgmtClientToken]) > 0 {
@@ -1238,7 +1275,7 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
if (hip.forceStart && hip.forceStop) ||
(hip.clientToken != "" && (hip.forceStart || hip.forceStop)) {
err = ErrInvalidRequest
return
return hip, err
}

// ignore body if clientToken is provided
@@ -1247,12 +1284,12 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
if jerr != nil {
adminLogIf(GlobalContext, jerr, logger.ErrorKind)
err = ErrRequestBodyParse
return
return hip, err
}
}

err = ErrNone
return
return hip, err
}

// HealHandler - POST /minio/admin/v3/heal/
@@ -1283,7 +1320,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
}

// Analyze the heal token and route the request accordingly
token, success := proxyRequestByToken(ctx, w, r, hip.clientToken)
token, _, success := proxyRequestByToken(ctx, w, r, hip.clientToken, false)
if success {
return
}
@@ -1322,7 +1359,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case hr := <-respCh:
switch hr.apiErr {
case noError:
@@ -1330,7 +1367,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write(hr.respBytes); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
} else {
writeSuccessResponseJSON(w, hr.respBytes)
}
@@ -1357,7 +1394,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write(errorRespJSON); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
break forLoop
}
@@ -1370,7 +1407,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
if exists && !nh.hasEnded() && len(nh.currentStatus.Items) > 0 {
clientToken := nh.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s:%d", nh.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
clientToken = fmt.Sprintf("%s%s%d", nh.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
}
b, err := json.Marshal(madmin.HealStartSuccess{
ClientToken: clientToken,
@@ -1574,7 +1611,6 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request)
if err != nil || ctx.Err() != nil || totalRx > 100*humanize.GiByte {
break
}

}
w.WriteHeader(http.StatusOK)
}
@@ -1803,7 +1839,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
return
}
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case result, ok := <-ch:
if !ok {
return
@@ -1812,7 +1848,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
return
}
prevResult = result
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
}
}
@@ -1921,7 +1957,7 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R
if err := enc.Encode(madmin.DriveSpeedTestResult{}); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case result, ok := <-ch:
if !ok {
return
@@ -1929,7 +1965,7 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R
if err := enc.Encode(result); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
}
}
@@ -1986,7 +2022,7 @@ func extractTraceOptions(r *http.Request) (opts madmin.ServiceTraceOpts, err err
opts.OS = true
// Older mc - cannot deal with more types...
}
return
return opts, err
}

// TraceHandler - POST /minio/admin/v3/trace
@@ -2046,7 +2082,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
grid.PutByteBuffer(entry)
if len(traceCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
case <-keepAliveTicker.C:
if len(traceCh) > 0 {
@@ -2055,7 +2091,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case <-ctx.Done():
return
}
@@ -2147,7 +2183,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
grid.PutByteBuffer(log)
if len(logCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
case <-keepAliveTicker.C:
if len(logCh) > 0 {
@@ -2156,7 +2192,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case <-ctx.Done():
return
}
@@ -2330,6 +2366,7 @@ func getPoolsInfo(ctx context.Context, allDisks []madmin.Disk) (map[int]map[int]
}

func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) madmin.InfoMessage {
const operationTimeout = 10 * time.Second
ldap := madmin.LDAP{}
if globalIAMSys.LDAPConfig.Enabled() {
ldapConn, err := globalIAMSys.LDAPConfig.LDAP.Connect()
@@ -2370,7 +2407,9 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
mode = madmin.ItemOnline

// Load data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
ctx2, cancel := context.WithTimeout(ctx, operationTimeout)
dataUsageInfo, err := loadDataUsageFromBackend(ctx2, objectAPI)
cancel()
if err == nil {
buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
@@ -2404,18 +2443,24 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
}

if pools {
poolsInfo, _ = getPoolsInfo(ctx, allDisks)
ctx2, cancel := context.WithTimeout(ctx, operationTimeout)
poolsInfo, _ = getPoolsInfo(ctx2, allDisks)
cancel()
}
}

domain := globalDomainNames
services := madmin.Services{
KMSStatus: fetchKMSStatus(ctx),
LDAP: ldap,
Logger: log,
Audit: audit,
Notifications: notifyTarget,
}
{
ctx2, cancel := context.WithTimeout(ctx, operationTimeout)
services.KMSStatus = fetchKMSStatus(ctx2)
cancel()
}
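A sketch of the pattern getServerInfo now applies to each sub-operation above (loadDataUsageFromBackend, getPoolsInfo, fetchKMSStatus): derive a bounded child context per call. The generic helper below is hypothetical, not in the source.

package main

import (
	"context"
	"fmt"
	"time"
)

// withOpTimeout is a hypothetical helper capturing the repeated
// ctx2/cancel pattern: run fn under a per-operation deadline, then
// release the timer resources.
func withOpTimeout[T any](ctx context.Context, d time.Duration, fn func(context.Context) T) T {
	ctx2, cancel := context.WithTimeout(ctx, d)
	defer cancel()
	return fn(ctx2)
}

func main() {
	status := withOpTimeout(context.Background(), 10*time.Second, func(ctx context.Context) string {
		// stand-in for fetchKMSStatus(ctx) or loadDataUsageFromBackend(ctx, ...)
		return "online"
	})
	fmt.Println(status)
}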

return madmin.InfoMessage{
Mode: string(mode),
@@ -2631,7 +2676,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur
// disk metrics are already included under drive info of each server
getRealtimeMetrics := func() *madmin.RealtimeMetrics {
var m madmin.RealtimeMetrics
var types madmin.MetricType = madmin.MetricsAll &^ madmin.MetricsDisk
types := madmin.MetricsAll &^ madmin.MetricsDisk
mLocal := collectLocalMetrics(types, collectMetricsOpts{})
m.Merge(&mLocal)
cctx, cancel := context.WithTimeout(healthCtx, time.Second/2)
@@ -2675,7 +2720,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur
poolsArgs := re.ReplaceAllString(cmdLine, `$3`)
var anonPools []string

if !(strings.Contains(poolsArgs, "{") && strings.Contains(poolsArgs, "}")) {
if !strings.Contains(poolsArgs, "{") || !strings.Contains(poolsArgs, "}") {
// No ellipses pattern. Anonymize host name from every pool arg
pools := strings.Fields(poolsArgs)
anonPools = make([]string, len(pools))
@@ -2917,13 +2962,13 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
}
if len(healthInfoCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
case <-ticker.C:
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case <-healthCtx.Done():
return
}
@@ -3049,7 +3094,7 @@ func targetStatus(ctx context.Context, h logger.Target) madmin.Status {
return madmin.Status{Status: string(madmin.ItemOffline)}
}

// fetchLoggerDetails return log info
// fetchLoggerInfo return log info
func fetchLoggerInfo(ctx context.Context) ([]madmin.Logger, []madmin.Audit) {
var loggerInfo []madmin.Logger
var auditloggerInfo []madmin.Audit
@@ -3375,7 +3420,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
}

// save the format.json as part of inspect by default
if !(volume == minioMetaBucket && file == formatConfigFile) {
if volume != minioMetaBucket || file != formatConfigFile {
err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
}
if !errors.Is(err, errFileNotFound) {

@@ -263,7 +263,7 @@ func buildAdminRequest(queryVal url.Values, method, path string,
}

func TestAdminServerInfo(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()

adminTestBed, err := prepareAdminErasureTestBed(ctx)
@@ -402,7 +402,7 @@ func (b byResourceUID) Less(i, j int) bool {
func TestTopLockEntries(t *testing.T) {
locksHeld := make(map[string][]lockRequesterInfo)
var owners []string
for i := 0; i < 4; i++ {
for i := range 4 {
owners = append(owners, fmt.Sprintf("node-%d", i))
}
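The loop rewrites in this test use Go 1.22's range-over-integer form; a tiny standalone equivalent:

package main

import "fmt"

func main() {
	// Go 1.22+: ranging over an integer yields 0..n-1, replacing the
	// classic three-clause loop on the old side of the diff.
	for i := range 4 {
		fmt.Printf("node-%d\n", i)
	}
}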

@@ -410,7 +410,7 @@ func TestTopLockEntries(t *testing.T) {
// request UID, but 10 different resource names associated with it.
var lris []lockRequesterInfo
uuid := mustGetUUID()
for i := 0; i < 10; i++ {
for i := range 10 {
resource := fmt.Sprintf("bucket/delete-object-%d", i)
lri := lockRequesterInfo{
Name: resource,
@@ -425,7 +425,7 @@ func TestTopLockEntries(t *testing.T) {
}

// Add a few concurrent read locks to the mix
for i := 0; i < 50; i++ {
for i := range 50 {
resource := fmt.Sprintf("bucket/get-object-%d", i)
lri := lockRequesterInfo{
Name: resource,
@@ -463,6 +463,7 @@ func TestTopLockEntries(t *testing.T) {
Owner: lri.Owner,
ID: lri.UID,
Quorum: lri.Quorum,
Timestamp: time.Unix(0, lri.Timestamp),
})
}


@@ -22,6 +22,7 @@ import (
"encoding/json"
"errors"
"fmt"
"maps"
"net/http"
"sort"
"sync"
@@ -260,7 +261,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
} else {
clientToken := he.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s:%d", he.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
clientToken = fmt.Sprintf("%s%s%d", he.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
}

hsp = madmin.HealStopSuccess{
@@ -331,7 +332,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay

clientToken := h.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s:%d", h.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
clientToken = fmt.Sprintf("%s%s%d", h.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
}

if h.clientToken == bgHealingUUID {
@@ -520,9 +521,7 @@ func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {

// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
for k, v := range h.scannedItemsMap {
retMap[k] = v
}
maps.Copy(retMap, h.scannedItemsMap)

return retMap
}
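The maps.Copy rewrites above replace hand-written copy loops; a minimal standalone sketch:

package main

import (
	"fmt"
	"maps"
)

func main() {
	src := map[string]int64{"heal-object": 3, "heal-bucket": 1}
	// maps.Copy (Go 1.21) inserts every key/value of src into dst,
	// doing what the removed for-range loop did.
	dst := make(map[string]int64, len(src))
	maps.Copy(dst, src)
	fmt.Println(dst)
}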
@@ -534,9 +533,7 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {

// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
for k, v := range h.healedItemsMap {
retMap[k] = v
}
maps.Copy(retMap, h.healedItemsMap)

return retMap
}
@@ -549,9 +546,7 @@ func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {

// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
for k, v := range h.healFailedItemsMap {
retMap[k] = v
}
maps.Copy(retMap, h.healFailedItemsMap)

return retMap
}
@@ -761,6 +756,15 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
return nil
}

countOKDrives := func(drives []madmin.HealDriveInfo) (count int) {
for _, drive := range drives {
if drive.State == madmin.DriveStateOk {
count++
}
}
return count
}

// task queued, now wait for the response.
select {
case res := <-task.respCh:
@@ -781,6 +785,11 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
if res.err != nil {
res.result.Detail = res.err.Error()
}
if res.result.ParityBlocks > 0 && res.result.DataBlocks > 0 && res.result.DataBlocks > res.result.ParityBlocks {
if got := countOKDrives(res.result.After.Drives); got < res.result.ParityBlocks {
res.result.Detail = fmt.Sprintf("quorum loss - expected %d minimum, got drive states in OK %d", res.result.ParityBlocks, got)
}
}
return h.pushHealResultItem(res.result)
case <-h.ctx.Done():
return nil
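A standalone sketch of the quorum annotation added above, with hypothetical drive states ("ok" mirrors what madmin.DriveStateOk is assumed to be): count the drives reporting OK after the heal and flag the result when they fall below the parity block count.

package main

import "fmt"

const driveStateOk = "ok" // assumed value of madmin.DriveStateOk

func countOKDrives(states []string) (count int) {
	for _, s := range states {
		if s == driveStateOk {
			count++
		}
	}
	return count
}

func main() {
	states := []string{"ok", "ok", "offline", "missing"} // hypothetical post-heal states
	parityBlocks := 4                                    // hypothetical erasure config
	if got := countOKDrives(states); got < parityBlocks {
		fmt.Printf("quorum loss - expected %d minimum, got drive states in OK %d\n", parityBlocks, got)
	}
}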
@@ -792,18 +801,20 @@ func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error {
return h.healMinioSysMeta(objAPI, minioConfigPrefix)()
}

func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
func (h *healSequence) healItems(objAPI ObjectLayer) error {
if h.clientToken == bgHealingUUID {
// For background heal do nothing.
return nil
}

if err := h.healDiskMeta(objAPI); err != nil {
return err
if h.bucket == "" { // heal internal meta only during a site-wide heal
if err := h.healDiskMeta(objAPI); err != nil {
return err
}
}

// Heal buckets and objects
return h.healBuckets(objAPI, bucketsOnly)
return h.healBuckets(objAPI)
}

// traverseAndHeal - traverses on-disk data and performs healing
@@ -814,8 +825,7 @@ func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
// has to wait until a safe point is reached, such as between scanning
// two objects.
func (h *healSequence) traverseAndHeal(objAPI ObjectLayer) {
bucketsOnly := false // Heals buckets and objects also.
h.traverseAndHealDoneCh <- h.healItems(objAPI, bucketsOnly)
h.traverseAndHealDoneCh <- h.healItems(objAPI)
xioutil.SafeClose(h.traverseAndHealDoneCh)
}

@@ -826,6 +836,7 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
// NOTE: Healing on meta is run regardless
// of any bucket being selected, this is to ensure that
// meta are always upto date and correct.
h.settings.Recursive = true
return objAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string, scanMode madmin.HealScanMode) error {
if h.isQuitting() {
return errHealStopSignalled
@@ -842,14 +853,14 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
}

// healBuckets - check for all buckets heal or just particular bucket.
func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
func (h *healSequence) healBuckets(objAPI ObjectLayer) error {
if h.isQuitting() {
return errHealStopSignalled
}

// 1. If a bucket was specified, heal only the bucket.
if h.bucket != "" {
return h.healBucket(objAPI, h.bucket, bucketsOnly)
return h.healBucket(objAPI, h.bucket, false)
}

buckets, err := objAPI.ListBuckets(h.ctx, BucketOptions{})
@@ -863,7 +874,7 @@ func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
})

for _, bucket := range buckets {
if err = h.healBucket(objAPI, bucket.Name, bucketsOnly); err != nil {
if err = h.healBucket(objAPI, bucket.Name, false); err != nil {
return err
}
}
@@ -881,16 +892,6 @@ func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly
return nil
}

if !h.settings.Recursive {
if h.object != "" {
if err := h.healObject(bucket, h.object, "", h.settings.ScanMode); err != nil {
return err
}
}

return nil
}

if err := objAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}

@@ -159,14 +159,14 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {

// Info operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(adminMiddleware(adminAPI.ServerInfoHandler, traceAllFlag, noObjLayerFlag))
adminRouter.Methods(http.MethodGet, http.MethodPost).Path(adminVersion + "/inspect-data").HandlerFunc(adminMiddleware(adminAPI.InspectDataHandler, noGZFlag, traceAllFlag))
adminRouter.Methods(http.MethodGet, http.MethodPost).Path(adminVersion + "/inspect-data").HandlerFunc(adminMiddleware(adminAPI.InspectDataHandler, noGZFlag, traceHdrsS3HFlag))

// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(adminMiddleware(adminAPI.StorageInfoHandler, traceAllFlag))
// DataUsageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(adminMiddleware(adminAPI.DataUsageInfoHandler, traceAllFlag))
// Metrics operation
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/metrics").HandlerFunc(adminMiddleware(adminAPI.MetricsHandler, traceAllFlag))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/metrics").HandlerFunc(adminMiddleware(adminAPI.MetricsHandler, traceHdrsS3HFlag))

if globalIsDistErasure || globalIsErasure {
// Heal operations
@@ -193,9 +193,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
// Profiling operations - deprecated API
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(adminMiddleware(adminAPI.StartProfilingHandler, traceAllFlag, noObjLayerFlag)).
Queries("profilerType", "{profilerType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(adminMiddleware(adminAPI.DownloadProfilingHandler, traceAllFlag, noObjLayerFlag))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(adminMiddleware(adminAPI.DownloadProfilingHandler, traceHdrsS3HFlag, noObjLayerFlag))
// Profiling operations
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(adminMiddleware(adminAPI.ProfileHandler, traceAllFlag, noObjLayerFlag))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(adminMiddleware(adminAPI.ProfileHandler, traceHdrsS3HFlag, noObjLayerFlag))

// Config KV operations.
if enableConfigOps {
@@ -244,6 +244,10 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
// STS accounts ops
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/temporary-account-info").HandlerFunc(adminMiddleware(adminAPI.TemporaryAccountInfo)).Queries("accessKey", "{accessKey:.*}")

// Access key (service account/STS) operations
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-access-keys-bulk").HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysBulk)).Queries("listType", "{listType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-access-key").HandlerFunc(adminMiddleware(adminAPI.InfoAccessKey)).Queries("accessKey", "{accessKey:.*}")

// Info policy IAM latest
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(adminMiddleware(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
// List policies latest
@@ -290,8 +294,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {

// Import IAM info
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam").HandlerFunc(adminMiddleware(adminAPI.ImportIAM, noGZFlag))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam-v2").HandlerFunc(adminMiddleware(adminAPI.ImportIAMV2, noGZFlag))

// IDentity Provider configuration APIs
// Identity Provider configuration APIs
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.AddIdentityProviderCfg))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.UpdateIdentityProviderCfg))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp-config/{type}").HandlerFunc(adminMiddleware(adminAPI.ListIdentityProviderCfg))
@@ -308,6 +313,11 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
// LDAP IAM operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/ldap/policy-entities").HandlerFunc(adminMiddleware(adminAPI.ListLDAPPolicyMappingEntities))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp/ldap/policy/{operation}").HandlerFunc(adminMiddleware(adminAPI.AttachDetachPolicyLDAP))

// OpenID specific service accounts ops
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/openid/list-access-keys-bulk").
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysOpenIDBulk)).Queries("listType", "{listType:.*}")

// -- END IAM APIs --

// GetBucketQuotaConfig
@@ -420,6 +430,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
// -- Health API --
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
HandlerFunc(adminMiddleware(adminAPI.HealthInfoHandler))

// STS Revocation
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/revoke-tokens/{userProvider}").HandlerFunc(adminMiddleware(adminAPI.RevokeTokens))
}

// If none of the routes match add default error handler routes

@@ -32,6 +32,8 @@ type DeletedObject struct {
DeleteMarkerMTime DeleteMarkerMTime `xml:"-"`
// MinIO extensions to support delete marker replication
ReplicationState ReplicationState `xml:"-"`

found bool // the object was found during deletion
}

// DeleteMarkerMTime is an embedded type containing time.Time for XML marshal
@@ -42,10 +44,10 @@ type DeleteMarkerMTime struct {
// MarshalXML encodes expiration date if it is non-zero and encodes
// empty string otherwise
func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if t.Time.IsZero() {
if t.IsZero() {
return nil
}
return e.EncodeElement(t.Time.Format(time.RFC3339), startElement)
return e.EncodeElement(t.Format(time.RFC3339), startElement)
}

// ObjectV object version key/versionId

@@ -28,7 +28,7 @@ import (
"strconv"
"strings"

"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/minio/minio/internal/ioutil"
"google.golang.org/api/googleapi"

@@ -213,6 +213,10 @@ const (
ErrPolicyAlreadyAttached
ErrPolicyNotAttached
ErrExcessData
ErrPolicyInvalidName
ErrNoTokenRevokeType
ErrAdminOpenIDNotEnabled
ErrAdminNoSuchAccessKey
// Add new error codes here.

// SSE-S3/SSE-KMS related API errors
@@ -561,6 +565,16 @@ var errorCodes = errorCodeMap{
Description: "More data provided than indicated content length",
HTTPStatusCode: http.StatusBadRequest,
},
ErrPolicyInvalidName: {
Code: "PolicyInvalidName",
Description: "Policy name may not contain comma",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminOpenIDNotEnabled: {
Code: "OpenIDNotEnabled",
Description: "No enabled OpenID Connect identity providers",
HTTPStatusCode: http.StatusBadRequest,
},
ErrPolicyTooLarge: {
Code: "PolicyTooLarge",
Description: "Policy exceeds the maximum allowed document size.",
@@ -623,7 +637,7 @@ var errorCodes = errorCodeMap{
},
ErrMissingContentMD5: {
Code: "MissingContentMD5",
Description: "Missing required header for this request: Content-Md5.",
Description: "Missing or invalid required header for this request: Content-Md5 or Amz-Content-Checksum",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingSecurityHeader: {
@@ -978,7 +992,7 @@ var errorCodes = errorCodeMap{
},
ErrReplicationNoExistingObjects: {
Code: "XMinioReplicationNoExistingObjects",
Description: "No matching ExistingsObjects rule enabled",
Description: "No matching ExistingObjects rule enabled",
HTTPStatusCode: http.StatusBadRequest,
},
ErrRemoteTargetDenyAddError: {
@@ -1258,6 +1272,16 @@ var errorCodes = errorCodeMap{
Description: "The security token included in the request is invalid",
HTTPStatusCode: http.StatusForbidden,
},
ErrNoTokenRevokeType: {
Code: "InvalidArgument",
Description: "No token revoke type specified and one could not be inferred from the request",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminNoSuchAccessKey: {
Code: "XMinioAdminNoSuchAccessKey",
Description: "The specified access key does not exist.",
HTTPStatusCode: http.StatusNotFound,
},

// S3 extensions.
ErrContentSHA256Mismatch: {
@@ -1486,7 +1510,7 @@ var errorCodes = errorCodeMap{
},
ErrTooManyRequests: {
Code: "TooManyRequests",
Description: "Deadline exceeded while waiting in incoming queue, please reduce your request rate",
Description: "Please reduce your request rate",
HTTPStatusCode: http.StatusTooManyRequests,
},
ErrUnsupportedMetadata: {
@@ -2155,6 +2179,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrAdminNoSuchUserLDAPWarn
case errNoSuchServiceAccount:
apiErr = ErrAdminServiceAccountNotFound
case errNoSuchAccessKey:
apiErr = ErrAdminNoSuchAccessKey
case errNoSuchGroup:
apiErr = ErrAdminNoSuchGroup
case errGroupNotEmpty:
@@ -2248,6 +2274,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrServerNotInitialized
case errBucketMetadataNotInitialized:
apiErr = ErrBucketMetadataNotInitialized
case hash.ErrInvalidChecksum:
apiErr = ErrInvalidChecksum
}

// Compression errors
@@ -2549,11 +2577,11 @@ func toAPIError(ctx context.Context, err error) APIError {
if len(e.Errors) >= 1 {
apiErr.Code = e.Errors[0].Reason
}
case azblob.StorageError:
case *azcore.ResponseError:
apiErr = APIError{
Code: string(e.ServiceCode()),
Code: e.ErrorCode,
Description: e.Error(),
HTTPStatusCode: e.Response().StatusCode,
HTTPStatusCode: e.StatusCode,
}
// Add more other SDK related errors here if any in future.
default:
@@ -2592,7 +2620,7 @@ func getAPIError(code APIErrorCode) APIError {
return errorCodes.ToAPIErr(ErrInternalError)
}

// getErrorResponse gets in standard error and resource value and
// getAPIErrorResponse gets in standard error and resource value and
// provides a encodable populated response values
func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID, hostID string) APIErrorResponse {
reqInfo := logger.GetReqInfo(ctx)

@@ -18,7 +18,6 @@
package cmd

import (
"context"
"errors"
"testing"

@@ -64,7 +63,7 @@ var toAPIErrorTests = []struct {
}

func TestAPIErrCode(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
for i, testCase := range toAPIErrorTests {
errCode := toAPIErrorCode(ctx, testCase.err)
if errCode != testCase.errCode {

@@ -23,6 +23,7 @@ import (
"encoding/json"
"encoding/xml"
"fmt"
"mime"
"net/http"
"strconv"
"strings"
@@ -64,7 +65,7 @@ func setCommonHeaders(w http.ResponseWriter) {
}

// Encodes the response headers into XML format.
func encodeResponse(response interface{}) []byte {
func encodeResponse(response any) []byte {
var buf bytes.Buffer
buf.WriteString(xml.Header)
if err := xml.NewEncoder(&buf).Encode(response); err != nil {
@@ -82,7 +83,7 @@ func encodeResponse(response interface{}) []byte {
// Do not use this function for anything other than ListObjects()
// variants, please open a github discussion if you wish to use
// this in other places.
func encodeResponseList(response interface{}) []byte {
func encodeResponseList(response any) []byte {
var buf bytes.Buffer
buf.WriteString(xxml.Header)
if err := xxml.NewEncoder(&buf).Encode(response); err != nil {
@@ -93,7 +94,7 @@ func encodeResponseList(response interface{}) []byte {
}

// Encodes the response headers into JSON format.
func encodeResponseJSON(response interface{}) []byte {
func encodeResponseJSON(response any) []byte {
var bytesBuffer bytes.Buffer
e := json.NewEncoder(&bytesBuffer)
e.Encode(response)
@@ -168,6 +169,32 @@ func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo Object
if !stringsHasPrefixFold(k, userMetadataPrefix) {
continue
}
// check the doc https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
// For metadata values like "ö", "ÄMÄZÕÑ S3", and "öha, das sollte eigentlich
// funktionieren", tested against a real AWS S3 bucket, S3 may encode incorrectly. For
// example, "ö" was encoded as =?UTF-8?B?w4PCtg==?=, producing invalid UTF-8 instead
// of =?UTF-8?B?w7Y=?=. This mirrors errors like the ä½ in another string.
//
// S3 uses B-encoding (Base64) for non-ASCII-heavy metadata and Q-encoding
// (quoted-printable) for mostly ASCII strings. Long strings are split at word
// boundaries to fit RFC 2047’s 75-character limit, ensuring HTTP parser
// compatibility.
//
// However, this splitting increases header size and can introduce errors, unlike Go’s
// mime package in MinIO, which correctly encodes strings with fixed B/Q encodings,
// avoiding S3’s heuristic-driven issues.
//
// For MinIO developers, decode S3 metadata with mime.WordDecoder, validate outputs,
// report encoding bugs to AWS, and use ASCII-only metadata to ensure reliable S3 API
// compatibility.
if needsMimeEncoding(v) {
// see https://github.com/golang/go/blob/release-branch.go1.24/src/net/mail/message.go#L325
if strings.ContainsAny(v, "\"#$%&'(),.:;<>@[]^`{|}~") {
v = mime.BEncoding.Encode("UTF-8", v)
} else {
v = mime.QEncoding.Encode("UTF-8", v)
}
}
w.Header()[strings.ToLower(k)] = []string{v}
isSet = true
break
@@ -229,3 +256,14 @@ func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo Object

return nil
}

// needsEncoding reports whether s contains any bytes that need to be encoded.
// see mime.needsEncoding
func needsMimeEncoding(s string) bool {
for _, b := range s {
if (b < ' ' || b > '~') && b != '\t' {
return true
}
}
return false
}
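A standalone sketch of the RFC 2047 round-trip described in the comments above: Q-encoding for mostly-ASCII values, B-encoding (Base64) when specials or non-ASCII bytes dominate, and mime.WordDecoder to read either form back. All calls are stdlib; the sample strings are taken from the comment.

package main

import (
	"fmt"
	"mime"
)

func main() {
	// Q-encoding keeps mostly-ASCII values readable...
	q := mime.QEncoding.Encode("UTF-8", "öha, das sollte eigentlich funktionieren")
	// ...B-encoding suits values dense in non-ASCII bytes.
	b := mime.BEncoding.Encode("UTF-8", "ÄMÄZÕÑ S3")
	fmt.Println(q)
	fmt.Println(b)

	// WordDecoder decodes both encodings back to UTF-8.
	dec := new(mime.WordDecoder)
	out, err := dec.DecodeHeader(b)
	fmt.Println(out, err)
}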

@@ -34,7 +34,8 @@ func TestNewRequestID(t *testing.T) {
e = char

// Ensure that it is alphanumeric, in this case, between 0-9 and A-Z.
if !(('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')) {
isAlnum := ('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')
if !isAlnum {
t.Fail()
}
}

@@ -31,7 +31,7 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
return prefix, marker, delimiter, maxkeys, encodingType, errCode
}
} else {
maxkeys = maxObjectList
@@ -41,7 +41,7 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
marker = values.Get("marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return
return prefix, marker, delimiter, maxkeys, encodingType, errCode
}

func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType, versionIDMarker string, errCode APIErrorCode) {
@@ -51,7 +51,7 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
return prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode
}
} else {
maxkeys = maxObjectList
@@ -62,7 +62,7 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
versionIDMarker = values.Get("version-id-marker")
return
return prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode
}

// Parse bucket url queries for ListObjects V2.
@@ -73,7 +73,7 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
if val, ok := values["continuation-token"]; ok {
if len(val[0]) == 0 {
errCode = ErrIncorrectContinuationToken
return
return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
}
}

@@ -81,7 +81,7 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
}
} else {
maxkeys = maxObjectList
@@ -97,11 +97,11 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
decodedToken, err := base64.StdEncoding.DecodeString(token)
if err != nil {
errCode = ErrIncorrectContinuationToken
return
return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
}
token = string(decodedToken)
}
return
return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
}

// Parse bucket url queries for ?uploads
@@ -112,7 +112,7 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID
var err error
if maxUploads, err = strconv.Atoi(values.Get("max-uploads")); err != nil {
errCode = ErrInvalidMaxUploads
return
return prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode
}
} else {
maxUploads = maxUploadsList
@@ -123,7 +123,7 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID
uploadIDMarker = values.Get("upload-id-marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return
return prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode
}

// Parse object url queries
@@ -134,7 +134,7 @@ func getObjectResources(values url.Values) (uploadID string, partNumberMarker, m
if values.Get("max-parts") != "" {
if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil {
errCode = ErrInvalidMaxParts
return
return uploadID, partNumberMarker, maxParts, encodingType, errCode
}
} else {
maxParts = maxPartsList
@@ -143,11 +143,11 @@ func getObjectResources(values url.Values) (uploadID string, partNumberMarker, m
if values.Get("part-number-marker") != "" {
if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil {
errCode = ErrInvalidPartNumberMarker
return
return uploadID, partNumberMarker, maxParts, encodingType, errCode
}
}

uploadID = values.Get("uploadId")
encodingType = values.Get("encoding-type")
return
return uploadID, partNumberMarker, maxParts, encodingType, errCode
}

@@ -166,10 +166,11 @@ type Part struct {
Size int64

// Checksum values
ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"`
ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"`
ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"`
ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"`
ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
ChecksumCRC64NVME string `xml:",omitempty"`
}

// ListPartsResponse - format for list parts response.
@@ -192,6 +193,8 @@ type ListPartsResponse struct {
IsTruncated bool

ChecksumAlgorithm string
ChecksumType string

// List of parts.
Parts []Part `xml:"Part"`
}
@@ -413,10 +416,11 @@ type CompleteMultipartUploadResponse struct {
Key string
ETag string

ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"`
ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"`
ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"`
ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"`
ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
ChecksumCRC64NVME string `xml:",omitempty"`
}

// DeleteError structure.
@@ -516,7 +520,6 @@ func cleanReservedKeys(metadata map[string]string) map[string]string {
}
case crypto.SSEC:
m[xhttp.AmzServerSideEncryptionCustomerAlgorithm] = xhttp.AmzEncryptionAES

}

var toRemove []string
@@ -593,8 +596,6 @@ func generateListVersionsResponse(ctx context.Context, bucket, prefix, marker, v
for k, v := range cleanReservedKeys(object.UserDefined) {
content.UserMetadata.Set(k, v)
}

content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
content.Internal = &ObjectInternalInfo{
K: object.DataBlocks,
M: object.ParityBlocks,
@@ -729,7 +730,6 @@ func generateListObjectsV2Response(ctx context.Context, bucket, prefix, token, n
for k, v := range cleanReservedKeys(object.UserDefined) {
content.UserMetadata.Set(k, v)
}
content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
content.Internal = &ObjectInternalInfo{
K: object.DataBlocks,
M: object.ParityBlocks,
@@ -790,17 +790,18 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi

// generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
func generateCompleteMultipartUploadResponse(bucket, key, location string, oi ObjectInfo, h http.Header) CompleteMultipartUploadResponse {
cs := oi.decryptChecksums(0, h)
cs, _ := oi.decryptChecksums(0, h)
c := CompleteMultipartUploadResponse{
Location: location,
Bucket: bucket,
Key: key,
// AWS S3 quotes the ETag in XML, make sure we are compatible here.
ETag: "\"" + oi.ETag + "\"",
ChecksumSHA1: cs[hash.ChecksumSHA1.String()],
ChecksumSHA256: cs[hash.ChecksumSHA256.String()],
ChecksumCRC32: cs[hash.ChecksumCRC32.String()],
ChecksumCRC32C: cs[hash.ChecksumCRC32C.String()],
ETag: "\"" + oi.ETag + "\"",
ChecksumSHA1: cs[hash.ChecksumSHA1.String()],
ChecksumSHA256: cs[hash.ChecksumSHA256.String()],
ChecksumCRC32: cs[hash.ChecksumCRC32.String()],
ChecksumCRC32C: cs[hash.ChecksumCRC32C.String()],
ChecksumCRC64NVME: cs[hash.ChecksumCRC64NVME.String()],
}
return c
}
@@ -828,6 +829,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
listPartsResponse.IsTruncated = partsInfo.IsTruncated
listPartsResponse.NextPartNumberMarker = partsInfo.NextPartNumberMarker
listPartsResponse.ChecksumAlgorithm = partsInfo.ChecksumAlgorithm
listPartsResponse.ChecksumType = partsInfo.ChecksumType

listPartsResponse.Parts = make([]Part, len(partsInfo.Parts))
for index, part := range partsInfo.Parts {
@@ -840,6 +842,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
newPart.ChecksumCRC32C = part.ChecksumCRC32C
newPart.ChecksumSHA1 = part.ChecksumSHA1
newPart.ChecksumSHA256 = part.ChecksumSHA256
newPart.ChecksumCRC64NVME = part.ChecksumCRC64NVME
listPartsResponse.Parts[index] = newPart
}
return listPartsResponse
@@ -886,6 +889,12 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err
}

func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
// Don't write a response if one has already been written.
// Fixes https://github.com/minio/minio/issues/21633
if headersAlreadyWritten(w) {
return
}

if statusCode == 0 {
statusCode = 200
}
@@ -947,19 +956,10 @@ func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {
// writeErrorResponse writes error headers
func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
switch err.HTTPStatusCode {
case http.StatusServiceUnavailable:
case http.StatusServiceUnavailable, http.StatusTooManyRequests:
// Set retry-after header to indicate user-agents to retry request after 60 seconds.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "60")
case http.StatusTooManyRequests:
_, deadline := globalAPIConfig.getRequestsPool()
if deadline <= 0 {
// Set retry-after header to indicate user-agents to retry request after 10 seconds.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "10")
} else {
w.Header().Set(xhttp.RetryAfter, strconv.Itoa(int(deadline.Seconds())))
}
}

switch err.Code {
@@ -1021,3 +1021,45 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}
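Client-side counterpart (a sketch, not from this changeset): honoring the Retry-After value that writeErrorResponse now sets for both 503 and 429 responses. retryDelay is a hypothetical helper.

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// retryDelay reads Retry-After (in seconds) from a 429/503 response and
// falls back to a fixed delay when the header is absent or malformed.
func retryDelay(resp *http.Response) time.Duration {
	if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable {
		if secs, err := strconv.Atoi(resp.Header.Get("Retry-After")); err == nil {
			return time.Duration(secs) * time.Second
		}
	}
	return 5 * time.Second
}

func main() {
	resp := &http.Response{StatusCode: 429, Header: http.Header{"Retry-After": []string{"60"}}}
	fmt.Println("sleep for:", retryDelay(resp))
}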
|
||||
|
||||
type unwrapper interface {
|
||||
Unwrap() http.ResponseWriter
|
||||
}
|
||||
|
||||
// headersAlreadyWritten returns true if the headers have already been written
|
||||
// to this response writer. It will unwrap the ResponseWriter if possible to try
|
||||
// and find a trackingResponseWriter.
|
||||
func headersAlreadyWritten(w http.ResponseWriter) bool {
|
||||
for {
|
||||
if trw, ok := w.(*trackingResponseWriter); ok {
|
||||
return trw.headerWritten
|
||||
} else if uw, ok := w.(unwrapper); ok {
|
||||
w = uw.Unwrap()
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// trackingResponseWriter wraps a ResponseWriter and notes when WriterHeader has
|
||||
// been called. This allows high level request handlers to check if something
|
||||
// has already sent the header.
|
||||
type trackingResponseWriter struct {
|
||||
http.ResponseWriter
|
||||
headerWritten bool
|
||||
}
|
||||
|
||||
func (w *trackingResponseWriter) WriteHeader(statusCode int) {
|
||||
if !w.headerWritten {
|
||||
w.headerWritten = true
|
||||
w.ResponseWriter.WriteHeader(statusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *trackingResponseWriter) Write(b []byte) (int, error) {
|
||||
return w.ResponseWriter.Write(b)
|
||||
}
|
||||
|
||||
func (w *trackingResponseWriter) Unwrap() http.ResponseWriter {
return w.ResponseWriter
}

@@ -18,8 +18,12 @@
package cmd

import (
"io"
"net/http"
"net/http/httptest"
"testing"

"github.com/klauspost/compress/gzhttp"
)

// Tests object location.
@@ -100,7 +104,6 @@ func TestObjectLocation(t *testing.T) {
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
if testCase.expectedLocation != gotLocation {
@@ -123,3 +126,89 @@ func TestGetURLScheme(t *testing.T) {
t.Errorf("Expected %s, got %s", httpsScheme, gotScheme)
}
}

func TestTrackingResponseWriter(t *testing.T) {
rw := httptest.NewRecorder()
trw := &trackingResponseWriter{ResponseWriter: rw}
trw.WriteHeader(123)
if !trw.headerWritten {
t.Fatal("headerWritten was not set by WriteHeader call")
}

_, err := trw.Write([]byte("hello"))
if err != nil {
t.Fatalf("Write unexpectedly failed: %v", err)
}

// Check that WriteHeader and Write were called on the underlying response writer
resp := rw.Result()
if resp.StatusCode != 123 {
t.Fatalf("unexpected status: %v", resp.StatusCode)
}
body, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("reading response body failed: %v", err)
}
if string(body) != "hello" {
t.Fatalf("response body incorrect: %v", string(body))
}

// Check that Unwrap works
if trw.Unwrap() != rw {
t.Fatalf("Unwrap returned wrong result: %v", trw.Unwrap())
}
}

func TestHeadersAlreadyWritten(t *testing.T) {
rw := httptest.NewRecorder()
trw := &trackingResponseWriter{ResponseWriter: rw}

if headersAlreadyWritten(trw) {
t.Fatal("headers have not been written yet")
}

trw.WriteHeader(123)
if !headersAlreadyWritten(trw) {
t.Fatal("headers were written")
}
}

func TestHeadersAlreadyWrittenWrapped(t *testing.T) {
rw := httptest.NewRecorder()
trw := &trackingResponseWriter{ResponseWriter: rw}
wrap1 := &gzhttp.NoGzipResponseWriter{ResponseWriter: trw}
wrap2 := &gzhttp.NoGzipResponseWriter{ResponseWriter: wrap1}

if headersAlreadyWritten(wrap2) {
t.Fatal("headers have not been written yet")
}

wrap2.WriteHeader(123)
if !headersAlreadyWritten(wrap2) {
t.Fatal("headers were written")
}
}

func TestWriteResponseHeadersNotWritten(t *testing.T) {
rw := httptest.NewRecorder()
trw := &trackingResponseWriter{ResponseWriter: rw}

writeResponse(trw, 299, []byte("hello"), "application/foo")

resp := rw.Result()
if resp.StatusCode != 299 {
t.Fatal("response wasn't written")
}
}

func TestWriteResponseHeadersWritten(t *testing.T) {
rw := httptest.NewRecorder()
rw.Code = -1
trw := &trackingResponseWriter{ResponseWriter: rw, headerWritten: true}

writeResponse(trw, 200, []byte("hello"), "application/foo")

if rw.Code != -1 {
t.Fatalf("response was written when it shouldn't have been (Code=%v)", rw.Code)
}
}

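The tests above exercise a small wrapper that records whether response headers have been sent. A minimal sketch of the pattern, assuming the field and method names the tests show (the Write implementation and anything beyond those names are illustrative guesses, not MinIO's exact code):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// trackingResponseWriter remembers whether headers were already sent,
// so later code can avoid a "superfluous WriteHeader" call.
type trackingResponseWriter struct {
	http.ResponseWriter
	headerWritten bool
}

func (w *trackingResponseWriter) WriteHeader(code int) {
	w.headerWritten = true
	w.ResponseWriter.WriteHeader(code)
}

func (w *trackingResponseWriter) Write(b []byte) (int, error) {
	w.headerWritten = true // first Write implies a 200 header
	return w.ResponseWriter.Write(b)
}

// Unwrap exposes the underlying writer so wrappers (e.g. gzhttp's)
// can walk the chain via an Unwrap() method.
func (w *trackingResponseWriter) Unwrap() http.ResponseWriter {
	return w.ResponseWriter
}

func main() {
	rw := httptest.NewRecorder()
	trw := &trackingResponseWriter{ResponseWriter: rw}
	trw.WriteHeader(123)
	fmt.Println(trw.headerWritten, rw.Code) // true 123
}
```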
@@ -218,6 +218,8 @@ func s3APIMiddleware(f http.HandlerFunc, flags ...s3HFlag) http.HandlerFunc {
handlerName := getHandlerName(f, "objectAPIHandlers")

var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
w = &trackingResponseWriter{ResponseWriter: w}

// Wrap the actual handler with the appropriate tracing middleware.
var tracedHandler http.HandlerFunc
if handlerFlags.has(traceHdrsS3HFlag) {
@@ -227,13 +229,13 @@ func s3APIMiddleware(f http.HandlerFunc, flags ...s3HFlag) http.HandlerFunc {
}

// Skip wrapping with the gzip middleware if specified.
var gzippedHandler http.HandlerFunc = tracedHandler
gzippedHandler := tracedHandler
if !handlerFlags.has(noGZS3HFlag) {
gzippedHandler = gzipHandler(gzippedHandler)
}

// Skip wrapping with throttling middleware if specified.
var throttledHandler http.HandlerFunc = gzippedHandler
throttledHandler := gzippedHandler
if !handlerFlags.has(noThrottleS3HFlag) {
throttledHandler = maxClients(throttledHandler)
}
@@ -387,6 +389,11 @@ func registerAPIRouter(router *mux.Router) {
HeadersRegexp(xhttp.AmzSnowballExtract, "true").
HandlerFunc(s3APIMiddleware(api.PutObjectExtractHandler, traceHdrsS3HFlag))

// AppendObject to be rejected
router.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzWriteOffsetBytes, "").
HandlerFunc(s3APIMiddleware(errorResponseHandler))

// PutObject
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectHandler, traceHdrsS3HFlag))
@@ -456,6 +463,14 @@ func registerAPIRouter(router *mux.Router) {
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketCorsHandler)).
Queries("cors", "")
// PutBucketCors - this is a dummy call.
router.Methods(http.MethodPut).
HandlerFunc(s3APIMiddleware(api.PutBucketCorsHandler)).
Queries("cors", "")
// DeleteBucketCors - this is a dummy call.
router.Methods(http.MethodDelete).
HandlerFunc(s3APIMiddleware(api.DeleteBucketCorsHandler)).
Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call.
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketWebsiteHandler)).
@@ -472,6 +487,7 @@ func registerAPIRouter(router *mux.Router) {
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketLoggingHandler)).
Queries("logging", "")

// GetBucketTaggingHandler
router.Methods(http.MethodGet).
HandlerFunc(s3APIMiddleware(api.GetBucketTaggingHandler)).

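The `gzippedHandler := tracedHandler` change drops a redundant explicit type: since each wrapper returns an `http.HandlerFunc`, plain short assignment infers the same type. A reduced sketch of the conditional-chaining pattern this middleware uses (the wrapper bodies are placeholders, not MinIO's gzipHandler/maxClients):

```go
package main

import (
	"fmt"
	"net/http"
)

// gzipWrap and throttleWrap stand in for gzipHandler and maxClients.
func gzipWrap(h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// a real wrapper would negotiate and compress here
		h(w, r)
	}
}

func throttleWrap(h http.HandlerFunc) http.HandlerFunc {
	return h // a real limiter would gate on a semaphore here
}

// build wraps h innermost-to-outermost, skipping layers per flags.
func build(h http.HandlerFunc, noGzip, noThrottle bool) http.HandlerFunc {
	gzipped := h // := suffices; the wrapper already returns http.HandlerFunc
	if !noGzip {
		gzipped = gzipWrap(gzipped)
	}
	throttled := gzipped
	if !noThrottle {
		throttled = throttleWrap(throttled)
	}
	return throttled
}

func main() {
	h := build(func(w http.ResponseWriter, r *http.Request) {}, false, true)
	fmt.Println(h != nil) // true
}
```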
@@ -43,7 +43,7 @@ func shouldEscape(c byte) bool {
// - Force encoding of '~'
func s3URLEncode(s string) string {
spaceCount, hexCount := 0, 0
for i := 0; i < len(s); i++ {
for i := range len(s) {
c := s[i]
if shouldEscape(c) {
if c == ' ' {
@@ -70,7 +70,7 @@ func s3URLEncode(s string) string {

if hexCount == 0 {
copy(t, s)
for i := 0; i < len(s); i++ {
for i := range len(s) {
if s[i] == ' ' {
t[i] = '+'
}
@@ -79,7 +79,7 @@ func s3URLEncode(s string) string {
}

j := 0
for i := 0; i < len(s); i++ {
for i := range len(s) {
switch c := s[i]; {
case c == ' ':
t[j] = '+'

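These hunks (and the `for range globalBackgroundHealRoutine.workers` change further down) are mechanical conversions to Go 1.22's range-over-int form, which is equivalent to the classic counted loop when the body doesn't mutate the index:

```go
package main

import "fmt"

func main() {
	s := "a b"
	// Classic counted loop:
	for i := 0; i < len(s); i++ {
		fmt.Println(i, s[i])
	}
	// Go 1.22+ equivalent; i runs 0..len(s)-1:
	for i := range len(s) {
		fmt.Println(i, s[i])
	}
	// When the index itself is unused, it degenerates to:
	for range 3 {
		fmt.Println("tick")
	}
}
```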
File diff suppressed because one or more lines are too long
@@ -96,7 +96,7 @@ func isRequestSignStreamingTrailerV4(r *http.Request) bool {
// Verify if the request has AWS Streaming Signature Version '4', with unsigned content and trailer.
func isRequestUnsignedTrailerV4(r *http.Request) bool {
return r.Header.Get(xhttp.AmzContentSha256) == unsignedPayloadTrailer &&
r.Method == http.MethodPut && strings.Contains(r.Header.Get(xhttp.ContentEncoding), streamingContentEncoding)
r.Method == http.MethodPut
}

// Authorization type.
@@ -162,7 +162,6 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
s3Err := ErrAccessDenied
if _, ok := r.Header[xhttp.AmzContentSha256]; ok &&
getRequestAuthType(r) == authTypeSigned {

// Get credential information from the request.
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
if s3Err != ErrNone {
@@ -217,12 +216,12 @@ func getSessionToken(r *http.Request) (token string) {

// Fetch claims in the security token returned by the client, doesn't return
// errors - upon errors the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
func mustGetClaimsFromToken(r *http.Request) map[string]any {
claims, _ := getClaimsFromToken(getSessionToken(r))
return claims
}

func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{}, error) {
func getClaimsFromTokenWithSecret(token, secret string) (*xjwt.MapClaims, error) {
// JWT token for x-amz-security-token is signed with admin
// secret key, temporary credentials become invalid if
// server admin credentials change. This is done to ensure
@@ -244,7 +243,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{},

// If AuthZPlugin is set, return without any further checks.
if newGlobalAuthZPluginFn() != nil {
return claims.Map(), nil
return claims, nil
}

// Check if a session policy is set. If so, decode it here.
@@ -263,16 +262,20 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{},
claims.MapClaims[sessionPolicyNameExtracted] = string(spBytes)
}

return claims.Map(), nil
return claims, nil
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
return getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
func getClaimsFromToken(token string) (map[string]any, error) {
jwtClaims, err := getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
if err != nil {
return nil, err
}
return jwtClaims.Map(), nil
}

// Fetch claims in the security token returned by the client and validate the token.
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]any, APIErrorCode) {
token := getSessionToken(r)
if token != "" && cred.AccessKey == "" {
// x-amz-security-token is not allowed for anonymous access.
@@ -319,7 +322,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
if err != nil {
return nil, toAPIErrorCode(r.Context(), err)
}
return claims, ErrNone
return claims.Map(), ErrNone
}

claims := xjwt.NewMapClaims()
@@ -360,7 +363,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
var cred auth.Credentials
var owner bool
switch getRequestAuthType(r) {
case authTypeUnknown, authTypeStreamingSigned:
case authTypeUnknown, authTypeStreamingSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer:
return ErrSignatureVersionNotSupported
case authTypePresignedV2, authTypeSignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
@@ -671,32 +674,6 @@ func setAuthMiddleware(h http.Handler) http.Handler {
})
}

func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, APIErrorCode) {
var cred auth.Credentials
var owner bool
var s3Err APIErrorCode
switch atype {
case authTypeUnknown, authTypeStreamingSigned:
return cred, owner, ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return cred, owner, s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypePresigned, authTypeSigned:
region := globalSite.Region()
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, s3Err
}
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return cred, owner, s3Err
}

return cred, owner, ErrNone
}

func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool) (s3Err APIErrorCode) {
var retSet bool
if cred.AccessKey == "" {
@@ -751,8 +728,14 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
return ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeStreamingSigned, authTypePresigned, authTypeSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer:
case authTypeStreamingSigned, authTypePresigned, authTypeSigned, authTypeStreamingSignedTrailer:
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
case authTypeStreamingUnsignedTrailer:
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
if s3Err == ErrMissingFields {
// Could be anonymous. cred + owner is zero value.
s3Err = ErrNone
}
}
if s3Err != ErrNone {
return s3Err

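Two threads run through this file's changes: `map[string]interface{}` becomes `map[string]any` (a pure alias since Go 1.18, so no behavior changes), and the low-level helper now returns the typed `*xjwt.MapClaims`, flattening to a plain map only at the edges via `Map()`. A self-contained sketch of that shape, with stand-in types rather than the real xjwt API:

```go
package main

import "fmt"

// MapClaims stands in for xjwt.MapClaims; names are illustrative only.
type MapClaims struct {
	MapClaims map[string]any
}

func (c *MapClaims) Map() map[string]any { return c.MapClaims }

// The low-level helper returns the typed claims so callers can keep
// mutating them (e.g. injecting an extracted session policy)...
func getClaimsWithSecret(token, secret string) (*MapClaims, error) {
	return &MapClaims{MapClaims: map[string]any{"sub": "user1"}}, nil
}

// ...while the convenience wrapper flattens to a plain map, matching
// the old map[string]interface{} signature exactly (any is an alias).
func getClaims(token string) (map[string]any, error) {
	c, err := getClaimsWithSecret(token, "secret")
	if err != nil {
		return nil, err
	}
	return c.Map(), nil
}

func main() {
	m, _ := getClaims("tok")
	fmt.Println(m["sub"]) // user1
}
```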
@@ -413,7 +413,7 @@ func TestIsReqAuthenticated(t *testing.T) {
}

func TestCheckAdminRequestAuthType(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()

objLayer, fsDir, err := prepareFS(ctx)
@@ -450,7 +450,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
}

func TestValidateAdminSignature(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()

objLayer, fsDir, err := prepareFS(ctx)

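`t.Context()` (added in Go 1.24) returns a per-test context that is canceled just before the test's cleanup functions run, so goroutines started from it cannot outlive the test; deriving from it with `WithCancel`, as above, keeps the explicit `defer cancel()` while inheriting that test-scoped cancellation. A minimal illustration:

```go
package main_test

import (
	"context"
	"testing"
	"time"
)

func TestWithTestContext(t *testing.T) {
	// t.Context() is canceled automatically when the test ends,
	// unlike context.Background().
	ctx, cancel := context.WithCancel(t.Context())
	defer cancel()

	select {
	case <-ctx.Done():
		t.Fatal("context canceled too early")
	case <-time.After(10 * time.Millisecond):
		// still alive while the test runs
	}
}
```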
@@ -102,7 +102,7 @@ func waitForLowHTTPReq() {
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
bgSeq := newBgHealSequence()
// Run the background healer
for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
for range globalBackgroundHealRoutine.workers {
go globalBackgroundHealRoutine.AddWorker(ctx, objAPI, bgSeq)
}

@@ -24,6 +24,7 @@ import (
"fmt"
"io"
"os"
"slices"
"sort"
"strings"
"sync"
@@ -72,10 +73,12 @@ type healingTracker struct {

// Numbers when current bucket started healing,
// for resuming with correct numbers.
ResumeItemsHealed uint64 `json:"-"`
ResumeItemsFailed uint64 `json:"-"`
ResumeBytesDone uint64 `json:"-"`
ResumeBytesFailed uint64 `json:"-"`
ResumeItemsHealed uint64 `json:"-"`
ResumeItemsFailed uint64 `json:"-"`
ResumeItemsSkipped uint64 `json:"-"`
ResumeBytesDone uint64 `json:"-"`
ResumeBytesFailed uint64 `json:"-"`
ResumeBytesSkipped uint64 `json:"-"`

// Filled on startup/restarts.
QueuedBuckets []string
@@ -90,6 +93,9 @@ type healingTracker struct {
BytesSkipped uint64

RetryAttempts uint64

Finished bool // finished healing, whether with errors or not

// Add future tracking capabilities
// Be sure that they are included in toHealingDisk
}
@@ -143,6 +149,26 @@ func initHealingTracker(disk StorageAPI, healID string) *healingTracker {
return h
}

func (h *healingTracker) resetHealing() {
h.mu.Lock()
defer h.mu.Unlock()

h.ItemsHealed = 0
h.ItemsFailed = 0
h.BytesDone = 0
h.BytesFailed = 0
h.ResumeItemsHealed = 0
h.ResumeItemsFailed = 0
h.ResumeBytesDone = 0
h.ResumeBytesFailed = 0
h.ItemsSkipped = 0
h.BytesSkipped = 0

h.HealedBuckets = nil
h.Object = ""
h.Bucket = ""
}

func (h *healingTracker) getLastUpdate() time.Time {
h.mu.RLock()
defer h.mu.RUnlock()
@@ -198,9 +224,6 @@ func (h *healingTracker) updateProgress(success, skipped bool, bytes uint64) {
// update will update the tracker on the disk.
// If the tracker has been deleted an error is returned.
func (h *healingTracker) update(ctx context.Context) error {
if h.disk.Healing() == nil {
return fmt.Errorf("healingTracker: drive %q is not marked as healing", h.ID)
}
h.mu.Lock()
if h.ID == "" || h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
h.ID, _ = h.disk.GetDiskID()
@@ -247,12 +270,7 @@ func (h *healingTracker) delete(ctx context.Context) error {
func (h *healingTracker) isHealed(bucket string) bool {
h.mu.RLock()
defer h.mu.RUnlock()
for _, v := range h.HealedBuckets {
if v == bucket {
return true
}
}
return false
return slices.Contains(h.HealedBuckets, bucket)
}

// resume will reset progress to the numbers at the start of the bucket.
@@ -262,8 +280,10 @@ func (h *healingTracker) resume() {

h.ItemsHealed = h.ResumeItemsHealed
h.ItemsFailed = h.ResumeItemsFailed
h.ItemsSkipped = h.ResumeItemsSkipped
h.BytesDone = h.ResumeBytesDone
h.BytesFailed = h.ResumeBytesFailed
h.BytesSkipped = h.ResumeBytesSkipped
}

// bucketDone should be called when a bucket is done healing.
@@ -274,8 +294,10 @@ func (h *healingTracker) bucketDone(bucket string) {

h.ResumeItemsHealed = h.ItemsHealed
h.ResumeItemsFailed = h.ItemsFailed
h.ResumeItemsSkipped = h.ItemsSkipped
h.ResumeBytesDone = h.BytesDone
h.ResumeBytesFailed = h.BytesFailed
h.ResumeBytesSkipped = h.BytesSkipped
h.HealedBuckets = append(h.HealedBuckets, bucket)
for i, b := range h.QueuedBuckets {
if b == bucket {
@@ -324,6 +346,7 @@ func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
PoolIndex: h.PoolIndex,
SetIndex: h.SetIndex,
DiskIndex: h.DiskIndex,
Finished: h.Finished,
Path: h.Path,
Started: h.Started.UTC(),
LastUpdate: h.LastUpdate.UTC(),
@@ -339,6 +362,7 @@ func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
Object: h.Object,
QueuedBuckets: h.QueuedBuckets,
HealedBuckets: h.HealedBuckets,
RetryAttempts: h.RetryAttempts,

ObjectsHealed: h.ItemsHealed, // Deprecated July 2021
ObjectsFailed: h.ItemsFailed, // Deprecated July 2021
@@ -353,16 +377,18 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
}

initBackgroundHealing(ctx, objAPI) // start quick background healing

if env.Get("_MINIO_AUTO_DRIVE_HEALING", config.EnableOn) == config.EnableOn || env.Get("_MINIO_AUTO_DISK_HEALING", config.EnableOn) == config.EnableOn {
if env.Get("_MINIO_AUTO_DRIVE_HEALING", config.EnableOn) == config.EnableOn {
globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
go monitorLocalDisksAndHeal(ctx, z)
}

go globalMRFState.startMRFPersistence()
go globalMRFState.healRoutine(z)
}

func getLocalDisksToHeal() (disksToHeal Endpoints) {
globalLocalDrivesMu.RLock()
localDrives := cloneDrives(globalLocalDrives)
localDrives := cloneDrives(globalLocalDrivesMap)
globalLocalDrivesMu.RUnlock()
for _, disk := range localDrives {
_, err := disk.DiskInfo(context.Background(), DiskInfoOptions{})
@@ -370,7 +396,7 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {
disksToHeal = append(disksToHeal, disk.Endpoint())
continue
}
if disk.Healing() != nil {
if h := disk.Healing(); h != nil && !h.Finished {
disksToHeal = append(disksToHeal, disk.Endpoint())
}
}
@@ -469,16 +495,19 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
// if objects have failed healing, we attempt a retry to heal the drive upto 3 times before giving up.
if tracker.ItemsFailed > 0 && tracker.RetryAttempts < 4 {
tracker.RetryAttempts++
bugLogIf(ctx, tracker.update(ctx))

healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retrying %s time (healed: %d, skipped: %d, failed: %d).", disk,
humanize.Ordinal(int(tracker.RetryAttempts)), tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)

tracker.resetHealing()
bugLogIf(ctx, tracker.update(ctx))

return errRetryHealing
}

if tracker.ItemsFailed > 0 {
healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retried %d times (healed: %d, skipped: %d, failed: %d).", disk,
tracker.RetryAttempts-1, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
tracker.RetryAttempts, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
} else {
if tracker.RetryAttempts > 0 {
healingLogEvent(ctx, "Healing of drive '%s' is complete, retried %d times (healed: %d, skipped: %d).", disk,
@@ -516,7 +545,8 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
continue
}
if t.HealID == tracker.HealID {
t.delete(ctx)
t.Finished = true
t.update(ctx)
}
}

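The `isHealed` rewrite swaps a hand-rolled membership loop for `slices.Contains` from the standard `slices` package (Go 1.21), which the import hunk above adds. The two forms are equivalent:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	healed := []string{"bucket-a", "bucket-b"}

	// Hand-rolled membership test, as in the old isHealed...
	found := false
	for _, v := range healed {
		if v == "bucket-a" {
			found = true
			break
		}
	}

	// ...and the one-liner that replaces it.
	fmt.Println(found, slices.Contains(healed, "bucket-a")) // true true
}
```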
@@ -1,7 +1,7 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

package cmd

import (
"github.com/tinylib/msgp/msgp"
)
@@ -132,6 +132,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "ResumeItemsFailed")
return
}
case "ResumeItemsSkipped":
z.ResumeItemsSkipped, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeItemsSkipped")
return
}
case "ResumeBytesDone":
z.ResumeBytesDone, err = dc.ReadUint64()
if err != nil {
@@ -144,6 +150,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
case "ResumeBytesSkipped":
z.ResumeBytesSkipped, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeBytesSkipped")
return
}
case "QueuedBuckets":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
@@ -206,6 +218,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "Finished":
z.Finished, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Finished")
return
}
default:
err = dc.Skip()
if err != nil {
@@ -219,9 +237,9 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 26
// map header, size 29
// write "ID"
err = en.Append(0xde, 0x0, 0x1a, 0xa2, 0x49, 0x44)
err = en.Append(0xde, 0x0, 0x1d, 0xa2, 0x49, 0x44)
if err != nil {
return
}
@@ -400,6 +418,16 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "ResumeItemsFailed")
return
}
// write "ResumeItemsSkipped"
err = en.Append(0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeItemsSkipped)
if err != nil {
err = msgp.WrapError(err, "ResumeItemsSkipped")
return
}
// write "ResumeBytesDone"
err = en.Append(0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
if err != nil {
@@ -420,6 +448,16 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
// write "ResumeBytesSkipped"
err = en.Append(0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeBytesSkipped)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesSkipped")
return
}
// write "QueuedBuckets"
err = en.Append(0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
@@ -494,15 +532,25 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "RetryAttempts")
return
}
// write "Finished"
err = en.Append(0xa8, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteBool(z.Finished)
if err != nil {
err = msgp.WrapError(err, "Finished")
return
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 26
// map header, size 29
// string "ID"
o = append(o, 0xde, 0x0, 0x1a, 0xa2, 0x49, 0x44)
o = append(o, 0xde, 0x0, 0x1d, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "PoolIndex"
o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
@@ -555,12 +603,18 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
// string "ResumeItemsFailed"
o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeItemsFailed)
// string "ResumeItemsSkipped"
o = append(o, 0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeItemsSkipped)
// string "ResumeBytesDone"
o = append(o, 0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
o = msgp.AppendUint64(o, z.ResumeBytesDone)
// string "ResumeBytesFailed"
o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeBytesFailed)
// string "ResumeBytesSkipped"
o = append(o, 0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeBytesSkipped)
// string "QueuedBuckets"
o = append(o, 0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.QueuedBuckets)))
@@ -585,6 +639,9 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
// string "RetryAttempts"
o = append(o, 0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
o = msgp.AppendUint64(o, z.RetryAttempts)
// string "Finished"
o = append(o, 0xa8, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64)
o = msgp.AppendBool(o, z.Finished)
return
}

@@ -714,6 +771,12 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "ResumeItemsFailed")
return
}
case "ResumeItemsSkipped":
z.ResumeItemsSkipped, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeItemsSkipped")
return
}
case "ResumeBytesDone":
z.ResumeBytesDone, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
@@ -726,6 +789,12 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
case "ResumeBytesSkipped":
z.ResumeBytesSkipped, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesSkipped")
return
}
case "QueuedBuckets":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
@@ -788,6 +857,12 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "Finished":
z.Finished, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Finished")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
@@ -802,7 +877,7 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *healingTracker) Msgsize() (s int) {
s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 18 + msgp.Uint64Size + 17 + msgp.Uint64Size + 12 + msgp.Uint64Size + 12 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 18 + msgp.Uint64Size + 18 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize
s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 18 + msgp.Uint64Size + 17 + msgp.Uint64Size + 12 + msgp.Uint64Size + 12 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 18 + msgp.Uint64Size + 18 + msgp.Uint64Size + 19 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 19 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize
for za0001 := range z.QueuedBuckets {
s += msgp.StringPrefixSize + len(z.QueuedBuckets[za0001])
}
@@ -810,6 +885,6 @@ func (z *healingTracker) Msgsize() (s int) {
for za0002 := range z.HealedBuckets {
s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
}
s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size + 14 + msgp.Uint64Size
s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size + 14 + msgp.Uint64Size + 9 + msgp.BoolSize
return
}

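The regenerated codec bumps the map header from 26 to 29 fields (the new ResumeItemsSkipped, ResumeBytesSkipped, and Finished entries). In MessagePack, 0xde is the map16 marker followed by a big-endian uint16 count, so `0xde, 0x0, 0x1a` encodes a 26-entry map and `0xde, 0x0, 0x1d` a 29-entry one. You can confirm this with the msgp helpers:

```go
package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// AppendMapHeader picks fixmap for <16 entries, map16 above that:
	// 0xde is the map16 marker, then the count as big-endian uint16.
	old := msgp.AppendMapHeader(nil, 26)
	cur := msgp.AppendMapHeader(nil, 29)
	fmt.Printf("% x\n", old) // de 00 1a
	fmt.Printf("% x\n", cur) // de 00 1d
}
```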
@@ -1,7 +1,7 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

package cmd

import (
"bytes"
"testing"

@@ -36,6 +36,7 @@ import (
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/wildcard"
"github.com/minio/pkg/v3/workers"
"github.com/minio/pkg/v3/xtime"
"gopkg.in/yaml.v3"
)

@@ -116,7 +117,7 @@ func (p BatchJobExpirePurge) Validate() error {
// BatchJobExpireFilter holds all the filters currently supported for batch replication
type BatchJobExpireFilter struct {
line, col int
OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
OlderThan xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedBefore *time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
@@ -162,7 +163,7 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
if len(ef.Name) > 0 && !wildcard.Match(ef.Name, obj.Name) {
return false
}
if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan {
if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan.D() {
return false
}

@@ -194,8 +195,8 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
return false
}
}

}

if len(ef.Metadata) > 0 && !obj.DeleteMarker {
for _, kv := range ef.Metadata {
// Object (version) must match all x-amz-meta and
@@ -280,7 +281,7 @@ type BatchJobExpire struct {
line, col int
APIVersion string `yaml:"apiVersion" json:"apiVersion"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix string `yaml:"prefix" json:"prefix"`
Prefix BatchJobPrefix `yaml:"prefix" json:"prefix"`
NotificationCfg BatchJobNotification `yaml:"notify" json:"notify"`
Retry BatchJobRetry `yaml:"retry" json:"retry"`
Rules []BatchJobExpireFilter `yaml:"rules" json:"rules"`
@@ -288,6 +289,16 @@ type BatchJobExpire struct {

var _ yaml.Unmarshaler = &BatchJobExpire{}

// RedactSensitive will redact any sensitive information in b.
func (r *BatchJobExpire) RedactSensitive() {
if r == nil {
return
}
if r.NotificationCfg.Token != "" {
r.NotificationCfg.Token = redactedText
}
}

// UnmarshalYAML - BatchJobExpire extends default unmarshal to extract line, col information.
func (r *BatchJobExpire) UnmarshalYAML(val *yaml.Node) error {
type expireJob BatchJobExpire
@@ -340,8 +351,24 @@ func (r *BatchJobExpire) Expire(ctx context.Context, api ObjectLayer, vc *versio
PrefixEnabledFn: vc.PrefixEnabled,
VersionSuspended: vc.Suspended(),
}
_, errs := api.DeleteObjects(ctx, r.Bucket, objsToDel, opts)
return errs

allErrs := make([]error, 0, len(objsToDel))

for {
count := len(objsToDel)
if count == 0 {
break
}
if count > maxDeleteList {
count = maxDeleteList
}
_, errs := api.DeleteObjects(ctx, r.Bucket, objsToDel[:count], opts)
allErrs = append(allErrs, errs...)
// Next batch of deletion
objsToDel = objsToDel[count:]
}

return allErrs
}

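The Expire rewrite stops handing the full slice to DeleteObjects in one call; it now walks the list in chunks of at most maxDeleteList, accumulating the per-object error slices. A standalone sketch of that chunking loop (maxDeleteList here is just a stand-in constant for the server's per-call delete cap):

```go
package main

import "fmt"

const maxDeleteList = 1000 // stand-in for the per-call delete cap

// deleteAll deletes keys in chunks of at most maxDeleteList,
// collecting one error slot per key, mirroring the loop above.
func deleteAll(keys []string, deleteBatch func([]string) []error) []error {
	allErrs := make([]error, 0, len(keys))
	for len(keys) > 0 {
		count := min(len(keys), maxDeleteList) // min builtin, Go 1.21+
		allErrs = append(allErrs, deleteBatch(keys[:count])...)
		keys = keys[count:] // next batch
	}
	return allErrs
}

func main() {
	keys := make([]string, 2500)
	batches := 0
	errs := deleteAll(keys, func(chunk []string) []error {
		batches++
		return make([]error, len(chunk))
	})
	fmt.Println(batches, len(errs)) // 3 2500
}
```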
const (
@@ -371,9 +398,12 @@ func (oiCache objInfoCache) Get(toDel ObjectToDelete) (*ObjectInfo, bool) {

func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo, job BatchJobRequest, api ObjectLayer, wk *workers.Workers, expireCh <-chan []expireObjInfo) {
vc, _ := globalBucketVersioningSys.Get(r.Bucket)
retryAttempts := r.Retry.Attempts
retryAttempts := job.Expire.Retry.Attempts
if retryAttempts <= 0 {
retryAttempts = batchExpireJobDefaultRetries
}
delay := job.Expire.Retry.Delay
if delay == 0 {
if delay <= 0 {
delay = batchExpireJobDefaultRetryDelay
}

@@ -394,12 +424,12 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
go func(toExpire []expireObjInfo) {
defer wk.Give()

toExpireAll := make([]ObjectInfo, 0, len(toExpire))
toExpireAll := make([]expireObjInfo, 0, len(toExpire))
toDel := make([]ObjectToDelete, 0, len(toExpire))
oiCache := newObjInfoCache()
for _, exp := range toExpire {
if exp.ExpireAll {
toExpireAll = append(toExpireAll, exp.ObjectInfo)
toExpireAll = append(toExpireAll, exp)
continue
}
// Cache ObjectInfo value via pointers for
@@ -415,14 +445,14 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
oiCache.Add(od, &exp.ObjectInfo)
}

var done bool
// DeleteObject(deletePrefix: true) to expire all versions of an object
for _, exp := range toExpireAll {
var success bool
for attempts := 1; attempts <= retryAttempts; attempts++ {
select {
case <-ctx.Done():
done = true
ri.trackMultipleObjectVersions(exp, success)
return
default:
}
stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts)
@@ -439,14 +469,7 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
break
}
}
ri.trackMultipleObjectVersions(r.Bucket, exp, success)
if done {
break
}
}

if done {
return
ri.trackMultipleObjectVersions(exp, success)
}

// DeleteMultiple objects
@@ -504,7 +527,8 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo

type expireObjInfo struct {
ObjectInfo
ExpireAll bool
ExpireAll bool
DeleteMarkerCount int64
}

// Start the batch expiration job, resumes if there was a pending job via "job.ID"
@@ -534,40 +558,58 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
return err
}

ctx, cancel := context.WithCancel(ctx)
defer cancel()
ctx, cancelCause := context.WithCancelCause(ctx)
defer cancelCause(nil)

results := make(chan itemOrErr[ObjectInfo], workerSize)
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
Marker: lastObject,
LatestOnly: false, // we need to visit all versions of the object to implement purge: retainVersions
VersionsSort: WalkVersionsSortDesc,
}); err != nil {
// Do not need to retry if we can't list objects on source.
return err
}
go func() {
prefixes := r.Prefix.F()
if len(prefixes) == 0 {
prefixes = []string{""}
}
for _, prefix := range prefixes {
prefixResultCh := make(chan itemOrErr[ObjectInfo], workerSize)
err := api.Walk(ctx, r.Bucket, prefix, prefixResultCh, WalkOptions{
Marker: lastObject,
LatestOnly: false, // we need to visit all versions of the object to implement purge: retainVersions
VersionsSort: WalkVersionsSortDesc,
})
if err != nil {
cancelCause(err)
xioutil.SafeClose(results)
return
}
for result := range prefixResultCh {
results <- result
}
}
xioutil.SafeClose(results)
}()

// Goroutine to periodically save batch-expire job's in-memory state
saverQuitCh := make(chan struct{})
go func() {
saveTicker := time.NewTicker(10 * time.Second)
defer saveTicker.Stop()
for {
quit := false
after := time.Minute
for !quit {
select {
case <-saveTicker.C:
// persist in-memory state to disk after every 10secs.
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))

case <-ctx.Done():
// persist in-memory state immediately before exiting due to context cancellation.
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
return

quit = true
case <-saverQuitCh:
// persist in-memory state immediately to disk.
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
return
quit = true
}

if quit {
// save immediately if we are quitting
after = 0
}

ctx, cancel := context.WithTimeout(GlobalContext, 30*time.Second) // independent context
batchLogIf(ctx, ri.updateAfter(ctx, api, after, job))
cancel()
}
}()

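Two moves matter here: the job now lists several prefixes sequentially from a goroutine into one results channel, and it switches to context.WithCancelCause (Go 1.20+) so a Walk failure can be recorded as the cancellation cause and recovered later with context.Cause. A reduced sketch of the pattern, assuming toy walk/channel types in place of the real ones:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// walk is a stand-in for api.Walk over one prefix.
func walk(prefix string, out chan<- int) error {
	if prefix == "b/" {
		return errors.New("listing failed")
	}
	out <- 1
	return nil
}

func main() {
	ctx, cancelCause := context.WithCancelCause(context.Background())
	defer cancelCause(nil) // nil cause on the happy path

	results := make(chan int)
	go func() {
		defer close(results)
		for _, prefix := range []string{"a/", "b/"} {
			if err := walk(prefix, results); err != nil {
				// Record *why* we are stopping, then bail out.
				cancelCause(fmt.Errorf("walk %q: %w", prefix, err))
				return
			}
		}
	}()

	for range results {
	}
	// context.Cause returns the error passed to cancelCause, letting
	// the consumer distinguish failure from normal completion.
	if err := context.Cause(ctx); err != nil {
		fmt.Println("job failed:", err)
	}
}
```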
@@ -583,76 +625,115 @@
matchedFilter BatchJobExpireFilter
versionsCount int
toDel []expireObjInfo
failed bool
done bool
)
failed := true
for result := range results {
if result.Err != nil {
failed = true
batchLogIf(ctx, result.Err)
continue
deleteMarkerCountMap := map[string]int64{}
pushToExpire := func() {
// set preObject deleteMarkerCount
if len(toDel) > 0 {
lastDelIndex := len(toDel) - 1
lastDel := toDel[lastDelIndex]
if lastDel.ExpireAll {
toDel[lastDelIndex].DeleteMarkerCount = deleteMarkerCountMap[lastDel.Name]
// delete the key
delete(deleteMarkerCountMap, lastDel.Name)
}
}

// Apply filter to find the matching rule to apply expiry
// actions accordingly.
// nolint:gocritic
if result.Item.IsLatest {
// send down filtered entries to be deleted using
// DeleteObjects method
if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
xfer := make([]expireObjInfo, len(toDel))
copy(xfer, toDel)

var done bool
select {
case <-ctx.Done():
done = true
case expireCh <- xfer:
toDel = toDel[:0] // resetting toDel
}
if done {
break
}
// send down filtered entries to be deleted using
// DeleteObjects method
if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
xfer := make([]expireObjInfo, len(toDel))
copy(xfer, toDel)
select {
case expireCh <- xfer:
toDel = toDel[:0] // resetting toDel
case <-ctx.Done():
done = true
}
var match BatchJobExpireFilter
var found bool
for _, rule := range r.Rules {
if rule.Matches(result.Item, now) {
match = rule
found = true
break
}
}
if !found {
continue
}

prevObj = result.Item
matchedFilter = match
versionsCount = 1
// Include the latest version
if matchedFilter.Purge.RetainVersions == 0 {
toDel = append(toDel, expireObjInfo{
ObjectInfo: result.Item,
ExpireAll: true,
})
continue
}
} else if prevObj.Name == result.Item.Name {
if matchedFilter.Purge.RetainVersions == 0 {
continue // including latest version in toDel suffices, skipping other versions
}
versionsCount++
} else {
continue
}

if versionsCount <= matchedFilter.Purge.RetainVersions {
continue // retain versions
}
toDel = append(toDel, expireObjInfo{
ObjectInfo: result.Item,
})
}
for {
select {
case result, ok := <-results:
if !ok {
done = true
break
}
if result.Err != nil {
failed = true
batchLogIf(ctx, result.Err)
continue
}
if result.Item.DeleteMarker {
deleteMarkerCountMap[result.Item.Name]++
}
// Apply filter to find the matching rule to apply expiry
// actions accordingly.
// nolint:gocritic
if result.Item.IsLatest {
var match BatchJobExpireFilter
var found bool
for _, rule := range r.Rules {
if rule.Matches(result.Item, now) {
match = rule
found = true
break
}
}
if !found {
continue
}

if prevObj.Name != result.Item.Name {
// switch the object
pushToExpire()
}

prevObj = result.Item
matchedFilter = match
versionsCount = 1
// Include the latest version
if matchedFilter.Purge.RetainVersions == 0 {
toDel = append(toDel, expireObjInfo{
ObjectInfo: result.Item,
ExpireAll: true,
})
continue
}
} else if prevObj.Name == result.Item.Name {
if matchedFilter.Purge.RetainVersions == 0 {
continue // including latest version in toDel suffices, skipping other versions
}
versionsCount++
} else {
// switch the object
pushToExpire()
// a file switched with no LatestVersion, logging it
batchLogIf(ctx, fmt.Errorf("skipping object %s, no latest version found", result.Item.Name))
continue
}

if versionsCount <= matchedFilter.Purge.RetainVersions {
continue // retain versions
}
toDel = append(toDel, expireObjInfo{
ObjectInfo: result.Item,
})
pushToExpire()
case <-ctx.Done():
done = true
}
if done {
break
}
}

if context.Cause(ctx) != nil {
xioutil.SafeClose(expireCh)
return context.Cause(ctx)
}
pushToExpire()
// Send any remaining objects downstream
if len(toDel) > 0 {
select {

@@ -1,7 +1,7 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

package cmd

import (
"time"

@@ -39,7 +39,7 @@ func (z *BatchJobExpire) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
case "Prefix":
z.Prefix, err = dc.ReadString()
err = z.Prefix.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
@@ -114,7 +114,7 @@ func (z *BatchJobExpire) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
err = en.WriteString(z.Prefix)
err = z.Prefix.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
@@ -171,7 +171,11 @@ func (z *BatchJobExpire) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.AppendString(o, z.Bucket)
// string "Prefix"
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
o = msgp.AppendString(o, z.Prefix)
o, err = z.Prefix.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
// string "NotificationCfg"
o = append(o, 0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x66, 0x67)
o, err = z.NotificationCfg.MarshalMsg(o)
@@ -230,7 +234,7 @@ func (z *BatchJobExpire) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
case "Prefix":
z.Prefix, bts, err = msgp.ReadStringBytes(bts)
bts, err = z.Prefix.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
@@ -280,7 +284,7 @@ func (z *BatchJobExpire) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobExpire) Msgsize() (s int) {
s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 16 + z.NotificationCfg.Msgsize() + 6 + z.Retry.Msgsize() + 6 + msgp.ArrayHeaderSize
s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + z.Prefix.Msgsize() + 16 + z.NotificationCfg.Msgsize() + 6 + z.Retry.Msgsize() + 6 + msgp.ArrayHeaderSize
for za0001 := range z.Rules {
s += z.Rules[za0001].Msgsize()
}
@@ -306,7 +310,7 @@ func (z *BatchJobExpireFilter) DecodeMsg(dc *msgp.Reader) (err error) {
}
switch msgp.UnsafeString(field) {
case "OlderThan":
z.OlderThan, err = dc.ReadDuration()
err = z.OlderThan.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@@ -433,7 +437,7 @@ func (z *BatchJobExpireFilter) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
err = en.WriteDuration(z.OlderThan)
err = z.OlderThan.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@@ -544,7 +548,11 @@ func (z *BatchJobExpireFilter) MarshalMsg(b []byte) (o []byte, err error) {
// map header, size 8
// string "OlderThan"
o = append(o, 0x88, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
o = msgp.AppendDuration(o, z.OlderThan)
o, err = z.OlderThan.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
}
// string "CreatedBefore"
o = append(o, 0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65)
if z.CreatedBefore == nil {
@@ -613,7 +621,7 @@ func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
switch msgp.UnsafeString(field) {
case "OlderThan":
z.OlderThan, bts, err = msgp.ReadDurationBytes(bts)
bts, err = z.OlderThan.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@@ -734,7 +742,7 @@ func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobExpireFilter) Msgsize() (s int) {
s = 1 + 10 + msgp.DurationSize + 14
s = 1 + 10 + z.OlderThan.Msgsize() + 14
if z.CreatedBefore == nil {
s += msgp.NilSize
} else {

@@ -1,7 +1,7 @@
package cmd

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

package cmd

import (
"bytes"
"testing"

@@ -18,9 +18,10 @@
package cmd

import (
"slices"
"testing"

"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
)

func TestParseBatchJobExpire(t *testing.T) {
@@ -32,7 +33,7 @@ expire: # Expire objects that match a condition
rules:
- type: object # regular objects with zero or more older versions
name: NAME # match object names that satisfy the wildcard expression.
olderThan: 70h # match objects older than this value
olderThan: 7d10h # match objects older than this value
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
tags:
- key: name
@@ -64,8 +65,61 @@ expire: # Expire objects that match a condition
delay: 500ms # least amount of delay between each retry
`
var job BatchJobRequest
err := yaml.UnmarshalStrict([]byte(expireYaml), &job)
err := yaml.Unmarshal([]byte(expireYaml), &job)
if err != nil {
t.Fatal("Failed to parse batch-job-expire yaml", err)
}
if !slices.Equal(job.Expire.Prefix.F(), []string{"myprefix"}) {
t.Fatal("Failed to parse batch-job-expire yaml")
}

multiPrefixExpireYaml := `
expire: # Expire objects that match a condition
apiVersion: v1
bucket: mybucket # Bucket where this batch job will expire matching objects from
prefix: # (Optional) Prefix under which this job will expire objects matching the rules below.
- myprefix
- myprefix1
rules:
- type: object # regular objects with zero or more older versions
name: NAME # match object names that satisfy the wildcard expression.
olderThan: 7d10h # match objects older than this value
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
tags:
- key: name
value: pick* # match objects with tag 'name', all values starting with 'pick'
metadata:
- key: content-type
value: image/* # match objects with 'content-type', all values starting with 'image/'
size:
lessThan: "10MiB" # match objects with size less than this value (e.g. 10MiB)
greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB)
purge:
# retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
# retainVersions: 5 # keep the latest 5 versions of the object.

- type: deleted # objects with delete marker as their latest version
name: NAME # match object names that satisfy the wildcard expression.
olderThan: 10h # match objects older than this value (e.g. 7d10h31s)
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
purge:
# retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
# retainVersions: 5 # keep the latest 5 versions of the object including delete markers.

notify:
endpoint: https://notify.endpoint # notification endpoint to receive job completion status
token: Bearer xxxxx # optional authentication token for the notification endpoint

retry:
attempts: 10 # number of retries for the job before giving up
delay: 500ms # least amount of delay between each retry
`
var multiPrefixJob BatchJobRequest
err = yaml.Unmarshal([]byte(multiPrefixExpireYaml), &multiPrefixJob)
if err != nil {
t.Fatal("Failed to parse batch-job-expire yaml", err)
}
if !slices.Equal(multiPrefixJob.Expire.Prefix.F(), []string{"myprefix", "myprefix1"}) {
t.Fatal("Failed to parse batch-job-expire yaml")
}
}

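The test's `olderThan` moves from `70h` to `7d10h` because the field is now an xtime.Duration (from minio/pkg/v3/xtime), which accepts day suffixes that the standard time.ParseDuration rejects. The real xtime API isn't shown in this diff; the hypothetical parser below only illustrates the idea of splitting off a day component:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// parseDays handles a leading "<n>d" component that time.ParseDuration
// rejects, then delegates the remainder to the standard parser.
// Hypothetical helper for illustration; not the actual xtime code.
func parseDays(s string) (time.Duration, error) {
	i := strings.IndexByte(s, 'd')
	if i < 0 {
		return time.ParseDuration(s)
	}
	days, err := strconv.Atoi(s[:i])
	if err != nil {
		return 0, err
	}
	var rest time.Duration
	if i+1 < len(s) {
		if rest, err = time.ParseDuration(s[i+1:]); err != nil {
			return 0, err
		}
	}
	return time.Duration(days)*24*time.Hour + rest, nil
}

func main() {
	d, _ := parseDays("7d10h")
	fmt.Println(d == 178*time.Hour) // true: 7*24h + 10h
}
```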
@@ -25,6 +25,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"math/rand"
"net/http"
"net/url"
@@ -39,7 +40,6 @@ import (
"github.com/lithammer/shortuuid/v4"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"
@@ -47,7 +47,6 @@ import (
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/pkg/v3/console"
"github.com/minio/pkg/v3/env"
@@ -61,6 +60,8 @@ var globalBatchConfig batch.Config
const (
// Keep the completed/failed job stats 3 days before removing it
oldJobsExpiration = 3 * 24 * time.Hour

redactedText = "**REDACTED**"
)

// BatchJobRequest this is an internal data structure not for external consumption.
@@ -74,6 +75,29 @@ type BatchJobRequest struct {
ctx context.Context `msg:"-"`
}

// RedactSensitive will redact any sensitive information in b.
func (j *BatchJobRequest) RedactSensitive() {
j.Replicate.RedactSensitive()
j.Expire.RedactSensitive()
j.KeyRotate.RedactSensitive()
}

// RedactSensitive will redact any sensitive information in b.
func (r *BatchJobReplicateV1) RedactSensitive() {
if r == nil {
return
}
if r.Target.Creds.SecretKey != "" {
r.Target.Creds.SecretKey = redactedText
}
if r.Target.Creds.SessionToken != "" {
r.Target.Creds.SessionToken = redactedText
}
}

// RedactSensitive will redact any sensitive information in b.
func (r *BatchJobKeyRotateV1) RedactSensitive() {}

func notifyEndpoint(ctx context.Context, ri *batchJobInfo, endpoint, token string) error {
if endpoint == "" {
return nil

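The RedactSensitive methods scrub secrets in place before a job description is surfaced (e.g. logged or returned to an admin client). The pattern is a nil-safe pointer receiver plus overwrite-with-sentinel; a minimal standalone sketch with stand-in types:

```go
package main

import "fmt"

const redactedText = "**REDACTED**"

// Creds and ReplicateJob are reduced stand-ins for the batch job types.
type Creds struct{ SecretKey, SessionToken string }

type ReplicateJob struct{ Target struct{ Creds Creds } }

// RedactSensitive overwrites secrets in place; the nil check makes it
// safe to call on an absent job section, as the dispatcher does.
func (r *ReplicateJob) RedactSensitive() {
	if r == nil {
		return
	}
	if r.Target.Creds.SecretKey != "" {
		r.Target.Creds.SecretKey = redactedText
	}
	if r.Target.Creds.SessionToken != "" {
		r.Target.Creds.SessionToken = redactedText
	}
}

func main() {
	j := &ReplicateJob{}
	j.Target.Creds.SecretKey = "s3cr3t"
	j.RedactSensitive()
	fmt.Println(j.Target.Creds.SecretKey) // **REDACTED**
}
```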
@ -117,7 +141,7 @@ func (r BatchJobReplicateV1) Notify(ctx context.Context, ri *batchJobInfo) error
|
||||
}
|
||||
|
||||
// ReplicateFromSource - this is not implemented yet where source is 'remote' and target is local.
|
||||
func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api ObjectLayer, core *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error {
|
||||
func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api ObjectLayer, core *minio.Core, srcObjInfo ObjectInfo, retry bool) error {
|
||||
srcBucket := r.Source.Bucket
|
||||
tgtBucket := r.Target.Bucket
|
||||
srcObject := srcObjInfo.Name
|
||||
@ -164,7 +188,7 @@ func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api Objec
|
||||
}
|
||||
return r.copyWithMultipartfromSource(ctx, api, core, srcObjInfo, opts, partsCount)
|
||||
}
|
||||
gopts := miniogo.GetObjectOptions{
|
||||
gopts := minio.GetObjectOptions{
|
||||
VersionID: srcObjInfo.VersionID,
|
||||
}
|
||||
if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil {
|
||||
@ -185,7 +209,7 @@ func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api Objec
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, opts ObjectOptions, partsCount int) (err error) {
|
||||
func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, api ObjectLayer, c *minio.Core, srcObjInfo ObjectInfo, opts ObjectOptions, partsCount int) (err error) {
|
||||
srcBucket := r.Source.Bucket
|
||||
tgtBucket := r.Target.Bucket
|
||||
srcObject := srcObjInfo.Name
|
||||
@ -225,8 +249,8 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
|
||||
pInfo PartInfo
|
||||
)
|
||||
|
||||
for i := 0; i < partsCount; i++ {
|
||||
gopts := miniogo.GetObjectOptions{
|
||||
for i := range partsCount {
|
||||
gopts := minio.GetObjectOptions{
|
||||
VersionID: srcObjInfo.VersionID,
|
||||
PartNumber: i + 1,
|
||||
}
|
||||
@ -276,8 +300,12 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
|
||||
}
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
|
||||
retryAttempts := job.Replicate.Flags.Retry.Attempts
|
||||
if retryAttempts <= 0 {
|
||||
retryAttempts = batchReplJobDefaultRetries
|
||||
}
|
||||
delay := job.Replicate.Flags.Retry.Delay
|
||||
if delay == 0 {
|
||||
if delay <= 0 {
|
||||
delay = batchReplJobDefaultRetryDelay
|
||||
}
|
||||
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
@@ -287,22 +315,22 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
    isStorageClassOnly := len(r.Flags.Filter.Metadata) == 1 && strings.EqualFold(r.Flags.Filter.Metadata[0].Key, xhttp.AmzStorageClass)

    skip := func(oi ObjectInfo) (ok bool) {
-       if r.Flags.Filter.OlderThan > 0 && time.Since(oi.ModTime) < r.Flags.Filter.OlderThan {
+       if r.Flags.Filter.OlderThan > 0 && time.Since(oi.ModTime) < r.Flags.Filter.OlderThan.D() {
            // skip all objects that are newer than specified older duration
            return true
        }

-       if r.Flags.Filter.NewerThan > 0 && time.Since(oi.ModTime) >= r.Flags.Filter.NewerThan {
+       if r.Flags.Filter.NewerThan > 0 && time.Since(oi.ModTime) >= r.Flags.Filter.NewerThan.D() {
            // skip all objects that are older than specified newer duration
            return true
        }

-       if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(oi.ModTime) {
+       if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.After(oi.ModTime) {
            // skip all objects that are created before the specified time.
            return true
        }

-       if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(oi.ModTime) {
+       if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.Before(oi.ModTime) {
            // skip all objects that are created after the specified time.
            return true
        }
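Two distinct changes land here: the .D() calls adapt the comparisons to the new xtime.Duration filter type (see the BatchReplicateFilter hunk near the end of this changeset), and the Before/After swaps correct the created-after/created-before window so that objects whose ModTime falls outside [createdAfter, createdBefore] are the ones skipped. A standalone sanity check of the corrected predicates, with invented names:

// Sanity check for the corrected creation-window skip logic.
package batch

import "time"

func skipByCreationWindow(modTime, createdAfter, createdBefore time.Time) bool {
    // skip objects created before the createdAfter bound
    if !createdAfter.IsZero() && createdAfter.After(modTime) {
        return true
    }
    // skip objects created after the createdBefore bound
    if !createdBefore.IsZero() && createdBefore.Before(modTime) {
        return true
    }
    return false
}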
@@ -353,7 +381,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay

    cred := r.Source.Creds

-   c, err := miniogo.New(u.Host, &miniogo.Options{
+   c, err := minio.New(u.Host, &minio.Options{
        Creds:     credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
        Secure:    u.Scheme == "https",
        Transport: getRemoteInstanceTransport(),
@@ -364,7 +392,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
    }

    c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)
-   core := &miniogo.Core{Client: c}
+   core := &minio.Core{Client: c}

    workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_REPLICATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
    if err != nil {
@@ -377,7 +405,6 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
        return err
    }

-   retryAttempts := ri.RetryAttempts
    retry := false
    for attempts := 1; attempts <= retryAttempts; attempts++ {
        attempts := attempts
@@ -385,12 +412,27 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
        s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3
        minioSrc := r.Source.Type == BatchJobReplicateResourceMinIO
        ctx, cancel := context.WithCancel(ctx)
-       objInfoCh := c.ListObjects(ctx, r.Source.Bucket, miniogo.ListObjectsOptions{
-           Prefix:       r.Source.Prefix,
-           WithVersions: minioSrc,
-           Recursive:    true,
-           WithMetadata: true,
-       })
+       objInfoCh := make(chan minio.ObjectInfo, 1)
+       go func() {
+           prefixes := r.Source.Prefix.F()
+           if len(prefixes) == 0 {
+               prefixes = []string{""}
+           }
+           for _, prefix := range prefixes {
+               prefixObjInfoCh := c.ListObjects(ctx, r.Source.Bucket, minio.ListObjectsOptions{
+                   Prefix:       prefix,
+                   WithVersions: minioSrc,
+                   Recursive:    true,
+                   WithMetadata: true,
+               })
+               for obj := range prefixObjInfoCh {
+                   objInfoCh <- obj
+               }
+           }
+           xioutil.SafeClose(objInfoCh)
+       }()
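The new listing goroutine fans several per-prefix listings into a single channel, draining each prefix in order and closing the output exactly once via xioutil.SafeClose (MinIO's close-once helper). The bare pattern, reduced to its essentials with plain close standing in for SafeClose:

// Minimal fan-in sketch: stream items from several sources into one
// channel, in order, closing the output exactly once.
package batch

func fanIn[T any](sources []func(chan<- T)) <-chan T {
    out := make(chan T, 1)
    go func() {
        defer close(out) // single close after every source is drained
        for _, src := range sources {
            src(out)
        }
    }()
    return out
}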

        prevObj := ""
        skipReplicate := false

@@ -401,7 +443,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
            // all user metadata or just storageClass. If its only storageClass
            // List() already returns relevant information for filter to be applied.
            if isMetadata && !isStorageClassOnly {
-               oi2, err := c.StatObject(ctx, r.Source.Bucket, obj.Key, miniogo.StatObjectOptions{})
+               oi2, err := c.StatObject(ctx, r.Source.Bucket, obj.Key, minio.StatObjectOptions{})
                if err == nil {
                    oi = toObjectInfo(r.Source.Bucket, obj.Key, oi2)
                } else {
@@ -497,7 +539,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
}

// toObjectInfo converts minio.ObjectInfo to ObjectInfo
-func toObjectInfo(bucket, object string, objInfo miniogo.ObjectInfo) ObjectInfo {
+func toObjectInfo(bucket, object string, objInfo minio.ObjectInfo) ObjectInfo {
    tags, _ := tags.MapToObjectTags(objInfo.UserTags)
    oi := ObjectInfo{
        Bucket: bucket,
@@ -533,14 +575,12 @@ func toObjectInfo(bucket, object string, objInfo miniogo.ObjectInfo) ObjectInfo
        oi.UserDefined[xhttp.AmzStorageClass] = objInfo.StorageClass
    }

-   for k, v := range objInfo.UserMetadata {
-       oi.UserDefined[k] = v
-   }
+   maps.Copy(oi.UserDefined, objInfo.UserMetadata)

    return oi
}
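maps.Copy from the standard library (Go 1.21+) replaces the hand-rolled copy loop here, and again in the key-rotation hunks further down; like the loop it replaces, it overwrites keys already present in the destination. For example:

// maps.Copy copies every key/value from src into dst, overwriting
// duplicates, exactly like the removed for-range loop.
package main

import (
    "fmt"
    "maps"
)

func main() {
    dst := map[string]string{"x-amz-storage-class": "STANDARD"}
    src := map[string]string{"content-type": "image/png"}
    maps.Copy(dst, src)
    fmt.Println(dst) // map[content-type:image/png x-amz-storage-class:STANDARD]
}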

-func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLayer, remoteClnt *minio.Client, entries []ObjectInfo) error {
+func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLayer, remoteClnt *minio.Client, entries []ObjectInfo, prefix string) error {
    input := make(chan minio.SnowballObject, 1)
    opts := minio.SnowballOptions{
        Opts: minio.PutObjectOptions{},
@@ -562,6 +602,10 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa
                continue
            }

+           if prefix != "" {
+               entry.Name = pathJoin(prefix, entry.Name)
+           }
+
            snowballObj := minio.SnowballObject{
                // Create path to store objects within the bucket.
                Key: entry.Name,
@@ -575,12 +619,12 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa
                },
            }

-           opts, err := batchReplicationOpts(ctx, "", gr.ObjInfo)
+           opts, _, err := batchReplicationOpts(ctx, "", gr.ObjInfo)
            if err != nil {
                batchLogIf(ctx, err)
                continue
            }

+           // TODO: I am not sure we read it back, but we aren't sending whether checksums are single/multipart.
            for k, vals := range opts.Header() {
                for _, v := range vals {
                    snowballObj.Headers.Add(k, v)
@@ -596,7 +640,7 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa
}

// ReplicateToTarget read from source and replicate to configured target
-func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error {
+func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectLayer, c *minio.Core, srcObjInfo ObjectInfo, retry bool) error {
    srcBucket := r.Source.Bucket
    tgtBucket := r.Target.Bucket
    tgtPrefix := r.Target.Prefix
@@ -605,9 +649,9 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL

    if srcObjInfo.DeleteMarker || !srcObjInfo.VersionPurgeStatus.Empty() {
        if retry && !s3Type {
-           if _, err := c.StatObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), miniogo.StatObjectOptions{
+           if _, err := c.StatObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), minio.StatObjectOptions{
                VersionID: srcObjInfo.VersionID,
-               Internal: miniogo.AdvancedGetOptions{
+               Internal: minio.AdvancedGetOptions{
                    ReplicationProxyRequest: "false",
                },
            }); isErrMethodNotAllowed(ErrorRespToObjectError(err, tgtBucket, pathJoin(tgtPrefix, srcObject))) {
@@ -624,19 +668,19 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL
            dmVersionID = ""
            versionID = ""
        }
-       return c.RemoveObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), miniogo.RemoveObjectOptions{
+       return c.RemoveObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), minio.RemoveObjectOptions{
            VersionID: versionID,
-           Internal: miniogo.AdvancedRemoveOptions{
+           Internal: minio.AdvancedRemoveOptions{
                ReplicationDeleteMarker: dmVersionID != "",
                ReplicationMTime:        srcObjInfo.ModTime,
-               ReplicationStatus:       miniogo.ReplicationStatusReplica,
+               ReplicationStatus:       minio.ReplicationStatusReplica,
                ReplicationRequest:      true, // always set this to distinguish between `mc mirror` replication and serverside
            },
        })
    }

    if retry && !s3Type { // when we are retrying avoid copying if necessary.
-       gopts := miniogo.GetObjectOptions{}
+       gopts := minio.GetObjectOptions{}
        if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil {
            return err
        }
@@ -665,14 +709,14 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL
        return err
    }

-   putOpts, err := batchReplicationOpts(ctx, "", objInfo)
+   putOpts, isMP, err := batchReplicationOpts(ctx, "", objInfo)
    if err != nil {
        return err
    }
    if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 {
-       putOpts.Internal = miniogo.AdvancedPutOptions{}
+       putOpts.Internal = minio.AdvancedPutOptions{}
    }
-   if objInfo.isMultipart() {
+   if isMP {
        if err := replicateObjectWithMultipart(ctx, c, tgtBucket, pathJoin(tgtPrefix, objInfo.Name), rd, objInfo, putOpts); err != nil {
            return err
        }
@@ -725,7 +769,7 @@ const (

    batchReplJobAPIVersion        = "v1"
    batchReplJobDefaultRetries    = 3
-   batchReplJobDefaultRetryDelay = 250 * time.Millisecond
+   batchReplJobDefaultRetryDelay = time.Second
)

func getJobPath(job BatchJobRequest) string {
@@ -753,22 +797,10 @@ func (ri *batchJobInfo) loadOrInit(ctx context.Context, api ObjectLayer, job Bat
    switch {
    case job.Replicate != nil:
        ri.Version = batchReplVersionV1
-       ri.RetryAttempts = batchReplJobDefaultRetries
-       if job.Replicate.Flags.Retry.Attempts > 0 {
-           ri.RetryAttempts = job.Replicate.Flags.Retry.Attempts
-       }
    case job.KeyRotate != nil:
        ri.Version = batchKeyRotateVersionV1
-       ri.RetryAttempts = batchKeyRotateJobDefaultRetries
-       if job.KeyRotate.Flags.Retry.Attempts > 0 {
-           ri.RetryAttempts = job.KeyRotate.Flags.Retry.Attempts
-       }
    case job.Expire != nil:
        ri.Version = batchExpireVersionV1
-       ri.RetryAttempts = batchExpireJobDefaultRetries
-       if job.Expire.Retry.Attempts > 0 {
-           ri.RetryAttempts = job.Expire.Retry.Attempts
-       }
    }
    return nil
}
@@ -848,21 +880,23 @@ func (ri *batchJobInfo) clone() *batchJobInfo {
    defer ri.mu.RUnlock()

    return &batchJobInfo{
-       Version:          ri.Version,
-       JobID:            ri.JobID,
-       JobType:          ri.JobType,
-       RetryAttempts:    ri.RetryAttempts,
-       Complete:         ri.Complete,
-       Failed:           ri.Failed,
-       StartTime:        ri.StartTime,
-       LastUpdate:       ri.LastUpdate,
-       Bucket:           ri.Bucket,
-       Object:           ri.Object,
-       Objects:          ri.Objects,
-       ObjectsFailed:    ri.ObjectsFailed,
-       BytesTransferred: ri.BytesTransferred,
-       BytesFailed:      ri.BytesFailed,
-       Attempts:         ri.Attempts,
+       Version:             ri.Version,
+       JobID:               ri.JobID,
+       JobType:             ri.JobType,
+       RetryAttempts:       ri.RetryAttempts,
+       Complete:            ri.Complete,
+       Failed:              ri.Failed,
+       StartTime:           ri.StartTime,
+       LastUpdate:          ri.LastUpdate,
+       Bucket:              ri.Bucket,
+       Object:              ri.Object,
+       Objects:             ri.Objects,
+       ObjectsFailed:       ri.ObjectsFailed,
+       DeleteMarkers:       ri.DeleteMarkers,
+       DeleteMarkersFailed: ri.DeleteMarkersFailed,
+       BytesTransferred:    ri.BytesTransferred,
+       BytesFailed:         ri.BytesFailed,
+       Attempts:            ri.Attempts,
    }
}

@@ -961,11 +995,22 @@ func (ri *batchJobInfo) updateAfter(ctx context.Context, api ObjectLayer, durati
// Note: to be used only with batch jobs that affect multiple versions through
// a single action. e.g batch-expire has an option to expire all versions of an
// object which matches the given filters.
-func (ri *batchJobInfo) trackMultipleObjectVersions(bucket string, info ObjectInfo, success bool) {
+func (ri *batchJobInfo) trackMultipleObjectVersions(info expireObjInfo, success bool) {
    if ri == nil {
        return
    }

    ri.mu.Lock()
    defer ri.mu.Unlock()

    if success {
-       ri.Objects += int64(info.NumVersions)
+       ri.Bucket = info.Bucket
+       ri.Object = info.Name
+       ri.Objects += int64(info.NumVersions) - info.DeleteMarkerCount
+       ri.DeleteMarkers += info.DeleteMarkerCount
    } else {
-       ri.ObjectsFailed += int64(info.NumVersions)
+       ri.ObjectsFailed += int64(info.NumVersions) - info.DeleteMarkerCount
+       ri.DeleteMarkersFailed += info.DeleteMarkerCount
    }
}
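Delete markers now get their own tally: Objects counts only real versions, with expireObjInfo (the batch-expire wrapper visible in the new signature) supplying NumVersions and DeleteMarkerCount. The arithmetic in isolation, with invented values — 5 versions removed, 2 of them delete markers, yields Objects += 3 and DeleteMarkers += 2:

// Illustration of the split accounting used above.
package batch

type counters struct {
    Objects       int64
    DeleteMarkers int64
}

// track splits a multi-version action into object and delete-marker counts.
func track(c *counters, numVersions, deleteMarkerCount int64) {
    c.Objects += numVersions - deleteMarkerCount
    c.DeleteMarkers += deleteMarkerCount
}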
@@ -1015,29 +1060,34 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
    globalBatchJobsMetrics.save(job.ID, ri)
    lastObject := ri.Object

+   retryAttempts := job.Replicate.Flags.Retry.Attempts
+   if retryAttempts <= 0 {
+       retryAttempts = batchReplJobDefaultRetries
+   }
    delay := job.Replicate.Flags.Retry.Delay
-   if delay == 0 {
+   if delay <= 0 {
        delay = batchReplJobDefaultRetryDelay
    }

    rnd := rand.New(rand.NewSource(time.Now().UnixNano()))

    selectObj := func(info FileInfo) (ok bool) {
-       if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan {
+       if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan.D() {
            // skip all objects that are newer than specified older duration
            return false
        }

-       if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan {
+       if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan.D() {
            // skip all objects that are older than specified newer duration
            return false
        }

-       if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(info.ModTime) {
+       if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.After(info.ModTime) {
            // skip all objects that are created before the specified time.
            return false
        }

-       if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(info.ModTime) {
+       if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.Before(info.ModTime) {
            // skip all objects that are created after the specified time.
            return false
        }
@@ -1084,7 +1134,8 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
        }

        // if one of source or target is non MinIO, just replicate the top most version like `mc mirror`
-       return !((r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3) && !info.IsLatest)
+       isSourceOrTargetS3 := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3
+       return !isSourceOrTargetS3 || info.IsLatest
    }
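The rewritten return is a pure De Morgan simplification — !(A && !B) equals !A || B — so extracting isSourceOrTargetS3 changes readability only, not behavior:

// The two forms agree for every input combination.
package batch

func deMorganEquivalent(a, b bool) bool {
    oldForm := !(a && !b)
    newForm := !a || b
    return oldForm == newForm // always true, by De Morgan's law
}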

    u, err := url.Parse(r.Target.Endpoint)
@@ -1094,7 +1145,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba

    cred := r.Target.Creds

-   c, err := miniogo.NewCore(u.Host, &miniogo.Options{
+   c, err := minio.NewCore(u.Host, &minio.Options{
        Creds:     credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
        Secure:    u.Scheme == "https",
        Transport: getRemoteInstanceTransport(),
@@ -1106,7 +1157,6 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba

    c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)

-   retryAttempts := ri.RetryAttempts
    retry := false
    for attempts := 1; attempts <= retryAttempts; attempts++ {
        attempts := attempts
@@ -1115,17 +1165,17 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
            slowCh = make(chan itemOrErr[ObjectInfo], 100)
        )

-       if !*r.Source.Snowball.Disable && r.Source.Type.isMinio() && r.Target.Type.isMinio() {
+       if r.Source.Snowball.Disable != nil && !*r.Source.Snowball.Disable && r.Source.Type.isMinio() && r.Target.Type.isMinio() {
            go func() {
                // Snowball currently needs the high level minio-go Client, not the Core one
-               cl, err := miniogo.New(u.Host, &miniogo.Options{
+               cl, err := minio.New(u.Host, &minio.Options{
                    Creds:        credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
                    Secure:       u.Scheme == "https",
                    Transport:    getRemoteInstanceTransport(),
                    BucketLookup: lookupStyle(r.Target.Path),
                })
                if err != nil {
-                   batchLogIf(ctx, err)
+                   batchLogOnceIf(ctx, err, job.ID+"minio.New")
                    return
                }
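Two defensive fixes land in this hunk: the Snowball gate nil-checks Disable before dereferencing it (a job YAML without a snowball: block would otherwise panic on the nil *bool), and errors now go through batchLogOnceIf, deduplicated per job by the job.ID-plus-call-site key. The nil-guard in isolation:

// Guarding an optional *bool config field before dereferencing it.
package batch

func snowballEnabled(disable *bool) bool {
    // unset (nil) leaves the feature off instead of panicking
    return disable != nil && !*disable
}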
@@ -1135,8 +1185,8 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
                batch := make([]ObjectInfo, 0, *r.Source.Snowball.Batch)
                writeFn := func(batch []ObjectInfo) {
                    if len(batch) > 0 {
-                       if err := r.writeAsArchive(ctx, api, cl, batch); err != nil {
-                           batchLogIf(ctx, err)
+                       if err := r.writeAsArchive(ctx, api, cl, batch, r.Target.Prefix); err != nil {
+                           batchLogOnceIf(ctx, err, job.ID+"writeAsArchive")
                            for _, b := range batch {
                                slowCh <- itemOrErr[ObjectInfo]{Item: b}
                            }
@@ -1144,7 +1194,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
                            ri.trackCurrentBucketBatch(r.Source.Bucket, batch)
                            globalBatchJobsMetrics.save(job.ID, ri)
                            // persist in-memory state to disk after every 10secs.
-                           batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
+                           batchLogOnceIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job), job.ID+"updateAfter")
                        }
                    }
                }
@@ -1184,19 +1234,32 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
        if walkQuorum == "" {
            walkQuorum = "strict"
        }
-       ctx, cancel := context.WithCancel(ctx)
+       ctx, cancelCause := context.WithCancelCause(ctx)
        // one of source/target is s3, skip delete marker and all versions under the same object name.
        s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3

-       if err := api.Walk(ctx, r.Source.Bucket, r.Source.Prefix, walkCh, WalkOptions{
-           Marker:   lastObject,
-           Filter:   selectObj,
-           AskDisks: walkQuorum,
-       }); err != nil {
-           cancel()
-           // Do not need to retry if we can't list objects on source.
-           return err
-       }
+       go func() {
+           prefixes := r.Source.Prefix.F()
+           if len(prefixes) == 0 {
+               prefixes = []string{""}
+           }
+           for _, prefix := range prefixes {
+               prefixWalkCh := make(chan itemOrErr[ObjectInfo], 100)
+               if err := api.Walk(ctx, r.Source.Bucket, prefix, prefixWalkCh, WalkOptions{
+                   Marker:   lastObject,
+                   Filter:   selectObj,
+                   AskDisks: walkQuorum,
+               }); err != nil {
+                   cancelCause(err)
+                   xioutil.SafeClose(walkCh)
+                   return
+               }
+               for obj := range prefixWalkCh {
+                   walkCh <- obj
+               }
+           }
+           xioutil.SafeClose(walkCh)
+       }()
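Because the walk now runs in a goroutine, its error can no longer be returned directly; context.WithCancelCause (standard library, Go 1.20+) carries it out instead, and the retry loop recovers it later with context.Cause(ctx), as the hunk below shows. The pattern on its own, with an invented error message:

// Propagating a background worker's error through context cancellation.
package main

import (
    "context"
    "errors"
    "fmt"
)

func main() {
    ctx, cancelCause := context.WithCancelCause(context.Background())

    go func() {
        // worker fails: record why the context was cancelled
        cancelCause(errors.New("walk failed: no quorum"))
    }()

    <-ctx.Done()
    fmt.Println(context.Cause(ctx)) // walk failed: no quorum
}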

        prevObj := ""

@@ -1204,7 +1267,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
        for res := range slowCh {
            if res.Err != nil {
                ri.Failed = true
-               batchLogIf(ctx, res.Err)
+               batchLogOnceIf(ctx, res.Err, job.ID+"res.Err")
                continue
            }
            result := res.Item
@@ -1222,7 +1285,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
                stopFn := globalBatchJobsMetrics.trace(batchJobMetricReplication, job.ID, attempts)
                success := true
                if err := r.ReplicateToTarget(ctx, api, c, result, retry); err != nil {
-                   if miniogo.ToErrorResponse(err).Code == "PreconditionFailed" {
+                   if minio.ToErrorResponse(err).Code == "PreconditionFailed" {
                        // pre-condition failed means we already have the object copied over.
                        return
                    }
@@ -1231,7 +1294,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
                        return
                    }
                    stopFn(result, err)
-                   batchLogIf(ctx, err)
+                   batchLogOnceIf(ctx, err, job.ID+"ReplicateToTarget")
                    success = false
                } else {
                    stopFn(result, nil)
@@ -1239,7 +1302,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
                ri.trackCurrentBucketObject(r.Source.Bucket, result, success, attempts)
                globalBatchJobsMetrics.save(job.ID, ri)
                // persist in-memory state to disk after every 10secs.
-               batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
+               batchLogOnceIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job), job.ID+"updateAfter2")

                if wait := globalBatchConfig.ReplicationWait(); wait > 0 {
                    time.Sleep(wait)
@@ -1247,20 +1310,23 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
            }()
        }
        wk.Wait()

+       // Do not need to retry if we can't list objects on source.
+       if context.Cause(ctx) != nil {
+           return context.Cause(ctx)
+       }
        ri.RetryAttempts = attempts
        ri.Complete = ri.ObjectsFailed == 0
        ri.Failed = ri.ObjectsFailed > 0

        globalBatchJobsMetrics.save(job.ID, ri)
        // persist in-memory state to disk.
-       batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
+       batchLogOnceIf(ctx, ri.updateAfter(ctx, api, 0, job), job.ID+"updateAfter3")

        if err := r.Notify(ctx, ri); err != nil {
-           batchLogIf(ctx, fmt.Errorf("unable to notify %v", err))
+           batchLogOnceIf(ctx, fmt.Errorf("unable to notify %v", err), job.ID+"notify")
        }

-       cancel()
+       cancelCause(nil)
        if ri.Failed {
            ri.ObjectsFailed = 0
            ri.Bucket = ""
@@ -1395,7 +1461,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
        cred = r.Source.Creds
        remoteBkt = r.Source.Bucket
        pathStyle = r.Source.Path
-
    }

    u, err := url.Parse(remoteEp)
@@ -1403,7 +1468,7 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
        return err
    }

-   c, err := miniogo.NewCore(u.Host, &miniogo.Options{
+   c, err := minio.NewCore(u.Host, &minio.Options{
        Creds:     credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
        Secure:    u.Scheme == "https",
        Transport: getRemoteInstanceTransport(),
@@ -1416,7 +1481,7 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,

    vcfg, err := c.GetBucketVersioning(ctx, remoteBkt)
    if err != nil {
-       if miniogo.ToErrorResponse(err).Code == "NoSuchBucket" {
+       if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
            return batchReplicationJobError{
                Code:        "NoSuchTargetBucket",
                Description: "The specified target bucket does not exist",
@@ -1521,19 +1586,19 @@ func (j *BatchJobRequest) load(ctx context.Context, api ObjectLayer, name string
    return err
}

-func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
+func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, isMP bool, err error) {
    // TODO: support custom storage class for remote replication
-   putOpts, err = putReplicationOpts(ctx, "", objInfo, 0)
+   putOpts, isMP, err = putReplicationOpts(ctx, "", objInfo)
    if err != nil {
-       return putOpts, err
+       return putOpts, isMP, err
    }
-   putOpts.Internal = miniogo.AdvancedPutOptions{
+   putOpts.Internal = minio.AdvancedPutOptions{
        SourceVersionID:    objInfo.VersionID,
        SourceMTime:        objInfo.ModTime,
        SourceETag:         objInfo.ETag,
        ReplicationRequest: true,
    }
-   return putOpts, nil
+   return putOpts, isMP, nil
}

// ListBatchJobs - lists all currently active batch jobs, optionally takes {jobType}
@@ -1547,9 +1612,6 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
    }

    jobType := r.Form.Get("jobType")
-   if jobType == "" {
-       jobType = string(madmin.BatchJobReplicate)
-   }

    resultCh := make(chan itemOrErr[ObjectInfo])

@@ -1567,6 +1629,9 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
            writeErrorResponseJSON(ctx, w, toAPIError(ctx, result.Err), r.URL)
            return
        }
+       if strings.HasPrefix(result.Item.Name, batchJobReportsPrefix+slashSeparator) {
+           continue
+       }
        req := &BatchJobRequest{}
        if err := req.load(ctx, objectAPI, result.Item.Name); err != nil {
            if !errors.Is(err, errNoSuchJob) {
@@ -1575,7 +1640,7 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
            continue
        }

-       if jobType == string(req.Type()) {
+       if jobType == string(req.Type()) || jobType == "" {
            listResult.Jobs = append(listResult.Jobs, madmin.BatchJobResult{
                ID:   req.ID,
                Type: req.Type(),
@@ -1665,6 +1730,8 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques
        return
    }

+   // Remove sensitive fields.
+   req.RedactSensitive()
    buf, err := yaml.Marshal(req)
    if err != nil {
        batchLogIf(ctx, err)
@@ -1675,7 +1742,7 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques
    w.Write(buf)
}

-// StarBatchJob queue a new job for execution
+// StartBatchJob queue a new job for execution
func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

@@ -1684,7 +1751,7 @@ func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request)
        return
    }

-   buf, err := io.ReadAll(ioutil.HardLimitReader(r.Body, humanize.MiByte*4))
+   buf, err := io.ReadAll(xioutil.HardLimitReader(r.Body, humanize.MiByte*4))
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
        return
@@ -1772,7 +1839,7 @@ func (a adminAPIHandlers) CancelBatchJob(w http.ResponseWriter, r *http.Request)
        return
    }

-   if _, success := proxyRequestByToken(ctx, w, r, jobID); success {
+   if _, proxied, _ := proxyRequestByToken(ctx, w, r, jobID, true); proxied {
        return
    }

@@ -1883,6 +1950,9 @@ func (j *BatchJobPool) resume(randomWait func() time.Duration) {
            batchLogIf(j.ctx, result.Err)
            continue
        }
+       if strings.HasPrefix(result.Item.Name, batchJobReportsPrefix+slashSeparator) {
+           continue
+       }
        // ignore batch-replicate.bin and batch-rotate.bin entries
        if strings.HasSuffix(result.Item.Name, slashSeparator) {
            continue
@@ -1953,7 +2023,6 @@ func (j *BatchJobPool) AddWorker() {
                    }
                }
            }
-           job.delete(j.ctx, j.objLayer)
            j.canceler(job.ID, false)
        case <-j.workerKillCh:
            return
@@ -2014,7 +2083,9 @@ func (j *BatchJobPool) canceler(jobID string, cancel bool) error {
            canceler()
        }
    }
-   delete(j.jobCancelers, jobID)
+   if cancel {
+       delete(j.jobCancelers, jobID)
+   }
    return nil
}

@@ -2075,12 +2146,14 @@ func (ri *batchJobInfo) metric() madmin.JobMetric {
    switch ri.JobType {
    case string(madmin.BatchJobReplicate):
        m.Replicate = &madmin.ReplicateInfo{
-           Bucket:           ri.Bucket,
-           Object:           ri.Object,
-           Objects:          ri.Objects,
-           ObjectsFailed:    ri.ObjectsFailed,
-           BytesTransferred: ri.BytesTransferred,
-           BytesFailed:      ri.BytesFailed,
+           Bucket:              ri.Bucket,
+           Object:              ri.Object,
+           Objects:             ri.Objects,
+           DeleteMarkers:       ri.DeleteMarkers,
+           ObjectsFailed:       ri.ObjectsFailed,
+           DeleteMarkersFailed: ri.DeleteMarkersFailed,
+           BytesTransferred:    ri.BytesTransferred,
+           BytesFailed:         ri.BytesFailed,
        }
    case string(madmin.BatchJobKeyRotate):
        m.KeyRotate = &madmin.KeyRotationInfo{
@@ -2091,10 +2164,12 @@ func (ri *batchJobInfo) metric() madmin.JobMetric {
        }
    case string(madmin.BatchJobExpire):
        m.Expired = &madmin.ExpirationInfo{
-           Bucket:        ri.Bucket,
-           Object:        ri.Object,
-           Objects:       ri.Objects,
-           ObjectsFailed: ri.ObjectsFailed,
+           Bucket:              ri.Bucket,
+           Object:              ri.Object,
+           Objects:             ri.Objects,
+           DeleteMarkers:       ri.DeleteMarkers,
+           ObjectsFailed:       ri.ObjectsFailed,
+           DeleteMarkersFailed: ri.DeleteMarkersFailed,
        }
    }

@@ -2140,11 +2215,42 @@ func (m *batchJobMetrics) purgeJobMetrics() {
            m.RUnlock()
            for _, jobID := range toDeleteJobMetrics {
                m.delete(jobID)
+               j := BatchJobRequest{
+                   ID: jobID,
+               }
+               j.delete(GlobalContext, newObjectLayerFn())
            }
        }
    }
}

+// load metrics from disk on startup
+func (m *batchJobMetrics) init(ctx context.Context, objectAPI ObjectLayer) error {
+   resultCh := make(chan itemOrErr[ObjectInfo])
+
+   ctx, cancel := context.WithCancel(ctx)
+   defer cancel()
+
+   if err := objectAPI.Walk(ctx, minioMetaBucket, batchJobReportsPrefix, resultCh, WalkOptions{}); err != nil {
+       return err
+   }
+
+   for result := range resultCh {
+       if result.Err != nil {
+           return result.Err
+       }
+       ri := &batchJobInfo{}
+       if err := ri.loadByPath(ctx, objectAPI, result.Item.Name); err != nil {
+           if !errors.Is(err, errNoSuchJob) {
+               batchLogIf(ctx, err)
+           }
+           continue
+       }
+       m.metrics[ri.JobID] = ri
+   }
+   return nil
+}
+
func (m *batchJobMetrics) delete(jobID string) {
    m.Lock()
    defer m.Unlock()
@@ -2209,16 +2315,42 @@ func (m *batchJobMetrics) trace(d batchJobMetric, job string, attempts int) func
    }
}

-func lookupStyle(s string) miniogo.BucketLookupType {
-   var lookup miniogo.BucketLookupType
+func lookupStyle(s string) minio.BucketLookupType {
+   var lookup minio.BucketLookupType
    switch s {
    case "on":
-       lookup = miniogo.BucketLookupPath
+       lookup = minio.BucketLookupPath
    case "off":
-       lookup = miniogo.BucketLookupDNS
+       lookup = minio.BucketLookupDNS
    default:
-       lookup = miniogo.BucketLookupAuto
+       lookup = minio.BucketLookupAuto
    }
    return lookup
}
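lookupStyle maps the job YAML's path: on|off|auto field onto minio-go's bucket-lookup modes: "on" forces path-style requests (host/bucket/object), "off" forces virtual-host-style (bucket.host/object), and anything else lets the client autodetect. A usage sketch, with endpoint and credentials invented for illustration:

// Sketch: wiring a lookup style into a minio-go client.
package main

import (
    "fmt"

    minio "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
    // path: "on" -> BucketLookupPath; "off" -> BucketLookupDNS; else Auto
    cl, err := minio.New("127.0.0.1:9000", &minio.Options{
        Creds:        credentials.NewStaticV4("minioadmin", "minioadmin", ""),
        BucketLookup: minio.BucketLookupPath,
    })
    fmt.Println(cl, err)
}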

+// BatchJobPrefix - to support prefix field yaml unmarshalling with string or slice of strings
+type BatchJobPrefix []string
+
+var _ yaml.Unmarshaler = &BatchJobPrefix{}
+
+// UnmarshalYAML - to support prefix field yaml unmarshalling with string or slice of strings
+func (b *BatchJobPrefix) UnmarshalYAML(value *yaml.Node) error {
+   // try slice first
+   tmpSlice := []string{}
+   if err := value.Decode(&tmpSlice); err == nil {
+       *b = tmpSlice
+       return nil
+   }
+   // try string
+   tmpStr := ""
+   if err := value.Decode(&tmpStr); err == nil {
+       *b = []string{tmpStr}
+       return nil
+   }
+   return fmt.Errorf("unable to decode %s", value.Value)
+}
+
+// F - return prefix(es) as slice
+func (b *BatchJobPrefix) F() []string {
+   return *b
+}
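With this unmarshaller, both prefix spellings in a job definition decode to the same slice; the test files added later in this changeset exercise exactly these two shapes:

# Both forms decode to the same BatchJobPrefix value:
prefix: "object-prefix1"   # -> ["object-prefix1"]

prefix:                    # -> ["object-prefix1", "object-prefix2"]
  - object-prefix1
  - object-prefix2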

@@ -1,11 +1,94 @@
-package cmd
-
// Code generated by github.com/tinylib/msgp DO NOT EDIT.

+package cmd
+
import (
    "github.com/tinylib/msgp/msgp"
)

+// DecodeMsg implements msgp.Decodable
+func (z *BatchJobPrefix) DecodeMsg(dc *msgp.Reader) (err error) {
+   var zb0002 uint32
+   zb0002, err = dc.ReadArrayHeader()
+   if err != nil {
+       err = msgp.WrapError(err)
+       return
+   }
+   if cap((*z)) >= int(zb0002) {
+       (*z) = (*z)[:zb0002]
+   } else {
+       (*z) = make(BatchJobPrefix, zb0002)
+   }
+   for zb0001 := range *z {
+       (*z)[zb0001], err = dc.ReadString()
+       if err != nil {
+           err = msgp.WrapError(err, zb0001)
+           return
+       }
+   }
+   return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z BatchJobPrefix) EncodeMsg(en *msgp.Writer) (err error) {
+   err = en.WriteArrayHeader(uint32(len(z)))
+   if err != nil {
+       err = msgp.WrapError(err)
+       return
+   }
+   for zb0003 := range z {
+       err = en.WriteString(z[zb0003])
+       if err != nil {
+           err = msgp.WrapError(err, zb0003)
+           return
+       }
+   }
+   return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z BatchJobPrefix) MarshalMsg(b []byte) (o []byte, err error) {
+   o = msgp.Require(b, z.Msgsize())
+   o = msgp.AppendArrayHeader(o, uint32(len(z)))
+   for zb0003 := range z {
+       o = msgp.AppendString(o, z[zb0003])
+   }
+   return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *BatchJobPrefix) UnmarshalMsg(bts []byte) (o []byte, err error) {
+   var zb0002 uint32
+   zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+   if err != nil {
+       err = msgp.WrapError(err)
+       return
+   }
+   if cap((*z)) >= int(zb0002) {
+       (*z) = (*z)[:zb0002]
+   } else {
+       (*z) = make(BatchJobPrefix, zb0002)
+   }
+   for zb0001 := range *z {
+       (*z)[zb0001], bts, err = msgp.ReadStringBytes(bts)
+       if err != nil {
+           err = msgp.WrapError(err, zb0001)
+           return
+       }
+   }
+   o = bts
+   return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z BatchJobPrefix) Msgsize() (s int) {
+   s = msgp.ArrayHeaderSize
+   for zb0003 := range z {
+       s += msgp.StringPrefixSize + len(z[zb0003])
+   }
+   return
+}
+
// DecodeMsg implements msgp.Decodable
func (z *BatchJobRequest) DecodeMsg(dc *msgp.Reader) (err error) {
    var field []byte

@@ -1,7 +1,7 @@
-package cmd
-
// Code generated by github.com/tinylib/msgp DO NOT EDIT.

+package cmd
+
import (
    "bytes"
    "testing"
@@ -9,6 +9,119 @@ import (
    "github.com/tinylib/msgp/msgp"
)

+func TestMarshalUnmarshalBatchJobPrefix(t *testing.T) {
+   v := BatchJobPrefix{}
+   bts, err := v.MarshalMsg(nil)
+   if err != nil {
+       t.Fatal(err)
+   }
+   left, err := v.UnmarshalMsg(bts)
+   if err != nil {
+       t.Fatal(err)
+   }
+   if len(left) > 0 {
+       t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+   }
+
+   left, err = msgp.Skip(bts)
+   if err != nil {
+       t.Fatal(err)
+   }
+   if len(left) > 0 {
+       t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+   }
+}
+
+func BenchmarkMarshalMsgBatchJobPrefix(b *testing.B) {
+   v := BatchJobPrefix{}
+   b.ReportAllocs()
+   b.ResetTimer()
+   for i := 0; i < b.N; i++ {
+       v.MarshalMsg(nil)
+   }
+}
+
+func BenchmarkAppendMsgBatchJobPrefix(b *testing.B) {
+   v := BatchJobPrefix{}
+   bts := make([]byte, 0, v.Msgsize())
+   bts, _ = v.MarshalMsg(bts[0:0])
+   b.SetBytes(int64(len(bts)))
+   b.ReportAllocs()
+   b.ResetTimer()
+   for i := 0; i < b.N; i++ {
+       bts, _ = v.MarshalMsg(bts[0:0])
+   }
+}
+
+func BenchmarkUnmarshalBatchJobPrefix(b *testing.B) {
+   v := BatchJobPrefix{}
+   bts, _ := v.MarshalMsg(nil)
+   b.ReportAllocs()
+   b.SetBytes(int64(len(bts)))
+   b.ResetTimer()
+   for i := 0; i < b.N; i++ {
+       _, err := v.UnmarshalMsg(bts)
+       if err != nil {
+           b.Fatal(err)
+       }
+   }
+}
+
+func TestEncodeDecodeBatchJobPrefix(t *testing.T) {
+   v := BatchJobPrefix{}
+   var buf bytes.Buffer
+   msgp.Encode(&buf, &v)
+
+   m := v.Msgsize()
+   if buf.Len() > m {
+       t.Log("WARNING: TestEncodeDecodeBatchJobPrefix Msgsize() is inaccurate")
+   }
+
+   vn := BatchJobPrefix{}
+   err := msgp.Decode(&buf, &vn)
+   if err != nil {
+       t.Error(err)
+   }
+
+   buf.Reset()
+   msgp.Encode(&buf, &v)
+   err = msgp.NewReader(&buf).Skip()
+   if err != nil {
+       t.Error(err)
+   }
+}
+
+func BenchmarkEncodeBatchJobPrefix(b *testing.B) {
+   v := BatchJobPrefix{}
+   var buf bytes.Buffer
+   msgp.Encode(&buf, &v)
+   b.SetBytes(int64(buf.Len()))
+   en := msgp.NewWriter(msgp.Nowhere)
+   b.ReportAllocs()
+   b.ResetTimer()
+   for i := 0; i < b.N; i++ {
+       v.EncodeMsg(en)
+   }
+   en.Flush()
+}
+
+func BenchmarkDecodeBatchJobPrefix(b *testing.B) {
+   v := BatchJobPrefix{}
+   var buf bytes.Buffer
+   msgp.Encode(&buf, &v)
+   b.SetBytes(int64(buf.Len()))
+   rd := msgp.NewEndlessReader(buf.Bytes(), b)
+   dc := msgp.NewReader(rd)
+   b.ReportAllocs()
+   b.ResetTimer()
+   for i := 0; i < b.N; i++ {
+       err := v.DecodeMsg(dc)
+       if err != nil {
+           b.Fatal(err)
+       }
+   }
+}
+
func TestMarshalUnmarshalBatchJobRequest(t *testing.T) {
    v := BatchJobRequest{}
    bts, err := v.MarshalMsg(nil)

cmd/batch-handlers_test.go (new file, 75 lines)
@@ -0,0 +1,75 @@
+// Copyright (c) 2015-2024 MinIO, Inc.
+//
+// This file is part of MinIO Object Storage stack
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package cmd
+
+import (
+   "slices"
+   "testing"
+
+   "gopkg.in/yaml.v3"
+)
+
+func TestBatchJobPrefix_UnmarshalYAML(t *testing.T) {
+   type args struct {
+       yamlStr string
+   }
+   type PrefixTemp struct {
+       Prefix BatchJobPrefix `yaml:"prefix"`
+   }
+   tests := []struct {
+       name    string
+       b       PrefixTemp
+       args    args
+       want    []string
+       wantErr bool
+   }{
+       {
+           name: "test1",
+           b:    PrefixTemp{},
+           args: args{
+               yamlStr: `
+prefix: "foo"
+`,
+           },
+           want:    []string{"foo"},
+           wantErr: false,
+       },
+       {
+           name: "test2",
+           b:    PrefixTemp{},
+           args: args{
+               yamlStr: `
+prefix:
+  - "foo"
+  - "bar"
+`,
+           },
+           want: []string{"foo", "bar"},
+       },
+   }
+   for _, tt := range tests {
+       t.Run(tt.name, func(t *testing.T) {
+           if err := yaml.Unmarshal([]byte(tt.args.yamlStr), &tt.b); (err != nil) != tt.wantErr {
+               t.Errorf("UnmarshalYAML() error = %v, wantErr %v", err, tt.wantErr)
+           }
+           if !slices.Equal(tt.b.Prefix.F(), tt.want) {
+               t.Errorf("UnmarshalYAML() = %v, want %v", tt.b.Prefix.F(), tt.want)
+           }
+       })
+   }
+}
@@ -275,7 +275,7 @@ func (sf BatchJobSizeFilter) Validate() error {
type BatchJobSize int64

// UnmarshalYAML to parse humanized byte values
-func (s *BatchJobSize) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (s *BatchJobSize) UnmarshalYAML(unmarshal func(any) error) error {
    var batchExpireSz string
    err := unmarshal(&batchExpireSz)
    if err != nil {

@@ -1,7 +1,7 @@
-package cmd
-
// Code generated by github.com/tinylib/msgp DO NOT EDIT.

+package cmd
+
import (
    "github.com/tinylib/msgp/msgp"
)

@@ -1,7 +1,7 @@
-package cmd
-
// Code generated by github.com/tinylib/msgp DO NOT EDIT.

+package cmd
+
import (
    "bytes"
    "testing"

@@ -21,8 +21,8 @@ import (
    "time"

    miniogo "github.com/minio/minio-go/v7"

    "github.com/minio/minio/internal/auth"
+   "github.com/minio/pkg/v3/xtime"
)

//go:generate msgp -file $GOFILE
@@ -65,12 +65,12 @@ import (

// BatchReplicateFilter holds all the filters currently supported for batch replication
type BatchReplicateFilter struct {
-   NewerThan     time.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
-   OlderThan     time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
-   CreatedAfter  time.Time     `yaml:"createdAfter,omitempty" json:"createdAfter"`
-   CreatedBefore time.Time     `yaml:"createdBefore,omitempty" json:"createdBefore"`
-   Tags          []BatchJobKV  `yaml:"tags,omitempty" json:"tags"`
-   Metadata      []BatchJobKV  `yaml:"metadata,omitempty" json:"metadata"`
+   NewerThan     xtime.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
+   OlderThan     xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
+   CreatedAfter  time.Time      `yaml:"createdAfter,omitempty" json:"createdAfter"`
+   CreatedBefore time.Time      `yaml:"createdBefore,omitempty" json:"createdBefore"`
+   Tags          []BatchJobKV   `yaml:"tags,omitempty" json:"tags"`
+   Metadata      []BatchJobKV   `yaml:"metadata,omitempty" json:"metadata"`
}
||||
// BatchJobReplicateFlags various configurations for replication job definition currently includes
|
||||
@ -151,7 +151,7 @@ func (t BatchJobReplicateTarget) ValidPath() bool {
|
||||
type BatchJobReplicateSource struct {
|
||||
Type BatchJobReplicateResourceType `yaml:"type" json:"type"`
|
||||
Bucket string `yaml:"bucket" json:"bucket"`
|
||||
Prefix string `yaml:"prefix" json:"prefix"`
|
||||
Prefix BatchJobPrefix `yaml:"prefix" json:"prefix"`
|
||||
Endpoint string `yaml:"endpoint" json:"endpoint"`
|
||||
Path string `yaml:"path" json:"path"`
|
||||
Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"`
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
)
|
||||
@ -411,7 +411,7 @@ func (z *BatchJobReplicateSource) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
return
|
||||
}
|
||||
case "Prefix":
|
||||
z.Prefix, err = dc.ReadString()
|
||||
err = z.Prefix.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
@ -514,7 +514,7 @@ func (z *BatchJobReplicateSource) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteString(z.Prefix)
|
||||
err = z.Prefix.EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
@ -600,7 +600,11 @@ func (z *BatchJobReplicateSource) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.AppendString(o, z.Bucket)
|
||||
// string "Prefix"
|
||||
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
|
||||
o = msgp.AppendString(o, z.Prefix)
|
||||
o, err = z.Prefix.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
}
|
||||
// string "Endpoint"
|
||||
o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
|
||||
o = msgp.AppendString(o, z.Endpoint)
|
||||
@ -664,7 +668,7 @@ func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error)
|
||||
return
|
||||
}
|
||||
case "Prefix":
|
||||
z.Prefix, bts, err = msgp.ReadStringBytes(bts)
|
||||
bts, err = z.Prefix.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Prefix")
|
||||
return
|
||||
@ -742,7 +746,7 @@ func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error)
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *BatchJobReplicateSource) Msgsize() (s int) {
|
||||
s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken) + 9 + z.Snowball.Msgsize()
|
||||
s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + z.Prefix.Msgsize() + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken) + 9 + z.Snowball.Msgsize()
|
||||
return
|
||||
}
|
||||
|
||||
@ -1409,13 +1413,13 @@ func (z *BatchReplicateFilter) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "NewerThan":
|
||||
z.NewerThan, err = dc.ReadDuration()
|
||||
err = z.NewerThan.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "NewerThan")
|
||||
return
|
||||
}
|
||||
case "OlderThan":
|
||||
z.OlderThan, err = dc.ReadDuration()
|
||||
err = z.OlderThan.DecodeMsg(dc)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "OlderThan")
|
||||
return
|
||||
@ -1489,7 +1493,7 @@ func (z *BatchReplicateFilter) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteDuration(z.NewerThan)
|
||||
err = z.NewerThan.EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "NewerThan")
|
||||
return
|
||||
@ -1499,7 +1503,7 @@ func (z *BatchReplicateFilter) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteDuration(z.OlderThan)
|
||||
err = z.OlderThan.EncodeMsg(en)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "OlderThan")
|
||||
return
|
||||
@ -1567,10 +1571,18 @@ func (z *BatchReplicateFilter) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
// map header, size 6
|
||||
// string "NewerThan"
|
||||
o = append(o, 0x86, 0xa9, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
|
||||
o = msgp.AppendDuration(o, z.NewerThan)
|
||||
o, err = z.NewerThan.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "NewerThan")
|
||||
return
|
||||
}
|
||||
// string "OlderThan"
|
||||
o = append(o, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
|
||||
o = msgp.AppendDuration(o, z.OlderThan)
|
||||
o, err = z.OlderThan.MarshalMsg(o)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "OlderThan")
|
||||
return
|
||||
}
|
||||
// string "CreatedAfter"
|
||||
o = append(o, 0xac, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72)
|
||||
o = msgp.AppendTime(o, z.CreatedAfter)
|
||||
@ -1619,13 +1631,13 @@ func (z *BatchReplicateFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
}
|
||||
switch msgp.UnsafeString(field) {
|
||||
case "NewerThan":
|
||||
z.NewerThan, bts, err = msgp.ReadDurationBytes(bts)
|
||||
bts, err = z.NewerThan.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "NewerThan")
|
||||
return
|
||||
}
|
||||
case "OlderThan":
|
||||
z.OlderThan, bts, err = msgp.ReadDurationBytes(bts)
|
||||
bts, err = z.OlderThan.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "OlderThan")
|
||||
return
|
||||
@ -1694,7 +1706,7 @@ func (z *BatchReplicateFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *BatchReplicateFilter) Msgsize() (s int) {
|
||||
s = 1 + 10 + msgp.DurationSize + 10 + msgp.DurationSize + 13 + msgp.TimeSize + 14 + msgp.TimeSize + 5 + msgp.ArrayHeaderSize
|
||||
s = 1 + 10 + z.NewerThan.Msgsize() + 10 + z.OlderThan.Msgsize() + 13 + msgp.TimeSize + 14 + msgp.TimeSize + 5 + msgp.ArrayHeaderSize
|
||||
for za0001 := range z.Tags {
|
||||
s += z.Tags[za0001].Msgsize()
|
||||
}
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
package cmd
|
||||
|
||||
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
182
cmd/batch-replicate_test.go
Normal file
182
cmd/batch-replicate_test.go
Normal file
@ -0,0 +1,182 @@
|
||||
// Copyright (c) 2015-2024 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
func TestParseBatchJobReplicate(t *testing.T) {
|
||||
replicateYaml := `
|
||||
replicate:
|
||||
apiVersion: v1
|
||||
# source of the objects to be replicated
|
||||
source:
|
||||
type: minio # valid values are "s3" or "minio"
|
||||
bucket: mytest
|
||||
prefix: object-prefix1 # 'PREFIX' is optional
|
||||
# If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted
|
||||
# Either the 'source' or 'remote' *must* be the "local" deployment
|
||||
# endpoint: "http://127.0.0.1:9000"
|
||||
# # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
|
||||
# credentials:
|
||||
# accessKey: minioadmin # Required
|
||||
# secretKey: minioadmin # Required
|
||||
# # sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used
|
||||
snowball: # automatically activated if the source is local
|
||||
disable: true # optionally turn-off snowball archive transfer
|
||||
# batch: 100 # upto this many objects per archive
|
||||
# inmemory: true # indicates if the archive must be staged locally or in-memory
|
||||
# compress: false # S2/Snappy compressed archive
|
||||
# smallerThan: 5MiB # create archive for all objects smaller than 5MiB
|
||||
# skipErrs: false # skips any source side read() errors
|
||||
|
||||
# target where the objects must be replicated
|
||||
target:
|
||||
type: minio # valid values are "s3" or "minio"
|
||||
bucket: mytest
|
||||
prefix: stage # 'PREFIX' is optional
|
||||
# If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted
|
||||
|
||||
# Either the 'source' or 'remote' *must* be the "local" deployment
|
||||
endpoint: "http://127.0.0.1:9001"
|
||||
# path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
|
||||
credentials:
|
||||
accessKey: minioadmin
|
||||
secretKey: minioadmin
|
||||
# sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used
|
||||
|
||||
# NOTE: All flags are optional
|
||||
# - filtering criteria only applies for all source objects match the criteria
|
||||
# - configurable notification endpoints
|
||||
# - configurable retries for the job (each retry skips successfully previously replaced objects)
|
||||
flags:
|
||||
filter:
|
||||
newerThan: "7d10h31s" # match objects newer than this value (e.g. 7d10h31s)
|
||||
olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
|
||||
# createdAfter: "date" # match objects created after "date"
|
||||
# createdBefore: "date" # match objects created before "date"
|
||||
|
||||
## NOTE: tags are not supported when "source" is remote.
|
||||
tags:
|
||||
- key: "name"
|
||||
value: "pick*" # match objects with tag 'name', with all values starting with 'pick'
|
||||
|
||||
metadata:
|
||||
- key: "content-type"
|
||||
value: "image/*" # match objects with 'content-type', with all values starting with 'image/'
|
||||
|
||||
# notify:
|
||||
#   endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
#   token: "Bearer xxxxx" # optional authentication token for the notification endpoint
#
# retry:
#   attempts: 10 # number of retries for the job before giving up
#   delay: "500ms" # least amount of delay between each retry
`

	var job BatchJobRequest
	err := yaml.Unmarshal([]byte(replicateYaml), &job)
	if err != nil {
		t.Fatal("Failed to parse batch-job-replicate yaml", err)
	}
	if !slices.Equal(job.Replicate.Source.Prefix.F(), []string{"object-prefix1"}) {
		t.Fatal("Failed to parse batch-job-replicate yaml", err)
	}

	multiPrefixReplicateYaml := `
replicate:
  apiVersion: v1
  # source of the objects to be replicated
  source:
    type: minio # valid values are "s3" or "minio"
    bucket: mytest
    prefix: # 'PREFIX' is optional
      - object-prefix1
      - object-prefix2
    # If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted
    # Either the 'source' or 'remote' *must* be the "local" deployment
    # endpoint: "http://127.0.0.1:9000"
    # # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
    # credentials:
    #   accessKey: minioadmin # Required
    #   secretKey: minioadmin # Required
    #   # sessionToken: SESSION-TOKEN # Optional, only available when rotating credentials are used
    snowball: # automatically activated if the source is local
      disable: true # optionally turn off snowball archive transfer
      # batch: 100 # up to this many objects per archive
      # inmemory: true # indicates if the archive must be staged locally or in memory
      # compress: false # S2/Snappy compressed archive
      # smallerThan: 5MiB # create archive for all objects smaller than 5MiB
      # skipErrs: false # skips any source-side read() errors

  # target where the objects must be replicated
  target:
    type: minio # valid values are "s3" or "minio"
    bucket: mytest
    prefix: stage # 'PREFIX' is optional
    # If your target is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted
    # Either the 'source' or 'remote' *must* be the "local" deployment
    endpoint: "http://127.0.0.1:9001"
    # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
    credentials:
      accessKey: minioadmin
      secretKey: minioadmin
      # sessionToken: SESSION-TOKEN # Optional, only available when rotating credentials are used

  # NOTE: All flags are optional
  # - filtering criteria apply only to source objects that match them
  # - configurable notification endpoints
  # - configurable retries for the job (each retry skips objects that were already replicated successfully)
  flags:
    filter:
      newerThan: "7d10h31s" # match objects newer than this value (e.g. 7d10h31s)
      olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
      # createdAfter: "date" # match objects created after "date"
      # createdBefore: "date" # match objects created before "date"

      ## NOTE: tags are not supported when "source" is remote.
      tags:
        - key: "name"
          value: "pick*" # match objects with tag 'name', with all values starting with 'pick'

      metadata:
        - key: "content-type"
          value: "image/*" # match objects with 'content-type', with all values starting with 'image/'

    # notify:
    #   endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
    #   token: "Bearer xxxxx" # optional authentication token for the notification endpoint
    #
    # retry:
    #   attempts: 10 # number of retries for the job before giving up
    #   delay: "500ms" # least amount of delay between each retry
`

	var multiPrefixJob BatchJobRequest
	err = yaml.Unmarshal([]byte(multiPrefixReplicateYaml), &multiPrefixJob)
	if err != nil {
		t.Fatal("Failed to parse batch-job-replicate yaml", err)
	}
	if !slices.Equal(multiPrefixJob.Replicate.Source.Prefix.F(), []string{"object-prefix1", "object-prefix2"}) {
		t.Fatal("Failed to parse batch-job-replicate yaml")
	}
}
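`Prefix.F()` above flattens a field that accepts either a single string or a list of strings. A minimal sketch of that one-or-many pattern, assuming gopkg.in/yaml.v2-style custom unmarshaling; the type and method names below are illustrative, not necessarily MinIO's actual implementation:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// BatchJobPrefix is an assumed name for a one-or-many string field.
type BatchJobPrefix []string

func (p *BatchJobPrefix) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var one string
	if err := unmarshal(&one); err == nil { // scalar form: prefix: object-prefix1
		*p = []string{one}
		return nil
	}
	var many []string
	if err := unmarshal(&many); err != nil { // list form: prefix: [a, b]
		return err
	}
	*p = many
	return nil
}

// F returns the prefixes as a plain slice.
func (p BatchJobPrefix) F() []string { return p }

func main() {
	var v struct {
		Prefix BatchJobPrefix `yaml:"prefix"`
	}
	_ = yaml.Unmarshal([]byte("prefix:\n  - a\n  - b\n"), &v)
	fmt.Println(v.Prefix.F()) // [a b]
}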
@@ -21,6 +21,7 @@ import (
 	"context"
 	"encoding/base64"
 	"fmt"
+	"maps"
 	"math/rand"
 	"net/http"
 	"runtime"
@@ -110,9 +111,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
 		}
 	}
 	e.kmsContext = kms.Context{}
-	for k, v := range ctx {
-		e.kmsContext[k] = v
-	}
+	maps.Copy(e.kmsContext, ctx)
 	ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
 	if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil {
 		return err
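This hunk and the next replace hand-written copy loops with `maps.Copy` from the Go 1.21 standard library; a minimal sketch of the equivalence:

package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]string{"a": "1"}
	src := map[string]string{"b": "2"}
	maps.Copy(dst, src) // same effect as: for k, v := range src { dst[k] = v }
	fmt.Println(dst)    // map[a:1 b:2]
}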
@@ -225,9 +224,7 @@ func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, ob
 	// Since we are rotating the keys, make sure to update the metadata.
 	oi.metadataOnly = true
 	oi.keyRotation = true
-	for k, v := range encMetadata {
-		oi.UserDefined[k] = v
-	}
+	maps.Copy(oi.UserDefined, encMetadata)
 	if _, err := api.CopyObject(ctx, r.Bucket, oi.Name, r.Bucket, oi.Name, oi, ObjectOptions{
 		VersionID: oi.VersionID,
 	}, ObjectOptions{
@@ -267,8 +264,12 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
 	globalBatchJobsMetrics.save(job.ID, ri)
 	lastObject := ri.Object

+	retryAttempts := job.KeyRotate.Flags.Retry.Attempts
+	if retryAttempts <= 0 {
+		retryAttempts = batchKeyRotateJobDefaultRetries
+	}
 	delay := job.KeyRotate.Flags.Retry.Delay
-	if delay == 0 {
+	if delay <= 0 {
 		delay = batchKeyRotateJobDefaultRetryDelay
 	}
@@ -354,7 +355,6 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
 		return err
 	}

-	retryAttempts := ri.RetryAttempts
 	ctx, cancel := context.WithCancel(ctx)

 	results := make(chan itemOrErr[ObjectInfo], 100)
@@ -1,7 +1,7 @@
-package cmd
-
 // Code generated by github.com/tinylib/msgp DO NOT EDIT.

+package cmd
+
 import (
 	"github.com/tinylib/msgp/msgp"
 )
@@ -1,7 +1,7 @@
-package cmd
-
 // Code generated by github.com/tinylib/msgp DO NOT EDIT.

+package cmd
+
 import (
 	"bytes"
 	"testing"
@@ -35,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
 	// obtains random bucket name.
 	bucket := getRandomBucketName()
 	// create bucket.
-	err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
+	err = obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
 	if err != nil {
 		b.Fatal(err)
 	}
@@ -51,10 +51,10 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
 	// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
 	b.ReportAllocs()
 	// the actual benchmark for PutObject starts here. Reset the benchmark timer.
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for i := 0; b.Loop(); i++ {
 		// insert the object.
-		objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
+		objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
 			mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
 		if err != nil {
 			b.Fatal(err)
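The benchmark hunks in this file move from `for i := 0; i < b.N; i++` to `testing.B.Loop` (Go 1.24+), which manages the timer itself, so the explicit `b.ResetTimer()` calls are dropped; a minimal sketch:

package cmd_test

import (
	"crypto/md5"
	"testing"
)

func BenchmarkExample(b *testing.B) {
	data := make([]byte, 1<<20) // setup before the loop is excluded from timing
	for b.Loop() {              // b.Loop replaces i < b.N and b.ResetTimer
		_ = md5.Sum(data)
	}
}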
@@ -76,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 	object := getRandomObjectName()

 	// create bucket.
-	err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
+	err = obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
 	if err != nil {
 		b.Fatal(err)
 	}
@@ -90,7 +90,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 	textData := generateBytesData(objSize)
 	// generate md5sum for the generated data.
 	// md5sum of the data to written is required as input for NewMultipartUpload.
-	res, err := obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
+	res, err := obj.NewMultipartUpload(b.Context(), bucket, object, ObjectOptions{})
 	if err != nil {
 		b.Fatal(err)
 	}
@@ -101,11 +101,11 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 	// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
 	b.ReportAllocs()
 	// the actual benchmark for PutObjectPart starts here. Reset the benchmark timer.
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for i := 0; b.Loop(); i++ {
 		// insert the object.
 		totalPartsNR := int(math.Ceil(float64(objSize) / float64(partSize)))
-		for j := 0; j < totalPartsNR; j++ {
+		for j := range totalPartsNR {
 			if j < totalPartsNR-1 {
 				textPartData = textData[j*partSize : (j+1)*partSize-1]
 			} else {
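`for j := range totalPartsNR` uses Go 1.22's range-over-integer form, identical to the classic three-clause loop; for example:

package main

import "fmt"

func main() {
	for j := range 3 { // Go 1.22+: same as for j := 0; j < 3; j++
		fmt.Println(j) // prints 0, 1, 2
	}
}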
@@ -113,7 +113,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 			}
 			md5hex := getMD5Hash(textPartData)
 			var partInfo PartInfo
-			partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, res.UploadID, j,
+			partInfo, err = obj.PutObjectPart(b.Context(), bucket, object, res.UploadID, j,
 				mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
 			if err != nil {
 				b.Fatal(err)
@@ -130,7 +130,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 // creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
 func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
 	// create a temp Erasure/FS backend.
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(b.Context())
 	defer cancel()
 	objLayer, disks, err := prepareTestBackend(ctx, instanceType)
 	if err != nil {
@@ -146,7 +146,7 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
 // creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
 func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
 	// create a temp Erasure/FS backend.
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(b.Context())
 	defer cancel()
 	objLayer, disks, err := prepareTestBackend(ctx, instanceType)
 	if err != nil {
@@ -162,7 +162,7 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
 // creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
 func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
 	// create a temp Erasure/FS backend.
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(b.Context())
 	defer cancel()
 	objLayer, disks, err := prepareTestBackend(ctx, instanceType)
 	if err != nil {
@@ -196,7 +196,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
 	// obtains random bucket name.
 	bucket := getRandomBucketName()
 	// create bucket.
-	err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
+	err := obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
 	if err != nil {
 		b.Fatal(err)
 	}
@@ -218,7 +218,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
 		i := 0
 		for pb.Next() {
 			// insert the object.
-			objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
+			objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
 				mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
 			if err != nil {
 				b.Fatal(err)
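These changes lean on `b.Context()`/`t.Context()` (Go 1.24+), which return a context canceled shortly before the benchmark or test returns, so work tied to it cannot outlive the test; a minimal sketch:

package cmd_test

import "testing"

func TestContextLifetime(t *testing.T) {
	// t.Context is canceled just before the test ends; b.Context behaves the
	// same for benchmarks, replacing context.Background() in the hunks above.
	ctx := t.Context()
	if ctx.Err() != nil {
		t.Fatal("context should be live while the test runs")
	}
}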
@@ -20,6 +20,7 @@ package cmd
 import (
 	"bytes"
 	"context"
+	"errors"
 	"hash"
 	"io"
 	"sync"
@@ -37,12 +38,22 @@ type streamingBitrotWriter struct {
 	shardSize int64
 	canClose  *sync.WaitGroup
 	byteBuf   []byte
+	finished  bool
 }

 func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
 	if len(p) == 0 {
 		return 0, nil
 	}
+	if b.finished {
+		return 0, errors.New("bitrot write not allowed")
+	}
+	if int64(len(p)) > b.shardSize {
+		return 0, errors.New("unexpected bitrot buffer size")
+	}
+	if int64(len(p)) < b.shardSize {
+		b.finished = true
+	}
 	b.h.Reset()
 	b.h.Write(p)
 	hashBytes := b.h.Sum(nil)
@@ -141,13 +152,7 @@ func (b *streamingBitrotReader) Close() error {
 	}
 	if closer, ok := b.rc.(io.Closer); ok {
 		// drain the body for connection reuse at network layer.
-		xhttp.DrainBody(struct {
-			io.Reader
-			io.Closer
-		}{
-			Reader: b.rc,
-			Closer: closeWrapper(func() error { return nil }),
-		})
+		xhttp.DrainBody(io.NopCloser(b.rc))
 		return closer.Close()
 	}
 	return nil
@@ -99,7 +99,7 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
 			return alg
 		}
 	}
-	return
+	return a
 }

 func newBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
@@ -128,14 +128,20 @@ func closeBitrotReaders(rs []io.ReaderAt) {
 }

 // Close all the writers.
-func closeBitrotWriters(ws []io.Writer) {
-	for _, w := range ws {
-		if w != nil {
-			if bw, ok := w.(io.Closer); ok {
-				bw.Close()
-			}
-		}
-	}
+func closeBitrotWriters(ws []io.Writer) []error {
+	errs := make([]error, len(ws))
+	for i, w := range ws {
+		if w == nil {
+			errs[i] = errDiskNotFound
+			continue
+		}
+		if bw, ok := w.(io.Closer); ok {
+			errs[i] = bw.Close()
+		} else {
+			errs[i] = nil
+		}
+	}
+	return errs
 }

 // Returns hash sum for whole-bitrot, nil for streaming-bitrot.
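With the new signature, the caller gets one error slot per writer instead of silent closes; a hedged sketch of folding them, assuming `writers` names the []io.Writer that was passed in:

// a sketch, not code from this PR: aggregate the per-writer close errors
errs := closeBitrotWriters(writers)
if err := errors.Join(errs...); err != nil { // errors.Join ignores nil entries
	log.Printf("closing bitrot writers: %v", err)
}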
@@ -178,7 +184,7 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
 		return errFileCorrupt
 	}

-	bufp := xioutil.ODirectPoolSmall.Get().(*[]byte)
+	bufp := xioutil.ODirectPoolSmall.Get()
 	defer xioutil.ODirectPoolSmall.Put(bufp)

 	for left > 0 {
@@ -18,7 +18,6 @@
 package cmd

 import (
-	"context"
 	"io"
 	"testing"
 )
@@ -34,7 +33,7 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
 		t.Fatal(err)
 	}

-	disk.MakeVol(context.Background(), volume)
+	disk.MakeVol(t.Context(), volume)

 	writer := newBitrotWriter(disk, "", volume, filePath, 35, bitrotAlgo, 10)
@@ -48,9 +48,7 @@ func (bs *bootstrapTracer) Events() []madmin.TraceInfo {
 	traceInfo := make([]madmin.TraceInfo, 0, bootstrapTraceLimit)

 	bs.mu.RLock()
-	for _, i := range bs.info {
-		traceInfo = append(traceInfo, i)
-	}
+	traceInfo = append(traceInfo, bs.info...)
 	bs.mu.RUnlock()

 	return traceInfo
@@ -183,11 +183,19 @@ var binaryChecksum = getBinaryChecksum()

 func getBinaryChecksum() string {
 	mw := md5.New()
-	b, err := os.Open(os.Args[0])
-	if err == nil {
-		defer b.Close()
-		io.Copy(mw, b)
+	binPath, err := os.Executable()
+	if err != nil {
+		logger.Error("Calculating checksum failed: %s", err)
+		return "00000000000000000000000000000000"
+	}
+	b, err := os.Open(binPath)
+	if err != nil {
+		logger.Error("Calculating checksum failed: %s", err)
+		return "00000000000000000000000000000000"
 	}
+
+	defer b.Close()
+	io.Copy(mw, b)
 	return hex.EncodeToString(mw.Sum(nil))
 }
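`os.Executable` resolves the running binary's real path even when the process was launched via a bare name on PATH, where `os.Args[0]` may not be openable; a minimal sketch:

package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// os.Executable returns the path of the running binary itself.
	path, err := os.Executable()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(path)
}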
@@ -1,7 +1,7 @@
-package cmd
-
 // Code generated by github.com/tinylib/msgp DO NOT EDIT.

+package cmd
+
 import (
 	"github.com/tinylib/msgp/msgp"
 )
@@ -59,19 +59,17 @@ func (z *ServerSystemConfig) DecodeMsg(dc *msgp.Reader) (err error) {
 		if z.MinioEnv == nil {
 			z.MinioEnv = make(map[string]string, zb0003)
 		} else if len(z.MinioEnv) > 0 {
-			for key := range z.MinioEnv {
-				delete(z.MinioEnv, key)
-			}
+			clear(z.MinioEnv)
 		}
 		for zb0003 > 0 {
 			zb0003--
 			var za0002 string
-			var za0003 string
 			za0002, err = dc.ReadString()
 			if err != nil {
 				err = msgp.WrapError(err, "MinioEnv")
 				return
 			}
+			var za0003 string
 			za0003, err = dc.ReadString()
 			if err != nil {
 				err = msgp.WrapError(err, "MinioEnv", za0002)
@@ -240,14 +238,12 @@ func (z *ServerSystemConfig) UnmarshalMsg(bts []byte) (o []byte, err error) {
 		if z.MinioEnv == nil {
 			z.MinioEnv = make(map[string]string, zb0003)
 		} else if len(z.MinioEnv) > 0 {
-			for key := range z.MinioEnv {
-				delete(z.MinioEnv, key)
-			}
+			clear(z.MinioEnv)
 		}
 		for zb0003 > 0 {
+			var za0002 string
+			var za0003 string
 			zb0003--
-			var za0002 string
 			za0002, bts, err = msgp.ReadStringBytes(bts)
 			if err != nil {
 				err = msgp.WrapError(err, "MinioEnv")
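`clear` is a Go 1.21 builtin: on a map it deletes every entry, which is exactly what the removed delete loops did; a minimal sketch:

package main

import "fmt"

func main() {
	m := map[string]string{"a": "1", "b": "2"}
	clear(m)            // deletes every entry, same as the removed loop
	fmt.Println(len(m)) // 0
}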
@@ -1,7 +1,7 @@
-package cmd
-
 // Code generated by github.com/tinylib/msgp DO NOT EDIT.

+package cmd
+
 import (
 	"bytes"
 	"testing"
@@ -28,6 +28,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"mime"
 	"mime/multipart"
 	"net/http"
 	"net/textproto"
@@ -53,6 +54,7 @@ import (
 	"github.com/minio/minio/internal/bucket/replication"
 	"github.com/minio/minio/internal/config/dns"
 	"github.com/minio/minio/internal/crypto"
+	"github.com/minio/minio/internal/etag"
 	"github.com/minio/minio/internal/event"
 	"github.com/minio/minio/internal/handlers"
 	"github.com/minio/minio/internal/hash"
@@ -152,7 +154,6 @@ func initFederatorBackend(buckets []string, objLayer ObjectLayer) {
 	g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)

 	for index := range bucketsToBeUpdatedSlice {
-		index := index
 		g.Go(func() error {
 			return globalDNSConfig.Put(bucketsToBeUpdatedSlice[index])
 		}, index)
@@ -342,11 +343,9 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
 				Created: dnsRecords[0].CreationDate,
 			})
 		}
-
 		sort.Slice(bucketsInfo, func(i, j int) bool {
 			return bucketsInfo[i].Name < bucketsInfo[j].Name
 		})
-
 	} else {
 		// Invoke the list buckets.
 		var err error
@@ -427,7 +426,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

 	// Content-Md5 is required should be set
 	// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
-	if _, ok := r.Header[xhttp.ContentMD5]; !ok {
+	if !validateLengthAndChecksum(r) {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL)
 		return
 	}
@@ -559,7 +558,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 			}, goi, opts, gerr)
 			if dsc.ReplicateAny() {
 				if object.VersionID != "" {
-					object.VersionPurgeStatus = Pending
+					object.VersionPurgeStatus = replication.VersionPurgePending
 					object.VersionPurgeStatuses = dsc.PendingStatus()
 				} else {
 					object.DeleteMarkerReplicationStatus = dsc.PendingStatus()
@@ -593,7 +592,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 			output[idx] = obj
 			idx++
 		}
-		return
+		return output
 	}

 	// Disable timeouts and cancellation
@@ -669,9 +668,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 			continue
 		}
-
 		defer globalCacheConfig.Delete(bucket, dobj.ObjectName)
-
-		if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == Pending) {
+		if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == replication.VersionPurgePending) {
 			// copy so we can re-add null ID.
 			dobj := dobj
 			if isDirObject(dobj.ObjectName) && dobj.VersionID == "" {
@@ -841,7 +838,6 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 			}
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return
-
 		}
 		apiErr := ErrBucketAlreadyExists
 		if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(sr)...)).IsEmpty() {
@@ -889,6 +885,30 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 	})
 }

+// multipartReader is just like https://pkg.go.dev/net/http#Request.MultipartReader but
+// rejects multipart/mixed as its not supported in S3 API.
+func multipartReader(r *http.Request) (*multipart.Reader, error) {
+	v := r.Header.Get("Content-Type")
+	if v == "" {
+		return nil, http.ErrNotMultipart
+	}
+	if r.Body == nil {
+		return nil, errors.New("missing form body")
+	}
+	d, params, err := mime.ParseMediaType(v)
+	if err != nil {
+		return nil, http.ErrNotMultipart
+	}
+	if d != "multipart/form-data" {
+		return nil, http.ErrNotMultipart
+	}
+	boundary, ok := params["boundary"]
+	if !ok {
+		return nil, http.ErrMissingBoundary
+	}
+	return multipart.NewReader(r.Body, boundary), nil
+}
+
 // PostPolicyBucketHandler - POST policy
 // ----------
 // This implementation of the POST operation handles object creation with a specified
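A hedged sketch of exercising the new helper from a test; it assumes the `multipartReader` function added above is in scope:

// multipart/mixed must be refused, since S3 POST policy uploads
// only accept multipart/form-data.
req := httptest.NewRequest(http.MethodPost, "/mybucket", strings.NewReader("ignored"))
req.Header.Set("Content-Type", "multipart/mixed; boundary=xyz")
if _, err := multipartReader(req); err != http.ErrNotMultipart {
	t.Fatalf("expected multipart/mixed to be rejected, got %v", err)
}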
@@ -922,9 +942,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}

+	if r.ContentLength <= 0 {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEmptyRequestBody), r.URL)
+		return
+	}
+
 	// Here the parameter is the size of the form data that should
 	// be loaded in memory, the remaining being put in temporary files.
-	mp, err := r.MultipartReader()
+	mp, err := multipartReader(r)
 	if err != nil {
 		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
 		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
@@ -936,7 +961,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h

 	var (
 		reader        io.Reader
-		fileSize      int64 = -1
+		actualSize    int64 = -1
 		fileName      string
 		fanOutEntries = make([]minio.PutObjectFanOutEntry, 0, 100)
 	)
@@ -944,6 +969,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 	maxParts := 1000
 	// Canonicalize the form values into http.Header.
 	formValues := make(http.Header)
+	var headerLen int64
 	for {
 		part, err := mp.NextRawPart()
 		if errors.Is(err, io.EOF) {
@@ -985,7 +1011,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 			return
 		}

-		var b bytes.Buffer
+		headerLen += int64(len(name)) + int64(len(fileName))
 		if name != "file" {
 			if http.CanonicalHeaderKey(name) == http.CanonicalHeaderKey("x-minio-fanout-list") {
 				dec := json.NewDecoder(part)
@@ -996,7 +1022,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				if err := dec.Decode(&m); err != nil {
 					part.Close()
 					apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
-					apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, multipart.ErrMessageTooLarge)
+					apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
 					writeErrorResponse(ctx, w, apiErr, r.URL)
 					return
 				}
@@ -1006,8 +1032,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				continue
 			}

+			buf := bytebufferpool.Get()
 			// value, store as string in memory
-			n, err := io.CopyN(&b, part, maxMemoryBytes+1)
+			n, err := io.CopyN(buf, part, maxMemoryBytes+1)
+			value := buf.String()
+			buf.Reset()
+			bytebufferpool.Put(buf)
 			part.Close()

 			if err != nil && err != io.EOF {
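The handler now borrows a buffer from github.com/valyala/bytebufferpool instead of allocating a fresh bytes.Buffer per form field; a minimal sketch of the pool lifecycle:

package main

import (
	"fmt"

	"github.com/valyala/bytebufferpool"
)

func main() {
	buf := bytebufferpool.Get()
	buf.WriteString("form value")
	value := buf.String() // String copies, so value survives the Put below
	buf.Reset()
	bytebufferpool.Put(buf)
	fmt.Println(value)
}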
@@ -1029,7 +1059,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				writeErrorResponse(ctx, w, apiErr, r.URL)
 				return
 			}
-			formValues[http.CanonicalHeaderKey(name)] = append(formValues[http.CanonicalHeaderKey(name)], b.String())
+			headerLen += n
+			formValues[http.CanonicalHeaderKey(name)] = append(formValues[http.CanonicalHeaderKey(name)], value)
 			continue
 		}
@@ -1038,10 +1069,33 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		// The file or text content must be the last field in the form.
 		// You cannot upload more than one file at a time.
 		reader = part
+
+		possibleShardSize := (r.ContentLength - headerLen)
+		if globalStorageClass.ShouldInline(possibleShardSize, false) { // keep versioned false for this check
+			var b bytes.Buffer
+			n, err := io.Copy(&b, reader)
+			if err != nil {
+				apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
+				apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
+				writeErrorResponse(ctx, w, apiErr, r.URL)
+				return
+			}
+			reader = &b
+			actualSize = n
+		}
+
 		// we have found the File part of the request we are done processing multipart-form
 		break
 	}

+	// check if have a file
+	if reader == nil {
+		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
+		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The file or text content is missing"))
+		writeErrorResponse(ctx, w, apiErr, r.URL)
+		return
+	}
+
 	if keyName, ok := formValues["Key"]; !ok {
 		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
 		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The name of the uploaded key is missing"))
@@ -1139,11 +1193,33 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}

-	hashReader, err := hash.NewReader(ctx, reader, fileSize, "", "", fileSize)
+	clientETag, err := etag.FromContentMD5(formValues)
+	if err != nil {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL)
+		return
+	}
+
+	var forceMD5 []byte
+	// Optimization: If SSE-KMS and SSE-C did not request Content-Md5. Use uuid as etag. Optionally enable this also
+	// for server that is started with `--no-compat`.
+	kind, _ := crypto.IsRequested(formValues)
+	if !etag.ContentMD5Requested(formValues) && (kind == crypto.SSEC || kind == crypto.S3KMS || !globalServerCtxt.StrictS3Compat) {
+		forceMD5 = mustGetUUIDBytes()
+	}
+
+	hashReader, err := hash.NewReaderWithOpts(ctx, reader, hash.Options{
+		Size:       actualSize,
+		MD5Hex:     clientETag.String(),
+		SHA256Hex:  "",
+		ActualSize: actualSize,
+		DisableMD5: false,
+		ForceMD5:   forceMD5,
+	})
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}

 	if checksum != nil && checksum.Valid() {
 		if err = hashReader.AddChecksumNoTrailer(formValues, false); err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -1200,9 +1276,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
 		return
 	}
-	opts.WantChecksum = checksum
+
+	fanOutOpts := fanOutOptions{Checksum: checksum}

 	if crypto.Requested(formValues) {
 		if crypto.SSECopy.IsRequested(r.Header) {
 			writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
@@ -1247,8 +1323,15 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return
 		}

+		wantSize := int64(-1)
+		if actualSize >= 0 {
+			info := ObjectInfo{Size: actualSize}
+			wantSize = info.EncryptedSize()
+		}
+
 		// do not try to verify encrypted content/
-		hashReader, err = hash.NewReader(ctx, reader, -1, "", "", -1)
+		hashReader, err = hash.NewReader(ctx, reader, wantSize, "", "", actualSize)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return
@@ -1259,6 +1342,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				return
 			}
 		}
+		opts.EncryptFn = metadataEncrypter(objectEncryptionKey)
 		pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -1302,10 +1386,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		// Set the correct hex md5sum for the fan-out stream.
 		fanOutOpts.MD5Hex = hex.EncodeToString(md5w.Sum(nil))

-		concurrentSize := 100
-		if runtime.GOMAXPROCS(0) < concurrentSize {
-			concurrentSize = runtime.GOMAXPROCS(0)
-		}
+		concurrentSize := min(runtime.GOMAXPROCS(0), 100)

 		fanOutResp := make([]minio.PutObjectFanOutResponse, 0, len(fanOutEntries))
 		eventArgsList := make([]eventArgs, 0, len(fanOutEntries))
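`min` here is the Go 1.21 builtin, which collapses the removed clamp block into one expression; for example:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// at most 100 workers, fewer on machines with fewer CPUs
	concurrentSize := min(runtime.GOMAXPROCS(0), 100)
	fmt.Println(concurrentSize)
}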
@@ -1328,7 +1409,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 					Key:   objInfo.Name,
 					Error: errs[i].Error(),
 				})
-
 				eventArgsList = append(eventArgsList, eventArgs{
 					EventName:  event.ObjectCreatedPost,
 					BucketName: objInfo.Bucket,
@@ -1577,9 +1657,11 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
 		return
 	}

-	if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
-		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
-		return
+	if s3Error := checkRequestAuthType(ctx, r, policy.HeadBucketAction, bucket, ""); s3Error != ErrNone {
+		if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
+			writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
+			return
+		}
 	}

 	getBucketInfo := objectAPI.GetBucketInfo
@@ -1682,7 +1764,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
 	}

 	globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
-	globalReplicationPool.deleteResyncMetadata(ctx, bucket)
+	globalReplicationPool.Get().deleteResyncMetadata(ctx, bucket)

 	// Call site replication hook.
 	replLogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete))
@@ -1731,6 +1813,10 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
 		return
 	}

+	// Audit log tags.
+	reqInfo := logger.GetReqInfo(ctx)
+	reqInfo.SetTags("retention", config.String())
+
 	configData, err := xml.Marshal(config)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -188,7 +188,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
 		if errorResponse.Code != testCase.errorResponse.Code {
 			t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
 		}
-
 	}

 	// Test for Anonymous/unsigned http request.
@@ -290,7 +289,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
 		if recV2.Code != testCase.expectedRespStatus {
 			t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
 		}
-
 	}

 	// Test for Anonymous/unsigned http request.
@@ -659,7 +657,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa

 	sha256sum := ""
 	var objectNames []string
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		contentBytes := []byte("hello")
 		objectName := "test-object-" + strconv.Itoa(i)
 		if i == 0 {
@@ -689,7 +687,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa

 	// The following block will create a bucket policy with delete object to 'public/*'. This is
 	// to test a mixed response of a successful & failure while deleting objects in a single request
-	policyBytes := []byte(fmt.Sprintf(`{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName))
+	policyBytes := fmt.Appendf(nil, `{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName)
 	rec := httptest.NewRecorder()
 	req, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", bucketName), int64(len(policyBytes)), bytes.NewReader(policyBytes),
 		credentials.AccessKey, credentials.SecretKey, nil)
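`fmt.Appendf` (Go 1.19+) formats straight into a byte slice, avoiding the `[]byte(fmt.Sprintf(...))` round trip the old line paid for; a minimal sketch:

package main

import "fmt"

func main() {
	// formats directly into a []byte, no intermediate string allocation
	b := fmt.Appendf(nil, "arn:aws:s3:::%s/public/*", "mybucket")
	fmt.Println(string(b))
}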
@@ -17,12 +17,16 @@

 package cmd

-import "github.com/minio/minio/internal/bucket/lifecycle"
+import (
+	"strconv"
+
+	"github.com/minio/minio/internal/bucket/lifecycle"
+)

 //go:generate stringer -type lcEventSrc -trimprefix lcEventSrc_ $GOFILE
 type lcEventSrc uint8

-//revive:disable:var-naming Underscores is used here to indicate where common prefix ends and the enumeration name begins
+//nolint:staticcheck,revive // Underscores are used here to indicate where common prefix ends and the enumeration name begins
 const (
 	lcEventSrc_None lcEventSrc = iota
 	lcEventSrc_Heal
@@ -43,7 +47,7 @@ type lcAuditEvent struct {
 	source lcEventSrc
 }

-func (lae lcAuditEvent) Tags() map[string]interface{} {
+func (lae lcAuditEvent) Tags() map[string]string {
 	event := lae.Event
 	src := lae.source
 	const (
@@ -55,7 +59,7 @@ func (lae lcAuditEvent) Tags() map[string]interface{} {
 		ilmNewerNoncurrentVersions = "ilm-newer-noncurrent-versions"
 		ilmNoncurrentDays          = "ilm-noncurrent-days"
 	)
-	tags := make(map[string]interface{}, 5)
+	tags := make(map[string]string, 5)
 	if src > lcEventSrc_None {
 		tags[ilmSrc] = src.String()
 	}
@@ -63,7 +67,7 @@ func (lae lcAuditEvent) Tags() map[string]interface{} {
 	tags[ilmRuleID] = event.RuleID

 	if !event.Due.IsZero() {
-		tags[ilmDue] = event.Due
+		tags[ilmDue] = event.Due.Format(iso8601Format)
 	}

 	// rule with Transition/NoncurrentVersionTransition in effect
@@ -73,10 +77,10 @@ func (lae lcAuditEvent) Tags() map[string]interface{} {

 	// rule with NewernoncurrentVersions in effect
 	if event.NewerNoncurrentVersions > 0 {
-		tags[ilmNewerNoncurrentVersions] = event.NewerNoncurrentVersions
+		tags[ilmNewerNoncurrentVersions] = strconv.Itoa(event.NewerNoncurrentVersions)
 	}
 	if event.NoncurrentDays > 0 {
-		tags[ilmNoncurrentDays] = event.NoncurrentDays
+		tags[ilmNoncurrentDays] = strconv.Itoa(event.NoncurrentDays)
 	}
 	return tags
 }
@@ -19,7 +19,6 @@ package cmd

 import (
 	"encoding/xml"
-	"io"
 	"net/http"
 	"strconv"
 	"time"
@@ -53,7 +52,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
 	bucket := vars["bucket"]

 	// PutBucketLifecycle always needs a Content-Md5
-	if _, ok := r.Header[xhttp.ContentMD5]; !ok {
+	if !validateLengthAndChecksum(r) {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL)
 		return
 	}
@@ -70,7 +69,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
 		return
 	}

-	bucketLifecycle, err := lifecycle.ParseLifecycleConfigWithID(io.LimitReader(r.Body, r.ContentLength))
+	bucketLifecycle, err := lifecycle.ParseLifecycleConfigWithID(r.Body)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
@@ -23,6 +23,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"net/http"
 	"strconv"
 	"strings"
@@ -71,8 +72,12 @@ func NewLifecycleSys() *LifecycleSys {
 	return &LifecycleSys{}
 }

-func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event string) madmin.TraceInfo {
+func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event string, metadata map[string]string, err string) madmin.TraceInfo {
 	sz, _ := oi.GetActualSize()
+	if metadata == nil {
+		metadata = make(map[string]string)
+	}
+	metadata["version-id"] = oi.VersionID
 	return madmin.TraceInfo{
 		TraceType: madmin.TraceILM,
 		Time:      startTime,
@@ -81,18 +86,22 @@ func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event
 		Duration:  duration,
 		Path:      pathJoin(oi.Bucket, oi.Name),
 		Bytes:     sz,
-		Error:     "",
+		Error:     err,
 		Message:   getSource(4),
-		Custom:    map[string]string{"version-id": oi.VersionID},
+		Custom:    metadata,
 	}
 }

-func (sys *LifecycleSys) trace(oi ObjectInfo) func(event string) {
+func (sys *LifecycleSys) trace(oi ObjectInfo) func(event string, metadata map[string]string, err error) {
 	startTime := time.Now()
-	return func(event string) {
+	return func(event string, metadata map[string]string, err error) {
 		duration := time.Since(startTime)
 		if globalTrace.NumSubscribers(madmin.TraceILM) > 0 {
-			globalTrace.Publish(ilmTrace(startTime, duration, oi, event))
+			e := ""
+			if err != nil {
+				e = err.Error()
+			}
+			globalTrace.Publish(ilmTrace(startTime, duration, oi, event, metadata, e))
 		}
 	}
 }
@@ -147,8 +156,8 @@ func (f freeVersionTask) OpHash() uint64 {
 	return xxh3.HashString(f.TransitionedObject.Tier + f.TransitionedObject.Name)
 }

-func (n newerNoncurrentTask) OpHash() uint64 {
-	return xxh3.HashString(n.bucket + n.versions[0].ObjectV.ObjectName)
+func (n noncurrentVersionsTask) OpHash() uint64 {
+	return xxh3.HashString(n.bucket + n.versions[0].ObjectName)
 }

 func (j jentry) OpHash() uint64 {
@@ -232,14 +241,16 @@ func (es *expiryState) enqueueByDays(oi ObjectInfo, event lifecycle.Event, src l
 	}
 }

-// enqueueByNewerNoncurrent enqueues object versions expired by
-// NewerNoncurrentVersions limit for expiry.
-func (es *expiryState) enqueueByNewerNoncurrent(bucket string, versions []ObjectToDelete, lcEvent lifecycle.Event) {
+func (es *expiryState) enqueueNoncurrentVersions(bucket string, versions []ObjectToDelete, events []lifecycle.Event) {
 	if len(versions) == 0 {
 		return
 	}

-	task := newerNoncurrentTask{bucket: bucket, versions: versions, event: lcEvent}
+	task := noncurrentVersionsTask{
+		bucket:   bucket,
+		versions: versions,
+		events:   events,
+	}
 	wrkr := es.getWorkerCh(task.OpHash())
 	if wrkr == nil {
 		es.stats.missedExpiryTasks.Add(1)
@@ -339,8 +350,8 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
 			} else {
 				applyExpiryOnNonTransitionedObjects(es.ctx, es.objAPI, v.objInfo, v.event, v.src)
 			}
-		case newerNoncurrentTask:
-			deleteObjectVersions(es.ctx, es.objAPI, v.bucket, v.versions, v.event)
+		case noncurrentVersionsTask:
+			deleteObjectVersions(es.ctx, es.objAPI, v.bucket, v.versions, v.events)
 		case jentry:
 			transitionLogIf(es.ctx, deleteObjectFromRemoteTier(es.ctx, v.ObjName, v.VersionID, v.TierName))
 		case freeVersionTask:
@@ -348,7 +359,7 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
 			traceFn := globalLifecycleSys.trace(oi)
 			if !oi.TransitionedObject.FreeVersion {
 				// nothing to be done
-				return
+				continue
 			}

 			ignoreNotFoundErr := func(err error) error {
@@ -362,7 +373,8 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
 			err := deleteObjectFromRemoteTier(es.ctx, oi.TransitionedObject.Name, oi.TransitionedObject.VersionID, oi.TransitionedObject.Tier)
 			if ignoreNotFoundErr(err) != nil {
 				transitionLogIf(es.ctx, err)
-				return
+				traceFn(ILMFreeVersionDelete, nil, err)
+				continue
 			}

 			// Remove this free version
@@ -387,12 +399,10 @@ func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
 	globalExpiryState = newExpiryState(ctx, objectAPI, globalILMConfig.getExpirationWorkers())
 }

-// newerNoncurrentTask encapsulates arguments required by worker to expire objects
-// by NewerNoncurrentVersions
-type newerNoncurrentTask struct {
+type noncurrentVersionsTask struct {
 	bucket   string
 	versions []ObjectToDelete
-	event    lifecycle.Event
+	events   []lifecycle.Event
 }

 type transitionTask struct {
@@ -707,6 +717,11 @@ type auditTierOp struct {
 	Error            string `json:"error,omitempty"`
 }

+func (op auditTierOp) String() string {
+	// flattening the auditTierOp{} for audit
+	return fmt.Sprintf("tier:%s,respNS:%d,tx:%d,err:%s", op.Tier, op.TimeToResponseNS, op.OutputBytes, op.Error)
+}
+
 func auditTierActions(ctx context.Context, tier string, bytes int64) func(err error) {
 	startTime := time.Now()
 	return func(err error) {
@@ -730,7 +745,7 @@ func auditTierActions(ctx context.Context, tier string, bytes int64) func(err er
 			globalTierMetrics.logFailure(tier)
 		}

-		logger.GetReqInfo(ctx).AppendTags("tierStats", op)
+		logger.GetReqInfo(ctx).AppendTags("tierStats", op.String())
 	}
 }
@@ -945,9 +960,7 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
 			UserDefined: meta,
 		}
 	}
-	for k, v := range objInfo.UserDefined {
-		meta[k] = v
-	}
+	maps.Copy(meta, objInfo.UserDefined)
 	if len(objInfo.UserTags) != 0 {
 		meta[xhttp.AmzObjectTagging] = objInfo.UserTags
 	}
@@ -998,7 +1011,7 @@ func ongoingRestoreObj() restoreObjStatus {
 	}
 }

-// completeRestoreObj constructs restoreObjStatus for a completed restore-object with given expiry.
+// completedRestoreObj constructs restoreObjStatus for a completed restore-object with given expiry.
 func completedRestoreObj(expiry time.Time) restoreObjStatus {
 	return restoreObjStatus{
 		ongoing: false,
@@ -1094,17 +1107,20 @@ func isRestoredObjectOnDisk(meta map[string]string) (onDisk bool) {
 // ToLifecycleOpts returns lifecycle.ObjectOpts value for oi.
 func (oi ObjectInfo) ToLifecycleOpts() lifecycle.ObjectOpts {
 	return lifecycle.ObjectOpts{
-		Name:             oi.Name,
-		UserTags:         oi.UserTags,
-		VersionID:        oi.VersionID,
-		ModTime:          oi.ModTime,
-		Size:             oi.Size,
-		IsLatest:         oi.IsLatest,
-		NumVersions:      oi.NumVersions,
-		DeleteMarker:     oi.DeleteMarker,
-		SuccessorModTime: oi.SuccessorModTime,
-		RestoreOngoing:   oi.RestoreOngoing,
-		RestoreExpires:   oi.RestoreExpires,
-		TransitionStatus: oi.TransitionedObject.Status,
+		Name:               oi.Name,
+		UserTags:           oi.UserTags,
+		VersionID:          oi.VersionID,
+		ModTime:            oi.ModTime,
+		Size:               oi.Size,
+		IsLatest:           oi.IsLatest,
+		NumVersions:        oi.NumVersions,
+		DeleteMarker:       oi.DeleteMarker,
+		SuccessorModTime:   oi.SuccessorModTime,
+		RestoreOngoing:     oi.RestoreOngoing,
+		RestoreExpires:     oi.RestoreExpires,
+		TransitionStatus:   oi.TransitionedObject.Status,
+		UserDefined:        oi.UserDefined,
+		VersionPurgeStatus: oi.VersionPurgeStatus,
+		ReplicationStatus:  oi.ReplicationStatus,
 	}
 }
@@ -243,26 +243,26 @@ func parseRequestToken(token string) (subToken string, nodeIndex int) {
 	return subToken, nodeIndex
 }

-func proxyRequestByToken(ctx context.Context, w http.ResponseWriter, r *http.Request, token string) (string, bool) {
-	subToken, nodeIndex := parseRequestToken(token)
-	if nodeIndex > 0 {
-		return subToken, proxyRequestByNodeIndex(ctx, w, r, nodeIndex)
+func proxyRequestByToken(ctx context.Context, w http.ResponseWriter, r *http.Request, token string, returnErr bool) (subToken string, proxied bool, success bool) {
+	var nodeIndex int
+	if subToken, nodeIndex = parseRequestToken(token); nodeIndex >= 0 {
+		proxied, success = proxyRequestByNodeIndex(ctx, w, r, nodeIndex, returnErr)
 	}
-	return subToken, false
+	return subToken, proxied, success
 }

-func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http.Request, index int) (success bool) {
+func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http.Request, index int, returnErr bool) (proxied, success bool) {
 	if len(globalProxyEndpoints) == 0 {
-		return false
+		return proxied, success
 	}
 	if index < 0 || index >= len(globalProxyEndpoints) {
-		return false
+		return proxied, success
 	}
 	ep := globalProxyEndpoints[index]
 	if ep.IsLocal {
-		return false
+		return proxied, success
 	}
-	return proxyRequest(ctx, w, r, ep)
+	return true, proxyRequest(ctx, w, r, ep, returnErr)
 }

 // ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
@@ -39,6 +39,7 @@ import (
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/pkg/v3/policy"
 	"github.com/minio/pkg/v3/sync/errgroup"
+	"golang.org/x/sync/singleflight"
 )

 // BucketMetadataSys captures all bucket metadata for a given cluster.
@@ -47,6 +48,7 @@ type BucketMetadataSys struct {

 	sync.RWMutex
 	initialized bool
+	group       *singleflight.Group
 	metadataMap map[string]BucketMetadata
 }
@@ -62,6 +64,7 @@ func (sys *BucketMetadataSys) Count() int {
 func (sys *BucketMetadataSys) Remove(buckets ...string) {
 	sys.Lock()
 	for _, bucket := range buckets {
+		sys.group.Forget(bucket)
 		delete(sys.metadataMap, bucket)
 		globalBucketMonitor.DeleteBucket(bucket)
 	}
@@ -121,6 +124,7 @@ func (sys *BucketMetadataSys) updateAndParse(ctx context.Context, bucket string,
 		meta.PolicyConfigUpdatedAt = updatedAt
 	case bucketNotificationConfig:
 		meta.NotificationConfigXML = configData
+		meta.NotificationConfigUpdatedAt = updatedAt
 	case bucketLifecycleConfig:
 		meta.LifecycleConfigXML = configData
 		meta.LifecycleConfigUpdatedAt = updatedAt
@@ -150,12 +154,13 @@ func (sys *BucketMetadataSys) updateAndParse(ctx context.Context, bucket string,
 		if err != nil {
 			return updatedAt, fmt.Errorf("Error encrypting bucket target metadata %w", err)
 		}
+		meta.BucketTargetsConfigUpdatedAt = updatedAt
+		meta.BucketTargetsConfigMetaUpdatedAt = updatedAt
 	default:
 		return updatedAt, fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
 	}

-	err = sys.save(ctx, meta)
-	return updatedAt, err
+	return updatedAt, sys.save(ctx, meta)
 }

 func (sys *BucketMetadataSys) save(ctx context.Context, meta BucketMetadata) error {
@@ -262,6 +267,21 @@ func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Ve
 	return meta.versioningConfig, meta.VersioningConfigUpdatedAt, nil
 }

+// GetBucketPolicy returns configured bucket policy
+func (sys *BucketMetadataSys) GetBucketPolicy(bucket string) (*policy.BucketPolicy, time.Time, error) {
+	meta, _, err := sys.GetConfig(GlobalContext, bucket)
+	if err != nil {
+		if errors.Is(err, errConfigNotFound) {
+			return nil, time.Time{}, BucketPolicyNotFound{Bucket: bucket}
+		}
+		return nil, time.Time{}, err
+	}
+	if meta.policyConfig == nil {
+		return nil, time.Time{}, BucketPolicyNotFound{Bucket: bucket}
+	}
+	return meta.policyConfig, meta.PolicyConfigUpdatedAt, nil
+}
+
 // GetTaggingConfig returns configured tagging config
 // The returned object may not be modified.
 func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, time.Time, error) {
@@ -451,13 +471,20 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
 	if ok {
 		return meta, reloaded, nil
 	}
-	meta, err = loadBucketMetadata(ctx, objAPI, bucket)
-	if err != nil {
-		if !sys.Initialized() {
-			// bucket metadata not yet initialized
-			return newBucketMetadata(bucket), reloaded, errBucketMetadataNotInitialized
+
+	val, err, _ := sys.group.Do(bucket, func() (val any, err error) {
+		meta, err = loadBucketMetadata(ctx, objAPI, bucket)
+		if err != nil {
+			if !sys.Initialized() {
+				// bucket metadata not yet initialized
+				return newBucketMetadata(bucket), errBucketMetadataNotInitialized
+			}
 		}
+		return meta, err
+	})
+	meta, _ = val.(BucketMetadata)
+	if err != nil {
+		return meta, false, err
 	}
-	return meta, reloaded, err
 	sys.Lock()
 	sys.metadataMap[bucket] = meta
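golang.org/x/sync/singleflight collapses concurrent loads for the same key into a single call, which is the pattern GetConfig now relies on; a minimal sketch:

package main

import (
	"fmt"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	// Concurrent Do calls with the same key share one execution of the
	// function; all callers receive the same result. loadConfig here is
	// just an inline stand-in for an expensive metadata load.
	v, err, shared := g.Do("bucket-name", func() (any, error) {
		return "metadata", nil
	})
	fmt.Println(v, err, shared)
}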
@@ -480,11 +507,10 @@ func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []string, objAPI
 }

 // concurrently load bucket metadata to speed up loading bucket metadata.
-func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []string, failedBuckets map[string]struct{}) {
+func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []string) {
 	g := errgroup.WithNErrs(len(buckets))
 	bucketMetas := make([]BucketMetadata, len(buckets))
 	for index := range buckets {
-		index := index
 		g.Go(func() error {
 			// Sleep and stagger to avoid blocked CPU and thundering
 			// herd upon start up sequence.
@@ -521,10 +547,6 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []stri

 	for i, meta := range bucketMetas {
 		if errs[i] != nil {
-			if failedBuckets == nil {
-				failedBuckets = make(map[string]struct{})
-			}
-			failedBuckets[buckets[i]] = struct{}{}
 			continue
 		}
 		globalEventNotifier.set(buckets[i], meta) // set notification targets
@@ -532,7 +554,7 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []stri
 	}
 }

-func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, failedBuckets map[string]struct{}) {
+func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context) {
 	const bucketMetadataRefresh = 15 * time.Minute

 	sleeper := newDynamicSleeper(2, 150*time.Millisecond, false)
@@ -562,7 +584,10 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
 		for i := range buckets {
 			wait := sleeper.Timer(ctx)

-			meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[i].Name)
+			bucket := buckets[i].Name
+			updated := false
+
+			meta, err := loadBucketMetadata(ctx, sys.objAPI, bucket)
 			if err != nil {
 				internalLogIf(ctx, err, logger.WarningKind)
 				wait() // wait to proceed to next entry.
@@ -570,14 +595,16 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
 			}

 			sys.Lock()
-			sys.metadataMap[buckets[i].Name] = meta
+			// Update if the bucket metadata in the memory is older than on-disk one
+			if lu := sys.metadataMap[bucket].lastUpdate(); lu.Before(meta.lastUpdate()) {
+				updated = true
+				sys.metadataMap[bucket] = meta
+			}
 			sys.Unlock()

-			// Initialize the failed buckets
-			if _, ok := failedBuckets[buckets[i].Name]; ok {
-				globalEventNotifier.set(buckets[i].Name, meta)
-				globalBucketTargetSys.set(buckets[i].Name, meta)
-				delete(failedBuckets, buckets[i].Name)
+			if updated {
+				globalEventNotifier.set(bucket, meta)
+				globalBucketTargetSys.set(bucket, meta)
 			}

 			wait() // wait to proceed to next entry.
@@ -598,13 +625,12 @@ func (sys *BucketMetadataSys) Initialized() bool {

 // Loads bucket metadata for all buckets into BucketMetadataSys.
 func (sys *BucketMetadataSys) init(ctx context.Context, buckets []string) {
 	count := globalEndpoints.ESCount() * 10
-	failedBuckets := make(map[string]struct{})
 	for {
 		if len(buckets) < count {
-			sys.concurrentLoad(ctx, buckets, failedBuckets)
+			sys.concurrentLoad(ctx, buckets)
 			break
 		}
-		sys.concurrentLoad(ctx, buckets[:count], failedBuckets)
+		sys.concurrentLoad(ctx, buckets[:count])
 		buckets = buckets[count:]
 	}
@@ -613,16 +639,14 @@ func (sys *BucketMetadataSys) init(ctx context.Context, buckets []string) {
 	sys.Unlock()

 	if globalIsDistErasure {
-		go sys.refreshBucketsMetadataLoop(ctx, failedBuckets)
+		go sys.refreshBucketsMetadataLoop(ctx)
 	}
 }

 // Reset the state of the BucketMetadataSys.
 func (sys *BucketMetadataSys) Reset() {
 	sys.Lock()
-	for k := range sys.metadataMap {
-		delete(sys.metadataMap, k)
-	}
+	clear(sys.metadataMap)
 	sys.Unlock()
 }
@@ -630,5 +654,6 @@ func (sys *BucketMetadataSys) Reset() {
 func NewBucketMetadataSys() *BucketMetadataSys {
 	return &BucketMetadataSys{
 		metadataMap: make(map[string]BucketMetadata),
+		group:       &singleflight.Group{},
 	}
 }
Some files were not shown because too many files have changed in this diff.