Compare commits

...

7 Commits

Author SHA1 Message Date
dorman
3a0cc6c86e
fix doc 404 (#21670) 2025-10-26 19:47:37 -07:00
yangw
10b0a234d2
fix: update metric descriptions to specify current MinIO server instance (#21638)
Signed-off-by: yangw <wuyangmuc@gmail.com>
2025-10-23 21:06:31 -07:00
Raul-Mircea Crivineanu
18f97e70b1
Updates for conditional put read quorum issue (#21653) 2025-10-23 21:05:31 -07:00
Menno Finlay-Smits
52eee5a2f1
fix(api): Don't send multiple responses for one request (#21651)
fix(api): Don't send responses twice.

In some cases multiple responses are being sent for one request, causing
the API server to incorrectly drop connections.

This change introduces a ResponseWriter which tracks whether a
response has already been sent. This is used to prevent a response being
sent if something already has (e.g. by a preconditions check function).

Fixes #21633.

Co-authored-by: Menno Finlay-Smits <hello@menno.io>
2025-10-23 21:05:19 -07:00
Rishabh Agrahari
c6d3aac5c4
Fix typo in entrypoint script path in README (#21657) 2025-10-23 08:10:39 -07:00
M Alvee
fa18589d1c
fix: Tagging in PostPolicy upload does not enforce policy tags (#21656) 2025-10-23 08:10:12 -07:00
Harshavardhana
05e569960a update scripts pointing to internal registry for community releases 2025-10-19 01:22:05 -07:00
12 changed files with 592 additions and 38 deletions

View File

@ -1,8 +1,14 @@
FROM minio/minio:latest FROM minio/minio:latest
ARG TARGETARCH
ARG RELEASE
RUN chmod -R 777 /usr/bin RUN chmod -R 777 /usr/bin
COPY ./minio /usr/bin/minio COPY ./minio-${TARGETARCH}.${RELEASE} /usr/bin/minio
COPY ./minio-${TARGETARCH}.${RELEASE}.minisig /usr/bin/minio.minisig
COPY ./minio-${TARGETARCH}.${RELEASE}.sha256sum /usr/bin/minio.sha256sum
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"] ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

View File

@ -77,7 +77,7 @@ mc admin info local
``` ```
See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool.
For application developers, see <https://docs.min.io/community/minio-object-store/developers/minio-drivers.html> to view MinIO SDKs for supported languages. For application developers, see <https://docs.min.io/enterprise/aistor-object-store/developers/sdk/> to view MinIO SDKs for supported languages.
> [!NOTE] > [!NOTE]
> Production environments using compiled-from-source MinIO binaries do so at their own risk. > Production environments using compiled-from-source MinIO binaries do so at their own risk.
@ -102,7 +102,7 @@ docker run -p 9000:9000 -p 9001:9001 myminio:minio server /tmp/minio --console-a
``` ```
Complete documentation for building Docker containers, managing custom images, or loading images into orchestration platforms is out of scope for this documentation. Complete documentation for building Docker containers, managing custom images, or loading images into orchestration platforms is out of scope for this documentation.
You can modify the `Dockerfile` and `dockerscripts/socker-entrypoint.sh` as-needed to reflect your specific image requirements. You can modify the `Dockerfile` and `dockerscripts/docker-entrypoint.sh` as-needed to reflect your specific image requirements.
See the [MinIO Container](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-as-a-container.html#deploy-minio-container) documentation for more guidance on running MinIO within a Container image. See the [MinIO Container](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-as-a-container.html#deploy-minio-container) documentation for more guidance on running MinIO within a Container image.
@ -147,7 +147,7 @@ Follow the MinIO Client [Quickstart Guide](https://docs.min.io/community/minio-o
- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) - [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html)
- [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) - [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html)
- [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html) - [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html)
- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/community/minio-object-store/developers/go/minio-go.html) - [Use `minio-go` SDK with MinIO Server](https://docs.min.io/enterprise/aistor-object-store/developers/sdk/go/)
## Contribute to MinIO Project ## Contribute to MinIO Project

View File

@ -889,6 +889,12 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err
} }
func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) { func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
// Don't write a response if one has already been written.
// Fixes https://github.com/minio/minio/issues/21633
if headersAlreadyWritten(w) {
return
}
if statusCode == 0 { if statusCode == 0 {
statusCode = 200 statusCode = 200
} }
@ -1015,3 +1021,45 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
encodedErrorResponse := encodeResponseJSON(errorResponse) encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON) writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
} }
type unwrapper interface {
Unwrap() http.ResponseWriter
}
// headersAlreadyWritten returns true if the headers have already been written
// to this response writer. It will unwrap the ResponseWriter if possible to try
// and find a trackingResponseWriter.
func headersAlreadyWritten(w http.ResponseWriter) bool {
for {
if trw, ok := w.(*trackingResponseWriter); ok {
return trw.headerWritten
} else if uw, ok := w.(unwrapper); ok {
w = uw.Unwrap()
} else {
return false
}
}
}
// trackingResponseWriter wraps a ResponseWriter and notes when WriterHeader has
// been called. This allows high level request handlers to check if something
// has already sent the header.
type trackingResponseWriter struct {
http.ResponseWriter
headerWritten bool
}
func (w *trackingResponseWriter) WriteHeader(statusCode int) {
if !w.headerWritten {
w.headerWritten = true
w.ResponseWriter.WriteHeader(statusCode)
}
}
func (w *trackingResponseWriter) Write(b []byte) (int, error) {
return w.ResponseWriter.Write(b)
}
func (w *trackingResponseWriter) Unwrap() http.ResponseWriter {
return w.ResponseWriter
}

View File

@ -18,8 +18,12 @@
package cmd package cmd
import ( import (
"io"
"net/http" "net/http"
"net/http/httptest"
"testing" "testing"
"github.com/klauspost/compress/gzhttp"
) )
// Tests object location. // Tests object location.
@ -122,3 +126,89 @@ func TestGetURLScheme(t *testing.T) {
t.Errorf("Expected %s, got %s", httpsScheme, gotScheme) t.Errorf("Expected %s, got %s", httpsScheme, gotScheme)
} }
} }
// TestTrackingResponseWriter verifies that the wrapper records WriteHeader,
// forwards both status and body to the underlying writer, and unwraps.
func TestTrackingResponseWriter(t *testing.T) {
	rec := httptest.NewRecorder()
	tw := &trackingResponseWriter{ResponseWriter: rec}

	tw.WriteHeader(123)
	if !tw.headerWritten {
		t.Fatal("headerWritten was not set by WriteHeader call")
	}
	if _, err := tw.Write([]byte("hello")); err != nil {
		t.Fatalf("Write unexpectedly failed: %v", err)
	}

	// Check that WriteHeader and Write were called on the underlying response writer
	resp := rec.Result()
	if resp.StatusCode != 123 {
		t.Fatalf("unexpected status: %v", resp.StatusCode)
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("reading response body failed: %v", err)
	}
	if string(body) != "hello" {
		t.Fatalf("response body incorrect: %v", string(body))
	}

	// Check that Unwrap works
	if tw.Unwrap() != rec {
		t.Fatalf("Unwrap returned wrong result: %v", tw.Unwrap())
	}
}
// TestHeadersAlreadyWritten checks detection on a directly-passed
// trackingResponseWriter, before and after WriteHeader.
func TestHeadersAlreadyWritten(t *testing.T) {
	tw := &trackingResponseWriter{ResponseWriter: httptest.NewRecorder()}
	if headersAlreadyWritten(tw) {
		t.Fatal("headers have not been written yet")
	}
	tw.WriteHeader(123)
	if !headersAlreadyWritten(tw) {
		t.Fatal("headers were written")
	}
}
// TestHeadersAlreadyWrittenWrapped checks that detection still works when the
// trackingResponseWriter is buried under other wrappers that support Unwrap.
func TestHeadersAlreadyWrittenWrapped(t *testing.T) {
	tw := &trackingResponseWriter{ResponseWriter: httptest.NewRecorder()}
	// Two layers of gzhttp wrapping on top of the tracking writer.
	outer := &gzhttp.NoGzipResponseWriter{
		ResponseWriter: &gzhttp.NoGzipResponseWriter{ResponseWriter: tw},
	}
	if headersAlreadyWritten(outer) {
		t.Fatal("headers have not been written yet")
	}
	outer.WriteHeader(123)
	if !headersAlreadyWritten(outer) {
		t.Fatal("headers were written")
	}
}
func TestWriteResponseHeadersNotWritten(t *testing.T) {
rw := httptest.NewRecorder()
trw := &trackingResponseWriter{ResponseWriter: rw}
writeResponse(trw, 299, []byte("hello"), "application/foo")
resp := rw.Result()
if resp.StatusCode != 299 {
t.Fatal("response wasn't written")
}
}
// TestWriteResponseHeadersWritten checks that writeResponse is suppressed when
// the tracking writer says a response has already been sent.
func TestWriteResponseHeadersWritten(t *testing.T) {
	rec := httptest.NewRecorder()
	// Sentinel value: any write through writeResponse would replace it.
	rec.Code = -1
	tw := &trackingResponseWriter{ResponseWriter: rec, headerWritten: true}
	writeResponse(tw, 200, []byte("hello"), "application/foo")
	if rec.Code != -1 {
		t.Fatalf("response was written when it shouldn't have been (Code=%v)", rec.Code)
	}
}

View File

@ -218,6 +218,8 @@ func s3APIMiddleware(f http.HandlerFunc, flags ...s3HFlag) http.HandlerFunc {
handlerName := getHandlerName(f, "objectAPIHandlers") handlerName := getHandlerName(f, "objectAPIHandlers")
var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) { var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
w = &trackingResponseWriter{ResponseWriter: w}
// Wrap the actual handler with the appropriate tracing middleware. // Wrap the actual handler with the appropriate tracing middleware.
var tracedHandler http.HandlerFunc var tracedHandler http.HandlerFunc
if handlerFlags.has(traceHdrsS3HFlag) { if handlerFlags.has(traceHdrsS3HFlag) {

View File

@ -0,0 +1,225 @@
// Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"testing"
"github.com/dustin/go-humanize"
xhttp "github.com/minio/minio/internal/http"
)
// TestNewMultipartUploadConditionalWithReadQuorumFailure tests that conditional
// multipart uploads (with if-match/if-none-match) behave correctly when read quorum
// cannot be reached.
//
// Related to: https://github.com/minio/minio/issues/21603
//
// Should return an error when read quorum cannot
// be reached, as we cannot reliably determine if the precondition is met.
func TestNewMultipartUploadConditionalWithReadQuorumFailure(t *testing.T) {
	ctx := context.Background()

	// Build a 16-disk erasure-coded object layer backed by temp dirs.
	obj, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}
	defer obj.Shutdown(context.Background())
	defer removeRoots(fsDirs)

	// Reach into the first set of the first pool so we can override its disks.
	z := obj.(*erasureServerPools)
	xl := z.serverPools[0].sets[0]

	bucket := "test-bucket"
	object := "test-object"

	err = obj.MakeBucket(ctx, bucket, MakeBucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Put an initial object so it exists
	_, err = obj.PutObject(ctx, bucket, object,
		mustGetPutObjReader(t, bytes.NewReader([]byte("initial-value")),
			int64(len("initial-value")), "", ""), ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Get object info to capture the ETag
	objInfo, err := obj.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}
	existingETag := objInfo.ETag

	// Simulate read quorum failure by taking enough disks offline
	// With 16 disks (EC 8+8), read quorum is 9. Taking 8 disks offline leaves only 8,
	// which is below read quorum.
	// NOTE(review): the override mutates the captured slice on every call;
	// disks stay offline for all subtests below.
	erasureDisks := xl.getDisks()
	z.serverPools[0].erasureDisksMu.Lock()
	xl.getDisks = func() []StorageAPI {
		for i := range erasureDisks[:8] {
			erasureDisks[i] = nil
		}
		return erasureDisks
	}
	z.serverPools[0].erasureDisksMu.Unlock()

	t.Run("if-none-match with read quorum failure", func(t *testing.T) {
		// Test Case 1: if-none-match (create only if doesn't exist)
		// With if-none-match: *, this should only succeed if object doesn't exist.
		// Since read quorum fails, we can't determine if object exists.
		opts := ObjectOptions{
			UserDefined: map[string]string{
				xhttp.IfNoneMatch: "*",
			},
			CheckPrecondFn: func(oi ObjectInfo) bool {
				// Precondition fails if object exists (ETag is not empty)
				return oi.ETag != ""
			},
		}
		_, err := obj.NewMultipartUpload(ctx, bucket, object, opts)
		if !isErrReadQuorum(err) {
			t.Errorf("Expected read quorum error when if-none-match is used with quorum failure, got: %v", err)
		}
	})

	t.Run("if-match with wrong ETag and read quorum failure", func(t *testing.T) {
		// Test Case 2: if-match with WRONG ETag
		// This should fail even without quorum issues, but with quorum failure
		// we can't verify the ETag at all.
		opts := ObjectOptions{
			UserDefined: map[string]string{
				xhttp.IfMatch: "wrong-etag-12345",
			},
			HasIfMatch: true,
			CheckPrecondFn: func(oi ObjectInfo) bool {
				// Precondition fails if ETags don't match
				return oi.ETag != "wrong-etag-12345"
			},
		}
		_, err := obj.NewMultipartUpload(ctx, bucket, object, opts)
		// Deliberately only logged (not failed): see the issue discussion above.
		if !isErrReadQuorum(err) {
			t.Logf("Got error (as expected): %v", err)
			t.Logf("But expected read quorum error, not object-not-found error")
		}
	})

	t.Run("if-match with correct ETag and read quorum failure", func(t *testing.T) {
		// Test Case 3: if-match with CORRECT ETag but read quorum failure
		// Even with the correct ETag, we shouldn't proceed if we can't verify it.
		opts := ObjectOptions{
			UserDefined: map[string]string{
				xhttp.IfMatch: existingETag,
			},
			HasIfMatch: true,
			CheckPrecondFn: func(oi ObjectInfo) bool {
				// Precondition fails if ETags don't match
				return oi.ETag != existingETag
			},
		}
		_, err := obj.NewMultipartUpload(ctx, bucket, object, opts)
		if !isErrReadQuorum(err) {
			t.Errorf("Expected read quorum error when if-match is used with quorum failure, got: %v", err)
		}
	})
}
// TestCompleteMultipartUploadConditionalWithReadQuorumFailure tests that conditional
// complete multipart upload operations behave correctly when read quorum cannot be reached.
// The upload itself is started and a part uploaded while all disks are healthy;
// only the final conditional CompleteMultipartUpload runs under quorum failure.
func TestCompleteMultipartUploadConditionalWithReadQuorumFailure(t *testing.T) {
	ctx := context.Background()

	// Build a 16-disk erasure-coded object layer backed by temp dirs.
	obj, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}
	defer obj.Shutdown(context.Background())
	defer removeRoots(fsDirs)

	z := obj.(*erasureServerPools)
	xl := z.serverPools[0].sets[0]

	bucket := "test-bucket"
	object := "test-object"

	err = obj.MakeBucket(ctx, bucket, MakeBucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Put an initial object
	_, err = obj.PutObject(ctx, bucket, object,
		mustGetPutObjReader(t, bytes.NewReader([]byte("initial-value")),
			int64(len("initial-value")), "", ""), ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Start a multipart upload WITHOUT conditional checks (this should work)
	res, err := obj.NewMultipartUpload(ctx, bucket, object, ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Upload a part
	partData := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
	md5Hex := getMD5Hash(partData)
	_, err = obj.PutObjectPart(ctx, bucket, object, res.UploadID, 1,
		mustGetPutObjReader(t, bytes.NewReader(partData), int64(len(partData)), md5Hex, ""),
		ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Now simulate read quorum failure
	// (16 disks, EC 8+8: nil-ing 8 disks drops us below the read quorum of 9).
	erasureDisks := xl.getDisks()
	z.serverPools[0].erasureDisksMu.Lock()
	xl.getDisks = func() []StorageAPI {
		for i := range erasureDisks[:8] {
			erasureDisks[i] = nil
		}
		return erasureDisks
	}
	z.serverPools[0].erasureDisksMu.Unlock()

	t.Run("complete multipart with if-none-match and read quorum failure", func(t *testing.T) {
		// Try to complete the multipart upload with if-none-match
		// This should fail because we can't verify the condition due to read quorum failure
		opts := ObjectOptions{
			UserDefined: map[string]string{
				xhttp.IfNoneMatch: "*",
			},
			CheckPrecondFn: func(oi ObjectInfo) bool {
				return oi.ETag != ""
			},
		}
		parts := []CompletePart{{PartNumber: 1, ETag: md5Hex}}
		_, err := obj.CompleteMultipartUpload(ctx, bucket, object, res.UploadID, parts, opts)
		if !isErrReadQuorum(err) {
			t.Errorf("Expected read quorum error, got: %v", err)
		}
	})
}

View File

@ -390,7 +390,7 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
if err == nil && opts.CheckPrecondFn(obj) { if err == nil && opts.CheckPrecondFn(obj) {
return nil, PreConditionFailed{} return nil, PreConditionFailed{}
} }
if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !isErrReadQuorum(err) { if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
return nil, err return nil, err
} }
@ -1114,7 +1114,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
if err == nil && opts.CheckPrecondFn(obj) { if err == nil && opts.CheckPrecondFn(obj) {
return ObjectInfo{}, PreConditionFailed{} return ObjectInfo{}, PreConditionFailed{}
} }
if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !isErrReadQuorum(err) { if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
return ObjectInfo{}, err return ObjectInfo{}, err
} }

View File

@ -0,0 +1,150 @@
// Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"context"
"testing"
xhttp "github.com/minio/minio/internal/http"
)
// TestPutObjectConditionalWithReadQuorumFailure tests that conditional
// PutObject operations (with if-match/if-none-match) behave correctly when read quorum
// cannot be reached.
//
// Related to: https://github.com/minio/minio/issues/21603
//
// Should return an error when read quorum cannot
// be reached, as we cannot reliably determine if the precondition is met.
func TestPutObjectConditionalWithReadQuorumFailure(t *testing.T) {
	ctx := context.Background()

	// Build a 16-disk erasure-coded object layer backed by temp dirs.
	obj, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}
	defer obj.Shutdown(context.Background())
	defer removeRoots(fsDirs)

	// Reach into the first set of the first pool so we can override its disks.
	z := obj.(*erasureServerPools)
	xl := z.serverPools[0].sets[0]

	bucket := "test-bucket"
	object := "test-object"

	err = obj.MakeBucket(ctx, bucket, MakeBucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Put an initial object so it exists
	_, err = obj.PutObject(ctx, bucket, object,
		mustGetPutObjReader(t, bytes.NewReader([]byte("initial-value")),
			int64(len("initial-value")), "", ""), ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Get object info to capture the ETag
	objInfo, err := obj.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}
	existingETag := objInfo.ETag

	// Simulate read quorum failure by taking enough disks offline
	// With 16 disks (EC 8+8), read quorum is 9. Taking 8 disks offline leaves only 8,
	// which is below read quorum.
	erasureDisks := xl.getDisks()
	z.serverPools[0].erasureDisksMu.Lock()
	xl.getDisks = func() []StorageAPI {
		for i := range erasureDisks[:8] {
			erasureDisks[i] = nil
		}
		return erasureDisks
	}
	z.serverPools[0].erasureDisksMu.Unlock()

	t.Run("if-none-match with read quorum failure", func(t *testing.T) {
		// Test Case 1: if-none-match (create only if doesn't exist)
		// With if-none-match: *, this should only succeed if object doesn't exist.
		// Since read quorum fails, we can't determine if object exists.
		opts := ObjectOptions{
			UserDefined: map[string]string{
				xhttp.IfNoneMatch: "*",
			},
			CheckPrecondFn: func(oi ObjectInfo) bool {
				// Precondition fails if object exists (ETag is not empty)
				return oi.ETag != ""
			},
		}
		_, err := obj.PutObject(ctx, bucket, object,
			mustGetPutObjReader(t, bytes.NewReader([]byte("new-value")),
				int64(len("new-value")), "", ""), opts)
		if !isErrReadQuorum(err) {
			t.Errorf("Expected read quorum error when if-none-match is used with quorum failure, got: %v", err)
		}
	})

	t.Run("if-match with read quorum failure", func(t *testing.T) {
		// Test Case 2: if-match (update only if ETag matches)
		// With if-match: <etag>, this should only succeed if object exists with matching ETag.
		// Since read quorum fails, we can't determine if object exists or ETag matches.
		opts := ObjectOptions{
			UserDefined: map[string]string{
				xhttp.IfMatch: existingETag,
			},
			CheckPrecondFn: func(oi ObjectInfo) bool {
				// Precondition fails if ETag doesn't match
				return oi.ETag != existingETag
			},
		}
		_, err := obj.PutObject(ctx, bucket, object,
			mustGetPutObjReader(t, bytes.NewReader([]byte("updated-value")),
				int64(len("updated-value")), "", ""), opts)
		if !isErrReadQuorum(err) {
			t.Errorf("Expected read quorum error when if-match is used with quorum failure, got: %v", err)
		}
	})

	t.Run("if-match wrong etag with read quorum failure", func(t *testing.T) {
		// Test Case 3: if-match with wrong ETag
		// Even if the ETag doesn't match, we should still get read quorum error
		// because we can't read the object to check the condition.
		opts := ObjectOptions{
			UserDefined: map[string]string{
				xhttp.IfMatch: "wrong-etag",
			},
			CheckPrecondFn: func(oi ObjectInfo) bool {
				// Precondition fails if ETag doesn't match
				return oi.ETag != "wrong-etag"
			},
		}
		_, err := obj.PutObject(ctx, bucket, object,
			mustGetPutObjReader(t, bytes.NewReader([]byte("should-fail")),
				int64(len("should-fail")), "", ""), opts)
		if !isErrReadQuorum(err) {
			t.Errorf("Expected read quorum error when if-match is used with quorum failure (even with wrong ETag), got: %v", err)
		}
	})
}

View File

@ -1274,7 +1274,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
if err == nil && opts.CheckPrecondFn(obj) { if err == nil && opts.CheckPrecondFn(obj) {
return objInfo, PreConditionFailed{} return objInfo, PreConditionFailed{}
} }
if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !isErrReadQuorum(err) { if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
return objInfo, err return objInfo, err
} }

View File

@ -386,7 +386,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc( prometheus.NewDesc(
prometheus.BuildFQName(minioNamespace, "capacity_raw", "total"), prometheus.BuildFQName(minioNamespace, "capacity_raw", "total"),
"Total capacity online in the cluster", "Total capacity online in current MinIO server instance",
nil, nil), nil, nil),
prometheus.GaugeValue, prometheus.GaugeValue,
float64(GetTotalCapacity(server.Disks)), float64(GetTotalCapacity(server.Disks)),
@ -396,7 +396,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc( prometheus.NewDesc(
prometheus.BuildFQName(minioNamespace, "capacity_raw_free", "total"), prometheus.BuildFQName(minioNamespace, "capacity_raw_free", "total"),
"Total free capacity online in the cluster", "Total free capacity online in current MinIO server instance",
nil, nil), nil, nil),
prometheus.GaugeValue, prometheus.GaugeValue,
float64(GetTotalCapacityFree(server.Disks)), float64(GetTotalCapacityFree(server.Disks)),
@ -408,7 +408,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc( prometheus.NewDesc(
prometheus.BuildFQName(minioNamespace, "capacity_usable", "total"), prometheus.BuildFQName(minioNamespace, "capacity_usable", "total"),
"Total usable capacity online in the cluster", "Total usable capacity online in current MinIO server instance",
nil, nil), nil, nil),
prometheus.GaugeValue, prometheus.GaugeValue,
float64(GetTotalUsableCapacity(server.Disks, sinfo)), float64(GetTotalUsableCapacity(server.Disks, sinfo)),
@ -418,7 +418,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc( prometheus.NewDesc(
prometheus.BuildFQName(minioNamespace, "capacity_usable_free", "total"), prometheus.BuildFQName(minioNamespace, "capacity_usable_free", "total"),
"Total free usable capacity online in the cluster", "Total free usable capacity online in current MinIO server instance",
nil, nil), nil, nil),
prometheus.GaugeValue, prometheus.GaugeValue,
float64(GetTotalUsableCapacityFree(server.Disks, sinfo)), float64(GetTotalUsableCapacityFree(server.Disks, sinfo)),

View File

@ -51,6 +51,7 @@ var startsWithConds = map[string]bool{
"$x-amz-algorithm": false, "$x-amz-algorithm": false,
"$x-amz-credential": false, "$x-amz-credential": false,
"$x-amz-date": false, "$x-amz-date": false,
"$tagging": false,
} }
// Add policy conditionals. // Add policy conditionals.

View File

@ -1,6 +1,14 @@
#!/bin/bash #!/bin/bash
sudo sysctl net.ipv6.conf.all.disable_ipv6=0 set -ex
function _init() {
## All binaries are static make sure to disable CGO.
export CGO_ENABLED=0
export CRED_DIR="/media/${USER}/minio"
## List of architectures and OS to test cross compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/amd64 linux/arm64"
remote=$(git remote get-url upstream) remote=$(git remote get-url upstream)
if test "$remote" != "git@github.com:minio/minio.git"; then if test "$remote" != "git@github.com:minio/minio.git"; then
@ -11,27 +19,51 @@ fi
git remote update upstream && git checkout master && git rebase upstream/master git remote update upstream && git checkout master && git rebase upstream/master
release=$(git describe --abbrev=0 --tags) release=$(git describe --abbrev=0 --tags)
export release
}
function _build() {
local osarch=$1
IFS=/ read -r -a arr <<<"$osarch"
os="${arr[0]}"
arch="${arr[1]}"
package=$(go list -f '{{.ImportPath}}')
printf -- "--> %15s:%s\n" "${osarch}" "${package}"
# go build -trimpath to build the binary.
export GOOS=$os
export GOARCH=$arch
export MINIO_RELEASE=RELEASE
LDFLAGS=$(go run buildscripts/gen-ldflags.go)
go build -tags kqueue -trimpath --ldflags "${LDFLAGS}" -o ./minio-${arch}.${release}
minisign -qQSm ./minio-${arch}.${release} -s "$CRED_DIR/minisign.key" <"$CRED_DIR/minisign-passphrase"
sha256sum_str=$(sha256sum <./minio-${arch}.${release})
rc=$?
if [ "$rc" -ne 0 ]; then
abort "unable to generate sha256sum for ${1}"
fi
echo "${sha256sum_str// -/minio.${release}}" >./minio-${arch}.${release}.sha256sum
}
function main() {
echo "Testing builds for OS/Arch: ${SUPPORTED_OSARCH}"
for each_osarch in ${SUPPORTED_OSARCH}; do
_build "${each_osarch}"
done
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
docker buildx build --push --no-cache \ docker buildx build --push --no-cache \
--build-arg RELEASE="${release}" \ --build-arg RELEASE="${release}" \
-t "minio/minio:latest" \ -t "registry.min.dev/community/minio:latest" \
-t "minio/minio:latest-cicd" \ -t "registry.min.dev/community/minio:${release}" \
-t "quay.io/minio/minio:latest" \
-t "quay.io/minio/minio:latest-cicd" \
-t "minio/minio:${release}" \
-t "quay.io/minio/minio:${release}" \
--platform=linux/arm64,linux/amd64,linux/ppc64le \ --platform=linux/arm64,linux/amd64,linux/ppc64le \
-f Dockerfile.release . -f Dockerfile .
docker buildx prune -f
docker buildx build --push --no-cache \
--build-arg RELEASE="${release}" \
-t "minio/minio:${release}-cpuv1" \
-t "quay.io/minio/minio:${release}-cpuv1" \
--platform=linux/arm64,linux/amd64,linux/ppc64le \
-f Dockerfile.release.old_cpu .
docker buildx prune -f docker buildx prune -f
sudo sysctl net.ipv6.conf.all.disable_ipv6=0 sudo sysctl net.ipv6.conf.all.disable_ipv6=0
}
_init && main "$@"