2021-04-18 15:41:13 -04:00
|
|
|
// Copyright (c) 2015-2021 MinIO, Inc.
|
|
|
|
//
|
|
|
|
// This file is part of MinIO Object Storage stack
|
|
|
|
//
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Affero General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This program is distributed in the hope that it will be useful
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Affero General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Affero General Public License
|
|
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2015-04-29 05:19:51 -04:00
|
|
|
|
2016-08-18 19:23:42 -04:00
|
|
|
package cmd
|
2015-04-22 19:28:13 -04:00
|
|
|
|
|
|
|
import (
|
2017-05-31 03:11:06 -04:00
|
|
|
"bytes"
|
2018-03-14 15:01:47 -04:00
|
|
|
"context"
|
2017-12-05 20:58:09 -05:00
|
|
|
"crypto/tls"
|
2017-04-07 17:37:32 -04:00
|
|
|
"encoding/json"
|
2016-04-20 20:35:38 -04:00
|
|
|
"encoding/xml"
|
2018-09-18 19:46:35 -04:00
|
|
|
"errors"
|
2016-08-26 03:11:53 -04:00
|
|
|
"fmt"
|
2016-04-20 20:35:38 -04:00
|
|
|
"io"
|
2021-04-16 21:58:26 -04:00
|
|
|
"net"
|
2016-09-09 12:38:07 -04:00
|
|
|
"net/http"
|
2020-02-11 22:38:02 -05:00
|
|
|
"net/url"
|
2017-10-13 06:01:15 -04:00
|
|
|
"os"
|
2021-03-09 15:58:22 -05:00
|
|
|
"path"
|
2018-09-18 19:46:35 -04:00
|
|
|
"path/filepath"
|
2017-12-28 12:32:48 -05:00
|
|
|
"reflect"
|
2020-01-10 20:19:58 -05:00
|
|
|
"runtime"
|
|
|
|
"runtime/pprof"
|
|
|
|
"runtime/trace"
|
2016-04-20 20:35:38 -04:00
|
|
|
"strings"
|
2020-01-10 20:19:58 -05:00
|
|
|
"sync"
|
2017-03-18 14:28:41 -04:00
|
|
|
"time"
|
2016-09-01 23:13:11 -04:00
|
|
|
|
2022-01-07 20:41:43 -05:00
|
|
|
"github.com/coreos/go-oidc"
|
directio: Check if buffers are set. (#13440)
Check if directio buffers have actually been fetched and prevent errors on double Close. Return error on Read after Close.
Fixes
```
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x0 pc=0xf8582f]
goroutine 210 [running]:
github.com/minio/minio/internal/ioutil.(*ODirectReader).Read(0xc0054f8320, {0xc0014560b0, 0xa8, 0x44d012})
github.com/minio/minio/internal/ioutil/odirect_reader.go:88 +0x10f
io.ReadAtLeast({0x428c5c0, 0xc0054f8320}, {0xc0014560b0, 0xa8, 0xa8}, 0xa8)
io/io.go:328 +0x9a
io.ReadFull(...)
io/io.go:347
github.com/minio/minio/internal/ioutil.ReadFile({0xc001bf60e0, 0x6})
github.com/minio/minio/internal/ioutil/read_file.go:48 +0x19b
github.com/minio/minio/cmd.(*FSObjects).scanBucket.func1({{0xc00444e1e0, 0x4d}, 0x0, {0xc0040cf240, 0xe}, {0xc0040cf24f, 0x18}, {0xc0040cf268, 0x18}, 0x0, ...})
github.com/minio/minio/cmd/fs-v1.go:366 +0x1ea
github.com/minio/minio/cmd.(*folderScanner).scanFolder.func1({0xc00474a6a8, 0xc0065d6793}, 0x0)
github.com/minio/minio/cmd/data-scanner.go:494 +0xb15
github.com/minio/minio/cmd.readDirFn({0xc002803e80, 0x34}, 0xc000670270)
github.com/minio/minio/cmd/os-readdir_unix.go:172 +0x638
github.com/minio/minio/cmd.(*folderScanner).scanFolder(0xc002deeb40, {0x42dc9d0, 0xc00068cbc0}, {{0xc001c6e2d0, 0x27}, 0xc0023db8e0, 0x1}, 0xc0001c7ab0)
github.com/minio/minio/cmd/data-scanner.go:427 +0xa8f
github.com/minio/minio/cmd.(*folderScanner).scanFolder.func2({{0xc001c6e2d0, 0x27}, 0xc0023db8e0, 0x27})
github.com/minio/minio/cmd/data-scanner.go:549 +0xd0
github.com/minio/minio/cmd.(*folderScanner).scanFolder(0xc002deeb40, {0x42dc9d0, 0xc00068cbc0}, {{0xc0013fa9e0, 0xe}, 0x0, 0x1}, 0xc000670dd8)
github.com/minio/minio/cmd/data-scanner.go:623 +0x205d
github.com/minio/minio/cmd.scanDataFolder({_, _}, {_, _}, {{{0xc0013fa9e0, 0xe}, 0x802, {0x210f15d2, 0xed8f903b8, 0x5bc0e80}, ...}, ...}, ...)
github.com/minio/minio/cmd/data-scanner.go:333 +0xc51
github.com/minio/minio/cmd.(*FSObjects).scanBucket(_, {_, _}, {_, _}, {{{0xc0013fa9e0, 0xe}, 0x802, {0x210f15d2, 0xed8f903b8, ...}, ...}, ...})
github.com/minio/minio/cmd/fs-v1.go:364 +0x305
github.com/minio/minio/cmd.(*FSObjects).NSScanner(0x42dc9d0, {0x42dc9d0, 0xc00068cbc0}, 0x0, 0xc003bcfda0, 0x802)
github.com/minio/minio/cmd/fs-v1.go:307 +0xa16
github.com/minio/minio/cmd.runDataScanner({0x42dc9d0, 0xc00068cbc0}, {0x436a6c0, 0xc000bfcf50})
github.com/minio/minio/cmd/data-scanner.go:150 +0x749
created by github.com/minio/minio/cmd.initDataScanner
github.com/minio/minio/cmd/data-scanner.go:73 +0xb0
```
2021-10-14 13:19:17 -04:00
|
|
|
"github.com/dustin/go-humanize"
|
2022-02-16 15:00:10 -05:00
|
|
|
"github.com/felixge/fgprof"
|
2022-12-06 16:46:50 -05:00
|
|
|
"github.com/minio/madmin-go/v2"
|
2022-10-24 20:44:15 -04:00
|
|
|
"github.com/minio/minio-go/v7"
|
2021-06-17 23:27:04 -04:00
|
|
|
miniogopolicy "github.com/minio/minio-go/v7/pkg/policy"
|
2021-11-29 12:06:56 -05:00
|
|
|
"github.com/minio/minio/internal/config"
|
|
|
|
"github.com/minio/minio/internal/config/api"
|
|
|
|
xtls "github.com/minio/minio/internal/config/identity/tls"
|
2022-11-05 14:09:21 -04:00
|
|
|
"github.com/minio/minio/internal/deadlineconn"
|
2021-11-29 12:06:56 -05:00
|
|
|
"github.com/minio/minio/internal/fips"
|
2021-06-01 17:59:40 -04:00
|
|
|
"github.com/minio/minio/internal/handlers"
|
2022-10-24 20:44:15 -04:00
|
|
|
"github.com/minio/minio/internal/hash"
|
2021-06-01 17:59:40 -04:00
|
|
|
xhttp "github.com/minio/minio/internal/http"
|
2022-07-05 17:45:49 -04:00
|
|
|
ioutilx "github.com/minio/minio/internal/ioutil"
|
2021-06-01 17:59:40 -04:00
|
|
|
"github.com/minio/minio/internal/logger"
|
2021-06-29 02:58:08 -04:00
|
|
|
"github.com/minio/minio/internal/logger/message/audit"
|
2022-12-06 12:27:26 -05:00
|
|
|
"github.com/minio/minio/internal/mcontext"
|
2022-12-12 23:31:21 -05:00
|
|
|
"github.com/minio/minio/internal/rest"
|
2023-01-23 06:12:47 -05:00
|
|
|
"github.com/minio/mux"
|
2021-05-28 18:17:01 -04:00
|
|
|
"github.com/minio/pkg/certs"
|
2021-11-29 12:06:56 -05:00
|
|
|
"github.com/minio/pkg/env"
|
2023-02-22 00:21:17 -05:00
|
|
|
pkgAudit "github.com/minio/pkg/logger/message/audit"
|
2022-10-24 20:44:15 -04:00
|
|
|
xnet "github.com/minio/pkg/net"
|
2022-01-07 20:41:43 -05:00
|
|
|
"golang.org/x/oauth2"
|
2015-04-22 19:28:13 -04:00
|
|
|
)
|
|
|
|
|
2020-03-27 00:07:39 -04:00
|
|
|
const (
	// slashSeparator is the canonical separator for S3 object keys and
	// URL paths; always "/", independent of the host OS path separator.
	slashSeparator = "/"
)
|
|
|
|
|
2021-06-17 23:27:04 -04:00
|
|
|
// BucketAccessPolicy - Collection of canned bucket policy at a given prefix.
type BucketAccessPolicy struct {
	Bucket string                     `json:"bucket"` // Bucket name the policy applies to.
	Prefix string                     `json:"prefix"` // Object key prefix within the bucket.
	Policy miniogopolicy.BucketPolicy `json:"policy"` // Canned policy value for this bucket/prefix.
}
|
|
|
|
|
2018-04-10 12:36:37 -04:00
|
|
|
// IsErrIgnored returns whether given error is ignored or not.
// It is a thin alias over IsErr kept for call-site readability.
func IsErrIgnored(err error, ignoredErrs ...error) bool {
	return IsErr(err, ignoredErrs...)
}
|
|
|
|
|
|
|
|
// IsErr returns whether given error is exact error.
|
|
|
|
func IsErr(err error, errs ...error) bool {
|
|
|
|
for _, exactErr := range errs {
|
2019-09-11 13:21:43 -04:00
|
|
|
if errors.Is(err, exactErr) {
|
2018-04-10 12:36:37 -04:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2022-10-24 20:44:15 -04:00
|
|
|
// ErrorRespToObjectError converts MinIO errors to minio object layer errors.
//
// params is positional: params[0] (if present) is the bucket name and
// params[1] (if present) is the object name; both are used only to
// populate the converted error values.
func ErrorRespToObjectError(err error, params ...string) error {
	if err == nil {
		return nil
	}

	bucket := ""
	object := ""
	if len(params) >= 1 {
		bucket = params[0]
	}
	if len(params) == 2 {
		object = params[1]
	}

	// Network/host-level failures are surfaced as a backend outage
	// before any S3 error-code interpretation.
	if xnet.IsNetworkOrHostDown(err, false) {
		return BackendDown{Err: err.Error()}
	}

	minioErr, ok := err.(minio.ErrorResponse)
	if !ok {
		// We don't interpret non MinIO errors. As minio errors will
		// have StatusCode to help to convert to object errors.
		return err
	}

	// Map well-known S3 error codes to the object layer's typed errors.
	switch minioErr.Code {
	case "PreconditionFailed":
		err = PreConditionFailed{}
	case "InvalidRange":
		err = InvalidRange{}
	case "BucketAlreadyOwnedByYou":
		err = BucketAlreadyOwnedByYou{}
	case "BucketNotEmpty":
		err = BucketNotEmpty{}
	case "NoSuchBucketPolicy":
		err = BucketPolicyNotFound{}
	case "NoSuchLifecycleConfiguration":
		err = BucketLifecycleNotFound{}
	case "InvalidBucketName":
		err = BucketNameInvalid{Bucket: bucket}
	case "InvalidPart":
		err = InvalidPart{}
	case "NoSuchBucket":
		err = BucketNotFound{Bucket: bucket}
	case "NoSuchKey":
		// Without an object name a NoSuchKey is reported against the
		// bucket itself.
		if object != "" {
			err = ObjectNotFound{Bucket: bucket, Object: object}
		} else {
			err = BucketNotFound{Bucket: bucket}
		}
	case "XMinioInvalidObjectName":
		err = ObjectNameInvalid{}
	case "AccessDenied":
		err = PrefixAccessDenied{
			Bucket: bucket,
			Object: object,
		}
	case "XAmzContentSHA256Mismatch":
		err = hash.SHA256Mismatch{}
	case "NoSuchUpload":
		err = InvalidUploadID{}
	case "EntityTooSmall":
		err = PartTooSmall{}
	}

	// 405 carries no distinctive code above; convert by status instead.
	if minioErr.StatusCode == http.StatusMethodNotAllowed {
		err = toObjectErr(errMethodNotAllowed, bucket, object)
	}
	return err
}
|
|
|
|
|
2022-05-02 12:27:35 -04:00
|
|
|
// returns 'true' if either string has space in the
// - beginning of a string
// OR
// - end of a string
func hasSpaceBE(s string) bool {
	trimmed := strings.TrimSpace(s)
	return trimmed != s
}
|
|
|
|
|
2019-06-26 21:21:54 -04:00
|
|
|
// request2BucketObjectName extracts the bucket and object names from an
// incoming HTTP request, resolving virtual-host style addressing via the
// configured global domain names.
func request2BucketObjectName(r *http.Request) (bucketName, objectName string) {
	path, err := getResource(r.URL.Path, r.Host, globalDomainNames)
	if err != nil {
		// A resource that cannot be resolved indicates broken server
		// configuration; treat it as fatal.
		logger.CriticalIf(GlobalContext, err)
	}

	return path2BucketObject(path)
}
|
2017-01-10 14:01:23 -05:00
|
|
|
|
2020-01-21 17:07:49 -05:00
|
|
|
// path2BucketObjectWithBasePath returns bucket and prefix, if any,
|
|
|
|
// of a 'path'. basePath is trimmed from the front of the 'path'.
|
|
|
|
func path2BucketObjectWithBasePath(basePath, path string) (bucket, prefix string) {
|
|
|
|
path = strings.TrimPrefix(path, basePath)
|
|
|
|
path = strings.TrimPrefix(path, SlashSeparator)
|
|
|
|
m := strings.Index(path, SlashSeparator)
|
|
|
|
if m < 0 {
|
|
|
|
return path, ""
|
2017-04-11 18:44:27 -04:00
|
|
|
}
|
2020-01-21 17:07:49 -05:00
|
|
|
return path[:m], path[m+len(SlashSeparator):]
|
|
|
|
}
|
2017-01-10 14:01:23 -05:00
|
|
|
|
2020-01-21 17:07:49 -05:00
|
|
|
func path2BucketObject(s string) (bucket, prefix string) {
|
|
|
|
return path2BucketObjectWithBasePath("", s)
|
2017-01-10 14:01:23 -05:00
|
|
|
}
|
|
|
|
|
2020-09-10 14:37:22 -04:00
|
|
|
// cloneMSS will clone a map[string]string.
// If input is nil an empty map is returned, not nil.
func cloneMSS(v map[string]string) map[string]string {
	out := make(map[string]string, len(v))
	for key, val := range v {
		out[key] = val
	}
	return out
}
|
|
|
|
|
2017-01-18 15:24:34 -05:00
|
|
|
// URI scheme constants.
const (
	httpScheme  = "http"  // plain-text HTTP scheme
	httpsScheme = "https" // TLS HTTP scheme
)
|
|
|
|
|
2018-12-13 15:09:50 -05:00
|
|
|
// nopCharsetConverter is a dummy charset convert which just copies input to output,
// it is used to ignore custom encoding charset in S3 XML body.
// It satisfies xml.Decoder.CharsetReader's expected signature and never fails.
func nopCharsetConverter(label string, input io.Reader) (io.Reader, error) {
	return input, nil
}
|
|
|
|
|
2016-04-20 20:35:38 -04:00
|
|
|
// xmlDecoder provide decoded value in xml.
|
2016-07-19 00:20:17 -04:00
|
|
|
func xmlDecoder(body io.Reader, v interface{}, size int64) error {
|
|
|
|
var lbody io.Reader
|
|
|
|
if size > 0 {
|
|
|
|
lbody = io.LimitReader(body, size)
|
|
|
|
} else {
|
|
|
|
lbody = body
|
|
|
|
}
|
|
|
|
d := xml.NewDecoder(lbody)
|
2018-12-13 15:09:50 -05:00
|
|
|
// Ignore any encoding set in the XML body
|
|
|
|
d.CharsetReader = nopCharsetConverter
|
2022-12-07 11:30:52 -05:00
|
|
|
err := d.Decode(v)
|
|
|
|
if errors.Is(err, io.EOF) {
|
|
|
|
err = &xml.SyntaxError{
|
|
|
|
Line: 0,
|
|
|
|
Msg: err.Error(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return err
|
2016-04-20 20:35:38 -04:00
|
|
|
}
|
|
|
|
|
2020-03-04 10:04:12 -05:00
|
|
|
// hasContentMD5 returns true if Content-MD5 header is set.
func hasContentMD5(h http.Header) bool {
	// Direct map lookup (rather than h.Get) so an empty-but-present
	// header still counts as set.
	_, ok := h[xhttp.ContentMD5]
	return ok
}
|
|
|
|
|
2021-11-16 12:28:29 -05:00
|
|
|
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
const (
	// Maximum object size per PUT request is 5TB.
	// This is a divergence from S3 limit on purpose to support
	// use cases where users are going to upload large files
	// using 'curl' and presigned URL.
	globalMaxObjectSize = 5 * humanize.TiByte

	// Minimum Part size for multipart upload is 5MiB
	globalMinPartSize = 5 * humanize.MiByte

	// Maximum Part ID for multipart upload is 10000
	// (Acceptable values range from 1 to 10000 inclusive)
	globalMaxPartID = 10000
)
|
|
|
|
|
|
|
|
// isMaxObjectSize - verify if max object size
// i.e. reports whether size exceeds the globalMaxObjectSize limit.
func isMaxObjectSize(size int64) bool {
	return size > globalMaxObjectSize
}
|
|
|
|
|
2016-05-08 15:06:05 -04:00
|
|
|
// Check if part size is more than or equal to minimum allowed size
// (globalMinPartSize) for a multipart upload part.
func isMinAllowedPartSize(size int64) bool {
	return size >= globalMinPartSize
}
|
|
|
|
|
2016-05-24 04:52:47 -04:00
|
|
|
// isMaxPartID - Check if part ID is greater than the maximum allowed ID
// (globalMaxPartID; valid IDs are 1..10000 inclusive).
func isMaxPartID(partID int) bool {
	return partID > globalMaxPartID
}
|
|
|
|
|
2017-12-28 12:32:48 -05:00
|
|
|
// contains reports whether slice (any slice value, inspected via
// reflection) holds an element equal (==) to elem. Non-slice inputs
// always yield false.
func contains(slice interface{}, elem interface{}) bool {
	sv := reflect.ValueOf(slice)
	if sv.Kind() != reflect.Slice {
		return false
	}
	for i := 0; i < sv.Len(); i++ {
		if sv.Index(i).Interface() == elem {
			return true
		}
	}
	return false
}
|
2016-08-05 16:48:31 -04:00
|
|
|
|
2018-09-18 19:46:35 -04:00
|
|
|
// profilerWrapper is created becauses pkg/profiler doesn't
// provide any API to calculate the profiler file path in the
// disk since the name of this latter is randomly generated.
type profilerWrapper struct {
	// Profile recorded at start of benchmark.
	records map[string][]byte // snapshots taken via record(), keyed by record name
	stopFn  func() ([]byte, error) // stops the profiler and returns the final profile bytes
	ext     string // file extension for the produced profile (without dot)
}
|
|
|
|
|
2021-07-23 16:16:53 -04:00
|
|
|
// record will record the profile and store it as the base.
|
|
|
|
func (p *profilerWrapper) record(profileType string, debug int, recordName string) {
|
2020-01-21 18:49:25 -05:00
|
|
|
var buf bytes.Buffer
|
2021-07-23 16:16:53 -04:00
|
|
|
if p.records == nil {
|
|
|
|
p.records = make(map[string][]byte)
|
|
|
|
}
|
|
|
|
err := pprof.Lookup(profileType).WriteTo(&buf, debug)
|
2020-01-21 18:49:25 -05:00
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
2021-07-23 16:16:53 -04:00
|
|
|
p.records[recordName] = buf.Bytes()
|
2020-01-21 18:49:25 -05:00
|
|
|
}
|
|
|
|
|
2021-07-23 16:16:53 -04:00
|
|
|
// Records returns the recorded profiling if any.
// The returned map is the wrapper's internal storage, not a copy.
func (p profilerWrapper) Records() map[string][]byte {
	return p.records
}
|
|
|
|
|
|
|
|
// Stop the currently running benchmark by delegating to the
// profiler-specific stop function and returning its profile bytes.
func (p profilerWrapper) Stop() ([]byte, error) {
	return p.stopFn()
}
|
|
|
|
|
2020-03-04 09:58:12 -05:00
|
|
|
// Extension returns the extension without dot prefix.
func (p profilerWrapper) Extension() string {
	return p.ext
}
|
|
|
|
|
2019-01-14 01:44:20 -05:00
|
|
|
// Returns current profile data, returns error if there is no active
// profiling in progress. Stops an active profile.
//
// The result maps "<type>.<ext>" (and "<type>-<record>.<ext>" for any
// pre-recorded snapshots) to the raw profile bytes. All running
// profilers are removed from globalProfiler as they are stopped.
func getProfileData() (map[string][]byte, error) {
	globalProfilerMu.Lock()
	defer globalProfilerMu.Unlock()

	if len(globalProfiler) == 0 {
		return nil, errors.New("profiler not enabled")
	}

	dst := make(map[string][]byte, len(globalProfiler))
	for typ, prof := range globalProfiler {
		// Stop the profiler
		var err error
		buf, err := prof.Stop()
		delete(globalProfiler, typ)
		if err == nil {
			dst[typ+"."+prof.Extension()] = buf
		}
		// Include any "before" snapshots taken when profiling started.
		for name, buf := range prof.Records() {
			if len(buf) > 0 {
				dst[typ+"-"+name+"."+prof.Extension()] = buf
			}
		}
	}
	return dst, nil
}
|
|
|
|
|
2020-01-21 18:49:25 -05:00
|
|
|
// setDefaultProfilerRates configures process-wide runtime profiling
// rates; heavier profiles stay disabled until a profiler is started.
func setDefaultProfilerRates() {
	runtime.MemProfileRate = 4096      // 512K -> 4K - Must be constant throughout application lifetime.
	runtime.SetMutexProfileFraction(0) // Disable until needed
	runtime.SetBlockProfileRate(0)     // Disable until needed
}
|
|
|
|
|
2016-09-01 23:13:11 -04:00
|
|
|
// Starts a profiler returns nil if profiler is not enabled, caller needs to handle this.
//
// The returned minioProfiler must eventually be stopped via Stop() to
// collect the profile bytes and release any temporary files.
func startProfiler(profilerType string) (minioProfiler, error) {
	var prof profilerWrapper
	prof.ext = "pprof"
	// Enable profiler and set the name of the file that pkg/pprof
	// library creates to store profiling data.
	switch madmin.ProfilerType(profilerType) {
	case madmin.ProfilerCPU:
		// CPU profile is streamed to a temp file; Stop closes the file,
		// reads it back, and removes the temp directory.
		dirPath, err := os.MkdirTemp("", "profile")
		if err != nil {
			return nil, err
		}
		fn := filepath.Join(dirPath, "cpu.out")
		f, err := Create(fn)
		if err != nil {
			return nil, err
		}
		err = pprof.StartCPUProfile(f)
		if err != nil {
			return nil, err
		}
		prof.stopFn = func() ([]byte, error) {
			pprof.StopCPUProfile()
			err := f.Close()
			if err != nil {
				return nil, err
			}
			defer RemoveAll(dirPath)
			return ioutilx.ReadFile(fn)
		}
	case madmin.ProfilerCPUIO:
		// at 10k or more goroutines fgprof is likely to become
		// unable to maintain its sampling rate and to significantly
		// degrade the performance of your application
		// https://github.com/felixge/fgprof#fgprof
		if n := runtime.NumGoroutine(); n > 10000 && !globalIsCICD {
			return nil, fmt.Errorf("unable to perform CPU IO profile with %d goroutines", n)
		}
		dirPath, err := os.MkdirTemp("", "profile")
		if err != nil {
			return nil, err
		}
		fn := filepath.Join(dirPath, "cpuio.out")
		f, err := Create(fn)
		if err != nil {
			return nil, err
		}
		stop := fgprof.Start(f, fgprof.FormatPprof)
		prof.stopFn = func() ([]byte, error) {
			err := stop()
			if err != nil {
				return nil, err
			}
			err = f.Close()
			if err != nil {
				return nil, err
			}
			defer RemoveAll(dirPath)
			return ioutilx.ReadFile(fn)
		}
	case madmin.ProfilerMEM:
		// Snapshot the heap before starting so before/after can be compared.
		runtime.GC()
		prof.record("heap", 0, "before")
		prof.stopFn = func() ([]byte, error) {
			runtime.GC()
			var buf bytes.Buffer
			err := pprof.Lookup("heap").WriteTo(&buf, 0)
			return buf.Bytes(), err
		}
	case madmin.ProfilerBlock:
		// Block profiling is enabled only for the duration of the run.
		runtime.SetBlockProfileRate(100)
		prof.stopFn = func() ([]byte, error) {
			var buf bytes.Buffer
			err := pprof.Lookup("block").WriteTo(&buf, 0)
			runtime.SetBlockProfileRate(0)
			return buf.Bytes(), err
		}
	case madmin.ProfilerMutex:
		// Record a baseline, then sample every mutex contention event.
		prof.record("mutex", 0, "before")
		runtime.SetMutexProfileFraction(1)
		prof.stopFn = func() ([]byte, error) {
			var buf bytes.Buffer
			err := pprof.Lookup("mutex").WriteTo(&buf, 0)
			runtime.SetMutexProfileFraction(0)
			return buf.Bytes(), err
		}
	case madmin.ProfilerThreads:
		prof.record("threadcreate", 0, "before")
		prof.stopFn = func() ([]byte, error) {
			var buf bytes.Buffer
			err := pprof.Lookup("threadcreate").WriteTo(&buf, 0)
			return buf.Bytes(), err
		}
	case madmin.ProfilerGoroutines:
		// Goroutine dumps are textual, not binary pprof.
		prof.ext = "txt"
		prof.record("goroutine", 1, "before")
		prof.record("goroutine", 2, "before,debug=2")
		prof.stopFn = func() ([]byte, error) {
			var buf bytes.Buffer
			err := pprof.Lookup("goroutine").WriteTo(&buf, 1)
			return buf.Bytes(), err
		}
	case madmin.ProfilerTrace:
		// Execution trace is streamed to a temp file like the CPU profile.
		dirPath, err := os.MkdirTemp("", "profile")
		if err != nil {
			return nil, err
		}
		fn := filepath.Join(dirPath, "trace.out")
		f, err := Create(fn)
		if err != nil {
			return nil, err
		}
		err = trace.Start(f)
		if err != nil {
			return nil, err
		}
		prof.ext = "trace"
		prof.stopFn = func() ([]byte, error) {
			trace.Stop()
			err := f.Close()
			if err != nil {
				return nil, err
			}
			defer RemoveAll(dirPath)
			return ioutilx.ReadFile(fn)
		}
	default:
		return nil, errors.New("profiler type unknown")
	}

	return prof, nil
}
|
|
|
|
|
2019-02-13 07:59:36 -05:00
|
|
|
// minioProfiler - minio profiler interface.
type minioProfiler interface {
	// Return recorded profiles, each profile associated with a distinct generic name.
	Records() map[string][]byte
	// Stop the profiler and return the final profile bytes.
	Stop() ([]byte, error)
	// Return extension of profile (without the leading dot).
	Extension() string
}
|
|
|
|
|
2019-02-13 07:59:36 -05:00
|
|
|
// Global profiler to be used by service go-routine.
var (
	// globalProfiler maps profiler type name to its running instance.
	globalProfiler map[string]minioProfiler
	// globalProfilerMu guards all access to globalProfiler.
	globalProfilerMu sync.Mutex
)
|
2019-02-13 07:59:36 -05:00
|
|
|
|
2016-09-19 13:17:46 -04:00
|
|
|
// dump the request into a string in JSON format.
|
|
|
|
func dumpRequest(r *http.Request) string {
|
2019-09-11 13:21:43 -04:00
|
|
|
header := r.Header.Clone()
|
2016-09-19 13:17:46 -04:00
|
|
|
header.Set("Host", r.Host)
|
2017-05-31 03:11:06 -04:00
|
|
|
// Replace all '%' to '%%' so that printer format parser
|
|
|
|
// to ignore URL encoded values.
|
2021-11-16 12:28:29 -05:00
|
|
|
rawURI := strings.ReplaceAll(r.RequestURI, "%", "%%")
|
2016-09-19 13:17:46 -04:00
|
|
|
req := struct {
|
2017-05-31 03:11:06 -04:00
|
|
|
Method string `json:"method"`
|
|
|
|
RequestURI string `json:"reqURI"`
|
|
|
|
Header http.Header `json:"header"`
|
|
|
|
}{r.Method, rawURI, header}
|
|
|
|
|
|
|
|
var buffer bytes.Buffer
|
|
|
|
enc := json.NewEncoder(&buffer)
|
|
|
|
enc.SetEscapeHTML(false)
|
|
|
|
if err := enc.Encode(&req); err != nil {
|
2017-04-07 17:37:32 -04:00
|
|
|
// Upon error just return Go-syntax representation of the value
|
|
|
|
return fmt.Sprintf("%#v", req)
|
2016-09-19 13:17:46 -04:00
|
|
|
}
|
2017-05-31 03:11:06 -04:00
|
|
|
|
|
|
|
// Formatted string.
|
2019-02-13 07:59:36 -05:00
|
|
|
return strings.TrimSpace(buffer.String())
|
2016-09-19 13:17:46 -04:00
|
|
|
}
|
2017-02-27 17:59:53 -05:00
|
|
|
|
2017-03-02 17:21:30 -05:00
|
|
|
// isFile - returns whether given path is a file or not.
|
|
|
|
func isFile(path string) bool {
|
2017-10-13 06:01:15 -04:00
|
|
|
if fi, err := os.Stat(path); err == nil {
|
2017-03-02 17:21:30 -05:00
|
|
|
return fi.Mode().IsRegular()
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
2017-03-15 19:30:34 -04:00
|
|
|
|
2017-03-18 14:28:41 -04:00
|
|
|
// UTCNow - returns current UTC time.
|
|
|
|
func UTCNow() time.Time {
|
|
|
|
return time.Now().UTC()
|
|
|
|
}
|
2017-10-26 13:17:07 -04:00
|
|
|
|
2017-12-05 20:58:09 -05:00
|
|
|
// GenETag - generate UUID based ETag: the MD5 hex of a fresh UUID,
// normalized to S3 form via ToS3ETag.
func GenETag() string {
	return ToS3ETag(getMD5Hash([]byte(mustGetUUID())))
}
|
|
|
|
|
2017-12-05 20:58:09 -05:00
|
|
|
// ToS3ETag - return checksum to ETag
|
|
|
|
func ToS3ETag(etag string) string {
|
2017-10-26 13:17:07 -04:00
|
|
|
etag = canonicalizeETag(etag)
|
|
|
|
|
|
|
|
if !strings.HasSuffix(etag, "-1") {
|
|
|
|
// Tools like s3cmd uses ETag as checksum of data to validate.
|
|
|
|
// Append "-1" to indicate ETag is not a checksum.
|
|
|
|
etag += "-1"
|
|
|
|
}
|
|
|
|
|
|
|
|
return etag
|
|
|
|
}
|
2017-12-05 20:58:09 -05:00
|
|
|
|
2022-12-13 17:28:48 -05:00
|
|
|
// GetDefaultConnSettings returns default HTTP connection settings:
// the global DNS cache and root CAs with the standard REST dial timeout.
func GetDefaultConnSettings() xhttp.ConnSettings {
	return xhttp.ConnSettings{
		DNSCache:    globalDNSCache,
		DialTimeout: rest.DefaultTimeout,
		RootCAs:     globalRootCAs,
	}
}
|
|
|
|
|
2022-12-12 23:31:21 -05:00
|
|
|
// NewInternodeHTTPTransport returns a transport for internode MinIO
// connections.
//
// Uses FIPS-compatible cipher suites and curves; HTTP/2 is disabled.
func NewInternodeHTTPTransport() func() http.RoundTripper {
	return xhttp.ConnSettings{
		DNSCache:         globalDNSCache,
		DialTimeout:      rest.DefaultTimeout,
		RootCAs:          globalRootCAs,
		CipherSuites:     fips.TLSCiphers(),
		CurvePreferences: fips.TLSCurveIDs(),
		EnableHTTP2:      false,
	}.NewInternodeHTTPTransport()
}
|
|
|
|
|
|
|
|
// NewCustomHTTPProxyTransport is used only for proxied requests, specifically
// only supports HTTP/1.1
func NewCustomHTTPProxyTransport() func() *http.Transport {
	return xhttp.ConnSettings{
		DNSCache:         globalDNSCache,
		DialTimeout:      rest.DefaultTimeout,
		RootCAs:          globalRootCAs,
		CipherSuites:     fips.TLSCiphers(),
		CurvePreferences: fips.TLSCurveIDs(),
		EnableHTTP2:      false,
	}.NewCustomHTTPProxyTransport()
}
|
|
|
|
|
2022-10-24 20:44:15 -04:00
|
|
|
// NewHTTPTransportWithClientCerts returns a new http configuration
// used while communicating with the cloud backends.
//
// When both clientCert and clientKey are provided, the transport is
// built with that client certificate; a load failure is logged and the
// (possibly nil) transport is still returned — NOTE(review): callers
// appear to tolerate a nil transport here, confirm before relying on it.
func NewHTTPTransportWithClientCerts(clientCert, clientKey string) *http.Transport {
	s := xhttp.ConnSettings{
		DNSCache:    globalDNSCache,
		DialTimeout: defaultDialTimeout,
		RootCAs:     globalRootCAs,
		EnableHTTP2: false,
	}

	if clientCert != "" && clientKey != "" {
		// Bounded context so certificate loading cannot hang startup.
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		transport, err := s.NewHTTPTransportWithClientCerts(ctx, clientCert, clientKey)
		if err != nil {
			logger.LogIf(ctx, fmt.Errorf("failed to load client key and cert, please check your endpoint configuration: %s",
				err.Error()))
		}
		return transport
	}

	// No client certs configured: plain transport with a 1 minute timeout.
	return s.NewHTTPTransportWithTimeout(1 * time.Minute)
}
|
|
|
|
|
2022-10-24 20:44:15 -04:00
|
|
|
// NewHTTPTransport returns a new http configuration
// used while communicating with the cloud backends.
// Equivalent to NewHTTPTransportWithTimeout with a 1 minute timeout.
func NewHTTPTransport() *http.Transport {
	return NewHTTPTransportWithTimeout(1 * time.Minute)
}
|
|
|
|
|
2022-10-24 20:44:15 -04:00
|
|
|
// Default values for dial timeout
const defaultDialTimeout = 5 * time.Second
|
|
|
|
|
2022-12-12 23:31:21 -05:00
|
|
|
// NewHTTPTransportWithTimeout allows setting a timeout.
// The transport uses the global DNS cache and root CAs, the default
// dial timeout, and has HTTP/2 disabled.
func NewHTTPTransportWithTimeout(timeout time.Duration) *http.Transport {
	return xhttp.ConnSettings{
		DNSCache:    globalDNSCache,
		DialTimeout: defaultDialTimeout,
		RootCAs:     globalRootCAs,
		EnableHTTP2: false,
	}.NewHTTPTransportWithTimeout(timeout)
}
|
2018-01-08 17:30:55 -05:00
|
|
|
|
2022-11-05 14:09:21 -04:00
|
|
|
// dialContext matches the signature expected by http.Transport.DialContext.
type dialContext func(ctx context.Context, network, addr string) (net.Conn, error)

// newCustomDialContext setups a custom dialer for any external communication and proxies.
// Connections are wrapped in deadlineconn so every read/write carries
// the globally configured deadlines.
func newCustomDialContext() dialContext {
	return func(ctx context.Context, network, addr string) (net.Conn, error) {
		dialer := &net.Dialer{
			Timeout:   15 * time.Second,
			KeepAlive: 30 * time.Second,
		}

		conn, err := dialer.DialContext(ctx, network, addr)
		if err != nil {
			return nil, err
		}

		dconn := deadlineconn.New(conn).
			WithReadDeadline(globalConnReadDeadline).
			WithWriteDeadline(globalConnWriteDeadline)

		return dconn, nil
	}
}
|
|
|
|
|
2021-04-16 21:58:26 -04:00
|
|
|
// NewRemoteTargetHTTPTransport returns a new http configuration
// used while communicating with the remote replication targets.
// Uses the custom deadline-aware dialer; HTTP/2 is disabled.
func NewRemoteTargetHTTPTransport() func() *http.Transport {
	return xhttp.ConnSettings{
		DialContext: newCustomDialContext(),
		RootCAs:     globalRootCAs,
		EnableHTTP2: false,
	}.NewRemoteTargetHTTPTransport()
}
|
|
|
|
|
2018-01-08 17:30:55 -05:00
|
|
|
// Load the json (typically from disk file).
|
2018-02-06 18:37:48 -05:00
|
|
|
func jsonLoad(r io.ReadSeeker, data interface{}) error {
|
2018-01-08 17:30:55 -05:00
|
|
|
if _, err := r.Seek(0, io.SeekStart); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return json.NewDecoder(r).Decode(data)
|
|
|
|
}
|
2018-02-06 18:37:48 -05:00
|
|
|
|
|
|
|
// Save to disk file in json format.
|
|
|
|
func jsonSave(f interface {
|
|
|
|
io.WriteSeeker
|
|
|
|
Truncate(int64) error
|
2022-04-13 15:00:11 -04:00
|
|
|
}, data interface{},
|
|
|
|
) error {
|
2018-02-06 18:37:48 -05:00
|
|
|
b, err := json.Marshal(data)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err = f.Truncate(0); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if _, err = f.Seek(0, io.SeekStart); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
_, err = f.Write(b)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2018-03-04 17:16:45 -05:00
|
|
|
|
|
|
|
// ceilFrac takes a numerator and denominator representing a fraction
// and returns its ceiling. If denominator is 0, it returns 0 instead
// of crashing.
func ceilFrac(numerator, denominator int64) (ceil int64) {
	if denominator == 0 {
		// Do nothing on invalid input.
		return 0
	}
	// Normalize so the denominator is positive; the fraction's value
	// is unchanged.
	if denominator < 0 {
		numerator, denominator = -numerator, -denominator
	}
	ceil = numerator / denominator
	// Integer division truncates toward zero, so a positive fraction
	// with a remainder needs one more step up.
	if numerator > 0 && numerator%denominator != 0 {
		ceil++
	}
	return ceil
}
|
2018-03-14 15:01:47 -04:00
|
|
|
|
2022-10-24 20:44:15 -04:00
|
|
|
// cleanMinioInternalMetadataKeys removes X-Amz-Meta- prefix from minio internal
// encryption metadata.
func cleanMinioInternalMetadataKeys(metadata map[string]string) map[string]string {
	cleaned := make(map[string]string, len(metadata))
	for key, value := range metadata {
		// Only minio-internal keys lose the user-metadata prefix;
		// everything else is copied through untouched.
		if strings.HasPrefix(key, "X-Amz-Meta-X-Minio-Internal-") {
			key = strings.TrimPrefix(key, "X-Amz-Meta-")
		}
		cleaned[key] = value
	}
	return cleaned
}
|
|
|
|
|
2021-04-15 19:32:13 -04:00
|
|
|
// pathClean is like path.Clean but returns "" (instead of ".")
// for empty or dot-equivalent inputs.
func pathClean(p string) string {
	if cp := path.Clean(p); cp != "." {
		return cp
	}
	return ""
}
|
|
|
|
|
2021-03-11 16:57:03 -05:00
|
|
|
func trimLeadingSlash(ep string) string {
|
2021-03-09 15:58:22 -05:00
|
|
|
if len(ep) > 0 && ep[0] == '/' {
|
|
|
|
// Path ends with '/' preserve it
|
2021-03-11 16:57:03 -05:00
|
|
|
if ep[len(ep)-1] == '/' && len(ep) > 1 {
|
2021-03-09 15:58:22 -05:00
|
|
|
ep = path.Clean(ep)
|
|
|
|
ep += slashSeparator
|
|
|
|
} else {
|
|
|
|
ep = path.Clean(ep)
|
|
|
|
}
|
|
|
|
ep = ep[1:]
|
2020-02-11 22:38:02 -05:00
|
|
|
}
|
2021-03-11 16:57:03 -05:00
|
|
|
return ep
|
|
|
|
}
|
|
|
|
|
|
|
|
// unescapeGeneric is similar to url.PathUnescape or url.QueryUnescape
|
|
|
|
// depending on input, additionally also handles situations such as
|
|
|
|
// `//` are normalized as `/`, also removes any `/` prefix before
|
|
|
|
// returning.
|
|
|
|
func unescapeGeneric(p string, escapeFn func(string) (string, error)) (string, error) {
|
|
|
|
ep, err := escapeFn(p)
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
return trimLeadingSlash(ep), nil
|
2021-03-09 15:58:22 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// unescapePath is similar to unescapeGeneric but for specifically
// path unescaping (uses url.PathUnescape as the decode function).
func unescapePath(p string) (string, error) {
	return unescapeGeneric(p, url.PathUnescape)
}
|
|
|
|
|
|
|
|
// similar to unescapeGeneric but never returns any error if the unescaping
|
|
|
|
// fails, returns the input as is in such occasion, not meant to be
|
|
|
|
// used where strict validation is expected.
|
|
|
|
func likelyUnescapeGeneric(p string, escapeFn func(string) (string, error)) string {
|
|
|
|
ep, err := unescapeGeneric(p, escapeFn)
|
2020-02-11 22:38:02 -05:00
|
|
|
if err != nil {
|
2021-03-09 15:58:22 -05:00
|
|
|
return p
|
2020-02-11 22:38:02 -05:00
|
|
|
}
|
2021-03-09 15:58:22 -05:00
|
|
|
return ep
|
|
|
|
}
|
|
|
|
|
2022-01-03 04:28:52 -05:00
|
|
|
// updateReqContext replaces the object list on the request's ReqInfo
// (when one is attached to ctx) with the given objects and re-attaches
// the ReqInfo to the returned context. Without a ReqInfo the context
// is returned unchanged.
func updateReqContext(ctx context.Context, objects ...ObjectV) context.Context {
	req := logger.GetReqInfo(ctx)
	if req != nil {
		// ReqInfo may be read concurrently by loggers; guard the
		// Objects mutation.
		req.Lock()
		defer req.Unlock()
		req.Objects = make([]logger.ObjectVersion, 0, len(objects))
		for _, ov := range objects {
			req.Objects = append(req.Objects, logger.ObjectVersion{
				ObjectName: ov.ObjectName,
				VersionID:  ov.VersionID,
			})
		}
		return logger.SetReqInfo(ctx, req)
	}
	return ctx
}
|
|
|
|
|
2021-03-09 15:58:22 -05:00
|
|
|
// Returns context with ReqInfo details set in the context.
func newContext(r *http.Request, w http.ResponseWriter, api string) context.Context {
	// Reuse the request ID already present on the response header
	// (presumably set earlier in the handler chain) so logs and
	// traces correlate.
	reqID := w.Header().Get(xhttp.AmzRequestID)

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	// Object names arrive URL-encoded; decode best-effort (falls back
	// to the raw value if unescaping fails).
	object := likelyUnescapeGeneric(vars["object"], url.PathUnescape)
	reqInfo := &logger.ReqInfo{
		DeploymentID: globalDeploymentID,
		RequestID:    reqID,
		RemoteHost:   handlers.GetSourceIP(r),
		Host:         getHostName(r),
		UserAgent:    r.UserAgent(),
		API:          api,
		BucketName:   bucket,
		ObjectName:   object,
		VersionID:    strings.TrimSpace(r.Form.Get(xhttp.VersionID)),
	}

	// Attach trace state keyed by the same request ID.
	ctx := context.WithValue(r.Context(),
		mcontext.ContextTraceKey,
		&mcontext.TraceCtxt{
			AmzReqID: reqID,
		},
	)

	return logger.SetReqInfo(ctx, reqInfo)
}
|
2018-03-28 17:14:06 -04:00
|
|
|
|
2018-10-04 20:44:06 -04:00
|
|
|
// Used for registering with rest handlers (have a look at registerStorageRESTHandlers for usage example)
|
|
|
|
// If it is passed ["aaaa", "bbbb"], it returns ["aaaa", "{aaaa:.*}", "bbbb", "{bbbb:.*}"]
|
|
|
|
func restQueries(keys ...string) []string {
|
|
|
|
var accumulator []string
|
|
|
|
for _, key := range keys {
|
|
|
|
accumulator = append(accumulator, key, "{"+key+":.*}")
|
|
|
|
}
|
|
|
|
return accumulator
|
|
|
|
}
|
2019-05-13 15:25:49 -04:00
|
|
|
|
2021-01-28 14:44:48 -05:00
|
|
|
// lcpSuffix returns the longest common suffix of the provided strings.
func lcpSuffix(strs []string) string {
	return lcp(strs, false)
}
|
|
|
|
|
|
|
|
func lcp(strs []string, pre bool) string {
|
|
|
|
// short-circuit empty list
|
|
|
|
if len(strs) == 0 {
|
2019-08-09 13:02:41 -04:00
|
|
|
return ""
|
|
|
|
}
|
2021-01-28 14:44:48 -05:00
|
|
|
xfix := strs[0]
|
|
|
|
// short-circuit single-element list
|
|
|
|
if len(strs) == 1 {
|
|
|
|
return xfix
|
|
|
|
}
|
|
|
|
// compare first to rest
|
|
|
|
for _, str := range strs[1:] {
|
|
|
|
xfixl := len(xfix)
|
|
|
|
strl := len(str)
|
|
|
|
// short-circuit empty strings
|
|
|
|
if xfixl == 0 || strl == 0 {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
// maximum possible length
|
|
|
|
maxl := xfixl
|
|
|
|
if strl < maxl {
|
|
|
|
maxl = strl
|
|
|
|
}
|
|
|
|
// compare letters
|
|
|
|
if pre {
|
|
|
|
// prefix, iterate left to right
|
|
|
|
for i := 0; i < maxl; i++ {
|
|
|
|
if xfix[i] != str[i] {
|
|
|
|
xfix = xfix[:i]
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// suffix, iterate right to left
|
|
|
|
for i := 0; i < maxl; i++ {
|
|
|
|
xi := xfixl - i - 1
|
|
|
|
si := strl - i - 1
|
|
|
|
if xfix[xi] != str[si] {
|
|
|
|
xfix = xfix[xi+1:]
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
2019-08-09 13:02:41 -04:00
|
|
|
}
|
|
|
|
}
|
2021-01-28 14:44:48 -05:00
|
|
|
return xfix
|
2019-08-09 13:02:41 -04:00
|
|
|
}
|
2019-08-14 14:43:43 -04:00
|
|
|
|
|
|
|
// Returns the mode in which MinIO is running
func getMinioMode() string {
	// Checked from most specific to least specific deployment flag;
	// FS mode is the fallback when no erasure flag is set.
	switch {
	case globalIsDistErasure:
		return globalMinioModeDistErasure
	case globalIsErasure:
		return globalMinioModeErasure
	case globalIsErasureSD:
		return globalMinioModeErasureSD
	default:
		return globalMinioModeFS
	}
}
|
2019-10-23 01:59:13 -04:00
|
|
|
|
2020-03-23 17:17:18 -04:00
|
|
|
// iamPolicyClaimNameOpenID returns the claim name under which the IAM
// policy is looked up for OpenID-authenticated users, as configured in
// the OpenID subsystem.
func iamPolicyClaimNameOpenID() string {
	return globalIAMSys.OpenIDConfig.GetIAMPolicyClaimName()
}
|
2019-11-12 17:50:18 -05:00
|
|
|
|
2020-03-23 17:17:18 -04:00
|
|
|
// iamPolicyClaimNameSA returns the fixed claim name used to embed the
// policy for service accounts.
func iamPolicyClaimNameSA() string {
	return "sa-policy"
}
|
2020-05-26 15:52:24 -04:00
|
|
|
|
|
|
|
// timedValue contains a synchronized value that is considered valid
// for a specific amount of time.
// An Update function must be set to provide an updated value when needed.
type timedValue struct {
	// Update must return an updated value.
	// If an error is returned the cached value is not set.
	// Only one caller will call this function at any time, others will be blocking.
	// The returned value can no longer be modified once returned.
	// Should be set before calling Get().
	Update func() (interface{}, error)

	// TTL for a cached value.
	// If not set 1 second TTL is assumed.
	// Should be set before calling Get().
	TTL time.Duration

	// When set to true, return the last cached value
	// even if updating the value errors out
	Relax bool

	// Once can be used to initialize values for lazy initialization.
	// Should be set before calling Get().
	Once sync.Once

	// Managed values.
	value      interface{}  // last value successfully returned by Update
	lastUpdate time.Time    // when value was last refreshed
	mu         sync.RWMutex // guards value and lastUpdate
}
|
|
|
|
|
|
|
|
// Get will return a cached value or fetch a new one.
|
|
|
|
// If the Update function returns an error the value is forwarded as is and not cached.
|
|
|
|
func (t *timedValue) Get() (interface{}, error) {
|
2022-06-25 11:50:16 -04:00
|
|
|
v := t.get(t.ttl())
|
2021-02-12 21:17:52 -05:00
|
|
|
if v != nil {
|
|
|
|
return v, nil
|
2020-05-26 15:52:24 -04:00
|
|
|
}
|
2021-02-12 21:17:52 -05:00
|
|
|
|
2020-05-26 15:52:24 -04:00
|
|
|
v, err := t.Update()
|
|
|
|
if err != nil {
|
2022-07-07 20:05:23 -04:00
|
|
|
if t.Relax {
|
|
|
|
// if update fails, return current
|
|
|
|
// cached value along with error.
|
|
|
|
//
|
|
|
|
// Let the caller decide if they want
|
|
|
|
// to use the returned value based
|
|
|
|
// on error.
|
|
|
|
v = t.get(0)
|
|
|
|
return v, err
|
|
|
|
}
|
2020-05-26 15:52:24 -04:00
|
|
|
return v, err
|
|
|
|
}
|
2021-02-12 21:17:52 -05:00
|
|
|
|
|
|
|
t.update(v)
|
2020-05-26 15:52:24 -04:00
|
|
|
return v, nil
|
|
|
|
}
|
|
|
|
|
2022-06-25 11:50:16 -04:00
|
|
|
func (t *timedValue) ttl() time.Duration {
|
2021-02-12 21:17:52 -05:00
|
|
|
ttl := t.TTL
|
|
|
|
if ttl <= 0 {
|
|
|
|
ttl = time.Second
|
|
|
|
}
|
2022-06-25 11:50:16 -04:00
|
|
|
return ttl
|
|
|
|
}
|
|
|
|
|
|
|
|
func (t *timedValue) get(ttl time.Duration) (v interface{}) {
|
2021-02-12 21:17:52 -05:00
|
|
|
t.mu.RLock()
|
|
|
|
defer t.mu.RUnlock()
|
|
|
|
v = t.value
|
2022-06-25 11:50:16 -04:00
|
|
|
if ttl <= 0 {
|
|
|
|
return v
|
|
|
|
}
|
2021-02-12 21:17:52 -05:00
|
|
|
if time.Since(t.lastUpdate) < ttl {
|
|
|
|
return v
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (t *timedValue) update(v interface{}) {
|
2020-05-26 15:52:24 -04:00
|
|
|
t.mu.Lock()
|
2021-02-12 21:17:52 -05:00
|
|
|
defer t.mu.Unlock()
|
|
|
|
t.value = v
|
|
|
|
t.lastUpdate = time.Now()
|
2020-05-26 15:52:24 -04:00
|
|
|
}
|
2020-09-19 11:39:41 -04:00
|
|
|
|
|
|
|
// On MinIO a directory object is stored as a regular object with "__XLDIR__" suffix.
|
|
|
|
// For ex. "prefix/" is stored as "prefix__XLDIR__"
|
|
|
|
func encodeDirObject(object string) string {
|
|
|
|
if HasSuffix(object, slashSeparator) {
|
|
|
|
return strings.TrimSuffix(object, slashSeparator) + globalDirSuffix
|
|
|
|
}
|
|
|
|
return object
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reverse process of encodeDirObject()
|
|
|
|
func decodeDirObject(object string) string {
|
|
|
|
if HasSuffix(object, globalDirSuffix) {
|
|
|
|
return strings.TrimSuffix(object, globalDirSuffix) + slashSeparator
|
|
|
|
}
|
|
|
|
return object
|
|
|
|
}
|
2021-03-01 15:31:33 -05:00
|
|
|
|
2021-06-24 21:29:30 -04:00
|
|
|
// Helper method to return total number of nodes in cluster
|
|
|
|
func totalNodeCount() uint64 {
|
|
|
|
peers, _ := globalEndpoints.peers()
|
|
|
|
totalNodesCount := uint64(len(peers))
|
|
|
|
if totalNodesCount == 0 {
|
|
|
|
totalNodesCount = 1 // For standalone erasure coding
|
|
|
|
}
|
|
|
|
return totalNodesCount
|
|
|
|
}
|
2021-06-29 02:58:08 -04:00
|
|
|
|
|
|
|
// AuditLogOptions takes options for audit logging subsystem activity
type AuditLogOptions struct {
	Event     string                 // event name; recorded as both trigger and event on the entry
	APIName   string                 // internal API name performing the activity
	Status    string                 // outcome status recorded on the entry
	Bucket    string                 // bucket the activity applies to, if any
	Object    string                 // object the activity applies to, if any
	VersionID string                 // object version, when versioned
	Error     string                 // error message, when the activity failed
	Tags      map[string]interface{} // extra key/value tags attached to the audit entry
}
|
|
|
|
|
|
|
|
// auditLogInternal sends audit logs for internal subsystem activity.
// It is a no-op when no audit targets are configured.
func auditLogInternal(ctx context.Context, opts AuditLogOptions) {
	if len(logger.AuditTargets()) == 0 {
		return
	}
	// Build an audit entry from the provided options.
	entry := audit.NewEntry(globalDeploymentID)
	entry.Trigger = opts.Event
	entry.Event = opts.Event
	entry.Error = opts.Error
	entry.API.Name = opts.APIName
	entry.API.Bucket = opts.Bucket
	entry.API.Objects = []pkgAudit.ObjectVersion{{ObjectName: opts.Object, VersionID: opts.VersionID}}
	entry.API.Status = opts.Status
	entry.Tags = opts.Tags
	// Merge tag information if found - this is currently needed for tags
	// set during decommissioning.
	if reqInfo := logger.GetReqInfo(ctx); reqInfo != nil {
		if tags := reqInfo.GetTagsMap(); len(tags) > 0 {
			if entry.Tags == nil {
				entry.Tags = make(map[string]interface{}, len(tags))
			}
			for k, v := range tags {
				entry.Tags[k] = v
			}
		}
	}
	ctx = logger.SetAuditEntry(ctx, &entry)
	logger.AuditLog(ctx, nil, nil, nil)
}
|
2021-09-10 20:43:34 -04:00
|
|
|
|
2021-11-29 12:06:56 -05:00
|
|
|
// newTLSConfig builds the server TLS configuration around the provided
// certificate lookup function. Returns nil when getCert is nil.
func newTLSConfig(getCert certs.GetCertificateFunc) *tls.Config {
	if getCert == nil {
		return nil
	}

	tlsConfig := &tls.Config{
		PreferServerCipherSuites: true,
		MinVersion:               tls.VersionTLS12,
		NextProtos:               []string{"http/1.1", "h2"},
		GetCertificate:           getCert,
		ClientSessionCache:       tls.NewLRUClientSessionCache(tlsClientSessionCacheSize),
	}

	// Request (but do not require) client certificates when TLS-based
	// identity is enabled via environment.
	tlsClientIdentity := env.Get(xtls.EnvIdentityTLSEnabled, "") == config.EnableOn
	if tlsClientIdentity {
		tlsConfig.ClientAuth = tls.RequestClientCert
	}

	// Secure ciphers are the default; a backward-compatible suite list
	// can be selected via configuration.
	if secureCiphers := env.Get(api.EnvAPISecureCiphers, config.EnableOn) == config.EnableOn; secureCiphers {
		tlsConfig.CipherSuites = fips.TLSCiphers()
	} else {
		tlsConfig.CipherSuites = fips.TLSCiphersBackwardCompatible()
	}
	tlsConfig.CurvePreferences = fips.TLSCurveIDs()
	return tlsConfig
}
|
2022-01-07 20:41:43 -05:00
|
|
|
|
|
|
|
/////////// Types and functions for OpenID IAM testing
|
|
|
|
|
|
|
|
// OpenIDClientAppParams - contains openID client application params, used in
// testing.
type OpenIDClientAppParams struct {
	// OAuth2 client credentials, the provider (IdP) URL and the
	// redirect/callback URL registered with the provider.
	ClientID, ClientSecret, ProviderURL, RedirectURL string
}
|
|
|
|
|
|
|
|
// MockOpenIDTestUserInteraction - tries to login to dex using provided credentials.
|
|
|
|
// It performs the user's browser interaction to login and retrieves the auth
|
|
|
|
// code from dex and exchanges it for a JWT.
|
|
|
|
func MockOpenIDTestUserInteraction(ctx context.Context, pro OpenIDClientAppParams, username, password string) (string, error) {
|
|
|
|
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
provider, err := oidc.NewProvider(ctx, pro.ProviderURL)
|
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("unable to create provider: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Configure an OpenID Connect aware OAuth2 client.
|
|
|
|
oauth2Config := oauth2.Config{
|
|
|
|
ClientID: pro.ClientID,
|
|
|
|
ClientSecret: pro.ClientSecret,
|
|
|
|
RedirectURL: pro.RedirectURL,
|
|
|
|
|
|
|
|
// Discovery returns the OAuth2 endpoints.
|
|
|
|
Endpoint: provider.Endpoint(),
|
|
|
|
|
|
|
|
// "openid" is a required scope for OpenID Connect flows.
|
|
|
|
Scopes: []string{oidc.ScopeOpenID, "groups"},
|
|
|
|
}
|
|
|
|
|
|
|
|
state := fmt.Sprintf("x%dx", time.Now().Unix())
|
|
|
|
authCodeURL := oauth2Config.AuthCodeURL(state)
|
|
|
|
// fmt.Printf("authcodeurl: %s\n", authCodeURL)
|
|
|
|
|
|
|
|
var lastReq *http.Request
|
|
|
|
checkRedirect := func(req *http.Request, via []*http.Request) error {
|
|
|
|
// fmt.Printf("CheckRedirect:\n")
|
|
|
|
// fmt.Printf("Upcoming: %s %s\n", req.Method, req.URL.String())
|
|
|
|
// for i, c := range via {
|
|
|
|
// fmt.Printf("Sofar %d: %s %s\n", i, c.Method, c.URL.String())
|
|
|
|
// }
|
|
|
|
// Save the last request in a redirect chain.
|
|
|
|
lastReq = req
|
|
|
|
// We do not follow redirect back to client application.
|
|
|
|
if req.URL.Path == "/oauth_callback" {
|
|
|
|
return http.ErrUseLastResponse
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
dexClient := http.Client{
|
|
|
|
CheckRedirect: checkRedirect,
|
|
|
|
}
|
|
|
|
|
|
|
|
u, err := url.Parse(authCodeURL)
|
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("url parse err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start the user auth flow. This page would present the login with
|
|
|
|
// email or LDAP option.
|
|
|
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
|
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("new request err: %v", err)
|
|
|
|
}
|
|
|
|
_, err = dexClient.Do(req)
|
|
|
|
// fmt.Printf("Do: %#v %#v\n", resp, err)
|
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("auth url request err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Modify u to choose the ldap option
|
|
|
|
u.Path += "/ldap"
|
|
|
|
// fmt.Println(u)
|
|
|
|
|
|
|
|
// Pick the LDAP login option. This would return a form page after
|
|
|
|
// following some redirects. `lastReq` would be the URL of the form
|
|
|
|
// page, where we need to POST (submit) the form.
|
|
|
|
req, err = http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
|
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("new request err (/ldap): %v", err)
|
|
|
|
}
|
|
|
|
_, err = dexClient.Do(req)
|
|
|
|
// fmt.Printf("Fetch LDAP login page: %#v %#v\n", resp, err)
|
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("request err: %v", err)
|
|
|
|
}
|
|
|
|
// {
|
2022-09-19 14:05:16 -04:00
|
|
|
// bodyBuf, err := io.ReadAll(resp.Body)
|
2022-01-07 20:41:43 -05:00
|
|
|
// if err != nil {
|
|
|
|
// return "", fmt.Errorf("Error reading body: %v", err)
|
|
|
|
// }
|
|
|
|
// fmt.Printf("bodyBuf (for LDAP login page): %s\n", string(bodyBuf))
|
|
|
|
// }
|
|
|
|
|
|
|
|
// Fill the login form with our test creds:
|
|
|
|
// fmt.Printf("login form url: %s\n", lastReq.URL.String())
|
|
|
|
formData := url.Values{}
|
|
|
|
formData.Set("login", username)
|
|
|
|
formData.Set("password", password)
|
|
|
|
req, err = http.NewRequestWithContext(ctx, http.MethodPost, lastReq.URL.String(), strings.NewReader(formData.Encode()))
|
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("new request err (/login): %v", err)
|
|
|
|
}
|
|
|
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
|
|
|
_, err = dexClient.Do(req)
|
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("post form err: %v", err)
|
|
|
|
}
|
|
|
|
// fmt.Printf("resp: %#v %#v\n", resp.StatusCode, resp.Header)
|
2022-09-19 14:05:16 -04:00
|
|
|
// bodyBuf, err := io.ReadAll(resp.Body)
|
2022-01-07 20:41:43 -05:00
|
|
|
// if err != nil {
|
|
|
|
// return "", fmt.Errorf("Error reading body: %v", err)
|
|
|
|
// }
|
|
|
|
// fmt.Printf("resp body: %s\n", string(bodyBuf))
|
|
|
|
// fmt.Printf("lastReq: %#v\n", lastReq.URL.String())
|
|
|
|
|
|
|
|
// On form submission, the last redirect response contains the auth
|
|
|
|
// code, which we now have in `lastReq`. Exchange it for a JWT id_token.
|
|
|
|
q := lastReq.URL.Query()
|
|
|
|
// fmt.Printf("lastReq.URL: %#v q: %#v\n", lastReq.URL, q)
|
|
|
|
code := q.Get("code")
|
|
|
|
oauth2Token, err := oauth2Config.Exchange(ctx, code)
|
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("unable to exchange code for id token: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
rawIDToken, ok := oauth2Token.Extra("id_token").(string)
|
|
|
|
if !ok {
|
|
|
|
return "", fmt.Errorf("id_token not found!")
|
|
|
|
}
|
|
|
|
|
|
|
|
// fmt.Printf("TOKEN: %s\n", rawIDToken)
|
|
|
|
return rawIDToken, nil
|
|
|
|
}
|