2015-04-29 05:19:51 -04:00
|
|
|
/*
|
2019-04-09 14:39:42 -04:00
|
|
|
* MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
|
2015-04-29 05:19:51 -04:00
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
2016-08-18 19:23:42 -04:00
|
|
|
package cmd
|
2015-04-22 19:28:13 -04:00
|
|
|
|
|
|
|
import (
|
2017-05-31 03:11:06 -04:00
|
|
|
"bytes"
|
2018-03-14 15:01:47 -04:00
|
|
|
"context"
|
2017-12-05 20:58:09 -05:00
|
|
|
"crypto/tls"
|
2015-04-22 19:28:13 -04:00
|
|
|
"encoding/base64"
|
2017-04-07 17:37:32 -04:00
|
|
|
"encoding/json"
|
2016-04-20 20:35:38 -04:00
|
|
|
"encoding/xml"
|
2018-09-18 19:46:35 -04:00
|
|
|
"errors"
|
2016-08-26 03:11:53 -04:00
|
|
|
"fmt"
|
2016-04-20 20:35:38 -04:00
|
|
|
"io"
|
2018-09-05 19:47:14 -04:00
|
|
|
"io/ioutil"
|
2017-12-05 20:58:09 -05:00
|
|
|
"net"
|
2016-09-09 12:38:07 -04:00
|
|
|
"net/http"
|
2016-10-27 06:30:52 -04:00
|
|
|
"net/url"
|
2017-10-13 06:01:15 -04:00
|
|
|
"os"
|
2018-09-18 19:46:35 -04:00
|
|
|
"path/filepath"
|
2017-12-28 12:32:48 -05:00
|
|
|
"reflect"
|
2016-04-20 20:35:38 -04:00
|
|
|
"strings"
|
2017-03-18 14:28:41 -04:00
|
|
|
"time"
|
2016-09-01 23:13:11 -04:00
|
|
|
|
2018-03-14 21:36:54 -04:00
|
|
|
"github.com/minio/minio/cmd/logger"
|
2018-07-02 17:40:18 -04:00
|
|
|
"github.com/minio/minio/pkg/handlers"
|
2018-03-14 21:36:54 -04:00
|
|
|
|
2016-11-22 21:18:22 -05:00
|
|
|
humanize "github.com/dustin/go-humanize"
|
2018-03-14 15:01:47 -04:00
|
|
|
"github.com/gorilla/mux"
|
2016-09-01 23:13:11 -04:00
|
|
|
"github.com/pkg/profile"
|
2015-04-22 19:28:13 -04:00
|
|
|
)
|
|
|
|
|
2018-04-10 12:36:37 -04:00
|
|
|
// IsErrIgnored returns whether given error is ignored or not.
|
|
|
|
func IsErrIgnored(err error, ignoredErrs ...error) bool {
|
|
|
|
return IsErr(err, ignoredErrs...)
|
|
|
|
}
|
|
|
|
|
|
|
|
// IsErr returns whether given error is exact error.
|
|
|
|
func IsErr(err error, errs ...error) bool {
|
|
|
|
for _, exactErr := range errs {
|
|
|
|
if err == exactErr {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2016-09-09 12:38:07 -04:00
|
|
|
// make a copy of http.Header
|
|
|
|
func cloneHeader(h http.Header) http.Header {
|
|
|
|
h2 := make(http.Header, len(h))
|
|
|
|
for k, vv := range h {
|
|
|
|
vv2 := make([]string, len(vv))
|
|
|
|
copy(vv2, vv)
|
|
|
|
h2[k] = vv2
|
|
|
|
|
|
|
|
}
|
|
|
|
return h2
|
|
|
|
}
|
|
|
|
|
2017-01-10 14:01:23 -05:00
|
|
|
// Convert url path into bucket and object name.
|
2018-02-27 22:14:49 -05:00
|
|
|
func urlPath2BucketObjectName(path string) (bucketName, objectName string) {
|
2017-01-10 14:01:23 -05:00
|
|
|
// Trim any preceding slash separator.
|
2018-02-27 22:14:49 -05:00
|
|
|
urlPath := strings.TrimPrefix(path, slashSeparator)
|
2017-01-10 14:01:23 -05:00
|
|
|
|
|
|
|
// Split urlpath using slash separator into a given number of
|
|
|
|
// expected tokens.
|
2017-04-11 18:44:27 -04:00
|
|
|
tokens := strings.SplitN(urlPath, slashSeparator, 2)
|
|
|
|
bucketName = tokens[0]
|
|
|
|
if len(tokens) == 2 {
|
|
|
|
objectName = tokens[1]
|
|
|
|
}
|
2017-01-10 14:01:23 -05:00
|
|
|
|
|
|
|
// Success.
|
|
|
|
return bucketName, objectName
|
|
|
|
}
|
|
|
|
|
2017-01-18 15:24:34 -05:00
|
|
|
// URI scheme constants.
const (
	httpScheme = "http" // plain (unencrypted) HTTP scheme
	httpsScheme = "https" // TLS-encrypted HTTP scheme
)
|
|
|
|
|
2018-12-13 15:09:50 -05:00
|
|
|
// nopCharsetConverter is a dummy charset convert which just copies input to output,
// it is used to ignore custom encoding charset in S3 XML body.
// The label argument (the charset name declared in the XML header) is
// intentionally ignored; bytes pass through untouched.
func nopCharsetConverter(label string, input io.Reader) (io.Reader, error) {
	return input, nil
}
|
|
|
|
|
2016-04-20 20:35:38 -04:00
|
|
|
// xmlDecoder provide decoded value in xml.
|
2016-07-19 00:20:17 -04:00
|
|
|
func xmlDecoder(body io.Reader, v interface{}, size int64) error {
|
|
|
|
var lbody io.Reader
|
|
|
|
if size > 0 {
|
|
|
|
lbody = io.LimitReader(body, size)
|
|
|
|
} else {
|
|
|
|
lbody = body
|
|
|
|
}
|
|
|
|
d := xml.NewDecoder(lbody)
|
2018-12-13 15:09:50 -05:00
|
|
|
// Ignore any encoding set in the XML body
|
|
|
|
d.CharsetReader = nopCharsetConverter
|
2016-04-20 20:35:38 -04:00
|
|
|
return d.Decode(v)
|
|
|
|
}
|
|
|
|
|
2016-03-12 19:08:15 -05:00
|
|
|
// checkValidMD5 - verify if valid md5, returns md5 in bytes.
|
2018-03-16 14:22:34 -04:00
|
|
|
func checkValidMD5(h http.Header) ([]byte, error) {
|
|
|
|
md5B64, ok := h["Content-Md5"]
|
|
|
|
if ok {
|
|
|
|
if md5B64[0] == "" {
|
|
|
|
return nil, fmt.Errorf("Content-Md5 header set to empty value")
|
|
|
|
}
|
|
|
|
return base64.StdEncoding.DecodeString(md5B64[0])
|
|
|
|
}
|
|
|
|
return []byte{}, nil
|
2015-04-22 19:28:13 -04:00
|
|
|
}
|
2015-04-29 05:19:51 -04:00
|
|
|
|
2015-04-29 13:51:59 -04:00
|
|
|
/// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
const (
	// Maximum object size per PUT request is 5TB.
	// This is a divergence from S3 limit on purpose to support
	// use cases where users are going to upload large files
	// using 'curl' and presigned URL.
	globalMaxObjectSize = 5 * humanize.TiByte

	// Minimum Part size for multipart upload is 5MiB
	globalMinPartSize = 5 * humanize.MiByte

	// Maximum Part size for multipart upload is 5GiB
	globalMaxPartSize = 5 * humanize.GiByte

	// Maximum Part ID for multipart upload is 10000
	// (Acceptable values range from 1 to 10000 inclusive)
	globalMaxPartID = 10000

	// Default dial timeout / keep-alive values used while
	// communicating with the cloud backends.
	defaultDialTimeout = 30 * time.Second
	defaultDialKeepAlive = 30 * time.Second
)
|
|
|
|
|
|
|
|
// isMaxObjectSize - verify if max object size
// Reports whether size exceeds globalMaxObjectSize (5TiB).
func isMaxObjectSize(size int64) bool {
	return size > globalMaxObjectSize
}
|
|
|
|
|
|
|
|
// Check if part size is more than maximum allowed size.
// Reports whether size exceeds globalMaxPartSize (5GiB).
func isMaxAllowedPartSize(size int64) bool {
	return size > globalMaxPartSize
}
|
2016-02-05 06:09:31 -05:00
|
|
|
|
2016-05-08 15:06:05 -04:00
|
|
|
// Check if part size is more than or equal to minimum allowed size.
// Reports whether size meets the globalMinPartSize (5MiB) multipart minimum.
func isMinAllowedPartSize(size int64) bool {
	return size >= globalMinPartSize
}
|
|
|
|
|
2016-05-24 04:52:47 -04:00
|
|
|
// isMaxPartID - Check if part ID is greater than the maximum allowed ID.
// (Valid part IDs range from 1 to globalMaxPartID inclusive.)
func isMaxPartID(partID int) bool {
	return partID > globalMaxPartID
}
|
|
|
|
|
2017-12-28 12:32:48 -05:00
|
|
|
// contains reports whether elem occurs in slice, using reflection so
// any slice type can be passed. Non-slice values always yield false.
func contains(slice interface{}, elem interface{}) bool {
	v := reflect.ValueOf(slice)
	if v.Kind() != reflect.Slice {
		// Not a slice at all - nothing can match.
		return false
	}
	for i, n := 0, v.Len(); i < n; i++ {
		if v.Index(i).Interface() == elem {
			return true
		}
	}
	return false
}
|
2016-08-05 16:48:31 -04:00
|
|
|
|
2018-09-18 19:46:35 -04:00
|
|
|
// profilerWrapper is created because pkg/profile doesn't
// provide any API to compute the profiler file path on
// disk, since that file name is randomly generated.
type profilerWrapper struct {
	stopFn func() // stops the active profiler, flushing its data
	pathFn func() string // computes the path of the generated profile file
}
|
|
|
|
|
|
|
|
// Stop stops the wrapped profiler via its stop callback.
func (p profilerWrapper) Stop() {
	p.stopFn()
}
|
|
|
|
|
|
|
|
// Path returns the on-disk path of the profile file via the path callback.
func (p profilerWrapper) Path() string {
	return p.pathFn()
}
|
|
|
|
|
2019-01-14 01:44:20 -05:00
|
|
|
// Returns current profile data, returns error if there is no active
|
|
|
|
// profiling in progress. Stops an active profile.
|
|
|
|
func getProfileData() ([]byte, error) {
|
|
|
|
if globalProfiler == nil {
|
|
|
|
return nil, errors.New("profiler not enabled")
|
|
|
|
}
|
|
|
|
|
|
|
|
profilerPath := globalProfiler.Path()
|
|
|
|
|
|
|
|
// Stop the profiler
|
|
|
|
globalProfiler.Stop()
|
|
|
|
|
|
|
|
profilerFile, err := os.Open(profilerPath)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return ioutil.ReadAll(profilerFile)
|
|
|
|
}
|
|
|
|
|
2016-09-01 23:13:11 -04:00
|
|
|
// Starts a profiler returns nil if profiler is not enabled, caller needs to handle this.
|
2019-02-13 07:59:36 -05:00
|
|
|
func startProfiler(profilerType, dirPath string) (minioProfiler, error) {
|
2018-09-18 19:46:35 -04:00
|
|
|
var err error
|
|
|
|
if dirPath == "" {
|
|
|
|
dirPath, err = ioutil.TempDir("", "profile")
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
var profiler interface {
|
|
|
|
Stop()
|
|
|
|
}
|
|
|
|
|
2018-10-15 14:13:19 -04:00
|
|
|
var profilerFileName string
|
|
|
|
|
|
|
|
// Enable profiler and set the name of the file that pkg/pprof
|
|
|
|
// library creates to store profiling data.
|
2018-09-18 19:46:35 -04:00
|
|
|
switch profilerType {
|
2016-09-01 23:13:11 -04:00
|
|
|
case "cpu":
|
2018-09-18 19:46:35 -04:00
|
|
|
profiler = profile.Start(profile.CPUProfile, profile.NoShutdownHook, profile.ProfilePath(dirPath))
|
2018-10-15 14:13:19 -04:00
|
|
|
profilerFileName = "cpu.pprof"
|
2016-09-01 23:13:11 -04:00
|
|
|
case "mem":
|
2018-09-18 19:46:35 -04:00
|
|
|
profiler = profile.Start(profile.MemProfile, profile.NoShutdownHook, profile.ProfilePath(dirPath))
|
2018-10-15 14:13:19 -04:00
|
|
|
profilerFileName = "mem.pprof"
|
2016-09-01 23:13:11 -04:00
|
|
|
case "block":
|
2018-09-18 19:46:35 -04:00
|
|
|
profiler = profile.Start(profile.BlockProfile, profile.NoShutdownHook, profile.ProfilePath(dirPath))
|
2018-10-15 14:13:19 -04:00
|
|
|
profilerFileName = "block.pprof"
|
2018-09-27 00:02:05 -04:00
|
|
|
case "mutex":
|
|
|
|
profiler = profile.Start(profile.MutexProfile, profile.NoShutdownHook, profile.ProfilePath(dirPath))
|
2018-10-15 14:13:19 -04:00
|
|
|
profilerFileName = "mutex.pprof"
|
2018-09-27 00:02:05 -04:00
|
|
|
case "trace":
|
|
|
|
profiler = profile.Start(profile.TraceProfile, profile.NoShutdownHook, profile.ProfilePath(dirPath))
|
2018-10-15 14:13:19 -04:00
|
|
|
profilerFileName = "trace.out"
|
2016-09-01 23:13:11 -04:00
|
|
|
default:
|
2018-09-18 19:46:35 -04:00
|
|
|
return nil, errors.New("profiler type unknown")
|
2016-09-01 23:13:11 -04:00
|
|
|
}
|
2018-09-18 19:46:35 -04:00
|
|
|
|
|
|
|
return &profilerWrapper{
|
|
|
|
stopFn: profiler.Stop,
|
|
|
|
pathFn: func() string {
|
2018-10-15 14:13:19 -04:00
|
|
|
return filepath.Join(dirPath, profilerFileName)
|
2018-09-18 19:46:35 -04:00
|
|
|
},
|
|
|
|
}, nil
|
2016-09-01 23:13:11 -04:00
|
|
|
}
|
|
|
|
|
2019-02-13 07:59:36 -05:00
|
|
|
// minioProfiler - minio profiler interface.
// Implemented by profilerWrapper (see startProfiler).
type minioProfiler interface {
	// Stop the profiler, flushing collected data to disk.
	Stop()
	// Return the path of the file holding the profiling data.
	Path() string
}
|
|
|
|
|
2019-02-13 07:59:36 -05:00
|
|
|
// Global profiler to be used by service go-routine.
// Nil until startProfiler succeeds; getProfileData checks for nil.
var globalProfiler minioProfiler
|
|
|
|
|
2016-09-19 13:17:46 -04:00
|
|
|
// dump the request into a string in JSON format.
|
|
|
|
func dumpRequest(r *http.Request) string {
|
|
|
|
header := cloneHeader(r.Header)
|
|
|
|
header.Set("Host", r.Host)
|
2017-05-31 03:11:06 -04:00
|
|
|
// Replace all '%' to '%%' so that printer format parser
|
|
|
|
// to ignore URL encoded values.
|
|
|
|
rawURI := strings.Replace(r.RequestURI, "%", "%%", -1)
|
2016-09-19 13:17:46 -04:00
|
|
|
req := struct {
|
2017-05-31 03:11:06 -04:00
|
|
|
Method string `json:"method"`
|
|
|
|
RequestURI string `json:"reqURI"`
|
|
|
|
Header http.Header `json:"header"`
|
|
|
|
}{r.Method, rawURI, header}
|
|
|
|
|
|
|
|
var buffer bytes.Buffer
|
|
|
|
enc := json.NewEncoder(&buffer)
|
|
|
|
enc.SetEscapeHTML(false)
|
|
|
|
if err := enc.Encode(&req); err != nil {
|
2017-04-07 17:37:32 -04:00
|
|
|
// Upon error just return Go-syntax representation of the value
|
|
|
|
return fmt.Sprintf("%#v", req)
|
2016-09-19 13:17:46 -04:00
|
|
|
}
|
2017-05-31 03:11:06 -04:00
|
|
|
|
|
|
|
// Formatted string.
|
2019-02-13 07:59:36 -05:00
|
|
|
return strings.TrimSpace(buffer.String())
|
2016-09-19 13:17:46 -04:00
|
|
|
}
|
2017-02-27 17:59:53 -05:00
|
|
|
|
2017-03-02 17:21:30 -05:00
|
|
|
// isFile - returns whether given path is a file or not.
|
|
|
|
func isFile(path string) bool {
|
2017-10-13 06:01:15 -04:00
|
|
|
if fi, err := os.Stat(path); err == nil {
|
2017-03-02 17:21:30 -05:00
|
|
|
return fi.Mode().IsRegular()
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
2017-03-15 19:30:34 -04:00
|
|
|
|
2017-03-18 14:28:41 -04:00
|
|
|
// UTCNow - returns current UTC time.
// Shorthand for time.Now().UTC(), keeping timestamps location-normalized.
func UTCNow() time.Time {
	return time.Now().UTC()
}
|
2017-10-26 13:17:07 -04:00
|
|
|
|
2017-12-05 20:58:09 -05:00
|
|
|
// GenETag - generate UUID based ETag
// The value is the MD5 hash of a freshly generated UUID, run through
// ToS3ETag for S3-compatible formatting.
func GenETag() string {
	return ToS3ETag(getMD5Hash([]byte(mustGetUUID())))
}
|
|
|
|
|
2017-12-05 20:58:09 -05:00
|
|
|
// ToS3ETag - return checksum to ETag
|
|
|
|
func ToS3ETag(etag string) string {
|
2017-10-26 13:17:07 -04:00
|
|
|
etag = canonicalizeETag(etag)
|
|
|
|
|
|
|
|
if !strings.HasSuffix(etag, "-1") {
|
|
|
|
// Tools like s3cmd uses ETag as checksum of data to validate.
|
|
|
|
// Append "-1" to indicate ETag is not a checksum.
|
|
|
|
etag += "-1"
|
|
|
|
}
|
|
|
|
|
|
|
|
return etag
|
|
|
|
}
|
2017-12-05 20:58:09 -05:00
|
|
|
|
|
|
|
// NewCustomHTTPTransport returns a new http configuration
|
|
|
|
// used while communicating with the cloud backends.
|
2018-02-20 15:23:37 -05:00
|
|
|
// This sets the value for MaxIdleConnsPerHost from 2 (go default)
|
|
|
|
// to 100.
|
2018-05-15 21:20:22 -04:00
|
|
|
func NewCustomHTTPTransport() *http.Transport {
|
2017-12-05 20:58:09 -05:00
|
|
|
return &http.Transport{
|
|
|
|
Proxy: http.ProxyFromEnvironment,
|
|
|
|
DialContext: (&net.Dialer{
|
2018-07-12 17:12:40 -04:00
|
|
|
Timeout: defaultDialTimeout,
|
|
|
|
KeepAlive: defaultDialKeepAlive,
|
2017-12-05 20:58:09 -05:00
|
|
|
}).DialContext,
|
2018-04-30 14:59:41 -04:00
|
|
|
MaxIdleConns: 1024,
|
|
|
|
MaxIdleConnsPerHost: 1024,
|
|
|
|
IdleConnTimeout: 30 * time.Second,
|
2017-12-05 20:58:09 -05:00
|
|
|
TLSHandshakeTimeout: 10 * time.Second,
|
|
|
|
ExpectContinueTimeout: 1 * time.Second,
|
|
|
|
TLSClientConfig: &tls.Config{RootCAs: globalRootCAs},
|
|
|
|
DisableCompression: true,
|
|
|
|
}
|
|
|
|
}
|
2018-01-08 17:30:55 -05:00
|
|
|
|
|
|
|
// Load the json (typically from disk file).
|
2018-02-06 18:37:48 -05:00
|
|
|
func jsonLoad(r io.ReadSeeker, data interface{}) error {
|
2018-01-08 17:30:55 -05:00
|
|
|
if _, err := r.Seek(0, io.SeekStart); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return json.NewDecoder(r).Decode(data)
|
|
|
|
}
|
2018-02-06 18:37:48 -05:00
|
|
|
|
|
|
|
// Save to disk file in json format.
|
|
|
|
func jsonSave(f interface {
|
|
|
|
io.WriteSeeker
|
|
|
|
Truncate(int64) error
|
|
|
|
}, data interface{}) error {
|
|
|
|
b, err := json.Marshal(data)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err = f.Truncate(0); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if _, err = f.Seek(0, io.SeekStart); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
_, err = f.Write(b)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2018-03-04 17:16:45 -05:00
|
|
|
|
|
|
|
// ceilFrac takes a numerator and denominator representing a fraction
// and returns its ceiling. If denominator is 0, it returns 0 instead
// of crashing.
func ceilFrac(numerator, denominator int64) int64 {
	if denominator == 0 {
		// do nothing on invalid input
		return 0
	}
	// Normalize so the denominator is positive; the fraction's value
	// is unchanged.
	if denominator < 0 {
		numerator, denominator = -numerator, -denominator
	}
	q := numerator / denominator
	// Integer division truncates toward zero, so a positive fraction
	// with a remainder must be bumped up by one.
	if numerator > 0 && numerator%denominator != 0 {
		q++
	}
	return q
}
|
2018-03-14 15:01:47 -04:00
|
|
|
|
2018-03-14 21:36:54 -04:00
|
|
|
// Returns context with ReqInfo details set in the context.
|
2018-07-20 21:46:32 -04:00
|
|
|
func newContext(r *http.Request, w http.ResponseWriter, api string) context.Context {
|
2018-03-14 15:01:47 -04:00
|
|
|
vars := mux.Vars(r)
|
|
|
|
bucket := vars["bucket"]
|
|
|
|
object := vars["object"]
|
|
|
|
prefix := vars["prefix"]
|
|
|
|
|
|
|
|
if prefix != "" {
|
|
|
|
object = prefix
|
|
|
|
}
|
2018-07-02 17:40:18 -04:00
|
|
|
reqInfo := &logger.ReqInfo{
|
2018-11-19 17:47:03 -05:00
|
|
|
DeploymentID: w.Header().Get(responseDeploymentIDKey),
|
|
|
|
RequestID: w.Header().Get(responseRequestIDKey),
|
|
|
|
RemoteHost: handlers.GetSourceIP(r),
|
|
|
|
UserAgent: r.UserAgent(),
|
|
|
|
API: api,
|
|
|
|
BucketName: bucket,
|
|
|
|
ObjectName: object,
|
2018-07-02 17:40:18 -04:00
|
|
|
}
|
2018-04-05 18:04:40 -04:00
|
|
|
return logger.SetReqInfo(context.Background(), reqInfo)
|
2018-03-14 15:01:47 -04:00
|
|
|
}
|
2018-03-28 17:14:06 -04:00
|
|
|
|
|
|
|
// isNetworkOrHostDown - if there was a network error or if the host is down.
|
|
|
|
func isNetworkOrHostDown(err error) bool {
|
|
|
|
if err == nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
switch err.(type) {
|
|
|
|
case *net.DNSError, *net.OpError, net.UnknownNetworkError:
|
|
|
|
return true
|
|
|
|
case *url.Error:
|
|
|
|
// For a URL error, where it replies back "connection closed"
|
|
|
|
if strings.Contains(err.Error(), "Connection closed by foreign host") {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
default:
|
|
|
|
if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
|
|
|
|
// If error is - tlsHandshakeTimeoutError,.
|
|
|
|
return true
|
|
|
|
} else if strings.Contains(err.Error(), "i/o timeout") {
|
|
|
|
// If error is - tcp timeoutError.
|
|
|
|
return true
|
|
|
|
} else if strings.Contains(err.Error(), "connection timed out") {
|
|
|
|
// If err is a net.Dial timeout.
|
|
|
|
return true
|
|
|
|
} else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
2018-09-05 19:47:14 -04:00
|
|
|
|
2018-10-04 20:44:06 -04:00
|
|
|
// Used for registering with rest handlers (have a look at registerStorageRESTHandlers for usage example)
// If it is passed ["aaaa", "bbbb"], it returns ["aaaa", "{aaaa:.*}", "bbbb", "{bbbb:.*}"]
func restQueries(keys ...string) []string {
	// Each key contributes itself plus a match-anything mux pattern.
	var out []string
	for _, key := range keys {
		out = append(out, key)
		out = append(out, "{"+key+":.*}")
	}
	return out
}
|
2019-05-13 15:25:49 -04:00
|
|
|
|
|
|
|
// Reverse the input order of a slice of string
func reverseStringSlice(input []string) {
	// Swap mirrored pairs working inward from both ends; the middle
	// element (odd lengths) stays put.
	n := len(input)
	for i := 0; i < n/2; i++ {
		j := n - 1 - i
		input[i], input[j] = input[j], input[i]
	}
}
|