Mirror of https://github.com/minio/minio.git, synced 2024-12-24 22:25:54 -05:00
fix: use unused cacheMetrics code in prometheus (#9588)
remove all other unused/dead code
parent 2ecf5ba1de
commit 6ac48a65cb
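For context on the fix: in the Prometheus Go client, a custom collector exposes only what its Collect method writes to the metrics channel, so a metrics helper that is never called from Collect is dead code as far as scrapes are concerned. A minimal, self-contained sketch of that pattern (names here are illustrative, not MinIO's actual code):

    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    // exampleCollector mirrors the shape of minioCollector: Prometheus only
    // sees the metrics that Collect writes onto the channel.
    type exampleCollector struct {
        desc *prometheus.Desc
    }

    func (c *exampleCollector) Describe(ch chan<- *prometheus.Desc) {
        ch <- c.desc
    }

    func (c *exampleCollector) Collect(ch chan<- prometheus.Metric) {
        // A helper that is never invoked from here contributes nothing
        // to the scrape output, however much code it contains.
        ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 42)
    }

    func main() {
        c := &exampleCollector{
            desc: prometheus.NewDesc("example_gauge", "An illustrative gauge.", nil, nil),
        }
        reg := prometheus.NewRegistry()
        reg.MustRegister(c)
        http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
        log.Fatal(http.ListenAndServe(":2112", nil))
    }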
@@ -33,7 +33,6 @@ import (
    "strings"
    "time"

    humanize "github.com/dustin/go-humanize"
    "github.com/gorilla/mux"

    "github.com/minio/minio/cmd/config"
@@ -52,7 +51,6 @@ import (

const (
    maxEConfigJSONSize = 262272
    defaultNetPerfSize = 100 * humanize.MiByte
)

// Type-safe query params.
@@ -767,17 +767,6 @@ func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIE
    writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}

// writeVersionMismatchResponse - writes custom error responses for version mismatches.
func writeVersionMismatchResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL, isJSON bool) {
    if isJSON {
        // Generate error response.
        errorResponse := getAPIErrorResponse(ctx, err, reqURL.String(), w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
        writeResponse(w, err.HTTPStatusCode, encodeResponseJSON(errorResponse), mimeJSON)
    } else {
        writeResponse(w, err.HTTPStatusCode, []byte(err.Description), mimeNone)
    }
}

// writeCustomErrorResponseJSON - similar to writeErrorResponseJSON,
// but accepts the error message directly (this allows messages to be
// dynamically generated.)
@@ -40,9 +40,6 @@ const (

    // MinIO configuration file.
    minioConfigFile = "config.json"

    // MinIO configuration backup file
    minioConfigBackupFile = minioConfigFile + ".backup"
)

func listServerConfigHistory(ctx context.Context, objAPI ObjectLayer, withData bool, count int) (
@@ -27,7 +27,6 @@ import (

const (
    bgLifecycleInterval = 24 * time.Hour
    bgLifecycleTick     = time.Hour
)

// initDailyLifecycle starts the routine that receives the daily
@@ -301,24 +301,6 @@ func decryptObjectInfo(key []byte, bucket, object string, metadata map[string]st
    }
}

func newDecryptWriterWithObjectKey(client io.Writer, objectEncryptionKey []byte, seqNumber uint32, metadata map[string]string) (io.WriteCloser, error) {
    writer, err := sio.DecryptWriter(client, sio.Config{
        Key:            objectEncryptionKey,
        SequenceNumber: seqNumber,
    })
    if err != nil {
        return nil, crypto.ErrInvalidCustomerKey
    }
    delete(metadata, crypto.SSEIV)
    delete(metadata, crypto.SSESealAlgorithm)
    delete(metadata, crypto.SSECSealedKey)
    delete(metadata, crypto.SSEMultipart)
    delete(metadata, crypto.S3SealedKey)
    delete(metadata, crypto.S3KMSSealedKey)
    delete(metadata, crypto.S3KMSKeyID)
    return writer, nil
}

// Adding support for reader based interface

// DecryptRequestWithSequenceNumberR - same as
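The removed helper wrapped sio.DecryptWriter from github.com/minio/sio. For reference, a minimal encrypt/decrypt round-trip with that library's documented API; the ad-hoc random key here is illustrative only, since MinIO derives per-object encryption keys rather than generating them like this:

    package main

    import (
        "bytes"
        "crypto/rand"
        "fmt"
        "io"

        "github.com/minio/sio"
    )

    func main() {
        key := make([]byte, 32) // 256-bit object encryption key
        if _, err := rand.Read(key); err != nil {
            panic(err)
        }

        // Encrypt a plaintext stream into DARE format.
        var ciphertext bytes.Buffer
        if _, err := sio.Encrypt(&ciphertext, bytes.NewReader([]byte("hello, world")), sio.Config{Key: key}); err != nil {
            panic(err)
        }

        // Decrypt it back, as the removed helper did via sio.DecryptWriter.
        var plaintext bytes.Buffer
        w, err := sio.DecryptWriter(&plaintext, sio.Config{Key: key})
        if err != nil {
            panic(err)
        }
        if _, err := io.Copy(w, &ciphertext); err != nil {
            panic(err)
        }
        if err := w.Close(); err != nil { // Close authenticates the final package
            panic(err)
        }
        fmt.Println(plaintext.String()) // hello, world
    }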
@@ -541,31 +523,6 @@ func (d *DecryptBlocksReader) Read(p []byte) (int, error) {
    return len(p), nil
}

// getEncryptedSinglePartOffsetLength - fetch sequence number, encrypted start offset and encrypted length.
func getEncryptedSinglePartOffsetLength(offset, length int64, objInfo ObjectInfo) (seqNumber uint32, encOffset int64, encLength int64) {
    onePkgSize := int64(SSEDAREPackageBlockSize + SSEDAREPackageMetaSize)

    seqNumber = uint32(offset / SSEDAREPackageBlockSize)
    encOffset = int64(seqNumber) * onePkgSize
    // The math to compute the encrypted length is always
    // originalLength i.e (offset+length-1) to be divided under
    // 64KiB blocks which is the payload size for each encrypted
    // block. This is then multiplied by final package size which
    // is basically 64KiB + 32. Finally negate the encrypted offset
    // to get the final encrypted length on disk.
    encLength = ((offset+length)/SSEDAREPackageBlockSize)*onePkgSize - encOffset

    // Check for the remainder, to figure if we need one extra package to read from.
    if (offset+length)%SSEDAREPackageBlockSize > 0 {
        encLength += onePkgSize
    }

    if encLength+encOffset > objInfo.EncryptedSize() {
        encLength = objInfo.EncryptedSize() - encOffset
    }
    return seqNumber, encOffset, encLength
}

// DecryptedSize returns the size of the object after decryption in bytes.
// It returns an error if the object is not encrypted or marked as encrypted
// but has an invalid size.
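The comment in the removed function describes the offset math in words; a worked example makes it concrete. With the DARE constants (64 KiB payload plus 32 bytes of package overhead), a plaintext range starting at byte 70000 lands in the second package, so the read starts at the second package boundary and spans exactly one package. A standalone re-run of the same arithmetic, with the constants inlined and the final clamp against the object's encrypted size omitted:

    package main

    import "fmt"

    func main() {
        const (
            blockSize  = 64 * 1024            // SSEDAREPackageBlockSize
            metaSize   = 32                   // SSEDAREPackageMetaSize
            onePkgSize = blockSize + metaSize // bytes per encrypted package on disk
        )

        offset, length := int64(70000), int64(1000) // requested plaintext range

        seqNumber := uint32(offset / blockSize)    // 1: range starts in the 2nd package
        encOffset := int64(seqNumber) * onePkgSize // 65568: skip one full package
        encLength := ((offset+length)/blockSize)*onePkgSize - encOffset
        if (offset+length)%blockSize > 0 {
            encLength += onePkgSize // partial tail: read one more full package
        }
        fmt.Println(seqNumber, encOffset, encLength) // 1 65568 65568
    }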
@@ -873,8 +873,6 @@ func makeFormatXLMetaVolumes(disk StorageAPI) error {
    return disk.MakeVolBulk(minioMetaBucket, minioMetaTmpBucket, minioMetaMultipartBucket, dataUsageBucket)
}

var initMetaVolIgnoredErrs = append(baseIgnoredErrs, errVolumeExists)

// All UUIDs which are present in the reference format should be
// present in the list of formats provided; those are considered
// online UUIDs.
@@ -43,8 +43,6 @@ import (

// minio configuration related constants.
const (
    globalMinioCertExpireWarnDays = time.Hour * 24 * 30 // 30 days.

    globalMinioDefaultPort = "9000"

    globalMinioDefaultRegion = ""
@@ -210,7 +208,6 @@ var (
    globalDomainNames []string      // Root domains for virtual host style requests
    globalDomainIPs   set.StringSet // Root domain IP address(s) for a distributed MinIO deployment

    globalListingTimeout   = newDynamicTimeout( /*30*/ 600*time.Second /*5*/, 600*time.Second) // timeout for listing related ops
    globalObjectTimeout    = newDynamicTimeout( /*1*/ 10*time.Minute /*10*/, 600*time.Second)  // timeout for Object API related ops
    globalOperationTimeout = newDynamicTimeout(10*time.Minute /*30*/, 600*time.Second)         // default timeout for general ops
    globalHealingTimeout   = newDynamicTimeout(30*time.Minute /*1*/, 30*time.Minute)           // timeout for healing related ops
@@ -235,11 +232,6 @@ var (
    // Allocated DNS config wrapper over etcd client.
    globalDNSConfig *dns.CoreDNS

    // Default usage check interval value.
    globalDefaultUsageCheckInterval = 12 * time.Hour // 12 hours
    // Usage check interval value.
    globalUsageCheckInterval = globalDefaultUsageCheckInterval

    // GlobalKMS initialized KMS configuration
    GlobalKMS crypto.KMS

@@ -17,12 +17,10 @@
package cmd

import (
    "fmt"
    "net/http"
    "strings"
    "sync"
    "sync/atomic"
    "time"

    "github.com/minio/minio/cmd/logger"
    "github.com/prometheus/client_golang/prometheus"
@@ -144,10 +142,6 @@ type HTTPStats struct {
    totalS3Errors HTTPAPIStats
}

func durationStr(totalDuration, totalCount float64) string {
    return fmt.Sprint(time.Duration(totalDuration/totalCount) * time.Second)
}

// Converts http stats into struct to be sent back to the client.
func (st *HTTPStats) toServerHTTPStats() ServerHTTPStats {
    serverStats := ServerHTTPStats{}
@@ -86,6 +86,7 @@ func (c *minioCollector) Collect(ch chan<- prometheus.Metric) {
    storageMetricsPrometheus(ch)
    networkMetricsPrometheus(ch)
    httpMetricsPrometheus(ch)
    cacheMetricsPrometheus(ch)
    gatewayMetricsPrometheus(ch)
    healingMetricsPrometheus(ch)
}
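This one-line addition is the actual fix: it wires the previously uncalled cache-metrics helper into Collect, so cache gauges now reach the scrape output. The diff does not show the helper's body; a hypothetical sketch of the shape such a function takes (the function name, metric names, and values below are invented for illustration, not MinIO's):

    package metricsketch

    import "github.com/prometheus/client_golang/prometheus"

    // cacheMetricsSketch emits const metrics on the channel that a
    // collector's Collect method hands down, mirroring how helpers like
    // cacheMetricsPrometheus plug into minioCollector.
    func cacheMetricsSketch(ch chan<- prometheus.Metric, hits, misses float64) {
        ch <- prometheus.MustNewConstMetric(
            prometheus.NewDesc("minio_cache_hits_total", "Illustrative cache-hit counter.", nil, nil),
            prometheus.CounterValue, hits,
        )
        ch <- prometheus.MustNewConstMetric(
            prometheus.NewDesc("minio_cache_misses_total", "Illustrative cache-miss counter.", nil, nil),
            prometheus.CounterValue, misses,
        )
    }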
@@ -34,9 +34,6 @@ import (
// IPv4 addresses of local host.
var localIP4 = mustGetLocalIP4()

// IPv6 address of local host.
var localIP6 = mustGetLocalIP6()

// mustSplitHostPort is a wrapper to net.SplitHostPort() where any error is assumed to be fatal.
func mustSplitHostPort(hostPort string) (host, port string) {
    xh, err := xnet.ParseHost(hostPort)
@@ -419,26 +419,3 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d
    // Success.
    return result, nil
}

// Fetch the histogram interval corresponding
// to the passed object size.
func objSizeToHistoInterval(usize uint64) string {
    size := int64(usize)

    var interval objectHistogramInterval
    for _, interval = range ObjectsHistogramIntervals {
        var cond1, cond2 bool
        if size >= interval.start || interval.start == -1 {
            cond1 = true
        }
        if size <= interval.end || interval.end == -1 {
            cond2 = true
        }
        if cond1 && cond2 {
            return interval.name
        }
    }

    // This would be the last element of histogram intervals
    return interval.name
}
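The removed lookup uses -1 as a sentinel for an open-ended bound on either side of a bucket. A minimal runnable sketch of the same logic; the interval table here is invented for illustration, not MinIO's actual ObjectsHistogramIntervals:

    package main

    import "fmt"

    type histoInterval struct {
        name       string
        start, end int64 // -1 marks an open-ended bound
    }

    var intervals = []histoInterval{
        {"LESS_THAN_1024_B", -1, 1023},
        {"BETWEEN_1024_B_AND_1_MB", 1024, 1024*1024 - 1},
        {"GREATER_THAN_1_MB", 1024 * 1024, -1},
    }

    func objSizeToHistoInterval(usize uint64) string {
        size := int64(usize)
        var interval histoInterval
        for _, interval = range intervals {
            lowOK := size >= interval.start || interval.start == -1
            highOK := size <= interval.end || interval.end == -1
            if lowOK && highOK {
                return interval.name
            }
        }
        // Reached only past the last (open-ended) bucket.
        return interval.name
    }

    func main() {
        fmt.Println(objSizeToHistoInterval(512))     // LESS_THAN_1024_B
        fmt.Println(objSizeToHistoInterval(1 << 20)) // GREATER_THAN_1_MB
    }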
@@ -46,7 +46,6 @@ const (
    peerRESTMethodLoadPolicy            = "/loadpolicy"
    peerRESTMethodLoadPolicyMapping     = "/loadpolicymapping"
    peerRESTMethodDeletePolicy          = "/deletepolicy"
    peerRESTMethodLoadUsers             = "/loadusers"
    peerRESTMethodLoadGroup             = "/loadgroup"
    peerRESTMethodStartProfiling        = "/startprofiling"
    peerRESTMethodDownloadProfilingData = "/downloadprofilingdata"
@@ -68,8 +67,6 @@ const (
)

const (
    peerRESTNetPerfSize   = "netperfsize"
    peerRESTDrivePerfSize = "driveperfsize"
    peerRESTBucket        = "bucket"
    peerRESTUser          = "user"
    peerRESTGroup         = "group"
@@ -43,27 +43,6 @@ import (
type peerRESTServer struct {
}

func getServerInfo() (*ServerInfoData, error) {
    objLayer := newObjectLayerWithoutSafeModeFn()
    if objLayer == nil {
        return nil, errServerNotInitialized
    }

    // Server info data.
    return &ServerInfoData{
        ConnStats: globalConnStats.toServerConnStats(),
        HTTPStats: globalHTTPStats.toServerHTTPStats(),
        Properties: ServerProperties{
            Uptime:       UTCNow().Unix() - globalBootTime.Unix(),
            Version:      Version,
            CommitID:     CommitID,
            DeploymentID: globalDeploymentID,
            SQSARN:       globalNotificationSys.GetARNList(false),
            Region:       globalServerRegion,
        },
    }, nil
}

// GetLocksHandler - returns the list of old locks from the server.
func (s *peerRESTServer) GetLocksHandler(w http.ResponseWriter, r *http.Request) {
    if !s.IsValid(w, r) {
@@ -351,15 +351,6 @@ func (s *posix) IsOnline() bool {
    return true
}

func isQuitting(endCh chan struct{}) bool {
    select {
    case <-endCh:
        return true
    default:
        return false
    }
}

func (s *posix) waitForLowActiveIO() {
    for atomic.LoadInt32(&s.activeIOCount) >= s.maxActiveIOCount {
        time.Sleep(lowActiveIOWaitTick)
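The removed isQuitting is the standard Go non-blocking receive on a done channel: the default case makes select return immediately instead of waiting. A minimal sketch of how a poll loop uses the pattern (names and timings here are illustrative):

    package main

    import (
        "fmt"
        "time"
    )

    // isQuitting reports, without blocking, whether the quit channel
    // has been closed (or signalled).
    func isQuitting(endCh chan struct{}) bool {
        select {
        case <-endCh:
            return true
        default:
            return false
        }
    }

    func main() {
        endCh := make(chan struct{})
        go func() {
            time.Sleep(30 * time.Millisecond)
            close(endCh) // signal shutdown
        }()
        for !isQuitting(endCh) { // poll between units of work
            fmt.Println("working...")
            time.Sleep(10 * time.Millisecond)
        }
        fmt.Println("quitting")
    }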
@@ -149,26 +149,6 @@ func formatXLCleanupTmpLocalEndpoints(endpoints Endpoints) error {
    return nil
}

// validate reference format against list of XL formats.
func validateXLFormats(format *formatXLV3, formats []*formatXLV3, endpoints Endpoints, setCount, drivesPerSet int) error {
    for i := range formats {
        if formats[i] == nil {
            continue
        }
        if err := formatXLV3Check(format, formats[i]); err != nil {
            return fmt.Errorf("%s format error: %w", endpoints[i], err)
        }
    }
    if len(format.XL.Sets) != setCount {
        return fmt.Errorf("Current backend format is inconsistent with input args (%s), Expected set count %d, got %d", endpoints, len(format.XL.Sets), setCount)
    }
    if len(format.XL.Sets[0]) != drivesPerSet {
        return fmt.Errorf("Current backend format is inconsistent with input args (%s), Expected drive count per set %d, got %d", endpoints, len(format.XL.Sets[0]), drivesPerSet)
    }

    return nil
}

// Following error message is added to fix a regression in release
// RELEASE.2018-03-16T22-52-12Z after migrating v1 to v2 to v3. This
// migration failed to capture '.This' field properly which indicates
@@ -578,13 +578,6 @@ func restQueries(keys ...string) []string {
    return accumulator
}

// Reverse the input order of a slice of strings
func reverseStringSlice(input []string) {
    for left, right := 0, len(input)-1; left < right; left, right = left+1, right-1 {
        input[left], input[right] = input[right], input[left]
    }
}

// lcp finds the longest common prefix of the input strings.
// It compares by bytes instead of runes (Unicode code points).
// It's up to the caller to do Unicode normalization if desired
@@ -113,25 +113,6 @@ func isEndpointConnected(diskMap map[string]StorageAPI, endpoint string) bool {
    return disk.IsOnline()
}

func (s *xlSets) getOnlineDisksCount() int {
    s.xlDisksMu.RLock()
    defer s.xlDisksMu.RUnlock()
    count := 0
    for i := 0; i < s.setCount; i++ {
        for j := 0; j < s.drivesPerSet; j++ {
            disk := s.xlDisks[i][j]
            if disk == nil {
                continue
            }
            if !disk.IsOnline() {
                continue
            }
            count++
        }
    }
    return count
}

func (s *xlSets) getDiskMap() map[string]StorageAPI {
    diskMap := make(map[string]StorageAPI)

@@ -393,9 +393,6 @@ func pickValidXLMeta(ctx context.Context, metaArr []xlMetaV1, modTime time.Time,
    return getXLMetaInQuorum(ctx, metaArr, modTime, quorum)
}

// list of all errors that can be ignored in a metadata operation.
var objMetadataOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errVolumeNotFound, errFileNotFound, errFileAccessDenied, errCorruptedFormat)

// writeXLMetadata - writes `xl.json` to a single disk.
func writeXLMetadata(ctx context.Context, disk StorageAPI, bucket, prefix string, xlMeta xlMetaV1) error {
    jsonFile := path.Join(prefix, xlMetaJSONFile)
@@ -36,14 +36,6 @@ func (xl xlObjects) getUploadIDDir(bucket, object, uploadID string) string {
    return pathJoin(xl.getMultipartSHADir(bucket, object), uploadID)
}

// getUploadIDLockPath returns the name of the Lock in the form of
// bucket/object/uploadID. For locking, the path bucket/object/uploadID
// is locked instead of multipart-sha256-Dir/uploadID as it is more
// readable in the list-locks output which helps in debugging.
func (xl xlObjects) getUploadIDLockPath(bucket, object, uploadID string) string {
    return pathJoin(bucket, object, uploadID)
}

func (xl xlObjects) getMultipartSHADir(bucket, object string) string {
    return getSHA256Hash([]byte(pathJoin(bucket, object)))
}