rename crawler config option to heal (#10678)

Harshavardhana authored 2020-10-14 13:51:51 -07:00, committed by GitHub
parent f9be783f3e
commit 2042d4873c
7 changed files with 30 additions and 25 deletions

View File

@@ -25,9 +25,9 @@ import (
"github.com/minio/minio/cmd/config/api"
"github.com/minio/minio/cmd/config/cache"
"github.com/minio/minio/cmd/config/compress"
-"github.com/minio/minio/cmd/config/crawler"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/config/etcd"
+"github.com/minio/minio/cmd/config/heal"
xldap "github.com/minio/minio/cmd/config/identity/ldap"
"github.com/minio/minio/cmd/config/identity/openid"
"github.com/minio/minio/cmd/config/notify"
@@ -56,7 +56,7 @@ func initHelp() {
config.KmsKesSubSys: crypto.DefaultKesKVS,
config.LoggerWebhookSubSys: logger.DefaultKVS,
config.AuditWebhookSubSys: logger.DefaultAuditKVS,
-config.CrawlerSubSys: crawler.DefaultKVS,
+config.HealSubSys: heal.DefaultKVS,
}
for k, v := range notify.DefaultNotificationKVS {
kvs[k] = v
@@ -109,8 +109,8 @@ func initHelp() {
Description: "manage global HTTP API call specific features, such as throttling, authentication types, etc.",
},
config.HelpKV{
-Key: config.CrawlerSubSys,
-Description: "manage continuous disk crawling for bucket disk usage, lifecycle, quota and data integrity checks",
+Key: config.HealSubSys,
+Description: "manage object healing frequency and bitrot verification checks",
},
config.HelpKV{
Key: config.LoggerWebhookSubSys,
@@ -191,7 +191,7 @@ func initHelp() {
config.EtcdSubSys: etcd.Help,
config.CacheSubSys: cache.Help,
config.CompressionSubSys: compress.Help,
-config.CrawlerSubSys: crawler.Help,
+config.HealSubSys: heal.Help,
config.IdentityOpenIDSubSys: openid.Help,
config.IdentityLDAPSubSys: xldap.Help,
config.PolicyOPASubSys: opa.Help,
@@ -253,7 +253,7 @@ func validateConfig(s config.Config, setDriveCount int) error {
return err
}
-if _, err := crawler.LookupConfig(s[config.CrawlerSubSys][config.Default]); err != nil {
+if _, err := heal.LookupConfig(s[config.HealSubSys][config.Default]); err != nil {
return err
}
@@ -438,9 +438,9 @@ func lookupConfigs(s config.Config, setDriveCount int) {
}
}
}
-globalCrawlerConfig, err = crawler.LookupConfig(s[config.CrawlerSubSys][config.Default])
+globalHealConfig, err = heal.LookupConfig(s[config.HealSubSys][config.Default])
if err != nil {
-logger.LogIf(ctx, fmt.Errorf("Unable to read crawler config: %w", err))
+logger.LogIf(ctx, fmt.Errorf("Unable to read heal config: %w", err))
}
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())

View File

@@ -76,7 +76,7 @@ const (
KmsKesSubSys = "kms_kes"
LoggerWebhookSubSys = "logger_webhook"
AuditWebhookSubSys = "audit_webhook"
-CrawlerSubSys = "crawler"
+HealSubSys = "heal"
// Add new constants here if you add new fields to config.
)
@@ -113,7 +113,7 @@ var SubSystems = set.CreateStringSet([]string{
PolicyOPASubSys,
IdentityLDAPSubSys,
IdentityOpenIDSubSys,
-CrawlerSubSys,
+HealSubSys,
NotifyAMQPSubSys,
NotifyESSubSys,
NotifyKafkaSubSys,
@@ -140,7 +140,7 @@ var SubSystemsSingleTargets = set.CreateStringSet([]string{
PolicyOPASubSys,
IdentityLDAPSubSys,
IdentityOpenIDSubSys,
-CrawlerSubSys,
+HealSubSys,
}...)
// Constant separators
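
The effect of the constant rename on config validation can be checked in isolation. A minimal sketch, assuming the exported SubSystems sets above and the Contains method of set.StringSet from github.com/minio/minio/pkg/set:

    package main

    import (
        "fmt"

        "github.com/minio/minio/cmd/config"
    )

    func main() {
        // After this change "heal" is a registered subsystem with a single
        // (default) target, while the old "crawler" name is no longer recognized.
        fmt.Println(config.SubSystems.Contains(config.HealSubSys))              // true
        fmt.Println(config.SubSystemsSingleTargets.Contains(config.HealSubSys)) // true
        fmt.Println(config.SubSystems.Contains("crawler"))                      // false
    }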

View File

@@ -14,7 +14,7 @@
* limitations under the License.
*/
-package crawler
+package heal
import (
"errors"
@@ -24,20 +24,20 @@ import (
// Compression environment variables
const (
-BitrotScan = "bitrotscan"
+Bitrot = "bitrotscan"
)
-// Config represents the crawler settings.
+// Config represents the heal settings.
type Config struct {
// Bitrot will perform bitrot scan on local disk when checking objects.
Bitrot bool `json:"bitrotscan"`
}
var (
-// DefaultKVS - default KV config for crawler settings
+// DefaultKVS - default KV config for heal settings
DefaultKVS = config.KVS{
config.KV{
-Key: BitrotScan,
+Key: Bitrot,
Value: config.EnableOff,
},
}
@@ -45,7 +45,7 @@ var (
// Help provides help for config values
Help = config.HelpKVS{
config.HelpKV{
-Key: BitrotScan,
+Key: Bitrot,
Description: `perform bitrot scan on disks when checking objects during crawl`,
Optional: true,
Type: "on|off",
@@ -55,12 +55,12 @@ var (
// LookupConfig - lookup config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
-if err = config.CheckValidKeys(config.CrawlerSubSys, kvs, DefaultKVS); err != nil {
+if err = config.CheckValidKeys(config.HealSubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
-bitrot := kvs.Get(BitrotScan)
+bitrot := kvs.Get(Bitrot)
if bitrot != config.EnableOn && bitrot != config.EnableOff {
-return cfg, errors.New(BitrotScan + ": must be 'on' or 'off'")
+return cfg, errors.New(Bitrot + ": must be 'on' or 'off'")
}
cfg.Bitrot = bitrot == config.EnableOn
return cfg, nil
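
A minimal sketch of exercising the renamed package on its own, assuming the minio module is importable and using heal.Bitrot, config.EnableOn and heal.LookupConfig as defined above (operators set the same key via the heal subsystem, e.g. bitrotscan=on):

    package main

    import (
        "fmt"

        "github.com/minio/minio/cmd/config"
        "github.com/minio/minio/cmd/config/heal"
    )

    func main() {
        // Build a KVS the same way DefaultKVS does, but with bitrot scanning enabled.
        kvs := config.KVS{
            config.KV{Key: heal.Bitrot, Value: config.EnableOn},
        }
        cfg, err := heal.LookupConfig(kvs)
        if err != nil {
            fmt.Println("lookup failed:", err)
            return
        }
        fmt.Println("bitrot scan enabled:", cfg.Bitrot) // bitrot scan enabled: true
    }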

View File

@@ -29,7 +29,7 @@ import (
"time"
"github.com/minio/minio/cmd/config"
-"github.com/minio/minio/cmd/config/crawler"
+"github.com/minio/minio/cmd/config/heal"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/lifecycle"
"github.com/minio/minio/pkg/bucket/replication"
@@ -53,7 +53,7 @@ const (
)
var (
-globalCrawlerConfig crawler.Config
+globalHealConfig heal.Config
dataCrawlerLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
)

View File

@@ -52,7 +52,6 @@ EXAMPLES:
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85
-{{.Prompt}} {{.HelpName}} /shared/nasvol
`

View File

@@ -98,6 +98,9 @@ func serverCmdArgs(ctx *cli.Context) []string {
v = env.Get(config.EnvEndpoints, "")
}
if v == "" {
+if !ctx.Args().Present() || ctx.Args().First() == "help" {
+cli.ShowCommandHelpAndExit(ctx, ctx.Command.Name, 1)
+}
return ctx.Args()
}
return strings.Fields(v)

View File

@@ -370,7 +370,7 @@ func (s *xlStorage) CrawlAndGetDataUsage(ctx context.Context, cache dataUsageCac
if objAPI == nil {
return cache, errServerNotInitialized
}
-opts := globalCrawlerConfig
+opts := globalHealConfig
dataUsageInfo, err := crawlDataFolder(ctx, s.diskPath, cache, func(item crawlItem) (int64, error) {
// Look for `xl.meta/xl.json' at the leaf.
@@ -414,7 +414,10 @@
err := s.VerifyFile(ctx, item.bucket, item.objectPath(), version)
switch err {
case errFileCorrupt:
-res, err := objAPI.HealObject(ctx, item.bucket, item.objectPath(), oi.VersionID, madmin.HealOpts{Remove: healDeleteDangling, ScanMode: madmin.HealDeepScan})
+res, err := objAPI.HealObject(ctx, item.bucket, item.objectPath(), oi.VersionID, madmin.HealOpts{
+Remove: healDeleteDangling,
+ScanMode: madmin.HealDeepScan,
+})
if err != nil {
if !errors.Is(err, NotImplemented{}) {
logger.LogIf(ctx, err)