converge SNSD deployments into single code (#15988)

Harshavardhana 2022-11-01 16:41:01 -07:00 committed by GitHub
parent 7721595aa9
commit 0d49b365ff
10 changed files with 28 additions and 4266 deletions

View File

@@ -442,23 +442,6 @@ func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {
return saveConfig(ctx, api, configFile, data)
}
// deleteBucketMetadata deletes bucket metadata
// If config does not exist, no error is returned.
func deleteBucketMetadata(ctx context.Context, obj objectDeleter, bucket string) error {
metadataFiles := []string{
dataUsageCacheName,
bucketMetadataFile,
path.Join(replicationDir, resyncFileName),
}
for _, metaFile := range metadataFiles {
configFile := path.Join(bucketMetaPrefix, bucket, metaFile)
if err := deleteConfig(ctx, obj, configFile); err != nil && err != errConfigNotFound {
return err
}
}
return nil
}
// migrate config for remote targets by encrypting data if currently unencrypted and kms is configured.
func (b *BucketMetadata) migrateTargetConfig(ctx context.Context, objectAPI ObjectLayer) error {
var err error

View File

@@ -343,7 +343,13 @@ func createServerEndpoints(serverAddr string, args ...string) (
return nil, -1, errInvalidArgument
}
-if !ellipses.HasEllipses(args...) {
+ok := true
for _, arg := range args {
ok = ok && !ellipses.HasEllipses(arg)
}
// None of the args have ellipses, use the old style.
if ok {
setArgs, err := GetAllSets(args...)
if err != nil {
return nil, -1, err
@@ -365,6 +371,10 @@ func createServerEndpoints(serverAddr string, args ...string) (
var foundPrevLocal bool
for _, arg := range args {
if !ellipses.HasEllipses(arg) && len(args) > 1 {
// TODO: support SNSD deployments to be decommissioned in future
return nil, -1, fmt.Errorf("all args must have ellipses for pool expansion (%w) args: %s", errInvalidArgument, args)
}
setArgs, err := GetAllSets(arg)
if err != nil {
return nil, -1, err
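As an aside, the detection rule introduced above is easy to state: legacy (non-ellipses) argument parsing is used only when no argument contains an ellipses pattern. A minimal standalone sketch, assuming the github.com/minio/pkg/ellipses import path for the helper; nothing below is part of the patch:

package main

import (
	"fmt"

	"github.com/minio/pkg/ellipses" // assumed import path for the HasEllipses helper
)

// usesOldStyle mirrors the check above: old-style parsing applies only
// when none of the args carry an ellipses pattern such as "/data{1...4}".
func usesOldStyle(args ...string) bool {
	ok := true
	for _, arg := range args {
		ok = ok && !ellipses.HasEllipses(arg)
	}
	return ok
}

func main() {
	fmt.Println(usesOldStyle("/data1", "/data2"))       // true: old style
	fmt.Println(usesOldStyle("/data{1...4}"))           // false: ellipses style
	fmt.Println(usesOldStyle("/data1", "/data{1...4}")) // false: mixed args
}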

View File

@@ -63,22 +63,6 @@ func (z *erasureServerPools) SinglePool() bool {
// Initialize new pool of erasure sets.
func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, error) {
if endpointServerPools.NEndpoints() == 1 {
ep := endpointServerPools[0]
storageDisks, format, err := waitForFormatErasure(true, ep.Endpoints, 1, ep.SetCount, ep.DrivesPerSet, "", "")
if err != nil {
return nil, err
}
objLayer, err := newErasureSingle(ctx, storageDisks[0], format)
if err != nil {
return nil, err
}
globalLocalDrives = storageDisks
return objLayer, nil
}
var (
deploymentID string
distributionAlgo string
@@ -1681,7 +1665,7 @@ func (z *erasureServerPools) DeleteBucket(ctx context.Context, bucket string, op
}
// Purge the entire bucket metadata entirely.
-z.renameAll(context.Background(), minioMetaBucket, pathJoin(bucketMetaPrefix, bucket))
+z.deleteAll(context.Background(), minioMetaBucket, pathJoin(bucketMetaPrefix, bucket))
// If site replication is configured, hold on to deleted bucket state until sites sync
switch opts.SRDeleteOp {
case MarkDelete:
@@ -1691,12 +1675,12 @@ func (z *erasureServerPools) DeleteBucket(ctx context.Context, bucket string, op
return nil
}
-// renameAll will rename bucket+prefix unconditionally across all disks to
+// deleteAll will rename bucket+prefix unconditionally across all disks to
// minioMetaTmpDeletedBucket + unique uuid,
// Note that set distribution is ignored so it should only be used in cases where
// data is not distributed across sets. Errors are logged but individual
// disk failures are not returned.
-func (z *erasureServerPools) renameAll(ctx context.Context, bucket, prefix string) {
+func (z *erasureServerPools) deleteAll(ctx context.Context, bucket, prefix string) {
for _, servers := range z.serverPools {
for _, set := range servers.sets {
set.deleteAll(ctx, bucket, prefix)
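The doc comment keeps saying "rename" because deleteAll deletes by moving data into a trash location for asynchronous cleanup rather than unlinking in place. A self-contained sketch of that pattern; the paths and helper name are hypothetical, not MinIO's actual on-disk layout:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"os"
	"path/filepath"
)

// trashPrefix moves bucket/prefix into a trash directory under a unique
// name, so a background task can reclaim the space later.
func trashPrefix(diskRoot, bucket, prefix string) (string, error) {
	buf := make([]byte, 16)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	dst := filepath.Join(diskRoot, ".trash", hex.EncodeToString(buf))
	if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil {
		return "", err
	}
	return dst, os.Rename(filepath.Join(diskRoot, bucket, prefix), dst)
}

func main() {
	dst, err := trashPrefix("/tmp/disk1", "mybucket", "stale-prefix")
	fmt.Println(dst, err)
}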

File diff suppressed because it is too large

View File

@@ -45,6 +45,10 @@ type bucketMetacache struct {
updated bool `msg:"-"`
}
type deleteAllStorager interface {
deleteAll(ctx context.Context, bucket, prefix string)
}
// newBucketMetacache creates a new bucketMetacache.
// Optionally remove all existing caches.
func newBucketMetacache(bucket string, cleanup bool) *bucketMetacache {
@@ -52,10 +56,10 @@ func newBucketMetacache(bucket string, cleanup bool) *bucketMetacache {
// Recursively delete all caches.
objAPI := newObjectLayerFn()
if objAPI != nil {
-ez, ok := objAPI.(renameAllStorager)
+ez, ok := objAPI.(deleteAllStorager)
if ok {
ctx := context.Background()
-ez.renameAll(ctx, minioMetaBucket, metacachePrefixForID(bucket, slashSeparator))
+ez.deleteAll(ctx, minioMetaBucket, metacachePrefixForID(bucket, slashSeparator))
}
}
}
@@ -215,9 +219,9 @@ func (b *bucketMetacache) deleteAll() {
return
}
-ez, ok := objAPI.(renameAllStorager)
+ez, ok := objAPI.(deleteAllStorager)
if !ok {
-logger.LogIf(ctx, errors.New("bucketMetacache: expected objAPI to be 'renameAllStorager'"))
+logger.LogIf(ctx, errors.New("bucketMetacache: expected objAPI to be 'deleteAllStorager'"))
return
}
@@ -226,7 +230,7 @@ func (b *bucketMetacache) deleteAll() {
b.updated = true
// Delete all.
-ez.renameAll(ctx, minioMetaBucket, metacachePrefixForID(b.bucket, slashSeparator))
+ez.deleteAll(ctx, minioMetaBucket, metacachePrefixForID(b.bucket, slashSeparator))
b.caches = make(map[string]metacache, 10)
b.cachesRoot = make(map[string][]string, 10)
}
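The deleteAllStorager interface added in this hunk is the standard Go narrow-interface pattern: call sites assert a one-method interface instead of depending on the concrete *erasureServerPools type. A minimal illustrative sketch; the pools stand-in and purge helper are hypothetical:

package main

import (
	"context"
	"fmt"
)

type deleteAllStorager interface {
	deleteAll(ctx context.Context, bucket, prefix string)
}

type pools struct{} // stands in for the concrete object layer

func (pools) deleteAll(ctx context.Context, bucket, prefix string) {
	fmt.Println("deleting", bucket+"/"+prefix)
}

// purge only acts when the object layer implements deleteAll;
// any other type is silently skipped, as in the patched call sites.
func purge(objAPI interface{}, bucket, prefix string) {
	if ez, ok := objAPI.(deleteAllStorager); ok {
		ez.deleteAll(context.Background(), bucket, prefix)
	}
}

func main() {
	purge(pools{}, ".minio.sys", "buckets/mybucket")
}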

View File

@@ -258,248 +258,6 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) (
return entries, nil
}
// listPath will return the requested entries.
// If no more entries are in the listing, io.EOF is returned,
// otherwise nil or an unexpected error is returned.
// The listPathOptions given will be checked and modified internally.
// Required important fields are Bucket, Prefix, Separator.
// Other important fields are Limit, Marker.
// The list ID is always derived from the Marker.
func (es *erasureSingle) listPath(ctx context.Context, o *listPathOptions) (entries metaCacheEntriesSorted, err error) {
if err := checkListObjsArgs(ctx, o.Bucket, o.Prefix, o.Marker, es); err != nil {
return entries, err
}
// Marker is set validate pre-condition.
if o.Marker != "" && o.Prefix != "" {
// Marker not common with prefix is not implemented. Send an empty response
if !HasPrefix(o.Marker, o.Prefix) {
return entries, io.EOF
}
}
// With max keys of zero we have reached eof, return right here.
if o.Limit == 0 {
return entries, io.EOF
}
// For delimiter and prefix as '/' we do not list anything at all
// along with the prefix. On a flat namespace with 'prefix'
// as '/' we don't have any entries, since all the keys are
// of form 'keyName/...'
if strings.HasPrefix(o.Prefix, SlashSeparator) {
return entries, io.EOF
}
// If delimiter is slashSeparator we must return directories of
// the non-recursive scan unless explicitly requested.
o.IncludeDirectories = o.Separator == slashSeparator
if (o.Separator == slashSeparator || o.Separator == "") && !o.Recursive {
o.Recursive = o.Separator != slashSeparator
o.Separator = slashSeparator
} else {
// Default is recursive, if delimiter is set then list non recursive.
o.Recursive = true
}
// Decode and get the optional list id from the marker.
o.parseMarker()
o.BaseDir = baseDirFromPrefix(o.Prefix)
o.Transient = o.Transient || isReservedOrInvalidBucket(o.Bucket, false)
o.SetFilter()
if o.Transient {
o.Create = false
}
// We have 3 cases:
// 1) Cold listing, just list.
// 2) Returning, but with no id. Start async listing.
// 3) Returning, with ID, stream from list.
//
// If we don't have a list id we must ask the server if it has a cache or create a new.
if o.ID != "" && !o.Transient {
resp := localMetacacheMgr.getBucket(ctx, o.Bucket).findCache(*o)
c := &resp
if c.fileNotFound {
// No cache found, no entries found.
return entries, io.EOF
}
if c.status == scanStateError || c.status == scanStateNone {
o.ID = ""
o.Create = false
o.debugln("scan status", c.status, " - waiting a roundtrip to create")
} else {
// Continue listing
o.ID = c.id
go func(meta metacache) {
// Continuously update while we wait.
t := time.NewTicker(metacacheMaxClientWait / 10)
defer t.Stop()
select {
case <-ctx.Done():
// Request is done, stop updating.
return
case <-t.C:
meta.lastHandout = time.Now()
meta, _ = localMetacacheMgr.updateCacheEntry(meta)
}
}(*c)
}
// We have an existing list ID, continue streaming.
if o.Create {
o.debugln("Creating", o)
entries, err = es.listAndSave(ctx, o)
if err == nil || err == io.EOF {
return entries, err
}
entries.truncate(0)
} else {
o.debugln("Resuming", o)
entries, err = es.streamMetadataParts(ctx, *o)
entries.reuse = true // We read from stream and are not sharing results.
if err == nil {
return entries, nil
}
}
if IsErr(err, []error{
nil,
context.Canceled,
context.DeadlineExceeded,
// io.EOF is expected and should be returned but no need to log it.
io.EOF,
}...) {
// Expected good errors we don't need to return error.
return entries, err
}
entries.truncate(0)
o.ID = ""
if err != nil {
if !(isErrObjectNotFound(err) || errors.Is(err, IncompleteBody{}) || isErrVersionNotFound(err)) {
logger.LogIf(ctx, fmt.Errorf("Resuming listing from drives failed %w, proceeding to do raw listing", err))
}
}
}
// Do listing in-place.
// Create output for our results.
// Create filter for results.
o.debugln("Raw List", o)
filterCh := make(chan metaCacheEntry, o.Limit)
listCtx, cancelList := context.WithCancel(ctx)
filteredResults := o.gatherResults(listCtx, filterCh)
var wg sync.WaitGroup
wg.Add(1)
var listErr error
go func(o listPathOptions) {
defer wg.Done()
o.Limit = 0
listErr = es.listMerged(listCtx, o, filterCh)
o.debugln("listMerged returned with", listErr)
}(*o)
entries, err = filteredResults()
cancelList()
wg.Wait()
if listErr != nil && !errors.Is(listErr, context.Canceled) {
return entries, listErr
}
entries.reuse = true
truncated := entries.len() > o.Limit || err == nil
entries.truncate(o.Limit)
if !o.Transient && truncated {
if o.ID == "" {
entries.listID = mustGetUUID()
} else {
entries.listID = o.ID
}
}
if !truncated {
return entries, io.EOF
}
return entries, nil
}
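One precondition in the removed listPath above is worth spelling out: a continuation marker that does not share the requested prefix is not an error, it simply short-circuits the listing to io.EOF. A tiny standalone illustration; the helper name is invented for this sketch:

package main

import (
	"fmt"
	"strings"
)

// markerUsable mirrors the precondition: a non-empty marker must fall
// under the listing prefix, otherwise the listing is immediately EOF.
func markerUsable(marker, prefix string) bool {
	if marker == "" || prefix == "" {
		return true
	}
	return strings.HasPrefix(marker, prefix)
}

func main() {
	fmt.Println(markerUsable("photos/2021/a.jpg", "photos/")) // true: continue listing
	fmt.Println(markerUsable("videos/x.mp4", "photos/"))      // false: return io.EOF
}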
// listMerged will list across all sets and return a merged results stream.
// The result channel is closed when no more results are expected.
func (es *erasureSingle) listMerged(ctx context.Context, o listPathOptions, results chan<- metaCacheEntry) error {
var mu sync.Mutex
var wg sync.WaitGroup
var listErr error
var inputs []chan metaCacheEntry
innerResults := make(chan metaCacheEntry, 100)
inputs = append(inputs, innerResults)
mu.Lock()
listCtx, cancelList := context.WithCancel(ctx)
defer cancelList()
wg.Add(1)
go func() {
defer wg.Done()
err := es.listPathInner(listCtx, o, innerResults)
mu.Lock()
defer mu.Unlock()
listErr = err
}()
mu.Unlock()
// Do lifecycle filtering.
if o.Lifecycle != nil || o.Replication.Config != nil {
filterIn := make(chan metaCacheEntry, 10)
go applyBucketActions(ctx, o, filterIn, results)
// Replace results.
results = filterIn
}
// Gather results to a single channel.
err := mergeEntryChannels(ctx, inputs, results, func(existing, other *metaCacheEntry) (replace bool) {
// Pick object over directory
if existing.isDir() && !other.isDir() {
return true
}
if !existing.isDir() && other.isDir() {
return false
}
eMeta, err := existing.xlmeta()
if err != nil {
return true
}
oMeta, err := other.xlmeta()
if err != nil {
return false
}
// Replace if modtime is newer
if !eMeta.latestModtime().Equal(oMeta.latestModtime()) {
return oMeta.latestModtime().After(eMeta.latestModtime())
}
// Use NumVersions as a final tiebreaker.
return len(oMeta.versions) > len(eMeta.versions)
})
cancelList()
wg.Wait()
if err != nil {
return err
}
if listErr != nil {
if contextCanceled(ctx) {
return nil
}
if listErr.Error() == io.EOF.Error() {
return nil
}
logger.LogIf(ctx, listErr)
return listErr
}
if contextCanceled(ctx) {
return ctx.Err()
}
return nil
}
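The resolver callback in the removed listMerged encodes three tie-break rules for entries that disagree across listings: an object beats a directory, then the newer modtime wins, then the entry with more versions. A standalone sketch with simplified, hypothetical types:

package main

import (
	"fmt"
	"time"
)

type entry struct {
	isDir    bool
	modTime  time.Time
	versions int
}

// replace reports whether other should replace existing in the merge.
func replace(existing, other entry) bool {
	if existing.isDir && !other.isDir {
		return true // pick object over directory
	}
	if !existing.isDir && other.isDir {
		return false
	}
	if !existing.modTime.Equal(other.modTime) {
		return other.modTime.After(existing.modTime) // newer modtime wins
	}
	return other.versions > existing.versions // version count as tiebreaker
}

func main() {
	now := time.Now()
	fmt.Println(replace(entry{isDir: true}, entry{modTime: now}))                            // true
	fmt.Println(replace(entry{modTime: now}, entry{modTime: now.Add(time.Second)}))          // true
	fmt.Println(replace(entry{modTime: now, versions: 1}, entry{modTime: now, versions: 2})) // true
}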
// listMerged will list across all sets and return a merged results stream.
// The result channel is closed when no more results are expected.
func (z *erasureServerPools) listMerged(ctx context.Context, o listPathOptions, results chan<- metaCacheEntry) error {
@@ -648,73 +406,6 @@ func applyBucketActions(ctx context.Context, o listPathOptions, in <-chan metaCa
}
}
func (es *erasureSingle) listAndSave(ctx context.Context, o *listPathOptions) (entries metaCacheEntriesSorted, err error) {
// Use ID as the object name...
o.pool = 0
o.set = 0
saver := es
// Disconnect from call above, but cancel on exit.
listCtx, cancel := context.WithCancel(GlobalContext)
saveCh := make(chan metaCacheEntry, metacacheBlockSize)
inCh := make(chan metaCacheEntry, metacacheBlockSize)
outCh := make(chan metaCacheEntry, o.Limit)
filteredResults := o.gatherResults(ctx, outCh)
mc := o.newMetacache()
meta := metaCacheRPC{meta: &mc, cancel: cancel, rpc: globalNotificationSys.restClientFromHash(pathJoin(o.Bucket, o.Prefix)), o: *o}
// Save listing...
go func() {
if err := saver.saveMetaCacheStream(listCtx, &meta, saveCh); err != nil {
meta.setErr(err.Error())
}
cancel()
}()
// Do listing...
go func(o listPathOptions) {
err := es.listMerged(listCtx, o, inCh)
if err != nil {
meta.setErr(err.Error())
}
o.debugln("listAndSave: listing", o.ID, "finished with ", err)
}(*o)
// Keep track of when we return since we no longer have to send entries to output.
var funcReturned bool
var funcReturnedMu sync.Mutex
defer func() {
funcReturnedMu.Lock()
funcReturned = true
funcReturnedMu.Unlock()
}()
// Write listing to results and saver.
go func() {
var returned bool
for entry := range inCh {
if !returned {
funcReturnedMu.Lock()
returned = funcReturned
funcReturnedMu.Unlock()
outCh <- entry
if returned {
close(outCh)
}
}
entry.reusable = returned
saveCh <- entry
}
if !returned {
close(outCh)
}
close(saveCh)
}()
return filteredResults()
}
func (z *erasureServerPools) listAndSave(ctx context.Context, o *listPathOptions) (entries metaCacheEntriesSorted, err error) {
// Use ID as the object name...
o.pool = z.getAvailablePoolIdx(ctx, minioMetaBucket, o.ID, 10<<20)

View File

@@ -564,170 +564,6 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
}
}
func (es *erasureSingle) streamMetadataParts(ctx context.Context, o listPathOptions) (entries metaCacheEntriesSorted, err error) {
retries := 0
rpc := globalNotificationSys.restClientFromHash(pathJoin(o.Bucket, o.Prefix))
for {
if contextCanceled(ctx) {
return entries, ctx.Err()
}
// If many failures, check the cache state.
if retries > 10 {
err := o.checkMetacacheState(ctx, rpc)
if err != nil {
return entries, fmt.Errorf("remote listing canceled: %w", err)
}
retries = 1
}
const retryDelay = 250 * time.Millisecond
// All operations are performed without locks, so we must be careful and allow for failures.
// Read metadata associated with the object from a disk.
if retries > 0 {
_, err := es.disk.ReadVersion(ctx, minioMetaBucket,
o.objectPath(0), "", false)
if err != nil {
time.Sleep(retryDelay)
retries++
continue
}
}
// Load first part metadata...
// Read metadata associated with the object from all disks.
fi, metaArr, onlineDisks, err := es.getObjectFileInfo(ctx, minioMetaBucket, o.objectPath(0), ObjectOptions{}, true)
if err != nil {
switch toObjectErr(err, minioMetaBucket, o.objectPath(0)).(type) {
case ObjectNotFound:
retries++
time.Sleep(retryDelay)
continue
case InsufficientReadQuorum:
retries++
time.Sleep(retryDelay)
continue
default:
return entries, fmt.Errorf("reading first part metadata: %w", err)
}
}
partN, err := o.findFirstPart(fi)
switch {
case err == nil:
case errors.Is(err, io.ErrUnexpectedEOF):
if retries == 10 {
err := o.checkMetacacheState(ctx, rpc)
if err != nil {
return entries, fmt.Errorf("remote listing canceled: %w", err)
}
retries = -1
}
retries++
time.Sleep(retryDelay)
continue
case errors.Is(err, io.EOF):
return entries, io.EOF
}
// We got a stream to start at.
loadedPart := 0
for {
if contextCanceled(ctx) {
return entries, ctx.Err()
}
if partN != loadedPart {
if retries > 10 {
err := o.checkMetacacheState(ctx, rpc)
if err != nil {
return entries, fmt.Errorf("waiting for next part %d: %w", partN, err)
}
retries = 1
}
if retries > 0 {
// Load from one disk only
_, err := es.disk.ReadVersion(ctx, minioMetaBucket,
o.objectPath(partN), "", false)
if err != nil {
time.Sleep(retryDelay)
retries++
continue
}
}
// Load partN metadata...
fi, metaArr, onlineDisks, err = es.getObjectFileInfo(ctx, minioMetaBucket, o.objectPath(partN), ObjectOptions{}, true)
if err != nil {
time.Sleep(retryDelay)
retries++
continue
}
loadedPart = partN
bi, err := getMetacacheBlockInfo(fi, partN)
logger.LogIf(ctx, err)
if err == nil {
if bi.pastPrefix(o.Prefix) {
return entries, io.EOF
}
}
}
pr, pw := io.Pipe()
go func() {
werr := es.getObjectWithFileInfo(ctx, minioMetaBucket, o.objectPath(partN), 0,
fi.Size, pw, fi, metaArr, onlineDisks)
pw.CloseWithError(werr)
}()
tmp := newMetacacheReader(pr)
e, err := tmp.filter(o)
pr.CloseWithError(err)
entries.o = append(entries.o, e.o...)
if o.Limit > 0 && entries.len() > o.Limit {
entries.truncate(o.Limit)
return entries, nil
}
if err == nil {
// We stopped within the listing, we are done for now...
return entries, nil
}
if err != nil && err.Error() != io.EOF.Error() {
switch toObjectErr(err, minioMetaBucket, o.objectPath(partN)).(type) {
case ObjectNotFound:
retries++
time.Sleep(retryDelay)
continue
case InsufficientReadQuorum:
retries++
time.Sleep(retryDelay)
continue
default:
logger.LogIf(ctx, err)
return entries, err
}
}
// We finished at the end of the block.
// And should not expect any more results.
bi, err := getMetacacheBlockInfo(fi, partN)
logger.LogIf(ctx, err)
if err != nil || bi.EOS {
// We are done and there are no more parts.
return entries, io.EOF
}
if bi.endedPrefix(o.Prefix) {
// Nothing more for prefix.
return entries, io.EOF
}
partN++
retries = 0
}
}
}
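The removed streamMetadataParts shows the retry discipline this code relies on: transient failures (ObjectNotFound, InsufficientReadQuorum) are retried on a fixed delay, and after too many consecutive failures the cache state is re-checked before continuing. A generic, self-contained sketch of that shape; all names are illustrative:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errTransient = errors.New("transient read failure")

// readWithRetry retries transient errors with a fixed delay and runs a
// liveness check after more than ten consecutive failures.
func readWithRetry(read func() error, check func() error) error {
	const retryDelay = 250 * time.Millisecond
	retries := 0
	for {
		if retries > 10 {
			if err := check(); err != nil {
				return fmt.Errorf("remote listing canceled: %w", err)
			}
			retries = 1
		}
		err := read()
		if err == nil {
			return nil
		}
		if !errors.Is(err, errTransient) {
			return err // permanent errors are returned immediately
		}
		retries++
		time.Sleep(retryDelay)
	}
}

func main() {
	attempts := 0
	err := readWithRetry(func() error {
		attempts++
		if attempts < 3 {
			return errTransient
		}
		return nil
	}, func() error { return nil })
	fmt.Println(err, attempts) // <nil> 3
}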
// getListQuorum interprets list quorum values and returns the
// acceptable quorum expected for list operations.
func getListQuorum(quorum string, driveCount int) int {
@@ -747,60 +583,6 @@ func getListQuorum(quorum string, driveCount int) int {
return 3
}
// Will return io.EOF if continuing would not yield more results.
func (es *erasureSingle) listPathInner(ctx context.Context, o listPathOptions, results chan<- metaCacheEntry) (err error) {
defer close(results)
o.debugf(color.Green("listPath:")+" with options: %#v", o)
// How to resolve results.
resolver := metadataResolutionParams{
dirQuorum: 1,
objQuorum: 1,
bucket: o.Bucket,
}
// Maximum versions requested for "latest" object
// resolution on versioned buckets, this is to be only
// used when o.Versioned is false
if !o.Versioned {
resolver.requestedVersions = 1
}
var limit int
if o.Limit > 0 && o.StopDiskAtLimit {
// Over-read by 2 to know if we truncate results and not reach false EOF.
limit = o.Limit + 2
}
ctxDone := ctx.Done()
return listPathRaw(ctx, listPathRawOptions{
disks: []StorageAPI{es.disk},
bucket: o.Bucket,
path: o.BaseDir,
recursive: o.Recursive,
filterPrefix: o.FilterPrefix,
minDisks: 1,
forwardTo: o.Marker,
perDiskLimit: limit,
agreed: func(entry metaCacheEntry) {
select {
case <-ctxDone:
case results <- entry:
}
},
partial: func(entries metaCacheEntries, errs []error) {
// Results Disagree :-(
entry, ok := entries.resolve(&resolver)
if ok {
select {
case <-ctxDone:
case results <- *entry:
}
}
},
})
}
// Will return io.EOF if continuing would not yield more results.
func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions, results chan<- metaCacheEntry) (err error) {
defer close(results)
@@ -899,133 +681,6 @@ func (m *metaCacheRPC) setErr(err string) {
*m.meta = meta
}
func (es *erasureSingle) saveMetaCacheStream(ctx context.Context, mc *metaCacheRPC, entries <-chan metaCacheEntry) (err error) {
o := mc.o
o.debugf(color.Green("saveMetaCacheStream:")+" with options: %#v", o)
metaMu := &mc.mu
rpc := mc.rpc
cancel := mc.cancel
defer func() {
o.debugln(color.Green("saveMetaCacheStream:")+"err:", err)
if err != nil && !errors.Is(err, io.EOF) {
go mc.setErr(err.Error())
cancel()
}
}()
defer cancel()
// Save continuous updates
go func() {
var err error
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
var exit bool
for !exit {
select {
case <-ticker.C:
case <-ctx.Done():
exit = true
}
metaMu.Lock()
meta := *mc.meta
meta, err = o.updateMetacacheListing(meta, rpc)
if err == nil && time.Since(meta.lastHandout) > metacacheMaxClientWait {
cancel()
exit = true
meta.status = scanStateError
meta.error = fmt.Sprintf("listing canceled since time since last handout was %v ago", time.Since(meta.lastHandout).Round(time.Second))
o.debugln(color.Green("saveMetaCacheStream: ") + meta.error)
meta, err = o.updateMetacacheListing(meta, rpc)
}
if err == nil {
*mc.meta = meta
if meta.status == scanStateError {
cancel()
exit = true
}
}
metaMu.Unlock()
}
}()
const retryDelay = 200 * time.Millisecond
const maxTries = 5
// Keep destination...
// Write results to disk.
bw := newMetacacheBlockWriter(entries, func(b *metacacheBlock) error {
// if the block is 0 bytes and it's the first block, skip it.
// skip only this for Transient caches.
if len(b.data) == 0 && b.n == 0 && o.Transient {
return nil
}
o.debugln(color.Green("saveMetaCacheStream:")+" saving block", b.n, "to", o.objectPath(b.n))
r, err := hash.NewReader(bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data)))
logger.LogIf(ctx, err)
custom := b.headerKV()
_, err = es.putMetacacheObject(ctx, o.objectPath(b.n), NewPutObjReader(r), ObjectOptions{
UserDefined: custom,
})
if err != nil {
mc.setErr(err.Error())
cancel()
return err
}
if b.n == 0 {
return nil
}
// Update block 0 metadata.
var retries int
for {
meta := b.headerKV()
fi := FileInfo{
Metadata: make(map[string]string, len(meta)),
}
for k, v := range meta {
fi.Metadata[k] = v
}
err := es.updateObjectMeta(ctx, minioMetaBucket, o.objectPath(0), fi, es.disk)
if err == nil {
break
}
switch err.(type) {
case ObjectNotFound:
return err
case StorageErr:
return err
case InsufficientReadQuorum:
default:
logger.LogIf(ctx, err)
}
if retries >= maxTries {
return err
}
retries++
time.Sleep(retryDelay)
}
return nil
})
// Blocks while consuming entries or an error occurs.
err = bw.Close()
if err != nil {
mc.setErr(err.Error())
}
metaMu.Lock()
defer metaMu.Unlock()
if mc.meta.error != "" {
return err
}
// Save success
mc.meta.status = scanStateSuccess
meta, err := o.updateMetacacheListing(*mc.meta, rpc)
if err == nil {
*mc.meta = meta
}
return nil
}
func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCacheRPC, entries <-chan metaCacheEntry) (err error) {
o := mc.o
o.debugf(color.Green("saveMetaCacheStream:")+" with options: %#v", o)

View File

@@ -155,10 +155,10 @@ func (m *metacache) delete(ctx context.Context) {
logger.LogIf(ctx, errors.New("metacache.delete: no object layer"))
return
}
-ez, ok := objAPI.(renameAllStorager)
+ez, ok := objAPI.(deleteAllStorager)
if !ok {
-logger.LogIf(ctx, errors.New("metacache.delete: expected objAPI to be 'renameAllStorager'"))
+logger.LogIf(ctx, errors.New("metacache.delete: expected objAPI to be 'deleteAllStorager'"))
return
}
-ez.renameAll(ctx, minioMetaBucket, metacachePrefixForID(m.bucket, m.id))
+ez.deleteAll(ctx, minioMetaBucket, metacachePrefixForID(m.bucket, m.id))
}

View File

@@ -39,7 +39,8 @@ func TestNewObjectLayer(t *testing.T) {
if err != nil {
t.Fatal("Unexpected object layer initialization error", err)
}
-_, ok := obj.(*erasureSingle)
+_, ok := obj.(*erasureServerPools)
if !ok {
t.Fatal("Unexpected object layer detected", reflect.TypeOf(obj))
}

View File

@@ -4167,9 +4167,6 @@ func (c *SiteReplicationSys) healOLockConfigMetadata(ctx context.Context, objAPI
func (c *SiteReplicationSys) purgeDeletedBucket(ctx context.Context, objAPI ObjectLayer, bucket string) {
z, ok := objAPI.(*erasureServerPools)
if !ok {
if z, ok := objAPI.(*erasureSingle); ok {
z.purgeDelete(context.Background(), minioMetaBucket, pathJoin(bucketMetaPrefix, deletedBucketsPrefix, bucket))
}
return
}
z.purgeDelete(context.Background(), minioMetaBucket, pathJoin(bucketMetaPrefix, deletedBucketsPrefix, bucket))