crawler: Remove waitForLowActiveIO (#10667)

Only use dynamic delays for the crawler. Even though the max wait was 1 second, the sheer number 
of waits could severely impact crawler speed.

Instead of relying on a global metric, we use the stateless local delays to keep the crawler 
running at a speed better adjusted to current conditions.

The only case where we keep it is before bitrot checks, when those are enabled.
This commit is contained in:
Klaus Post
2020-10-13 13:45:08 -07:00
committed by GitHub
parent 9c042a503b
commit 03991c5d41
4 changed files with 19 additions and 21 deletions

View File

@@ -62,7 +62,7 @@ func TestDataUsageUpdate(t *testing.T) {
return 0, nil
}
got, err := crawlDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, func() {}, getSize)
got, err := crawlDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
if err != nil {
t.Fatal(err)
}
@@ -183,7 +183,7 @@ func TestDataUsageUpdate(t *testing.T) {
},
}
createUsageTestFiles(t, base, bucket, files)
got, err = crawlDataFolder(context.Background(), base, got, func() {}, getSize)
got, err = crawlDataFolder(context.Background(), base, got, getSize)
if err != nil {
t.Fatal(err)
}
@@ -268,7 +268,7 @@ func TestDataUsageUpdate(t *testing.T) {
}
// Changed dir must be picked up in this many cycles.
for i := 0; i < dataUsageUpdateDirCycles; i++ {
got, err = crawlDataFolder(context.Background(), base, got, func() {}, getSize)
got, err = crawlDataFolder(context.Background(), base, got, getSize)
if err != nil {
t.Fatal(err)
}
@@ -355,7 +355,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
}
return 0, nil
}
got, err := crawlDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, func() {}, getSize)
got, err := crawlDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize)
if err != nil {
t.Fatal(err)
}
@@ -465,7 +465,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
},
}
createUsageTestFiles(t, base, "", files)
got, err = crawlDataFolder(context.Background(), base, got, func() {}, getSize)
got, err = crawlDataFolder(context.Background(), base, got, getSize)
if err != nil {
t.Fatal(err)
}
@@ -548,7 +548,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
}
// Changed dir must be picked up in this many cycles.
for i := 0; i < dataUsageUpdateDirCycles; i++ {
got, err = crawlDataFolder(context.Background(), base, got, func() {}, getSize)
got, err = crawlDataFolder(context.Background(), base, got, getSize)
if err != nil {
t.Fatal(err)
}
@@ -652,7 +652,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
}
return 0, nil
}
want, err := crawlDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, func() {}, getSize)
want, err := crawlDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
if err != nil {
t.Fatal(err)
}