minio/pkg/retry/retry_test.go
Harshavardhana febe9cc26a
fix: avoid timer leaks in dsync/lsync (#9781)
At a customer setup with a large number of concurrent calls,
it can be observed that newRetryTimer makes many tiny
allocations which are never relinquished across retries. In
this codepath we only need a single timer, reused for each
locker (see the sketch after the profile below).

```
(pprof) top
Showing nodes accounting for 8.68TB, 97.02% of 8.95TB total
Dropped 1198 nodes (cum <= 0.04TB)
Showing top 10 nodes out of 79
      flat  flat%   sum%        cum   cum%
    5.95TB 66.50% 66.50%     5.95TB 66.50%  time.NewTimer
    1.16TB 13.02% 79.51%     1.16TB 13.02%  github.com/ncw/directio.AlignedBlock
    0.67TB  7.53% 87.04%     0.70TB  7.78%  github.com/minio/minio/cmd.xlObjects.putObject
    0.21TB  2.36% 89.40%     0.21TB  2.36%  github.com/minio/minio/cmd.(*posix).Walk
    0.19TB  2.08% 91.49%     0.27TB  2.99%  os.statNolog
    0.14TB  1.59% 93.08%     0.14TB  1.60%  os.(*File).readdirnames
    0.10TB  1.09% 94.17%     0.11TB  1.25%  github.com/minio/minio/cmd.readDirN
    0.10TB  1.07% 95.23%     0.10TB  1.07%  syscall.ByteSliceFromString
    0.09TB  1.03% 96.27%     0.09TB  1.03%  strings.(*Builder).grow
    0.07TB  0.75% 97.02%     0.07TB  0.75%  path.(*lazybuf).append
```
2020-06-08 11:28:40 -07:00
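
A minimal sketch of that reuse pattern, assuming a hypothetical `retryLoop`/`backoff` pair rather than the actual dsync/lsync code: the timer is allocated once and re-armed with `Reset` on every attempt instead of calling `time.NewTimer` per retry.

```
// Hypothetical sketch, not the MinIO implementation: reuse one timer for
// the lifetime of the retry loop instead of allocating a new one per attempt.
package retrysketch

import (
	"context"
	"time"
)

// backoff computes a capped exponential delay for the given attempt.
func backoff(attempt int, unit, maxSleep time.Duration) time.Duration {
	if attempt > 30 {
		attempt = 30 // avoid shift overflow
	}
	d := unit * time.Duration(1<<uint(attempt))
	if d > maxSleep {
		d = maxSleep
	}
	return d
}

// retryLoop emits attempt counters on the returned channel, reusing a
// single time.Timer across iterations via Reset.
func retryLoop(ctx context.Context, unit, maxSleep time.Duration) <-chan int {
	attemptCh := make(chan int)

	go func() {
		defer close(attemptCh)

		timer := time.NewTimer(0) // allocated once; fires immediately for attempt 0
		defer timer.Stop()

		for attempt := 0; ; attempt++ {
			select {
			case <-timer.C: // the timer channel is drained here, so Reset below is safe
			case <-ctx.Done():
				return
			}

			select {
			case attemptCh <- attempt:
			case <-ctx.Done():
				return
			}

			// Re-arm the same timer rather than allocating a fresh one.
			timer.Reset(backoff(attempt, unit, maxSleep))
		}
	}()

	return attemptCh
}
```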


/*
 * Minio Cloud Storage, (C) 2020 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package retry

import (
	"context"
	"testing"
	"time"
)

// Tests for retry timer.
func TestRetryTimerSimple(t *testing.T) {
	retryCtx, cancel := context.WithCancel(context.Background())
	attemptCh := NewTimer(retryCtx)
	i := <-attemptCh
	if i != 0 {
		cancel()
		t.Fatalf("Invalid attempt counter returned should be 0, found %d instead", i)
	}
	i = <-attemptCh
	if i <= 0 {
		cancel()
		t.Fatalf("Invalid attempt counter returned should be greater than 0, found %d instead", i)
	}
	cancel()
	_, ok := <-attemptCh
	if ok {
		t.Fatal("Attempt counter should be closed")
	}
}

// Test retry time with no jitter.
func TestRetryTimerWithNoJitter(t *testing.T) {
	retryCtx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// No jitter
	attemptCh := NewTimerWithJitter(retryCtx, time.Millisecond, 5*time.Millisecond, NoJitter)
	i := <-attemptCh
	if i != 0 {
		cancel()
		t.Fatalf("Invalid attempt counter returned should be 0, found %d instead", i)
	}
	// Loop through the maximum possible attempt.
	for i = range attemptCh {
		if i == 30 {
			cancel()
		}
	}
	_, ok := <-attemptCh
	if ok {
		t.Fatal("Attempt counter should be closed")
	}
}

// Test retry time with Jitter greater than MaxJitter.
func TestRetryTimerWithJitter(t *testing.T) {
	retryCtx, cancel := context.WithCancel(context.Background())
	// Jitter will be set back to 1.0
	attemptCh := NewTimerWithJitter(retryCtx, time.Second, 30*time.Second, 2.0)
	i := <-attemptCh
	if i != 0 {
		cancel()
		t.Fatalf("Invalid attempt counter returned should be 0, found %d instead", i)
	}
	cancel()
	_, ok := <-attemptCh
	if ok {
		t.Fatal("Attempt counter should be closed")
	}
}
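
For context, a hedged sketch of how a caller might consume the attempt channel exercised by these tests, assuming only the `NewTimerWithJitter` signature and `NoJitter` constant shown above; `op` is a made-up placeholder for the operation being retried.

```
// runWithRetry is illustrative only: it retries op until it reports success
// or the parent context is cancelled.
func runWithRetry(ctx context.Context, op func(attempt int) bool) bool {
	retryCtx, cancel := context.WithCancel(ctx)
	// Cancelling the context stops the underlying timer and closes the channel.
	defer cancel()

	for attempt := range NewTimerWithJitter(retryCtx, time.Millisecond, 5*time.Millisecond, NoJitter) {
		if op(attempt) {
			return true
		}
	}
	return false
}
```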