Fix JSON Close data race. (#8486)

The JSON stream library has no safe way of aborting while a read is in progress.

Since we cannot expect the caller to safely handle concurrent "Read" and "Close" calls, we must handle this ourselves.

Also any Read error returned from upstream will crash the server. We preserve the errors and instead always return io.EOF upstream, but send the error on Close.

`readahead v1.3.1` handles Read after Close better.

Updates to `progressReader` are mostly to ensure safety.

Fixes #8481
This commit is contained in:
Klaus Post
2019-11-06 01:20:37 +03:00
committed by kannappanr
parent 1e457dda7e
commit 26e760ee62
5 changed files with 101 additions and 16 deletions

View File

@@ -18,8 +18,10 @@ package s3select
import (
"compress/bzip2"
"errors"
"fmt"
"io"
"sync"
"sync/atomic"
gzip "github.com/klauspost/pgzip"
@@ -50,13 +52,29 @@ type progressReader struct {
rc io.ReadCloser
scannedReader *countUpReader
processedReader *countUpReader
closedMu sync.Mutex
closed bool
}
// Read satisfies io.Reader by delegating to the processed reader.
// The close mutex is held for the full duration of the read, so a
// concurrent Close from another goroutine blocks until this call
// returns instead of racing with it.
func (pr *progressReader) Read(p []byte) (n int, err error) {
	pr.closedMu.Lock()
	defer pr.closedMu.Unlock()
	// Refuse reads once the reader has been closed.
	if !pr.closed {
		return pr.processedReader.Read(p)
	}
	return 0, errors.New("progressReader: read after Close")
}
// Close releases the underlying ReadCloser exactly once; subsequent
// calls are no-ops returning nil. Taking closedMu here means Close
// waits for any in-flight Read to finish before closing.
func (pr *progressReader) Close() error {
	pr.closedMu.Lock()
	defer pr.closedMu.Unlock()
	if !pr.closed {
		pr.closed = true
		return pr.rc.Close()
	}
	// Already closed: idempotent.
	return nil
}