fix: CrawlAndGetDataUsage close pipe() before using a new one (#11600)

Additionally, ensure that errors during deserialization close
the reader with the right error type, so that the Write() end
actually sees the final error; this avoids needing a WaitGroup
and waiting on it.
This commit is contained in:
Harshavardhana
2021-02-22 10:04:32 -08:00
committed by GitHub
parent 8778828a03
commit c31d2c3fdc
3 changed files with 8 additions and 12 deletions

View File

@@ -175,30 +175,25 @@ func (client *storageRESTClient) CrawlAndGetDataUsage(ctx context.Context, cache
go func() {
pw.CloseWithError(cache.serializeTo(pw))
}()
defer pr.Close()
respBody, err := client.call(ctx, storageRESTMethodCrawlAndGetDataUsage, url.Values{}, pr, -1)
defer http.DrainBody(respBody)
if err != nil {
pr.Close()
return cache, err
}
pr.Close()
var wg sync.WaitGroup
var newCache dataUsageCache
var decErr error
pr, pw = io.Pipe()
wg.Add(1)
go func() {
defer wg.Done()
decErr = newCache.deserialize(pr)
pr.CloseWithError(err)
pr.CloseWithError(newCache.deserialize(pr))
}()
err = waitForHTTPStream(respBody, pw)
pw.CloseWithError(err)
if err != nil {
return cache, err
}
wg.Wait()
return newCache, decErr
return newCache, nil
}
func (client *storageRESTClient) GetDiskID() (string, error) {