docs: fix all Chinese doc links for the new docs site (#6097)

Additionally fix typos and default to US locale spellings
Harshavardhana
2018-06-28 16:02:02 -07:00
committed by GitHub
parent de251483d1
commit e5e522fc61
42 changed files with 113 additions and 111 deletions

View File

@@ -57,7 +57,7 @@ const (
var (
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
errHealPushStopNDiscard = fmt.Errorf("heal push stopped due to heal stop signal")
- errHealStopSignalled = fmt.Errorf("heal stop signalled")
+ errHealStopSignalled = fmt.Errorf("heal stop signaled")
errFnHealFromAPIErr = func(err error) error {
errCode := toAPIErrorCode(err)
@@ -301,7 +301,7 @@ type healSequence struct {
// current accumulated status of the heal sequence
currentStatus healSequenceStatus
- // channel signalled by background routine when traversal has
+ // channel signaled by background routine when traversal has
// completed
traverseAndHealDoneCh chan error
@@ -441,7 +441,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
h.currentStatus.updateLock.Unlock()
// This is a "safe" point for the heal sequence to quit if
- // signalled externally.
+ // signaled externally.
if h.isQuitting() {
return errHealStopSignalled
}
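
These hunks all touch the same mechanism: the heal sequence uses package-level sentinel errors as stop signals, which callers detect by comparison at "safe" points. A minimal, self-contained sketch of that pattern (names follow the hunk; the surrounding logic is condensed and illustrative, not MinIO's actual code):

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel error that callers compare against to detect the stop signal.
var errHealStopSignaled = errors.New("heal stop signaled")

// pushResult stands in for pushHealResultItem: at this "safe" point the
// sequence checks whether it was told to quit and returns the sentinel.
func pushResult(quitting bool) error {
	if quitting {
		return errHealStopSignaled
	}
	return nil
}

func main() {
	err := pushResult(true)
	fmt.Println(errors.Is(err, errHealStopSignaled)) // true
}
```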

View File

@@ -100,7 +100,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
}
// Initiate a list objects operation based on the input params.
// On success would return back ListObjectsInfo object to be
- // marshalled into S3 compatible XML header.
+ // marshaled into S3 compatible XML header.
listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, marker, delimiter, maxKeys, fetchOwner, startAfter)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@@ -166,7 +166,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
}
// Initiate a list objects operation based on the input params.
// On success would return back ListObjectsInfo object to be
- // marshalled into S3 compatible XML header.
+ // marshaled into S3 compatible XML header.
listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
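
Both hunks refer to marshaling the listing result into S3-compatible XML. A short sketch of what that means in Go, using encoding/xml on an illustrative subset of response fields (not MinIO's actual struct):

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// ListBucketResult mirrors the shape of an S3 list response.
type ListBucketResult struct {
	XMLName xml.Name `xml:"ListBucketResult"`
	Name    string   `xml:"Name"`
	Prefix  string   `xml:"Prefix"`
	MaxKeys int      `xml:"MaxKeys"`
}

func main() {
	res := ListBucketResult{Name: "mybucket", Prefix: "photos/", MaxKeys: 1000}
	out, err := xml.MarshalIndent(res, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // <ListBucketResult>...</ListBucketResult>
}
```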

View File

@@ -238,7 +238,7 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
pipeWriter.CloseWithError(err)
return
}
- pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
+ pipeWriter.Close() // Close writer explicitly signaling we wrote all data.
}()
err = dcache.Put(ctx, bucket, object, hashReader, c.getMetadata(objInfo))
if err != nil {
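
The comment being respelled here describes io.Pipe's close discipline: the writing goroutine must end with CloseWithError on failure (so the reader sees the error) or Close on success (so the reader sees a clean io.EOF). A minimal runnable sketch of that discipline, with illustrative data:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	pr, pw := io.Pipe()
	go func() {
		if _, err := io.Copy(pw, strings.NewReader("object bytes")); err != nil {
			pw.CloseWithError(err) // reader's next Read returns this error
			return
		}
		pw.Close() // clean close: reader gets io.EOF, i.e. "we wrote all data"
	}()
	data, err := io.ReadAll(pr)
	fmt.Println(string(data), err) // object bytes <nil>
}
```

The same Close/CloseWithError pairing appears in the FS and XL CopyObject hunks below.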

View File

@@ -264,7 +264,7 @@ func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d
}
return
}
- // Close writer explicitly signalling we wrote all data.
+ // Close writer explicitly signaling we wrote all data.
if gerr := srcInfo.Writer.Close(); gerr != nil {
logger.LogIf(ctx, gerr)
return

View File

@@ -505,7 +505,7 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
}
return
}
- // Close writer explicitly signalling we wrote all data.
+ // Close writer explicitly signaling we wrote all data.
if gerr := srcInfo.Writer.Close(); gerr != nil {
logger.LogIf(ctx, gerr)
return

View File

@@ -688,7 +688,7 @@ func setBucketForwardingHandler(h http.Handler) http.Handler {
// rate.Limiter token bucket configured with maxOpenFileLimit and
// burst set to 1. The request will idle for up to 1*time.Second.
// If the limiter detects the deadline will be exceeded, the request is
- // cancelled immediately.
+ // canceled immediately.
func setRateLimitHandler(h http.Handler) http.Handler {
_, maxLimit, err := sys.GetMaxOpenFileLimit()
logger.FatalIf(err, "Unable to get maximum open file limit", context.Background())
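
Since the comment block describes the whole mechanism, a sketch may help: a token bucket from golang.org/x/time/rate with burst 1, where each request waits up to one second for a token and is rejected early if the limiter can tell the deadline would be missed. The handler wiring and status code here are illustrative, not MinIO's actual implementation:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"golang.org/x/time/rate"
)

// rateLimit wraps h with a token bucket refilling at maxOpenFileLimit
// tokens per second, burst set to 1.
func rateLimit(h http.Handler, maxOpenFileLimit int) http.Handler {
	limiter := rate.NewLimiter(rate.Limit(maxOpenFileLimit), 1)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The request may idle for up to one second waiting for a token.
		ctx, cancel := context.WithTimeout(r.Context(), 1*time.Second)
		defer cancel()
		// Wait fails fast once the limiter detects the deadline will be
		// exceeded, canceling the request immediately.
		if err := limiter.Wait(ctx); err != nil {
			http.Error(w, "server busy", http.StatusServiceUnavailable)
			return
		}
		h.ServeHTTP(w, r)
	})
}

func main() {
	ok := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	http.ListenAndServe(":8080", rateLimit(ok, 1024))
}
```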

View File

@@ -226,7 +226,7 @@ func TestNamespaceForceUnlockTest(t *testing.T) {
select {
case <-ch:
- // Signalled so all is fine.
+ // Signaled so all is fine.
break
case <-time.After(100 * time.Millisecond):
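
The test relies on the standard select-with-timeout idiom: proceed when the channel is signaled, fail if nothing arrives within the window. In isolation (channel and duration are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ch := make(chan struct{})
	go func() {
		// Stand-in for the background work (e.g. a force-unlock) finishing.
		close(ch)
	}()
	select {
	case <-ch:
		fmt.Println("signaled, all is fine")
	case <-time.After(100 * time.Millisecond):
		fmt.Println("timed out waiting for the signal")
	}
}
```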

View File

@@ -470,7 +470,7 @@ func TestPosixMakeVol(t *testing.T) {
}
}
- // TestPosixDeleteVol - Validates the expected behaviour of posix.DeleteVol for various cases.
+ // TestPosixDeleteVol - Validates the expected behavior of posix.DeleteVol for various cases.
func TestPosixDeleteVol(t *testing.T) {
// create posix test setup
posixStorage, path, err := newPosixTestSetup()

View File

@@ -69,7 +69,7 @@ func TestUNCPaths(t *testing.T) {
}
}
- // Test to validate posix behaviour on windows when a non-final path component is a file.
+ // Test to validate posix behavior on windows when a non-final path component is a file.
func TestUNCPathENOTDIR(t *testing.T) {
var err error
// Instantiate posix object to manage a disk
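
What the test validates, reduced to a runnable sketch: when a non-final path component is a regular file, stat'ing a path through it fails (ENOTDIR on POSIX; Windows reports its own error). Paths and file names are illustrative:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "enotdir")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Create a regular file, then use it as if it were a directory.
	file := filepath.Join(dir, "not-a-dir")
	if err := os.WriteFile(file, []byte("x"), 0644); err != nil {
		panic(err)
	}
	_, err = os.Stat(filepath.Join(file, "child"))
	fmt.Println(err) // e.g. "... not a directory" on POSIX systems
}
```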

View File

@@ -2420,7 +2420,7 @@ func (s *TestSuiteCommon) TestBucketMultipartList(c *check) {
// unmarshalling works from a client perspective, specifically
// while unmarshalling time.Time type for 'Initiated' field.
// time.Time does not honor xml marshaler, it means that we need
- // to encode/format it before giving it to xml marshalling.
+ // to encode/format it before giving it to xml marshaling.
// This below check adds client side verification to see if it's
// truly parseable.
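
The comment's point is that the 'Initiated' timestamp is pre-formatted before XML marshaling. One hedged way to do that is to carry the field as a string rather than time.Time; the struct and layout below are illustrative, not MinIO's actual types:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

// Upload carries Initiated as a pre-formatted string so the XML output
// uses exactly the layout we choose.
type Upload struct {
	Key       string `xml:"Key"`
	Initiated string `xml:"Initiated"`
}

func main() {
	u := Upload{
		Key:       "myobject",
		Initiated: time.Now().UTC().Format("2006-01-02T15:04:05.000Z"),
	}
	out, err := xml.MarshalIndent(u, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```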

View File

@@ -458,7 +458,7 @@ func getUpdateInfo(timeout time.Duration, mode string) (updateMsg string, sha256
func doUpdate(sha256Hex string, latestReleaseTime time.Time, ok bool) (successMsg string, err error) {
if !ok {
- successMsg = greenColorSprintf("Minio update to version RELEASE.%s cancelled.",
+ successMsg = greenColorSprintf("Minio update to version RELEASE.%s canceled.",
latestReleaseTime.Format(minioReleaseTagTimeLayout))
return successMsg, nil
}

View File

@@ -614,7 +614,7 @@ func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, destBucke
}
return
}
- // Close writer explicitly signalling we wrote all data.
+ // Close writer explicitly signaling we wrote all data.
if gerr := srcInfo.Writer.Close(); gerr != nil {
logger.LogIf(ctx, gerr)
return

View File

@@ -463,7 +463,7 @@ func writeXLMetadata(ctx context.Context, disk StorageAPI, bucket, prefix string
logger.LogIf(ctx, err)
return err
}
- // Persist marshalled data.
+ // Persist marshaled data.
err = disk.AppendFile(bucket, jsonFile, metadataBytes)
logger.LogIf(ctx, err)
return err
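
The "persist marshaled data" step is the classic marshal-then-append sequence. A self-contained approximation, with encoding/json and a plain file append standing in for MinIO's StorageAPI.AppendFile:

```go
package main

import (
	"encoding/json"
	"os"
)

// metaSketch is an illustrative stand-in for the xl metadata structure.
type metaSketch struct {
	Version string `json:"version"`
	Format  string `json:"format"`
}

func main() {
	metadataBytes, err := json.Marshal(metaSketch{Version: "1.0.0", Format: "xl"})
	if err != nil {
		panic(err)
	}
	// Persist the marshaled data by appending it to the metadata file.
	f, err := os.OpenFile("xl.json", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if _, err := f.Write(metadataBytes); err != nil {
		panic(err)
	}
}
```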

View File

@@ -277,7 +277,7 @@ func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, ds
}
return
}
- // Close writer explicitly signalling we wrote all data.
+ // Close writer explicitly signaling we wrote all data.
if gerr := srcInfo.Writer.Close(); gerr != nil {
logger.LogIf(ctx, gerr)
return
@@ -482,7 +482,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
// to indicate where the listing should begin from.
//
// Implements S3 compatible ListObjectParts API. The resulting
- // ListPartsInfo structure is marshalled directly into XML and
+ // ListPartsInfo structure is marshaled directly into XML and
// replied back to the client.
func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListPartsInfo, e error) {
if err := checkListPartsArgs(ctx, bucket, object, xl); err != nil {
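
The doc comment above describes marker-based pagination: partNumberMarker says where the listing resumes, and maxParts caps the page. A simplified in-memory sketch of that contract (not the real ListObjectParts, which reads parts from disk):

```go
package main

import "fmt"

// listParts returns parts strictly after partNumberMarker, at most maxParts
// of them, plus the marker to resume from and whether more remain.
func listParts(parts []int, partNumberMarker, maxParts int) (page []int, nextMarker int, truncated bool) {
	for _, p := range parts {
		if p <= partNumberMarker {
			continue // skip parts at or before the marker
		}
		if len(page) >= maxParts {
			// More parts remain beyond this page; resume from the last one returned.
			return page, partNumberMarker, true
		}
		page = append(page, p)
		partNumberMarker = p
	}
	return page, 0, false
}

func main() {
	parts := []int{1, 2, 3, 4, 5}
	page, next, more := listParts(parts, 0, 2)
	fmt.Println(page, next, more) // [1 2] 2 true
}
```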

View File

@@ -142,7 +142,7 @@ func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBuc
pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject))
return
}
- pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
+ pipeWriter.Close() // Close writer explicitly signaling we wrote all data.
}()
hashReader, err := hash.NewReader(pipeReader, length, "", "")