/*
 * MinIO Cloud Storage, (C) 2018 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bufio"
	"encoding/binary"
	"encoding/gob"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os/user"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/tinylib/msgp/msgp"

	jwtreq "github.com/dgrijalva/jwt-go/request"
	"github.com/gorilla/mux"
	"github.com/minio/minio/cmd/config"
	xhttp "github.com/minio/minio/cmd/http"
	xjwt "github.com/minio/minio/cmd/jwt"
	"github.com/minio/minio/cmd/logger"
)

var errDiskStale = errors.New("disk stale")

// To abstract a disk over network.
type storageRESTServer struct {
	storage *xlStorage
}

func (s *storageRESTServer) writeErrorResponse(w http.ResponseWriter, err error) {
	if errors.Is(err, errDiskStale) {
		w.WriteHeader(http.StatusPreconditionFailed)
	} else {
		w.WriteHeader(http.StatusForbidden)
	}
	w.Write([]byte(err.Error()))
	w.(http.Flusher).Flush()
}
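
// Note: a stale disk is reported as 412 Precondition Failed so callers can
// tell it apart from authentication or validation failures (403); in both
// cases the response body carries the plain-text error message.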

// DefaultSkewTime - skew time is 15 minutes between minio peers.
const DefaultSkewTime = 15 * time.Minute

// Authenticates storage client's requests and validates for skewed time.
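// A valid request must carry a bearer JWT signed with the active credentials
// whose audience equals the request's raw query string, and an "X-Minio-Time"
// header (RFC3339) within DefaultSkewTime of the server clock.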
func storageServerRequestValidate(r *http.Request) error {
	token, err := jwtreq.AuthorizationHeaderExtractor.ExtractToken(r)
	if err != nil {
		if err == jwtreq.ErrNoTokenInRequest {
			return errNoAuthToken
		}
		return err
	}

	claims := xjwt.NewStandardClaims()
	if err = xjwt.ParseWithStandardClaims(token, claims, []byte(globalActiveCred.SecretKey)); err != nil {
		return errAuthentication
	}

	owner := claims.AccessKey == globalActiveCred.AccessKey || claims.Subject == globalActiveCred.AccessKey
	if !owner {
		return errAuthentication
	}

	if claims.Audience != r.URL.RawQuery {
		return errAuthentication
	}

	requestTimeStr := r.Header.Get("X-Minio-Time")
	requestTime, err := time.Parse(time.RFC3339, requestTimeStr)
	if err != nil {
		return err
	}
	utcNow := UTCNow()
	delta := requestTime.Sub(utcNow)
	if delta < 0 {
		delta *= -1
	}
	if delta > DefaultSkewTime {
		return fmt.Errorf("client time %v is too far apart from server time %v", requestTime, utcNow)
	}

	return nil
}

// IsValid - To authenticate and verify the time difference.
func (s *storageRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
	if s.storage == nil {
		s.writeErrorResponse(w, errDiskNotFound)
		return false
	}

	if err := storageServerRequestValidate(r); err != nil {
		s.writeErrorResponse(w, err)
		return false
	}

	diskID := r.URL.Query().Get(storageRESTDiskID)
	if diskID == "" {
		// Request sent empty disk-id, we allow the request
		// as the peer might be coming up and trying to read format.json
		// or create format.json
		return true
	}

	storedDiskID, err := s.storage.GetDiskID()
	if err != nil {
		s.writeErrorResponse(w, err)
		return false
	}

	if diskID != storedDiskID {
		s.writeErrorResponse(w, errDiskStale)
		return false
	}

	// If format.json is available and request sent the right disk-id, we allow the request
	return true
}

// HealthHandler - checks if the disk is stale.
func (s *storageRESTServer) HealthHandler(w http.ResponseWriter, r *http.Request) {
	s.IsValid(w, r)
}

// DiskInfoHandler - returns disk info.
func (s *storageRESTServer) DiskInfoHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	info, err := s.storage.DiskInfo(r.Context())
	if err != nil {
		info.Error = err.Error()
	}
	defer w.(http.Flusher).Flush()
	logger.LogIf(r.Context(), msgp.Encode(w, &info))
}

// CrawlAndGetDataUsageHandler - crawls the disk and returns an updated data
// usage cache, keeping the connection alive while the crawl runs.
func (s *storageRESTServer) CrawlAndGetDataUsageHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}

	setEventStreamHeaders(w)

	var cache dataUsageCache
	err := cache.deserialize(r.Body)
	if err != nil {
		logger.LogIf(r.Context(), err)
		s.writeErrorResponse(w, err)
		return
	}

	done := keepHTTPResponseAlive(w)
	usageInfo, err := s.storage.CrawlAndGetDataUsage(r.Context(), cache)
	done(err)
	if err != nil {
		return
	}
	w.Write(usageInfo.serialize())
	w.(http.Flusher).Flush()
}

// MakeVolHandler - make a volume.
func (s *storageRESTServer) MakeVolHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	err := s.storage.MakeVol(r.Context(), volume)
	if err != nil {
		s.writeErrorResponse(w, err)
	}
}

// MakeVolBulkHandler - create multiple volumes as a bulk operation.
func (s *storageRESTServer) MakeVolBulkHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volumes := strings.Split(vars[storageRESTVolumes], ",")
	err := s.storage.MakeVolBulk(r.Context(), volumes...)
	if err != nil {
		s.writeErrorResponse(w, err)
	}
}

// ListVolsHandler - list volumes.
func (s *storageRESTServer) ListVolsHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	infos, err := s.storage.ListVols(r.Context())
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}
	defer w.(http.Flusher).Flush()
	logger.LogIf(r.Context(), msgp.Encode(w, VolsInfo(infos)))
}

// StatVolHandler - stat a volume.
func (s *storageRESTServer) StatVolHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	info, err := s.storage.StatVol(r.Context(), volume)
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}
	defer w.(http.Flusher).Flush()
	logger.LogIf(r.Context(), msgp.Encode(w, &info))
}

// DeleteVolHandler - delete a volume.
func (s *storageRESTServer) DeleteVolHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	forceDelete := vars[storageRESTForceDelete] == "true"
	err := s.storage.DeleteVol(r.Context(), volume, forceDelete)
	if err != nil {
		s.writeErrorResponse(w, err)
	}
}

// AppendFileHandler - append data from the request to the file specified.
func (s *storageRESTServer) AppendFileHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	filePath := vars[storageRESTFilePath]

	buf := make([]byte, r.ContentLength)
	_, err := io.ReadFull(r.Body, buf)
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}
	err = s.storage.AppendFile(r.Context(), volume, filePath, buf)
	if err != nil {
		s.writeErrorResponse(w, err)
	}
}

// CreateFileHandler - fallocate() space for a file and copy the contents from the request.
func (s *storageRESTServer) CreateFileHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	filePath := vars[storageRESTFilePath]

	fileSizeStr := vars[storageRESTLength]
	fileSize, err := strconv.Atoi(fileSizeStr)
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}
	err = s.storage.CreateFile(r.Context(), volume, filePath, int64(fileSize), r.Body)
	if err != nil {
		s.writeErrorResponse(w, err)
	}
}

// DeleteVersionHandler - delete a version of an object.
func (s *storageRESTServer) DeleteVersionHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	filePath := vars[storageRESTFilePath]

	if r.ContentLength < 0 {
		s.writeErrorResponse(w, errInvalidArgument)
		return
	}

	var fi FileInfo
	if err := msgp.Decode(r.Body, &fi); err != nil {
		s.writeErrorResponse(w, err)
		return
	}

	err := s.storage.DeleteVersion(r.Context(), volume, filePath, fi)
	if err != nil {
		s.writeErrorResponse(w, err)
	}
}

// ReadVersionHandler - read the metadata of a versionID.
func (s *storageRESTServer) ReadVersionHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	filePath := vars[storageRESTFilePath]
	versionID := vars[storageRESTVersionID]
	checkDataDir, err := strconv.ParseBool(vars[storageRESTCheckDataDir])
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}

	fi, err := s.storage.ReadVersion(r.Context(), volume, filePath, versionID, checkDataDir)
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}
	logger.LogIf(r.Context(), msgp.Encode(w, &fi))
}

// WriteMetadataHandler - write updated metadata.
func (s *storageRESTServer) WriteMetadataHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	filePath := vars[storageRESTFilePath]

	if r.ContentLength < 0 {
		s.writeErrorResponse(w, errInvalidArgument)
		return
	}

	var fi FileInfo
	if err := msgp.Decode(r.Body, &fi); err != nil {
		s.writeErrorResponse(w, err)
		return
	}

	err := s.storage.WriteMetadata(r.Context(), volume, filePath, fi)
	if err != nil {
		s.writeErrorResponse(w, err)
	}
}

// WriteAllHandler - write all content to a file.
func (s *storageRESTServer) WriteAllHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	filePath := vars[storageRESTFilePath]

	if r.ContentLength < 0 {
		s.writeErrorResponse(w, errInvalidArgument)
		return
	}
	tmp := make([]byte, r.ContentLength)
	_, err := io.ReadFull(r.Body, tmp)
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}
	err = s.storage.WriteAll(r.Context(), volume, filePath, tmp)
	if err != nil {
		s.writeErrorResponse(w, err)
	}
}

// CheckPartsHandler - check if the specified file parts exist.
func (s *storageRESTServer) CheckPartsHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	filePath := vars[storageRESTFilePath]

	if r.ContentLength < 0 {
		s.writeErrorResponse(w, errInvalidArgument)
		return
	}

	var fi FileInfo
	if err := msgp.Decode(r.Body, &fi); err != nil {
		s.writeErrorResponse(w, err)
		return
	}

	if err := s.storage.CheckParts(r.Context(), volume, filePath, fi); err != nil {
		s.writeErrorResponse(w, err)
	}
}

// CheckFileHandler - check if a file metadata exists.
func (s *storageRESTServer) CheckFileHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	filePath := vars[storageRESTFilePath]

	if err := s.storage.CheckFile(r.Context(), volume, filePath); err != nil {
		s.writeErrorResponse(w, err)
	}
}

// ReadAllHandler - read all the contents of a file.
func (s *storageRESTServer) ReadAllHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	filePath := vars[storageRESTFilePath]

	buf, err := s.storage.ReadAll(r.Context(), volume, filePath)
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}
	w.Header().Set(xhttp.ContentLength, strconv.Itoa(len(buf)))
	w.Write(buf)
	w.(http.Flusher).Flush()
}

// ReadFileHandler - read section of a file.
func (s *storageRESTServer) ReadFileHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	filePath := vars[storageRESTFilePath]
	offset, err := strconv.Atoi(vars[storageRESTOffset])
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}
	length, err := strconv.Atoi(vars[storageRESTLength])
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}
	if offset < 0 || length < 0 {
		s.writeErrorResponse(w, errInvalidArgument)
		return
	}
	var verifier *BitrotVerifier
	if vars[storageRESTBitrotAlgo] != "" {
		hashStr := vars[storageRESTBitrotHash]
		var hash []byte
		hash, err = hex.DecodeString(hashStr)
		if err != nil {
			s.writeErrorResponse(w, err)
			return
		}
		verifier = NewBitrotVerifier(BitrotAlgorithmFromString(vars[storageRESTBitrotAlgo]), hash)
	}
	buf := make([]byte, length)
	_, err = s.storage.ReadFile(r.Context(), volume, filePath, int64(offset), buf, verifier)
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}
	w.Header().Set(xhttp.ContentLength, strconv.Itoa(len(buf)))
	w.Write(buf)
	w.(http.Flusher).Flush()
}

// ReadFileStreamHandler - read section of a file.
func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	filePath := vars[storageRESTFilePath]
	offset, err := strconv.Atoi(vars[storageRESTOffset])
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}
	length, err := strconv.Atoi(vars[storageRESTLength])
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}

	rc, err := s.storage.ReadFileStream(r.Context(), volume, filePath, int64(offset), int64(length))
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}
	defer rc.Close()

	w.Header().Set(xhttp.ContentLength, strconv.Itoa(length))

	io.Copy(w, rc)
	w.(http.Flusher).Flush()
}

// WalkVersionsHandler - remote caller to start walking at a requested directory path.
func (s *storageRESTServer) WalkVersionsHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	dirPath := vars[storageRESTDirPath]
	markerPath := vars[storageRESTMarkerPath]
	recursive, err := strconv.ParseBool(vars[storageRESTRecursive])
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}

	setEventStreamHeaders(w)

	fch, err := s.storage.WalkVersions(r.Context(), volume, dirPath, markerPath, recursive, r.Context().Done())
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}
	encoder := msgp.NewWriter(w)
	for fi := range fch {
		logger.LogIf(r.Context(), fi.EncodeMsg(encoder))
	}
	logger.LogIf(r.Context(), encoder.Flush())
}

// ListDirHandler - list a directory.
func (s *storageRESTServer) ListDirHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	dirPath := vars[storageRESTDirPath]
	count, err := strconv.Atoi(vars[storageRESTCount])
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}

	entries, err := s.storage.ListDir(r.Context(), volume, dirPath, count)
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}
	gob.NewEncoder(w).Encode(&entries)
	w.(http.Flusher).Flush()
}

// DeleteFileHandler - delete a file.
func (s *storageRESTServer) DeleteFileHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	filePath := vars[storageRESTFilePath]
	recursive, err := strconv.ParseBool(vars[storageRESTRecursive])
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}

	err = s.storage.Delete(r.Context(), volume, filePath, recursive)
	if err != nil {
		s.writeErrorResponse(w, err)
	}
}

// DeleteVersionsErrsResp - collection of delete errors
// for bulk version deletes
type DeleteVersionsErrsResp struct {
	Errs []error
}

// DeleteVersionsHandler - delete a set of versions.
func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}

	vars := r.URL.Query()
	volume := vars.Get(storageRESTVolume)

	totalVersions, err := strconv.Atoi(vars.Get(storageRESTTotalVersions))
	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}

	versions := make([]FileInfo, totalVersions)
	decoder := msgp.NewReader(r.Body)
	for i := 0; i < totalVersions; i++ {
		dst := &versions[i]
		if err := dst.DecodeMsg(decoder); err != nil {
			s.writeErrorResponse(w, err)
			return
		}
	}

	dErrsResp := &DeleteVersionsErrsResp{Errs: make([]error, totalVersions)}

	setEventStreamHeaders(w)
	encoder := gob.NewEncoder(w)
	done := keepHTTPResponseAlive(w)
	errs := s.storage.DeleteVersions(r.Context(), volume, versions)
	done(nil)
	for idx := range versions {
		if errs[idx] != nil {
			dErrsResp.Errs[idx] = StorageErr(errs[idx].Error())
		}
	}
	encoder.Encode(dErrsResp)
	w.(http.Flusher).Flush()
}

// RenameDataHandler - renames a meta object and data dir to destination.
func (s *storageRESTServer) RenameDataHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	srcVolume := vars[storageRESTSrcVolume]
	srcFilePath := vars[storageRESTSrcPath]
	dataDir := vars[storageRESTDataDir]
	dstVolume := vars[storageRESTDstVolume]
	dstFilePath := vars[storageRESTDstPath]
	err := s.storage.RenameData(r.Context(), srcVolume, srcFilePath, dataDir, dstVolume, dstFilePath)
	if err != nil {
		s.writeErrorResponse(w, err)
	}
}

// RenameFileHandler - rename a file.
func (s *storageRESTServer) RenameFileHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	srcVolume := vars[storageRESTSrcVolume]
	srcFilePath := vars[storageRESTSrcPath]
	dstVolume := vars[storageRESTDstVolume]
	dstFilePath := vars[storageRESTDstPath]
	err := s.storage.RenameFile(r.Context(), srcVolume, srcFilePath, dstVolume, dstFilePath)
	if err != nil {
		s.writeErrorResponse(w, err)
	}
}

// keepHTTPResponseAlive can be used to avoid timeouts with long storage
// operations, such as bitrot verification or data usage crawling.
// Every 10 seconds a space character is sent.
// The returned function should always be called to release resources.
// An optional error can be sent, which the receiver will see as a text-only
// error without its original type.
// waitForHTTPResponse should be used on the receiving side.
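// Wire format produced here (and consumed by waitForHTTPResponse): zero or
// more filler bytes (0x20) while the operation runs, then a single status
// byte - 0 followed by the payload on success, or 1 followed by the error
// text on failure.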
func keepHTTPResponseAlive(w http.ResponseWriter) func(error) {
	doneCh := make(chan error)
	go func() {
		defer close(doneCh)
		ticker := time.NewTicker(time.Second * 10)
		for {
			select {
			case <-ticker.C:
				// Response not ready, write a filler byte.
				w.Write([]byte{32})
				w.(http.Flusher).Flush()
			case err := <-doneCh:
				if err != nil {
					w.Write([]byte{1})
					w.Write([]byte(err.Error()))
				} else {
					w.Write([]byte{0})
				}
				ticker.Stop()
				return
			}
		}
	}()
	return func(err error) {
		if doneCh == nil {
			return
		}
		// Indicate we are ready to write.
		doneCh <- err

		// Wait for channel to be closed so we don't race on writes.
		<-doneCh

		// Clear so we can be called multiple times without crashing.
		doneCh = nil
	}
}
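
// The sketch below is illustrative only and is not registered with any
// router; it shows the intended server-side pairing of keepHTTPResponseAlive
// with a long-running operation, mirroring CrawlAndGetDataUsageHandler above.
func exampleKeepAliveHandler(w http.ResponseWriter, result []byte, opErr error) {
	done := keepHTTPResponseAlive(w)
	// ... the long-running storage operation would run here ...
	done(opErr) // stops the filler bytes; writes 1+error text on failure, 0 on success
	if opErr != nil {
		return
	}
	w.Write(result)
	w.(http.Flusher).Flush()
}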

// waitForHTTPResponse will wait for responses where keepHTTPResponseAlive
// has been used.
// The returned reader contains the payload.
func waitForHTTPResponse(respBody io.Reader) (io.Reader, error) {
	reader := bufio.NewReader(respBody)
	for {
		b, err := reader.ReadByte()
		if err != nil {
			return nil, err
		}
		// Check if we have a response ready or a filler byte.
		switch b {
		case 0:
			return reader, nil
		case 1:
			errorText, err := ioutil.ReadAll(reader)
			if err != nil {
				return nil, err
			}
			return nil, errors.New(string(errorText))
		case 32:
			continue
		default:
			return nil, fmt.Errorf("unexpected filler byte: %d", b)
		}
	}
}
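
// A hypothetical client-side sketch (the real callers live in the storage
// REST client, not in this file): wait out the keep-alive filler bytes, then
// gob-decode the response a handler such as VerifyFileHandler writes. It
// assumes the concrete error type inside VerifyFileResp (StorageErr) has been
// registered with gob by the client.
func exampleDecodeVerifyFileResp(respBody io.Reader) (*VerifyFileResp, error) {
	reader, err := waitForHTTPResponse(respBody)
	if err != nil {
		return nil, err
	}
	verifyResp := &VerifyFileResp{}
	if err = gob.NewDecoder(reader).Decode(verifyResp); err != nil {
		return nil, err
	}
	return verifyResp, nil
}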

// drainCloser can be used for wrapping an http response.
// It will drain the body before closing.
type drainCloser struct {
	rc io.ReadCloser
}

// Read forwards the read operation.
func (f drainCloser) Read(p []byte) (n int, err error) {
	return f.rc.Read(p)
}

// Close drains the body and closes the upstream.
func (f drainCloser) Close() error {
	xhttp.DrainBody(f.rc)
	return nil
}

// httpStreamResponse allows streaming a response while still being able to
// send an error.
type httpStreamResponse struct {
	done  chan error
	block chan []byte
	err   error
}

// Write part of the streaming response.
// Note that upstream errors are currently not forwarded, but may be in the future.
func (h *httpStreamResponse) Write(b []byte) (int, error) {
	if len(b) == 0 || h.err != nil {
		// Ignore 0 length blocks
		return 0, h.err
	}
	tmp := make([]byte, len(b))
	copy(tmp, b)
	h.block <- tmp
	return len(b), h.err
}

// CloseWithError will close the stream and return the specified error.
// This can be done several times, but only the first error will be sent.
// After calling this the stream should not be written to.
func (h *httpStreamResponse) CloseWithError(err error) {
	if h.done == nil {
		return
	}
	h.done <- err
	h.err = err
	// Indicates that the response is done.
	<-h.done
	h.done = nil
}

// streamHTTPResponse can be used to avoid timeouts with long storage
// operations, such as bitrot verification or data usage crawling.
// Every 10 seconds a space character is sent.
// CloseWithError should always be called on the returned response to
// release resources; an optional error sent this way reaches the receiver
// as a text-only error without its original type.
// waitForHTTPStream should be used on the receiving side.
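// Wire format produced here (and consumed by waitForHTTPStream): filler bytes
// (0x20) while idle, data frames consisting of the byte 2 followed by a
// little-endian uint32 length and that many payload bytes, and a terminating
// status byte - 0 on success or 1 followed by the error text on failure
// (3 marks a deprecated gob-encoded error).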
func streamHTTPResponse(w http.ResponseWriter) *httpStreamResponse {
	doneCh := make(chan error)
	blockCh := make(chan []byte)
	h := httpStreamResponse{done: doneCh, block: blockCh}
	go func() {
		ticker := time.NewTicker(time.Second * 10)
		for {
			select {
			case <-ticker.C:
				// Response not ready, write a filler byte.
				w.Write([]byte{32})
				w.(http.Flusher).Flush()
			case err := <-doneCh:
				ticker.Stop()
				defer close(doneCh)
				if err != nil {
					w.Write([]byte{1})
					w.Write([]byte(err.Error()))
				} else {
					w.Write([]byte{0})
				}
				return
			case block := <-blockCh:
				var tmp [5]byte
				tmp[0] = 2
				binary.LittleEndian.PutUint32(tmp[1:], uint32(len(block)))
				w.Write(tmp[:])
				w.Write(block)
				w.(http.Flusher).Flush()
			}
		}
	}()
	return &h
}

// waitForHTTPStream will wait for responses where
// streamHTTPResponse has been used.
// The payload is written to w as it arrives.
func waitForHTTPStream(respBody io.ReadCloser, w io.Writer) error {
	var tmp [1]byte
	for {
		_, err := io.ReadFull(respBody, tmp[:])
		if err != nil {
			return err
		}
		// Check if we have a response ready or a filler byte.
		switch tmp[0] {
		case 0:
			// 0 is unbuffered, copy the rest.
			_, err := io.Copy(w, respBody)
			respBody.Close()
			if err == io.EOF {
				return nil
			}
			return err
		case 1:
			errorText, err := ioutil.ReadAll(respBody)
			if err != nil {
				return err
			}
			respBody.Close()
			return errors.New(string(errorText))
		case 3:
			// gob style is already deprecated, we can remove this when the
			// storage API version is greater than or equal to 23.
			defer respBody.Close()
			dec := gob.NewDecoder(respBody)
			var err error
			if de := dec.Decode(&err); de == nil {
				return err
			}
			return errors.New("rpc error")
		case 2:
			// Block of data
			var tmp [4]byte
			_, err := io.ReadFull(respBody, tmp[:])
			if err != nil {
				return err
			}

			length := binary.LittleEndian.Uint32(tmp[:])
			_, err = io.CopyN(w, respBody, int64(length))
			if err != nil {
				return err
			}
			continue
		case 32:
			continue
		default:
			go xhttp.DrainBody(respBody)
			return fmt.Errorf("unexpected filler byte: %d", tmp[0])
		}
	}
}
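
// A hypothetical client-side sketch: expose a streamed response as an
// io.ReadCloser by pumping it through an io.Pipe, so the payload can be
// consumed while it is still arriving.
func exampleStreamToReader(respBody io.ReadCloser) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		pw.CloseWithError(waitForHTTPStream(respBody, pw))
	}()
	return pr
}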

// VerifyFileResp - VerifyFile()'s response.
type VerifyFileResp struct {
	Err error
}

// VerifyFileHandler - Verify all parts of a file for bitrot errors.
func (s *storageRESTServer) VerifyFileHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		return
	}
	vars := mux.Vars(r)
	volume := vars[storageRESTVolume]
	filePath := vars[storageRESTFilePath]

	if r.ContentLength < 0 {
		s.writeErrorResponse(w, errInvalidArgument)
		return
	}

	var fi FileInfo
	if err := msgp.Decode(r.Body, &fi); err != nil {
		s.writeErrorResponse(w, err)
		return
	}

	setEventStreamHeaders(w)
	encoder := gob.NewEncoder(w)
	done := keepHTTPResponseAlive(w)
	err := s.storage.VerifyFile(r.Context(), volume, filePath, fi)
	done(nil)
	vresp := &VerifyFileResp{}
	if err != nil {
		vresp.Err = StorageErr(err.Error())
	}
	encoder.Encode(vresp)
	w.(http.Flusher).Flush()
}

// A single function to log certain errors as fatal
// or informative based on the `exit` flag; please look
// at each error implementation for added hints.
//
// FIXME: This is an unusual function but serves its purpose for
// now, need to revisit the overall erroring structure here.
// Do not like it :-(
func logFatalErrs(err error, endpoint Endpoint, exit bool) {
	if errors.Is(err, errMinDiskSize) {
		logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(err.Error()), "Unable to initialize backend")
	} else if errors.Is(err, errUnsupportedDisk) {
		var hint string
		if endpoint.URL != nil {
			hint = fmt.Sprintf("Disk '%s' does not support O_DIRECT flags, MinIO erasure coding requires filesystems with O_DIRECT support", endpoint.Path)
		} else {
			hint = "Disks do not support O_DIRECT flags, MinIO erasure coding requires filesystems with O_DIRECT support"
		}
		logger.Fatal(config.ErrUnsupportedBackend(err).Hint(hint), "Unable to initialize backend")
	} else if errors.Is(err, errDiskNotDir) {
		var hint string
		if endpoint.URL != nil {
			hint = fmt.Sprintf("Disk '%s' is not a directory, MinIO erasure coding needs a directory", endpoint.Path)
		} else {
			hint = "Disks are not directories, MinIO erasure coding needs directories"
		}
		logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(hint), "Unable to initialize backend")
	} else if errors.Is(err, errFileAccessDenied) {
		// Show a descriptive error with a hint about how to fix it.
		var username string
		if u, err := user.Current(); err == nil {
			username = u.Username
		} else {
			username = "<your-username>"
		}
		var hint string
		if endpoint.URL != nil {
			hint = fmt.Sprintf("Run the following command to add write permissions: `sudo chown -R %s %s && sudo chmod u+rxw %s`",
				username, endpoint.Path, endpoint.Path)
		} else {
			hint = fmt.Sprintf("Run the following command to add write permissions: `sudo chown -R %s. <path> && sudo chmod u+rxw <path>`", username)
		}
		logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(hint), "Unable to initialize backend")
	} else if errors.Is(err, errFaultyDisk) {
		if !exit {
			logger.LogIf(GlobalContext, fmt.Errorf("disk is faulty at %s, please replace the drive - disk will be offline", endpoint))
		} else {
			logger.Fatal(err, "Unable to initialize backend")
		}
	} else if errors.Is(err, errDiskFull) {
		if !exit {
			logger.LogIf(GlobalContext, fmt.Errorf("disk is already full at %s, incoming I/O will fail - disk will be offline", endpoint))
		} else {
			logger.Fatal(err, "Unable to initialize backend")
		}
	} else {
		if !exit {
			logger.LogIf(GlobalContext, fmt.Errorf("disk returned an unexpected error at %s, please investigate - disk will be offline", endpoint))
		} else {
			logger.Fatal(err, "Unable to initialize backend")
		}
	}
}

// registerStorageRESTHandlers - register storage REST handlers.
func registerStorageRESTHandlers(router *mux.Router, endpointServerPools EndpointServerPools) {
	for _, ep := range endpointServerPools {
		for _, endpoint := range ep.Endpoints {
			if !endpoint.IsLocal {
				continue
			}
			storage, err := newXLStorage(endpoint)
			if err != nil {
				// For supported (non-fatal) errors we don't fail;
				// print a message and move forward.
				logFatalErrs(err, endpoint, false)
			}

			server := &storageRESTServer{storage: storage}

			subrouter := router.PathPrefix(path.Join(storageRESTPrefix, endpoint.Path)).Subrouter()

			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodHealth).HandlerFunc(httpTraceHdrs(server.HealthHandler))
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDiskInfo).HandlerFunc(httpTraceHdrs(server.DiskInfoHandler))
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCrawlAndGetDataUsage).HandlerFunc(httpTraceHdrs(server.CrawlAndGetDataUsageHandler))
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodMakeVol).HandlerFunc(httpTraceHdrs(server.MakeVolHandler)).Queries(restQueries(storageRESTVolume)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodMakeVolBulk).HandlerFunc(httpTraceHdrs(server.MakeVolBulkHandler)).Queries(restQueries(storageRESTVolumes)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodStatVol).HandlerFunc(httpTraceHdrs(server.StatVolHandler)).Queries(restQueries(storageRESTVolume)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteVol).HandlerFunc(httpTraceHdrs(server.DeleteVolHandler)).Queries(restQueries(storageRESTVolume)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodListVols).HandlerFunc(httpTraceHdrs(server.ListVolsHandler))

			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodAppendFile).HandlerFunc(httpTraceHdrs(server.AppendFileHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWriteAll).HandlerFunc(httpTraceHdrs(server.WriteAllHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWriteMetadata).HandlerFunc(httpTraceHdrs(server.WriteMetadataHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteVersion).HandlerFunc(httpTraceHdrs(server.DeleteVersionHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadVersion).HandlerFunc(httpTraceHdrs(server.ReadVersionHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTVersionID, storageRESTCheckDataDir)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodRenameData).HandlerFunc(httpTraceHdrs(server.RenameDataHandler)).
				Queries(restQueries(storageRESTSrcVolume, storageRESTSrcPath, storageRESTDataDir,
					storageRESTDstVolume, storageRESTDstPath)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCreateFile).HandlerFunc(httpTraceHdrs(server.CreateFileHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTLength)...)

			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCheckFile).HandlerFunc(httpTraceHdrs(server.CheckFileHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCheckParts).HandlerFunc(httpTraceHdrs(server.CheckPartsHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)

			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadAll).HandlerFunc(httpTraceHdrs(server.ReadAllHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadFile).HandlerFunc(httpTraceHdrs(server.ReadFileHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTOffset, storageRESTLength, storageRESTBitrotAlgo, storageRESTBitrotHash)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadFileStream).HandlerFunc(httpTraceHdrs(server.ReadFileStreamHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTOffset, storageRESTLength)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodListDir).HandlerFunc(httpTraceHdrs(server.ListDirHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTCount)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWalkVersions).HandlerFunc(httpTraceHdrs(server.WalkVersionsHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTMarkerPath, storageRESTRecursive)...)

			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteVersions).HandlerFunc(httpTraceHdrs(server.DeleteVersionsHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTTotalVersions)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteFile).HandlerFunc(httpTraceHdrs(server.DeleteFileHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTRecursive)...)

			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodRenameFile).HandlerFunc(httpTraceHdrs(server.RenameFileHandler)).
				Queries(restQueries(storageRESTSrcVolume, storageRESTSrcPath, storageRESTDstVolume, storageRESTDstPath)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodVerifyFile).HandlerFunc(httpTraceHdrs(server.VerifyFileHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWalkDir).HandlerFunc(httpTraceHdrs(server.WalkDirHandler)).
				Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTRecursive)...)
		}
	}
}