Add admin file inspector (#12635)

Download files from *any* bucket/path as an encrypted zip file.

The key is included in the response but can be separated so the zip and the key don't have to be sent over the same channel.

Requires https://github.com/minio/pkg/pull/6
Klaus Post 2021-07-09 11:29:16 -07:00 committed by GitHub
parent 28adb29db3
commit d6a2fe02d3
15 changed files with 361 additions and 5 deletions
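
The response written by InspectDataHandler (added below) is laid out as one version byte (currently 1), the 32-byte key in plaintext, then the zip archive encrypted with sio's AES-256-GCM stream under a zero nonce. A minimal client-side sketch of splitting the key from the archive; the package and function names are illustrative and not part of this commit or of madmin-go:

package inspect

import (
	"errors"
	"io"

	"github.com/secure-io/sio-go"
)

// decryptInspectResponse reads an inspect-data response body, returns the
// plaintext key that prefixes it, and writes the decrypted zip archive to w.
// Assumed layout, per the handler below: [1-byte version][32-byte key][encrypted zip].
func decryptInspectResponse(body io.Reader, w io.Writer) (key [32]byte, err error) {
	var version [1]byte
	if _, err = io.ReadFull(body, version[:]); err != nil {
		return key, err
	}
	if version[0] != 1 {
		return key, errors.New("unknown inspect-data response version")
	}
	if _, err = io.ReadFull(body, key[:]); err != nil {
		return key, err
	}
	stream, err := sio.AES_256_GCM.Stream(key[:])
	if err != nil {
		return key, err
	}
	// Zero nonce, mirroring the server side: each key is used only once.
	nonce := make([]byte, stream.NonceSize())
	_, err = io.Copy(w, stream.DecryptReader(body, nonce, nil))
	return key, err
}

Keeping the returned key separate from the decrypted archive is what allows the two to travel over different channels.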


@@ -19,6 +19,7 @@ package cmd
import (
"context"
crand "crypto/rand"
"crypto/subtle"
"crypto/tls"
"encoding/json"
@@ -37,6 +38,7 @@ import (
"time"
"github.com/gorilla/mux"
"github.com/klauspost/compress/zip"
"github.com/minio/kes"
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/auth"
@@ -49,6 +51,7 @@ import (
"github.com/minio/minio/internal/logger/message/log"
iampolicy "github.com/minio/pkg/iam/policy"
xnet "github.com/minio/pkg/net"
"github.com/secure-io/sio-go"
)
const (
@@ -1951,3 +1954,104 @@ func checkConnection(endpointStr string, timeout time.Duration) error {
defer xhttp.DrainBody(resp.Body)
return nil
}
// getRawDataer provides an interface for getting raw FS files.
type getRawDataer interface {
GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, size int64, modtime time.Time) error) error
}
// InspectDataHandler - GET /minio/admin/v3/inspect-data
// ----------
// Download file from all nodes in a zip format
func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InspectData")
// Validate request signature.
_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.InspectDataAction, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return
}
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
o, ok := newObjectLayerFn().(getRawDataer)
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
volume := r.URL.Query().Get("volume")
file := r.URL.Query().Get("file")
if len(volume) == 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketName), r.URL)
return
}
if len(file) == 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
var key [32]byte
// MUST use crypto/rand
n, err := crand.Read(key[:])
if err != nil || n != len(key) {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
return
}
stream, err := sio.AES_256_GCM.Stream(key[:])
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
return
}
// Zero nonce, we only use each key once, and 32 bytes is plenty.
nonce := make([]byte, stream.NonceSize())
encw := stream.EncryptWriter(w, nonce, nil)
defer encw.Close()
// Write a version for making *incompatible* changes.
// The AdminClient will reject any version it does not know.
w.Write([]byte{1})
// Write key first (without encryption)
_, err = w.Write(key[:])
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
return
}
// Initialize a zip writer which will provide zipped content
// of the requested file from all nodes
zipWriter := zip.NewWriter(encw)
defer zipWriter.Close()
err = o.GetRawData(ctx, volume, file, func(r io.Reader, host, disk, filename string, size int64, modtime time.Time) error {
// Prefix host+disk
filename = path.Join(host, disk, filename)
header, zerr := zip.FileInfoHeader(dummyFileInfo{
name: filename,
size: size,
mode: 0600,
modTime: modtime,
isDir: false,
sys: nil,
})
if zerr != nil {
logger.LogIf(ctx, zerr)
return nil
}
header.Method = zip.Deflate
zwriter, zerr := zipWriter.CreateHeader(header)
if zerr != nil {
logger.LogIf(ctx, zerr)
return nil
}
if _, err = io.Copy(zwriter, r); err != nil {
logger.LogIf(ctx, err)
}
return nil
})
logger.LogIf(ctx, err)
}
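
The handler returns the 32-byte key raw. The decryption tool added at the end of this commit expects it as a 72-character hex string: 8 hex characters encoding the IEEE CRC32 of the key as 4 little-endian bytes, followed by the 64 hex characters of the key itself. A hedged sketch of that formatting; the helper name is hypothetical and the actual formatting is presumably done in madmin-go, which is not part of this diff:

import (
	"encoding/binary"
	"encoding/hex"
	"hash/crc32"
)

// inspectKeyString renders a key the way the inspect decryption tool below
// expects it: crc32(key) as 4 little-endian bytes in hex (8 chars), then the
// hex-encoded 32-byte key (64 chars). Hypothetical helper, not in this commit.
func inspectKeyString(key [32]byte) string {
	id := make([]byte, 4)
	binary.LittleEndian.PutUint32(id, crc32.ChecksumIEEE(key[:]))
	return hex.EncodeToString(id) + hex.EncodeToString(key[:])
}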


@@ -54,6 +54,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// Info operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/inspect-data").HandlerFunc(httpTraceHdrs(adminAPI.InspectDataHandler)).Queries("volume", "{volume:.*}", "file", "{file:.*}")
// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))


@@ -147,6 +147,39 @@ func (z *erasureServerPools) GetDisksID(ids ...string) []StorageAPI {
return res
}
// GetRawData will return all files with a given raw path to the callback.
// Errors are ignored, only errors from the callback are returned.
// For now only direct file paths are supported.
func (z *erasureServerPools) GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, size int64, modtime time.Time) error) error {
for _, s := range z.serverPools {
for _, disks := range s.erasureDisks {
for i, disk := range disks {
if disk == OfflineDisk {
continue
}
si, err := disk.StatInfoFile(ctx, volume, file)
if err != nil {
continue
}
r, err := disk.ReadFileStream(ctx, volume, file, 0, si.Size)
if err != nil {
continue
}
defer r.Close()
did, err := disk.GetDiskID()
if err != nil {
did = fmt.Sprintf("disk-%d", i)
}
err = fn(r, disk.Hostname(), did, pathJoin(volume, file), si.Size, si.ModTime)
if err != nil {
return err
}
}
}
}
return nil
}
func (z *erasureServerPools) SetDriveCounts() []int {
setDriveCounts := make([]int, len(z.serverPools))
for i := range z.serverPools {


@@ -27,6 +27,7 @@ import (
"os"
"os/user"
"path"
"path/filepath"
"sort"
"strings"
"sync"
@@ -1509,3 +1510,19 @@ func (fs *FSObjects) TransitionObject(ctx context.Context, bucket, object string
func (fs *FSObjects) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
return NotImplemented{}
}
// GetRawData returns raw file data to the callback.
// Errors are ignored, only errors from the callback are returned.
// For now only direct file paths are supported.
func (fs *FSObjects) GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, size int64, modtime time.Time) error) error {
f, err := os.Open(filepath.Join(fs.fsPath, volume, file))
if err != nil {
return nil
}
defer f.Close()
st, err := f.Stat()
if err != nil || st.IsDir() {
return nil
}
return fn(f, "fs", fs.fsUUID, file, st.Size(), st.ModTime())
}


@@ -291,3 +291,10 @@ func (d *naughtyDisk) VerifyFile(ctx context.Context, volume, path string, fi Fi
}
return d.disk.VerifyFile(ctx, volume, path, fi)
}
func (d *naughtyDisk) StatInfoFile(ctx context.Context, volume, path string) (stat StatInfo, err error) {
if err := d.calcError(); err != nil {
return stat, err
}
return d.disk.StatInfoFile(ctx, volume, path)
}


@@ -74,6 +74,7 @@ type StorageAPI interface {
CheckFile(ctx context.Context, volume string, path string) (err error)
Delete(ctx context.Context, volume string, path string, recursive bool) (err error)
VerifyFile(ctx context.Context, volume, path string, fi FileInfo) error
StatInfoFile(ctx context.Context, volume, path string) (stat StatInfo, err error)
// Write all data, syncs the data to disk.
// Should be used for smaller payloads.


@@ -684,6 +684,23 @@ func (client *storageRESTClient) VerifyFile(ctx context.Context, volume, path st
return toStorageErr(verifyResp.Err)
}
func (client *storageRESTClient) StatInfoFile(ctx context.Context, volume, path string) (stat StatInfo, err error) {
values := make(url.Values)
values.Set(storageRESTVolume, volume)
values.Set(storageRESTFilePath, path)
respBody, err := client.call(ctx, storageRESTMethodStatInfoFile, values, nil, -1)
if err != nil {
return stat, err
}
defer xhttp.DrainBody(respBody)
respReader, err := waitForHTTPResponse(respBody)
if err != nil {
return stat, err
}
err = stat.DecodeMsg(msgpNewReader(respReader))
return stat, err
}
// Close - marks the client as closed.
func (client *storageRESTClient) Close() error {
client.restClient.Close()


@@ -52,6 +52,7 @@ const (
storageRESTMethodRenameFile = "/renamefile"
storageRESTMethodVerifyFile = "/verifyfile"
storageRESTMethodWalkDir = "/walkdir"
storageRESTMethodStatInfoFile = "/statfile"
)
const (


@@ -1038,6 +1038,23 @@ func logFatalErrs(err error, endpoint Endpoint, exit bool) {
}
}
// StatInfoFile returns file stat info.
func (s *storageRESTServer) StatInfoFile(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
filePath := vars[storageRESTFilePath]
done := keepHTTPResponseAlive(w)
si, err := s.storage.StatInfoFile(r.Context(), volume, filePath)
done(err)
if err != nil {
return
}
msgp.Encode(w, &si)
}
// registerStorageRPCRouter - register storage rpc router.
func registerStorageRESTHandlers(router *mux.Router, endpointServerPools EndpointServerPools) {
storageDisks := make([][]*xlStorage, len(endpointServerPools))
@@ -1129,6 +1146,8 @@ func registerStorageRESTHandlers(router *mux.Router, endpointServerPools Endpoin
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWalkDir).HandlerFunc(httpTraceHdrs(server.WalkDirHandler)).
Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTRecursive)...)
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodStatInfoFile).HandlerFunc(httpTraceHdrs(server.StatInfoFile)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
}
}
}


@@ -32,12 +32,13 @@ func _() {
_ = x[storageMetricUpdateMetadata-21]
_ = x[storageMetricReadVersion-22]
_ = x[storageMetricReadAll-23]
_ = x[storageMetricLast-24]
_ = x[storageStatInfoFile-24]
_ = x[storageMetricLast-25]
}
const _storageMetric_name = "MakeVolBulkMakeVolListVolsStatVolDeleteVolWalkDirListDirReadFileAppendFileCreateFileReadFileStreamRenameFileRenameDataCheckPartsCheckFileDeleteDeleteVersionsVerifyFileWriteAllDeleteVersionWriteMetadataUpdateMetadataReadVersionReadAllLast"
const _storageMetric_name = "MakeVolBulkMakeVolListVolsStatVolDeleteVolWalkDirListDirReadFileAppendFileCreateFileReadFileStreamRenameFileRenameDataCheckPartsCheckFileDeleteDeleteVersionsVerifyFileWriteAllDeleteVersionWriteMetadataUpdateMetadataReadVersionReadAllstorageStatInfoFileLast"
var _storageMetric_index = [...]uint8{0, 11, 18, 26, 33, 42, 49, 56, 64, 74, 84, 98, 108, 118, 128, 137, 143, 157, 167, 175, 188, 201, 215, 226, 233, 237}
var _storageMetric_index = [...]uint16{0, 11, 18, 26, 33, 42, 49, 56, 64, 74, 84, 98, 108, 118, 128, 137, 143, 157, 167, 175, 188, 201, 215, 226, 233, 252, 256}
func (i storageMetric) String() string {
if i >= storageMetric(len(_storageMetric_index)-1) {


@@ -58,6 +58,7 @@ const (
storageMetricUpdateMetadata
storageMetricReadVersion
storageMetricReadAll
storageStatInfoFile
// .... add more
@@ -611,6 +612,22 @@ func (p *xlStorageDiskIDCheck) ReadAll(ctx context.Context, volume string, path
return p.storage.ReadAll(ctx, volume, path)
}
func (p *xlStorageDiskIDCheck) StatInfoFile(ctx context.Context, volume, path string) (stat StatInfo, err error) {
defer p.updateStorageMetrics(storageStatInfoFile, volume, path)()
select {
case <-ctx.Done():
return StatInfo{}, ctx.Err()
default:
}
if err = p.checkDiskStale(); err != nil {
return StatInfo{}, err
}
return p.storage.StatInfoFile(ctx, volume, path)
}
func storageTrace(s storageMetric, startTime time.Time, duration time.Duration, path string) madmin.TraceInfo {
return madmin.TraceInfo{
TraceType: madmin.TraceStorage,


@@ -2284,3 +2284,32 @@ func (s *xlStorage) VerifyFile(ctx context.Context, volume, path string, fi File
return nil
}
func (s *xlStorage) StatInfoFile(ctx context.Context, volume, path string) (stat StatInfo, err error) {
volumeDir, err := s.getVolDir(volume)
if err != nil {
return stat, err
}
// Stat a volume entry.
if err = Access(volumeDir); err != nil {
if osIsNotExist(err) {
return stat, errVolumeNotFound
} else if isSysErrIO(err) {
return stat, errFaultyDisk
} else if osIsPermission(err) {
return stat, errVolumeAccessDenied
}
return stat, err
}
filePath := pathJoin(volumeDir, path)
if err := checkPathLength(filePath); err != nil {
return stat, err
}
st, _ := Lstat(filePath)
if st == nil {
return stat, errPathNotFound
}
return StatInfo{ModTime: st.ModTime(), Size: st.Size()}, nil
}


@@ -0,0 +1,108 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bufio"
"encoding/binary"
"encoding/hex"
"flag"
"fmt"
"hash/crc32"
"io"
"log"
"os"
"strings"
"github.com/secure-io/sio-go"
)
var (
key = flag.String("key", "", "decryption string")
//js = flag.Bool("json", false, "expect json input")
)
func main() {
flag.Parse()
args := flag.Args()
switch len(flag.Args()) {
case 0:
// Read from stdin, write to stdout.
decrypt(*key, os.Stdin, os.Stdout)
return
case 1:
r, err := os.Open(args[0])
fatalErr(err)
defer r.Close()
dstName := strings.TrimSuffix(args[0], ".enc") + ".zip"
w, err := os.Create(dstName)
fatalErr(err)
defer w.Close()
if len(*key) == 0 {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Enter Decryption Key: ")
text, _ := reader.ReadString('\n')
// convert CRLF to LF
*key = strings.Replace(text, "\n", "", -1)
}
decrypt(*key, r, w)
fmt.Println("Output decrypted to", dstName)
return
default:
fatalIf(true, "Only 1 file can be decrypted")
os.Exit(1)
}
}
func decrypt(keyHex string, r io.Reader, w io.Writer) {
keyHex = strings.TrimSpace(keyHex)
fatalIf(len(keyHex) != 72, "Unexpected key length: %d, want 72", len(keyHex))
id, err := hex.DecodeString(keyHex[:8])
fatalErr(err)
key, err := hex.DecodeString(keyHex[8:])
fatalErr(err)
// Verify that CRC is ok.
want := binary.LittleEndian.Uint32(id)
got := crc32.ChecksumIEEE(key)
fatalIf(want != got, "Invalid key checksum, want %x, got %x", want, got)
stream, err := sio.AES_256_GCM.Stream(key)
fatalErr(err)
// Zero nonce, we only use each key once, and 32 bytes is plenty.
nonce := make([]byte, stream.NonceSize())
encr := stream.DecryptReader(r, nonce, nil)
_, err = io.Copy(w, encr)
fatalErr(err)
}
func fatalErr(err error) {
if err == nil {
return
}
log.Fatalln(err)
}
func fatalIf(b bool, msg string, v ...interface{}) {
if !b {
return
}
log.Fatalf(msg, v...)
}

go.mod

@@ -48,7 +48,7 @@ require (
github.com/minio/madmin-go v1.0.13
github.com/minio/minio-go/v7 v7.0.13-0.20210706013812-337aa536abe2
github.com/minio/parquet-go v1.0.0
github.com/minio/pkg v1.0.8
github.com/minio/pkg v1.0.10
github.com/minio/rpc v1.0.0
github.com/minio/selfupdate v0.3.1
github.com/minio/sha256-simd v1.0.0

go.sum

@@ -1034,8 +1034,9 @@ github.com/minio/parquet-go v1.0.0 h1:fcWsEvub04Nsl/4hiRBDWlbqd6jhacQieV07a+nhiI
github.com/minio/parquet-go v1.0.0/go.mod h1:aQlkSOfOq2AtQKkuou3mosNVMwNokd+faTacxxk/oHA=
github.com/minio/pkg v1.0.3/go.mod h1:obU54TZ9QlMv0TRaDgQ/JTzf11ZSXxnSfLrm4tMtBP8=
github.com/minio/pkg v1.0.4/go.mod h1:obU54TZ9QlMv0TRaDgQ/JTzf11ZSXxnSfLrm4tMtBP8=
github.com/minio/pkg v1.0.8 h1:lWQwHSeYlvnRoPpO+wS0I4mL6c00ABxBgbGjSmjwOi4=
github.com/minio/pkg v1.0.8/go.mod h1:32x/3OmGB0EOi1N+3ggnp+B5VFkSBBB9svPMVfpnf14=
github.com/minio/pkg v1.0.10 h1:fohpAm/0ttQFf4BzmzH5r6A9JUIfg63AyGCPM0f9/9U=
github.com/minio/pkg v1.0.10/go.mod h1:32x/3OmGB0EOi1N+3ggnp+B5VFkSBBB9svPMVfpnf14=
github.com/minio/rpc v1.0.0 h1:tJCHyLfQF6k6HlMQFpKy2FO/7lc2WP8gLDGMZp18E70=
github.com/minio/rpc v1.0.0/go.mod h1:b9xqF7J0xeMXr0cM4pnBlP7Te7PDsG5JrRxl5dG6Ldk=
github.com/minio/selfupdate v0.3.1 h1:BWEFSNnrZVMUWXbXIgLDNDjbejkmpAmZvy/nCz1HlEs=