Implement inspect data API v2 (#15474)

Co-authored-by: Klaus Post <klauspost@gmail.com>
Author: Anis Elleuch
Date: 2022-11-02 21:36:38 +01:00
Committed by: GitHub
Parent: d2c9a9e395
Commit: 7e73fc2870
14 changed files with 1648 additions and 162 deletions


@@ -21,8 +21,12 @@ import (
"bytes"
"context"
crand "crypto/rand"
"crypto/rsa"
"crypto/subtle"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"hash/crc32"
@@ -44,6 +48,7 @@ import (
"github.com/gorilla/mux"
"github.com/klauspost/compress/zip"
"github.com/minio/madmin-go"
"github.com/minio/madmin-go/estream"
"github.com/minio/minio/internal/dsync"
"github.com/minio/minio/internal/handlers"
xhttp "github.com/minio/minio/internal/http"
@@ -2585,13 +2590,14 @@ func embedFileInZip(zipWriter *zip.Writer, name string, data []byte) error {
return err
}
// appendClusterMetaInfoToZip gets information about the current cluster and embeds
// it in the passed zip writer. This is not a critical function, and it is allowed
// to fail within a ten-second timeout.
func appendClusterMetaInfoToZip(ctx context.Context, zipWriter *zip.Writer) {
// getClusterMetaInfo gets information about the current cluster and
// returns it.
// This is not a critical function; it is allowed
// to fail within a ten-second timeout, returning nil.
func getClusterMetaInfo(ctx context.Context) []byte {
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return
return nil
}
// Add a ten-second timeout because getting profiling data
@@ -2627,20 +2633,29 @@ func appendClusterMetaInfoToZip(ctx context.Context, zipWriter *zip.Writer) {
select {
case <-ctx.Done():
return
return nil
case ci := <-resultCh:
out, err := json.MarshalIndent(ci, "", " ")
if err != nil {
logger.LogIf(ctx, err)
return
}
err = embedFileInZip(zipWriter, "cluster.info", out)
out, err := json.MarshalIndent(ci, "", " ")
if err != nil {
logger.LogIf(ctx, err)
return nil
}
return out
}
}
func bytesToPublicKey(pub []byte) (*rsa.PublicKey, error) {
block, _ := pem.Decode(pub)
if block != nil {
pub = block.Bytes
}
key, err := x509.ParsePKCS1PublicKey(pub)
if err != nil {
return nil, err
}
return key, nil
}
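// Illustrative sketch, not part of this commit: how a caller could produce the
// base64-encoded PKCS#1 public key that bytesToPublicKey and the "public-key"
// form field expect. The helper name below is an assumption made for the example.
func encodeInspectPublicKey(pub *rsa.PublicKey) string {
	block := &pem.Block{
		Type:  "RSA PUBLIC KEY",
		Bytes: x509.MarshalPKCS1PublicKey(pub),
	}
	return base64.StdEncoding.EncodeToString(pem.EncodeToMemory(block))
}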
// getRawDataer provides an interface for getting raw FS files.
type getRawDataer interface {
GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, info StatInfo) error) error
@@ -2667,12 +2682,17 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
return
}
if err := parseForm(r); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
volume := r.Form.Get("volume")
file := r.Form.Get("file")
if len(volume) == 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketName), r.URL)
return
}
file := r.Form.Get("file")
if len(file) == 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@@ -2685,41 +2705,102 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
return
}
var key [32]byte
// MUST use crypto/rand
n, err := crand.Read(key[:])
if err != nil || n != len(key) {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
return
}
stream, err := sio.AES_256_GCM.Stream(key[:])
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
return
}
// Zero nonce, we only use each key once, and 32 bytes is plenty.
nonce := make([]byte, stream.NonceSize())
encw := stream.EncryptWriter(w, nonce, nil)
var publicKey *rsa.PublicKey
defer encw.Close()
publicKeyB64 := r.Form.Get("public-key")
if publicKeyB64 != "" {
publicKeyBytes, err := base64.StdEncoding.DecodeString(publicKeyB64)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
publicKey, err = bytesToPublicKey(publicKeyBytes)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// Write a version for making *incompatible* changes.
// The AdminClient will reject any version it does not know.
w.Write([]byte{1})
var inspectZipW *zip.Writer
if publicKey != nil {
w.WriteHeader(200)
stream := estream.NewWriter(w)
defer stream.Close()
// Write key first (without encryption)
_, err = w.Write(key[:])
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
return
clusterKey, err := bytesToPublicKey(subnetAdminPublicKey)
if err != nil {
logger.LogIf(ctx, stream.AddError(err.Error()))
return
}
err = stream.AddKeyEncrypted(clusterKey)
if err != nil {
logger.LogIf(ctx, stream.AddError(err.Error()))
return
}
if b := getClusterMetaInfo(ctx); len(b) > 0 {
w, err := stream.AddEncryptedStream("cluster.info", nil)
if err != nil {
logger.LogIf(ctx, err)
return
}
w.Write(b)
w.Close()
}
// Add new key for inspect data.
if err := stream.AddKeyEncrypted(publicKey); err != nil {
logger.LogIf(ctx, stream.AddError(err.Error()))
return
}
encStream, err := stream.AddEncryptedStream("inspect.zip", nil)
if err != nil {
logger.LogIf(ctx, stream.AddError(err.Error()))
return
}
defer encStream.Close()
inspectZipW = zip.NewWriter(encStream)
defer inspectZipW.Close()
} else {
// Legacy: Remove if we stop supporting inspection without public key.
var key [32]byte
// MUST use crypto/rand
n, err := crand.Read(key[:])
if err != nil || n != len(key) {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write a version for making *incompatible* changes.
// The AdminClient will reject any version it does not know.
if publicKey == nil {
w.Write([]byte{1})
w.Write(key[:])
}
stream, err := sio.AES_256_GCM.Stream(key[:])
if err != nil {
logger.LogIf(ctx, err)
return
}
// Zero nonce, we only use each key once, and 32 bytes is plenty.
nonce := make([]byte, stream.NonceSize())
encw := stream.EncryptWriter(w, nonce, nil)
defer encw.Close()
// Initialize a zip writer which will provide zipped content
// of the inspected data from all nodes
inspectZipW = zip.NewWriter(encw)
defer inspectZipW.Close()
if b := getClusterMetaInfo(ctx); len(b) > 0 {
logger.LogIf(ctx, embedFileInZip(inspectZipW, "cluster.info", b))
}
}
// Initialize a zip writer which will provide a zipped content
// of profiling data of all nodes
zipWriter := zip.NewWriter(encw)
defer zipWriter.Close()
rawDataFn := func(r io.Reader, host, disk, filename string, si StatInfo) error {
// Prefix host+disk
filename = path.Join(host, disk, filename)
@@ -2748,17 +2829,17 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
return nil
}
header.Method = zip.Deflate
zwriter, zerr := zipWriter.CreateHeader(header)
zwriter, zerr := inspectZipW.CreateHeader(header)
if zerr != nil {
logger.LogIf(ctx, zerr)
return nil
}
if _, err = io.Copy(zwriter, r); err != nil {
if _, err := io.Copy(zwriter, r); err != nil {
logger.LogIf(ctx, err)
}
return nil
}
err = o.GetRawData(ctx, volume, file, rawDataFn)
err := o.GetRawData(ctx, volume, file, rawDataFn)
if !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err)
}
@@ -2770,22 +2851,17 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
if !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err)
}
// save args passed to inspect command
inspectArgs := []string{fmt.Sprintf(" Inspect path: %s%s%s\n", volume, slashSeparator, file)}
cmdLine := []string{"Server command line args: "}
for _, pool := range globalEndpoints {
cmdLine = append(cmdLine, pool.CmdLine)
}
cmdLine = append(cmdLine, "\n")
inspectArgs = append(inspectArgs, cmdLine...)
inspectArgsBytes := []byte(strings.Join(inspectArgs, " "))
if err = rawDataFn(bytes.NewReader(inspectArgsBytes), "", "", "inspect-input.txt", StatInfo{
Size: int64(len(inspectArgsBytes)),
}); err != nil {
logger.LogIf(ctx, err)
}
appendClusterMetaInfoToZip(ctx, zipWriter)
// save args passed to inspect command
var sb bytes.Buffer
fmt.Fprintf(&sb, "Inspect path: %s%s%s\n", volume, slashSeparator, file)
sb.WriteString("Server command line args:")
for _, pool := range globalEndpoints {
sb.WriteString(" ")
sb.WriteString(pool.CmdLine)
}
sb.WriteString("\n")
logger.LogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes()))
}
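// Illustrative sketch, not part of this commit: decoding the legacy (no
// public-key) response written above. The layout is one version byte, the
// 32-byte AES key, then an sio AES-256-GCM stream with a zero nonce wrapping
// the zip archive. The function name is an assumption made for the example.
func decodeLegacyInspect(body io.Reader) (*zip.Reader, error) {
	hdr := make([]byte, 1+32)
	if _, err := io.ReadFull(body, hdr); err != nil {
		return nil, err
	}
	if hdr[0] != 1 {
		return nil, fmt.Errorf("unknown inspect stream version %d", hdr[0])
	}
	stream, err := sio.AES_256_GCM.Stream(hdr[1:])
	if err != nil {
		return nil, err
	}
	// Zero nonce, matching the writer side above.
	nonce := make([]byte, stream.NonceSize())
	plain, err := io.ReadAll(stream.DecryptReader(body, nonce, nil))
	if err != nil {
		return nil, err
	}
	return zip.NewReader(bytes.NewReader(plain), int64(len(plain)))
}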
func createHostAnonymizerForFSMode() map[string]string {


@@ -60,7 +60,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
// Info operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(gz(httpTraceAll(adminAPI.ServerInfoHandler)))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/inspect-data").HandlerFunc(httpTraceHdrs(adminAPI.InspectDataHandler)).Queries("volume", "{volume:.*}", "file", "{file:.*}")
adminRouter.Methods(http.MethodGet, http.MethodPost).Path(adminVersion + "/inspect-data").HandlerFunc(httpTraceAll(adminAPI.InspectDataHandler))
// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(gz(httpTraceAll(adminAPI.StorageInfoHandler)))
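// Illustrative sketch, not part of this commit: the new inspect-data route accepts
// GET and POST with form-encoded parameters instead of requiring "volume" and
// "file" as URL query matchers. The endpoint path, placeholder values and helper
// name are assumptions for the example, and the request signing normally added by
// the madmin-go client is omitted (needs net/url and strings in addition to net/http).
func newInspectRequest(endpoint, publicKeyB64 string) (*http.Request, error) {
	form := url.Values{}
	form.Set("volume", "mybucket")             // placeholder bucket
	form.Set("file", "path/to/object/xl.meta") // placeholder file pattern
	if publicKeyB64 != "" {
		form.Set("public-key", publicKeyB64) // optional: enables the encrypted v2 stream
	}
	req, err := http.NewRequest(http.MethodPost,
		endpoint+"/minio/admin/v3/inspect-data", strings.NewReader(form.Encode()))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	return req, nil
}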


@@ -370,6 +370,9 @@ var (
// MinIO client
globalMinioClient *minio.Client
// Public key for subnet confidential information
subnetAdminPublicKey = []byte("-----BEGIN PUBLIC KEY-----\nMIIBCgKCAQEAyC+ol5v0FP+QcsR6d1KypR/063FInmNEFsFzbEwlHQyEQN3O7kNI\nwVDN1vqp1wDmJYmv4VZGRGzfFw1q+QV7K1TnysrEjrqpVxfxzDQCoUadAp8IxLLc\ns2fjyDNxnZjoC6fTID9C0khKnEa5fPZZc3Ihci9SiCGkPmyUyCGVSxWXIKqL2Lrj\nyDc0pGeEhWeEPqw6q8X2jvTC246tlzqpDeNsPbcv2KblXRcKniQNbBrizT37CKHQ\nM6hc9kugrZbFuo8U5/4RQvZPJnx/DVjLDyoKo2uzuVQs4s+iBrA5sSSLp8rPED/3\n6DgWw3e244Dxtrg972dIT1IOqgn7KUJzVQIDAQAB\n-----END PUBLIC KEY-----")
// Add new variable global values here.
)
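// Illustrative sketch, not part of this commit: the handler embeds two encrypted
// copies of the stream key, one for the SUBNET public key above and one for the
// caller's key, so either matching private key can unlock "inspect.zip". The
// reader-side names (estream.NewReader, SetPrivateKey, SkipEncrypted, NextStream,
// Skip) are assumed from madmin-go's estream package; treat this as an
// approximation, not a reference.
func extractInspectZip(body io.Reader, privKey *rsa.PrivateKey, out io.Writer) error {
	sr, err := estream.NewReader(body)
	if err != nil {
		return err
	}
	sr.SetPrivateKey(privKey)
	sr.SkipEncrypted(true) // skip streams this key cannot open, e.g. the SUBNET-only cluster.info
	for {
		st, err := sr.NextStream()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if st.Name == "inspect.zip" {
			_, err := io.Copy(out, st)
			return err
		}
		if err := st.Skip(); err != nil {
			return err
		}
	}
}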


@@ -315,12 +315,12 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io
// Send profiling data to zip as file
for typ, data := range data {
err := embedFileInZip(zipWriter, fmt.Sprintf("profile-%s-%s", thisAddr, typ), data)
if err != nil {
logger.LogIf(ctx, err)
}
logger.LogIf(ctx, err)
}
if b := getClusterMetaInfo(ctx); len(b) > 0 {
logger.LogIf(ctx, embedFileInZip(zipWriter, "cluster.info", b))
}
appendClusterMetaInfoToZip(ctx, zipWriter)
return
}