minio/internal/grid/grid.go
Klaus Post 51aa59a737
perf: websocket grid connectivity for all internode communication (#18461)
This PR adds a WebSocket grid feature that allows servers to communicate via 
a single two-way connection.

There are two request types:

* Single requests, which are `[]byte => ([]byte, error)`. This is for efficient small
  roundtrips with small payloads.

* Streaming requests which are `[]byte, chan []byte => chan []byte (and error)`,
  which allows for different combinations of full two-way streams with an initial payload.

Only a single stream is created between two machines - and there is, as such, no
server/client relation since both sides can initiate and handle requests. Which server
initiates the request is decided deterministically on the server names.

Requests are made through a mux client and server, which handles message
passing, congestion, cancelation, timeouts, etc.

If a connection is lost, all requests are canceled, and the calling server will try
to reconnect. Registered handlers can operate directly on byte 
slices or use a higher-level generics abstraction.

There is no versioning of handlers/clients, and incompatible changes should
be handled by adding new handlers.

The request path can be changed to a new one for any protocol changes.

First, all servers create a "Manager." The manager must know its address 
as well as all remote addresses. This will manage all connections.
To get a connection to any remote, ask the manager to provide it
using the remote address:

```
func (m *Manager) Connection(host string) *Connection
```

All serverside handlers must also be registered on the manager. This will
make sure that all incoming requests are served. The number of in-flight 
requests and responses must also be given for streaming requests.

The "Connection" returned manages the mux-clients. Requests issued
to the connection will be sent to the remote.

* `func (c *Connection) Request(ctx context.Context, h HandlerID, req []byte) ([]byte, error)`
   performs a single request and returns the result. Any deadline provided on the request is
   forwarded to the server, and canceling the context will make the function return at once.

* `func (c *Connection) NewStream(ctx context.Context, h HandlerID, payload []byte) (st *Stream, err error)`
   will initiate a remote call and send the initial payload.

```Go
// A Stream is a two-way stream.
// All responses *must* be read by the caller.
// If the call is canceled through the context,
// the appropriate error will be returned.
type Stream struct {
	// Responses from the remote server.
	// Channel will be closed after an error or when the remote closes.
	// All responses *must* be read by the caller until either an error is returned or the channel is closed.
	// Canceling the context will cause the context cancellation error to be returned.
	Responses <-chan Response

	// Requests sent to the server.
	// If the handler is defined with 0 incoming capacity this will be nil.
	// Channel *must* be closed to signal the end of the stream.
	// If the request context is canceled, the stream will no longer process requests.
	Requests chan<- []byte
}

type Response struct {
	Msg []byte
	Err error
}
```

There are generic versions of the server/client handlers that allow the use of type
safe implementations for data types that support msgpack marshal/unmarshal.
2023-11-20 17:09:35 -08:00

288 lines
6.3 KiB
Go

// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Package grid provides single-connection two-way grid communication.
package grid
import (
"context"
"errors"
"fmt"
"io"
"sync"
"time"
"github.com/gobwas/ws/wsutil"
)
// ErrDisconnected is returned when the connection to the remote has been lost during the call.
// It is a sentinel error; compare with errors.Is.
var ErrDisconnected = errors.New("remote disconnected")
const (
	// minBufferSize is the minimum buffer size.
	// Buffers smaller than this are not returned to the pool.
	minBufferSize = 1 << 10

	// defaultBufferSize is the default buffer allocation size.
	defaultBufferSize = 4 << 10

	// maxBufferSize is the maximum buffer size.
	// Buffers larger than this are not returned to the pool.
	maxBufferSize = 64 << 10

	// maxMergeMessages: if there is a queue, merge up to this many messages into one write.
	maxMergeMessages = 20

	// clientPingInterval will ping the remote handler every 15 seconds.
	// Clients disconnect when we exceed 2 intervals.
	clientPingInterval = 15 * time.Second

	// defaultSingleRequestTimeout is the deadline for single (non-streaming) requests to complete.
	// Used only if no deadline is provided on the context.
	defaultSingleRequestTimeout = time.Minute
)
// internalByteBuffer is the pool backing the default GetByteBuffer/PutByteBuffer.
// Pointers to slices are pooled (not the slices themselves) so Put does not allocate.
var internalByteBuffer = sync.Pool{
	New: func() any {
		m := make([]byte, 0, defaultBufferSize)
		return &m
	},
}
// GetByteBuffer returns a zero-length byte buffer from the shared pool.
// It can be replaced with a custom allocator; when replacing it,
// PutByteBuffer should be replaced as well.
// There is no minimum capacity guarantee.
var GetByteBuffer = func() []byte {
	buf := internalByteBuffer.Get().(*[]byte)
	return (*buf)[:0]
}
// PutByteBuffer returns a byte buffer to the shared pool.
// Buffers outside [minBufferSize, maxBufferSize) are dropped
// so the pool neither fills with tiny buffers nor pins huge ones.
var PutByteBuffer = func(b []byte) {
	c := cap(b)
	if c < minBufferSize || c >= maxBufferSize {
		return
	}
	internalByteBuffer.Put(&b)
}
// readAllInto reads from r, appending to b, until an error or EOF, and
// returns the accumulated data. A successful call returns err == nil, not
// err == io.EOF: since readAllInto is defined to read until EOF, an EOF
// from Read is not reported as an error.
func readAllInto(b []byte, r *wsutil.Reader) ([]byte, error) {
	for {
		if len(b) == cap(b) {
			// Out of room; grow and let append choose the new capacity.
			b = append(b, 0)[:len(b)]
		}
		n, err := r.Read(b[len(b):cap(b)])
		b = b[:len(b)+n]
		if err == nil {
			continue
		}
		if errors.Is(err, io.EOF) {
			err = nil
		}
		return b, err
	}
}
// getDeadline clamps d to the range [0, MaxDeadline]:
// durations under 1ms collapse to 0 (no deadline), and durations
// above MaxDeadline are capped at MaxDeadline.
func getDeadline(d time.Duration) time.Duration {
	switch {
	case d < time.Millisecond:
		return 0
	case d > MaxDeadline:
		return MaxDeadline
	default:
		return d
	}
}
// writerWrapper adapts a send-only byte-slice channel to io.Writer.
// Each Write copies its payload into a pooled buffer and sends it on ch,
// aborting when ctx is canceled. Created via WriterToChannel.
type writerWrapper struct {
	ch  chan<- []byte
	ctx context.Context
}
// Write copies p into a pooled buffer and sends it on w.ch,
// blocking until the send succeeds or w.ctx is canceled.
// On success it reports len(p) bytes written; on cancellation it
// returns 0 and the context's cause.
func (w *writerWrapper) Write(p []byte) (n int, err error) {
	buf := GetByteBuffer()
	if cap(buf) < len(p) {
		// Pooled buffer is too small for this payload;
		// return it and allocate one that fits.
		PutByteBuffer(buf)
		buf = make([]byte, len(p))
	}
	buf = buf[:len(p)]
	copy(buf, p)
	select {
	case w.ch <- buf:
		return len(p), nil
	case <-w.ctx.Done():
		// The buffer was never handed off; recycle it instead of
		// dropping it on the floor.
		PutByteBuffer(buf)
		return 0, context.Cause(w.ctx)
	}
}
// WriterToChannel returns an io.Writer that forwards every Write to ch.
// The context serves two purposes: it supplies the error returned on a
// failed write, and it ensures the writer is not abandoned forever if
// the channel stops being drained.
func WriterToChannel(ctx context.Context, ch chan<- []byte) io.Writer {
	w := writerWrapper{ctx: ctx, ch: ch}
	return &w
}
// bytesOrLength renders small (<=100 byte) slices with their contents,
// and larger slices as just their length, for compact logging.
func bytesOrLength(b []byte) string {
	if len(b) <= 100 {
		return fmt.Sprint(b)
	}
	return fmt.Sprintf("%d bytes", len(b))
}
// lockedClientMap is a mutex-guarded map of mux clients keyed by mux ID.
// All access must go through its methods; the zero value is not usable
// until m is initialized (e.g. via Clear or an explicit make).
type lockedClientMap struct {
	m  map[uint64]*muxClient
	mu sync.Mutex
}
// Load returns the client registered under id and whether it exists.
func (m *lockedClientMap) Load(id uint64) (*muxClient, bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	c, found := m.m[id]
	return c, found
}
// LoadAndDelete removes the client registered under id, returning it
// together with whether it was present.
func (m *lockedClientMap) LoadAndDelete(id uint64) (*muxClient, bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	c, found := m.m[id]
	if found {
		delete(m.m, id)
	}
	return c, found
}
// Size reports the number of registered clients.
func (m *lockedClientMap) Size() int {
	m.mu.Lock()
	defer m.mu.Unlock()
	return len(m.m)
}
// Delete removes the client registered under id, if any.
func (m *lockedClientMap) Delete(id uint64) {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.m, id)
}
// Range calls fn for each entry until fn returns false.
// The lock is held for the duration, so fn must not call back
// into this map. Iteration order is unspecified.
func (m *lockedClientMap) Range(fn func(key uint64, value *muxClient) bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	for id, c := range m.m {
		if !fn(id, c) {
			return
		}
	}
}
// Clear discards all entries by swapping in a fresh, empty map.
func (m *lockedClientMap) Clear() {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.m = make(map[uint64]*muxClient)
}
// LoadOrStore returns the existing client for id if present; otherwise
// it stores v. The bool result is true if the value was already present.
func (m *lockedClientMap) LoadOrStore(id uint64, v *muxClient) (*muxClient, bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if existing, found := m.m[id]; found {
		return existing, true
	}
	m.m[id] = v
	return v, false
}
// lockedServerMap is a mutex-guarded map of mux servers keyed by mux ID.
// All access must go through its methods; the zero value is not usable
// until m is initialized (e.g. via Clear or an explicit make).
type lockedServerMap struct {
	m  map[uint64]*muxServer
	mu sync.Mutex
}
// Load returns the server registered under id and whether it exists.
func (m *lockedServerMap) Load(id uint64) (*muxServer, bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	s, found := m.m[id]
	return s, found
}
// LoadAndDelete removes the server registered under id, returning it
// together with whether it was present.
func (m *lockedServerMap) LoadAndDelete(id uint64) (*muxServer, bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	s, found := m.m[id]
	if found {
		delete(m.m, id)
	}
	return s, found
}
// Size reports the number of registered servers.
func (m *lockedServerMap) Size() int {
	m.mu.Lock()
	defer m.mu.Unlock()
	return len(m.m)
}
// Delete removes the server registered under id, if any.
func (m *lockedServerMap) Delete(id uint64) {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.m, id)
}
// Range calls fn for each entry until fn returns false.
// The lock is held for the duration, so fn must not call back
// into this map. Iteration order is unspecified.
func (m *lockedServerMap) Range(fn func(key uint64, value *muxServer) bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	for id, s := range m.m {
		if !fn(id, s) {
			return
		}
	}
}
// Clear discards all entries by swapping in a fresh, empty map.
func (m *lockedServerMap) Clear() {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.m = make(map[uint64]*muxServer)
}
// LoadOrStore returns the existing server for id if present; otherwise
// it stores v. The bool result is true if the value was already present.
func (m *lockedServerMap) LoadOrStore(id uint64, v *muxServer) (*muxServer, bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if existing, found := m.m[id]; found {
		return existing, true
	}
	m.m[id] = v
	return v, false
}
// LoadOrCompute returns the existing server for id if present; otherwise
// it invokes fn (while holding the lock) and stores the result.
// The bool result is true if the value was already present.
func (m *lockedServerMap) LoadOrCompute(id uint64, fn func() *muxServer) (*muxServer, bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if existing, found := m.m[id]; found {
		return existing, true
	}
	created := fn()
	m.m[id] = created
	return created, false
}