Improve tracing & notification scalability (#18903)

* Perform JSON encoding on remote machines and only forward byte slices.
* Migrate tracing & notification to WebSockets.
This commit is contained in:
Klaus Post
2024-01-30 12:49:02 -08:00
committed by GitHub
parent 80ca120088
commit 6da4a9c7bb
11 changed files with 451 additions and 213 deletions

View File

@@ -59,8 +59,10 @@ const (
HandlerRenameData
HandlerRenameFile
HandlerReadAll
HandlerServerVerify
HandlerTrace
HandlerListen
// Add more above here ^^^
// If all handlers are used, the type of Handler can be changed.
// Handlers have no versioning, so non-compatible handler changes must result in new IDs.
@@ -542,6 +544,20 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) Register(m *Manager, handle func
return h.register(m, handle, subroute...)
}
// WithOutCapacity sets the output channel capacity as seen from the handler.
// This must be done before the handler is registered.
func (h *StreamTypeHandler[Payload, Req, Resp]) WithOutCapacity(out int) *StreamTypeHandler[Payload, Req, Resp] {
	h.OutCapacity = out
	return h
}
// WithInCapacity sets the input channel capacity as seen from the handler.
// This must be done before the handler is registered.
func (h *StreamTypeHandler[Payload, Req, Resp]) WithInCapacity(in int) *StreamTypeHandler[Payload, Req, Resp] {
	h.InCapacity = in
	return h
}
// RegisterNoInput a handler for one-way streaming with payload and output stream.
// An optional subroute can be given. Multiple entries are joined with '/'.
func (h *StreamTypeHandler[Payload, Req, Resp]) RegisterNoInput(m *Manager, handle func(ctx context.Context, p Payload, out chan<- Resp) *RemoteErr, subroute ...string) error {

View File

@@ -30,14 +30,16 @@ func _() {
_ = x[HandlerRenameFile-19]
_ = x[HandlerReadAll-20]
_ = x[HandlerServerVerify-21]
_ = x[handlerTest-22]
_ = x[handlerTest2-23]
_ = x[handlerLast-24]
_ = x[HandlerTrace-22]
_ = x[HandlerListen-23]
_ = x[handlerTest-24]
_ = x[handlerTest2-25]
_ = x[handlerLast-26]
}
const _HandlerID_name = "handlerInvalidLockLockLockRLockLockUnlockLockRUnlockLockRefreshLockForceUnlockWalkDirStatVolDiskInfoNSScannerReadXLReadVersionDeleteFileDeleteVersionUpdateMetadataWriteMetadataCheckPartsRenameDataRenameFileReadAllServerVerifyhandlerTesthandlerTest2handlerLast"
const _HandlerID_name = "handlerInvalidLockLockLockRLockLockUnlockLockRUnlockLockRefreshLockForceUnlockWalkDirStatVolDiskInfoNSScannerReadXLReadVersionDeleteFileDeleteVersionUpdateMetadataWriteMetadataCheckPartsRenameDataRenameFileReadAllServerVerifyTraceListenhandlerTesthandlerTest2handlerLast"
var _HandlerID_index = [...]uint16{0, 14, 22, 31, 41, 52, 63, 78, 85, 92, 100, 109, 115, 126, 136, 149, 163, 176, 186, 196, 206, 213, 225, 236, 248, 259}
var _HandlerID_index = [...]uint16{0, 14, 22, 31, 41, 52, 63, 78, 85, 92, 100, 109, 115, 126, 136, 149, 163, 176, 186, 196, 206, 213, 225, 230, 236, 247, 259, 270}
func (i HandlerID) String() string {
if i >= HandlerID(len(_HandlerID_index)-1) {

View File

@@ -22,6 +22,7 @@ import (
"net/url"
"sort"
"strings"
"sync"
"github.com/tinylib/msgp/msgp"
)
@@ -198,3 +199,133 @@ func (b *Bytes) Msgsize() int {
}
return msgp.ArrayHeaderSize + len(*b)
}
// Recycle returns the underlying buffer to the pool and resets b to nil.
// Calling Recycle on an already-recycled (nil) value is a no-op.
func (b *Bytes) Recycle() {
	if *b == nil {
		return
	}
	PutByteBuffer(*b)
	*b = nil
}
// URLValues can be used for url.Values.
// It adds msgp (de)serialization and optional pooling of the backing map
// (see NewURLValues and Recycle).
type URLValues map[string][]string

// urlValuesPool recycles the maps backing URLValues so repeated
// decode/encode cycles avoid reallocating the map.
var urlValuesPool = sync.Pool{
	New: func() interface{} {
		return make(map[string][]string, 10)
	},
}
// NewURLValues returns a new URLValues backed by a map taken from the pool.
// Call Recycle when done to return the map.
func NewURLValues() *URLValues {
	m := urlValuesPool.Get().(map[string][]string)
	u := URLValues(m)
	return &u
}
// NewURLValuesWith wraps the provided map as a *URLValues without copying.
func NewURLValuesWith(values map[string][]string) *URLValues {
	res := URLValues(values)
	return &res
}
// Values converts u to url.Values.
// A nil receiver yields an empty url.Values.
// The result shares the underlying map with u (shallow).
func (u *URLValues) Values() url.Values {
	if u != nil {
		return url.Values(*u)
	}
	return url.Values{}
}
// Recycle empties the underlying map, returns it to the pool, and
// sets u to nil. A nil value is left untouched.
func (u *URLValues) Recycle() {
	if *u == nil {
		return
	}
	m := map[string][]string(*u)
	*u = nil
	for k := range m {
		delete(m, k)
	}
	urlValuesPool.Put(m)
}
// MarshalMsg implements msgp.Marshaler
//
// The map is encoded as a msgp map of string -> array of strings.
func (u URLValues) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, u.Msgsize())
	o = msgp.AppendMapHeader(o, uint32(len(u)))
	for key, vals := range u {
		o = msgp.AppendString(o, key)
		o = msgp.AppendArrayHeader(o, uint32(len(vals)))
		for _, v := range vals {
			o = msgp.AppendString(o, v)
		}
	}
	return
}
// UnmarshalMsg implements msgp.Unmarshaler
//
// When *u is nil, the destination map is taken from urlValuesPool so
// Recycle can return it later; an existing map is cleared before being
// refilled, so decoded results never mix with stale entries.
func (u *URLValues) UnmarshalMsg(bts []byte) (o []byte, err error) {
	// Read the outer map header (number of keys).
	var zb0004 uint32
	zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	// Lazily obtain a pooled backing map.
	if *u == nil {
		*u = urlValuesPool.Get().(map[string][]string)
	}
	// Drop any leftover entries from a previous use of the map.
	if len(*u) > 0 {
		for key := range *u {
			delete(*u, key)
		}
	}
	for zb0004 > 0 {
		var zb0001 string   // current key
		var zb0002 []string // values decoded for the key
		zb0004--
		zb0001, bts, err = msgp.ReadStringBytes(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		// Each key maps to a msgp array of string values.
		var zb0005 uint32
		zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
		if err != nil {
			err = msgp.WrapError(err, zb0001)
			return
		}
		// Size the value slice for the announced element count.
		// (zb0002 is fresh each iteration, so the reuse branch only
		// triggers for the zero-length case.)
		if cap(zb0002) >= int(zb0005) {
			zb0002 = zb0002[:zb0005]
		} else {
			zb0002 = make([]string, zb0005)
		}
		for zb0003 := range zb0002 {
			zb0002[zb0003], bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, zb0001, zb0003)
				return
			}
		}
		(*u)[zb0001] = zb0002
	}
	// Return the unconsumed remainder of the input.
	o = bts
	return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (u URLValues) Msgsize() (s int) {
	// Ranging over a nil map simply yields no iterations, so no explicit
	// nil check is required.
	s = msgp.MapHeaderSize
	for key, vals := range u {
		s += msgp.StringPrefixSize + len(key) + msgp.ArrayHeaderSize
		for _, v := range vals {
			s += msgp.StringPrefixSize + len(v)
		}
	}
	return
}

View File

@@ -18,11 +18,18 @@
package pubsub
import (
"bytes"
"encoding/json"
"fmt"
"sync"
"sync/atomic"
)
// GetByteBuffer returns a byte buffer from the pool.
// Declared as a variable so a pooled implementation can be plugged in;
// the default simply allocates an empty 4 KiB-capacity slice.
var GetByteBuffer = func() []byte {
	buf := make([]byte, 0, 4096)
	return buf
}
// Sub - subscriber entity.
type Sub[T Maskable] struct {
ch chan T
@@ -96,6 +103,62 @@ func (ps *PubSub[T, M]) Subscribe(mask M, subCh chan T, doneCh <-chan struct{},
return nil
}
// SubscribeJSON - Adds a subscriber to pubsub system and returns results with JSON encoding.
func (ps *PubSub[T, M]) SubscribeJSON(mask M, subCh chan<- []byte, doneCh <-chan struct{}, filter func(entry T) bool) error {
	// Reserve a subscriber slot up front; roll back the count if the
	// configured limit is exceeded.
	totalSubs := atomic.AddInt32(&ps.numSubscribers, 1)
	if ps.maxSubscribers > 0 && totalSubs > ps.maxSubscribers {
		atomic.AddInt32(&ps.numSubscribers, -1)
		return fmt.Errorf("the limit of `%d` subscribers is reached", ps.maxSubscribers)
	}
	ps.Lock()
	defer ps.Unlock()
	// Internal typed channel: published entries arrive here and the
	// goroutine below JSON-encodes them before forwarding to subCh.
	subChT := make(chan T, 10000)
	sub := &Sub[T]{ch: subChT, types: Mask(mask.Mask()), filter: filter}
	ps.subs = append(ps.subs, sub)
	// We hold a lock, so we are safe to update
	combined := Mask(atomic.LoadUint64(&ps.types))
	combined.Merge(Mask(mask.Mask()))
	atomic.StoreUint64(&ps.types, uint64(combined))
	go func() {
		// Buffer and encoder are reused for every message.
		var buf bytes.Buffer
		enc := json.NewEncoder(&buf)
		for {
			select {
			case <-doneCh:
				// Subscriber is done: fall through to the break below.
			case v, ok := <-subChT:
				if !ok {
					// Channel closed: break exits the select, then the
					// break below exits the loop.
					break
				}
				buf.Reset()
				err := enc.Encode(v)
				if err != nil {
					// Encoding failure terminates the subscription.
					break
				}
				// Copy the encoded bytes into a fresh (possibly pooled)
				// buffer before sending, since buf is reused next round.
				subCh <- append(GetByteBuffer()[:0], buf.Bytes()...)
				continue
			}
			// Reached on doneCh, closed channel, or encode error.
			break
		}
		// Unsubscribe: remove this sub and recompute the combined mask
		// of types the remaining subscribers care about.
		ps.Lock()
		defer ps.Unlock()
		var remainTypes Mask
		for i, s := range ps.subs {
			if s == sub {
				ps.subs = append(ps.subs[:i], ps.subs[i+1:]...)
			} else {
				remainTypes.Merge(s.types)
			}
		}
		atomic.StoreUint64(&ps.types, uint64(remainTypes))
		atomic.AddInt32(&ps.numSubscribers, -1)
	}()
	return nil
}
// NumSubscribers returns the number of current subscribers,
// The mask is checked against the active subscribed types,
// and 0 will be returned if nobody is subscribed for the type(s).