github.com/nspcc-dev/neo-go@v0.105.2-0.20240517133400-6be757af3eba/pkg/services/rpcsrv/subscription.go

package rpcsrv

import (
	"sync/atomic"

	"github.com/gorilla/websocket"
	"github.com/nspcc-dev/neo-go/pkg/neorpc"
)

type (
	// intEvent is an internal event that has both a proper structure and
	// a websocket-ready message. It's used to serve websocket-based clients
	// as well as internal ones.
	intEvent struct {
		msg *websocket.PreparedMessage
		ntf *neorpc.Notification
	}
	// subscriber is an event subscriber.
	subscriber struct {
		writer    chan<- intEvent
		overflown atomic.Bool
		// These work like slots since there are not a lot of them (it's
		// cheaper doing it this way rather than creating a map); a map
		// keyed by EventID would be an obvious overkill at the moment,
		// but that may change. See the illustrative sketch after this
		// type block.
		feeds [maxFeeds]feed
	}
	// feed stores the subscriber's desired event ID along with its filter.
	feed struct {
		event  neorpc.EventID
		filter neorpc.SubscriptionFilter
	}
)
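
// The sketch below is illustrative and not part of the original file: it
// shows how the feeds array can work as a slot table. Subscribing claims
// the first free slot (one whose event is the EventID zero value, assumed
// here to be neorpc.InvalidEventID, as in the real neorpc package) and
// unsubscribing clears it; with only maxFeeds entries, a linear scan is
// cheaper than a map. The subscribeFeed and unsubscribeFeed names are
// hypothetical.
func (s *subscriber) subscribeFeed(id neorpc.EventID, f neorpc.SubscriptionFilter) (int, bool) {
	for i := range s.feeds {
		if s.feeds[i].event == neorpc.InvalidEventID { // A free slot.
			s.feeds[i] = feed{event: id, filter: f}
			return i, true
		}
	}
	return 0, false // All maxFeeds slots are taken.
}

func (s *subscriber) unsubscribeFeed(i int) {
	s.feeds[i] = feed{} // Reset the slot back to the free state.
}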

// EventID implements the neorpc.EventComparator interface and returns the
// notification ID.
func (f feed) EventID() neorpc.EventID {
	return f.event
}

// Filter implements the neorpc.EventComparator interface and returns the
// notification filter.
func (f feed) Filter() neorpc.SubscriptionFilter {
	return f.filter
}

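// Illustrative sketch, not part of the original file: the EventID/Filter
// getters above let generic matching code treat a feed as a
// neorpc.EventComparator and decide whether an incoming notification is
// interesting to a subscriber. matchesAny is hypothetical and assumes
// Notification carries its event ID in the Event field; the real server
// additionally applies the feed's filter to the event payload.
func matchesAny(feeds []feed, ntf *neorpc.Notification) bool {
	for i := range feeds {
		if feeds[i].EventID() == ntf.Event {
			// A full implementation would also check feeds[i].Filter()
			// against the notification payload here.
			return true
		}
	}
	return false
}
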
const (
	// Maximum number of subscriptions per one client.
	maxFeeds = 16

	// This sets the notification message buffer depth. It may seem quite
	// big, but there is a big gap in speed between internal event
	// processing and network communication, combined with the spiky
	// nature of our event generation process: lots of events can be
	// generated in a short time and put pressure on this buffer
	// (consider ~500 invocation txs in one block, each with some
	// notifications). At the same time, this channel only carries
	// pointers, so it doesn't cost a lot in terms of memory used.
	notificationBufSize = 1024
)
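
// Illustrative sketch, not part of the original file, of how the pieces
// above fit together: event delivery must never block the node's event
// loop, so a send into the subscriber's buffered writer channel (of depth
// notificationBufSize) is non-blocking, and a full buffer trips the
// overflown flag; the real server handles overflow separately (e.g. by
// notifying the client of missed events). The dispatch name is
// hypothetical.
func dispatch(sub *subscriber, ev intEvent) {
	if sub.overflown.Load() {
		return // Already overflown, no point in queueing more.
	}
	select {
	case sub.writer <- ev: // Fits into the buffer, delivered asynchronously.
	default:
		sub.overflown.Store(true) // Buffer is full, mark the subscriber.
	}
}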