github.com/nats-io/nats-server/v2@v2.11.0-preview.2/server/client.go (about)

     1  // Copyright 2012-2024 The NATS Authors
     2  // Licensed under the Apache License, Version 2.0 (the "License");
     3  // you may not use this file except in compliance with the License.
     4  // You may obtain a copy of the License at
     5  //
     6  // http://www.apache.org/licenses/LICENSE-2.0
     7  //
     8  // Unless required by applicable law or agreed to in writing, software
     9  // distributed under the License is distributed on an "AS IS" BASIS,
    10  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package server
    15  
    16  import (
    17  	"bytes"
    18  	"crypto/tls"
    19  	"crypto/x509"
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"io"
    24  	"math/rand"
    25  	"net"
    26  	"net/http"
    27  	"net/url"
    28  	"regexp"
    29  	"runtime"
    30  	"strconv"
    31  	"strings"
    32  	"sync"
    33  	"sync/atomic"
    34  	"time"
    35  
    36  	"github.com/klauspost/compress/s2"
    37  	"github.com/nats-io/jwt/v2"
    38  	"github.com/nats-io/nats-server/v2/internal/fastrand"
    39  )
    40  
    41  // Type of client connection.
    42  const (
    43  	// CLIENT is an end user.
    44  	CLIENT = iota
    45  	// ROUTER represents another server in the cluster.
    46  	ROUTER
    47  	// GATEWAY is a link between 2 clusters.
    48  	GATEWAY
    49  	// SYSTEM is an internal system client.
    50  	SYSTEM
    51  	// LEAF is for leaf node connections.
    52  	LEAF
    53  	// JETSTREAM is an internal jetstream client.
    54  	JETSTREAM
    55  	// ACCOUNT is for the internal client for accounts.
    56  	ACCOUNT
    57  )
    58  
    59  // Extended type of a CLIENT connection. This is returned by c.clientType()
    60  // and indicate what type of client connection we are dealing with.
    61  // If invoked on a non CLIENT connection, NON_CLIENT type is returned.
    62  const (
    63  	// If the connection is not a CLIENT connection.
    64  	NON_CLIENT = iota
    65  	// Regular NATS client.
    66  	NATS
    67  	// MQTT client.
    68  	MQTT
    69  	// Websocket client.
    70  	WS
    71  )
    72  
    73  const (
    74  	// ClientProtoZero is the original Client protocol from 2009.
    75  	// http://nats.io/documentation/internals/nats-protocol/
    76  	ClientProtoZero = iota
    77  	// ClientProtoInfo signals a client can receive more then the original INFO block.
    78  	// This can be used to update clients on other cluster members, etc.
    79  	ClientProtoInfo
    80  )
    81  
    82  const (
    83  	pingProto = "PING" + _CRLF_
    84  	pongProto = "PONG" + _CRLF_
    85  	errProto  = "-ERR '%s'" + _CRLF_
    86  	okProto   = "+OK" + _CRLF_
    87  )
    88  
    89  // TLS Hanshake client types
    90  const (
    91  	tlsHandshakeLeaf = "leafnode"
    92  	tlsHandshakeMQTT = "mqtt"
    93  )
    94  
    95  const (
    96  	// Scratch buffer size for the processMsg() calls.
    97  	msgScratchSize  = 1024
    98  	msgHeadProto    = "RMSG "
    99  	msgHeadProtoLen = len(msgHeadProto)
   100  
   101  	// For controlling dynamic buffer sizes.
   102  	startBufSize    = 512   // For INFO/CONNECT block
   103  	minBufSize      = 64    // Smallest to shrink to for PING/PONG
   104  	maxBufSize      = 65536 // 64k
   105  	shortsToShrink  = 2     // Trigger to shrink dynamic buffers
   106  	maxFlushPending = 10    // Max fsps to have in order to wait for writeLoop
   107  	readLoopReport  = 2 * time.Second
   108  
   109  	// Server should not send a PING (for RTT) before the first PONG has
   110  	// been sent to the client. However, in case some client libs don't
   111  	// send CONNECT+PING, cap the maximum time before server can send
   112  	// the RTT PING.
   113  	maxNoRTTPingBeforeFirstPong = 2 * time.Second
   114  
   115  	// For stalling fast producers
   116  	stallClientMinDuration = 100 * time.Millisecond
   117  	stallClientMaxDuration = time.Second
   118  )
   119  
   120  var readLoopReportThreshold = readLoopReport
   121  
   122  // Represent client booleans with a bitmask
   123  type clientFlag uint16
   124  
   125  const (
   126  	hdrLine      = "NATS/1.0\r\n"
   127  	emptyHdrLine = "NATS/1.0\r\n\r\n"
   128  )
   129  
   130  // Some client state represented as flags
   131  const (
   132  	connectReceived        clientFlag = 1 << iota // The CONNECT proto has been received
   133  	infoReceived                                  // The INFO protocol has been received
   134  	firstPongSent                                 // The first PONG has been sent
   135  	handshakeComplete                             // For TLS clients, indicate that the handshake is complete
   136  	flushOutbound                                 // Marks client as having a flushOutbound call in progress.
   137  	noReconnect                                   // Indicate that on close, this connection should not attempt a reconnect
   138  	closeConnection                               // Marks that closeConnection has already been called.
   139  	connMarkedClosed                              // Marks that markConnAsClosed has already been called.
   140  	writeLoopStarted                              // Marks that the writeLoop has been started.
   141  	skipFlushOnClose                              // Marks that flushOutbound() should not be called on connection close.
   142  	expectConnect                                 // Marks if this connection is expected to send a CONNECT
   143  	connectProcessFinished                        // Marks if this connection has finished the connect process.
   144  	compressionNegotiated                         // Marks if this connection has negotiated compression level with remote.
   145  	didTLSFirst                                   // Marks if this connection requested and was accepted doing the TLS handshake first (prior to INFO).
   146  	isSlowConsumer                                // Marks connection as a slow consumer.
   147  )
   148  
   149  // set the flag (would be equivalent to set the boolean to true)
   150  func (cf *clientFlag) set(c clientFlag) {
   151  	*cf |= c
   152  }
   153  
   154  // clear the flag (would be equivalent to set the boolean to false)
   155  func (cf *clientFlag) clear(c clientFlag) {
   156  	*cf &= ^c
   157  }
   158  
   159  // isSet returns true if the flag is set, false otherwise
   160  func (cf clientFlag) isSet(c clientFlag) bool {
   161  	return cf&c != 0
   162  }
   163  
   164  // setIfNotSet will set the flag `c` only if that flag was not already
   165  // set and return true to indicate that the flag has been set. Returns
   166  // false otherwise.
   167  func (cf *clientFlag) setIfNotSet(c clientFlag) bool {
   168  	if *cf&c == 0 {
   169  		*cf |= c
   170  		return true
   171  	}
   172  	return false
   173  }
   174  
   175  // ClosedState is the reason client was closed. This will
   176  // be passed into calls to clearConnection, but will only
   177  // be stored in ConnInfo for monitoring.
   178  type ClosedState int
   179  
   180  const (
   181  	ClientClosed = ClosedState(iota + 1)
   182  	AuthenticationTimeout
   183  	AuthenticationViolation
   184  	TLSHandshakeError
   185  	SlowConsumerPendingBytes
   186  	SlowConsumerWriteDeadline
   187  	WriteError
   188  	ReadError
   189  	ParseError
   190  	StaleConnection
   191  	ProtocolViolation
   192  	BadClientProtocolVersion
   193  	WrongPort
   194  	MaxAccountConnectionsExceeded
   195  	MaxConnectionsExceeded
   196  	MaxPayloadExceeded
   197  	MaxControlLineExceeded
   198  	MaxSubscriptionsExceeded
   199  	DuplicateRoute
   200  	RouteRemoved
   201  	ServerShutdown
   202  	AuthenticationExpired
   203  	WrongGateway
   204  	MissingAccount
   205  	Revocation
   206  	InternalClient
   207  	MsgHeaderViolation
   208  	NoRespondersRequiresHeaders
   209  	ClusterNameConflict
   210  	DuplicateRemoteLeafnodeConnection
   211  	DuplicateClientID
   212  	DuplicateServerName
   213  	MinimumVersionRequired
   214  	ClusterNamesIdentical
   215  	Kicked
   216  )
   217  
   218  // Some flags passed to processMsgResults
   219  const pmrNoFlag int = 0
   220  const (
   221  	pmrCollectQueueNames int = 1 << iota
   222  	pmrIgnoreEmptyQueueFilter
   223  	pmrAllowSendFromRouteToRoute
   224  	pmrMsgImportedFromService
   225  )
   226  
   227  type client struct {
   228  	// Here first because of use of atomics, and memory alignment.
   229  	stats
   230  	gwReplyMapping
   231  	kind  int
   232  	srv   *Server
   233  	acc   *Account
   234  	perms *permissions
   235  	in    readCache
   236  	parseState
   237  	opts       ClientOpts
   238  	rrTracking *rrTracking
   239  	mpay       int32
   240  	msubs      int32
   241  	mcl        int32
   242  	mu         sync.Mutex
   243  	cid        uint64
   244  	start      time.Time
   245  	nonce      []byte
   246  	pubKey     string
   247  	nc         net.Conn
   248  	ncs        atomic.Value
   249  	out        outbound
   250  	user       *NkeyUser
   251  	host       string
   252  	port       uint16
   253  	subs       map[string]*subscription
   254  	replies    map[string]*resp
   255  	mperms     *msgDeny
   256  	darray     []string
   257  	pcd        map[*client]struct{}
   258  	atmr       *time.Timer
   259  	expires    time.Time
   260  	ping       pinfo
   261  	msgb       [msgScratchSize]byte
   262  	last       time.Time
   263  	lastIn     time.Time
   264  
   265  	headers bool
   266  
   267  	rtt      time.Duration
   268  	rttStart time.Time
   269  
   270  	route *route
   271  	gw    *gateway
   272  	leaf  *leaf
   273  	ws    *websocket
   274  	mqtt  *mqtt
   275  
   276  	flags clientFlag // Compact booleans into a single field. Size will be increased when needed.
   277  
   278  	rref byte
   279  
   280  	trace bool
   281  	echo  bool
   282  	noIcb bool
   283  
   284  	tags    jwt.TagList
   285  	nameTag string
   286  
   287  	tlsTo *time.Timer
   288  }
   289  
   290  type rrTracking struct {
   291  	rmap map[string]*remoteLatency
   292  	ptmr *time.Timer
   293  	lrt  time.Duration
   294  }
   295  
   296  // Struct for PING initiation from the server.
   297  type pinfo struct {
   298  	tmr *time.Timer
   299  	out int
   300  }
   301  
   302  // outbound holds pending data for a socket.
   303  type outbound struct {
   304  	nb  net.Buffers   // Pending buffers for send, each has fixed capacity as per nbPool below.
   305  	wnb net.Buffers   // Working copy of "nb", reused on each flushOutbound call, partial writes may leave entries here for next iteration.
   306  	pb  int64         // Total pending/queued bytes.
   307  	fsp int32         // Flush signals that are pending per producer from readLoop's pcd.
   308  	sg  *sync.Cond    // To signal writeLoop that there is data to flush.
   309  	wdl time.Duration // Snapshot of write deadline.
   310  	mp  int64         // Snapshot of max pending for client.
   311  	lft time.Duration // Last flush time for Write.
   312  	stc chan struct{} // Stall chan we create to slow down producers on overrun, e.g. fan-in.
   313  	cw  *s2.Writer
   314  }
   315  
   316  const nbPoolSizeSmall = 512   // Underlying array size of small buffer
   317  const nbPoolSizeMedium = 4096 // Underlying array size of medium buffer
   318  const nbPoolSizeLarge = 65536 // Underlying array size of large buffer
   319  
   320  var nbPoolSmall = &sync.Pool{
   321  	New: func() any {
   322  		b := [nbPoolSizeSmall]byte{}
   323  		return &b
   324  	},
   325  }
   326  
   327  var nbPoolMedium = &sync.Pool{
   328  	New: func() any {
   329  		b := [nbPoolSizeMedium]byte{}
   330  		return &b
   331  	},
   332  }
   333  
   334  var nbPoolLarge = &sync.Pool{
   335  	New: func() any {
   336  		b := [nbPoolSizeLarge]byte{}
   337  		return &b
   338  	},
   339  }
   340  
   341  // nbPoolGet returns a frame that is a best-effort match for the given size.
   342  // Once a pooled frame is no longer needed, it should be recycled by passing
   343  // it to nbPoolPut.
   344  func nbPoolGet(sz int) []byte {
   345  	switch {
   346  	case sz <= nbPoolSizeSmall:
   347  		return nbPoolSmall.Get().(*[nbPoolSizeSmall]byte)[:0]
   348  	case sz <= nbPoolSizeMedium:
   349  		return nbPoolMedium.Get().(*[nbPoolSizeMedium]byte)[:0]
   350  	default:
   351  		return nbPoolLarge.Get().(*[nbPoolSizeLarge]byte)[:0]
   352  	}
   353  }
   354  
   355  // nbPoolPut recycles a frame that was retrieved from nbPoolGet. It is not
   356  // safe to return multiple slices referring to chunks of the same underlying
   357  // array as this may create overlaps when the buffers are returned to their
   358  // original size, resulting in race conditions.
   359  func nbPoolPut(b []byte) {
   360  	switch cap(b) {
   361  	case nbPoolSizeSmall:
   362  		b := (*[nbPoolSizeSmall]byte)(b[0:nbPoolSizeSmall])
   363  		nbPoolSmall.Put(b)
   364  	case nbPoolSizeMedium:
   365  		b := (*[nbPoolSizeMedium]byte)(b[0:nbPoolSizeMedium])
   366  		nbPoolMedium.Put(b)
   367  	case nbPoolSizeLarge:
   368  		b := (*[nbPoolSizeLarge]byte)(b[0:nbPoolSizeLarge])
   369  		nbPoolLarge.Put(b)
   370  	default:
   371  		// Ignore frames that are the wrong size, this might happen
   372  		// with WebSocket/MQTT messages as they are framed
   373  	}
   374  }
   375  
   376  type perm struct {
   377  	allow *Sublist
   378  	deny  *Sublist
   379  }
   380  
   381  type permissions struct {
   382  	// Have these 2 first for memory alignment due to the use of atomic.
   383  	pcsz   int32
   384  	prun   int32
   385  	sub    perm
   386  	pub    perm
   387  	resp   *ResponsePermission
   388  	pcache sync.Map
   389  }
   390  
   391  // This is used to dynamically track responses and reply subjects
   392  // for dynamic permissioning.
   393  type resp struct {
   394  	t time.Time
   395  	n int
   396  }
   397  
   398  // msgDeny is used when a user permission for subscriptions has a deny
   399  // clause but a subscription could be made that is of broader scope.
   400  // e.g. deny = "foo", but user subscribes to "*". That subscription should
   401  // succeed but no message sent on foo should be delivered.
   402  type msgDeny struct {
   403  	deny   *Sublist
   404  	dcache map[string]bool
   405  }
   406  
   407  // routeTarget collects information regarding routes and queue groups for
   408  // sending information to a remote.
   409  type routeTarget struct {
   410  	sub *subscription
   411  	qs  []byte
   412  	_qs [32]byte
   413  }
   414  
   415  const (
   416  	maxResultCacheSize   = 512
   417  	maxDenyPermCacheSize = 256
   418  	maxPermCacheSize     = 128
   419  	pruneSize            = 32
   420  	routeTargetInit      = 8
   421  	replyPermLimit       = 4096
   422  )
   423  
   424  // Represent read cache booleans with a bitmask
   425  type readCacheFlag uint16
   426  
   427  const (
   428  	hasMappings         readCacheFlag = 1 << iota // For account subject mappings.
   429  	switchToCompression readCacheFlag = 1 << 1
   430  )
   431  
   432  const sysGroup = "_sys_"
   433  
   434  // Used in readloop to cache hot subject lookups and group statistics.
   435  type readCache struct {
   436  	// These are for clients who are bound to a single account.
   437  	genid   uint64
   438  	results map[string]*SublistResult
   439  
   440  	// This is for routes and gateways to have their own L1 as well that is account aware.
   441  	pacache map[string]*perAccountCache
   442  
   443  	// This is for when we deliver messages across a route. We use this structure
   444  	// to make sure to only send one message and properly scope to queues as needed.
   445  	rts []routeTarget
   446  
   447  	// These are all temporary totals for an invocation of a read in readloop.
   448  	msgs  int32
   449  	bytes int32
   450  	subs  int32
   451  
   452  	rsz int32 // Read buffer size
   453  	srs int32 // Short reads, used for dynamic buffer resizing.
   454  
   455  	// These are for readcache flags to avoid locks.
   456  	flags readCacheFlag
   457  
   458  	// Capture the time we started processing our readLoop.
   459  	start time.Time
   460  }
   461  
   462  // set the flag (would be equivalent to set the boolean to true)
   463  func (rcf *readCacheFlag) set(c readCacheFlag) {
   464  	*rcf |= c
   465  }
   466  
   467  // clear the flag (would be equivalent to set the boolean to false)
   468  func (rcf *readCacheFlag) clear(c readCacheFlag) {
   469  	*rcf &= ^c
   470  }
   471  
   472  // isSet returns true if the flag is set, false otherwise
   473  func (rcf readCacheFlag) isSet(c readCacheFlag) bool {
   474  	return rcf&c != 0
   475  }
   476  
   477  const (
   478  	defaultMaxPerAccountCacheSize   = 4096
   479  	defaultPrunePerAccountCacheSize = 256
   480  	defaultClosedSubsCheckInterval  = 5 * time.Minute
   481  )
   482  
   483  var (
   484  	maxPerAccountCacheSize   = defaultMaxPerAccountCacheSize
   485  	prunePerAccountCacheSize = defaultPrunePerAccountCacheSize
   486  	closedSubsCheckInterval  = defaultClosedSubsCheckInterval
   487  )
   488  
   489  // perAccountCache is for L1 semantics for inbound messages from a route or gateway to mimic the performance of clients.
   490  type perAccountCache struct {
   491  	acc     *Account
   492  	results *SublistResult
   493  	genid   uint64
   494  }
   495  
   496  func (c *client) String() (id string) {
   497  	loaded := c.ncs.Load()
   498  	if loaded != nil {
   499  		return loaded.(string)
   500  	}
   501  
   502  	return _EMPTY_
   503  }
   504  
   505  // GetNonce returns the nonce that was presented to the user on connection
   506  func (c *client) GetNonce() []byte {
   507  	c.mu.Lock()
   508  	defer c.mu.Unlock()
   509  
   510  	return c.nonce
   511  }
   512  
   513  // GetName returns the application supplied name for the connection.
   514  func (c *client) GetName() string {
   515  	c.mu.Lock()
   516  	name := c.opts.Name
   517  	c.mu.Unlock()
   518  	return name
   519  }
   520  
   521  // GetOpts returns the client options provided by the application.
   522  func (c *client) GetOpts() *ClientOpts {
   523  	return &c.opts
   524  }
   525  
   526  // GetTLSConnectionState returns the TLS ConnectionState if TLS is enabled, nil
   527  // otherwise. Implements the ClientAuth interface.
   528  func (c *client) GetTLSConnectionState() *tls.ConnectionState {
   529  	c.mu.Lock()
   530  	defer c.mu.Unlock()
   531  	if c.nc == nil {
   532  		return nil
   533  	}
   534  	tc, ok := c.nc.(*tls.Conn)
   535  	if !ok {
   536  		return nil
   537  	}
   538  	state := tc.ConnectionState()
   539  	return &state
   540  }
   541  
   542  // For CLIENT connections, this function returns the client type, that is,
   543  // NATS (for regular clients), MQTT or WS for websocket.
   544  // If this is invoked for a non CLIENT connection, NON_CLIENT is returned.
   545  //
   546  // This function does not lock the client and accesses fields that are supposed
   547  // to be immutable and therefore it can be invoked outside of the client's lock.
   548  func (c *client) clientType() int {
   549  	switch c.kind {
   550  	case CLIENT:
   551  		if c.isMqtt() {
   552  			return MQTT
   553  		} else if c.isWebsocket() {
   554  			return WS
   555  		}
   556  		return NATS
   557  	default:
   558  		return NON_CLIENT
   559  	}
   560  }
   561  
   562  var clientTypeStringMap = map[int]string{
   563  	NON_CLIENT: _EMPTY_,
   564  	NATS:       "nats",
   565  	WS:         "websocket",
   566  	MQTT:       "mqtt",
   567  }
   568  
   569  func (c *client) clientTypeString() string {
   570  	if typeStringVal, ok := clientTypeStringMap[c.clientType()]; ok {
   571  		return typeStringVal
   572  	}
   573  	return _EMPTY_
   574  }
   575  
   576  // This is the main subscription struct that indicates
   577  // interest in published messages.
   578  // FIXME(dlc) - This is getting bloated for normal subs, need
   579  // to optionally have an opts section for non-normal stuff.
   580  type subscription struct {
   581  	client  *client
   582  	im      *streamImport // This is for import stream support.
   583  	rsi     bool
   584  	si      bool
   585  	shadow  []*subscription // This is to track shadowed accounts.
   586  	icb     msgHandler
   587  	subject []byte
   588  	queue   []byte
   589  	sid     []byte
   590  	origin  []byte
   591  	nm      int64
   592  	max     int64
   593  	qw      int32
   594  	closed  int32
   595  	mqtt    *mqttSub
   596  }
   597  
   598  // Indicate that this subscription is closed.
   599  // This is used in pruning of route and gateway cache items.
   600  func (s *subscription) close() {
   601  	atomic.StoreInt32(&s.closed, 1)
   602  }
   603  
   604  // Return true if this subscription was unsubscribed
   605  // or its connection has been closed.
   606  func (s *subscription) isClosed() bool {
   607  	return atomic.LoadInt32(&s.closed) == 1
   608  }
   609  
   610  type ClientOpts struct {
   611  	Echo         bool   `json:"echo"`
   612  	Verbose      bool   `json:"verbose"`
   613  	Pedantic     bool   `json:"pedantic"`
   614  	TLSRequired  bool   `json:"tls_required"`
   615  	Nkey         string `json:"nkey,omitempty"`
   616  	JWT          string `json:"jwt,omitempty"`
   617  	Sig          string `json:"sig,omitempty"`
   618  	Token        string `json:"auth_token,omitempty"`
   619  	Username     string `json:"user,omitempty"`
   620  	Password     string `json:"pass,omitempty"`
   621  	Name         string `json:"name"`
   622  	Lang         string `json:"lang"`
   623  	Version      string `json:"version"`
   624  	Protocol     int    `json:"protocol"`
   625  	Account      string `json:"account,omitempty"`
   626  	AccountNew   bool   `json:"new_account,omitempty"`
   627  	Headers      bool   `json:"headers,omitempty"`
   628  	NoResponders bool   `json:"no_responders,omitempty"`
   629  
   630  	// Routes and Leafnodes only
   631  	Import *SubjectPermission `json:"import,omitempty"`
   632  	Export *SubjectPermission `json:"export,omitempty"`
   633  
   634  	// Leafnodes
   635  	RemoteAccount string `json:"remote_account,omitempty"`
   636  }
   637  
   638  var defaultOpts = ClientOpts{Verbose: true, Pedantic: true, Echo: true}
   639  var internalOpts = ClientOpts{Verbose: false, Pedantic: false, Echo: false}
   640  
   641  func (c *client) setTraceLevel() {
   642  	if c.kind == SYSTEM && !(atomic.LoadInt32(&c.srv.logging.traceSysAcc) != 0) {
   643  		c.trace = false
   644  	} else {
   645  		c.trace = (atomic.LoadInt32(&c.srv.logging.trace) != 0)
   646  	}
   647  }
   648  
   649  // Lock should be held
   650  func (c *client) initClient() {
   651  	s := c.srv
   652  	c.cid = atomic.AddUint64(&s.gcid, 1)
   653  
   654  	// Outbound data structure setup
   655  	c.out.sg = sync.NewCond(&(c.mu))
   656  	opts := s.getOpts()
   657  	// Snapshots to avoid mutex access in fast paths.
   658  	c.out.wdl = opts.WriteDeadline
   659  	c.out.mp = opts.MaxPending
   660  	// Snapshot max control line since currently can not be changed on reload and we
   661  	// were checking it on each call to parse. If this changes and we allow MaxControlLine
   662  	// to be reloaded without restart, this code will need to change.
   663  	c.mcl = int32(opts.MaxControlLine)
   664  	if c.mcl == 0 {
   665  		c.mcl = MAX_CONTROL_LINE_SIZE
   666  	}
   667  
   668  	c.subs = make(map[string]*subscription)
   669  	c.echo = true
   670  
   671  	c.setTraceLevel()
   672  
   673  	// This is a scratch buffer used for processMsg()
   674  	// The msg header starts with "RMSG ", which can be used
   675  	// for both local and routes.
   676  	// in bytes that is [82 77 83 71 32].
   677  	c.msgb = [msgScratchSize]byte{82, 77, 83, 71, 32}
   678  
   679  	// This is to track pending clients that have data to be flushed
   680  	// after we process inbound msgs from our own connection.
   681  	c.pcd = make(map[*client]struct{})
   682  
   683  	// snapshot the string version of the connection
   684  	var conn string
   685  	if c.nc != nil {
   686  		if addr := c.nc.RemoteAddr(); addr != nil {
   687  			if conn = addr.String(); conn != _EMPTY_ {
   688  				host, port, _ := net.SplitHostPort(conn)
   689  				iPort, _ := strconv.Atoi(port)
   690  				c.host, c.port = host, uint16(iPort)
   691  				if c.isWebsocket() && c.ws.clientIP != _EMPTY_ {
   692  					cip := c.ws.clientIP
   693  					// Surround IPv6 addresses with square brackets, as
   694  					// net.JoinHostPort would do...
   695  					if strings.Contains(cip, ":") {
   696  						cip = "[" + cip + "]"
   697  					}
   698  					conn = fmt.Sprintf("%s/%s", cip, conn)
   699  				}
   700  				// Now that we have extracted host and port, escape
   701  				// the string because it is going to be used in Sprintf
   702  				conn = strings.ReplaceAll(conn, "%", "%%")
   703  			}
   704  		}
   705  	}
   706  
   707  	switch c.kind {
   708  	case CLIENT:
   709  		switch c.clientType() {
   710  		case NATS:
   711  			c.ncs.Store(fmt.Sprintf("%s - cid:%d", conn, c.cid))
   712  		case WS:
   713  			c.ncs.Store(fmt.Sprintf("%s - wid:%d", conn, c.cid))
   714  		case MQTT:
   715  			var ws string
   716  			if c.isWebsocket() {
   717  				ws = "_ws"
   718  			}
   719  			c.ncs.Store(fmt.Sprintf("%s - mid%s:%d", conn, ws, c.cid))
   720  		}
   721  	case ROUTER:
   722  		c.ncs.Store(fmt.Sprintf("%s - rid:%d", conn, c.cid))
   723  	case GATEWAY:
   724  		c.ncs.Store(fmt.Sprintf("%s - gid:%d", conn, c.cid))
   725  	case LEAF:
   726  		var ws string
   727  		if c.isWebsocket() {
   728  			ws = "_ws"
   729  		}
   730  		c.ncs.Store(fmt.Sprintf("%s - lid%s:%d", conn, ws, c.cid))
   731  	case SYSTEM:
   732  		c.ncs.Store("SYSTEM")
   733  	case JETSTREAM:
   734  		c.ncs.Store("JETSTREAM")
   735  	case ACCOUNT:
   736  		c.ncs.Store("ACCOUNT")
   737  	}
   738  }
   739  
   740  // RemoteAddress expose the Address of the client connection,
   741  // nil when not connected or unknown
   742  func (c *client) RemoteAddress() net.Addr {
   743  	c.mu.Lock()
   744  	defer c.mu.Unlock()
   745  
   746  	if c.nc == nil {
   747  		return nil
   748  	}
   749  
   750  	return c.nc.RemoteAddr()
   751  }
   752  
   753  // Helper function to report errors.
   754  func (c *client) reportErrRegisterAccount(acc *Account, err error) {
   755  	if err == ErrTooManyAccountConnections {
   756  		c.maxAccountConnExceeded()
   757  		return
   758  	}
   759  	c.Errorf("Problem registering with account %q: %s", acc.Name, err)
   760  	c.sendErr("Failed Account Registration")
   761  }
   762  
   763  // Kind returns the client kind and will be one of the defined constants like CLIENT, ROUTER, GATEWAY, LEAF
   764  func (c *client) Kind() int {
   765  	c.mu.Lock()
   766  	kind := c.kind
   767  	c.mu.Unlock()
   768  
   769  	return kind
   770  }
   771  
   772  // registerWithAccount will register the given user with a specific
   773  // account. This will change the subject namespace.
   774  func (c *client) registerWithAccount(acc *Account) error {
   775  	if acc == nil {
   776  		return ErrBadAccount
   777  	}
   778  	acc.mu.RLock()
   779  	bad := acc.sl == nil
   780  	acc.mu.RUnlock()
   781  	if bad {
   782  		return ErrBadAccount
   783  	}
   784  	// If we were previously registered, usually to $G, do accounting here to remove.
   785  	if c.acc != nil {
   786  		if prev := c.acc.removeClient(c); prev == 1 && c.srv != nil {
   787  			c.srv.decActiveAccounts()
   788  		}
   789  	}
   790  
   791  	c.mu.Lock()
   792  	kind := c.kind
   793  	srv := c.srv
   794  	c.acc = acc
   795  	c.applyAccountLimits()
   796  	c.mu.Unlock()
   797  
   798  	// Check if we have a max connections violation
   799  	if kind == CLIENT && acc.MaxTotalConnectionsReached() {
   800  		return ErrTooManyAccountConnections
   801  	} else if kind == LEAF {
   802  		// Check if we are already connected to this cluster.
   803  		if rc := c.remoteCluster(); rc != _EMPTY_ && acc.hasLeafNodeCluster(rc) {
   804  			return ErrLeafNodeLoop
   805  		}
   806  		if acc.MaxTotalLeafNodesReached() {
   807  			return ErrTooManyAccountConnections
   808  		}
   809  	}
   810  
   811  	// Add in new one.
   812  	if prev := acc.addClient(c); prev == 0 && srv != nil {
   813  		srv.incActiveAccounts()
   814  	}
   815  
   816  	return nil
   817  }
   818  
   819  // Helper to determine if we have met or exceeded max subs.
   820  func (c *client) subsAtLimit() bool {
   821  	return c.msubs != jwt.NoLimit && len(c.subs) >= int(c.msubs)
   822  }
   823  
   824  func minLimit(value *int32, limit int32) bool {
   825  	v := atomic.LoadInt32(value)
   826  	if v != jwt.NoLimit {
   827  		if limit != jwt.NoLimit {
   828  			if limit < v {
   829  				atomic.StoreInt32(value, limit)
   830  				return true
   831  			}
   832  		}
   833  	} else if limit != jwt.NoLimit {
   834  		atomic.StoreInt32(value, limit)
   835  		return true
   836  	}
   837  	return false
   838  }
   839  
   840  // Apply account limits
   841  // Lock is held on entry.
   842  // FIXME(dlc) - Should server be able to override here?
   843  func (c *client) applyAccountLimits() {
   844  	if c.acc == nil || (c.kind != CLIENT && c.kind != LEAF) {
   845  		return
   846  	}
   847  	atomic.StoreInt32(&c.mpay, jwt.NoLimit)
   848  	c.msubs = jwt.NoLimit
   849  	if c.opts.JWT != _EMPTY_ { // user jwt implies account
   850  		if uc, _ := jwt.DecodeUserClaims(c.opts.JWT); uc != nil {
   851  			c.mpay = int32(uc.Limits.Payload)
   852  			c.msubs = int32(uc.Limits.Subs)
   853  			if uc.IssuerAccount != _EMPTY_ && uc.IssuerAccount != uc.Issuer {
   854  				if scope, ok := c.acc.signingKeys[uc.Issuer]; ok {
   855  					if userScope, ok := scope.(*jwt.UserScope); ok {
   856  						// if signing key disappeared or changed and we don't get here, the client will be disconnected
   857  						c.mpay = int32(userScope.Template.Limits.Payload)
   858  						c.msubs = int32(userScope.Template.Limits.Subs)
   859  					}
   860  				}
   861  			}
   862  		}
   863  	}
   864  
   865  	c.acc.mu.RLock()
   866  	minLimit(&c.mpay, c.acc.mpay)
   867  	minLimit(&c.msubs, c.acc.msubs)
   868  	c.acc.mu.RUnlock()
   869  
   870  	s := c.srv
   871  	opts := s.getOpts()
   872  	mPay := opts.MaxPayload
   873  	// options encode unlimited differently
   874  	if mPay == 0 {
   875  		mPay = jwt.NoLimit
   876  	}
   877  	mSubs := int32(opts.MaxSubs)
   878  	if mSubs == 0 {
   879  		mSubs = jwt.NoLimit
   880  	}
   881  	wasUnlimited := c.mpay == jwt.NoLimit
   882  	if minLimit(&c.mpay, mPay) && !wasUnlimited {
   883  		c.Errorf("Max Payload set to %d from server overrides account or user config", opts.MaxPayload)
   884  	}
   885  	wasUnlimited = c.msubs == jwt.NoLimit
   886  	if minLimit(&c.msubs, mSubs) && !wasUnlimited {
   887  		c.Errorf("Max Subscriptions set to %d from server overrides account or user config", opts.MaxSubs)
   888  	}
   889  	if c.subsAtLimit() {
   890  		go func() {
   891  			c.maxSubsExceeded()
   892  			time.Sleep(20 * time.Millisecond)
   893  			c.closeConnection(MaxSubscriptionsExceeded)
   894  		}()
   895  	}
   896  }
   897  
   898  // RegisterUser allows auth to call back into a new client
   899  // with the authenticated user. This is used to map
   900  // any permissions into the client and setup accounts.
   901  func (c *client) RegisterUser(user *User) {
   902  	// Register with proper account and sublist.
   903  	if user.Account != nil {
   904  		if err := c.registerWithAccount(user.Account); err != nil {
   905  			c.reportErrRegisterAccount(user.Account, err)
   906  			return
   907  		}
   908  	}
   909  
   910  	c.mu.Lock()
   911  
   912  	// Assign permissions.
   913  	if user.Permissions == nil {
   914  		// Reset perms to nil in case client previously had them.
   915  		c.perms = nil
   916  		c.mperms = nil
   917  	} else {
   918  		c.setPermissions(user.Permissions)
   919  	}
   920  
   921  	// allows custom authenticators to set a username to be reported in
   922  	// server events and more
   923  	if user.Username != _EMPTY_ {
   924  		c.opts.Username = user.Username
   925  	}
   926  
   927  	// if a deadline time stamp is set we start a timer to disconnect the user at that time
   928  	if !user.ConnectionDeadline.IsZero() {
   929  		c.setExpirationTimerUnlocked(time.Until(user.ConnectionDeadline))
   930  	}
   931  
   932  	c.mu.Unlock()
   933  }
   934  
   935  // RegisterNkeyUser allows auth to call back into a new nkey
   936  // client with the authenticated user. This is used to map
   937  // any permissions into the client and setup accounts.
   938  func (c *client) RegisterNkeyUser(user *NkeyUser) error {
   939  	// Register with proper account and sublist.
   940  	if user.Account != nil {
   941  		if err := c.registerWithAccount(user.Account); err != nil {
   942  			c.reportErrRegisterAccount(user.Account, err)
   943  			return err
   944  		}
   945  	}
   946  
   947  	c.mu.Lock()
   948  	c.user = user
   949  	// Assign permissions.
   950  	if user.Permissions == nil {
   951  		// Reset perms to nil in case client previously had them.
   952  		c.perms = nil
   953  		c.mperms = nil
   954  	} else {
   955  		c.setPermissions(user.Permissions)
   956  	}
   957  	c.mu.Unlock()
   958  	return nil
   959  }
   960  
   961  func splitSubjectQueue(sq string) ([]byte, []byte, error) {
   962  	vals := strings.Fields(strings.TrimSpace(sq))
   963  	s := []byte(vals[0])
   964  	var q []byte
   965  	if len(vals) == 2 {
   966  		q = []byte(vals[1])
   967  	} else if len(vals) > 2 {
   968  		return nil, nil, fmt.Errorf("invalid subject-queue %q", sq)
   969  	}
   970  	return s, q, nil
   971  }
   972  
   973  // Initializes client.perms structure.
   974  // Lock is held on entry.
   975  func (c *client) setPermissions(perms *Permissions) {
   976  	if perms == nil {
   977  		return
   978  	}
   979  	c.perms = &permissions{}
   980  
   981  	// Loop over publish permissions
   982  	if perms.Publish != nil {
   983  		if perms.Publish.Allow != nil {
   984  			c.perms.pub.allow = NewSublistWithCache()
   985  		}
   986  		for _, pubSubject := range perms.Publish.Allow {
   987  			sub := &subscription{subject: []byte(pubSubject)}
   988  			c.perms.pub.allow.Insert(sub)
   989  		}
   990  		if len(perms.Publish.Deny) > 0 {
   991  			c.perms.pub.deny = NewSublistWithCache()
   992  		}
   993  		for _, pubSubject := range perms.Publish.Deny {
   994  			sub := &subscription{subject: []byte(pubSubject)}
   995  			c.perms.pub.deny.Insert(sub)
   996  		}
   997  	}
   998  
   999  	// Check if we are allowed to send responses.
  1000  	if perms.Response != nil {
  1001  		rp := *perms.Response
  1002  		c.perms.resp = &rp
  1003  		c.replies = make(map[string]*resp)
  1004  	}
  1005  
  1006  	// Loop over subscribe permissions
  1007  	if perms.Subscribe != nil {
  1008  		var err error
  1009  		if len(perms.Subscribe.Allow) > 0 {
  1010  			c.perms.sub.allow = NewSublistWithCache()
  1011  		}
  1012  		for _, subSubject := range perms.Subscribe.Allow {
  1013  			sub := &subscription{}
  1014  			sub.subject, sub.queue, err = splitSubjectQueue(subSubject)
  1015  			if err != nil {
  1016  				c.Errorf("%s", err.Error())
  1017  				continue
  1018  			}
  1019  			c.perms.sub.allow.Insert(sub)
  1020  		}
  1021  		if len(perms.Subscribe.Deny) > 0 {
  1022  			c.perms.sub.deny = NewSublistWithCache()
  1023  			// Also hold onto this array for later.
  1024  			c.darray = perms.Subscribe.Deny
  1025  		}
  1026  		for _, subSubject := range perms.Subscribe.Deny {
  1027  			sub := &subscription{}
  1028  			sub.subject, sub.queue, err = splitSubjectQueue(subSubject)
  1029  			if err != nil {
  1030  				c.Errorf("%s", err.Error())
  1031  				continue
  1032  			}
  1033  			c.perms.sub.deny.Insert(sub)
  1034  		}
  1035  	}
  1036  
  1037  	// If we are a leafnode and we are the hub copy the extracted perms
  1038  	// to resend back to soliciting server. These are reversed from the
  1039  	// way routes interpret them since this is how the soliciting server
  1040  	// will receive these back in an update INFO.
  1041  	if c.isHubLeafNode() {
  1042  		c.opts.Import = perms.Subscribe
  1043  		c.opts.Export = perms.Publish
  1044  	}
  1045  }
  1046  
  1047  // Build public permissions from internal ones.
  1048  // Used for user info requests.
  1049  func (c *client) publicPermissions() *Permissions {
  1050  	c.mu.Lock()
  1051  	defer c.mu.Unlock()
  1052  
  1053  	if c.perms == nil {
  1054  		return nil
  1055  	}
  1056  	perms := &Permissions{
  1057  		Publish:   &SubjectPermission{},
  1058  		Subscribe: &SubjectPermission{},
  1059  	}
  1060  
  1061  	_subs := [32]*subscription{}
  1062  
  1063  	// Publish
  1064  	if c.perms.pub.allow != nil {
  1065  		subs := _subs[:0]
  1066  		c.perms.pub.allow.All(&subs)
  1067  		for _, sub := range subs {
  1068  			perms.Publish.Allow = append(perms.Publish.Allow, string(sub.subject))
  1069  		}
  1070  	}
  1071  	if c.perms.pub.deny != nil {
  1072  		subs := _subs[:0]
  1073  		c.perms.pub.deny.All(&subs)
  1074  		for _, sub := range subs {
  1075  			perms.Publish.Deny = append(perms.Publish.Deny, string(sub.subject))
  1076  		}
  1077  	}
  1078  	// Subsribe
  1079  	if c.perms.sub.allow != nil {
  1080  		subs := _subs[:0]
  1081  		c.perms.sub.allow.All(&subs)
  1082  		for _, sub := range subs {
  1083  			perms.Subscribe.Allow = append(perms.Subscribe.Allow, string(sub.subject))
  1084  		}
  1085  	}
  1086  	if c.perms.sub.deny != nil {
  1087  		subs := _subs[:0]
  1088  		c.perms.sub.deny.All(&subs)
  1089  		for _, sub := range subs {
  1090  			perms.Subscribe.Deny = append(perms.Subscribe.Deny, string(sub.subject))
  1091  		}
  1092  	}
  1093  	// Responses.
  1094  	if c.perms.resp != nil {
  1095  		rp := *c.perms.resp
  1096  		perms.Response = &rp
  1097  	}
  1098  
  1099  	return perms
  1100  }
  1101  
  1102  type denyType int
  1103  
  1104  const (
  1105  	pub = denyType(iota + 1)
  1106  	sub
  1107  	both
  1108  )
  1109  
  1110  // Merge client.perms structure with additional pub deny permissions
  1111  // Lock is held on entry.
  1112  func (c *client) mergeDenyPermissions(what denyType, denyPubs []string) {
  1113  	if len(denyPubs) == 0 {
  1114  		return
  1115  	}
  1116  	if c.perms == nil {
  1117  		c.perms = &permissions{}
  1118  	}
  1119  	var perms []*perm
  1120  	switch what {
  1121  	case pub:
  1122  		perms = []*perm{&c.perms.pub}
  1123  	case sub:
  1124  		perms = []*perm{&c.perms.sub}
  1125  	case both:
  1126  		perms = []*perm{&c.perms.pub, &c.perms.sub}
  1127  	}
  1128  	for _, p := range perms {
  1129  		if p.deny == nil {
  1130  			p.deny = NewSublistWithCache()
  1131  		}
  1132  	FOR_DENY:
  1133  		for _, subj := range denyPubs {
  1134  			r := p.deny.Match(subj)
  1135  			for _, v := range r.qsubs {
  1136  				for _, s := range v {
  1137  					if string(s.subject) == subj {
  1138  						continue FOR_DENY
  1139  					}
  1140  				}
  1141  			}
  1142  			for _, s := range r.psubs {
  1143  				if string(s.subject) == subj {
  1144  					continue FOR_DENY
  1145  				}
  1146  			}
  1147  			sub := &subscription{subject: []byte(subj)}
  1148  			p.deny.Insert(sub)
  1149  		}
  1150  	}
  1151  }
  1152  
  1153  // Merge client.perms structure with additional pub deny permissions
  1154  // Client lock must not be held on entry
  1155  func (c *client) mergeDenyPermissionsLocked(what denyType, denyPubs []string) {
  1156  	c.mu.Lock()
  1157  	c.mergeDenyPermissions(what, denyPubs)
  1158  	c.mu.Unlock()
  1159  }
  1160  
  1161  // Check to see if we have an expiration for the user JWT via base claims.
  1162  // FIXME(dlc) - Clear on connect with new JWT.
  1163  func (c *client) setExpiration(claims *jwt.ClaimsData, validFor time.Duration) {
  1164  	if claims.Expires == 0 {
  1165  		if validFor != 0 {
  1166  			c.setExpirationTimer(validFor)
  1167  		}
  1168  		return
  1169  	}
  1170  	expiresAt := time.Duration(0)
  1171  	tn := time.Now().Unix()
  1172  	if claims.Expires > tn {
  1173  		expiresAt = time.Duration(claims.Expires-tn) * time.Second
  1174  	}
  1175  	if validFor != 0 && validFor < expiresAt {
  1176  		c.setExpirationTimer(validFor)
  1177  	} else {
  1178  		c.setExpirationTimer(expiresAt)
  1179  	}
  1180  }
  1181  
  1182  // This will load up the deny structure used for filtering delivered
  1183  // messages based on a deny clause for subscriptions.
  1184  // Lock should be held.
  1185  func (c *client) loadMsgDenyFilter() {
  1186  	c.mperms = &msgDeny{NewSublistWithCache(), make(map[string]bool)}
  1187  	for _, sub := range c.darray {
  1188  		c.mperms.deny.Insert(&subscription{subject: []byte(sub)})
  1189  	}
  1190  }
  1191  
  1192  // writeLoop is the main socket write functionality.
  1193  // Runs in its own Go routine.
  1194  func (c *client) writeLoop() {
  1195  	defer c.srv.grWG.Done()
  1196  	c.mu.Lock()
  1197  	if c.isClosed() {
  1198  		c.mu.Unlock()
  1199  		return
  1200  	}
  1201  	c.flags.set(writeLoopStarted)
  1202  	c.mu.Unlock()
  1203  
  1204  	// Used to check that we did flush from last wake up.
  1205  	waitOk := true
  1206  	var closed bool
  1207  
  1208  	// Main loop. Will wait to be signaled and then will use
  1209  	// buffered outbound structure for efficient writev to the underlying socket.
  1210  	for {
  1211  		c.mu.Lock()
  1212  		if closed = c.isClosed(); !closed {
  1213  			owtf := c.out.fsp > 0 && c.out.pb < maxBufSize && c.out.fsp < maxFlushPending
  1214  			if waitOk && (c.out.pb == 0 || owtf) {
  1215  				c.out.sg.Wait()
  1216  				// Check that connection has not been closed while lock was released
  1217  				// in the conditional wait.
  1218  				closed = c.isClosed()
  1219  			}
  1220  		}
  1221  		if closed {
  1222  			c.flushAndClose(false)
  1223  			c.mu.Unlock()
  1224  
  1225  			// We should always call closeConnection() to ensure that state is
  1226  			// properly cleaned-up. It will be a no-op if already done.
  1227  			c.closeConnection(WriteError)
  1228  
  1229  			// Now explicitly call reconnect(). Thanks to ref counting, we know
  1230  			// that the reconnect will execute only after connection has been
  1231  			// removed from the server state.
  1232  			c.reconnect()
  1233  			return
  1234  		}
  1235  		// Flush data
  1236  		waitOk = c.flushOutbound()
  1237  		c.mu.Unlock()
  1238  	}
  1239  }
  1240  
  1241  // flushClients will make sure to flush any clients we may have
  1242  // sent to during processing. We pass in a budget as a time.Duration
  1243  // for how much time to spend in place flushing for this client.
  1244  func (c *client) flushClients(budget time.Duration) time.Time {
  1245  	last := time.Now()
  1246  
  1247  	// Check pending clients for flush.
  1248  	for cp := range c.pcd {
  1249  		// TODO(dlc) - Wonder if it makes more sense to create a new map?
  1250  		delete(c.pcd, cp)
  1251  
  1252  		// Queue up a flush for those in the set
  1253  		cp.mu.Lock()
  1254  		// Update last activity for message delivery
  1255  		cp.last = last
  1256  		// Remove ourselves from the pending list.
  1257  		cp.out.fsp--
  1258  
  1259  		// Just ignore if this was closed.
  1260  		if cp.isClosed() {
  1261  			cp.mu.Unlock()
  1262  			continue
  1263  		}
  1264  
  1265  		if budget > 0 && cp.out.lft < 2*budget && cp.flushOutbound() {
  1266  			budget -= cp.out.lft
  1267  		} else {
  1268  			cp.flushSignal()
  1269  		}
  1270  
  1271  		cp.mu.Unlock()
  1272  	}
  1273  	return last
  1274  }
  1275  
  1276  // readLoop is the main socket read functionality.
  1277  // Runs in its own Go routine.
  1278  func (c *client) readLoop(pre []byte) {
  1279  	// Grab the connection off the client, it will be cleared on a close.
  1280  	// We check for that after the loop, but want to avoid a nil dereference
  1281  	c.mu.Lock()
  1282  	s := c.srv
  1283  	defer s.grWG.Done()
  1284  	if c.isClosed() {
  1285  		c.mu.Unlock()
  1286  		return
  1287  	}
  1288  	nc := c.nc
  1289  	ws := c.isWebsocket()
  1290  	if c.isMqtt() {
  1291  		c.mqtt.r = &mqttReader{reader: nc}
  1292  	}
  1293  	c.in.rsz = startBufSize
  1294  
  1295  	// Check the per-account-cache for closed subscriptions
  1296  	cpacc := c.kind == ROUTER || c.kind == GATEWAY
  1297  	// Last per-account-cache check for closed subscriptions
  1298  	lpacc := time.Now()
  1299  	acc := c.acc
  1300  	var masking bool
  1301  	if ws {
  1302  		masking = c.ws.maskread
  1303  	}
  1304  	checkCompress := c.kind == ROUTER || c.kind == LEAF
  1305  	c.mu.Unlock()
  1306  
  1307  	defer func() {
  1308  		if c.isMqtt() {
  1309  			s.mqttHandleClosedClient(c)
  1310  		}
  1311  		// These are used only in the readloop, so we can set them to nil
  1312  		// on exit of the readLoop.
  1313  		c.in.results, c.in.pacache = nil, nil
  1314  	}()
  1315  
  1316  	// Start read buffer.
  1317  	b := make([]byte, c.in.rsz)
  1318  
  1319  	// Websocket clients will return several slices if there are multiple
  1320  	// websocket frames in the blind read. For non WS clients though, we
  1321  	// will always have 1 slice per loop iteration. So we define this here
  1322  	// so non WS clients will use bufs[0] = b[:n].
  1323  	var _bufs [1][]byte
  1324  	bufs := _bufs[:1]
  1325  
  1326  	var wsr *wsReadInfo
  1327  	if ws {
  1328  		wsr = &wsReadInfo{mask: masking}
  1329  		wsr.init()
  1330  	}
  1331  
  1332  	var decompress bool
  1333  	var reader io.Reader
  1334  	reader = nc
  1335  
  1336  	for {
  1337  		var n int
  1338  		var err error
  1339  
  1340  		// If we have a pre buffer parse that first.
  1341  		if len(pre) > 0 {
  1342  			b = pre
  1343  			n = len(pre)
  1344  			pre = nil
  1345  		} else {
  1346  			n, err = reader.Read(b)
  1347  			// If we have any data we will try to parse and exit at the end.
  1348  			if n == 0 && err != nil {
  1349  				c.closeConnection(closedStateForErr(err))
  1350  				return
  1351  			}
  1352  		}
  1353  		if ws {
  1354  			bufs, err = c.wsRead(wsr, reader, b[:n])
  1355  			if bufs == nil && err != nil {
  1356  				if err != io.EOF {
  1357  					c.Errorf("read error: %v", err)
  1358  				}
  1359  				c.closeConnection(closedStateForErr(err))
  1360  				return
  1361  			} else if bufs == nil {
  1362  				continue
  1363  			}
  1364  		} else {
  1365  			bufs[0] = b[:n]
  1366  		}
  1367  
  1368  		// Check if the account has mappings and if so set the local readcache flag.
  1369  		// We check here to make sure any changes such as config reload are reflected here.
  1370  		if c.kind == CLIENT || c.kind == LEAF {
  1371  			if acc.hasMappings() {
  1372  				c.in.flags.set(hasMappings)
  1373  			} else {
  1374  				c.in.flags.clear(hasMappings)
  1375  			}
  1376  		}
  1377  
  1378  		c.in.start = time.Now()
  1379  
  1380  		// Clear inbound stats cache
  1381  		c.in.msgs = 0
  1382  		c.in.bytes = 0
  1383  		c.in.subs = 0
  1384  
  1385  		// Main call into parser for inbound data. This will generate callouts
  1386  		// to process messages, etc.
  1387  		for i := 0; i < len(bufs); i++ {
  1388  			if err := c.parse(bufs[i]); err != nil {
  1389  				if err == ErrMinimumVersionRequired {
  1390  					// Special case here, currently only for leaf node connections.
  1391  					// When process the CONNECT protocol, if the minimum version
  1392  					// required was not met, an error was printed and sent back to
  1393  					// the remote, and connection was closed after a certain delay
  1394  					// (to avoid "rapid" reconnection from the remote).
  1395  					// We don't need to do any of the things below, simply return.
  1396  					return
  1397  				}
  1398  				if dur := time.Since(c.in.start); dur >= readLoopReportThreshold {
  1399  					c.Warnf("Readloop processing time: %v", dur)
  1400  				}
  1401  				// Need to call flushClients because some of the clients have been
  1402  				// assigned messages and their "fsp" incremented, and need now to be
  1403  				// decremented and their writeLoop signaled.
  1404  				c.flushClients(0)
  1405  				// handled inline
  1406  				if err != ErrMaxPayload && err != ErrAuthentication {
  1407  					c.Error(err)
  1408  					c.closeConnection(ProtocolViolation)
  1409  				}
  1410  				return
  1411  			}
  1412  		}
  1413  
  1414  		// If we are a ROUTER/LEAF and have processed an INFO, it is possible that
  1415  		// we are asked to switch to compression now.
  1416  		if checkCompress && c.in.flags.isSet(switchToCompression) {
  1417  			c.in.flags.clear(switchToCompression)
  1418  			// For now we support only s2 compression...
  1419  			reader = s2.NewReader(nc)
  1420  			decompress = true
  1421  		}
  1422  
  1423  		// Updates stats for client and server that were collected
  1424  		// from parsing through the buffer.
  1425  		if c.in.msgs > 0 {
  1426  			atomic.AddInt64(&c.inMsgs, int64(c.in.msgs))
  1427  			atomic.AddInt64(&c.inBytes, int64(c.in.bytes))
  1428  			if acc != nil {
  1429  				atomic.AddInt64(&acc.inMsgs, int64(c.in.msgs))
  1430  				atomic.AddInt64(&acc.inBytes, int64(c.in.bytes))
  1431  			}
  1432  			atomic.AddInt64(&s.inMsgs, int64(c.in.msgs))
  1433  			atomic.AddInt64(&s.inBytes, int64(c.in.bytes))
  1434  		}
  1435  
  1436  		// Signal to writeLoop to flush to socket.
  1437  		last := c.flushClients(0)
  1438  
  1439  		// Update activity, check read buffer size.
  1440  		c.mu.Lock()
  1441  
  1442  		// Activity based on interest changes or data/msgs.
  1443  		// Also update last receive activity for ping sender
  1444  		if c.in.msgs > 0 || c.in.subs > 0 {
  1445  			c.last = last
  1446  			c.lastIn = last
  1447  		}
  1448  
  1449  		if n >= cap(b) {
  1450  			c.in.srs = 0
  1451  		} else if n < cap(b)/2 { // divide by 2 b/c we want less than what we would shrink to.
  1452  			c.in.srs++
  1453  		}
  1454  
  1455  		// Update read buffer size as/if needed.
  1456  		if n >= cap(b) && cap(b) < maxBufSize {
  1457  			// Grow
  1458  			c.in.rsz = int32(cap(b) * 2)
  1459  			b = make([]byte, c.in.rsz)
  1460  		} else if n < cap(b) && cap(b) > minBufSize && c.in.srs > shortsToShrink {
  1461  			// Shrink, for now don't accelerate, ping/pong will eventually sort it out.
  1462  			c.in.rsz = int32(cap(b) / 2)
  1463  			b = make([]byte, c.in.rsz)
  1464  		}
  1465  		// re-snapshot the account since it can change during reload, etc.
  1466  		acc = c.acc
  1467  		// Refresh nc because in some cases, we have upgraded c.nc to TLS.
  1468  		if nc != c.nc {
  1469  			nc = c.nc
  1470  			if decompress && nc != nil {
  1471  				// For now we support only s2 compression...
  1472  				reader.(*s2.Reader).Reset(nc)
  1473  			} else if !decompress {
  1474  				reader = nc
  1475  			}
  1476  		}
  1477  		c.mu.Unlock()
  1478  
  1479  		// Connection was closed
  1480  		if nc == nil {
  1481  			return
  1482  		}
  1483  
  1484  		if dur := time.Since(c.in.start); dur >= readLoopReportThreshold {
  1485  			c.Warnf("Readloop processing time: %v", dur)
  1486  		}
  1487  
  1488  		// We could have had a read error from above but still read some data.
  1489  		// If so do the close here unconditionally.
  1490  		if err != nil {
  1491  			c.closeConnection(closedStateForErr(err))
  1492  			return
  1493  		}
  1494  
  1495  		if cpacc && (c.in.start.Sub(lpacc)) >= closedSubsCheckInterval {
  1496  			c.pruneClosedSubFromPerAccountCache()
  1497  			lpacc = time.Now()
  1498  		}
  1499  	}
  1500  }
  1501  
  1502  // Returns the appropriate closed state for a given read error.
  1503  func closedStateForErr(err error) ClosedState {
  1504  	if err == io.EOF {
  1505  		return ClientClosed
  1506  	}
  1507  	return ReadError
  1508  }
  1509  
  1510  // collapsePtoNB will either returned framed WebSocket buffers or it will
  1511  // return a reference to c.out.nb.
  1512  func (c *client) collapsePtoNB() (net.Buffers, int64) {
  1513  	if c.isWebsocket() {
  1514  		return c.wsCollapsePtoNB()
  1515  	}
  1516  	return c.out.nb, c.out.pb
  1517  }
  1518  
  1519  // flushOutbound will flush outbound buffer to a client.
  1520  // Will return true if data was attempted to be written.
  1521  // Lock must be held
  1522  func (c *client) flushOutbound() bool {
  1523  	if c.flags.isSet(flushOutbound) {
  1524  		// For CLIENT connections, it is possible that the readLoop calls
  1525  		// flushOutbound(). If writeLoop and readLoop compete and we are
  1526  		// here we should release the lock to reduce the risk of spinning.
  1527  		c.mu.Unlock()
  1528  		runtime.Gosched()
  1529  		c.mu.Lock()
  1530  		return false
  1531  	}
  1532  	c.flags.set(flushOutbound)
  1533  	defer func() {
  1534  		// Check flushAndClose() for explanation on why we do this.
  1535  		if c.isClosed() {
  1536  			for i := range c.out.wnb {
  1537  				nbPoolPut(c.out.wnb[i])
  1538  			}
  1539  			c.out.wnb = nil
  1540  		}
  1541  		c.flags.clear(flushOutbound)
  1542  	}()
  1543  
  1544  	// Check for nothing to do.
  1545  	if c.nc == nil || c.srv == nil || c.out.pb == 0 {
  1546  		return true // true because no need to queue a signal.
  1547  	}
  1548  
  1549  	// In the case of a normal socket connection, "collapsed" is just a ref
  1550  	// to "nb". In the case of WebSockets, additional framing is added to
  1551  	// anything that is waiting in "nb". Also keep a note of how many bytes
  1552  	// were queued before we release the mutex.
  1553  	collapsed, attempted := c.collapsePtoNB()
  1554  
  1555  	// Frustratingly, (net.Buffers).WriteTo() modifies the receiver so we
  1556  	// can't work on "nb" directly — while the mutex is unlocked during IO,
  1557  	// something else might call queueOutbound and modify it. So instead we
  1558  	// need a working copy — we'll operate on "wnb" instead. Note that in
  1559  	// the case of a partial write, "wnb" may have remaining data from the
  1560  	// previous write, and in the case of WebSockets, that data may already
  1561  	// be framed, so we are careful not to re-frame "wnb" here. Instead we
  1562  	// will just frame up "nb" and append it onto whatever is left on "wnb".
  1563  	// "nb" will be set to nil so that we can manipulate "collapsed" outside
  1564  	// of the client's lock, which is interesting in case of compression.
  1565  	c.out.nb = nil
  1566  
  1567  	// In case it goes away after releasing the lock.
  1568  	nc := c.nc
  1569  
  1570  	// Capture this (we change the value in some tests)
  1571  	wdl := c.out.wdl
  1572  
  1573  	// Check for compression
  1574  	cw := c.out.cw
  1575  	if cw != nil {
  1576  		// We will have to adjust once we have compressed, so remove for now.
  1577  		c.out.pb -= attempted
  1578  		if c.isWebsocket() {
  1579  			c.ws.fs -= attempted
  1580  		}
  1581  	}
  1582  
  1583  	// Do NOT hold lock during actual IO.
  1584  	c.mu.Unlock()
  1585  
  1586  	// Compress outside of the lock
  1587  	if cw != nil {
  1588  		var err error
  1589  		bb := bytes.Buffer{}
  1590  
  1591  		cw.Reset(&bb)
  1592  		for _, buf := range collapsed {
  1593  			if _, err = cw.Write(buf); err != nil {
  1594  				break
  1595  			}
  1596  		}
  1597  		if err == nil {
  1598  			err = cw.Close()
  1599  		}
  1600  		if err != nil {
  1601  			c.Errorf("Error compressing data: %v", err)
  1602  			// We need to grab the lock now before marking as closed and exiting
  1603  			c.mu.Lock()
  1604  			c.markConnAsClosed(WriteError)
  1605  			return false
  1606  		}
  1607  		collapsed = append(net.Buffers(nil), bb.Bytes())
  1608  		attempted = int64(len(collapsed[0]))
  1609  	}
  1610  
  1611  	// This is safe to do outside of the lock since "collapsed" is no longer
  1612  	// referenced in c.out.nb (which can be modified in queueOutboud() while
  1613  	// the lock is released).
  1614  	c.out.wnb = append(c.out.wnb, collapsed...)
  1615  	var _orig [1024][]byte
  1616  	orig := append(_orig[:0], c.out.wnb...)
  1617  
  1618  	// Since WriteTo is lopping things off the beginning, we need to remember
  1619  	// the start position of the underlying array so that we can get back to it.
  1620  	// Otherwise we'll always "slide forward" and that will result in reallocs.
  1621  	startOfWnb := c.out.wnb[0:]
  1622  
  1623  	// flush here
  1624  	start := time.Now()
  1625  
  1626  	// FIXME(dlc) - writev will do multiple IOs past 1024 on
  1627  	// most platforms, need to account for that with deadline?
  1628  	nc.SetWriteDeadline(start.Add(wdl))
  1629  
  1630  	// Actual write to the socket.
  1631  	n, err := c.out.wnb.WriteTo(nc)
  1632  	nc.SetWriteDeadline(time.Time{})
  1633  
  1634  	lft := time.Since(start)
  1635  
  1636  	// Re-acquire client lock.
  1637  	c.mu.Lock()
  1638  
  1639  	// Adjust if we were compressing.
  1640  	if cw != nil {
  1641  		c.out.pb += attempted
  1642  		if c.isWebsocket() {
  1643  			c.ws.fs += attempted
  1644  		}
  1645  	}
  1646  
  1647  	// At this point, "wnb" has been mutated by WriteTo and any consumed
  1648  	// buffers have been lopped off the beginning, so in order to return
  1649  	// them to the pool, we need to look at the difference between "orig"
  1650  	// and "wnb".
  1651  	for i := 0; i < len(orig)-len(c.out.wnb); i++ {
  1652  		nbPoolPut(orig[i])
  1653  	}
  1654  
  1655  	// At this point it's possible that "nb" has been modified by another
  1656  	// call to queueOutbound while the lock was released, so we'll leave
  1657  	// those for the next iteration. Meanwhile it's possible that we only
  1658  	// managed a partial write of "wnb", so we'll shift anything that
  1659  	// remains up to the beginning of the array to prevent reallocating.
  1660  	// Anything left in "wnb" has already been framed for WebSocket conns
  1661  	// so leave them alone for the next call to flushOutbound.
  1662  	c.out.wnb = append(startOfWnb[:0], c.out.wnb...)
  1663  
  1664  	// If we've written everything but the underlying array of our working
  1665  	// buffer has grown excessively then free it — the GC will tidy it up
  1666  	// and we can allocate a new one next time.
  1667  	if len(c.out.wnb) == 0 && cap(c.out.wnb) > nbPoolSizeLarge*8 {
  1668  		c.out.wnb = nil
  1669  	}
  1670  
  1671  	// Ignore ErrShortWrite errors, they will be handled as partials.
  1672  	var gotWriteTimeout bool
  1673  	if err != nil && err != io.ErrShortWrite {
  1674  		// Handle timeout error (slow consumer) differently
  1675  		if ne, ok := err.(net.Error); ok && ne.Timeout() {
  1676  			gotWriteTimeout = true
  1677  			if closed := c.handleWriteTimeout(n, attempted, len(orig)); closed {
  1678  				return true
  1679  			}
  1680  		} else {
  1681  			// Other errors will cause connection to be closed.
  1682  			// For clients, report as debug but for others report as error.
  1683  			report := c.Debugf
  1684  			if c.kind != CLIENT {
  1685  				report = c.Errorf
  1686  			}
  1687  			report("Error flushing: %v", err)
  1688  			c.markConnAsClosed(WriteError)
  1689  			return true
  1690  		}
  1691  	}
  1692  
  1693  	// Update flush time statistics.
  1694  	c.out.lft = lft
  1695  
  1696  	// Subtract from pending bytes and messages.
  1697  	c.out.pb -= n
  1698  	if c.isWebsocket() {
  1699  		c.ws.fs -= n
  1700  	}
  1701  
  1702  	// Check that if there is still data to send and writeLoop is in wait,
  1703  	// then we need to signal.
  1704  	if c.out.pb > 0 {
  1705  		c.flushSignal()
  1706  	}
  1707  
  1708  	// Check if we have a stalled gate and if so and we are recovering release
  1709  	// any stalled producers. Only kind==CLIENT will stall.
  1710  	if c.out.stc != nil && (n == attempted || c.out.pb < c.out.mp/2) {
  1711  		close(c.out.stc)
  1712  		c.out.stc = nil
  1713  	}
  1714  	// Check if the connection is recovering from being a slow consumer.
  1715  	if !gotWriteTimeout && c.flags.isSet(isSlowConsumer) {
  1716  		c.Noticef("Slow Consumer Recovered: Flush took %.3fs with %d chunks of %d total bytes.", time.Since(start).Seconds(), len(orig), attempted)
  1717  		c.flags.clear(isSlowConsumer)
  1718  	}
  1719  
  1720  	return true
  1721  }
  1722  
  1723  // This is invoked from flushOutbound() for io/timeout error (slow consumer).
  1724  // Returns a boolean to indicate if the connection has been closed or not.
  1725  // Lock is held on entry.
  1726  func (c *client) handleWriteTimeout(written, attempted int64, numChunks int) bool {
  1727  	if tlsConn, ok := c.nc.(*tls.Conn); ok {
  1728  		if !tlsConn.ConnectionState().HandshakeComplete {
  1729  			// Likely a TLSTimeout error instead...
  1730  			c.markConnAsClosed(TLSHandshakeError)
  1731  			// Would need to coordinate with tlstimeout()
  1732  			// to avoid double logging, so skip logging
  1733  			// here, and don't report a slow consumer error.
  1734  			return true
  1735  		}
  1736  	} else if c.flags.isSet(expectConnect) && !c.flags.isSet(connectReceived) {
  1737  		// Under some conditions, a connection may hit a slow consumer write deadline
  1738  		// before the authorization timeout. If that is the case, then we handle
  1739  		// as slow consumer though we do not increase the counter as that can be
  1740  		// misleading.
  1741  		c.markConnAsClosed(SlowConsumerWriteDeadline)
  1742  		return true
  1743  	}
  1744  	alreadySC := c.flags.isSet(isSlowConsumer)
  1745  	scState := "Detected"
  1746  	if alreadySC {
  1747  		scState = "State"
  1748  	}
  1749  
  1750  	// Aggregate slow consumers.
  1751  	atomic.AddInt64(&c.srv.slowConsumers, 1)
  1752  	switch c.kind {
  1753  	case CLIENT:
  1754  		c.srv.scStats.clients.Add(1)
  1755  	case ROUTER:
  1756  		// Only count each Slow Consumer event once.
  1757  		if !alreadySC {
  1758  			c.srv.scStats.routes.Add(1)
  1759  		}
  1760  	case GATEWAY:
  1761  		c.srv.scStats.gateways.Add(1)
  1762  	case LEAF:
  1763  		c.srv.scStats.leafs.Add(1)
  1764  	}
  1765  	if c.acc != nil {
  1766  		atomic.AddInt64(&c.acc.slowConsumers, 1)
  1767  	}
  1768  	c.Noticef("Slow Consumer %s: WriteDeadline of %v exceeded with %d chunks of %d total bytes.",
  1769  		scState, c.out.wdl, numChunks, attempted)
  1770  
  1771  	// We always close CLIENT connections, or when nothing was written at all...
  1772  	if c.kind == CLIENT || written == 0 {
  1773  		c.markConnAsClosed(SlowConsumerWriteDeadline)
  1774  		return true
  1775  	} else {
  1776  		c.flags.setIfNotSet(isSlowConsumer)
  1777  	}
  1778  	return false
  1779  }
  1780  
  1781  // Marks this connection has closed with the given reason.
  1782  // Sets the connMarkedClosed flag and skipFlushOnClose depending on the reason.
  1783  // Depending on the kind of connection, the connection will be saved.
  1784  // If a writeLoop has been started, the final flush will be done there, otherwise
  1785  // flush and close of TCP connection is done here in place.
  1786  // Returns true if closed in place, flase otherwise.
  1787  // Lock is held on entry.
  1788  func (c *client) markConnAsClosed(reason ClosedState) {
  1789  	// Possibly set skipFlushOnClose flag even if connection has already been
  1790  	// mark as closed. The rationale is that a connection may be closed with
  1791  	// a reason that justifies a flush (say after sending an -ERR), but then
  1792  	// the flushOutbound() gets a write error. If that happens, connection
  1793  	// being lost, there is no reason to attempt to flush again during the
  1794  	// teardown when the writeLoop exits.
  1795  	var skipFlush bool
  1796  	switch reason {
  1797  	case ReadError, WriteError, SlowConsumerPendingBytes, SlowConsumerWriteDeadline, TLSHandshakeError:
  1798  		c.flags.set(skipFlushOnClose)
  1799  		skipFlush = true
  1800  	}
  1801  	if c.flags.isSet(connMarkedClosed) {
  1802  		return
  1803  	}
  1804  	c.flags.set(connMarkedClosed)
  1805  	// For a websocket client, unless we are told not to flush, enqueue
  1806  	// a websocket CloseMessage based on the reason.
  1807  	if !skipFlush && c.isWebsocket() && !c.ws.closeSent {
  1808  		c.wsEnqueueCloseMessage(reason)
  1809  	}
  1810  	// Be consistent with the creation: for routes, gateways and leaf,
  1811  	// we use Noticef on create, so use that too for delete.
  1812  	if c.srv != nil {
  1813  		if c.kind == LEAF {
  1814  			if c.acc != nil {
  1815  				c.Noticef("%s connection closed: %s - Account: %s", c.kindString(), reason, c.acc.traceLabel())
  1816  			} else {
  1817  				c.Noticef("%s connection closed: %s", c.kindString(), reason)
  1818  			}
  1819  		} else if c.kind == ROUTER || c.kind == GATEWAY {
  1820  			c.Noticef("%s connection closed: %s", c.kindString(), reason)
  1821  		} else { // Client, System, Jetstream, and Account connections.
  1822  			c.Debugf("%s connection closed: %s", c.kindString(), reason)
  1823  		}
  1824  	}
  1825  
  1826  	// Save off the connection if its a client or leafnode.
  1827  	if c.kind == CLIENT || c.kind == LEAF {
  1828  		if nc := c.nc; nc != nil && c.srv != nil {
  1829  			// TODO: May want to send events to single go routine instead
  1830  			// of creating a new go routine for each save.
  1831  			go c.srv.saveClosedClient(c, nc, reason)
  1832  		}
  1833  	}
  1834  	// If writeLoop exists, let it do the final flush, close and teardown.
  1835  	if c.flags.isSet(writeLoopStarted) {
  1836  		// Since we want the writeLoop to do the final flush and tcp close,
  1837  		// we want the reconnect to be done there too. However, it should'nt
  1838  		// happen before the connection has been removed from the server
  1839  		// state (end of closeConnection()). This ref count allows us to
  1840  		// guarantee that.
  1841  		c.rref++
  1842  		c.flushSignal()
  1843  		return
  1844  	}
  1845  	// Flush (if skipFlushOnClose is not set) and close in place. If flushing,
  1846  	// use a small WriteDeadline.
  1847  	c.flushAndClose(true)
  1848  }
  1849  
  1850  // flushSignal will use server to queue the flush IO operation to a pool of flushers.
  1851  // Lock must be held.
  1852  func (c *client) flushSignal() {
  1853  	// Check that sg is not nil, which will happen if the connection is closed.
  1854  	if c.out.sg != nil {
  1855  		c.out.sg.Signal()
  1856  	}
  1857  }
  1858  
  1859  // Traces a message.
  1860  // Will NOT check if tracing is enabled, does NOT need the client lock.
  1861  func (c *client) traceMsg(msg []byte) {
  1862  	maxTrace := c.srv.getOpts().MaxTracedMsgLen
  1863  	if maxTrace > 0 && (len(msg)-LEN_CR_LF) > maxTrace {
  1864  		tm := fmt.Sprintf("%q", msg[:maxTrace])
  1865  		c.Tracef("<<- MSG_PAYLOAD: [\"%s...\"]", tm[1:maxTrace+1])
  1866  	} else {
  1867  		c.Tracef("<<- MSG_PAYLOAD: [%q]", msg[:len(msg)-LEN_CR_LF])
  1868  	}
  1869  }
  1870  
  1871  // Traces an incoming operation.
  1872  // Will NOT check if tracing is enabled, does NOT need the client lock.
  1873  func (c *client) traceInOp(op string, arg []byte) {
  1874  	c.traceOp("<<- %s", op, arg)
  1875  }
  1876  
  1877  // Traces an outgoing operation.
  1878  // Will NOT check if tracing is enabled, does NOT need the client lock.
  1879  func (c *client) traceOutOp(op string, arg []byte) {
  1880  	c.traceOp("->> %s", op, arg)
  1881  }
  1882  
  1883  func (c *client) traceOp(format, op string, arg []byte) {
  1884  	opa := []any{}
  1885  	if op != _EMPTY_ {
  1886  		opa = append(opa, op)
  1887  	}
  1888  	if arg != nil {
  1889  		opa = append(opa, bytesToString(arg))
  1890  	}
  1891  	c.Tracef(format, opa)
  1892  }
  1893  
  1894  // Process the information messages from Clients and other Routes.
  1895  func (c *client) processInfo(arg []byte) error {
  1896  	info := Info{}
  1897  	if err := json.Unmarshal(arg, &info); err != nil {
  1898  		return err
  1899  	}
  1900  	switch c.kind {
  1901  	case ROUTER:
  1902  		c.processRouteInfo(&info)
  1903  	case GATEWAY:
  1904  		c.processGatewayInfo(&info)
  1905  	case LEAF:
  1906  		c.processLeafnodeInfo(&info)
  1907  	}
  1908  	return nil
  1909  }
  1910  
  1911  func (c *client) processErr(errStr string) {
  1912  	close := true
  1913  	switch c.kind {
  1914  	case CLIENT:
  1915  		c.Errorf("Client Error %s", errStr)
  1916  	case ROUTER:
  1917  		c.Errorf("Route Error %s", errStr)
  1918  	case GATEWAY:
  1919  		c.Errorf("Gateway Error %s", errStr)
  1920  	case LEAF:
  1921  		c.Errorf("Leafnode Error %s", errStr)
  1922  		c.leafProcessErr(errStr)
  1923  		close = false
  1924  	case JETSTREAM:
  1925  		c.Errorf("JetStream Error %s", errStr)
  1926  	}
  1927  	if close {
  1928  		c.closeConnection(ParseError)
  1929  	}
  1930  }
  1931  
  1932  // Password pattern matcher.
  1933  var passPat = regexp.MustCompile(`"?\s*pass\S*?"?\s*[:=]\s*"?(([^",\r\n}])*)`)
  1934  
  1935  // removePassFromTrace removes any notion of passwords from trace
  1936  // messages for logging.
  1937  func removePassFromTrace(arg []byte) []byte {
  1938  	if !bytes.Contains(arg, []byte(`pass`)) {
  1939  		return arg
  1940  	}
  1941  	// Take a copy of the connect proto just for the trace message.
  1942  	var _arg [4096]byte
  1943  	buf := append(_arg[:0], arg...)
  1944  
  1945  	m := passPat.FindAllSubmatchIndex(buf, -1)
  1946  	if len(m) == 0 {
  1947  		return arg
  1948  	}
  1949  
  1950  	redactedPass := []byte("[REDACTED]")
  1951  	for _, i := range m {
  1952  		if len(i) < 4 {
  1953  			continue
  1954  		}
  1955  		start := i[2]
  1956  		end := i[3]
  1957  
  1958  		// Replace password substring.
  1959  		buf = append(buf[:start], append(redactedPass, buf[end:]...)...)
  1960  		break
  1961  	}
  1962  	return buf
  1963  }
  1964  
  1965  // Returns the RTT by computing the elapsed time since now and `start`.
  1966  // On Windows VM where I (IK) run tests, time.Since() will return 0
  1967  // (I suspect some time granularity issues). So return at minimum 1ns.
  1968  func computeRTT(start time.Time) time.Duration {
  1969  	rtt := time.Since(start)
  1970  	if rtt <= 0 {
  1971  		rtt = time.Nanosecond
  1972  	}
  1973  	return rtt
  1974  }
  1975  
  1976  // processConnect will process a client connect op.
  1977  func (c *client) processConnect(arg []byte) error {
  1978  	supportsHeaders := c.srv.supportsHeaders()
  1979  	c.mu.Lock()
  1980  	// If we can't stop the timer because the callback is in progress...
  1981  	if !c.clearAuthTimer() {
  1982  		// wait for it to finish and handle sending the failure back to
  1983  		// the client.
  1984  		for !c.isClosed() {
  1985  			c.mu.Unlock()
  1986  			time.Sleep(25 * time.Millisecond)
  1987  			c.mu.Lock()
  1988  		}
  1989  		c.mu.Unlock()
  1990  		return nil
  1991  	}
  1992  	c.last = time.Now().UTC()
  1993  	// Estimate RTT to start.
  1994  	if c.kind == CLIENT {
  1995  		c.rtt = computeRTT(c.start)
  1996  		if c.srv != nil {
  1997  			c.clearPingTimer()
  1998  			c.setFirstPingTimer()
  1999  		}
  2000  	}
  2001  	kind := c.kind
  2002  	srv := c.srv
  2003  
  2004  	// Moved unmarshalling of clients' Options under the lock.
  2005  	// The client has already been added to the server map, so it is possible
  2006  	// that other routines lookup the client, and access its options under
  2007  	// the client's lock, so unmarshalling the options outside of the lock
  2008  	// would cause data RACEs.
  2009  	if err := json.Unmarshal(arg, &c.opts); err != nil {
  2010  		c.mu.Unlock()
  2011  		return err
  2012  	}
  2013  	// Indicate that the CONNECT protocol has been received, and that the
  2014  	// server now knows which protocol this client supports.
  2015  	c.flags.set(connectReceived)
  2016  	// Capture these under lock
  2017  	c.echo = c.opts.Echo
  2018  	proto := c.opts.Protocol
  2019  	verbose := c.opts.Verbose
  2020  	lang := c.opts.Lang
  2021  	account := c.opts.Account
  2022  	accountNew := c.opts.AccountNew
  2023  
  2024  	if c.kind == CLIENT {
  2025  		var ncs string
  2026  		if c.opts.Version != _EMPTY_ {
  2027  			ncs = fmt.Sprintf("v%s", c.opts.Version)
  2028  		}
  2029  		if c.opts.Lang != _EMPTY_ {
  2030  			if c.opts.Version == _EMPTY_ {
  2031  				ncs = c.opts.Lang
  2032  			} else {
  2033  				ncs = fmt.Sprintf("%s:%s", ncs, c.opts.Lang)
  2034  			}
  2035  		}
  2036  		if c.opts.Name != _EMPTY_ {
  2037  			if c.opts.Version == _EMPTY_ && c.opts.Lang == _EMPTY_ {
  2038  				ncs = c.opts.Name
  2039  			} else {
  2040  				ncs = fmt.Sprintf("%s:%s", ncs, c.opts.Name)
  2041  			}
  2042  		}
  2043  		if ncs != _EMPTY_ {
  2044  			c.ncs.CompareAndSwap(nil, fmt.Sprintf("%s - %q", c, ncs))
  2045  		}
  2046  	}
  2047  
  2048  	// if websocket client, maybe some options through cookies
  2049  	if ws := c.ws; ws != nil {
  2050  		// if JWT not in the CONNECT, use the cookie JWT (possibly empty).
  2051  		if c.opts.JWT == _EMPTY_ {
  2052  			c.opts.JWT = ws.cookieJwt
  2053  		}
  2054  		// if user not in the CONNECT, use the cookie user (possibly empty)
  2055  		if c.opts.Username == _EMPTY_ {
  2056  			c.opts.Username = ws.cookieUsername
  2057  		}
  2058  		// if pass not in the CONNECT, use the cookie password (possibly empty).
  2059  		if c.opts.Password == _EMPTY_ {
  2060  			c.opts.Password = ws.cookiePassword
  2061  		}
  2062  		// if token not in the CONNECT, use the cookie token (possibly empty).
  2063  		if c.opts.Token == _EMPTY_ {
  2064  			c.opts.Token = ws.cookieToken
  2065  		}
  2066  	}
  2067  
  2068  	// when not in operator mode, discard the jwt
  2069  	if srv != nil && srv.trustedKeys == nil {
  2070  		c.opts.JWT = _EMPTY_
  2071  	}
  2072  	ujwt := c.opts.JWT
  2073  
  2074  	// For headers both client and server need to support.
  2075  	c.headers = supportsHeaders && c.opts.Headers
  2076  	c.mu.Unlock()
  2077  
  2078  	if srv != nil {
  2079  		// Applicable to clients only:
  2080  		// As soon as c.opts is unmarshalled and if the proto is at
  2081  		// least ClientProtoInfo, we need to increment the following counter.
  2082  		// This is decremented when client is removed from the server's
  2083  		// clients map.
  2084  		if kind == CLIENT && proto >= ClientProtoInfo {
  2085  			srv.mu.Lock()
  2086  			srv.cproto++
  2087  			srv.mu.Unlock()
  2088  		}
  2089  
  2090  		// Check for Auth
  2091  		if ok := srv.checkAuthentication(c); !ok {
  2092  			// We may fail here because we reached max limits on an account.
  2093  			if ujwt != _EMPTY_ {
  2094  				c.mu.Lock()
  2095  				acc := c.acc
  2096  				c.mu.Unlock()
  2097  				srv.mu.Lock()
  2098  				tooManyAccCons := acc != nil && acc != srv.gacc
  2099  				srv.mu.Unlock()
  2100  				if tooManyAccCons {
  2101  					return ErrTooManyAccountConnections
  2102  				}
  2103  			}
  2104  			c.authViolation()
  2105  			return ErrAuthentication
  2106  		}
  2107  
  2108  		// Check for Account designation, we used to have this as an optional feature for dynamic
  2109  		// sandbox environments. Now its considered an error.
  2110  		if accountNew || account != _EMPTY_ {
  2111  			c.authViolation()
  2112  			return ErrAuthentication
  2113  		}
  2114  
  2115  		// If no account designation.
  2116  		// Do this only for CLIENT and LEAF connections.
  2117  		if c.acc == nil && (c.kind == CLIENT || c.kind == LEAF) {
  2118  			// By default register with the global account.
  2119  			c.registerWithAccount(srv.globalAccount())
  2120  		}
  2121  	}
  2122  
  2123  	switch kind {
  2124  	case CLIENT:
  2125  		// Check client protocol request if it exists.
  2126  		if proto < ClientProtoZero || proto > ClientProtoInfo {
  2127  			c.sendErr(ErrBadClientProtocol.Error())
  2128  			c.closeConnection(BadClientProtocolVersion)
  2129  			return ErrBadClientProtocol
  2130  		}
  2131  		// Check to see that if no_responders is requested
  2132  		// they have header support on as well.
  2133  		c.mu.Lock()
  2134  		misMatch := c.opts.NoResponders && !c.headers
  2135  		c.mu.Unlock()
  2136  		if misMatch {
  2137  			c.sendErr(ErrNoRespondersRequiresHeaders.Error())
  2138  			c.closeConnection(NoRespondersRequiresHeaders)
  2139  			return ErrNoRespondersRequiresHeaders
  2140  		}
  2141  		if verbose {
  2142  			c.sendOK()
  2143  		}
  2144  	case ROUTER:
  2145  		// Delegate the rest of processing to the route
  2146  		return c.processRouteConnect(srv, arg, lang)
  2147  	case GATEWAY:
  2148  		// Delegate the rest of processing to the gateway
  2149  		return c.processGatewayConnect(arg)
  2150  	case LEAF:
  2151  		// Delegate the rest of processing to the leaf node
  2152  		return c.processLeafNodeConnect(srv, arg, lang)
  2153  	}
  2154  	return nil
  2155  }
  2156  
  2157  func (c *client) sendErrAndErr(err string) {
  2158  	c.sendErr(err)
  2159  	c.Errorf(err)
  2160  }
  2161  
  2162  func (c *client) sendErrAndDebug(err string) {
  2163  	c.sendErr(err)
  2164  	c.Debugf(err)
  2165  }
  2166  
  2167  func (c *client) authTimeout() {
  2168  	c.sendErrAndDebug("Authentication Timeout")
  2169  	c.closeConnection(AuthenticationTimeout)
  2170  }
  2171  
  2172  func (c *client) authExpired() {
  2173  	c.sendErrAndDebug("User Authentication Expired")
  2174  	c.closeConnection(AuthenticationExpired)
  2175  }
  2176  
  2177  func (c *client) accountAuthExpired() {
  2178  	c.sendErrAndDebug("Account Authentication Expired")
  2179  	c.closeConnection(AuthenticationExpired)
  2180  }
  2181  
  2182  func (c *client) authViolation() {
  2183  	var s *Server
  2184  	var hasTrustedNkeys, hasNkeys, hasUsers bool
  2185  	if s = c.srv; s != nil {
  2186  		s.mu.RLock()
  2187  		hasTrustedNkeys = s.trustedKeys != nil
  2188  		hasNkeys = s.nkeys != nil
  2189  		hasUsers = s.users != nil
  2190  		s.mu.RUnlock()
  2191  		defer s.sendAuthErrorEvent(c)
  2192  	}
  2193  
  2194  	if hasTrustedNkeys {
  2195  		c.Errorf("%v", ErrAuthentication)
  2196  	} else if hasNkeys {
  2197  		c.Errorf("%s - Nkey %q",
  2198  			ErrAuthentication.Error(),
  2199  			c.opts.Nkey)
  2200  	} else if hasUsers {
  2201  		c.Errorf("%s - User %q",
  2202  			ErrAuthentication.Error(),
  2203  			c.opts.Username)
  2204  	} else {
  2205  		if c.srv != nil {
  2206  			c.Errorf(ErrAuthentication.Error())
  2207  		}
  2208  	}
  2209  	if c.isMqtt() {
  2210  		c.mqttEnqueueConnAck(mqttConnAckRCNotAuthorized, false)
  2211  	} else {
  2212  		c.sendErr("Authorization Violation")
  2213  	}
  2214  	c.closeConnection(AuthenticationViolation)
  2215  }
  2216  
  2217  func (c *client) maxAccountConnExceeded() {
  2218  	c.sendErrAndErr(ErrTooManyAccountConnections.Error())
  2219  	c.closeConnection(MaxAccountConnectionsExceeded)
  2220  }
  2221  
  2222  func (c *client) maxConnExceeded() {
  2223  	c.sendErrAndErr(ErrTooManyConnections.Error())
  2224  	c.closeConnection(MaxConnectionsExceeded)
  2225  }
  2226  
  2227  func (c *client) maxSubsExceeded() {
  2228  	if c.acc.shouldLogMaxSubErr() {
  2229  		c.Errorf(ErrTooManySubs.Error())
  2230  	}
  2231  	c.sendErr(ErrTooManySubs.Error())
  2232  }
  2233  
  2234  func (c *client) maxPayloadViolation(sz int, max int32) {
  2235  	c.Errorf("%s: %d vs %d", ErrMaxPayload.Error(), sz, max)
  2236  	c.sendErr("Maximum Payload Violation")
  2237  	c.closeConnection(MaxPayloadExceeded)
  2238  }
  2239  
  2240  // queueOutbound queues data for a clientconnection.
  2241  // Lock should be held.
  2242  func (c *client) queueOutbound(data []byte) {
  2243  	// Do not keep going if closed
  2244  	if c.isClosed() {
  2245  		return
  2246  	}
  2247  
  2248  	// Add to pending bytes total.
  2249  	c.out.pb += int64(len(data))
  2250  
  2251  	// Take a copy of the slice ref so that we can chop bits off the beginning
  2252  	// without affecting the original "data" slice.
  2253  	toBuffer := data
  2254  
  2255  	// All of the queued []byte have a fixed capacity, so if there's a []byte
  2256  	// at the tail of the buffer list that isn't full yet, we should top that
  2257  	// up first. This helps to ensure we aren't pulling more []bytes from the
  2258  	// pool than we need to.
  2259  	if len(c.out.nb) > 0 {
  2260  		last := &c.out.nb[len(c.out.nb)-1]
  2261  		if free := cap(*last) - len(*last); free > 0 {
  2262  			if l := len(toBuffer); l < free {
  2263  				free = l
  2264  			}
  2265  			*last = append(*last, toBuffer[:free]...)
  2266  			toBuffer = toBuffer[free:]
  2267  		}
  2268  	}
  2269  
  2270  	// Now we can push the rest of the data into new []bytes from the pool
  2271  	// in fixed size chunks. This ensures we don't go over the capacity of any
  2272  	// of the buffers and end up reallocating.
  2273  	for len(toBuffer) > 0 {
  2274  		new := nbPoolGet(len(toBuffer))
  2275  		n := copy(new[:cap(new)], toBuffer)
  2276  		c.out.nb = append(c.out.nb, new[:n])
  2277  		toBuffer = toBuffer[n:]
  2278  	}
  2279  
  2280  	// Check for slow consumer via pending bytes limit.
  2281  	// ok to return here, client is going away.
  2282  	if c.kind == CLIENT && c.out.pb > c.out.mp {
  2283  		// Perf wise, it looks like it is faster to optimistically add than
  2284  		// checking current pb+len(data) and then add to pb.
  2285  		c.out.pb -= int64(len(data))
  2286  
  2287  		// Increment the total and client's slow consumer counters.
  2288  		atomic.AddInt64(&c.srv.slowConsumers, 1)
  2289  		c.srv.scStats.clients.Add(1)
  2290  		if c.acc != nil {
  2291  			atomic.AddInt64(&c.acc.slowConsumers, 1)
  2292  		}
  2293  		c.Noticef("Slow Consumer Detected: MaxPending of %d Exceeded", c.out.mp)
  2294  		c.markConnAsClosed(SlowConsumerPendingBytes)
  2295  		return
  2296  	}
  2297  
  2298  	// Check here if we should create a stall channel if we are falling behind.
  2299  	// We do this here since if we wait for consumer's writeLoop it could be
  2300  	// too late with large number of fan in producers.
  2301  	if c.out.pb > c.out.mp/2 && c.out.stc == nil {
  2302  		c.out.stc = make(chan struct{})
  2303  	}
  2304  }
  2305  
  2306  // Assume the lock is held upon entry.
  2307  func (c *client) enqueueProtoAndFlush(proto []byte, doFlush bool) {
  2308  	if c.isClosed() {
  2309  		return
  2310  	}
  2311  	c.queueOutbound(proto)
  2312  	if !(doFlush && c.flushOutbound()) {
  2313  		c.flushSignal()
  2314  	}
  2315  }
  2316  
  2317  // Queues and then flushes the connection. This should only be called when
  2318  // the writeLoop cannot be started yet. Use enqueueProto() otherwise.
  2319  // Lock is held on entry.
  2320  func (c *client) sendProtoNow(proto []byte) {
  2321  	c.enqueueProtoAndFlush(proto, true)
  2322  }
  2323  
  2324  // Enqueues the given protocol and signal the writeLoop if necessary.
  2325  // Lock is held on entry.
  2326  func (c *client) enqueueProto(proto []byte) {
  2327  	c.enqueueProtoAndFlush(proto, false)
  2328  }
  2329  
  2330  // Assume the lock is held upon entry.
  2331  func (c *client) sendPong() {
  2332  	if c.trace {
  2333  		c.traceOutOp("PONG", nil)
  2334  	}
  2335  	c.enqueueProto([]byte(pongProto))
  2336  }
  2337  
  2338  // Used to kick off a RTT measurement for latency tracking.
  2339  func (c *client) sendRTTPing() bool {
  2340  	c.mu.Lock()
  2341  	sent := c.sendRTTPingLocked()
  2342  	c.mu.Unlock()
  2343  	return sent
  2344  }
  2345  
  2346  // Used to kick off a RTT measurement for latency tracking.
  2347  // This is normally called only when the caller has checked that
  2348  // the c.rtt is 0 and wants to force an update by sending a PING.
  2349  // Client lock held on entry.
  2350  func (c *client) sendRTTPingLocked() bool {
  2351  	if c.isMqtt() {
  2352  		return false
  2353  	}
  2354  	// Most client libs send a CONNECT+PING and wait for a PONG from the
  2355  	// server. So if firstPongSent flag is set, it is ok for server to
  2356  	// send the PING. But in case we have client libs that don't do that,
  2357  	// allow the send of the PING if more than 2 secs have elapsed since
  2358  	// the client TCP connection was accepted.
  2359  	if !c.isClosed() &&
  2360  		(c.flags.isSet(firstPongSent) || time.Since(c.start) > maxNoRTTPingBeforeFirstPong) {
  2361  		c.sendPing()
  2362  		return true
  2363  	}
  2364  	return false
  2365  }
  2366  
  2367  // Assume the lock is held upon entry.
  2368  func (c *client) sendPing() {
  2369  	c.rttStart = time.Now().UTC()
  2370  	c.ping.out++
  2371  	if c.trace {
  2372  		c.traceOutOp("PING", nil)
  2373  	}
  2374  	c.enqueueProto([]byte(pingProto))
  2375  }
  2376  
  2377  // Generates the INFO to be sent to the client with the client ID included.
  2378  // info arg will be copied since passed by value.
  2379  // Assume lock is held.
  2380  func (c *client) generateClientInfoJSON(info Info) []byte {
  2381  	info.CID = c.cid
  2382  	info.ClientIP = c.host
  2383  	info.MaxPayload = c.mpay
  2384  	if c.isWebsocket() {
  2385  		info.ClientConnectURLs = info.WSConnectURLs
  2386  		if c.srv != nil { // Otherwise lame duck info can panic
  2387  			c.srv.websocket.mu.RLock()
  2388  			info.TLSAvailable = c.srv.websocket.tls
  2389  			if c.srv.websocket.tls && c.srv.websocket.server != nil {
  2390  				if tc := c.srv.websocket.server.TLSConfig; tc != nil {
  2391  					info.TLSRequired = !tc.InsecureSkipVerify
  2392  				}
  2393  			}
  2394  			if c.srv.websocket.listener != nil {
  2395  				laddr := c.srv.websocket.listener.Addr().String()
  2396  				if h, p, err := net.SplitHostPort(laddr); err == nil {
  2397  					if p, err := strconv.Atoi(p); err == nil {
  2398  						info.Host = h
  2399  						info.Port = p
  2400  					}
  2401  				}
  2402  			}
  2403  			c.srv.websocket.mu.RUnlock()
  2404  		}
  2405  	}
  2406  	info.WSConnectURLs = nil
  2407  	return generateInfoJSON(&info)
  2408  }
  2409  
  2410  func (c *client) sendErr(err string) {
  2411  	c.mu.Lock()
  2412  	if c.trace {
  2413  		c.traceOutOp("-ERR", []byte(err))
  2414  	}
  2415  	if !c.isMqtt() {
  2416  		c.enqueueProto([]byte(fmt.Sprintf(errProto, err)))
  2417  	}
  2418  	c.mu.Unlock()
  2419  }
  2420  
  2421  func (c *client) sendOK() {
  2422  	c.mu.Lock()
  2423  	if c.trace {
  2424  		c.traceOutOp("OK", nil)
  2425  	}
  2426  	c.enqueueProto([]byte(okProto))
  2427  	c.mu.Unlock()
  2428  }
  2429  
  2430  func (c *client) processPing() {
  2431  	c.mu.Lock()
  2432  
  2433  	if c.isClosed() {
  2434  		c.mu.Unlock()
  2435  		return
  2436  	}
  2437  
  2438  	c.sendPong()
  2439  
  2440  	// Record this to suppress us sending one if this
  2441  	// is within a given time interval for activity.
  2442  	c.lastIn = time.Now()
  2443  
  2444  	// If not a CLIENT, we are done. Also the CONNECT should
  2445  	// have been received, but make sure it is so before proceeding
  2446  	if c.kind != CLIENT || !c.flags.isSet(connectReceived) {
  2447  		c.mu.Unlock()
  2448  		return
  2449  	}
  2450  
  2451  	// If we are here, the CONNECT has been received so we know
  2452  	// if this client supports async INFO or not.
  2453  	var (
  2454  		checkInfoChange bool
  2455  		srv             = c.srv
  2456  	)
  2457  	// For older clients, just flip the firstPongSent flag if not already
  2458  	// set and we are done.
  2459  	if c.opts.Protocol < ClientProtoInfo || srv == nil {
  2460  		c.flags.setIfNotSet(firstPongSent)
  2461  	} else {
  2462  		// This is a client that supports async INFO protocols.
  2463  		// If this is the first PING (so firstPongSent is not set yet),
  2464  		// we will need to check if there was a change in cluster topology
  2465  		// or we have a different max payload. We will send this first before
  2466  		// pong since most clients do flush after connect call.
  2467  		checkInfoChange = !c.flags.isSet(firstPongSent)
  2468  	}
  2469  	c.mu.Unlock()
  2470  
  2471  	if checkInfoChange {
  2472  		opts := srv.getOpts()
  2473  		srv.mu.Lock()
  2474  		c.mu.Lock()
  2475  		// Now that we are under both locks, we can flip the flag.
  2476  		// This prevents sendAsyncInfoToClients() and code here to
  2477  		// send a double INFO protocol.
  2478  		c.flags.set(firstPongSent)
  2479  		// If there was a cluster update since this client was created,
  2480  		// send an updated INFO protocol now.
  2481  		if srv.lastCURLsUpdate >= c.start.UnixNano() || c.mpay != int32(opts.MaxPayload) {
  2482  			c.enqueueProto(c.generateClientInfoJSON(srv.copyInfo()))
  2483  		}
  2484  		c.mu.Unlock()
  2485  		srv.mu.Unlock()
  2486  	}
  2487  }
  2488  
  2489  func (c *client) processPong() {
  2490  	c.mu.Lock()
  2491  	c.ping.out = 0
  2492  	c.rtt = computeRTT(c.rttStart)
  2493  	srv := c.srv
  2494  	reorderGWs := c.kind == GATEWAY && c.gw.outbound
  2495  	// If compression is currently active for a route/leaf connection, if the
  2496  	// compression configuration is s2_auto, check if we should change
  2497  	// the compression level.
  2498  	if c.kind == ROUTER && needsCompression(c.route.compression) {
  2499  		c.updateS2AutoCompressionLevel(&srv.getOpts().Cluster.Compression, &c.route.compression)
  2500  	} else if c.kind == LEAF && needsCompression(c.leaf.compression) {
  2501  		var co *CompressionOpts
  2502  		if r := c.leaf.remote; r != nil {
  2503  			co = &r.Compression
  2504  		} else {
  2505  			co = &srv.getOpts().LeafNode.Compression
  2506  		}
  2507  		c.updateS2AutoCompressionLevel(co, &c.leaf.compression)
  2508  	}
  2509  	c.mu.Unlock()
  2510  	if reorderGWs {
  2511  		srv.gateway.orderOutboundConnections()
  2512  	}
  2513  }
  2514  
  2515  // Select the s2 compression level based on the client's current RTT and the configured
  2516  // RTT thresholds slice. If current level is different than selected one, save the
  2517  // new compression level string and create a new s2 writer.
  2518  // Lock held on entry.
  2519  func (c *client) updateS2AutoCompressionLevel(co *CompressionOpts, compression *string) {
  2520  	if co.Mode != CompressionS2Auto {
  2521  		return
  2522  	}
  2523  	if cm := selectS2AutoModeBasedOnRTT(c.rtt, co.RTTThresholds); cm != *compression {
  2524  		*compression = cm
  2525  		c.out.cw = s2.NewWriter(nil, s2WriterOptions(cm)...)
  2526  	}
  2527  }
  2528  
  2529  // Will return the parts from the raw wire msg.
  2530  func (c *client) msgParts(data []byte) (hdr []byte, msg []byte) {
  2531  	if c != nil && c.pa.hdr > 0 {
  2532  		return data[:c.pa.hdr], data[c.pa.hdr:]
  2533  	}
  2534  	return nil, data
  2535  }
  2536  
  2537  // Header pubs take form HPUB <subject> [reply] <hdr_len> <total_len>\r\n
  2538  func (c *client) processHeaderPub(arg, remaining []byte) error {
  2539  	if !c.headers {
  2540  		return ErrMsgHeadersNotSupported
  2541  	}
  2542  
  2543  	// Unroll splitArgs to avoid runtime/heap issues
  2544  	a := [MAX_HPUB_ARGS][]byte{}
  2545  	args := a[:0]
  2546  	start := -1
  2547  	for i, b := range arg {
  2548  		switch b {
  2549  		case ' ', '\t':
  2550  			if start >= 0 {
  2551  				args = append(args, arg[start:i])
  2552  				start = -1
  2553  			}
  2554  		default:
  2555  			if start < 0 {
  2556  				start = i
  2557  			}
  2558  		}
  2559  	}
  2560  	if start >= 0 {
  2561  		args = append(args, arg[start:])
  2562  	}
  2563  
  2564  	c.pa.arg = arg
  2565  	switch len(args) {
  2566  	case 3:
  2567  		c.pa.subject = args[0]
  2568  		c.pa.reply = nil
  2569  		c.pa.hdr = parseSize(args[1])
  2570  		c.pa.size = parseSize(args[2])
  2571  		c.pa.hdb = args[1]
  2572  		c.pa.szb = args[2]
  2573  	case 4:
  2574  		c.pa.subject = args[0]
  2575  		c.pa.reply = args[1]
  2576  		c.pa.hdr = parseSize(args[2])
  2577  		c.pa.size = parseSize(args[3])
  2578  		c.pa.hdb = args[2]
  2579  		c.pa.szb = args[3]
  2580  	default:
  2581  		return fmt.Errorf("processHeaderPub Parse Error: %q", arg)
  2582  	}
  2583  	if c.pa.hdr < 0 {
  2584  		return fmt.Errorf("processHeaderPub Bad or Missing Header Size: %q", arg)
  2585  	}
  2586  	// If number overruns an int64, parseSize() will have returned a negative value
  2587  	if c.pa.size < 0 {
  2588  		return fmt.Errorf("processHeaderPub Bad or Missing Total Size: %q", arg)
  2589  	}
  2590  	if c.pa.hdr > c.pa.size {
  2591  		return fmt.Errorf("processHeaderPub Header Size larger then TotalSize: %q", arg)
  2592  	}
  2593  	maxPayload := atomic.LoadInt32(&c.mpay)
  2594  	// Use int64() to avoid int32 overrun...
  2595  	if maxPayload != jwt.NoLimit && int64(c.pa.size) > int64(maxPayload) {
  2596  		// If we are given the remaining read buffer (since we do blind reads
  2597  		// we may have the beginning of the message header/payload), we will
  2598  		// look for the tracing header and if found, we will generate a
  2599  		// trace event with the max payload ingress error.
  2600  		// Do this only for CLIENT connections.
  2601  		if c.kind == CLIENT && len(remaining) > 0 {
  2602  			if td := getHeader(MsgTraceDest, remaining); len(td) > 0 {
  2603  				c.initAndSendIngressErrEvent(remaining, string(td), ErrMaxPayload)
  2604  			}
  2605  		}
  2606  		c.maxPayloadViolation(c.pa.size, maxPayload)
  2607  		return ErrMaxPayload
  2608  	}
  2609  	if c.opts.Pedantic && !IsValidLiteralSubject(bytesToString(c.pa.subject)) {
  2610  		c.sendErr("Invalid Publish Subject")
  2611  	}
  2612  	return nil
  2613  }
  2614  
  2615  func (c *client) processPub(arg []byte) error {
  2616  	// Unroll splitArgs to avoid runtime/heap issues
  2617  	a := [MAX_PUB_ARGS][]byte{}
  2618  	args := a[:0]
  2619  	start := -1
  2620  	for i, b := range arg {
  2621  		switch b {
  2622  		case ' ', '\t':
  2623  			if start >= 0 {
  2624  				args = append(args, arg[start:i])
  2625  				start = -1
  2626  			}
  2627  		default:
  2628  			if start < 0 {
  2629  				start = i
  2630  			}
  2631  		}
  2632  	}
  2633  	if start >= 0 {
  2634  		args = append(args, arg[start:])
  2635  	}
  2636  
  2637  	c.pa.arg = arg
  2638  	switch len(args) {
  2639  	case 2:
  2640  		c.pa.subject = args[0]
  2641  		c.pa.reply = nil
  2642  		c.pa.size = parseSize(args[1])
  2643  		c.pa.szb = args[1]
  2644  	case 3:
  2645  		c.pa.subject = args[0]
  2646  		c.pa.reply = args[1]
  2647  		c.pa.size = parseSize(args[2])
  2648  		c.pa.szb = args[2]
  2649  	default:
  2650  		return fmt.Errorf("processPub Parse Error: %q", arg)
  2651  	}
  2652  	// If number overruns an int64, parseSize() will have returned a negative value
  2653  	if c.pa.size < 0 {
  2654  		return fmt.Errorf("processPub Bad or Missing Size: %q", arg)
  2655  	}
  2656  	maxPayload := atomic.LoadInt32(&c.mpay)
  2657  	// Use int64() to avoid int32 overrun...
  2658  	if maxPayload != jwt.NoLimit && int64(c.pa.size) > int64(maxPayload) {
  2659  		c.maxPayloadViolation(c.pa.size, maxPayload)
  2660  		return ErrMaxPayload
  2661  	}
  2662  	if c.opts.Pedantic && !IsValidLiteralSubject(bytesToString(c.pa.subject)) {
  2663  		c.sendErr("Invalid Publish Subject")
  2664  	}
  2665  	return nil
  2666  }
  2667  
  2668  func splitArg(arg []byte) [][]byte {
  2669  	a := [MAX_MSG_ARGS][]byte{}
  2670  	args := a[:0]
  2671  	start := -1
  2672  	for i, b := range arg {
  2673  		switch b {
  2674  		case ' ', '\t', '\r', '\n':
  2675  			if start >= 0 {
  2676  				args = append(args, arg[start:i])
  2677  				start = -1
  2678  			}
  2679  		default:
  2680  			if start < 0 {
  2681  				start = i
  2682  			}
  2683  		}
  2684  	}
  2685  	if start >= 0 {
  2686  		args = append(args, arg[start:])
  2687  	}
  2688  	return args
  2689  }
  2690  
  2691  func (c *client) parseSub(argo []byte, noForward bool) error {
  2692  	// Copy so we do not reference a potentially large buffer
  2693  	// FIXME(dlc) - make more efficient.
  2694  	arg := make([]byte, len(argo))
  2695  	copy(arg, argo)
  2696  	args := splitArg(arg)
  2697  	var (
  2698  		subject []byte
  2699  		queue   []byte
  2700  		sid     []byte
  2701  	)
  2702  	switch len(args) {
  2703  	case 2:
  2704  		subject = args[0]
  2705  		queue = nil
  2706  		sid = args[1]
  2707  	case 3:
  2708  		subject = args[0]
  2709  		queue = args[1]
  2710  		sid = args[2]
  2711  	default:
  2712  		return fmt.Errorf("processSub Parse Error: %q", arg)
  2713  	}
  2714  	// If there was an error, it has been sent to the client. We don't return an
  2715  	// error here to not close the connection as a parsing error.
  2716  	c.processSub(subject, queue, sid, nil, noForward)
  2717  	return nil
  2718  }
  2719  
  2720  func (c *client) processSub(subject, queue, bsid []byte, cb msgHandler, noForward bool) (*subscription, error) {
  2721  	return c.processSubEx(subject, queue, bsid, cb, noForward, false, false)
  2722  }
  2723  
  2724  func (c *client) processSubEx(subject, queue, bsid []byte, cb msgHandler, noForward, si, rsi bool) (*subscription, error) {
  2725  	// Create the subscription
  2726  	sub := &subscription{client: c, subject: subject, queue: queue, sid: bsid, icb: cb, si: si, rsi: rsi}
  2727  
  2728  	c.mu.Lock()
  2729  
  2730  	// Indicate activity.
  2731  	c.in.subs++
  2732  
  2733  	// Grab connection type, account and server info.
  2734  	kind := c.kind
  2735  	acc := c.acc
  2736  	srv := c.srv
  2737  
  2738  	sid := bytesToString(sub.sid)
  2739  
  2740  	// This check does not apply to SYSTEM or JETSTREAM or ACCOUNT clients (because they don't have a `nc`...)
  2741  	// When a connection is closed though, we set c.subs to nil. So check for the map to not be nil.
  2742  	if (c.isClosed() && (kind != SYSTEM && kind != JETSTREAM && kind != ACCOUNT)) || (c.subs == nil) {
  2743  		c.mu.Unlock()
  2744  		return nil, ErrConnectionClosed
  2745  	}
  2746  
  2747  	// Check permissions if applicable.
  2748  	if kind == CLIENT {
  2749  		// First do a pass whether queue subscription is valid. This does not necessarily
  2750  		// mean that it will not be able to plain subscribe.
  2751  		//
  2752  		// allow = ["foo"]            -> can subscribe or queue subscribe to foo using any queue
  2753  		// allow = ["foo v1"]         -> can only queue subscribe to 'foo v1', no plain subs allowed.
  2754  		// allow = ["foo", "foo v1"]  -> can subscribe to 'foo' but can only queue subscribe to 'foo v1'
  2755  		//
  2756  		if sub.queue != nil {
  2757  			if !c.canSubscribe(string(sub.subject), string(sub.queue)) || string(sub.queue) == sysGroup {
  2758  				c.mu.Unlock()
  2759  				c.subPermissionViolation(sub)
  2760  				return nil, ErrSubscribePermissionViolation
  2761  			}
  2762  		} else if !c.canSubscribe(string(sub.subject)) {
  2763  			c.mu.Unlock()
  2764  			c.subPermissionViolation(sub)
  2765  			return nil, ErrSubscribePermissionViolation
  2766  		}
  2767  
  2768  		if opts := srv.getOpts(); opts != nil && opts.MaxSubTokens > 0 {
  2769  			if len(bytes.Split(sub.subject, []byte(tsep))) > int(opts.MaxSubTokens) {
  2770  				c.mu.Unlock()
  2771  				c.maxTokensViolation(sub)
  2772  				return nil, ErrTooManySubTokens
  2773  			}
  2774  		}
  2775  	}
  2776  
  2777  	// Check if we have a maximum on the number of subscriptions.
  2778  	if c.subsAtLimit() {
  2779  		c.mu.Unlock()
  2780  		c.maxSubsExceeded()
  2781  		return nil, ErrTooManySubs
  2782  	}
  2783  
  2784  	var updateGWs bool
  2785  	var err error
  2786  
  2787  	// Subscribe here.
  2788  	es := c.subs[sid]
  2789  	if es == nil {
  2790  		c.subs[sid] = sub
  2791  		if acc != nil && acc.sl != nil {
  2792  			err = acc.sl.Insert(sub)
  2793  			if err != nil {
  2794  				delete(c.subs, sid)
  2795  			} else {
  2796  				updateGWs = c.srv.gateway.enabled
  2797  			}
  2798  		}
  2799  	}
  2800  	// Unlocked from here onward
  2801  	c.mu.Unlock()
  2802  
  2803  	if err != nil {
  2804  		c.sendErr("Invalid Subject")
  2805  		return nil, ErrMalformedSubject
  2806  	} else if c.opts.Verbose && kind != SYSTEM {
  2807  		c.sendOK()
  2808  	}
  2809  
  2810  	// If it was already registered, return it.
  2811  	if es != nil {
  2812  		return es, nil
  2813  	}
  2814  
  2815  	// No account just return.
  2816  	if acc == nil {
  2817  		return sub, nil
  2818  	}
  2819  
  2820  	if err := c.addShadowSubscriptions(acc, sub, true); err != nil {
  2821  		c.Errorf(err.Error())
  2822  	}
  2823  
  2824  	if noForward {
  2825  		return sub, nil
  2826  	}
  2827  
  2828  	// If we are routing and this is a local sub, add to the route map for the associated account.
  2829  	if kind == CLIENT || kind == SYSTEM || kind == JETSTREAM || kind == ACCOUNT {
  2830  		srv.updateRouteSubscriptionMap(acc, sub, 1)
  2831  		if updateGWs {
  2832  			srv.gatewayUpdateSubInterest(acc.Name, sub, 1)
  2833  		}
  2834  	}
  2835  	// Now check on leafnode updates.
  2836  	acc.updateLeafNodes(sub, 1)
  2837  	return sub, nil
  2838  }
  2839  
  2840  // Used to pass stream import matches to addShadowSub
  2841  type ime struct {
  2842  	im          *streamImport
  2843  	overlapSubj string
  2844  	dyn         bool
  2845  }
  2846  
  2847  // If the client's account has stream imports and there are matches for this
  2848  // subscription's subject, then add shadow subscriptions in the other accounts
  2849  // that export this subject.
  2850  //
  2851  // enact=false allows MQTT clients to get the list of shadow subscriptions
  2852  // without enacting them, in order to first obtain matching "retained" messages.
  2853  func (c *client) addShadowSubscriptions(acc *Account, sub *subscription, enact bool) error {
  2854  	if acc == nil {
  2855  		return ErrMissingAccount
  2856  	}
  2857  
  2858  	var (
  2859  		_ims           [16]ime
  2860  		ims            = _ims[:0]
  2861  		imTsa          [32]string
  2862  		tokens         []string
  2863  		tsa            [32]string
  2864  		hasWC          bool
  2865  		tokensModified bool
  2866  	)
  2867  
  2868  	acc.mu.RLock()
  2869  	// If this is from a service import, ignore.
  2870  	if sub.si {
  2871  		acc.mu.RUnlock()
  2872  		return nil
  2873  	}
  2874  	subj := bytesToString(sub.subject)
  2875  	if len(acc.imports.streams) > 0 {
  2876  		tokens = tokenizeSubjectIntoSlice(tsa[:0], subj)
  2877  		for _, tk := range tokens {
  2878  			if tk == pwcs {
  2879  				hasWC = true
  2880  				break
  2881  			}
  2882  		}
  2883  		if !hasWC && tokens[len(tokens)-1] == fwcs {
  2884  			hasWC = true
  2885  		}
  2886  	}
  2887  	// Loop over the import subjects. We have 4 scenarios. If we have an
  2888  	// exact match or a superset match we should use the from field from
  2889  	// the import. If we are a subset or overlap, we have to dynamically calculate
  2890  	// the subject. On overlap, ime requires the overlap subject.
  2891  	for _, im := range acc.imports.streams {
  2892  		if im.invalid {
  2893  			continue
  2894  		}
  2895  		if subj == im.to {
  2896  			ims = append(ims, ime{im, _EMPTY_, false})
  2897  			continue
  2898  		}
  2899  		if tokensModified {
  2900  			// re-tokenize subj to overwrite modifications from a previous iteration
  2901  			tokens = tokenizeSubjectIntoSlice(tsa[:0], subj)
  2902  			tokensModified = false
  2903  		}
  2904  		imTokens := tokenizeSubjectIntoSlice(imTsa[:0], im.to)
  2905  
  2906  		if isSubsetMatchTokenized(tokens, imTokens) {
  2907  			ims = append(ims, ime{im, _EMPTY_, true})
  2908  		} else if hasWC {
  2909  			if isSubsetMatchTokenized(imTokens, tokens) {
  2910  				ims = append(ims, ime{im, _EMPTY_, false})
  2911  			} else {
  2912  				imTokensLen := len(imTokens)
  2913  				for i, t := range tokens {
  2914  					if i >= imTokensLen {
  2915  						break
  2916  					}
  2917  					if t == pwcs && imTokens[i] != fwcs {
  2918  						tokens[i] = imTokens[i]
  2919  						tokensModified = true
  2920  					}
  2921  				}
  2922  				tokensLen := len(tokens)
  2923  				lastIdx := tokensLen - 1
  2924  				if tokens[lastIdx] == fwcs {
  2925  					if imTokensLen >= tokensLen {
  2926  						// rewrite ">" in tokens to be more specific
  2927  						tokens[lastIdx] = imTokens[lastIdx]
  2928  						tokensModified = true
  2929  						if imTokensLen > tokensLen {
  2930  							// copy even more specific parts from import
  2931  							tokens = append(tokens, imTokens[tokensLen:]...)
  2932  						}
  2933  					}
  2934  				}
  2935  				if isSubsetMatchTokenized(tokens, imTokens) {
  2936  					// As isSubsetMatchTokenized was already called with tokens and imTokens,
  2937  					// we wouldn't be here if it where not for tokens being modified.
  2938  					// Hence, Join to re compute the subject string
  2939  					ims = append(ims, ime{im, strings.Join(tokens, tsep), true})
  2940  				}
  2941  			}
  2942  		}
  2943  	}
  2944  	acc.mu.RUnlock()
  2945  
  2946  	var shadow []*subscription
  2947  
  2948  	if len(ims) > 0 {
  2949  		shadow = make([]*subscription, 0, len(ims))
  2950  	}
  2951  
  2952  	// Now walk through collected stream imports that matched.
  2953  	for i := 0; i < len(ims); i++ {
  2954  		ime := &ims[i]
  2955  		// We will create a shadow subscription.
  2956  		nsub, err := c.addShadowSub(sub, ime, enact)
  2957  		if err != nil {
  2958  			return err
  2959  		}
  2960  		shadow = append(shadow, nsub)
  2961  	}
  2962  
  2963  	if shadow != nil {
  2964  		c.mu.Lock()
  2965  		sub.shadow = shadow
  2966  		c.mu.Unlock()
  2967  	}
  2968  
  2969  	return nil
  2970  }
  2971  
  2972  // Add in the shadow subscription.
  2973  func (c *client) addShadowSub(sub *subscription, ime *ime, enact bool) (*subscription, error) {
  2974  	im := ime.im
  2975  	nsub := *sub // copy
  2976  	nsub.im = im
  2977  
  2978  	if !im.usePub && ime.dyn && im.tr != nil {
  2979  		if im.rtr == nil {
  2980  			im.rtr = im.tr.reverse()
  2981  		}
  2982  		s := bytesToString(nsub.subject)
  2983  		if ime.overlapSubj != _EMPTY_ {
  2984  			s = ime.overlapSubj
  2985  		}
  2986  		subj := im.rtr.TransformSubject(s)
  2987  
  2988  		nsub.subject = []byte(subj)
  2989  	} else if !im.usePub || (im.usePub && ime.overlapSubj != _EMPTY_) || !ime.dyn {
  2990  		if ime.overlapSubj != _EMPTY_ {
  2991  			nsub.subject = []byte(ime.overlapSubj)
  2992  		} else {
  2993  			nsub.subject = []byte(im.from)
  2994  		}
  2995  	}
  2996  	// Else use original subject
  2997  
  2998  	if !enact {
  2999  		return &nsub, nil
  3000  	}
  3001  
  3002  	c.Debugf("Creating import subscription on %q from account %q", nsub.subject, im.acc.Name)
  3003  
  3004  	if err := im.acc.sl.Insert(&nsub); err != nil {
  3005  		errs := fmt.Sprintf("Could not add shadow import subscription for account %q", im.acc.Name)
  3006  		c.Debugf(errs)
  3007  		return nil, fmt.Errorf(errs)
  3008  	}
  3009  
  3010  	// Update our route map here.
  3011  	c.srv.updateRemoteSubscription(im.acc, &nsub, 1)
  3012  
  3013  	return &nsub, nil
  3014  }
  3015  
  3016  // canSubscribe determines if the client is authorized to subscribe to the
  3017  // given subject. Assumes caller is holding lock.
  3018  func (c *client) canSubscribe(subject string, optQueue ...string) bool {
  3019  	if c.perms == nil {
  3020  		return true
  3021  	}
  3022  
  3023  	allowed := true
  3024  
  3025  	// Optional queue group.
  3026  	var queue string
  3027  	if len(optQueue) > 0 {
  3028  		queue = optQueue[0]
  3029  	}
  3030  
  3031  	// Check allow list. If no allow list that means all are allowed. Deny can overrule.
  3032  	if c.perms.sub.allow != nil {
  3033  		r := c.perms.sub.allow.Match(subject)
  3034  		allowed = len(r.psubs) > 0
  3035  		if queue != _EMPTY_ && len(r.qsubs) > 0 {
  3036  			// If the queue appears in the allow list, then DO allow.
  3037  			allowed = queueMatches(queue, r.qsubs)
  3038  		}
  3039  		// Leafnodes operate slightly differently in that they allow broader scoped subjects.
  3040  		// They will prune based on publish perms before sending to a leafnode client.
  3041  		if !allowed && c.kind == LEAF && subjectHasWildcard(subject) {
  3042  			r := c.perms.sub.allow.ReverseMatch(subject)
  3043  			allowed = len(r.psubs) != 0
  3044  		}
  3045  	}
  3046  	// If we have a deny list and we think we are allowed, check that as well.
  3047  	if allowed && c.perms.sub.deny != nil {
  3048  		r := c.perms.sub.deny.Match(subject)
  3049  		allowed = len(r.psubs) == 0
  3050  
  3051  		if queue != _EMPTY_ && len(r.qsubs) > 0 {
  3052  			// If the queue appears in the deny list, then DO NOT allow.
  3053  			allowed = !queueMatches(queue, r.qsubs)
  3054  		}
  3055  
  3056  		// We use the actual subscription to signal us to spin up the deny mperms
  3057  		// and cache. We check if the subject is a wildcard that contains any of
  3058  		// the deny clauses.
  3059  		// FIXME(dlc) - We could be smarter and track when these go away and remove.
  3060  		if allowed && c.mperms == nil && subjectHasWildcard(subject) {
  3061  			// Whip through the deny array and check if this wildcard subject is within scope.
  3062  			for _, sub := range c.darray {
  3063  				if subjectIsSubsetMatch(sub, subject) {
  3064  					c.loadMsgDenyFilter()
  3065  					break
  3066  				}
  3067  			}
  3068  		}
  3069  	}
  3070  	return allowed
  3071  }
  3072  
  3073  func queueMatches(queue string, qsubs [][]*subscription) bool {
  3074  	if len(qsubs) == 0 {
  3075  		return true
  3076  	}
  3077  	for _, qsub := range qsubs {
  3078  		qs := qsub[0]
  3079  		qname := bytesToString(qs.queue)
  3080  
  3081  		// NOTE: '*' and '>' tokens can also be valid
  3082  		// queue names so we first check against the
  3083  		// literal name.  e.g. v1.* == v1.*
  3084  		if queue == qname || (subjectHasWildcard(qname) && subjectIsSubsetMatch(queue, qname)) {
  3085  			return true
  3086  		}
  3087  	}
  3088  	return false
  3089  }
  3090  
  3091  // Low level unsubscribe for a given client.
  3092  func (c *client) unsubscribe(acc *Account, sub *subscription, force, remove bool) {
  3093  	if s := c.srv; s != nil && s.isShuttingDown() {
  3094  		return
  3095  	}
  3096  
  3097  	c.mu.Lock()
  3098  	if !force && sub.max > 0 && sub.nm < sub.max {
  3099  		c.Debugf("Deferring actual UNSUB(%s): %d max, %d received", sub.subject, sub.max, sub.nm)
  3100  		c.mu.Unlock()
  3101  		return
  3102  	}
  3103  
  3104  	if c.trace {
  3105  		c.traceOp("<-> %s", "DELSUB", sub.sid)
  3106  	}
  3107  
  3108  	// Remove accounting if requested. This will be false when we close a connection
  3109  	// with open subscriptions.
  3110  	if remove {
  3111  		delete(c.subs, bytesToString(sub.sid))
  3112  		if acc != nil {
  3113  			acc.sl.Remove(sub)
  3114  		}
  3115  	}
  3116  
  3117  	// Check to see if we have shadow subscriptions.
  3118  	var updateRoute bool
  3119  	var updateGWs bool
  3120  	shadowSubs := sub.shadow
  3121  	sub.shadow = nil
  3122  	if len(shadowSubs) > 0 {
  3123  		updateRoute = (c.kind == CLIENT || c.kind == SYSTEM || c.kind == LEAF) && c.srv != nil
  3124  		if updateRoute {
  3125  			updateGWs = c.srv.gateway.enabled
  3126  		}
  3127  	}
  3128  	sub.close()
  3129  	c.mu.Unlock()
  3130  
  3131  	// Process shadow subs if we have them.
  3132  	for _, nsub := range shadowSubs {
  3133  		if err := nsub.im.acc.sl.Remove(nsub); err != nil {
  3134  			c.Debugf("Could not remove shadow import subscription for account %q", nsub.im.acc.Name)
  3135  		} else {
  3136  			if updateRoute {
  3137  				c.srv.updateRouteSubscriptionMap(nsub.im.acc, nsub, -1)
  3138  			}
  3139  			if updateGWs {
  3140  				c.srv.gatewayUpdateSubInterest(nsub.im.acc.Name, nsub, -1)
  3141  			}
  3142  		}
  3143  		// Now check on leafnode updates.
  3144  		nsub.im.acc.updateLeafNodes(nsub, -1)
  3145  	}
  3146  
  3147  	// Now check to see if this was part of a respMap entry for service imports.
  3148  	// We can skip subscriptions on reserved replies.
  3149  	if acc != nil && !isReservedReply(sub.subject) {
  3150  		acc.checkForReverseEntry(string(sub.subject), nil, true)
  3151  	}
  3152  }
  3153  
  3154  func (c *client) processUnsub(arg []byte) error {
  3155  	args := splitArg(arg)
  3156  	var sid []byte
  3157  	max := int64(-1)
  3158  
  3159  	switch len(args) {
  3160  	case 1:
  3161  		sid = args[0]
  3162  	case 2:
  3163  		sid = args[0]
  3164  		max = int64(parseSize(args[1]))
  3165  	default:
  3166  		return fmt.Errorf("processUnsub Parse Error: %q", arg)
  3167  	}
  3168  
  3169  	var sub *subscription
  3170  	var ok, unsub bool
  3171  
  3172  	c.mu.Lock()
  3173  
  3174  	// Indicate activity.
  3175  	c.in.subs++
  3176  
  3177  	// Grab connection type.
  3178  	kind := c.kind
  3179  	srv := c.srv
  3180  	var acc *Account
  3181  
  3182  	updateGWs := false
  3183  	if sub, ok = c.subs[string(sid)]; ok {
  3184  		acc = c.acc
  3185  		if max > 0 && max > sub.nm {
  3186  			sub.max = max
  3187  		} else {
  3188  			// Clear it here to override
  3189  			sub.max = 0
  3190  			unsub = true
  3191  		}
  3192  		updateGWs = srv.gateway.enabled
  3193  	}
  3194  	c.mu.Unlock()
  3195  
  3196  	if c.opts.Verbose {
  3197  		c.sendOK()
  3198  	}
  3199  
  3200  	if unsub {
  3201  		c.unsubscribe(acc, sub, false, true)
  3202  		if acc != nil && (kind == CLIENT || kind == SYSTEM || kind == ACCOUNT || kind == JETSTREAM) {
  3203  			srv.updateRouteSubscriptionMap(acc, sub, -1)
  3204  			if updateGWs {
  3205  				srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
  3206  			}
  3207  		}
  3208  		// Now check on leafnode updates.
  3209  		acc.updateLeafNodes(sub, -1)
  3210  	}
  3211  
  3212  	return nil
  3213  }
  3214  
  3215  // checkDenySub will check if we are allowed to deliver this message in the
  3216  // presence of deny clauses for subscriptions. Deny clauses will not prevent
  3217  // larger scoped wildcard subscriptions, so we need to check at delivery time.
  3218  // Lock should be held.
  3219  func (c *client) checkDenySub(subject string) bool {
  3220  	if denied, ok := c.mperms.dcache[subject]; ok {
  3221  		return denied
  3222  	} else if r := c.mperms.deny.Match(subject); len(r.psubs) != 0 {
  3223  		c.mperms.dcache[subject] = true
  3224  		return true
  3225  	} else {
  3226  		c.mperms.dcache[subject] = false
  3227  	}
  3228  	if len(c.mperms.dcache) > maxDenyPermCacheSize {
  3229  		c.pruneDenyCache()
  3230  	}
  3231  	return false
  3232  }
  3233  
  3234  // Create a message header for routes or leafnodes. Header and origin cluster aware.
  3235  func (c *client) msgHeaderForRouteOrLeaf(subj, reply []byte, rt *routeTarget, acc *Account) []byte {
  3236  	hasHeader := c.pa.hdr > 0
  3237  	subclient := rt.sub.client
  3238  	canReceiveHeader := subclient.headers
  3239  
  3240  	mh := c.msgb[:msgHeadProtoLen]
  3241  	kind := subclient.kind
  3242  	var lnoc bool
  3243  
  3244  	if kind == ROUTER {
  3245  		// If we are coming from a leaf with an origin cluster we need to handle differently
  3246  		// if we can. We will send a route based LMSG which has origin cluster and headers
  3247  		// by default.
  3248  		if c.kind == LEAF && c.remoteCluster() != _EMPTY_ {
  3249  			subclient.mu.Lock()
  3250  			lnoc = subclient.route.lnoc
  3251  			subclient.mu.Unlock()
  3252  		}
  3253  		if lnoc {
  3254  			mh[0] = 'L'
  3255  			mh = append(mh, c.remoteCluster()...)
  3256  			mh = append(mh, ' ')
  3257  		} else {
  3258  			// Router (and Gateway) nodes are RMSG. Set here since leafnodes may rewrite.
  3259  			mh[0] = 'R'
  3260  		}
  3261  		if len(subclient.route.accName) == 0 {
  3262  			mh = append(mh, acc.Name...)
  3263  			mh = append(mh, ' ')
  3264  		}
  3265  	} else {
  3266  		// Leaf nodes are LMSG
  3267  		mh[0] = 'L'
  3268  		// Remap subject if its a shadow subscription, treat like a normal client.
  3269  		if rt.sub.im != nil {
  3270  			if rt.sub.im.tr != nil {
  3271  				to := rt.sub.im.tr.TransformSubject(bytesToString(subj))
  3272  				subj = []byte(to)
  3273  			} else if !rt.sub.im.usePub {
  3274  				subj = []byte(rt.sub.im.to)
  3275  			}
  3276  		}
  3277  	}
  3278  	mh = append(mh, subj...)
  3279  	mh = append(mh, ' ')
  3280  
  3281  	if len(rt.qs) > 0 {
  3282  		if len(reply) > 0 {
  3283  			mh = append(mh, "+ "...) // Signal that there is a reply.
  3284  			mh = append(mh, reply...)
  3285  			mh = append(mh, ' ')
  3286  		} else {
  3287  			mh = append(mh, "| "...) // Only queues
  3288  		}
  3289  		mh = append(mh, rt.qs...)
  3290  	} else if len(reply) > 0 {
  3291  		mh = append(mh, reply...)
  3292  		mh = append(mh, ' ')
  3293  	}
  3294  
  3295  	if lnoc {
  3296  		// leafnode origin LMSG always have a header entry even if zero.
  3297  		if c.pa.hdr <= 0 {
  3298  			mh = append(mh, '0')
  3299  		} else {
  3300  			mh = append(mh, c.pa.hdb...)
  3301  		}
  3302  		mh = append(mh, ' ')
  3303  		mh = append(mh, c.pa.szb...)
  3304  	} else if hasHeader {
  3305  		if canReceiveHeader {
  3306  			mh[0] = 'H'
  3307  			mh = append(mh, c.pa.hdb...)
  3308  			mh = append(mh, ' ')
  3309  			mh = append(mh, c.pa.szb...)
  3310  		} else {
  3311  			// If we are here we need to truncate the payload size
  3312  			nsz := strconv.Itoa(c.pa.size - c.pa.hdr)
  3313  			mh = append(mh, nsz...)
  3314  		}
  3315  	} else {
  3316  		mh = append(mh, c.pa.szb...)
  3317  	}
  3318  	return append(mh, _CRLF_...)
  3319  }
  3320  
  3321  // Create a message header for clients. Header aware.
  3322  func (c *client) msgHeader(subj, reply []byte, sub *subscription) []byte {
  3323  	// See if we should do headers. We have to have a headers msg and
  3324  	// the client we are going to deliver to needs to support headers as well.
  3325  	hasHeader := c.pa.hdr > 0
  3326  	canReceiveHeader := sub.client != nil && sub.client.headers
  3327  
  3328  	var mh []byte
  3329  	if hasHeader && canReceiveHeader {
  3330  		mh = c.msgb[:msgHeadProtoLen]
  3331  		mh[0] = 'H'
  3332  	} else {
  3333  		mh = c.msgb[1:msgHeadProtoLen]
  3334  	}
  3335  	mh = append(mh, subj...)
  3336  	mh = append(mh, ' ')
  3337  
  3338  	if len(sub.sid) > 0 {
  3339  		mh = append(mh, sub.sid...)
  3340  		mh = append(mh, ' ')
  3341  	}
  3342  	if reply != nil {
  3343  		mh = append(mh, reply...)
  3344  		mh = append(mh, ' ')
  3345  	}
  3346  	if hasHeader {
  3347  		if canReceiveHeader {
  3348  			mh = append(mh, c.pa.hdb...)
  3349  			mh = append(mh, ' ')
  3350  			mh = append(mh, c.pa.szb...)
  3351  		} else {
  3352  			// If we are here we need to truncate the payload size
  3353  			nsz := strconv.Itoa(c.pa.size - c.pa.hdr)
  3354  			mh = append(mh, nsz...)
  3355  		}
  3356  	} else {
  3357  		mh = append(mh, c.pa.szb...)
  3358  	}
  3359  	mh = append(mh, _CRLF_...)
  3360  	return mh
  3361  }
  3362  
  3363  func (c *client) stalledWait(producer *client) {
  3364  	stall := c.out.stc
  3365  	ttl := stallDuration(c.out.pb, c.out.mp)
  3366  	c.mu.Unlock()
  3367  	defer c.mu.Lock()
  3368  
  3369  	delay := time.NewTimer(ttl)
  3370  	defer delay.Stop()
  3371  
  3372  	select {
  3373  	case <-stall:
  3374  	case <-delay.C:
  3375  		producer.Debugf("Timed out of fast producer stall (%v)", ttl)
  3376  	}
  3377  }
  3378  
  3379  func stallDuration(pb, mp int64) time.Duration {
  3380  	ttl := stallClientMinDuration
  3381  	if pb >= mp {
  3382  		ttl = stallClientMaxDuration
  3383  	} else if hmp := mp / 2; pb > hmp {
  3384  		bsz := hmp / 10
  3385  		additional := int64(ttl) * ((pb - hmp) / bsz)
  3386  		ttl += time.Duration(additional)
  3387  	}
  3388  	return ttl
  3389  }
  3390  
  3391  // Used to treat maps as efficient set
  3392  var needFlush = struct{}{}
  3393  
  3394  // deliverMsg will deliver a message to a matching subscription and its underlying client.
  3395  // We process all connection/client types. mh is the part that will be protocol/client specific.
  3396  func (c *client) deliverMsg(prodIsMQTT bool, sub *subscription, acc *Account, subject, reply, mh, msg []byte, gwrply bool) bool {
  3397  	// Check if message tracing is enabled.
  3398  	mt, traceOnly := c.isMsgTraceEnabled()
  3399  
  3400  	client := sub.client
  3401  	// Check sub client and check echo. Only do this if not a service import.
  3402  	if client == nil || (c == client && !client.echo && !sub.si) {
  3403  		if client != nil && mt != nil {
  3404  			client.mu.Lock()
  3405  			mt.addEgressEvent(client, sub, errMsgTraceNoEcho)
  3406  			client.mu.Unlock()
  3407  		}
  3408  		return false
  3409  	}
  3410  
  3411  	client.mu.Lock()
  3412  
  3413  	// Check if we have a subscribe deny clause. This will trigger us to check the subject
  3414  	// for a match against the denied subjects.
  3415  	if client.mperms != nil && client.checkDenySub(string(subject)) {
  3416  		mt.addEgressEvent(client, sub, errMsgTraceSubDeny)
  3417  		client.mu.Unlock()
  3418  		return false
  3419  	}
  3420  
  3421  	// New race detector forces this now.
  3422  	if sub.isClosed() {
  3423  		mt.addEgressEvent(client, sub, errMsgTraceSubClosed)
  3424  		client.mu.Unlock()
  3425  		return false
  3426  	}
  3427  
  3428  	// Check if we are a leafnode and have perms to check.
  3429  	if client.kind == LEAF && client.perms != nil {
  3430  		if !client.pubAllowedFullCheck(string(subject), true, true) {
  3431  			mt.addEgressEvent(client, sub, errMsgTracePubViolation)
  3432  			client.mu.Unlock()
  3433  			client.Debugf("Not permitted to deliver to %q", subject)
  3434  			return false
  3435  		}
  3436  	}
  3437  
  3438  	var mtErr string
  3439  	if mt != nil {
  3440  		// For non internal subscription, and if the remote does not support
  3441  		// the tracing feature...
  3442  		if sub.icb == nil && !client.msgTraceSupport() {
  3443  			if traceOnly {
  3444  				// We are not sending the message at all because the user
  3445  				// expects a trace-only and the remote does not support
  3446  				// tracing, which means that it would process/deliver this
  3447  				// message, which may break applications.
  3448  				// Add the Egress with the no-support error message.
  3449  				mt.addEgressEvent(client, sub, errMsgTraceOnlyNoSupport)
  3450  				client.mu.Unlock()
  3451  				return false
  3452  			}
  3453  			// If we are doing delivery, we will still forward the message,
  3454  			// but we add an error to the Egress event to hint that one should
  3455  			// not expect a tracing event from that remote.
  3456  			mtErr = errMsgTraceNoSupport
  3457  		}
  3458  		// For ROUTER, GATEWAY and LEAF, even if we intend to do tracing only,
  3459  		// we will still deliver the message. The remote side will
  3460  		// generate an event based on what happened on that server.
  3461  		if traceOnly && (client.kind == ROUTER || client.kind == GATEWAY || client.kind == LEAF) {
  3462  			traceOnly = false
  3463  		}
  3464  		// If we skip delivery and this is not for a service import, we are done.
  3465  		if traceOnly && (sub.icb == nil || c.noIcb) {
  3466  			mt.addEgressEvent(client, sub, _EMPTY_)
  3467  			client.mu.Unlock()
  3468  			// Although the message is not actually delivered, for the
  3469  			// purpose of "didDeliver", we need to return "true" here.
  3470  			return true
  3471  		}
  3472  	}
  3473  
  3474  	srv := client.srv
  3475  
  3476  	// We don't want to bump the number of delivered messages to the subscription
  3477  	// if we are doing trace-only (since really we are not sending it to the sub).
  3478  	if !traceOnly {
  3479  		sub.nm++
  3480  	}
  3481  
  3482  	// Check if we should auto-unsubscribe.
  3483  	if sub.max > 0 {
  3484  		if client.kind == ROUTER && sub.nm >= sub.max {
  3485  			// The only router based messages that we will see here are remoteReplies.
  3486  			// We handle these slightly differently.
  3487  			defer client.removeReplySub(sub)
  3488  		} else {
  3489  			// For routing..
  3490  			shouldForward := client.kind == CLIENT || client.kind == SYSTEM && client.srv != nil
  3491  			// If we are at the exact number, unsubscribe but
  3492  			// still process the message in hand, otherwise
  3493  			// unsubscribe and drop message on the floor.
  3494  			if sub.nm == sub.max {
  3495  				client.Debugf("Auto-unsubscribe limit of %d reached for sid '%s'", sub.max, sub.sid)
  3496  				// Due to defer, reverse the code order so that execution
  3497  				// is consistent with other cases where we unsubscribe.
  3498  				if shouldForward {
  3499  					defer srv.updateRemoteSubscription(client.acc, sub, -1)
  3500  				}
  3501  				defer client.unsubscribe(client.acc, sub, true, true)
  3502  			} else if sub.nm > sub.max {
  3503  				client.Debugf("Auto-unsubscribe limit [%d] exceeded", sub.max)
  3504  				mt.addEgressEvent(client, sub, errMsgTraceAutoSubExceeded)
  3505  				client.mu.Unlock()
  3506  				client.unsubscribe(client.acc, sub, true, true)
  3507  				if shouldForward {
  3508  					srv.updateRemoteSubscription(client.acc, sub, -1)
  3509  				}
  3510  				return false
  3511  			}
  3512  		}
  3513  	}
  3514  
  3515  	// Check here if we have a header with our message. If this client can not
  3516  	// support we need to strip the headers from the payload.
  3517  	// The actual header would have been processed correctly for us, so just
  3518  	// need to update payload.
  3519  	if c.pa.hdr > 0 && !sub.client.headers {
  3520  		msg = msg[c.pa.hdr:]
  3521  	}
  3522  
  3523  	// Update statistics
  3524  
  3525  	// The msg includes the CR_LF, so pull back out for accounting.
  3526  	msgSize := int64(len(msg))
  3527  	// MQTT producers send messages without CR_LF, so don't remove it for them.
  3528  	if !prodIsMQTT {
  3529  		msgSize -= int64(LEN_CR_LF)
  3530  	}
  3531  
  3532  	// We do not update the outbound stats if we are doing trace only since
  3533  	// this message will not be sent out.
  3534  	if !traceOnly {
  3535  		// No atomic needed since accessed under client lock.
  3536  		// Monitor is reading those also under client's lock.
  3537  		client.outMsgs++
  3538  		client.outBytes += msgSize
  3539  	}
  3540  
  3541  	// Check for internal subscriptions.
  3542  	if sub.icb != nil && !c.noIcb {
  3543  		if gwrply {
  3544  			// We will store in the account, not the client since it will likely
  3545  			// be a different client that will send the reply.
  3546  			srv.trackGWReply(nil, client.acc, reply, c.pa.reply)
  3547  		}
  3548  		client.mu.Unlock()
  3549  
  3550  		// Internal account clients are for service imports and need the '\r\n'.
  3551  		start := time.Now()
  3552  		if client.kind == ACCOUNT {
  3553  			sub.icb(sub, c, acc, string(subject), string(reply), msg)
  3554  		} else {
  3555  			sub.icb(sub, c, acc, string(subject), string(reply), msg[:msgSize])
  3556  		}
  3557  		if dur := time.Since(start); dur >= readLoopReportThreshold {
  3558  			srv.Warnf("Internal subscription on %q took too long: %v", subject, dur)
  3559  		}
  3560  		return true
  3561  	}
  3562  
  3563  	// If we are a client and we detect that the consumer we are
  3564  	// sending to is in a stalled state, go ahead and wait here
  3565  	// with a limit.
  3566  	if c.kind == CLIENT && client.out.stc != nil {
  3567  		client.stalledWait(c)
  3568  	}
  3569  
  3570  	// Check for closed connection
  3571  	if client.isClosed() {
  3572  		mt.addEgressEvent(client, sub, errMsgTraceClientClosed)
  3573  		client.mu.Unlock()
  3574  		return false
  3575  	}
  3576  
  3577  	// We have passed cases where we could possibly fail to deliver.
  3578  	// Do not call for service-import.
  3579  	if mt != nil && sub.icb == nil {
  3580  		mt.addEgressEvent(client, sub, mtErr)
  3581  	}
  3582  
  3583  	// Do a fast check here to see if we should be tracking this from a latency
  3584  	// perspective. This will be for a request being received for an exported service.
  3585  	// This needs to be from a non-client (otherwise tracking happens at requestor).
  3586  	//
  3587  	// Also this check captures if the original reply (c.pa.reply) is a GW routed
  3588  	// reply (since it is known to be > minReplyLen). If that is the case, we need to
  3589  	// track the binding between the routed reply and the reply set in the message
  3590  	// header (which is c.pa.reply without the GNR routing prefix).
  3591  	if client.kind == CLIENT && len(c.pa.reply) > minReplyLen {
  3592  		if gwrply {
  3593  			// Note that we keep track of the GW routed reply in the destination
  3594  			// connection (`client`). The routed reply subject is in `c.pa.reply`,
  3595  			// should that change, we would have to pass the GW routed reply as
  3596  			// a parameter of deliverMsg().
  3597  			srv.trackGWReply(client, nil, reply, c.pa.reply)
  3598  		}
  3599  
  3600  		// If we do not have a registered RTT queue that up now.
  3601  		if client.rtt == 0 {
  3602  			client.sendRTTPingLocked()
  3603  		}
  3604  		// FIXME(dlc) - We may need to optimize this.
  3605  		// We will have tagged this with a suffix ('.T') if we are tracking. This is
  3606  		// needed from sampling. Not all will be tracked.
  3607  		if c.kind != CLIENT && isTrackedReply(c.pa.reply) {
  3608  			client.trackRemoteReply(string(subject), string(c.pa.reply))
  3609  		}
  3610  	}
  3611  
  3612  	// Queue to outbound buffer
  3613  	client.queueOutbound(mh)
  3614  	client.queueOutbound(msg)
  3615  	if prodIsMQTT {
  3616  		// Need to add CR_LF since MQTT producers don't send CR_LF
  3617  		client.queueOutbound([]byte(CR_LF))
  3618  	}
  3619  
  3620  	// If we are tracking dynamic publish permissions that track reply subjects,
  3621  	// do that accounting here. We only look at client.replies which will be non-nil.
  3622  	if client.replies != nil && len(reply) > 0 {
  3623  		client.replies[string(reply)] = &resp{time.Now(), 0}
  3624  		if len(client.replies) > replyPermLimit {
  3625  			client.pruneReplyPerms()
  3626  		}
  3627  	}
  3628  
  3629  	// Check outbound threshold and queue IO flush if needed.
  3630  	// This is specifically looking at situations where we are getting behind and may want
  3631  	// to intervene before this producer goes back to top of readloop. We are in the producer's
  3632  	// readloop go routine at this point.
  3633  	// FIXME(dlc) - We may call this alot, maybe suppress after first call?
  3634  	if len(client.out.nb) != 0 {
  3635  		client.flushSignal()
  3636  	}
  3637  
  3638  	// Add the data size we are responsible for here. This will be processed when we
  3639  	// return to the top of the readLoop.
  3640  	c.addToPCD(client)
  3641  
  3642  	if client.trace {
  3643  		client.traceOutOp(bytesToString(mh[:len(mh)-LEN_CR_LF]), nil)
  3644  	}
  3645  
  3646  	client.mu.Unlock()
  3647  
  3648  	return true
  3649  }
  3650  
  3651  // Add the given sub's client to the list of clients that need flushing.
  3652  // This must be invoked from `c`'s readLoop. No lock for c is required,
  3653  // however, `client` lock must be held on entry. This holds true even
  3654  // if `client` is same than `c`.
  3655  func (c *client) addToPCD(client *client) {
  3656  	if _, ok := c.pcd[client]; !ok {
  3657  		client.out.fsp++
  3658  		c.pcd[client] = needFlush
  3659  	}
  3660  }
  3661  
  3662  // This will track a remote reply for an exported service that has requested
  3663  // latency tracking.
  3664  // Lock assumed to be held.
  3665  func (c *client) trackRemoteReply(subject, reply string) {
  3666  	a := c.acc
  3667  	if a == nil {
  3668  		return
  3669  	}
  3670  
  3671  	var lrt time.Duration
  3672  	var respThresh time.Duration
  3673  
  3674  	a.mu.RLock()
  3675  	se := a.getServiceExport(subject)
  3676  	if se != nil {
  3677  		lrt = a.lowestServiceExportResponseTime()
  3678  		respThresh = se.respThresh
  3679  	}
  3680  	a.mu.RUnlock()
  3681  
  3682  	if se == nil {
  3683  		return
  3684  	}
  3685  
  3686  	if c.rrTracking == nil {
  3687  		c.rrTracking = &rrTracking{
  3688  			rmap: make(map[string]*remoteLatency),
  3689  			ptmr: time.AfterFunc(lrt, c.pruneRemoteTracking),
  3690  			lrt:  lrt,
  3691  		}
  3692  	}
  3693  	rl := remoteLatency{
  3694  		Account:    a.Name,
  3695  		ReqId:      reply,
  3696  		respThresh: respThresh,
  3697  	}
  3698  	rl.M2.RequestStart = time.Now().UTC()
  3699  	c.rrTracking.rmap[reply] = &rl
  3700  }
  3701  
  3702  // pruneRemoteTracking will prune any remote tracking objects
  3703  // that are too old. These are orphaned when a service is not
  3704  // sending reponses etc.
  3705  // Lock should be held upon entry.
  3706  func (c *client) pruneRemoteTracking() {
  3707  	c.mu.Lock()
  3708  	if c.rrTracking == nil {
  3709  		c.mu.Unlock()
  3710  		return
  3711  	}
  3712  	now := time.Now()
  3713  	for subject, rl := range c.rrTracking.rmap {
  3714  		if now.After(rl.M2.RequestStart.Add(rl.respThresh)) {
  3715  			delete(c.rrTracking.rmap, subject)
  3716  		}
  3717  	}
  3718  	if len(c.rrTracking.rmap) > 0 {
  3719  		t := c.rrTracking.ptmr
  3720  		t.Stop()
  3721  		t.Reset(c.rrTracking.lrt)
  3722  	} else {
  3723  		c.rrTracking.ptmr.Stop()
  3724  		c.rrTracking = nil
  3725  	}
  3726  	c.mu.Unlock()
  3727  }
  3728  
  3729  // pruneReplyPerms will remove any stale or expired entries
  3730  // in our reply cache. We make sure to not check too often.
  3731  func (c *client) pruneReplyPerms() {
  3732  	// Make sure we do not check too often.
  3733  	if c.perms.resp == nil {
  3734  		return
  3735  	}
  3736  
  3737  	mm := c.perms.resp.MaxMsgs
  3738  	ttl := c.perms.resp.Expires
  3739  	now := time.Now()
  3740  
  3741  	for k, resp := range c.replies {
  3742  		if mm > 0 && resp.n >= mm {
  3743  			delete(c.replies, k)
  3744  		} else if ttl > 0 && now.Sub(resp.t) > ttl {
  3745  			delete(c.replies, k)
  3746  		}
  3747  	}
  3748  }
  3749  
  3750  // pruneDenyCache will prune the deny cache via randomly
  3751  // deleting items. Doing so pruneSize items at a time.
  3752  // Lock must be held for this one since it is shared under
  3753  // deliverMsg.
  3754  func (c *client) pruneDenyCache() {
  3755  	r := 0
  3756  	for subject := range c.mperms.dcache {
  3757  		delete(c.mperms.dcache, subject)
  3758  		if r++; r > pruneSize {
  3759  			break
  3760  		}
  3761  	}
  3762  }
  3763  
  3764  // prunePubPermsCache will prune the cache via randomly
  3765  // deleting items. Doing so pruneSize items at a time.
  3766  func (c *client) prunePubPermsCache() {
  3767  	// There is a case where we can invoke this from multiple go routines,
  3768  	// (in deliverMsg() if sub.client is a LEAF), so we make sure to prune
  3769  	// from only one go routine at a time.
  3770  	if !atomic.CompareAndSwapInt32(&c.perms.prun, 0, 1) {
  3771  		return
  3772  	}
  3773  	const maxPruneAtOnce = 1000
  3774  	r := 0
  3775  	c.perms.pcache.Range(func(k, _ any) bool {
  3776  		c.perms.pcache.Delete(k)
  3777  		if r++; (r > pruneSize && atomic.LoadInt32(&c.perms.pcsz) < int32(maxPermCacheSize)) ||
  3778  			(r > maxPruneAtOnce) {
  3779  			return false
  3780  		}
  3781  		return true
  3782  	})
  3783  	atomic.AddInt32(&c.perms.pcsz, -int32(r))
  3784  	atomic.StoreInt32(&c.perms.prun, 0)
  3785  }
  3786  
  3787  // pubAllowed checks on publish permissioning.
  3788  // Lock should not be held.
  3789  func (c *client) pubAllowed(subject string) bool {
  3790  	return c.pubAllowedFullCheck(subject, true, false)
  3791  }
  3792  
  3793  // pubAllowedFullCheck checks on all publish permissioning depending
  3794  // on the flag for dynamic reply permissions.
  3795  func (c *client) pubAllowedFullCheck(subject string, fullCheck, hasLock bool) bool {
  3796  	if c.perms == nil || (c.perms.pub.allow == nil && c.perms.pub.deny == nil) {
  3797  		return true
  3798  	}
  3799  	// Check if published subject is allowed if we have permissions in place.
  3800  	v, ok := c.perms.pcache.Load(subject)
  3801  	if ok {
  3802  		return v.(bool)
  3803  	}
  3804  	allowed := true
  3805  	// Cache miss, check allow then deny as needed.
  3806  	if c.perms.pub.allow != nil {
  3807  		r := c.perms.pub.allow.Match(subject)
  3808  		allowed = len(r.psubs) != 0
  3809  	}
  3810  	// If we have a deny list and are currently allowed, check that as well.
  3811  	if allowed && c.perms.pub.deny != nil {
  3812  		r := c.perms.pub.deny.Match(subject)
  3813  		allowed = len(r.psubs) == 0
  3814  	}
  3815  
  3816  	// If we are currently not allowed but we are tracking reply subjects
  3817  	// dynamically, check to see if we are allowed here but avoid pcache.
  3818  	// We need to acquire the lock though.
  3819  	if !allowed && fullCheck && c.perms.resp != nil {
  3820  		if !hasLock {
  3821  			c.mu.Lock()
  3822  		}
  3823  		if resp := c.replies[subject]; resp != nil {
  3824  			resp.n++
  3825  			// Check if we have sent too many responses.
  3826  			if c.perms.resp.MaxMsgs > 0 && resp.n > c.perms.resp.MaxMsgs {
  3827  				delete(c.replies, subject)
  3828  			} else if c.perms.resp.Expires > 0 && time.Since(resp.t) > c.perms.resp.Expires {
  3829  				delete(c.replies, subject)
  3830  			} else {
  3831  				allowed = true
  3832  			}
  3833  		}
  3834  		if !hasLock {
  3835  			c.mu.Unlock()
  3836  		}
  3837  	} else {
  3838  		// Update our cache here.
  3839  		c.perms.pcache.Store(subject, allowed)
  3840  		if n := atomic.AddInt32(&c.perms.pcsz, 1); n > maxPermCacheSize {
  3841  			c.prunePubPermsCache()
  3842  		}
  3843  	}
  3844  	return allowed
  3845  }
  3846  
  3847  // Test whether a reply subject is a service import reply.
  3848  func isServiceReply(reply []byte) bool {
  3849  	// This function is inlined and checking this way is actually faster
  3850  	// than byte-by-byte comparison.
  3851  	return len(reply) > 3 && bytesToString(reply[:4]) == replyPrefix
  3852  }
  3853  
  3854  // Test whether a reply subject is a service import or a gateway routed reply.
  3855  func isReservedReply(reply []byte) bool {
  3856  	if isServiceReply(reply) {
  3857  		return true
  3858  	}
  3859  	rLen := len(reply)
  3860  	// Faster to check with string([:]) than byte-by-byte
  3861  	if rLen > jsAckPreLen && bytesToString(reply[:jsAckPreLen]) == jsAckPre {
  3862  		return true
  3863  	} else if rLen > gwReplyPrefixLen && bytesToString(reply[:gwReplyPrefixLen]) == gwReplyPrefix {
  3864  		return true
  3865  	}
  3866  	return false
  3867  }
  3868  
  3869  // This will decide to call the client code or router code.
  3870  func (c *client) processInboundMsg(msg []byte) {
  3871  	switch c.kind {
  3872  	case CLIENT:
  3873  		c.processInboundClientMsg(msg)
  3874  	case ROUTER:
  3875  		c.processInboundRoutedMsg(msg)
  3876  	case GATEWAY:
  3877  		c.processInboundGatewayMsg(msg)
  3878  	case LEAF:
  3879  		c.processInboundLeafMsg(msg)
  3880  	}
  3881  }
  3882  
  3883  // selectMappedSubject will choose the mapped subject based on the client's inbound subject.
  3884  func (c *client) selectMappedSubject() bool {
  3885  	nsubj, changed := c.acc.selectMappedSubject(bytesToString(c.pa.subject))
  3886  	if changed {
  3887  		c.pa.mapped = c.pa.subject
  3888  		c.pa.subject = []byte(nsubj)
  3889  	}
  3890  	return changed
  3891  }
  3892  
  3893  // processInboundClientMsg is called to process an inbound msg from a client.
  3894  // Return if the message was delivered, and if the message was not delivered
  3895  // due to a permission issue.
  3896  func (c *client) processInboundClientMsg(msg []byte) (bool, bool) {
  3897  	// Update statistics
  3898  	// The msg includes the CR_LF, so pull back out for accounting.
  3899  	c.in.msgs++
  3900  	c.in.bytes += int32(len(msg) - LEN_CR_LF)
  3901  
  3902  	// Check that client (could be here with SYSTEM) is not publishing on reserved "$GNR" prefix.
  3903  	if c.kind == CLIENT && hasGWRoutedReplyPrefix(c.pa.subject) {
  3904  		c.pubPermissionViolation(c.pa.subject)
  3905  		return false, true
  3906  	}
  3907  
  3908  	// Mostly under testing scenarios.
  3909  	c.mu.Lock()
  3910  	if c.srv == nil || c.acc == nil {
  3911  		c.mu.Unlock()
  3912  		return false, false
  3913  	}
  3914  	acc := c.acc
  3915  	genidAddr := &acc.sl.genid
  3916  
  3917  	// Check pub permissions
  3918  	if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) && !c.pubAllowedFullCheck(string(c.pa.subject), true, true) {
  3919  		c.mu.Unlock()
  3920  		c.pubPermissionViolation(c.pa.subject)
  3921  		return false, true
  3922  	}
  3923  	c.mu.Unlock()
  3924  
  3925  	// Now check for reserved replies. These are used for service imports.
  3926  	if c.kind == CLIENT && len(c.pa.reply) > 0 && isReservedReply(c.pa.reply) {
  3927  		c.replySubjectViolation(c.pa.reply)
  3928  		return false, true
  3929  	}
  3930  
  3931  	if c.opts.Verbose {
  3932  		c.sendOK()
  3933  	}
  3934  
  3935  	// If MQTT client, check for retain flag now that we have passed permissions check
  3936  	if c.isMqtt() {
  3937  		c.mqttHandlePubRetain()
  3938  	}
  3939  
  3940  	// Doing this inline as opposed to create a function (which otherwise has a measured
  3941  	// performance impact reported in our bench)
  3942  	var isGWRouted bool
  3943  	if c.kind != CLIENT {
  3944  		if atomic.LoadInt32(&acc.gwReplyMapping.check) > 0 {
  3945  			acc.mu.RLock()
  3946  			c.pa.subject, isGWRouted = acc.gwReplyMapping.get(c.pa.subject)
  3947  			acc.mu.RUnlock()
  3948  		}
  3949  	} else if atomic.LoadInt32(&c.gwReplyMapping.check) > 0 {
  3950  		c.mu.Lock()
  3951  		c.pa.subject, isGWRouted = c.gwReplyMapping.get(c.pa.subject)
  3952  		c.mu.Unlock()
  3953  	}
  3954  
  3955  	// If we have an exported service and we are doing remote tracking, check this subject
  3956  	// to see if we need to report the latency.
  3957  	if c.rrTracking != nil {
  3958  		c.mu.Lock()
  3959  		rl := c.rrTracking.rmap[string(c.pa.subject)]
  3960  		if rl != nil {
  3961  			delete(c.rrTracking.rmap, bytesToString(c.pa.subject))
  3962  		}
  3963  		c.mu.Unlock()
  3964  
  3965  		if rl != nil {
  3966  			sl := &rl.M2
  3967  			// Fill this in and send it off to the other side.
  3968  			sl.Status = 200
  3969  			sl.Responder = c.getClientInfo(true)
  3970  			sl.ServiceLatency = time.Since(sl.RequestStart) - sl.Responder.RTT
  3971  			sl.TotalLatency = sl.ServiceLatency + sl.Responder.RTT
  3972  			sanitizeLatencyMetric(sl)
  3973  			lsub := remoteLatencySubjectForResponse(c.pa.subject)
  3974  			c.srv.sendInternalAccountMsg(nil, lsub, rl) // Send to SYS account
  3975  		}
  3976  	}
  3977  
  3978  	// If the subject was converted to the gateway routed subject, then handle it now
  3979  	// and be done with the rest of this function.
  3980  	if isGWRouted {
  3981  		c.handleGWReplyMap(msg)
  3982  		return true, false
  3983  	}
  3984  
  3985  	// Match the subscriptions. We will use our own L1 map if
  3986  	// it's still valid, avoiding contention on the shared sublist.
  3987  	var r *SublistResult
  3988  	var ok bool
  3989  
  3990  	genid := atomic.LoadUint64(genidAddr)
  3991  	if genid == c.in.genid && c.in.results != nil {
  3992  		r, ok = c.in.results[string(c.pa.subject)]
  3993  	} else {
  3994  		// Reset our L1 completely.
  3995  		c.in.results = make(map[string]*SublistResult)
  3996  		c.in.genid = genid
  3997  	}
  3998  
  3999  	// Go back to the sublist data structure.
  4000  	if !ok {
  4001  		// Match may use the subject here to populate a cache, so can not use bytesToString here.
  4002  		r = acc.sl.Match(string(c.pa.subject))
  4003  		if len(r.psubs)+len(r.qsubs) > 0 {
  4004  			c.in.results[string(c.pa.subject)] = r
  4005  			// Prune the results cache. Keeps us from unbounded growth. Random delete.
  4006  			if len(c.in.results) > maxResultCacheSize {
  4007  				n := 0
  4008  				for subject := range c.in.results {
  4009  					delete(c.in.results, subject)
  4010  					if n++; n > pruneSize {
  4011  						break
  4012  					}
  4013  				}
  4014  			}
  4015  		}
  4016  	}
  4017  
  4018  	// Indication if we attempted to deliver the message to anyone.
  4019  	var didDeliver bool
  4020  	var qnames [][]byte
  4021  
  4022  	// Check for no interest, short circuit if so.
  4023  	// This is the fanout scale.
  4024  	if len(r.psubs)+len(r.qsubs) > 0 {
  4025  		flag := pmrNoFlag
  4026  		// If there are matching queue subs and we are in gateway mode,
  4027  		// we need to keep track of the queue names the messages are
  4028  		// delivered to. When sending to the GWs, the RMSG will include
  4029  		// those names so that the remote clusters do not deliver messages
  4030  		// to their queue subs of the same names.
  4031  		if len(r.qsubs) > 0 && c.srv.gateway.enabled &&
  4032  			atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 {
  4033  			flag |= pmrCollectQueueNames
  4034  		}
  4035  		didDeliver, qnames = c.processMsgResults(acc, r, msg, c.pa.deliver, c.pa.subject, c.pa.reply, flag)
  4036  	}
  4037  
  4038  	// Now deal with gateways
  4039  	if c.srv.gateway.enabled {
  4040  		reply := c.pa.reply
  4041  		if len(c.pa.deliver) > 0 && c.kind == JETSTREAM && len(c.pa.reply) > 0 {
  4042  			reply = append(reply, '@')
  4043  			reply = append(reply, c.pa.deliver...)
  4044  		}
  4045  		didDeliver = c.sendMsgToGateways(acc, msg, c.pa.subject, reply, qnames) || didDeliver
  4046  	}
  4047  
  4048  	// Check to see if we did not deliver to anyone and the client has a reply subject set
  4049  	// and wants notification of no_responders.
  4050  	if !didDeliver && len(c.pa.reply) > 0 {
  4051  		c.mu.Lock()
  4052  		if c.opts.NoResponders {
  4053  			if sub := c.subForReply(c.pa.reply); sub != nil {
  4054  				proto := fmt.Sprintf("HMSG %s %s 16 16\r\nNATS/1.0 503\r\n\r\n\r\n", c.pa.reply, sub.sid)
  4055  				c.queueOutbound([]byte(proto))
  4056  				c.addToPCD(c)
  4057  			}
  4058  		}
  4059  		c.mu.Unlock()
  4060  	}
  4061  
  4062  	return didDeliver, false
  4063  }
  4064  
  4065  // Return the subscription for this reply subject. Only look at normal subs for this client.
  4066  func (c *client) subForReply(reply []byte) *subscription {
  4067  	r := c.acc.sl.Match(string(reply))
  4068  	for _, sub := range r.psubs {
  4069  		if sub.client == c {
  4070  			return sub
  4071  		}
  4072  	}
  4073  	return nil
  4074  }
  4075  
  4076  // This is invoked knowing that c.pa.subject has been set to the gateway routed subject.
  4077  // This function will send the message to possibly LEAFs and directly back to the origin
  4078  // gateway.
  4079  func (c *client) handleGWReplyMap(msg []byte) bool {
  4080  	// Check for leaf nodes
  4081  	if c.srv.gwLeafSubs.Count() > 0 {
  4082  		if r := c.srv.gwLeafSubs.Match(string(c.pa.subject)); len(r.psubs) > 0 {
  4083  			c.processMsgResults(c.acc, r, msg, c.pa.deliver, c.pa.subject, c.pa.reply, pmrNoFlag)
  4084  		}
  4085  	}
  4086  	if c.srv.gateway.enabled {
  4087  		reply := c.pa.reply
  4088  		if len(c.pa.deliver) > 0 && c.kind == JETSTREAM && len(c.pa.reply) > 0 {
  4089  			reply = append(reply, '@')
  4090  			reply = append(reply, c.pa.deliver...)
  4091  		}
  4092  		c.sendMsgToGateways(c.acc, msg, c.pa.subject, reply, nil)
  4093  	}
  4094  	return true
  4095  }
  4096  
  4097  // Used to setup the response map for a service import request that has a reply subject.
  4098  func (c *client) setupResponseServiceImport(acc *Account, si *serviceImport, tracking bool, header http.Header) *serviceImport {
  4099  	rsi := si.acc.addRespServiceImport(acc, string(c.pa.reply), si, tracking, header)
  4100  	if si.latency != nil {
  4101  		if c.rtt == 0 {
  4102  			// We have a service import that we are tracking but have not established RTT.
  4103  			c.sendRTTPing()
  4104  		}
  4105  		si.acc.mu.Lock()
  4106  		rsi.rc = c
  4107  		si.acc.mu.Unlock()
  4108  	}
  4109  	return rsi
  4110  }
  4111  
  4112  // Will remove a header if present.
  4113  func removeHeaderIfPresent(hdr []byte, key string) []byte {
  4114  	start := bytes.Index(hdr, []byte(key))
  4115  	// key can't be first and we want to check that it is preceded by a '\n'
  4116  	if start < 1 || hdr[start-1] != '\n' {
  4117  		return hdr
  4118  	}
  4119  	index := start + len(key)
  4120  	if index >= len(hdr) || hdr[index] != ':' {
  4121  		return hdr
  4122  	}
  4123  	end := bytes.Index(hdr[start:], []byte(_CRLF_))
  4124  	if end < 0 {
  4125  		return hdr
  4126  	}
  4127  	hdr = append(hdr[:start], hdr[start+end+len(_CRLF_):]...)
  4128  	if len(hdr) <= len(emptyHdrLine) {
  4129  		return nil
  4130  	}
  4131  	return hdr
  4132  }
  4133  
  4134  func removeHeaderIfPrefixPresent(hdr []byte, prefix string) []byte {
  4135  	var index int
  4136  	for {
  4137  		if index >= len(hdr) {
  4138  			return hdr
  4139  		}
  4140  
  4141  		start := bytes.Index(hdr[index:], []byte(prefix))
  4142  		if start < 0 {
  4143  			return hdr
  4144  		}
  4145  		index += start
  4146  		if index < 1 || hdr[index-1] != '\n' {
  4147  			return hdr
  4148  		}
  4149  
  4150  		end := bytes.Index(hdr[index+len(prefix):], []byte(_CRLF_))
  4151  		if end < 0 {
  4152  			return hdr
  4153  		}
  4154  
  4155  		hdr = append(hdr[:index], hdr[index+end+len(prefix)+len(_CRLF_):]...)
  4156  		if len(hdr) <= len(emptyHdrLine) {
  4157  			return nil
  4158  		}
  4159  	}
  4160  }
  4161  
  4162  // Generate a new header based on optional original header and key value.
  4163  // More used in JetStream layers.
  4164  func genHeader(hdr []byte, key, value string) []byte {
  4165  	var bb bytes.Buffer
  4166  	if len(hdr) > LEN_CR_LF {
  4167  		bb.Write(hdr[:len(hdr)-LEN_CR_LF])
  4168  	} else {
  4169  		bb.WriteString(hdrLine)
  4170  	}
  4171  	http.Header{key: []string{value}}.Write(&bb)
  4172  	bb.WriteString(CR_LF)
  4173  	return bb.Bytes()
  4174  }
  4175  
  4176  // This will set a header for the message.
  4177  // Lock does not need to be held but this should only be called
  4178  // from the inbound go routine. We will update the pubArgs.
  4179  // This will replace any previously set header and not add to it per normal spec.
  4180  func (c *client) setHeader(key, value string, msg []byte) []byte {
  4181  	var bb bytes.Buffer
  4182  	var omi int
  4183  	// Write original header if present.
  4184  	if c.pa.hdr > LEN_CR_LF {
  4185  		omi = c.pa.hdr
  4186  		hdr := removeHeaderIfPresent(msg[:c.pa.hdr-LEN_CR_LF], key)
  4187  		if len(hdr) == 0 {
  4188  			bb.WriteString(hdrLine)
  4189  		} else {
  4190  			bb.Write(hdr)
  4191  		}
  4192  	} else {
  4193  		bb.WriteString(hdrLine)
  4194  	}
  4195  	http.Header{key: []string{value}}.Write(&bb)
  4196  	bb.WriteString(CR_LF)
  4197  	nhdr := bb.Len()
  4198  	// Put the original message back.
  4199  	// FIXME(dlc) - This is inefficient.
  4200  	bb.Write(msg[omi:])
  4201  	nsize := bb.Len() - LEN_CR_LF
  4202  	// MQTT producers don't have CRLF, so add it back.
  4203  	if c.isMqtt() {
  4204  		nsize += LEN_CR_LF
  4205  	}
  4206  	// Update pubArgs
  4207  	// If others will use this later we need to save and restore original.
  4208  	c.pa.hdr = nhdr
  4209  	c.pa.size = nsize
  4210  	c.pa.hdb = []byte(strconv.Itoa(nhdr))
  4211  	c.pa.szb = []byte(strconv.Itoa(nsize))
  4212  	return bb.Bytes()
  4213  }
  4214  
  4215  // Will return the value for the header denoted by key or nil if it does not exists.
  4216  // This function ignores errors and tries to achieve speed and no additional allocations.
  4217  func getHeader(key string, hdr []byte) []byte {
  4218  	if len(hdr) == 0 {
  4219  		return nil
  4220  	}
  4221  	index := bytes.Index(hdr, []byte(key))
  4222  	hdrLen := len(hdr)
  4223  	// Check that we have enough characters, this will handle the -1 case of the key not
  4224  	// being found and will also handle not having enough characters for trailing CRLF.
  4225  	if index < 2 {
  4226  		return nil
  4227  	}
  4228  	// There should be a terminating CRLF.
  4229  	if index >= hdrLen-1 || hdr[index-1] != '\n' || hdr[index-2] != '\r' {
  4230  		return nil
  4231  	}
  4232  	// The key should be immediately followed by a : separator.
  4233  	index += len(key) + 1
  4234  	if index >= hdrLen || hdr[index-1] != ':' {
  4235  		return nil
  4236  	}
  4237  	// Skip over whitespace before the value.
  4238  	for index < hdrLen && hdr[index] == ' ' {
  4239  		index++
  4240  	}
  4241  	// Collect together the rest of the value until we hit a CRLF.
  4242  	var value []byte
  4243  	for index < hdrLen {
  4244  		if hdr[index] == '\r' && index < hdrLen-1 && hdr[index+1] == '\n' {
  4245  			break
  4246  		}
  4247  		value = append(value, hdr[index])
  4248  		index++
  4249  	}
  4250  	return value
  4251  }
  4252  
  4253  // For bytes.HasPrefix below.
  4254  var (
  4255  	jsRequestNextPreB = []byte(jsRequestNextPre)
  4256  	jsDirectGetPreB   = []byte(jsDirectGetPre)
  4257  )
  4258  
  4259  // processServiceImport is an internal callback when a subscription matches an imported service
  4260  // from another account. This includes response mappings as well.
  4261  func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byte) {
  4262  	// If we are a GW and this is not a direct serviceImport ignore.
  4263  	isResponse := si.isRespServiceImport()
  4264  	if (c.kind == GATEWAY || c.kind == ROUTER) && !isResponse {
  4265  		return
  4266  	}
  4267  	// Detect cycles and ignore (return) when we detect one.
  4268  	if len(c.pa.psi) > 0 {
  4269  		for i := len(c.pa.psi) - 1; i >= 0; i-- {
  4270  			if psi := c.pa.psi[i]; psi.se == si.se {
  4271  				return
  4272  			}
  4273  		}
  4274  	}
  4275  
  4276  	acc.mu.RLock()
  4277  	var checkJS bool
  4278  	shouldReturn := si.invalid || acc.sl == nil
  4279  	if !shouldReturn && !isResponse && si.to == jsAllAPI {
  4280  		if bytes.HasPrefix(c.pa.subject, jsDirectGetPreB) || bytes.HasPrefix(c.pa.subject, jsRequestNextPreB) {
  4281  			checkJS = true
  4282  		}
  4283  	}
  4284  	siAcc := si.acc
  4285  	allowTrace := si.atrc
  4286  	acc.mu.RUnlock()
  4287  
  4288  	// We have a special case where JetStream pulls in all service imports through one export.
  4289  	// However the GetNext for consumers and DirectGet for streams are a no-op and causes buildups of service imports,
  4290  	// response service imports and rrMap entries which all will need to simply expire.
  4291  	// TODO(dlc) - Come up with something better.
  4292  	if shouldReturn || (checkJS && si.se != nil && si.se.acc == c.srv.SystemAccount()) {
  4293  		return
  4294  	}
  4295  
  4296  	mt, traceOnly := c.isMsgTraceEnabled()
  4297  
  4298  	var nrr []byte
  4299  	var rsi *serviceImport
  4300  
  4301  	// Check if there is a reply present and set up a response.
  4302  	tracking, headers := shouldSample(si.latency, c)
  4303  	if len(c.pa.reply) > 0 {
  4304  		// Special case for now, need to formalize.
  4305  		// TODO(dlc) - Formalize as a service import option for reply rewrite.
  4306  		// For now we can't do $JS.ACK since that breaks pull consumers across accounts.
  4307  		if !bytes.HasPrefix(c.pa.reply, []byte(jsAckPre)) {
  4308  			if rsi = c.setupResponseServiceImport(acc, si, tracking, headers); rsi != nil {
  4309  				nrr = []byte(rsi.from)
  4310  			}
  4311  		} else {
  4312  			// This only happens when we do a pull subscriber that trampolines through another account.
  4313  			// Normally this code is not called.
  4314  			nrr = c.pa.reply
  4315  		}
  4316  	} else if !isResponse && si.latency != nil && tracking {
  4317  		// Check to see if this was a bad request with no reply and we were supposed to be tracking.
  4318  		siAcc.sendBadRequestTrackingLatency(si, c, headers)
  4319  	}
  4320  
  4321  	// Send tracking info here if we are tracking this response.
  4322  	// This is always a response.
  4323  	var didSendTL bool
  4324  	if si.tracking && !si.didDeliver {
  4325  		// Stamp that we attempted delivery.
  4326  		si.didDeliver = true
  4327  		didSendTL = acc.sendTrackingLatency(si, c)
  4328  	}
  4329  
  4330  	// Pick correct "to" subject. If we matched on a wildcard use the literal publish subject.
  4331  	to, subject := si.to, string(c.pa.subject)
  4332  
  4333  	if si.tr != nil {
  4334  		// FIXME(dlc) - This could be slow, may want to look at adding cache to bare transforms?
  4335  		to = si.tr.TransformSubject(subject)
  4336  	} else if si.usePub {
  4337  		to = subject
  4338  	}
  4339  
  4340  	// Copy our pubArg since this gets modified as we process the service import itself.
  4341  	pacopy := c.pa
  4342  
  4343  	// Now check to see if this account has mappings that could affect the service import.
  4344  	// Can't use non-locked trick like in processInboundClientMsg, so just call into selectMappedSubject
  4345  	// so we only lock once.
  4346  	nsubj, changed := siAcc.selectMappedSubject(to)
  4347  	if changed {
  4348  		c.pa.mapped = []byte(to)
  4349  		to = nsubj
  4350  	}
  4351  
  4352  	// Set previous service import to detect chaining.
  4353  	lpsi := len(c.pa.psi)
  4354  	hadPrevSi, share := lpsi > 0, si.share
  4355  	if hadPrevSi {
  4356  		share = c.pa.psi[lpsi-1].share
  4357  	}
  4358  	c.pa.psi = append(c.pa.psi, si)
  4359  
  4360  	// Place our client info for the request in the original message.
  4361  	// This will survive going across routes, etc.
  4362  	if !isResponse {
  4363  		isSysImport := siAcc == c.srv.SystemAccount()
  4364  		var ci *ClientInfo
  4365  		if hadPrevSi && c.pa.hdr >= 0 {
  4366  			var cis ClientInfo
  4367  			if err := json.Unmarshal(getHeader(ClientInfoHdr, msg[:c.pa.hdr]), &cis); err == nil {
  4368  				ci = &cis
  4369  				ci.Service = acc.Name
  4370  				// Check if we are moving into a share details account from a non-shared
  4371  				// and add in server and cluster details.
  4372  				if !share && (si.share || isSysImport) {
  4373  					c.addServerAndClusterInfo(ci)
  4374  				}
  4375  			}
  4376  		} else if c.kind != LEAF || c.pa.hdr < 0 || len(getHeader(ClientInfoHdr, msg[:c.pa.hdr])) == 0 {
  4377  			ci = c.getClientInfo(share)
  4378  			// If we did not share but the imports destination is the system account add in the server and cluster info.
  4379  			if !share && isSysImport {
  4380  				c.addServerAndClusterInfo(ci)
  4381  			}
  4382  		} else if c.kind == LEAF && (si.share || isSysImport) {
  4383  			// We have a leaf header here for ci, augment as above.
  4384  			ci = c.getClientInfo(si.share)
  4385  			if !si.share && isSysImport {
  4386  				c.addServerAndClusterInfo(ci)
  4387  			}
  4388  		}
  4389  		// Set clientInfo if present.
  4390  		if ci != nil {
  4391  			if b, _ := json.Marshal(ci); b != nil {
  4392  				msg = c.setHeader(ClientInfoHdr, bytesToString(b), msg)
  4393  			}
  4394  		}
  4395  	}
  4396  
  4397  	// Set our optional subject(to) and reply.
  4398  	if !isResponse && to != subject {
  4399  		c.pa.subject = []byte(to)
  4400  	}
  4401  	c.pa.reply = nrr
  4402  
  4403  	if changed && c.isMqtt() && c.pa.hdr > 0 {
  4404  		c.srv.mqttStoreQoSMsgForAccountOnNewSubject(c.pa.hdr, msg, siAcc.GetName(), to)
  4405  	}
  4406  
  4407  	// FIXME(dlc) - Do L1 cache trick like normal client?
  4408  	rr := siAcc.sl.Match(to)
  4409  
  4410  	// If we are a route or gateway or leafnode and this message is flipped to a queue subscriber we
  4411  	// need to handle that since the processMsgResults will want a queue filter.
  4412  	flags := pmrMsgImportedFromService
  4413  	if c.kind == GATEWAY || c.kind == ROUTER || c.kind == LEAF {
  4414  		flags |= pmrIgnoreEmptyQueueFilter
  4415  	}
  4416  
  4417  	// We will be calling back into processMsgResults since we are now being called as a normal sub.
  4418  	// We need to take care of the c.in.rts, so save off what is there and use a local version. We
  4419  	// will put back what was there after.
  4420  
  4421  	orts := c.in.rts
  4422  
  4423  	var lrts [routeTargetInit]routeTarget
  4424  	c.in.rts = lrts[:0]
  4425  
  4426  	var skipProcessing bool
  4427  	// If message tracing enabled, add the service import trace.
  4428  	if mt != nil {
  4429  		mt.addServiceImportEvent(siAcc.GetName(), string(pacopy.subject), to)
  4430  		// If we are not allowing tracing and doing trace only, we stop at this level.
  4431  		if !allowTrace {
  4432  			if traceOnly {
  4433  				skipProcessing = true
  4434  			} else {
  4435  				// We are going to do normal processing, and possibly chainning
  4436  				// with other server imports, but the rest won't be traced.
  4437  				// We do so by setting the c.pa.trace to nil (it will be restored
  4438  				// with c.pa = pacopy).
  4439  				c.pa.trace = nil
  4440  				// We also need to disable the message trace headers so that
  4441  				// if the message is routed, it does not initialize tracing in the
  4442  				// remote.
  4443  				positions := disableTraceHeaders(c, msg)
  4444  				defer enableTraceHeaders(msg, positions)
  4445  			}
  4446  		}
  4447  	}
  4448  
  4449  	var didDeliver bool
  4450  
  4451  	if !skipProcessing {
  4452  		// If this is not a gateway connection but gateway is enabled,
  4453  		// try to send this converted message to all gateways.
  4454  		if c.srv.gateway.enabled {
  4455  			flags |= pmrCollectQueueNames
  4456  			var queues [][]byte
  4457  			didDeliver, queues = c.processMsgResults(siAcc, rr, msg, c.pa.deliver, []byte(to), nrr, flags)
  4458  			didDeliver = c.sendMsgToGateways(siAcc, msg, []byte(to), nrr, queues) || didDeliver
  4459  		} else {
  4460  			didDeliver, _ = c.processMsgResults(siAcc, rr, msg, c.pa.deliver, []byte(to), nrr, flags)
  4461  		}
  4462  	}
  4463  
  4464  	// Restore to original values.
  4465  	c.in.rts = orts
  4466  	c.pa = pacopy
  4467  
  4468  	// If this was a message trace but we skip last-mile delivery, we need to
  4469  	// do the remove, so:
  4470  	if mt != nil && traceOnly && didDeliver {
  4471  		didDeliver = false
  4472  	}
  4473  
  4474  	// Determine if we should remove this service import. This is for response service imports.
  4475  	// We will remove if we did not deliver, or if we are a response service import and we are
  4476  	// a singleton, or we have an EOF message.
  4477  	shouldRemove := !didDeliver || (isResponse && (si.rt == Singleton || len(msg) == LEN_CR_LF))
  4478  	// If we are tracking and we did not actually send the latency info we need to suppress the removal.
  4479  	if si.tracking && !didSendTL {
  4480  		shouldRemove = false
  4481  	}
  4482  	// If we are streamed or chunked we need to update our timestamp to avoid cleanup.
  4483  	if si.rt != Singleton && didDeliver {
  4484  		acc.mu.Lock()
  4485  		si.ts = time.Now().UnixNano()
  4486  		acc.mu.Unlock()
  4487  	}
  4488  
  4489  	// Cleanup of a response service import
  4490  	if shouldRemove {
  4491  		reason := rsiOk
  4492  		if !didDeliver {
  4493  			reason = rsiNoDelivery
  4494  		}
  4495  		if isResponse {
  4496  			acc.removeRespServiceImport(si, reason)
  4497  		} else {
  4498  			// This is a main import and since we could not even deliver to the exporting account
  4499  			// go ahead and remove the respServiceImport we created above.
  4500  			siAcc.removeRespServiceImport(rsi, reason)
  4501  		}
  4502  	}
  4503  }
  4504  
  4505  func (c *client) addSubToRouteTargets(sub *subscription) {
  4506  	if c.in.rts == nil {
  4507  		c.in.rts = make([]routeTarget, 0, routeTargetInit)
  4508  	}
  4509  
  4510  	for i := range c.in.rts {
  4511  		rt := &c.in.rts[i]
  4512  		if rt.sub.client == sub.client {
  4513  			if sub.queue != nil {
  4514  				rt.qs = append(rt.qs, sub.queue...)
  4515  				rt.qs = append(rt.qs, ' ')
  4516  			}
  4517  			return
  4518  		}
  4519  	}
  4520  
  4521  	var rt *routeTarget
  4522  	lrts := len(c.in.rts)
  4523  
  4524  	// If we are here we do not have the sub yet in our list
  4525  	// If we have to grow do so here.
  4526  	if lrts == cap(c.in.rts) {
  4527  		c.in.rts = append(c.in.rts, routeTarget{})
  4528  	}
  4529  
  4530  	c.in.rts = c.in.rts[:lrts+1]
  4531  	rt = &c.in.rts[lrts]
  4532  	rt.sub = sub
  4533  	rt.qs = rt._qs[:0]
  4534  	if sub.queue != nil {
  4535  		rt.qs = append(rt.qs, sub.queue...)
  4536  		rt.qs = append(rt.qs, ' ')
  4537  	}
  4538  }
  4539  
  4540  // This processes the sublist results for a given message.
  4541  // Returns if the message was delivered to at least target and queue filters.
  4542  func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, deliver, subject, reply []byte, flags int) (bool, [][]byte) {
  4543  	// For sending messages across routes and leafnodes.
  4544  	// Reset if we have one since we reuse this data structure.
  4545  	if c.in.rts != nil {
  4546  		c.in.rts = c.in.rts[:0]
  4547  	}
  4548  
  4549  	var rplyHasGWPrefix bool
  4550  	var creply = reply
  4551  
  4552  	// If the reply subject is a GW routed reply, we will perform some
  4553  	// tracking in deliverMsg(). We also want to send to the user the
  4554  	// reply without the prefix. `creply` will be set to that and be
  4555  	// used to create the message header for client connections.
  4556  	if rplyHasGWPrefix = isGWRoutedReply(reply); rplyHasGWPrefix {
  4557  		creply = reply[gwSubjectOffset:]
  4558  	}
  4559  
  4560  	// With JetStream we now have times where we want to match a subscription
  4561  	// on one subject, but deliver it with another. e.g. JetStream deliverables.
  4562  	// This only works for last mile, meaning to a client. For other types we need
  4563  	// to use the original subject.
  4564  	subj := subject
  4565  	if len(deliver) > 0 {
  4566  		subj = deliver
  4567  	}
  4568  
  4569  	// Check for JetStream encoded reply subjects.
  4570  	// For now these will only be on $JS.ACK prefixed reply subjects.
  4571  	var remapped bool
  4572  	if len(creply) > 0 &&
  4573  		c.kind != CLIENT && c.kind != SYSTEM && c.kind != JETSTREAM && c.kind != ACCOUNT &&
  4574  		bytes.HasPrefix(creply, []byte(jsAckPre)) {
  4575  		// We need to rewrite the subject and the reply.
  4576  		if li := bytes.LastIndex(creply, []byte("@")); li != -1 && li < len(creply)-1 {
  4577  			remapped = true
  4578  			subj, creply = creply[li+1:], creply[:li]
  4579  		}
  4580  	}
  4581  
  4582  	var didDeliver bool
  4583  
  4584  	// delivery subject for clients
  4585  	var dsubj []byte
  4586  	// Used as scratch if mapping
  4587  	var _dsubj [128]byte
  4588  
  4589  	// For stats, we will keep track of the number of messages that have been
  4590  	// delivered and then multiply by the size of that message and update
  4591  	// server and account stats in a "single" operation (instead of per-sub).
  4592  	// However, we account for situations where the message is possibly changed
  4593  	// by having an extra size
  4594  	var dlvMsgs int64
  4595  	var dlvExtraSize int64
  4596  
  4597  	// We need to know if this is a MQTT producer because they send messages
  4598  	// without CR_LF (we otherwise remove the size of CR_LF from message size).
  4599  	prodIsMQTT := c.isMqtt()
  4600  
  4601  	updateStats := func() {
  4602  		if dlvMsgs == 0 {
  4603  			return
  4604  		}
  4605  		totalBytes := dlvMsgs*int64(len(msg)) + dlvExtraSize
  4606  		// For non MQTT producers, remove the CR_LF * number of messages
  4607  		if !prodIsMQTT {
  4608  			totalBytes -= dlvMsgs * int64(LEN_CR_LF)
  4609  		}
  4610  		if acc != nil {
  4611  			atomic.AddInt64(&acc.outMsgs, dlvMsgs)
  4612  			atomic.AddInt64(&acc.outBytes, totalBytes)
  4613  		}
  4614  		if srv := c.srv; srv != nil {
  4615  			atomic.AddInt64(&srv.outMsgs, dlvMsgs)
  4616  			atomic.AddInt64(&srv.outBytes, totalBytes)
  4617  		}
  4618  	}
  4619  
  4620  	mt, traceOnly := c.isMsgTraceEnabled()
  4621  
  4622  	// Loop over all normal subscriptions that match.
  4623  	for _, sub := range r.psubs {
  4624  		// Check if this is a send to a ROUTER. We now process
  4625  		// these after everything else.
  4626  		switch sub.client.kind {
  4627  		case ROUTER:
  4628  			if (c.kind != ROUTER && !c.isSpokeLeafNode()) || (flags&pmrAllowSendFromRouteToRoute != 0) {
  4629  				c.addSubToRouteTargets(sub)
  4630  			}
  4631  			continue
  4632  		case GATEWAY:
  4633  			// Never send to gateway from here.
  4634  			continue
  4635  		case LEAF:
  4636  			// We handle similarly to routes and use the same data structures.
  4637  			// Leaf node delivery audience is different however.
  4638  			// Also leaf nodes are always no echo, so we make sure we are not
  4639  			// going to send back to ourselves here. For messages from routes we want
  4640  			// to suppress in general unless we know from the hub or its a service reply.
  4641  			if c != sub.client && (c.kind != ROUTER || sub.client.isHubLeafNode() || isServiceReply(c.pa.subject)) {
  4642  				c.addSubToRouteTargets(sub)
  4643  			}
  4644  			continue
  4645  		}
  4646  
  4647  		// Assume delivery subject is the normal subject to this point.
  4648  		dsubj = subj
  4649  
  4650  		// We may need to disable tracing, by setting c.pa.trace to `nil`
  4651  		// before the call to deliverMsg, if so, this will indicate that
  4652  		// we need to put it back.
  4653  		var restorePaTrace bool
  4654  
  4655  		// Check for stream import mapped subs (shadow subs). These apply to local subs only.
  4656  		if sub.im != nil {
  4657  			// If this message was a service import do not re-export to an exported stream.
  4658  			if flags&pmrMsgImportedFromService != 0 {
  4659  				continue
  4660  			}
  4661  			if sub.im.tr != nil {
  4662  				to := sub.im.tr.TransformSubject(bytesToString(subject))
  4663  				dsubj = append(_dsubj[:0], to...)
  4664  			} else if sub.im.usePub {
  4665  				dsubj = append(_dsubj[:0], subj...)
  4666  			} else {
  4667  				dsubj = append(_dsubj[:0], sub.im.to...)
  4668  			}
  4669  
  4670  			if mt != nil {
  4671  				mt.addStreamExportEvent(sub.client, dsubj)
  4672  				// If allow_trace is false...
  4673  				if !sub.im.atrc {
  4674  					// If we are doing only message tracing, we can move to the
  4675  					//  next sub.
  4676  					if traceOnly {
  4677  						// Although the message was not delivered, for the purpose
  4678  						// of didDeliver, we need to set to true (to avoid possible
  4679  						// no responders).
  4680  						didDeliver = true
  4681  						continue
  4682  					}
  4683  					// If we are delivering the message, we need to disable tracing
  4684  					// before calling deliverMsg().
  4685  					c.pa.trace, restorePaTrace = nil, true
  4686  				}
  4687  			}
  4688  
  4689  			// Make sure deliver is set if inbound from a route.
  4690  			if remapped && (c.kind == GATEWAY || c.kind == ROUTER || c.kind == LEAF) {
  4691  				deliver = subj
  4692  			}
  4693  			// If we are mapping for a deliver subject we will reverse roles.
  4694  			// The original subj we set from above is correct for the msg header,
  4695  			// but we need to transform the deliver subject to properly route.
  4696  			if len(deliver) > 0 {
  4697  				dsubj, subj = subj, dsubj
  4698  			}
  4699  		}
  4700  
  4701  		// Remap to the original subject if internal.
  4702  		if sub.icb != nil && sub.rsi {
  4703  			dsubj = subject
  4704  		}
  4705  
  4706  		// Normal delivery
  4707  		mh := c.msgHeader(dsubj, creply, sub)
  4708  		if c.deliverMsg(prodIsMQTT, sub, acc, dsubj, creply, mh, msg, rplyHasGWPrefix) {
  4709  			// We don't count internal deliveries, so do only when sub.icb is nil.
  4710  			if sub.icb == nil {
  4711  				dlvMsgs++
  4712  			}
  4713  			didDeliver = true
  4714  		}
  4715  		if restorePaTrace {
  4716  			c.pa.trace = mt
  4717  		}
  4718  	}
  4719  
  4720  	// Set these up to optionally filter based on the queue lists.
  4721  	// This is for messages received from routes which will have directed
  4722  	// guidance on which queue groups we should deliver to.
  4723  	qf := c.pa.queues
  4724  
  4725  	// Declared here because of goto.
  4726  	var queues [][]byte
  4727  
  4728  	// For all routes/leaf/gateway connections, we may still want to send messages to
  4729  	// leaf nodes or routes even if there are no queue filters since we collect
  4730  	// them above and do not process inline like normal clients.
  4731  	// However, do select queue subs if asked to ignore empty queue filter.
  4732  	if (c.kind == LEAF || c.kind == ROUTER || c.kind == GATEWAY) && len(qf) == 0 && flags&pmrIgnoreEmptyQueueFilter == 0 {
  4733  		goto sendToRoutesOrLeafs
  4734  	}
  4735  
  4736  	// Process queue subs
  4737  	for i := 0; i < len(r.qsubs); i++ {
  4738  		qsubs := r.qsubs[i]
  4739  		// If we have a filter check that here. We could make this a map or someting more
  4740  		// complex but linear search since we expect queues to be small. Should be faster
  4741  		// and more cache friendly.
  4742  		if qf != nil && len(qsubs) > 0 {
  4743  			tqn := qsubs[0].queue
  4744  			for _, qn := range qf {
  4745  				if bytes.Equal(qn, tqn) {
  4746  					goto selectQSub
  4747  				}
  4748  			}
  4749  			continue
  4750  		}
  4751  
  4752  	selectQSub:
  4753  		// We will hold onto remote or lead qsubs when we are coming from
  4754  		// a route or a leaf node just in case we can no longer do local delivery.
  4755  		var rsub, sub *subscription
  4756  		var _ql [32]*subscription
  4757  
  4758  		src := c.kind
  4759  		// If we just came from a route we want to prefer local subs.
  4760  		// So only select from local subs but remember the first rsub
  4761  		// in case all else fails.
  4762  		if src == ROUTER {
  4763  			ql := _ql[:0]
  4764  			for i := 0; i < len(qsubs); i++ {
  4765  				sub = qsubs[i]
  4766  				if sub.client.kind == LEAF || sub.client.kind == ROUTER {
  4767  					// If we have assigned an rsub already, replace if the destination is a LEAF
  4768  					// since we want to favor that compared to a ROUTER. We could make sure that
  4769  					// we override only if previous was a ROUTE and not a LEAF, but we don't have to.
  4770  					if rsub == nil || sub.client.kind == LEAF {
  4771  						rsub = sub
  4772  					}
  4773  				} else {
  4774  					ql = append(ql, sub)
  4775  				}
  4776  			}
  4777  			qsubs = ql
  4778  		}
  4779  
  4780  		sindex := 0
  4781  		lqs := len(qsubs)
  4782  		if lqs > 1 {
  4783  			sindex = int(fastrand.Uint32() % uint32(lqs))
  4784  		}
  4785  
  4786  		// Find a subscription that is able to deliver this message starting at a random index.
  4787  		for i := 0; i < lqs; i++ {
  4788  			if sindex+i < lqs {
  4789  				sub = qsubs[sindex+i]
  4790  			} else {
  4791  				sub = qsubs[(sindex+i)%lqs]
  4792  			}
  4793  			if sub == nil {
  4794  				continue
  4795  			}
  4796  
  4797  			// If we are a spoke leaf node make sure to not forward across routes.
  4798  			// This mimics same behavior for normal subs above.
  4799  			if c.kind == LEAF && c.isSpokeLeafNode() && sub.client.kind == ROUTER {
  4800  				continue
  4801  			}
  4802  
  4803  			// We have taken care of preferring local subs for a message from a route above.
  4804  			// Here we just care about a client or leaf and skipping a leaf and preferring locals.
  4805  			if dst := sub.client.kind; dst == ROUTER || dst == LEAF {
  4806  				if (src == LEAF || src == CLIENT) && dst == LEAF {
  4807  					if rsub == nil {
  4808  						rsub = sub
  4809  					}
  4810  					continue
  4811  				} else {
  4812  					c.addSubToRouteTargets(sub)
  4813  					// Clear rsub since we added a sub.
  4814  					rsub = nil
  4815  					if flags&pmrCollectQueueNames != 0 {
  4816  						queues = append(queues, sub.queue)
  4817  					}
  4818  				}
  4819  				break
  4820  			}
  4821  
  4822  			// Assume delivery subject is normal subject to this point.
  4823  			dsubj = subj
  4824  
  4825  			// We may need to disable tracing, by setting c.pa.trace to `nil`
  4826  			// before the call to deliverMsg, if so, this will indicate that
  4827  			// we need to put it back.
  4828  			var restorePaTrace bool
  4829  			var skipDelivery bool
  4830  
  4831  			// Check for stream import mapped subs. These apply to local subs only.
  4832  			if sub.im != nil {
  4833  				// If this message was a service import do not re-export to an exported stream.
  4834  				if flags&pmrMsgImportedFromService != 0 {
  4835  					continue
  4836  				}
  4837  				if sub.im.tr != nil {
  4838  					to := sub.im.tr.TransformSubject(bytesToString(subject))
  4839  					dsubj = append(_dsubj[:0], to...)
  4840  				} else if sub.im.usePub {
  4841  					dsubj = append(_dsubj[:0], subj...)
  4842  				} else {
  4843  					dsubj = append(_dsubj[:0], sub.im.to...)
  4844  				}
  4845  
  4846  				if mt != nil {
  4847  					mt.addStreamExportEvent(sub.client, dsubj)
  4848  					// If allow_trace is false...
  4849  					if !sub.im.atrc {
  4850  						// If we are doing only message tracing, we are done
  4851  						// with this queue group.
  4852  						if traceOnly {
  4853  							skipDelivery = true
  4854  						} else {
  4855  							// If we are delivering, we need to disable tracing
  4856  							// before the call to deliverMsg()
  4857  							c.pa.trace, restorePaTrace = nil, true
  4858  						}
  4859  					}
  4860  				}
  4861  
  4862  				// Make sure deliver is set if inbound from a route.
  4863  				if remapped && (c.kind == GATEWAY || c.kind == ROUTER || c.kind == LEAF) {
  4864  					deliver = subj
  4865  				}
  4866  				// If we are mapping for a deliver subject we will reverse roles.
  4867  				// The original subj we set from above is correct for the msg header,
  4868  				// but we need to transform the deliver subject to properly route.
  4869  				if len(deliver) > 0 {
  4870  					dsubj, subj = subj, dsubj
  4871  				}
  4872  			}
  4873  
  4874  			var delivered bool
  4875  			if !skipDelivery {
  4876  				mh := c.msgHeader(dsubj, creply, sub)
  4877  				delivered = c.deliverMsg(prodIsMQTT, sub, acc, subject, creply, mh, msg, rplyHasGWPrefix)
  4878  				if restorePaTrace {
  4879  					c.pa.trace = mt
  4880  				}
  4881  			}
  4882  			if skipDelivery || delivered {
  4883  				// Update only if not skipped.
  4884  				if !skipDelivery && sub.icb == nil {
  4885  					dlvMsgs++
  4886  				}
  4887  				// Do the rest even when message delivery was skipped.
  4888  				didDeliver = true
  4889  				// Clear rsub
  4890  				rsub = nil
  4891  				if flags&pmrCollectQueueNames != 0 {
  4892  					queues = append(queues, sub.queue)
  4893  				}
  4894  				break
  4895  			}
  4896  		}
  4897  
  4898  		if rsub != nil {
  4899  			// If we are here we tried to deliver to a local qsub
  4900  			// but failed. So we will send it to a remote or leaf node.
  4901  			c.addSubToRouteTargets(rsub)
  4902  			if flags&pmrCollectQueueNames != 0 {
  4903  				queues = append(queues, rsub.queue)
  4904  			}
  4905  		}
  4906  	}
  4907  
  4908  sendToRoutesOrLeafs:
  4909  
  4910  	// If no messages for routes or leafnodes return here.
  4911  	if len(c.in.rts) == 0 {
  4912  		updateStats()
  4913  		return didDeliver, queues
  4914  	}
  4915  
  4916  	// If we do have a deliver subject we need to do something with it.
  4917  	// Again this is when JetStream (but possibly others) wants the system
  4918  	// to rewrite the delivered subject. The way we will do that is place it
  4919  	// at the end of the reply subject if it exists.
  4920  	if len(deliver) > 0 && len(reply) > 0 {
  4921  		reply = append(reply, '@')
  4922  		reply = append(reply, deliver...)
  4923  	}
  4924  
  4925  	// Copy off original pa in case it changes.
  4926  	pa := c.pa
  4927  
  4928  	if mt != nil {
  4929  		// We are going to replace "pa" with our copy of c.pa, but to restore
  4930  		// to the original copy of c.pa, we need to save it again.
  4931  		cpa := pa
  4932  		msg = mt.setOriginAccountHeaderIfNeeded(c, acc, msg)
  4933  		defer func() { c.pa = cpa }()
  4934  		// Update pa with our current c.pa state.
  4935  		pa = c.pa
  4936  	}
  4937  
  4938  	// We address by index to avoid struct copy.
  4939  	// We have inline structs for memory layout and cache coherency.
  4940  	for i := range c.in.rts {
  4941  		rt := &c.in.rts[i]
  4942  		dc := rt.sub.client
  4943  		dmsg, hset := msg, false
  4944  
  4945  		// Check if we have an origin cluster set from a leafnode message.
  4946  		// If so make sure we do not send it back to the same cluster for a different
  4947  		// leafnode. Cluster wide no echo.
  4948  		if dc.kind == LEAF {
  4949  			// Check two scenarios. One is inbound from a route (c.pa.origin)
  4950  			if c.kind == ROUTER && len(c.pa.origin) > 0 {
  4951  				if bytesToString(c.pa.origin) == dc.remoteCluster() {
  4952  					continue
  4953  				}
  4954  			}
  4955  			// The other is leaf to leaf.
  4956  			if c.kind == LEAF {
  4957  				src, dest := c.remoteCluster(), dc.remoteCluster()
  4958  				if src != _EMPTY_ && src == dest {
  4959  					continue
  4960  				}
  4961  			}
  4962  
  4963  			// We need to check if this is a request that has a stamped client information header.
  4964  			// This will contain an account but will represent the account from the leafnode. If
  4965  			// they are not named the same this would cause an account lookup failure trying to
  4966  			// process the request for something like JetStream or other system services that rely
  4967  			// on the client info header. We can just check for reply and the presence of a header
  4968  			// to avoid slow downs for all traffic.
  4969  			if len(c.pa.reply) > 0 && c.pa.hdr >= 0 {
  4970  				dmsg, hset = c.checkLeafClientInfoHeader(msg)
  4971  			}
  4972  		}
  4973  
  4974  		if mt != nil {
  4975  			dmsg = mt.setHopHeader(c, dmsg)
  4976  			hset = true
  4977  		}
  4978  
  4979  		mh := c.msgHeaderForRouteOrLeaf(subject, reply, rt, acc)
  4980  		if c.deliverMsg(prodIsMQTT, rt.sub, acc, subject, reply, mh, dmsg, false) {
  4981  			if rt.sub.icb == nil {
  4982  				dlvMsgs++
  4983  				dlvExtraSize += int64(len(dmsg) - len(msg))
  4984  			}
  4985  			didDeliver = true
  4986  		}
  4987  
  4988  		// If we set the header reset the origin pub args.
  4989  		if hset {
  4990  			c.pa = pa
  4991  		}
  4992  	}
  4993  	updateStats()
  4994  	return didDeliver, queues
  4995  }
  4996  
  4997  // Check and swap accounts on a client info header destined across a leafnode.
  4998  func (c *client) checkLeafClientInfoHeader(msg []byte) (dmsg []byte, setHdr bool) {
  4999  	if c.pa.hdr < 0 || len(msg) < c.pa.hdr {
  5000  		return msg, false
  5001  	}
  5002  	cir := getHeader(ClientInfoHdr, msg[:c.pa.hdr])
  5003  	if len(cir) == 0 {
  5004  		return msg, false
  5005  	}
  5006  
  5007  	dmsg = msg
  5008  
  5009  	var ci ClientInfo
  5010  	if err := json.Unmarshal(cir, &ci); err == nil {
  5011  		if v, _ := c.srv.leafRemoteAccounts.Load(ci.Account); v != nil {
  5012  			remoteAcc := v.(string)
  5013  			if ci.Account != remoteAcc {
  5014  				ci.Account = remoteAcc
  5015  				if b, _ := json.Marshal(ci); b != nil {
  5016  					dmsg, setHdr = c.setHeader(ClientInfoHdr, bytesToString(b), msg), true
  5017  				}
  5018  			}
  5019  		}
  5020  	}
  5021  	return dmsg, setHdr
  5022  }
  5023  
  5024  func (c *client) pubPermissionViolation(subject []byte) {
  5025  	errTxt := fmt.Sprintf("Permissions Violation for Publish to %q", subject)
  5026  	if mt, _ := c.isMsgTraceEnabled(); mt != nil {
  5027  		mt.setIngressError(errTxt)
  5028  	}
  5029  	c.sendErr(errTxt)
  5030  	c.Errorf("Publish Violation - %s, Subject %q", c.getAuthUser(), subject)
  5031  }
  5032  
  5033  func (c *client) subPermissionViolation(sub *subscription) {
  5034  	errTxt := fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject)
  5035  	logTxt := fmt.Sprintf("Subscription Violation - %s, Subject %q, SID %s",
  5036  		c.getAuthUser(), sub.subject, sub.sid)
  5037  
  5038  	if sub.queue != nil {
  5039  		errTxt = fmt.Sprintf("Permissions Violation for Subscription to %q using queue %q", sub.subject, sub.queue)
  5040  		logTxt = fmt.Sprintf("Subscription Violation - %s, Subject %q, Queue: %q, SID %s",
  5041  			c.getAuthUser(), sub.subject, sub.queue, sub.sid)
  5042  	}
  5043  
  5044  	c.sendErr(errTxt)
  5045  	c.Errorf(logTxt)
  5046  }
  5047  
  5048  func (c *client) replySubjectViolation(reply []byte) {
  5049  	errTxt := fmt.Sprintf("Permissions Violation for Publish with Reply of %q", reply)
  5050  	if mt, _ := c.isMsgTraceEnabled(); mt != nil {
  5051  		mt.setIngressError(errTxt)
  5052  	}
  5053  	c.sendErr(errTxt)
  5054  	c.Errorf("Publish Violation - %s, Reply %q", c.getAuthUser(), reply)
  5055  }
  5056  
  5057  func (c *client) maxTokensViolation(sub *subscription) {
  5058  	errTxt := fmt.Sprintf("Permissions Violation for Subscription to %q, too many tokens", sub.subject)
  5059  	logTxt := fmt.Sprintf("Subscription Violation Too Many Tokens - %s, Subject %q, SID %s",
  5060  		c.getAuthUser(), sub.subject, sub.sid)
  5061  	c.sendErr(errTxt)
  5062  	c.Errorf(logTxt)
  5063  }
  5064  
  5065  func (c *client) processPingTimer() {
  5066  	c.mu.Lock()
  5067  	c.ping.tmr = nil
  5068  	// Check if connection is still opened
  5069  	if c.isClosed() {
  5070  		c.mu.Unlock()
  5071  		return
  5072  	}
  5073  
  5074  	c.Debugf("%s Ping Timer", c.kindString())
  5075  
  5076  	var sendPing bool
  5077  
  5078  	opts := c.srv.getOpts()
  5079  	pingInterval := opts.PingInterval
  5080  	if c.kind == ROUTER && opts.Cluster.PingInterval > 0 {
  5081  		pingInterval = opts.Cluster.PingInterval
  5082  	}
  5083  	pingInterval = adjustPingInterval(c.kind, pingInterval)
  5084  	now := time.Now()
  5085  	needRTT := c.rtt == 0 || now.Sub(c.rttStart) > DEFAULT_RTT_MEASUREMENT_INTERVAL
  5086  
  5087  	// Do not delay PINGs for ROUTER, GATEWAY or spoke LEAF connections.
  5088  	if c.kind == ROUTER || c.kind == GATEWAY || c.isSpokeLeafNode() {
  5089  		sendPing = true
  5090  	} else {
  5091  		// If we received client data or a ping from the other side within the PingInterval,
  5092  		// then there is no need to send a ping.
  5093  		if delta := now.Sub(c.lastIn); delta < pingInterval && !needRTT {
  5094  			c.Debugf("Delaying PING due to remote client data or ping %v ago", delta.Round(time.Second))
  5095  		} else {
  5096  			sendPing = true
  5097  		}
  5098  	}
  5099  
  5100  	if sendPing {
  5101  		// Check for violation
  5102  		maxPingsOut := opts.MaxPingsOut
  5103  		if c.kind == ROUTER && opts.Cluster.MaxPingsOut > 0 {
  5104  			maxPingsOut = opts.Cluster.MaxPingsOut
  5105  		}
  5106  		if c.ping.out+1 > maxPingsOut {
  5107  			c.Debugf("Stale Client Connection - Closing")
  5108  			c.enqueueProto([]byte(fmt.Sprintf(errProto, "Stale Connection")))
  5109  			c.mu.Unlock()
  5110  			c.closeConnection(StaleConnection)
  5111  			return
  5112  		}
  5113  		// Send PING
  5114  		c.sendPing()
  5115  	}
  5116  
  5117  	// Reset to fire again.
  5118  	c.setPingTimer()
  5119  	c.mu.Unlock()
  5120  }
  5121  
  5122  // Returns the smallest value between the given `d` and some max value
  5123  // based on the connection kind.
  5124  func adjustPingInterval(kind int, d time.Duration) time.Duration {
  5125  	switch kind {
  5126  	case ROUTER:
  5127  		if d > routeMaxPingInterval {
  5128  			return routeMaxPingInterval
  5129  		}
  5130  	case GATEWAY:
  5131  		if d > gatewayMaxPingInterval {
  5132  			return gatewayMaxPingInterval
  5133  		}
  5134  	}
  5135  	return d
  5136  }
  5137  
  5138  // Lock should be held
  5139  func (c *client) setPingTimer() {
  5140  	if c.srv == nil {
  5141  		return
  5142  	}
  5143  	opts := c.srv.getOpts()
  5144  	d := opts.PingInterval
  5145  	if c.kind == ROUTER && opts.Cluster.PingInterval > 0 {
  5146  		d = opts.Cluster.PingInterval
  5147  	}
  5148  	d = adjustPingInterval(c.kind, d)
  5149  	c.ping.tmr = time.AfterFunc(d, c.processPingTimer)
  5150  }
  5151  
  5152  // Lock should be held
  5153  func (c *client) clearPingTimer() {
  5154  	if c.ping.tmr == nil {
  5155  		return
  5156  	}
  5157  	c.ping.tmr.Stop()
  5158  	c.ping.tmr = nil
  5159  }
  5160  
  5161  func (c *client) clearTlsToTimer() {
  5162  	if c.tlsTo == nil {
  5163  		return
  5164  	}
  5165  	c.tlsTo.Stop()
  5166  	c.tlsTo = nil
  5167  }
  5168  
  5169  // Lock should be held
  5170  func (c *client) setAuthTimer(d time.Duration) {
  5171  	c.atmr = time.AfterFunc(d, c.authTimeout)
  5172  }
  5173  
  5174  // Lock should be held
  5175  func (c *client) clearAuthTimer() bool {
  5176  	if c.atmr == nil {
  5177  		return true
  5178  	}
  5179  	stopped := c.atmr.Stop()
  5180  	c.atmr = nil
  5181  	return stopped
  5182  }
  5183  
  5184  // We may reuse atmr for expiring user jwts,
  5185  // so check connectReceived.
  5186  // Lock assume held on entry.
  5187  func (c *client) awaitingAuth() bool {
  5188  	return !c.flags.isSet(connectReceived) && c.atmr != nil
  5189  }
  5190  
  5191  // This will set the atmr for the JWT expiration time.
  5192  // We will lock on entry.
  5193  func (c *client) setExpirationTimer(d time.Duration) {
  5194  	c.mu.Lock()
  5195  	c.setExpirationTimerUnlocked(d)
  5196  	c.mu.Unlock()
  5197  }
  5198  
  5199  // This will set the atmr for the JWT expiration time. client lock should be held before call
  5200  func (c *client) setExpirationTimerUnlocked(d time.Duration) {
  5201  	c.atmr = time.AfterFunc(d, c.authExpired)
  5202  	// This is an JWT expiration.
  5203  	if c.flags.isSet(connectReceived) {
  5204  		c.expires = time.Now().Add(d).Truncate(time.Second)
  5205  	}
  5206  }
  5207  
  5208  // Return when this client expires via a claim, or 0 if not set.
  5209  func (c *client) claimExpiration() time.Duration {
  5210  	c.mu.Lock()
  5211  	defer c.mu.Unlock()
  5212  	if c.expires.IsZero() {
  5213  		return 0
  5214  	}
  5215  	return time.Until(c.expires).Truncate(time.Second)
  5216  }
  5217  
  5218  // Possibly flush the connection and then close the low level connection.
  5219  // The boolean `minimalFlush` indicates if the flush operation should have a
  5220  // minimal write deadline.
  5221  // Lock is held on entry.
  5222  func (c *client) flushAndClose(minimalFlush bool) {
  5223  	if !c.flags.isSet(skipFlushOnClose) && c.out.pb > 0 {
  5224  		if minimalFlush {
  5225  			const lowWriteDeadline = 100 * time.Millisecond
  5226  
  5227  			// Reduce the write deadline if needed.
  5228  			if c.out.wdl > lowWriteDeadline {
  5229  				c.out.wdl = lowWriteDeadline
  5230  			}
  5231  		}
  5232  		c.flushOutbound()
  5233  	}
  5234  	for i := range c.out.nb {
  5235  		nbPoolPut(c.out.nb[i])
  5236  	}
  5237  	c.out.nb = nil
  5238  	// We can't touch c.out.wnb when a flushOutbound is in progress since it
  5239  	// is accessed outside the lock there. If in progress, the cleanup will be
  5240  	// done in flushOutbound when detecting that connection is closed.
  5241  	if !c.flags.isSet(flushOutbound) {
  5242  		for i := range c.out.wnb {
  5243  			nbPoolPut(c.out.wnb[i])
  5244  		}
  5245  		c.out.wnb = nil
  5246  	}
  5247  	// This seem to be important (from experimentation) for the GC to release
  5248  	// the connection.
  5249  	c.out.sg = nil
  5250  
  5251  	// Close the low level connection.
  5252  	if c.nc != nil {
  5253  		// Starting with Go 1.16, the low level close will set its own deadline
  5254  		// of 5 seconds, so setting our own deadline does not work. Instead,
  5255  		// we will close the TLS connection in separate go routine.
  5256  		nc := c.nc
  5257  		c.nc = nil
  5258  		if _, ok := nc.(*tls.Conn); ok {
  5259  			go func() { nc.Close() }()
  5260  		} else {
  5261  			nc.Close()
  5262  		}
  5263  	}
  5264  }
  5265  
  5266  var kindStringMap = map[int]string{
  5267  	CLIENT:    "Client",
  5268  	ROUTER:    "Router",
  5269  	GATEWAY:   "Gateway",
  5270  	LEAF:      "Leafnode",
  5271  	JETSTREAM: "JetStream",
  5272  	ACCOUNT:   "Account",
  5273  	SYSTEM:    "System",
  5274  }
  5275  
  5276  func (c *client) kindString() string {
  5277  	if kindStringVal, ok := kindStringMap[c.kind]; ok {
  5278  		return kindStringVal
  5279  	}
  5280  	return "Unknown Type"
  5281  }
  5282  
  5283  // swapAccountAfterReload will check to make sure the bound account for this client
  5284  // is current. Under certain circumstances after a reload we could be pointing to
  5285  // an older one.
  5286  func (c *client) swapAccountAfterReload() {
  5287  	c.mu.Lock()
  5288  	srv := c.srv
  5289  	an := c.acc.GetName()
  5290  	c.mu.Unlock()
  5291  	if srv == nil {
  5292  		return
  5293  	}
  5294  	if acc, _ := srv.LookupAccount(an); acc != nil {
  5295  		c.mu.Lock()
  5296  		if c.acc != acc {
  5297  			c.acc = acc
  5298  		}
  5299  		c.mu.Unlock()
  5300  	}
  5301  }
  5302  
  5303  // processSubsOnConfigReload removes any subscriptions the client has that are no
  5304  // longer authorized, and checks for imports (accounts) due to a config reload.
  5305  func (c *client) processSubsOnConfigReload(awcsti map[string]struct{}) {
  5306  	c.mu.Lock()
  5307  	var (
  5308  		checkPerms = c.perms != nil
  5309  		checkAcc   = c.acc != nil
  5310  		acc        = c.acc
  5311  	)
  5312  	if !checkPerms && !checkAcc {
  5313  		c.mu.Unlock()
  5314  		return
  5315  	}
  5316  	var (
  5317  		_subs    [32]*subscription
  5318  		subs     = _subs[:0]
  5319  		_removed [32]*subscription
  5320  		removed  = _removed[:0]
  5321  		srv      = c.srv
  5322  	)
  5323  	if checkAcc {
  5324  		// We actually only want to check if stream imports have changed.
  5325  		if _, ok := awcsti[acc.Name]; !ok {
  5326  			checkAcc = false
  5327  		}
  5328  	}
  5329  	// We will clear any mperms we have here. It will rebuild on the fly with canSubscribe,
  5330  	// so we do that here as we collect them. We will check result down below.
  5331  	c.mperms = nil
  5332  	// Collect client's subs under the lock
  5333  	for _, sub := range c.subs {
  5334  		// Just checking to rebuild mperms under the lock, will collect removed though here.
  5335  		// Only collect under subs array of canSubscribe and checkAcc true.
  5336  		canSub := c.canSubscribe(string(sub.subject))
  5337  		canQSub := sub.queue != nil && c.canSubscribe(string(sub.subject), string(sub.queue))
  5338  
  5339  		if !canSub && !canQSub {
  5340  			removed = append(removed, sub)
  5341  		} else if checkAcc {
  5342  			subs = append(subs, sub)
  5343  		}
  5344  	}
  5345  	c.mu.Unlock()
  5346  
  5347  	// This list is all subs who are allowed and we need to check accounts.
  5348  	for _, sub := range subs {
  5349  		c.mu.Lock()
  5350  		oldShadows := sub.shadow
  5351  		sub.shadow = nil
  5352  		c.mu.Unlock()
  5353  		c.addShadowSubscriptions(acc, sub, true)
  5354  		for _, nsub := range oldShadows {
  5355  			nsub.im.acc.sl.Remove(nsub)
  5356  		}
  5357  	}
  5358  
  5359  	// Unsubscribe all that need to be removed and report back to client and logs.
  5360  	for _, sub := range removed {
  5361  		c.unsubscribe(acc, sub, true, true)
  5362  		c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q (sid %q)",
  5363  			sub.subject, sub.sid))
  5364  		srv.Noticef("Removed sub %q (sid %q) for %s - not authorized",
  5365  			sub.subject, sub.sid, c.getAuthUser())
  5366  	}
  5367  }
  5368  
  5369  // Allows us to count up all the queue subscribers during close.
  5370  type qsub struct {
  5371  	sub *subscription
  5372  	n   int32
  5373  }
  5374  
  5375  func (c *client) closeConnection(reason ClosedState) {
  5376  	c.mu.Lock()
  5377  	if c.flags.isSet(closeConnection) {
  5378  		c.mu.Unlock()
  5379  		return
  5380  	}
  5381  	// Note that we may have markConnAsClosed() invoked before closeConnection(),
  5382  	// so don't set this to 1, instead bump the count.
  5383  	c.rref++
  5384  	c.flags.set(closeConnection)
  5385  	c.clearAuthTimer()
  5386  	c.clearPingTimer()
  5387  	c.clearTlsToTimer()
  5388  	c.markConnAsClosed(reason)
  5389  
  5390  	// Unblock anyone who is potentially stalled waiting on us.
  5391  	if c.out.stc != nil {
  5392  		close(c.out.stc)
  5393  		c.out.stc = nil
  5394  	}
  5395  
  5396  	// If we have remote latency tracking running shut that down.
  5397  	if c.rrTracking != nil {
  5398  		c.rrTracking.ptmr.Stop()
  5399  		c.rrTracking = nil
  5400  	}
  5401  
  5402  	// If we are shutting down, no need to do all the accounting on subs, etc.
  5403  	if reason == ServerShutdown {
  5404  		s := c.srv
  5405  		c.mu.Unlock()
  5406  		if s != nil {
  5407  			// Unregister
  5408  			s.removeClient(c)
  5409  		}
  5410  		return
  5411  	}
  5412  
  5413  	var (
  5414  		kind        = c.kind
  5415  		srv         = c.srv
  5416  		noReconnect = c.flags.isSet(noReconnect)
  5417  		acc         = c.acc
  5418  		spoke       bool
  5419  	)
  5420  
  5421  	// Snapshot for use if we are a client connection.
  5422  	// FIXME(dlc) - we can just stub in a new one for client
  5423  	// and reference existing one.
  5424  	var subs []*subscription
  5425  	if kind == CLIENT || kind == LEAF || kind == JETSTREAM {
  5426  		var _subs [32]*subscription
  5427  		subs = _subs[:0]
  5428  		// Do not set c.subs to nil or delete the sub from c.subs here because
  5429  		// it will be needed in saveClosedClient (which has been started as a
  5430  		// go routine in markConnAsClosed). Cleanup will be done there.
  5431  		for _, sub := range c.subs {
  5432  			// Auto-unsubscribe subscriptions must be unsubscribed forcibly.
  5433  			sub.max = 0
  5434  			sub.close()
  5435  			subs = append(subs, sub)
  5436  		}
  5437  		spoke = c.isSpokeLeafNode()
  5438  	}
  5439  
  5440  	c.mu.Unlock()
  5441  
  5442  	// Remove client's or leaf node or jetstream subscriptions.
  5443  	if acc != nil && (kind == CLIENT || kind == LEAF || kind == JETSTREAM) {
  5444  		acc.sl.RemoveBatch(subs)
  5445  	} else if kind == ROUTER {
  5446  		c.removeRemoteSubs()
  5447  	}
  5448  
  5449  	if srv != nil {
  5450  		// Unregister
  5451  		srv.removeClient(c)
  5452  
  5453  		// Update remote subscriptions.
  5454  		if acc != nil && (kind == CLIENT || kind == LEAF || kind == JETSTREAM) {
  5455  			qsubs := map[string]*qsub{}
  5456  			for _, sub := range subs {
  5457  				// Call unsubscribe here to cleanup shadow subscriptions and such.
  5458  				c.unsubscribe(acc, sub, true, false)
  5459  				// Update route as normal for a normal subscriber.
  5460  				if sub.queue == nil {
  5461  					if !spoke {
  5462  						srv.updateRouteSubscriptionMap(acc, sub, -1)
  5463  						if srv.gateway.enabled {
  5464  							srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
  5465  						}
  5466  					}
  5467  					acc.updateLeafNodes(sub, -1)
  5468  				} else {
  5469  					// We handle queue subscribers special in case we
  5470  					// have a bunch we can just send one update to the
  5471  					// connected routes.
  5472  					num := int32(1)
  5473  					if kind == LEAF {
  5474  						num = sub.qw
  5475  					}
  5476  					// TODO(dlc) - Better to use string builder?
  5477  					key := bytesToString(sub.subject) + " " + bytesToString(sub.queue)
  5478  					if esub, ok := qsubs[key]; ok {
  5479  						esub.n += num
  5480  					} else {
  5481  						qsubs[key] = &qsub{sub, num}
  5482  					}
  5483  				}
  5484  			}
  5485  			// Process any qsubs here.
  5486  			for _, esub := range qsubs {
  5487  				if !spoke {
  5488  					srv.updateRouteSubscriptionMap(acc, esub.sub, -(esub.n))
  5489  					if srv.gateway.enabled {
  5490  						srv.gatewayUpdateSubInterest(acc.Name, esub.sub, -(esub.n))
  5491  					}
  5492  				}
  5493  				acc.updateLeafNodes(esub.sub, -(esub.n))
  5494  			}
  5495  			if prev := acc.removeClient(c); prev == 1 {
  5496  				srv.decActiveAccounts()
  5497  			}
  5498  		}
  5499  	}
  5500  
  5501  	// Don't reconnect connections that have been marked with
  5502  	// the no reconnect flag.
  5503  	if noReconnect {
  5504  		return
  5505  	}
  5506  
  5507  	c.reconnect()
  5508  }
  5509  
  5510  // Depending on the kind of connections, this may attempt to recreate a connection.
  5511  // The actual reconnect attempt will be started in a go routine.
  5512  func (c *client) reconnect() {
  5513  	var (
  5514  		retryImplicit bool
  5515  		gwName        string
  5516  		gwIsOutbound  bool
  5517  		gwCfg         *gatewayCfg
  5518  		leafCfg       *leafNodeCfg
  5519  	)
  5520  
  5521  	c.mu.Lock()
  5522  	// Decrease the ref count and perform the reconnect only if == 0.
  5523  	c.rref--
  5524  	if c.flags.isSet(noReconnect) || c.rref > 0 {
  5525  		c.mu.Unlock()
  5526  		return
  5527  	}
  5528  	if c.route != nil {
  5529  		// A route is marked as solicited if it was given an URL to connect to,
  5530  		// which would be the case even with implicit (due to gossip), so mark this
  5531  		// as a retry for a route that is solicited and not explicit.
  5532  		retryImplicit = c.route.retry || (c.route.didSolicit && c.route.routeType == Implicit)
  5533  	}
  5534  	kind := c.kind
  5535  	switch kind {
  5536  	case GATEWAY:
  5537  		gwName = c.gw.name
  5538  		gwIsOutbound = c.gw.outbound
  5539  		gwCfg = c.gw.cfg
  5540  	case LEAF:
  5541  		if c.isSolicitedLeafNode() {
  5542  			leafCfg = c.leaf.remote
  5543  		}
  5544  	}
  5545  	srv := c.srv
  5546  	c.mu.Unlock()
  5547  
  5548  	// Check for a solicited route. If it was, start up a reconnect unless
  5549  	// we are already connected to the other end.
  5550  	if didSolicit := c.isSolicitedRoute(); didSolicit || retryImplicit {
  5551  		srv.mu.Lock()
  5552  		defer srv.mu.Unlock()
  5553  
  5554  		// Capture these under lock
  5555  		c.mu.Lock()
  5556  		rid := c.route.remoteID
  5557  		rtype := c.route.routeType
  5558  		rurl := c.route.url
  5559  		accName := string(c.route.accName)
  5560  		checkRID := accName == _EMPTY_ && srv.getOpts().Cluster.PoolSize < 1 && rid != _EMPTY_
  5561  		c.mu.Unlock()
  5562  
  5563  		// It is possible that the server is being shutdown.
  5564  		// If so, don't try to reconnect
  5565  		if !srv.isRunning() {
  5566  			return
  5567  		}
  5568  
  5569  		if checkRID && srv.routes[rid] != nil {
  5570  			// This is the case of "no pool". Make sure that the registered one
  5571  			// is upgraded to solicited if the connection trying to reconnect
  5572  			// was a solicited one.
  5573  			if didSolicit {
  5574  				if remote := srv.routes[rid][0]; remote != nil {
  5575  					upgradeRouteToSolicited(remote, rurl, rtype)
  5576  				}
  5577  			}
  5578  			srv.Debugf("Not attempting reconnect for solicited route, already connected to %q", rid)
  5579  			return
  5580  		} else if rid == srv.info.ID {
  5581  			srv.Debugf("Detected route to self, ignoring %q", rurl.Redacted())
  5582  			return
  5583  		} else if rtype != Implicit || retryImplicit {
  5584  			srv.Debugf("Attempting reconnect for solicited route %q", rurl.Redacted())
  5585  			// Keep track of this go-routine so we can wait for it on
  5586  			// server shutdown.
  5587  			srv.startGoRoutine(func() { srv.reConnectToRoute(rurl, rtype, accName) })
  5588  		}
  5589  	} else if srv != nil && kind == GATEWAY && gwIsOutbound {
  5590  		if gwCfg != nil {
  5591  			srv.Debugf("Attempting reconnect for gateway %q", gwName)
  5592  			// Run this as a go routine since we may be called within
  5593  			// the solicitGateway itself if there was an error during
  5594  			// the creation of the gateway connection.
  5595  			srv.startGoRoutine(func() { srv.reconnectGateway(gwCfg) })
  5596  		} else {
  5597  			srv.Debugf("Gateway %q not in configuration, not attempting reconnect", gwName)
  5598  		}
  5599  	} else if leafCfg != nil {
  5600  		// Check if this is a solicited leaf node. Start up a reconnect.
  5601  		srv.startGoRoutine(func() { srv.reConnectToRemoteLeafNode(leafCfg) })
  5602  	}
  5603  }
  5604  
  5605  // Set the noReconnect flag. This is used before a call to closeConnection()
  5606  // to prevent the connection to reconnect (routes, gateways).
  5607  func (c *client) setNoReconnect() {
  5608  	c.mu.Lock()
  5609  	c.flags.set(noReconnect)
  5610  	c.mu.Unlock()
  5611  }
  5612  
  5613  // Returns the client's RTT value with the protection of the client's lock.
  5614  func (c *client) getRTTValue() time.Duration {
  5615  	c.mu.Lock()
  5616  	rtt := c.rtt
  5617  	c.mu.Unlock()
  5618  	return rtt
  5619  }
  5620  
  5621  // This function is used by ROUTER and GATEWAY connections to
  5622  // look for a subject on a given account (since these type of
  5623  // connections are not bound to a specific account).
  5624  // If the c.pa.subject is found in the cache, the cached result
  5625  // is returned, otherwse, we match the account's sublist and update
  5626  // the cache. The cache is pruned if reaching a certain size.
  5627  func (c *client) getAccAndResultFromCache() (*Account, *SublistResult) {
  5628  	var (
  5629  		acc *Account
  5630  		pac *perAccountCache
  5631  		r   *SublistResult
  5632  		ok  bool
  5633  	)
  5634  	// Check our cache.
  5635  	if pac, ok = c.in.pacache[string(c.pa.pacache)]; ok {
  5636  		// Check the genid to see if it's still valid.
  5637  		// Since v2.10.0, the config reload of accounts has been fixed
  5638  		// and an account's sublist pointer should not change, so no need to
  5639  		// lock to access it.
  5640  		sl := pac.acc.sl
  5641  
  5642  		if genid := atomic.LoadUint64(&sl.genid); genid != pac.genid {
  5643  			ok = false
  5644  			delete(c.in.pacache, bytesToString(c.pa.pacache))
  5645  		} else {
  5646  			acc = pac.acc
  5647  			r = pac.results
  5648  		}
  5649  	}
  5650  
  5651  	if !ok {
  5652  		if c.kind == ROUTER && len(c.route.accName) > 0 {
  5653  			acc = c.acc
  5654  		} else {
  5655  			// Match correct account and sublist.
  5656  			if acc, _ = c.srv.LookupAccount(string(c.pa.account)); acc == nil {
  5657  				return nil, nil
  5658  			}
  5659  		}
  5660  		sl := acc.sl
  5661  
  5662  		// Match against the account sublist.
  5663  		r = sl.Match(string(c.pa.subject))
  5664  
  5665  		// Store in our cache
  5666  		c.in.pacache[string(c.pa.pacache)] = &perAccountCache{acc, r, atomic.LoadUint64(&sl.genid)}
  5667  
  5668  		// Check if we need to prune.
  5669  		if len(c.in.pacache) > maxPerAccountCacheSize {
  5670  			c.prunePerAccountCache()
  5671  		}
  5672  	}
  5673  	return acc, r
  5674  }
  5675  
  5676  // Account will return the associated account for this client.
  5677  func (c *client) Account() *Account {
  5678  	if c == nil {
  5679  		return nil
  5680  	}
  5681  	c.mu.Lock()
  5682  	acc := c.acc
  5683  	c.mu.Unlock()
  5684  	return acc
  5685  }
  5686  
  5687  // prunePerAccountCache will prune off a random number of cache entries.
  5688  func (c *client) prunePerAccountCache() {
  5689  	n := 0
  5690  	for cacheKey := range c.in.pacache {
  5691  		delete(c.in.pacache, cacheKey)
  5692  		if n++; n > prunePerAccountCacheSize {
  5693  			break
  5694  		}
  5695  	}
  5696  }
  5697  
  5698  // pruneClosedSubFromPerAccountCache remove entries that contain subscriptions
  5699  // that have been closed.
  5700  func (c *client) pruneClosedSubFromPerAccountCache() {
  5701  	for cacheKey, pac := range c.in.pacache {
  5702  		for _, sub := range pac.results.psubs {
  5703  			if sub.isClosed() {
  5704  				goto REMOVE
  5705  			}
  5706  		}
  5707  		for _, qsub := range pac.results.qsubs {
  5708  			for _, sub := range qsub {
  5709  				if sub.isClosed() {
  5710  					goto REMOVE
  5711  				}
  5712  			}
  5713  		}
  5714  		continue
  5715  	REMOVE:
  5716  		delete(c.in.pacache, cacheKey)
  5717  	}
  5718  }
  5719  
  5720  // Returns our service account for this request.
  5721  func (ci *ClientInfo) serviceAccount() string {
  5722  	if ci == nil {
  5723  		return _EMPTY_
  5724  	}
  5725  	if ci.Service != _EMPTY_ {
  5726  		return ci.Service
  5727  	}
  5728  	return ci.Account
  5729  }
  5730  
  5731  // Add in our server and cluster information to this client info.
  5732  func (c *client) addServerAndClusterInfo(ci *ClientInfo) {
  5733  	if ci == nil {
  5734  		return
  5735  	}
  5736  	// Server
  5737  	if c.kind != LEAF {
  5738  		ci.Server = c.srv.Name()
  5739  	} else if c.kind == LEAF {
  5740  		ci.Server = c.leaf.remoteServer
  5741  	}
  5742  	// Cluster
  5743  	ci.Cluster = c.srv.cachedClusterName()
  5744  	// If we have gateways fill in cluster alternates.
  5745  	// These will be in RTT asc order.
  5746  	if c.srv.gateway.enabled {
  5747  		var gws []*client
  5748  		c.srv.getOutboundGatewayConnections(&gws)
  5749  		for _, c := range gws {
  5750  			c.mu.Lock()
  5751  			cn := c.gw.name
  5752  			c.mu.Unlock()
  5753  			ci.Alternates = append(ci.Alternates, cn)
  5754  		}
  5755  	}
  5756  }
  5757  
  5758  // Grabs the information for this client.
  5759  func (c *client) getClientInfo(detailed bool) *ClientInfo {
  5760  	if c == nil || (c.kind != CLIENT && c.kind != LEAF && c.kind != JETSTREAM && c.kind != ACCOUNT) {
  5761  		return nil
  5762  	}
  5763  
  5764  	// Result
  5765  	var ci ClientInfo
  5766  
  5767  	if detailed {
  5768  		c.addServerAndClusterInfo(&ci)
  5769  	}
  5770  
  5771  	c.mu.Lock()
  5772  	// RTT and Account are always added.
  5773  	ci.Account = accForClient(c)
  5774  	ci.RTT = c.rtt
  5775  	// Detailed signals additional opt in.
  5776  	if detailed {
  5777  		ci.Start = &c.start
  5778  		ci.Host = c.host
  5779  		ci.ID = c.cid
  5780  		ci.Name = c.opts.Name
  5781  		ci.User = c.getRawAuthUser()
  5782  		ci.Lang = c.opts.Lang
  5783  		ci.Version = c.opts.Version
  5784  		ci.Jwt = c.opts.JWT
  5785  		ci.IssuerKey = issuerForClient(c)
  5786  		ci.NameTag = c.nameTag
  5787  		ci.Tags = c.tags
  5788  		ci.Kind = c.kindString()
  5789  		ci.ClientType = c.clientTypeString()
  5790  	}
  5791  	c.mu.Unlock()
  5792  	return &ci
  5793  }
  5794  
  5795  func (c *client) doTLSServerHandshake(typ string, tlsConfig *tls.Config, timeout float64, pCerts PinnedCertSet) error {
  5796  	_, err := c.doTLSHandshake(typ, false, nil, tlsConfig, _EMPTY_, timeout, pCerts)
  5797  	return err
  5798  }
  5799  
  5800  func (c *client) doTLSClientHandshake(typ string, url *url.URL, tlsConfig *tls.Config, tlsName string, timeout float64, pCerts PinnedCertSet) (bool, error) {
  5801  	return c.doTLSHandshake(typ, true, url, tlsConfig, tlsName, timeout, pCerts)
  5802  }
  5803  
  5804  // Performs either server or client side (if solicit is true) TLS Handshake.
  5805  // On error, the TLS handshake error has been logged and the connection
  5806  // has been closed.
  5807  //
  5808  // Lock is held on entry.
  5809  func (c *client) doTLSHandshake(typ string, solicit bool, url *url.URL, tlsConfig *tls.Config, tlsName string, timeout float64, pCerts PinnedCertSet) (bool, error) {
  5810  	var host string
  5811  	var resetTLSName bool
  5812  	var err error
  5813  
  5814  	// Capture kind for some debug/error statements.
  5815  	kind := c.kind
  5816  
  5817  	// If we solicited, we will act like the client, otherwise the server.
  5818  	if solicit {
  5819  		c.Debugf("Starting TLS %s client handshake", typ)
  5820  		if tlsConfig.ServerName == _EMPTY_ {
  5821  			// If the given url is a hostname, use this hostname for the
  5822  			// ServerName. If it is an IP, use the cfg's tlsName. If none
  5823  			// is available, resort to current IP.
  5824  			host = url.Hostname()
  5825  			if tlsName != _EMPTY_ && net.ParseIP(host) != nil {
  5826  				host = tlsName
  5827  			}
  5828  			tlsConfig.ServerName = host
  5829  		}
  5830  		c.nc = tls.Client(c.nc, tlsConfig)
  5831  	} else {
  5832  		if kind == CLIENT {
  5833  			c.Debugf("Starting TLS client connection handshake")
  5834  		} else {
  5835  			c.Debugf("Starting TLS %s server handshake", typ)
  5836  		}
  5837  		c.nc = tls.Server(c.nc, tlsConfig)
  5838  	}
  5839  
  5840  	conn := c.nc.(*tls.Conn)
  5841  
  5842  	// Setup the timeout
  5843  	ttl := secondsToDuration(timeout)
  5844  	c.tlsTo = time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
  5845  	conn.SetReadDeadline(time.Now().Add(ttl))
  5846  
  5847  	c.mu.Unlock()
  5848  	if err = conn.Handshake(); err != nil {
  5849  		if solicit {
  5850  			// Based on type of error, possibly clear the saved tlsName
  5851  			// See: https://github.com/nats-io/nats-server/issues/1256
  5852  			// NOTE: As of Go 1.20, the HostnameError is wrapped so cannot
  5853  			// type assert to check directly.
  5854  			var hostnameErr x509.HostnameError
  5855  			if errors.As(err, &hostnameErr) {
  5856  				if host == tlsName {
  5857  					resetTLSName = true
  5858  				}
  5859  			}
  5860  		}
  5861  	} else if !c.matchesPinnedCert(pCerts) {
  5862  		err = ErrCertNotPinned
  5863  	}
  5864  
  5865  	if err != nil {
  5866  		if kind == CLIENT {
  5867  			c.Errorf("TLS handshake error: %v", err)
  5868  		} else {
  5869  			c.Errorf("TLS %s handshake error: %v", typ, err)
  5870  		}
  5871  		c.closeConnection(TLSHandshakeError)
  5872  
  5873  		// Grab the lock before returning since the caller was holding the lock on entry
  5874  		c.mu.Lock()
  5875  		// Returning any error is fine. Since the connection is closed ErrConnectionClosed
  5876  		// is appropriate.
  5877  		return resetTLSName, ErrConnectionClosed
  5878  	}
  5879  
  5880  	// Reset the read deadline
  5881  	conn.SetReadDeadline(time.Time{})
  5882  
  5883  	// Re-Grab lock
  5884  	c.mu.Lock()
  5885  
  5886  	// To be consistent with client, set this flag to indicate that handshake is done
  5887  	c.flags.set(handshakeComplete)
  5888  
  5889  	// The connection still may have been closed on success handshake due
  5890  	// to a race with tls timeout. If that the case, return error indicating
  5891  	// that the connection is closed.
  5892  	if c.isClosed() {
  5893  		err = ErrConnectionClosed
  5894  	}
  5895  
  5896  	return false, err
  5897  }
  5898  
  5899  // getRawAuthUserLock returns the raw auth user for the client.
  5900  // Will acquire the client lock.
  5901  func (c *client) getRawAuthUserLock() string {
  5902  	c.mu.Lock()
  5903  	defer c.mu.Unlock()
  5904  	return c.getRawAuthUser()
  5905  }
  5906  
  5907  // getRawAuthUser returns the raw auth user for the client.
  5908  // Lock should be held.
  5909  func (c *client) getRawAuthUser() string {
  5910  	switch {
  5911  	case c.opts.Nkey != _EMPTY_:
  5912  		return c.opts.Nkey
  5913  	case c.opts.Username != _EMPTY_:
  5914  		return c.opts.Username
  5915  	case c.opts.JWT != _EMPTY_:
  5916  		return c.pubKey
  5917  	case c.opts.Token != _EMPTY_:
  5918  		return c.opts.Token
  5919  	default:
  5920  		return _EMPTY_
  5921  	}
  5922  }
  5923  
  5924  // getAuthUser returns the auth user for the client.
  5925  // Lock should be held.
  5926  func (c *client) getAuthUser() string {
  5927  	switch {
  5928  	case c.opts.Nkey != _EMPTY_:
  5929  		return fmt.Sprintf("Nkey %q", c.opts.Nkey)
  5930  	case c.opts.Username != _EMPTY_:
  5931  		return fmt.Sprintf("User %q", c.opts.Username)
  5932  	case c.opts.JWT != _EMPTY_:
  5933  		return fmt.Sprintf("JWT User %q", c.pubKey)
  5934  	case c.opts.Token != _EMPTY_:
  5935  		return fmt.Sprintf("Token %q", c.opts.Token)
  5936  	default:
  5937  		return `User "N/A"`
  5938  	}
  5939  }
  5940  
  5941  // Given an array of strings, this function converts it to a map as long
  5942  // as all the content (converted to upper-case) matches some constants.
  5943  
  5944  // Converts the given array of strings to a map of string.
  5945  // The strings are converted to upper-case and added to the map only
  5946  // if the server recognize them as valid connection types.
  5947  // If there are unknown connection types, the map of valid ones is returned
  5948  // along with an error that contains the name of the unknown.
  5949  func convertAllowedConnectionTypes(cts []string) (map[string]struct{}, error) {
  5950  	var unknown []string
  5951  	m := make(map[string]struct{}, len(cts))
  5952  	for _, i := range cts {
  5953  		i = strings.ToUpper(i)
  5954  		switch i {
  5955  		case jwt.ConnectionTypeStandard, jwt.ConnectionTypeWebsocket,
  5956  			jwt.ConnectionTypeLeafnode, jwt.ConnectionTypeLeafnodeWS,
  5957  			jwt.ConnectionTypeMqtt, jwt.ConnectionTypeMqttWS:
  5958  			m[i] = struct{}{}
  5959  		default:
  5960  			unknown = append(unknown, i)
  5961  		}
  5962  	}
  5963  	var err error
  5964  	// We will still return the map of valid ones.
  5965  	if len(unknown) != 0 {
  5966  		err = fmt.Errorf("invalid connection types %q", unknown)
  5967  	}
  5968  	return m, err
  5969  }
  5970  
  5971  // This will return true if the connection is of a type present in the given `acts` map.
  5972  // Note that so far this is used only for CLIENT or LEAF connections.
  5973  // But a CLIENT can be standard or websocket (and other types in the future).
  5974  func (c *client) connectionTypeAllowed(acts map[string]struct{}) bool {
  5975  	// Empty means all type of clients are allowed
  5976  	if len(acts) == 0 {
  5977  		return true
  5978  	}
  5979  	var want string
  5980  	switch c.kind {
  5981  	case CLIENT:
  5982  		switch c.clientType() {
  5983  		case NATS:
  5984  			want = jwt.ConnectionTypeStandard
  5985  		case WS:
  5986  			want = jwt.ConnectionTypeWebsocket
  5987  		case MQTT:
  5988  			if c.isWebsocket() {
  5989  				want = jwt.ConnectionTypeMqttWS
  5990  			} else {
  5991  				want = jwt.ConnectionTypeMqtt
  5992  			}
  5993  		}
  5994  	case LEAF:
  5995  		if c.isWebsocket() {
  5996  			want = jwt.ConnectionTypeLeafnodeWS
  5997  		} else {
  5998  			want = jwt.ConnectionTypeLeafnode
  5999  		}
  6000  	}
  6001  	_, ok := acts[want]
  6002  	return ok
  6003  }
  6004  
  6005  // isClosed returns true if either closeConnection or connMarkedClosed
  6006  // flag have been set, or if `nc` is nil, which may happen in tests.
  6007  func (c *client) isClosed() bool {
  6008  	return c.flags.isSet(closeConnection) || c.flags.isSet(connMarkedClosed) || c.nc == nil
  6009  }
  6010  
  6011  // Logging functionality scoped to a client or route.
  6012  func (c *client) Error(err error) {
  6013  	c.srv.Errors(c, err)
  6014  }
  6015  
  6016  func (c *client) Errorf(format string, v ...any) {
  6017  	format = fmt.Sprintf("%s - %s", c, format)
  6018  	c.srv.Errorf(format, v...)
  6019  }
  6020  
  6021  func (c *client) Debugf(format string, v ...any) {
  6022  	format = fmt.Sprintf("%s - %s", c, format)
  6023  	c.srv.Debugf(format, v...)
  6024  }
  6025  
  6026  func (c *client) Noticef(format string, v ...any) {
  6027  	format = fmt.Sprintf("%s - %s", c, format)
  6028  	c.srv.Noticef(format, v...)
  6029  }
  6030  
  6031  func (c *client) Tracef(format string, v ...any) {
  6032  	format = fmt.Sprintf("%s - %s", c, format)
  6033  	c.srv.Tracef(format, v...)
  6034  }
  6035  
  6036  func (c *client) Warnf(format string, v ...any) {
  6037  	format = fmt.Sprintf("%s - %s", c, format)
  6038  	c.srv.Warnf(format, v...)
  6039  }
  6040  
  6041  func (c *client) RateLimitWarnf(format string, v ...any) {
  6042  	// Do the check before adding the client info to the format...
  6043  	statement := fmt.Sprintf(format, v...)
  6044  	if _, loaded := c.srv.rateLimitLogging.LoadOrStore(statement, time.Now()); loaded {
  6045  		return
  6046  	}
  6047  	c.Warnf("%s", statement)
  6048  }
  6049  
  6050  // Set the very first PING to a lower interval to capture the initial RTT.
  6051  // After that the PING interval will be set to the user defined value.
  6052  // Client lock should be held.
  6053  func (c *client) setFirstPingTimer() {
  6054  	s := c.srv
  6055  	if s == nil {
  6056  		return
  6057  	}
  6058  	opts := s.getOpts()
  6059  	d := opts.PingInterval
  6060  
  6061  	if c.kind == ROUTER && opts.Cluster.PingInterval > 0 {
  6062  		d = opts.Cluster.PingInterval
  6063  	}
  6064  	if !opts.DisableShortFirstPing {
  6065  		if c.kind != CLIENT {
  6066  			if d > firstPingInterval {
  6067  				d = firstPingInterval
  6068  			}
  6069  			d = adjustPingInterval(c.kind, d)
  6070  		} else if d > firstClientPingInterval {
  6071  			d = firstClientPingInterval
  6072  		}
  6073  	}
  6074  	// We randomize the first one by an offset up to 20%, e.g. 2m ~= max 24s.
  6075  	addDelay := rand.Int63n(int64(d / 5))
  6076  	d += time.Duration(addDelay)
  6077  	// In the case of ROUTER/LEAF and when compression is configured, it is possible
  6078  	// that this timer was already set, but just to detect a stale connection
  6079  	// since we have to delay the first PING after compression negotiation
  6080  	// occurred.
  6081  	if c.ping.tmr != nil {
  6082  		c.ping.tmr.Stop()
  6083  	}
  6084  	c.ping.tmr = time.AfterFunc(d, c.processPingTimer)
  6085  }