get.pme.sh/pnats@v0.0.0-20240304004023-26bb5a137ed0/server/leafnode.go (about)

     1  // Copyright 2019-2024 The NATS Authors
     2  // Licensed under the Apache License, Version 2.0 (the "License");
     3  // you may not use this file except in compliance with the License.
     4  // You may obtain a copy of the License at
     5  //
     6  // http://www.apache.org/licenses/LICENSE-2.0
     7  //
     8  // Unless required by applicable law or agreed to in writing, software
     9  // distributed under the License is distributed on an "AS IS" BASIS,
    10  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package server
    15  
    16  import (
    17  	"bufio"
    18  	"bytes"
    19  	"crypto/tls"
    20  	"encoding/base64"
    21  	"encoding/json"
    22  	"fmt"
    23  	"math/rand"
    24  	"net"
    25  	"net/http"
    26  	"net/url"
    27  	"os"
    28  	"path"
    29  	"reflect"
    30  	"regexp"
    31  	"runtime"
    32  	"strconv"
    33  	"strings"
    34  	"sync"
    35  	"sync/atomic"
    36  	"time"
    37  
    38  	"github.com/klauspost/compress/s2"
    39  	"github.com/nats-io/jwt/v2"
    40  	"github.com/nats-io/nkeys"
    41  	"github.com/nats-io/nuid"
    42  )
    43  
    44  const (
    45  	// Warning when user configures leafnode TLS insecure
    46  	leafnodeTLSInsecureWarning = "TLS certificate chain and hostname of solicited leafnodes will not be verified. DO NOT USE IN PRODUCTION!"
    47  
    48  	// When a loop is detected, delay the reconnect of solicited connection.
    49  	leafNodeReconnectDelayAfterLoopDetected = 30 * time.Second
    50  
    51  	// When a server receives a message causing a permission violation, the
    52  	// connection is closed and it won't attempt to reconnect for that long.
    53  	leafNodeReconnectAfterPermViolation = 30 * time.Second
    54  
    55  	// When we have the same cluster name as the hub.
    56  	leafNodeReconnectDelayAfterClusterNameSame = 30 * time.Second
    57  
    58  	// Prefix for loop detection subject
    59  	leafNodeLoopDetectionSubjectPrefix = "$LDS."
    60  
    61  	// Path added to URL to indicate to WS server that the connection is a
    62  	// LEAF connection as opposed to a CLIENT.
    63  	leafNodeWSPath = "/leafnode"
    64  
    65  	// This is the time the server will wait, when receiving a CONNECT,
    66  	// before closing the connection if the required minimum version is not met.
    67  	leafNodeWaitBeforeClose = 5 * time.Second
    68  )
    69  
    70  type leaf struct {
    71  	// We have any auth stuff here for solicited connections.
    72  	remote *leafNodeCfg
    73  	// isSpoke tells us what role we are playing.
    74  	// Used when we receive a connection but otherside tells us they are a hub.
    75  	isSpoke bool
    76  	// remoteCluster is when we are a hub but the spoke leafnode is part of a cluster.
    77  	remoteCluster string
    78  	// remoteServer holds onto the remove server's name or ID.
    79  	remoteServer string
    80  	// domain name of remote server
    81  	remoteDomain string
    82  	// account name of remote server
    83  	remoteAccName string
    84  	// Used to suppress sub and unsub interest. Same as routes but our audience
    85  	// here is tied to this leaf node. This will hold all subscriptions except this
    86  	// leaf nodes. This represents all the interest we want to send to the other side.
    87  	smap map[string]int32
    88  	// This map will contain all the subscriptions that have been added to the smap
    89  	// during initLeafNodeSmapAndSendSubs. It is short lived and is there to avoid
    90  	// race between processing of a sub where sub is added to account sublist but
    91  	// updateSmap has not be called on that "thread", while in the LN readloop,
    92  	// when processing CONNECT, initLeafNodeSmapAndSendSubs is invoked and add
    93  	// this subscription to smap. When processing of the sub then calls updateSmap,
    94  	// we would add it a second time in the smap causing later unsub to suppress the LS-.
    95  	tsub  map[*subscription]struct{}
    96  	tsubt *time.Timer
    97  	// Selected compression mode, which may be different from the server configured mode.
    98  	compression string
    99  }
   100  
   101  // Used for remote (solicited) leafnodes.
   102  type leafNodeCfg struct {
   103  	sync.RWMutex
   104  	*RemoteLeafOpts
   105  	urls      []*url.URL
   106  	curURL    *url.URL
   107  	tlsName   string
   108  	username  string
   109  	password  string
   110  	perms     *Permissions
   111  	connDelay time.Duration // Delay before a connect, could be used while detecting loop condition, etc..
   112  }
   113  
   114  // Check to see if this is a solicited leafnode. We do special processing for solicited.
   115  func (c *client) isSolicitedLeafNode() bool {
   116  	return c.kind == LEAF && c.leaf.remote != nil
   117  }
   118  
   119  // Returns true if this is a solicited leafnode and is not configured to be treated as a hub or a receiving
   120  // connection leafnode where the otherside has declared itself to be the hub.
   121  func (c *client) isSpokeLeafNode() bool {
   122  	return c.kind == LEAF && c.leaf.isSpoke
   123  }
   124  
   125  func (c *client) isHubLeafNode() bool {
   126  	return c.kind == LEAF && !c.leaf.isSpoke
   127  }
   128  
   129  // This will spin up go routines to solicit the remote leaf node connections.
   130  func (s *Server) solicitLeafNodeRemotes(remotes []*RemoteLeafOpts) {
   131  	sysAccName := _EMPTY_
   132  	sAcc := s.SystemAccount()
   133  	if sAcc != nil {
   134  		sysAccName = sAcc.Name
   135  	}
   136  	addRemote := func(r *RemoteLeafOpts, isSysAccRemote bool) *leafNodeCfg {
   137  		s.mu.Lock()
   138  		remote := newLeafNodeCfg(r)
   139  		creds := remote.Credentials
   140  		accName := remote.LocalAccount
   141  		s.leafRemoteCfgs = append(s.leafRemoteCfgs, remote)
   142  		// Print notice if
   143  		if isSysAccRemote {
   144  			if len(remote.DenyExports) > 0 {
   145  				s.Noticef("Remote for System Account uses restricted export permissions")
   146  			}
   147  			if len(remote.DenyImports) > 0 {
   148  				s.Noticef("Remote for System Account uses restricted import permissions")
   149  			}
   150  		}
   151  		s.mu.Unlock()
   152  		if creds != _EMPTY_ {
   153  			contents, err := os.ReadFile(creds)
   154  			defer wipeSlice(contents)
   155  			if err != nil {
   156  				s.Errorf("Error reading LeafNode Remote Credentials file %q: %v", creds, err)
   157  			} else if items := credsRe.FindAllSubmatch(contents, -1); len(items) < 2 {
   158  				s.Errorf("LeafNode Remote Credentials file %q malformed", creds)
   159  			} else if _, err := nkeys.FromSeed(items[1][1]); err != nil {
   160  				s.Errorf("LeafNode Remote Credentials file %q has malformed seed", creds)
   161  			} else if uc, err := jwt.DecodeUserClaims(string(items[0][1])); err != nil {
   162  				s.Errorf("LeafNode Remote Credentials file %q has malformed user jwt", creds)
   163  			} else if isSysAccRemote {
   164  				if !uc.Permissions.Pub.Empty() || !uc.Permissions.Sub.Empty() || uc.Permissions.Resp != nil {
   165  					s.Noticef("LeafNode Remote for System Account uses credentials file %q with restricted permissions", creds)
   166  				}
   167  			} else {
   168  				if !uc.Permissions.Pub.Empty() || !uc.Permissions.Sub.Empty() || uc.Permissions.Resp != nil {
   169  					s.Noticef("LeafNode Remote for Account %s uses credentials file %q with restricted permissions", accName, creds)
   170  				}
   171  			}
   172  		}
   173  		return remote
   174  	}
   175  	for _, r := range remotes {
   176  		remote := addRemote(r, r.LocalAccount == sysAccName)
   177  		s.startGoRoutine(func() { s.connectToRemoteLeafNode(remote, true) })
   178  	}
   179  }
   180  
   181  func (s *Server) remoteLeafNodeStillValid(remote *leafNodeCfg) bool {
   182  	for _, ri := range s.getOpts().LeafNode.Remotes {
   183  		// FIXME(dlc) - What about auth changes?
   184  		if reflect.DeepEqual(ri.URLs, remote.URLs) {
   185  			return true
   186  		}
   187  	}
   188  	return false
   189  }
   190  
   191  // Ensure that leafnode is properly configured.
   192  func validateLeafNode(o *Options) error {
   193  	if err := validateLeafNodeAuthOptions(o); err != nil {
   194  		return err
   195  	}
   196  
   197  	// Users can bind to any local account, if its empty we will assume the $G account.
   198  	for _, r := range o.LeafNode.Remotes {
   199  		if r.LocalAccount == _EMPTY_ {
   200  			r.LocalAccount = globalAccountName
   201  		}
   202  	}
   203  
   204  	// In local config mode, check that leafnode configuration refers to accounts that exist.
   205  	if len(o.TrustedOperators) == 0 {
   206  		accNames := map[string]struct{}{}
   207  		for _, a := range o.Accounts {
   208  			accNames[a.Name] = struct{}{}
   209  		}
   210  		// global account is always created
   211  		accNames[DEFAULT_GLOBAL_ACCOUNT] = struct{}{}
   212  		// in the context of leaf nodes, empty account means global account
   213  		accNames[_EMPTY_] = struct{}{}
   214  		// system account either exists or, if not disabled, will be created
   215  		if o.SystemAccount == _EMPTY_ && !o.NoSystemAccount {
   216  			accNames[DEFAULT_SYSTEM_ACCOUNT] = struct{}{}
   217  		}
   218  		checkAccountExists := func(accName string, cfgType string) error {
   219  			if _, ok := accNames[accName]; !ok {
   220  				return fmt.Errorf("cannot find local account %q specified in leafnode %s", accName, cfgType)
   221  			}
   222  			return nil
   223  		}
   224  		if err := checkAccountExists(o.LeafNode.Account, "authorization"); err != nil {
   225  			return err
   226  		}
   227  		for _, lu := range o.LeafNode.Users {
   228  			if lu.Account == nil { // means global account
   229  				continue
   230  			}
   231  			if err := checkAccountExists(lu.Account.Name, "authorization"); err != nil {
   232  				return err
   233  			}
   234  		}
   235  		for _, r := range o.LeafNode.Remotes {
   236  			if err := checkAccountExists(r.LocalAccount, "remote"); err != nil {
   237  				return err
   238  			}
   239  		}
   240  	} else {
   241  		if len(o.LeafNode.Users) != 0 {
   242  			return fmt.Errorf("operator mode does not allow specifying users in leafnode config")
   243  		}
   244  		for _, r := range o.LeafNode.Remotes {
   245  			if !nkeys.IsValidPublicAccountKey(r.LocalAccount) {
   246  				return fmt.Errorf(
   247  					"operator mode requires account nkeys in remotes. " +
   248  						"Please add an `account` key to each remote in your `leafnodes` section, to assign it to an account. " +
   249  						"Each account value should be a 56 character public key, starting with the letter 'A'")
   250  			}
   251  		}
   252  		if o.LeafNode.Port != 0 && o.LeafNode.Account != "" && !nkeys.IsValidPublicAccountKey(o.LeafNode.Account) {
   253  			return fmt.Errorf("operator mode and non account nkeys are incompatible")
   254  		}
   255  	}
   256  
   257  	// Validate compression settings
   258  	if o.LeafNode.Compression.Mode != _EMPTY_ {
   259  		if err := validateAndNormalizeCompressionOption(&o.LeafNode.Compression, CompressionS2Auto); err != nil {
   260  			return err
   261  		}
   262  	}
   263  
   264  	// If a remote has a websocket scheme, all need to have it.
   265  	for _, rcfg := range o.LeafNode.Remotes {
   266  		if len(rcfg.URLs) >= 2 {
   267  			firstIsWS, ok := isWSURL(rcfg.URLs[0]), true
   268  			for i := 1; i < len(rcfg.URLs); i++ {
   269  				u := rcfg.URLs[i]
   270  				if isWS := isWSURL(u); isWS && !firstIsWS || !isWS && firstIsWS {
   271  					ok = false
   272  					break
   273  				}
   274  			}
   275  			if !ok {
   276  				return fmt.Errorf("remote leaf node configuration cannot have a mix of websocket and non-websocket urls: %q", redactURLList(rcfg.URLs))
   277  			}
   278  		}
   279  		// Validate compression settings
   280  		if rcfg.Compression.Mode != _EMPTY_ {
   281  			if err := validateAndNormalizeCompressionOption(&rcfg.Compression, CompressionS2Auto); err != nil {
   282  				return err
   283  			}
   284  		}
   285  	}
   286  
   287  	if o.LeafNode.Port == 0 {
   288  		return nil
   289  	}
   290  
   291  	// If MinVersion is defined, check that it is valid.
   292  	if mv := o.LeafNode.MinVersion; mv != _EMPTY_ {
   293  		if err := checkLeafMinVersionConfig(mv); err != nil {
   294  			return err
   295  		}
   296  	}
   297  
   298  	// The checks below will be done only when detecting that we are configured
   299  	// with gateways. So if an option validation needs to be done regardless,
   300  	// it MUST be done before this point!
   301  
   302  	if o.Gateway.Name == _EMPTY_ && o.Gateway.Port == 0 {
   303  		return nil
   304  	}
   305  	// If we are here we have both leaf nodes and gateways defined, make sure there
   306  	// is a system account defined.
   307  	if o.SystemAccount == _EMPTY_ {
   308  		return fmt.Errorf("leaf nodes and gateways (both being defined) require a system account to also be configured")
   309  	}
   310  	if err := validatePinnedCerts(o.LeafNode.TLSPinnedCerts); err != nil {
   311  		return fmt.Errorf("leafnode: %v", err)
   312  	}
   313  	return nil
   314  }
   315  
   316  func checkLeafMinVersionConfig(mv string) error {
   317  	if ok, err := versionAtLeastCheckError(mv, 2, 8, 0); !ok || err != nil {
   318  		if err != nil {
   319  			return fmt.Errorf("invalid leafnode's minimum version: %v", err)
   320  		} else {
   321  			return fmt.Errorf("the minimum version should be at least 2.8.0")
   322  		}
   323  	}
   324  	return nil
   325  }
   326  
   327  // Used to validate user names in LeafNode configuration.
   328  // - rejects mix of single and multiple users.
   329  // - rejects duplicate user names.
   330  func validateLeafNodeAuthOptions(o *Options) error {
   331  	if len(o.LeafNode.Users) == 0 {
   332  		return nil
   333  	}
   334  	if o.LeafNode.Username != _EMPTY_ {
   335  		return fmt.Errorf("can not have a single user/pass and a users array")
   336  	}
   337  	if o.LeafNode.Nkey != _EMPTY_ {
   338  		return fmt.Errorf("can not have a single nkey and a users array")
   339  	}
   340  	users := map[string]struct{}{}
   341  	for _, u := range o.LeafNode.Users {
   342  		if _, exists := users[u.Username]; exists {
   343  			return fmt.Errorf("duplicate user %q detected in leafnode authorization", u.Username)
   344  		}
   345  		users[u.Username] = struct{}{}
   346  	}
   347  	return nil
   348  }
   349  
   350  // Update remote LeafNode TLS configurations after a config reload.
   351  func (s *Server) updateRemoteLeafNodesTLSConfig(opts *Options) {
   352  	max := len(opts.LeafNode.Remotes)
   353  	if max == 0 {
   354  		return
   355  	}
   356  
   357  	s.mu.RLock()
   358  	defer s.mu.RUnlock()
   359  
   360  	// Changes in the list of remote leaf nodes is not supported.
   361  	// However, make sure that we don't go over the arrays.
   362  	if len(s.leafRemoteCfgs) < max {
   363  		max = len(s.leafRemoteCfgs)
   364  	}
   365  	for i := 0; i < max; i++ {
   366  		ro := opts.LeafNode.Remotes[i]
   367  		cfg := s.leafRemoteCfgs[i]
   368  		if ro.TLSConfig != nil {
   369  			cfg.Lock()
   370  			cfg.TLSConfig = ro.TLSConfig.Clone()
   371  			cfg.TLSHandshakeFirst = ro.TLSHandshakeFirst
   372  			cfg.Unlock()
   373  		}
   374  	}
   375  }
   376  
   377  func (s *Server) reConnectToRemoteLeafNode(remote *leafNodeCfg) {
   378  	delay := s.getOpts().LeafNode.ReconnectInterval
   379  	select {
   380  	case <-time.After(delay):
   381  	case <-s.quitCh:
   382  		s.grWG.Done()
   383  		return
   384  	}
   385  	s.connectToRemoteLeafNode(remote, false)
   386  }
   387  
   388  // Creates a leafNodeCfg object that wraps the RemoteLeafOpts.
   389  func newLeafNodeCfg(remote *RemoteLeafOpts) *leafNodeCfg {
   390  	cfg := &leafNodeCfg{
   391  		RemoteLeafOpts: remote,
   392  		urls:           make([]*url.URL, 0, len(remote.URLs)),
   393  	}
   394  	if len(remote.DenyExports) > 0 || len(remote.DenyImports) > 0 {
   395  		perms := &Permissions{}
   396  		if len(remote.DenyExports) > 0 {
   397  			perms.Publish = &SubjectPermission{Deny: remote.DenyExports}
   398  		}
   399  		if len(remote.DenyImports) > 0 {
   400  			perms.Subscribe = &SubjectPermission{Deny: remote.DenyImports}
   401  		}
   402  		cfg.perms = perms
   403  	}
   404  	// Start with the one that is configured. We will add to this
   405  	// array when receiving async leafnode INFOs.
   406  	cfg.urls = append(cfg.urls, cfg.URLs...)
   407  	// If allowed to randomize, do it on our copy of URLs
   408  	if !remote.NoRandomize {
   409  		rand.Shuffle(len(cfg.urls), func(i, j int) {
   410  			cfg.urls[i], cfg.urls[j] = cfg.urls[j], cfg.urls[i]
   411  		})
   412  	}
   413  	// If we are TLS make sure we save off a proper servername if possible.
   414  	// Do same for user/password since we may need them to connect to
   415  	// a bare URL that we get from INFO protocol.
   416  	for _, u := range cfg.urls {
   417  		cfg.saveTLSHostname(u)
   418  		cfg.saveUserPassword(u)
   419  		// If the url(s) have the "wss://" scheme, and we don't have a TLS
   420  		// config, mark that we should be using TLS anyway.
   421  		if !cfg.TLS && isWSSURL(u) {
   422  			cfg.TLS = true
   423  		}
   424  	}
   425  	return cfg
   426  }
   427  
   428  // Will pick an URL from the list of available URLs.
   429  func (cfg *leafNodeCfg) pickNextURL() *url.URL {
   430  	cfg.Lock()
   431  	defer cfg.Unlock()
   432  	// If the current URL is the first in the list and we have more than
   433  	// one URL, then move that one to end of the list.
   434  	if cfg.curURL != nil && len(cfg.urls) > 1 && urlsAreEqual(cfg.curURL, cfg.urls[0]) {
   435  		first := cfg.urls[0]
   436  		copy(cfg.urls, cfg.urls[1:])
   437  		cfg.urls[len(cfg.urls)-1] = first
   438  	}
   439  	cfg.curURL = cfg.urls[0]
   440  	return cfg.curURL
   441  }
   442  
   443  // Returns the current URL
   444  func (cfg *leafNodeCfg) getCurrentURL() *url.URL {
   445  	cfg.RLock()
   446  	defer cfg.RUnlock()
   447  	return cfg.curURL
   448  }
   449  
   450  // Returns how long the server should wait before attempting
   451  // to solicit a remote leafnode connection.
   452  func (cfg *leafNodeCfg) getConnectDelay() time.Duration {
   453  	cfg.RLock()
   454  	delay := cfg.connDelay
   455  	cfg.RUnlock()
   456  	return delay
   457  }
   458  
   459  // Sets the connect delay.
   460  func (cfg *leafNodeCfg) setConnectDelay(delay time.Duration) {
   461  	cfg.Lock()
   462  	cfg.connDelay = delay
   463  	cfg.Unlock()
   464  }
   465  
   466  // Ensure that non-exported options (used in tests) have
   467  // been properly set.
   468  func (s *Server) setLeafNodeNonExportedOptions() {
   469  	opts := s.getOpts()
   470  	s.leafNodeOpts.dialTimeout = opts.LeafNode.dialTimeout
   471  	if s.leafNodeOpts.dialTimeout == 0 {
   472  		// Use same timeouts as routes for now.
   473  		s.leafNodeOpts.dialTimeout = DEFAULT_ROUTE_DIAL
   474  	}
   475  	s.leafNodeOpts.resolver = opts.LeafNode.resolver
   476  	if s.leafNodeOpts.resolver == nil {
   477  		s.leafNodeOpts.resolver = net.DefaultResolver
   478  	}
   479  }
   480  
   481  const sharedSysAccDelay = 250 * time.Millisecond
   482  
   483  func (s *Server) connectToRemoteLeafNode(remote *leafNodeCfg, firstConnect bool) {
   484  	defer s.grWG.Done()
   485  
   486  	if remote == nil || len(remote.URLs) == 0 {
   487  		s.Debugf("Empty remote leafnode definition, nothing to connect")
   488  		return
   489  	}
   490  
   491  	opts := s.getOpts()
   492  	reconnectDelay := opts.LeafNode.ReconnectInterval
   493  	s.mu.Lock()
   494  	dialTimeout := s.leafNodeOpts.dialTimeout
   495  	resolver := s.leafNodeOpts.resolver
   496  	var isSysAcc bool
   497  	if s.eventsEnabled() {
   498  		isSysAcc = remote.LocalAccount == s.sys.account.Name
   499  	}
   500  	s.mu.Unlock()
   501  
   502  	// If we are sharing a system account and we are not standalone delay to gather some info prior.
   503  	if firstConnect && isSysAcc && !s.standAloneMode() {
   504  		s.Debugf("Will delay first leafnode connect to shared system account due to clustering")
   505  		remote.setConnectDelay(sharedSysAccDelay)
   506  	}
   507  
   508  	if connDelay := remote.getConnectDelay(); connDelay > 0 {
   509  		select {
   510  		case <-time.After(connDelay):
   511  		case <-s.quitCh:
   512  			return
   513  		}
   514  		remote.setConnectDelay(0)
   515  	}
   516  
   517  	var conn net.Conn
   518  
   519  	const connErrFmt = "Error trying to connect as leafnode to remote server %q (attempt %v): %v"
   520  
   521  	attempts := 0
   522  	for s.isRunning() && s.remoteLeafNodeStillValid(remote) {
   523  		rURL := remote.pickNextURL()
   524  		url, err := s.getRandomIP(resolver, rURL.Host, nil)
   525  		if err == nil {
   526  			var ipStr string
   527  			if url != rURL.Host {
   528  				ipStr = fmt.Sprintf(" (%s)", url)
   529  			}
   530  			// Some test may want to disable remotes from connecting
   531  			if s.isLeafConnectDisabled() {
   532  				s.Debugf("Will not attempt to connect to remote server on %q%s, leafnodes currently disabled", rURL.Host, ipStr)
   533  				err = ErrLeafNodeDisabled
   534  			} else {
   535  				s.Debugf("Trying to connect as leafnode to remote server on %q%s", rURL.Host, ipStr)
   536  				conn, err = s.network.DialTimeoutCause("tcp", url, dialTimeout, "leaf")
   537  			}
   538  		}
   539  		if err != nil {
   540  			jitter := time.Duration(rand.Int63n(int64(reconnectDelay)))
   541  			delay := reconnectDelay + jitter
   542  			attempts++
   543  			if s.shouldReportConnectErr(firstConnect, attempts) {
   544  				s.Errorf(connErrFmt, rURL.Host, attempts, err)
   545  			} else {
   546  				s.Debugf(connErrFmt, rURL.Host, attempts, err)
   547  			}
   548  			select {
   549  			case <-s.quitCh:
   550  				return
   551  			case <-time.After(delay):
   552  				// Check if we should migrate any JetStream assets while this remote is down.
   553  				s.checkJetStreamMigrate(remote)
   554  				continue
   555  			}
   556  		}
   557  		if !s.remoteLeafNodeStillValid(remote) {
   558  			conn.Close()
   559  			return
   560  		}
   561  
   562  		// We have a connection here to a remote server.
   563  		// Go ahead and create our leaf node and return.
   564  		s.createLeafNode(conn, rURL, remote, nil)
   565  
   566  		// Clear any observer states if we had them.
   567  		s.clearObserverState(remote)
   568  
   569  		return
   570  	}
   571  }
   572  
   573  // This will clear any observer state such that stream or consumer assets on this server can become leaders again.
   574  func (s *Server) clearObserverState(remote *leafNodeCfg) {
   575  	s.mu.RLock()
   576  	accName := remote.LocalAccount
   577  	s.mu.RUnlock()
   578  
   579  	acc, err := s.LookupAccount(accName)
   580  	if err != nil {
   581  		s.Warnf("Error looking up account [%s] checking for JetStream clear observer state on a leafnode", accName)
   582  		return
   583  	}
   584  
   585  	// Walk all streams looking for any clustered stream, skip otherwise.
   586  	for _, mset := range acc.streams() {
   587  		node := mset.raftNode()
   588  		if node == nil {
   589  			// Not R>1
   590  			continue
   591  		}
   592  		// Check consumers
   593  		for _, o := range mset.getConsumers() {
   594  			if n := o.raftNode(); n != nil {
   595  				// Ensure we can become a leader again.
   596  				n.SetObserver(false)
   597  			}
   598  		}
   599  		// Ensure we can not become a leader again.
   600  		node.SetObserver(false)
   601  	}
   602  }
   603  
   604  // Check to see if we should migrate any assets from this account.
   605  func (s *Server) checkJetStreamMigrate(remote *leafNodeCfg) {
   606  	s.mu.RLock()
   607  	accName, shouldMigrate := remote.LocalAccount, remote.JetStreamClusterMigrate
   608  	s.mu.RUnlock()
   609  
   610  	if !shouldMigrate {
   611  		return
   612  	}
   613  
   614  	acc, err := s.LookupAccount(accName)
   615  	if err != nil {
   616  		s.Warnf("Error looking up account [%s] checking for JetStream migration on a leafnode", accName)
   617  		return
   618  	}
   619  
   620  	// Walk all streams looking for any clustered stream, skip otherwise.
   621  	// If we are the leader force stepdown.
   622  	for _, mset := range acc.streams() {
   623  		node := mset.raftNode()
   624  		if node == nil {
   625  			// Not R>1
   626  			continue
   627  		}
   628  		// Collect any consumers
   629  		for _, o := range mset.getConsumers() {
   630  			if n := o.raftNode(); n != nil {
   631  				if n.Leader() {
   632  					n.StepDown()
   633  				}
   634  				// Ensure we can not become a leader while in this state.
   635  				n.SetObserver(true)
   636  			}
   637  		}
   638  		// Stepdown if this stream was leader.
   639  		if node.Leader() {
   640  			node.StepDown()
   641  		}
   642  		// Ensure we can not become a leader while in this state.
   643  		node.SetObserver(true)
   644  	}
   645  }
   646  
   647  // Helper for checking.
   648  func (s *Server) isLeafConnectDisabled() bool {
   649  	s.mu.RLock()
   650  	defer s.mu.RUnlock()
   651  	return s.leafDisableConnect
   652  }
   653  
   654  // Save off the tlsName for when we use TLS and mix hostnames and IPs. IPs usually
   655  // come from the server we connect to.
   656  //
   657  // We used to save the name only if there was a TLSConfig or scheme equal to "tls".
   658  // However, this was causing failures for users that did not set the scheme (and
   659  // their remote connections did not have a tls{} block).
   660  // We now save the host name regardless in case the remote returns an INFO indicating
   661  // that TLS is required.
   662  func (cfg *leafNodeCfg) saveTLSHostname(u *url.URL) {
   663  	if cfg.tlsName == _EMPTY_ && net.ParseIP(u.Hostname()) == nil {
   664  		cfg.tlsName = u.Hostname()
   665  	}
   666  }
   667  
   668  // Save off the username/password for when we connect using a bare URL
   669  // that we get from the INFO protocol.
   670  func (cfg *leafNodeCfg) saveUserPassword(u *url.URL) {
   671  	if cfg.username == _EMPTY_ && u.User != nil {
   672  		cfg.username = u.User.Username()
   673  		cfg.password, _ = u.User.Password()
   674  	}
   675  }
   676  
   677  // This starts the leafnode accept loop in a go routine, unless it
   678  // is detected that the server has already been shutdown.
   679  func (s *Server) startLeafNodeAcceptLoop() {
   680  	// Snapshot server options.
   681  	opts := s.getOpts()
   682  
   683  	port := opts.LeafNode.Port
   684  	if port == -1 {
   685  		port = 0
   686  	}
   687  
   688  	if s.isShuttingDown() {
   689  		return
   690  	}
   691  
   692  	s.mu.Lock()
   693  	hp := net.JoinHostPort(opts.LeafNode.Host, strconv.Itoa(port))
   694  	l, e := s.network.ListenCause("tcp", hp, "leaf")
   695  	s.leafNodeListenerErr = e
   696  	if e != nil {
   697  		s.mu.Unlock()
   698  		s.Fatalf("Error listening on leafnode port: %d - %v", opts.LeafNode.Port, e)
   699  		return
   700  	}
   701  
   702  	s.Noticef("Listening for leafnode connections on %s",
   703  		net.JoinHostPort(opts.LeafNode.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port)))
   704  
   705  	tlsRequired := opts.LeafNode.TLSConfig != nil
   706  	tlsVerify := tlsRequired && opts.LeafNode.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert
   707  	// Do not set compression in this Info object, it would possibly cause
   708  	// issues when sending asynchronous INFO to the remote.
   709  	info := Info{
   710  		ID:            s.info.ID,
   711  		Name:          s.info.Name,
   712  		Version:       s.info.Version,
   713  		GitCommit:     gitCommit,
   714  		GoVersion:     runtime.Version(),
   715  		AuthRequired:  true,
   716  		TLSRequired:   tlsRequired,
   717  		TLSVerify:     tlsVerify,
   718  		MaxPayload:    s.info.MaxPayload, // TODO(dlc) - Allow override?
   719  		Headers:       s.supportsHeaders(),
   720  		JetStream:     opts.JetStream,
   721  		Domain:        opts.JetStreamDomain,
   722  		Proto:         s.getServerProto(),
   723  		InfoOnConnect: true,
   724  	}
   725  	// If we have selected a random port...
   726  	if port == 0 {
   727  		// Write resolved port back to options.
   728  		opts.LeafNode.Port = l.Addr().(*net.TCPAddr).Port
   729  	}
   730  
   731  	s.leafNodeInfo = info
   732  	// Possibly override Host/Port and set IP based on Cluster.Advertise
   733  	if err := s.setLeafNodeInfoHostPortAndIP(); err != nil {
   734  		s.Fatalf("Error setting leafnode INFO with LeafNode.Advertise value of %s, err=%v", opts.LeafNode.Advertise, err)
   735  		l.Close()
   736  		s.mu.Unlock()
   737  		return
   738  	}
   739  	s.leafURLsMap[s.leafNodeInfo.IP]++
   740  	s.generateLeafNodeInfoJSON()
   741  
   742  	// Setup state that can enable shutdown
   743  	s.leafNodeListener = l
   744  
   745  	// As of now, a server that does not have remotes configured would
   746  	// never solicit a connection, so we should not have to warn if
   747  	// InsecureSkipVerify is set in main LeafNodes config (since
   748  	// this TLS setting matters only when soliciting a connection).
   749  	// Still, warn if insecure is set in any of LeafNode block.
   750  	// We need to check remotes, even if tls is not required on accept.
   751  	warn := tlsRequired && opts.LeafNode.TLSConfig.InsecureSkipVerify
   752  	if !warn {
   753  		for _, r := range opts.LeafNode.Remotes {
   754  			if r.TLSConfig != nil && r.TLSConfig.InsecureSkipVerify {
   755  				warn = true
   756  				break
   757  			}
   758  		}
   759  	}
   760  	if warn {
   761  		s.Warnf(leafnodeTLSInsecureWarning)
   762  	}
   763  	go s.acceptConnections(l, "Leafnode", func(conn net.Conn) { s.createLeafNode(conn, nil, nil, nil) }, nil)
   764  	s.mu.Unlock()
   765  }
   766  
   767  // RegEx to match a creds file with user JWT and Seed.
   768  var credsRe = regexp.MustCompile(`\s*(?:(?:[-]{3,}[^\n]*[-]{3,}\n)(.+)(?:\n\s*[-]{3,}[^\n]*[-]{3,}\n))`)
   769  
   770  // clusterName is provided as argument to avoid lock ordering issues with the locked client c
   771  // Lock should be held entering here.
   772  func (c *client) sendLeafConnect(clusterName string, headers bool) error {
   773  	// We support basic user/pass and operator based user JWT with signatures.
   774  	cinfo := leafConnectInfo{
   775  		Version:       VERSION,
   776  		ID:            c.srv.info.ID,
   777  		Domain:        c.srv.info.Domain,
   778  		Name:          c.srv.info.Name,
   779  		Hub:           c.leaf.remote.Hub,
   780  		Cluster:       clusterName,
   781  		Headers:       headers,
   782  		JetStream:     c.acc.jetStreamConfigured(),
   783  		DenyPub:       c.leaf.remote.DenyImports,
   784  		Compression:   c.leaf.compression,
   785  		RemoteAccount: c.acc.GetName(),
   786  		Proto:         c.srv.getServerProto(),
   787  	}
   788  
   789  	// If a signature callback is specified, this takes precedence over anything else.
   790  	if cb := c.leaf.remote.SignatureCB; cb != nil {
   791  		nonce := c.nonce
   792  		c.mu.Unlock()
   793  		jwt, sigraw, err := cb(nonce)
   794  		c.mu.Lock()
   795  		if err == nil && c.isClosed() {
   796  			err = ErrConnectionClosed
   797  		}
   798  		if err != nil {
   799  			c.Errorf("Error signing the nonce: %v", err)
   800  			return err
   801  		}
   802  		sig := base64.RawURLEncoding.EncodeToString(sigraw)
   803  		cinfo.JWT, cinfo.Sig = jwt, sig
   804  
   805  	} else if creds := c.leaf.remote.Credentials; creds != _EMPTY_ {
   806  		// Check for credentials first, that will take precedence..
   807  		c.Debugf("Authenticating with credentials file %q", c.leaf.remote.Credentials)
   808  		contents, err := os.ReadFile(creds)
   809  		if err != nil {
   810  			c.Errorf("%v", err)
   811  			return err
   812  		}
   813  		defer wipeSlice(contents)
   814  		items := credsRe.FindAllSubmatch(contents, -1)
   815  		if len(items) < 2 {
   816  			c.Errorf("Credentials file malformed")
   817  			return err
   818  		}
   819  		// First result should be the user JWT.
   820  		// We copy here so that the file containing the seed will be wiped appropriately.
   821  		raw := items[0][1]
   822  		tmp := make([]byte, len(raw))
   823  		copy(tmp, raw)
   824  		// Seed is second item.
   825  		kp, err := nkeys.FromSeed(items[1][1])
   826  		if err != nil {
   827  			c.Errorf("Credentials file has malformed seed")
   828  			return err
   829  		}
   830  		// Wipe our key on exit.
   831  		defer kp.Wipe()
   832  
   833  		sigraw, _ := kp.Sign(c.nonce)
   834  		sig := base64.RawURLEncoding.EncodeToString(sigraw)
   835  		cinfo.JWT = bytesToString(tmp)
   836  		cinfo.Sig = sig
   837  	} else if nkey := c.leaf.remote.Nkey; nkey != _EMPTY_ {
   838  		kp, err := nkeys.FromSeed([]byte(nkey))
   839  		if err != nil {
   840  			c.Errorf("Remote nkey has malformed seed")
   841  			return err
   842  		}
   843  		// Wipe our key on exit.
   844  		defer kp.Wipe()
   845  		sigraw, _ := kp.Sign(c.nonce)
   846  		sig := base64.RawURLEncoding.EncodeToString(sigraw)
   847  		pkey, _ := kp.PublicKey()
   848  		cinfo.Nkey = pkey
   849  		cinfo.Sig = sig
   850  	} else if userInfo := c.leaf.remote.curURL.User; userInfo != nil {
   851  		cinfo.User = userInfo.Username()
   852  		cinfo.Pass, _ = userInfo.Password()
   853  	} else if c.leaf.remote.username != _EMPTY_ {
   854  		cinfo.User = c.leaf.remote.username
   855  		cinfo.Pass = c.leaf.remote.password
   856  	}
   857  	b, err := json.Marshal(cinfo)
   858  	if err != nil {
   859  		c.Errorf("Error marshaling CONNECT to remote leafnode: %v\n", err)
   860  		return err
   861  	}
   862  	// Although this call is made before the writeLoop is created,
   863  	// we don't really need to send in place. The protocol will be
   864  	// sent out by the writeLoop.
   865  	c.enqueueProto([]byte(fmt.Sprintf(ConProto, b)))
   866  	return nil
   867  }
   868  
   869  // Makes a deep copy of the LeafNode Info structure.
   870  // The server lock is held on entry.
   871  func (s *Server) copyLeafNodeInfo() *Info {
   872  	clone := s.leafNodeInfo
   873  	// Copy the array of urls.
   874  	if len(s.leafNodeInfo.LeafNodeURLs) > 0 {
   875  		clone.LeafNodeURLs = append([]string(nil), s.leafNodeInfo.LeafNodeURLs...)
   876  	}
   877  	return &clone
   878  }
   879  
   880  // Adds a LeafNode URL that we get when a route connects to the Info structure.
   881  // Regenerates the JSON byte array so that it can be sent to LeafNode connections.
   882  // Returns a boolean indicating if the URL was added or not.
   883  // Server lock is held on entry
   884  func (s *Server) addLeafNodeURL(urlStr string) bool {
   885  	if s.leafURLsMap.addUrl(urlStr) {
   886  		s.generateLeafNodeInfoJSON()
   887  		return true
   888  	}
   889  	return false
   890  }
   891  
   892  // Removes a LeafNode URL of the route that is disconnecting from the Info structure.
   893  // Regenerates the JSON byte array so that it can be sent to LeafNode connections.
   894  // Returns a boolean indicating if the URL was removed or not.
   895  // Server lock is held on entry.
   896  func (s *Server) removeLeafNodeURL(urlStr string) bool {
   897  	// Don't need to do this if we are removing the route connection because
   898  	// we are shuting down...
   899  	if s.isShuttingDown() {
   900  		return false
   901  	}
   902  	if s.leafURLsMap.removeUrl(urlStr) {
   903  		s.generateLeafNodeInfoJSON()
   904  		return true
   905  	}
   906  	return false
   907  }
   908  
   909  // Server lock is held on entry
   910  func (s *Server) generateLeafNodeInfoJSON() {
   911  	s.leafNodeInfo.Cluster = s.cachedClusterName()
   912  	s.leafNodeInfo.LeafNodeURLs = s.leafURLsMap.getAsStringSlice()
   913  	s.leafNodeInfo.WSConnectURLs = s.websocket.connectURLsMap.getAsStringSlice()
   914  	s.leafNodeInfoJSON = generateInfoJSON(&s.leafNodeInfo)
   915  }
   916  
   917  // Sends an async INFO protocol so that the connected servers can update
   918  // their list of LeafNode urls.
   919  func (s *Server) sendAsyncLeafNodeInfo() {
   920  	for _, c := range s.leafs {
   921  		c.mu.Lock()
   922  		c.enqueueProto(s.leafNodeInfoJSON)
   923  		c.mu.Unlock()
   924  	}
   925  }
   926  
   927  // Called when an inbound leafnode connection is accepted or we create one for a solicited leafnode.
   928  func (s *Server) createLeafNode(conn net.Conn, rURL *url.URL, remote *leafNodeCfg, ws *websocket) *client {
   929  	// Snapshot server options.
   930  	opts := s.getOpts()
   931  
   932  	maxPay := int32(opts.MaxPayload)
   933  	maxSubs := int32(opts.MaxSubs)
   934  	// For system, maxSubs of 0 means unlimited, so re-adjust here.
   935  	if maxSubs == 0 {
   936  		maxSubs = -1
   937  	}
   938  	now := time.Now().UTC()
   939  
   940  	c := &client{srv: s, nc: conn, kind: LEAF, opts: defaultOpts, mpay: maxPay, msubs: maxSubs, start: now, last: now}
   941  	// Do not update the smap here, we need to do it in initLeafNodeSmapAndSendSubs
   942  	c.leaf = &leaf{}
   943  
   944  	// For accepted LN connections, ws will be != nil if it was accepted
   945  	// through the Websocket port.
   946  	c.ws = ws
   947  
   948  	// For remote, check if the scheme starts with "ws", if so, we will initiate
   949  	// a remote Leaf Node connection as a websocket connection.
   950  	if remote != nil && rURL != nil && isWSURL(rURL) {
   951  		remote.RLock()
   952  		c.ws = &websocket{compress: remote.Websocket.Compression, maskwrite: !remote.Websocket.NoMasking}
   953  		remote.RUnlock()
   954  	}
   955  
   956  	// Determines if we are soliciting the connection or not.
   957  	var solicited bool
   958  	var acc *Account
   959  	var remoteSuffix string
   960  	if remote != nil {
   961  		// For now, if lookup fails, we will constantly try
   962  		// to recreate this LN connection.
   963  		lacc := remote.LocalAccount
   964  		var err error
   965  		acc, err = s.LookupAccount(lacc)
   966  		if err != nil {
   967  			// An account not existing is something that can happen with nats/http account resolver and the account
   968  			// has not yet been pushed, or the request failed for other reasons.
   969  			// remote needs to be set or retry won't happen
   970  			c.leaf.remote = remote
   971  			c.closeConnection(MissingAccount)
   972  			s.Errorf("Unable to lookup account %s for solicited leafnode connection: %v", lacc, err)
   973  			return nil
   974  		}
   975  		remoteSuffix = fmt.Sprintf(" for account: %s", acc.traceLabel())
   976  	}
   977  
   978  	c.mu.Lock()
   979  	c.initClient()
   980  	c.Noticef("Leafnode connection created%s %s", remoteSuffix, c.opts.Name)
   981  
   982  	var tlsFirst bool
   983  	if remote != nil {
   984  		solicited = true
   985  		remote.Lock()
   986  		c.leaf.remote = remote
   987  		c.setPermissions(remote.perms)
   988  		if !c.leaf.remote.Hub {
   989  			c.leaf.isSpoke = true
   990  		}
   991  		tlsFirst = remote.TLSHandshakeFirst
   992  		remote.Unlock()
   993  		c.acc = acc
   994  	} else {
   995  		c.flags.set(expectConnect)
   996  		if ws != nil {
   997  			c.Debugf("Leafnode compression=%v", c.ws.compress)
   998  		}
   999  	}
  1000  	c.mu.Unlock()
  1001  
  1002  	var nonce [nonceLen]byte
  1003  	var info *Info
  1004  
  1005  	// Grab this before the client lock below.
  1006  	if !solicited {
  1007  		// Grab server variables
  1008  		s.mu.Lock()
  1009  		info = s.copyLeafNodeInfo()
  1010  		// For tests that want to simulate old servers, do not set the compression
  1011  		// on the INFO protocol if configured with CompressionNotSupported.
  1012  		if cm := opts.LeafNode.Compression.Mode; cm != CompressionNotSupported {
  1013  			info.Compression = cm
  1014  		}
  1015  		s.generateNonce(nonce[:])
  1016  		s.mu.Unlock()
  1017  	}
  1018  
  1019  	// Grab lock
  1020  	c.mu.Lock()
  1021  
  1022  	var preBuf []byte
  1023  	if solicited {
  1024  		// For websocket connection, we need to send an HTTP request,
  1025  		// and get the response before starting the readLoop to get
  1026  		// the INFO, etc..
  1027  		if c.isWebsocket() {
  1028  			var err error
  1029  			var closeReason ClosedState
  1030  
  1031  			preBuf, closeReason, err = c.leafNodeSolicitWSConnection(opts, rURL, remote)
  1032  			if err != nil {
  1033  				c.Errorf("Error soliciting websocket connection: %v", err)
  1034  				c.mu.Unlock()
  1035  				if closeReason != 0 {
  1036  					c.closeConnection(closeReason)
  1037  				}
  1038  				return nil
  1039  			}
  1040  		} else {
  1041  			// If configured to do TLS handshake first
  1042  			if tlsFirst {
  1043  				if _, err := c.leafClientHandshakeIfNeeded(remote, opts); err != nil {
  1044  					c.mu.Unlock()
  1045  					return nil
  1046  				}
  1047  			}
  1048  			// We need to wait for the info, but not for too long.
  1049  			c.nc.SetReadDeadline(time.Now().Add(DEFAULT_LEAFNODE_INFO_WAIT))
  1050  		}
  1051  
  1052  		// We will process the INFO from the readloop and finish by
  1053  		// sending the CONNECT and finish registration later.
  1054  	} else {
  1055  		// Send our info to the other side.
  1056  		// Remember the nonce we sent here for signatures, etc.
  1057  		c.nonce = make([]byte, nonceLen)
  1058  		copy(c.nonce, nonce[:])
  1059  		info.Nonce = bytesToString(c.nonce)
  1060  		info.CID = c.cid
  1061  		proto := generateInfoJSON(info)
  1062  		if !opts.LeafNode.TLSHandshakeFirst {
  1063  			// We have to send from this go routine because we may
  1064  			// have to block for TLS handshake before we start our
  1065  			// writeLoop go routine. The other side needs to receive
  1066  			// this before it can initiate the TLS handshake..
  1067  			c.sendProtoNow(proto)
  1068  
  1069  			// The above call could have marked the connection as closed (due to TCP error).
  1070  			if c.isClosed() {
  1071  				c.mu.Unlock()
  1072  				c.closeConnection(WriteError)
  1073  				return nil
  1074  			}
  1075  		}
  1076  
  1077  		// Check to see if we need to spin up TLS.
  1078  		if !c.isWebsocket() && info.TLSRequired {
  1079  			// Perform server-side TLS handshake.
  1080  			if err := c.doTLSServerHandshake(tlsHandshakeLeaf, opts.LeafNode.TLSConfig, opts.LeafNode.TLSTimeout, opts.LeafNode.TLSPinnedCerts); err != nil {
  1081  				c.mu.Unlock()
  1082  				return nil
  1083  			}
  1084  		}
  1085  
  1086  		// If the user wants the TLS handshake to occur first, now that it is
  1087  		// done, send the INFO protocol.
  1088  		if opts.LeafNode.TLSHandshakeFirst {
  1089  			c.sendProtoNow(proto)
  1090  			if c.isClosed() {
  1091  				c.mu.Unlock()
  1092  				c.closeConnection(WriteError)
  1093  				return nil
  1094  			}
  1095  		}
  1096  
  1097  		// Leaf nodes will always require a CONNECT to let us know
  1098  		// when we are properly bound to an account.
  1099  		//
  1100  		// If compression is configured, we can't set the authTimer here because
  1101  		// it would cause the parser to fail any incoming protocol that is not a
  1102  		// CONNECT (and we need to exchange INFO protocols for compression
  1103  		// negotiation). So instead, use the ping timer until we are done with
  1104  		// negotiation and can set the auth timer.
  1105  		timeout := secondsToDuration(opts.LeafNode.AuthTimeout)
  1106  		if needsCompression(opts.LeafNode.Compression.Mode) {
  1107  			c.ping.tmr = time.AfterFunc(timeout, func() {
  1108  				c.authTimeout()
  1109  			})
  1110  		} else {
  1111  			c.setAuthTimer(timeout)
  1112  		}
  1113  	}
  1114  
  1115  	// Keep track in case server is shutdown before we can successfully register.
  1116  	if !s.addToTempClients(c.cid, c) {
  1117  		c.mu.Unlock()
  1118  		c.setNoReconnect()
  1119  		c.closeConnection(ServerShutdown)
  1120  		return nil
  1121  	}
  1122  
  1123  	// Spin up the read loop.
  1124  	s.startGoRoutine(func() { c.readLoop(preBuf) })
  1125  
  1126  	// We will spin the write loop for solicited connections only
  1127  	// when processing the INFO and after switching to TLS if needed.
  1128  	if !solicited {
  1129  		s.startGoRoutine(func() { c.writeLoop() })
  1130  	}
  1131  
  1132  	c.mu.Unlock()
  1133  
  1134  	return c
  1135  }
  1136  
  1137  // Will perform the client-side TLS handshake if needed. Assumes that this
  1138  // is called by the solicit side (remote will be non nil). Returns `true`
  1139  // if TLS is required, `false` otherwise.
  1140  // Lock held on entry.
  1141  func (c *client) leafClientHandshakeIfNeeded(remote *leafNodeCfg, opts *Options) (bool, error) {
  1142  	// Check if TLS is required and gather TLS config variables.
  1143  	tlsRequired, tlsConfig, tlsName, tlsTimeout := c.leafNodeGetTLSConfigForSolicit(remote)
  1144  	if !tlsRequired {
  1145  		return false, nil
  1146  	}
  1147  
  1148  	// If TLS required, peform handshake.
  1149  	// Get the URL that was used to connect to the remote server.
  1150  	rURL := remote.getCurrentURL()
  1151  
  1152  	// Perform the client-side TLS handshake.
  1153  	if resetTLSName, err := c.doTLSClientHandshake(tlsHandshakeLeaf, rURL, tlsConfig, tlsName, tlsTimeout, opts.LeafNode.TLSPinnedCerts); err != nil {
  1154  		// Check if we need to reset the remote's TLS name.
  1155  		if resetTLSName {
  1156  			remote.Lock()
  1157  			remote.tlsName = _EMPTY_
  1158  			remote.Unlock()
  1159  		}
  1160  		return false, err
  1161  	}
  1162  	return true, nil
  1163  }
  1164  
  1165  func (c *client) processLeafnodeInfo(info *Info) {
  1166  	c.mu.Lock()
  1167  	if c.leaf == nil || c.isClosed() {
  1168  		c.mu.Unlock()
  1169  		return
  1170  	}
  1171  	s := c.srv
  1172  	opts := s.getOpts()
  1173  	remote := c.leaf.remote
  1174  	didSolicit := remote != nil
  1175  	firstINFO := !c.flags.isSet(infoReceived)
  1176  
  1177  	// In case of websocket, the TLS handshake has been already done.
  1178  	// So check only for non websocket connections and for configurations
  1179  	// where the TLS Handshake was not done first.
  1180  	if didSolicit && !c.flags.isSet(handshakeComplete) && !c.isWebsocket() && !remote.TLSHandshakeFirst {
  1181  		// If the server requires TLS, we need to set this in the remote
  1182  		// otherwise if there is no TLS configuration block for the remote,
  1183  		// the solicit side will not attempt to perform the TLS handshake.
  1184  		if firstINFO && info.TLSRequired {
  1185  			remote.TLS = true
  1186  		}
  1187  		if _, err := c.leafClientHandshakeIfNeeded(remote, opts); err != nil {
  1188  			c.mu.Unlock()
  1189  			return
  1190  		}
  1191  	}
  1192  
  1193  	// Check for compression, unless already done.
  1194  	if firstINFO && !c.flags.isSet(compressionNegotiated) {
  1195  		// Prevent from getting back here.
  1196  		c.flags.set(compressionNegotiated)
  1197  
  1198  		var co *CompressionOpts
  1199  		if !didSolicit {
  1200  			co = &opts.LeafNode.Compression
  1201  		} else {
  1202  			co = &remote.Compression
  1203  		}
  1204  		if needsCompression(co.Mode) {
  1205  			// Release client lock since following function will need server lock.
  1206  			c.mu.Unlock()
  1207  			compress, err := s.negotiateLeafCompression(c, didSolicit, info.Compression, co)
  1208  			if err != nil {
  1209  				c.sendErrAndErr(err.Error())
  1210  				c.closeConnection(ProtocolViolation)
  1211  				return
  1212  			}
  1213  			if compress {
  1214  				// Done for now, will get back another INFO protocol...
  1215  				return
  1216  			}
  1217  			// No compression because one side does not want/can't, so proceed.
  1218  			c.mu.Lock()
  1219  			// Check that the connection did not close if the lock was released.
  1220  			if c.isClosed() {
  1221  				c.mu.Unlock()
  1222  				return
  1223  			}
  1224  		} else {
  1225  			// Coming from an old server, the Compression field would be the empty
  1226  			// string. For servers that are configured with CompressionNotSupported,
  1227  			// this makes them behave as old servers.
  1228  			if info.Compression == _EMPTY_ || co.Mode == CompressionNotSupported {
  1229  				c.leaf.compression = CompressionNotSupported
  1230  			} else {
  1231  				c.leaf.compression = CompressionOff
  1232  			}
  1233  		}
  1234  		// Accepting side does not normally process an INFO protocol during
  1235  		// initial connection handshake. So we keep it consistent by returning
  1236  		// if we are not soliciting.
  1237  		if !didSolicit {
  1238  			// If we had created the ping timer instead of the auth timer, we will
  1239  			// clear the ping timer and set the auth timer now that the compression
  1240  			// negotiation is done.
  1241  			if info.Compression != _EMPTY_ && c.ping.tmr != nil {
  1242  				clearTimer(&c.ping.tmr)
  1243  				c.setAuthTimer(secondsToDuration(opts.LeafNode.AuthTimeout))
  1244  			}
  1245  			c.mu.Unlock()
  1246  			return
  1247  		}
  1248  		// Fall through and process the INFO protocol as usual.
  1249  	}
  1250  
  1251  	// Note: For now, only the initial INFO has a nonce. We
  1252  	// will probably do auto key rotation at some point.
  1253  	if firstINFO {
  1254  		// Mark that the INFO protocol has been received.
  1255  		c.flags.set(infoReceived)
  1256  		// Prevent connecting to non leafnode port. Need to do this only for
  1257  		// the first INFO, not for async INFO updates...
  1258  		//
  1259  		// Content of INFO sent by the server when accepting a tcp connection.
  1260  		// -------------------------------------------------------------------
  1261  		// Listen Port Of | CID | ClientConnectURLs | LeafNodeURLs | Gateway |
  1262  		// -------------------------------------------------------------------
  1263  		//      CLIENT    |  X* |        X**        |              |         |
  1264  		//      ROUTE     |     |        X**        |      X***    |         |
  1265  		//     GATEWAY    |     |                   |              |    X    |
  1266  		//     LEAFNODE   |  X  |                   |       X      |         |
  1267  		// -------------------------------------------------------------------
  1268  		// *   Not on older servers.
  1269  		// **  Not if "no advertise" is enabled.
  1270  		// *** Not if leafnode's "no advertise" is enabled.
  1271  		//
  1272  		// As seen from above, a solicited LeafNode connection should receive
  1273  		// from the remote server an INFO with CID and LeafNodeURLs. Anything
  1274  		// else should be considered an attempt to connect to a wrong port.
  1275  		if didSolicit && (info.CID == 0 || info.LeafNodeURLs == nil) {
  1276  			c.mu.Unlock()
  1277  			c.Errorf(ErrConnectedToWrongPort.Error())
  1278  			c.closeConnection(WrongPort)
  1279  			return
  1280  		}
  1281  		// Capture a nonce here.
  1282  		c.nonce = []byte(info.Nonce)
  1283  		if info.TLSRequired && didSolicit {
  1284  			remote.TLS = true
  1285  		}
  1286  		supportsHeaders := c.srv.supportsHeaders()
  1287  		c.headers = supportsHeaders && info.Headers
  1288  
  1289  		// Remember the remote server.
  1290  		// Pre 2.2.0 servers are not sending their server name.
  1291  		// In that case, use info.ID, which, for those servers, matches
  1292  		// the content of the field `Name` in the leafnode CONNECT protocol.
  1293  		if info.Name == _EMPTY_ {
  1294  			c.leaf.remoteServer = info.ID
  1295  		} else {
  1296  			c.leaf.remoteServer = info.Name
  1297  		}
  1298  		c.leaf.remoteDomain = info.Domain
  1299  		c.leaf.remoteCluster = info.Cluster
  1300  		// We send the protocol version in the INFO protocol.
  1301  		// Keep track of it, so we know if this connection supports message
  1302  		// tracing for instance.
  1303  		c.opts.Protocol = info.Proto
  1304  	}
  1305  
  1306  	// For both initial INFO and async INFO protocols, Possibly
  1307  	// update our list of remote leafnode URLs we can connect to.
  1308  	if didSolicit && (len(info.LeafNodeURLs) > 0 || len(info.WSConnectURLs) > 0) {
  1309  		// Consider the incoming array as the most up-to-date
  1310  		// representation of the remote cluster's list of URLs.
  1311  		c.updateLeafNodeURLs(info)
  1312  	}
  1313  
  1314  	// Check to see if we have permissions updates here.
  1315  	if info.Import != nil || info.Export != nil {
  1316  		perms := &Permissions{
  1317  			Publish:   info.Export,
  1318  			Subscribe: info.Import,
  1319  		}
  1320  		// Check if we have local deny clauses that we need to merge.
  1321  		if remote := c.leaf.remote; remote != nil {
  1322  			if len(remote.DenyExports) > 0 {
  1323  				if perms.Publish == nil {
  1324  					perms.Publish = &SubjectPermission{}
  1325  				}
  1326  				perms.Publish.Deny = append(perms.Publish.Deny, remote.DenyExports...)
  1327  			}
  1328  			if len(remote.DenyImports) > 0 {
  1329  				if perms.Subscribe == nil {
  1330  					perms.Subscribe = &SubjectPermission{}
  1331  				}
  1332  				perms.Subscribe.Deny = append(perms.Subscribe.Deny, remote.DenyImports...)
  1333  			}
  1334  		}
  1335  		c.setPermissions(perms)
  1336  	}
  1337  
  1338  	var resumeConnect bool
  1339  
  1340  	// If this is a remote connection and this is the first INFO protocol,
  1341  	// then we need to finish the connect process by sending CONNECT, etc..
  1342  	if firstINFO && didSolicit {
  1343  		// Clear deadline that was set in createLeafNode while waiting for the INFO.
  1344  		c.nc.SetDeadline(time.Time{})
  1345  		resumeConnect = true
  1346  	} else if !firstINFO && didSolicit {
  1347  		c.leaf.remoteAccName = info.RemoteAccount
  1348  	}
  1349  
  1350  	// Check if we have the remote account information and if so make sure it's stored.
  1351  	if info.RemoteAccount != _EMPTY_ {
  1352  		s.leafRemoteAccounts.Store(c.acc.Name, info.RemoteAccount)
  1353  	}
  1354  	c.mu.Unlock()
  1355  
  1356  	finishConnect := info.ConnectInfo
  1357  	if resumeConnect && s != nil {
  1358  		s.leafNodeResumeConnectProcess(c)
  1359  		if !info.InfoOnConnect {
  1360  			finishConnect = true
  1361  		}
  1362  	}
  1363  	if finishConnect {
  1364  		s.leafNodeFinishConnectProcess(c)
  1365  	}
  1366  }
  1367  
  1368  func (s *Server) negotiateLeafCompression(c *client, didSolicit bool, infoCompression string, co *CompressionOpts) (bool, error) {
  1369  	// Negotiate the appropriate compression mode (or no compression)
  1370  	cm, err := selectCompressionMode(co.Mode, infoCompression)
  1371  	if err != nil {
  1372  		return false, err
  1373  	}
  1374  	c.mu.Lock()
  1375  	// For "auto" mode, set the initial compression mode based on RTT
  1376  	if cm == CompressionS2Auto {
  1377  		if c.rttStart.IsZero() {
  1378  			c.rtt = computeRTT(c.start)
  1379  		}
  1380  		cm = selectS2AutoModeBasedOnRTT(c.rtt, co.RTTThresholds)
  1381  	}
  1382  	// Keep track of the negotiated compression mode.
  1383  	c.leaf.compression = cm
  1384  	cid := c.cid
  1385  	var nonce string
  1386  	if !didSolicit {
  1387  		nonce = bytesToString(c.nonce)
  1388  	}
  1389  	c.mu.Unlock()
  1390  
  1391  	if !needsCompression(cm) {
  1392  		return false, nil
  1393  	}
  1394  
  1395  	// If we end-up doing compression...
  1396  
  1397  	// Generate an INFO with the chosen compression mode.
  1398  	s.mu.Lock()
  1399  	info := s.copyLeafNodeInfo()
  1400  	info.Compression, info.CID, info.Nonce = compressionModeForInfoProtocol(co, cm), cid, nonce
  1401  	infoProto := generateInfoJSON(info)
  1402  	s.mu.Unlock()
  1403  
  1404  	// If we solicited, then send this INFO protocol BEFORE switching
  1405  	// to compression writer. However, if we did not, we send it after.
  1406  	c.mu.Lock()
  1407  	if didSolicit {
  1408  		c.enqueueProto(infoProto)
  1409  		// Make sure it is completely flushed (the pending bytes goes to
  1410  		// 0) before proceeding.
  1411  		for c.out.pb > 0 && !c.isClosed() {
  1412  			c.flushOutbound()
  1413  		}
  1414  	}
  1415  	// This is to notify the readLoop that it should switch to a
  1416  	// (de)compression reader.
  1417  	c.in.flags.set(switchToCompression)
  1418  	// Create the compress writer before queueing the INFO protocol for
  1419  	// a route that did not solicit. It will make sure that that proto
  1420  	// is sent with compression on.
  1421  	c.out.cw = s2.NewWriter(nil, s2WriterOptions(cm)...)
  1422  	if !didSolicit {
  1423  		c.enqueueProto(infoProto)
  1424  	}
  1425  	c.mu.Unlock()
  1426  	return true, nil
  1427  }
  1428  
  1429  // When getting a leaf node INFO protocol, use the provided
  1430  // array of urls to update the list of possible endpoints.
  1431  func (c *client) updateLeafNodeURLs(info *Info) {
  1432  	cfg := c.leaf.remote
  1433  	cfg.Lock()
  1434  	defer cfg.Unlock()
  1435  
  1436  	// We have ensured that if a remote has a WS scheme, then all are.
  1437  	// So check if first is WS, then add WS URLs, otherwise, add non WS ones.
  1438  	if len(cfg.URLs) > 0 && isWSURL(cfg.URLs[0]) {
  1439  		// It does not really matter if we use "ws://" or "wss://" here since
  1440  		// we will have already marked that the remote should use TLS anyway.
  1441  		// But use proper scheme for log statements, etc...
  1442  		proto := wsSchemePrefix
  1443  		if cfg.TLS {
  1444  			proto = wsSchemePrefixTLS
  1445  		}
  1446  		c.doUpdateLNURLs(cfg, proto, info.WSConnectURLs)
  1447  		return
  1448  	}
  1449  	c.doUpdateLNURLs(cfg, "nats-leaf", info.LeafNodeURLs)
  1450  }
  1451  
  1452  func (c *client) doUpdateLNURLs(cfg *leafNodeCfg, scheme string, URLs []string) {
  1453  	cfg.urls = make([]*url.URL, 0, 1+len(URLs))
  1454  	// Add the ones we receive in the protocol
  1455  	for _, surl := range URLs {
  1456  		url, err := url.Parse(fmt.Sprintf("%s://%s", scheme, surl))
  1457  		if err != nil {
  1458  			// As per below, the URLs we receive should not have contained URL info, so this should be safe to log.
  1459  			c.Errorf("Error parsing url %q: %v", surl, err)
  1460  			continue
  1461  		}
  1462  		// Do not add if it's the same as what we already have configured.
  1463  		var dup bool
  1464  		for _, u := range cfg.URLs {
  1465  			// URLs that we receive never have user info, but the
  1466  			// ones that were configured may have. Simply compare
  1467  			// host and port to decide if they are equal or not.
  1468  			if url.Host == u.Host && url.Port() == u.Port() {
  1469  				dup = true
  1470  				break
  1471  			}
  1472  		}
  1473  		if !dup {
  1474  			cfg.urls = append(cfg.urls, url)
  1475  			cfg.saveTLSHostname(url)
  1476  		}
  1477  	}
  1478  	// Add the configured one
  1479  	cfg.urls = append(cfg.urls, cfg.URLs...)
  1480  }
  1481  
  1482  // Similar to setInfoHostPortAndGenerateJSON, but for leafNodeInfo.
  1483  func (s *Server) setLeafNodeInfoHostPortAndIP() error {
  1484  	opts := s.getOpts()
  1485  	if opts.LeafNode.Advertise != _EMPTY_ {
  1486  		advHost, advPort, err := parseHostPort(opts.LeafNode.Advertise, opts.LeafNode.Port)
  1487  		if err != nil {
  1488  			return err
  1489  		}
  1490  		s.leafNodeInfo.Host = advHost
  1491  		s.leafNodeInfo.Port = advPort
  1492  	} else {
  1493  		s.leafNodeInfo.Host = opts.LeafNode.Host
  1494  		s.leafNodeInfo.Port = opts.LeafNode.Port
  1495  		// If the host is "0.0.0.0" or "::" we need to resolve to a public IP.
  1496  		// This will return at most 1 IP.
  1497  		hostIsIPAny, ips, err := s.getNonLocalIPsIfHostIsIPAny(s.leafNodeInfo.Host, false)
  1498  		if err != nil {
  1499  			return err
  1500  		}
  1501  		if hostIsIPAny {
  1502  			if len(ips) == 0 {
  1503  				s.Errorf("Could not find any non-local IP for leafnode's listen specification %q",
  1504  					s.leafNodeInfo.Host)
  1505  			} else {
  1506  				// Take the first from the list...
  1507  				s.leafNodeInfo.Host = ips[0]
  1508  			}
  1509  		}
  1510  	}
  1511  	// Use just host:port for the IP
  1512  	s.leafNodeInfo.IP = net.JoinHostPort(s.leafNodeInfo.Host, strconv.Itoa(s.leafNodeInfo.Port))
  1513  	if opts.LeafNode.Advertise != _EMPTY_ {
  1514  		s.Noticef("Advertise address for leafnode is set to %s", s.leafNodeInfo.IP)
  1515  	}
  1516  	return nil
  1517  }
  1518  
  1519  // Add the connection to the map of leaf nodes.
  1520  // If `checkForDup` is true (invoked when a leafnode is accepted), then we check
  1521  // if a connection already exists for the same server name and account.
  1522  // That can happen when the remote is attempting to reconnect while the accepting
  1523  // side did not detect the connection as broken yet.
  1524  // But it can also happen when there is a misconfiguration and the remote is
  1525  // creating two (or more) connections that bind to the same account on the accept
  1526  // side.
  1527  // When a duplicate is found, the new connection is accepted and the old is closed
  1528  // (this solves the stale connection situation). An error is returned to help the
  1529  // remote detect the misconfiguration when the duplicate is the result of that
  1530  // misconfiguration.
  1531  func (s *Server) addLeafNodeConnection(c *client, srvName, clusterName string, checkForDup bool) {
  1532  	var accName string
  1533  	c.mu.Lock()
  1534  	cid := c.cid
  1535  	acc := c.acc
  1536  	if acc != nil {
  1537  		accName = acc.Name
  1538  	}
  1539  	myRemoteDomain := c.leaf.remoteDomain
  1540  	mySrvName := c.leaf.remoteServer
  1541  	remoteAccName := c.leaf.remoteAccName
  1542  	myClustName := c.leaf.remoteCluster
  1543  	solicited := c.leaf.remote != nil
  1544  	c.mu.Unlock()
  1545  
  1546  	var old *client
  1547  	s.mu.Lock()
  1548  	// We check for empty because in some test we may send empty CONNECT{}
  1549  	if checkForDup && srvName != _EMPTY_ {
  1550  		for _, ol := range s.leafs {
  1551  			ol.mu.Lock()
  1552  			// We care here only about non solicited Leafnode. This function
  1553  			// is more about replacing stale connections than detecting loops.
  1554  			// We have code for the loop detection elsewhere, which also delays
  1555  			// attempt to reconnect.
  1556  			if !ol.isSolicitedLeafNode() && ol.leaf.remoteServer == srvName &&
  1557  				ol.leaf.remoteCluster == clusterName && ol.acc.Name == accName &&
  1558  				remoteAccName != _EMPTY_ && ol.leaf.remoteAccName == remoteAccName {
  1559  				old = ol
  1560  			}
  1561  			ol.mu.Unlock()
  1562  			if old != nil {
  1563  				break
  1564  			}
  1565  		}
  1566  	}
  1567  	// Store new connection in the map
  1568  	s.leafs[cid] = c
  1569  	s.mu.Unlock()
  1570  	s.removeFromTempClients(cid)
  1571  
  1572  	// If applicable, evict the old one.
  1573  	if old != nil {
  1574  		old.sendErrAndErr(DuplicateRemoteLeafnodeConnection.String())
  1575  		old.closeConnection(DuplicateRemoteLeafnodeConnection)
  1576  		c.Warnf("Replacing connection from same server")
  1577  	}
  1578  
  1579  	srvDecorated := func() string {
  1580  		if myClustName == _EMPTY_ {
  1581  			return mySrvName
  1582  		}
  1583  		return fmt.Sprintf("%s/%s", mySrvName, myClustName)
  1584  	}
  1585  
  1586  	opts := s.getOpts()
  1587  	sysAcc := s.SystemAccount()
  1588  	js := s.getJetStream()
  1589  	var meta *raft
  1590  	if js != nil {
  1591  		if mg := js.getMetaGroup(); mg != nil {
  1592  			meta = mg.(*raft)
  1593  		}
  1594  	}
  1595  	blockMappingOutgoing := false
  1596  	// Deny (non domain) JetStream API traffic unless system account is shared
  1597  	// and domain names are identical and extending is not disabled
  1598  
  1599  	// Check if backwards compatibility has been enabled and needs to be acted on
  1600  	forceSysAccDeny := false
  1601  	if len(opts.JsAccDefaultDomain) > 0 {
  1602  		if acc == sysAcc {
  1603  			for _, d := range opts.JsAccDefaultDomain {
  1604  				if d == _EMPTY_ {
  1605  					// Extending JetStream via leaf node is mutually exclusive with a domain mapping to the empty/default domain.
  1606  					// As soon as one mapping to "" is found, disable the ability to extend JS via a leaf node.
  1607  					c.Noticef("Not extending remote JetStream domain %q due to presence of empty default domain", myRemoteDomain)
  1608  					forceSysAccDeny = true
  1609  					break
  1610  				}
  1611  			}
  1612  		} else if domain, ok := opts.JsAccDefaultDomain[accName]; ok && domain == _EMPTY_ {
  1613  			// for backwards compatibility with old setups that do not have a domain name set
  1614  			c.Debugf("Skipping deny %q for account %q due to default domain", jsAllAPI, accName)
  1615  			return
  1616  		}
  1617  	}
  1618  
  1619  	// If the server has JS disabled, it may still be part of a JetStream that could be extended.
  1620  	// This is either signaled by js being disabled and a domain set,
  1621  	// or in cases where no domain name exists, an extension hint is set.
  1622  	// However, this is only relevant in mixed setups.
  1623  	//
  1624  	// If the system account connects but default domains are present, JetStream can't be extended.
  1625  	if opts.JetStreamDomain != myRemoteDomain || (!opts.JetStream && (opts.JetStreamDomain == _EMPTY_ && opts.JetStreamExtHint != jsWillExtend)) ||
  1626  		sysAcc == nil || acc == nil || forceSysAccDeny {
  1627  		// If domain names mismatch always deny. This applies to system accounts as well as non system accounts.
  1628  		// Not having a system account, account or JetStream disabled is considered a mismatch as well.
  1629  		if acc != nil && acc == sysAcc {
  1630  			c.Noticef("System account connected from %s", srvDecorated())
  1631  			c.Noticef("JetStream not extended, domains differ")
  1632  			c.mergeDenyPermissionsLocked(both, denyAllJs)
  1633  			// When a remote with a system account is present in a server, unless otherwise disabled, the server will be
  1634  			// started in observer mode. Now that it is clear that this not used, turn the observer mode off.
  1635  			if solicited && meta != nil && meta.IsObserver() {
  1636  				meta.setObserver(false, extNotExtended)
  1637  				c.Debugf("Turning JetStream metadata controller Observer Mode off")
  1638  				// Take note that the domain was not extended to avoid this state from startup.
  1639  				writePeerState(js.config.StoreDir, meta.currentPeerState())
  1640  				// Meta controller can't be leader yet.
  1641  				// Yet it is possible that due to observer mode every server already stopped campaigning.
  1642  				// Therefore this server needs to be kicked into campaigning gear explicitly.
  1643  				meta.Campaign()
  1644  			}
  1645  		} else {
  1646  			c.Noticef("JetStream using domains: local %q, remote %q", opts.JetStreamDomain, myRemoteDomain)
  1647  			c.mergeDenyPermissionsLocked(both, denyAllClientJs)
  1648  		}
  1649  		blockMappingOutgoing = true
  1650  	} else if acc == sysAcc {
  1651  		// system account and same domain
  1652  		s.sys.client.Noticef("Extending JetStream domain %q as System Account connected from server %s",
  1653  			myRemoteDomain, srvDecorated())
  1654  		// In an extension use case, pin leadership to server remotes connect to.
  1655  		// Therefore, server with a remote that are not already in observer mode, need to be put into it.
  1656  		if solicited && meta != nil && !meta.IsObserver() {
  1657  			meta.setObserver(true, extExtended)
  1658  			c.Debugf("Turning JetStream metadata controller Observer Mode on - System Account Connected")
  1659  			// Take note that the domain was not extended to avoid this state next startup.
  1660  			writePeerState(js.config.StoreDir, meta.currentPeerState())
  1661  			// If this server is the leader already, step down so a new leader can be elected (that is not an observer)
  1662  			meta.StepDown()
  1663  		}
  1664  	} else {
  1665  		// This deny is needed in all cases (system account shared or not)
  1666  		// If the system account is shared, jsAllAPI traffic will go through the system account.
  1667  		// So in order to prevent duplicate delivery (from system and actual account) suppress it on the account.
  1668  		// If the system account is NOT shared, jsAllAPI traffic has no business
  1669  		c.Debugf("Adding deny %+v for account %q", denyAllClientJs, accName)
  1670  		c.mergeDenyPermissionsLocked(both, denyAllClientJs)
  1671  	}
  1672  	// If we have a specified JetStream domain we will want to add a mapping to
  1673  	// allow access cross domain for each non-system account.
  1674  	if opts.JetStreamDomain != _EMPTY_ && opts.JetStream && acc != nil && acc != sysAcc {
  1675  		for src, dest := range generateJSMappingTable(opts.JetStreamDomain) {
  1676  			if err := acc.AddMapping(src, dest); err != nil {
  1677  				c.Debugf("Error adding JetStream domain mapping: %s", err.Error())
  1678  			} else {
  1679  				c.Debugf("Adding JetStream Domain Mapping %q -> %s to account %q", src, dest, accName)
  1680  			}
  1681  		}
  1682  		if blockMappingOutgoing {
  1683  			src := fmt.Sprintf(jsDomainAPI, opts.JetStreamDomain)
  1684  			// make sure that messages intended for this domain, do not leave the cluster via this leaf node connection
  1685  			// This is a guard against a miss-config with two identical domain names and will only cover some forms
  1686  			// of this issue, not all of them.
  1687  			// This guards against a hub and a spoke having the same domain name.
  1688  			// But not two spokes having the same one and the request coming from the hub.
  1689  			c.mergeDenyPermissionsLocked(pub, []string{src})
  1690  			c.Debugf("Adding deny %q for outgoing messages to account %q", src, accName)
  1691  		}
  1692  	}
  1693  }
  1694  
  1695  func (s *Server) removeLeafNodeConnection(c *client) {
  1696  	c.mu.Lock()
  1697  	cid := c.cid
  1698  	if c.leaf != nil && c.leaf.tsubt != nil {
  1699  		c.leaf.tsubt.Stop()
  1700  		c.leaf.tsubt = nil
  1701  	}
  1702  	c.mu.Unlock()
  1703  	s.mu.Lock()
  1704  	delete(s.leafs, cid)
  1705  	s.mu.Unlock()
  1706  	s.removeFromTempClients(cid)
  1707  }
  1708  
  1709  // Connect information for solicited leafnodes.
  1710  type leafConnectInfo struct {
  1711  	Version   string   `json:"version,omitempty"`
  1712  	Nkey      string   `json:"nkey,omitempty"`
  1713  	JWT       string   `json:"jwt,omitempty"`
  1714  	Sig       string   `json:"sig,omitempty"`
  1715  	User      string   `json:"user,omitempty"`
  1716  	Pass      string   `json:"pass,omitempty"`
  1717  	ID        string   `json:"server_id,omitempty"`
  1718  	Domain    string   `json:"domain,omitempty"`
  1719  	Name      string   `json:"name,omitempty"`
  1720  	Hub       bool     `json:"is_hub,omitempty"`
  1721  	Cluster   string   `json:"cluster,omitempty"`
  1722  	Headers   bool     `json:"headers,omitempty"`
  1723  	JetStream bool     `json:"jetstream,omitempty"`
  1724  	DenyPub   []string `json:"deny_pub,omitempty"`
  1725  
  1726  	// There was an existing field called:
  1727  	// >> Comp bool `json:"compression,omitempty"`
  1728  	// that has never been used. With support for compression, we now need
  1729  	// a field that is a string. So we use a different json tag:
  1730  	Compression string `json:"compress_mode,omitempty"`
  1731  
  1732  	// Just used to detect wrong connection attempts.
  1733  	Gateway string `json:"gateway,omitempty"`
  1734  
  1735  	// Tells the accept side which account the remote is binding to.
  1736  	RemoteAccount string `json:"remote_account,omitempty"`
  1737  
  1738  	// The accept side of a LEAF connection, unlike ROUTER and GATEWAY, receives
  1739  	// only the CONNECT protocol, and no INFO. So we need to send the protocol
  1740  	// version as part of the CONNECT. It will indicate if a connection supports
  1741  	// some features, such as message tracing.
  1742  	// We use `protocol` as the JSON tag, so this is automatically unmarshal'ed
  1743  	// in the low level process CONNECT.
  1744  	Proto int `json:"protocol,omitempty"`
  1745  }
  1746  
  1747  // processLeafNodeConnect will process the inbound connect args.
  1748  // Once we are here we are bound to an account, so can send any interest that
  1749  // we would have to the other side.
  1750  func (c *client) processLeafNodeConnect(s *Server, arg []byte, lang string) error {
  1751  	// Way to detect clients that incorrectly connect to the route listen
  1752  	// port. Client provided "lang" in the CONNECT protocol while LEAFNODEs don't.
  1753  	if lang != _EMPTY_ {
  1754  		c.sendErrAndErr(ErrClientConnectedToLeafNodePort.Error())
  1755  		c.closeConnection(WrongPort)
  1756  		return ErrClientConnectedToLeafNodePort
  1757  	}
  1758  
  1759  	// Unmarshal as a leaf node connect protocol
  1760  	proto := &leafConnectInfo{}
  1761  	if err := json.Unmarshal(arg, proto); err != nil {
  1762  		return err
  1763  	}
  1764  
  1765  	// Check for cluster name collisions.
  1766  	if cn := s.cachedClusterName(); cn != _EMPTY_ && proto.Cluster != _EMPTY_ && proto.Cluster == cn {
  1767  		c.sendErrAndErr(ErrLeafNodeHasSameClusterName.Error())
  1768  		c.closeConnection(ClusterNamesIdentical)
  1769  		return ErrLeafNodeHasSameClusterName
  1770  	}
  1771  
  1772  	// Reject if this has Gateway which means that it would be from a gateway
  1773  	// connection that incorrectly connects to the leafnode port.
  1774  	if proto.Gateway != _EMPTY_ {
  1775  		errTxt := fmt.Sprintf("Rejecting connection from gateway %q on the leafnode port", proto.Gateway)
  1776  		c.Errorf(errTxt)
  1777  		c.sendErr(errTxt)
  1778  		c.closeConnection(WrongGateway)
  1779  		return ErrWrongGateway
  1780  	}
  1781  
  1782  	if mv := s.getOpts().LeafNode.MinVersion; mv != _EMPTY_ {
  1783  		major, minor, update, _ := versionComponents(mv)
  1784  		if !versionAtLeast(proto.Version, major, minor, update) {
  1785  			// We are going to send back an INFO because otherwise recent
  1786  			// versions of the remote server would simply break the connection
  1787  			// after 2 seconds if not receiving it. Instead, we want the
  1788  			// other side to just "stall" until we finish waiting for the holding
  1789  			// period and close the connection below.
  1790  			s.sendPermsAndAccountInfo(c)
  1791  			c.sendErrAndErr(fmt.Sprintf("connection rejected since minimum version required is %q", mv))
  1792  			select {
  1793  			case <-c.srv.quitCh:
  1794  			case <-time.After(leafNodeWaitBeforeClose):
  1795  			}
  1796  			c.closeConnection(MinimumVersionRequired)
  1797  			return ErrMinimumVersionRequired
  1798  		}
  1799  	}
  1800  
  1801  	// Check if this server supports headers.
  1802  	supportHeaders := c.srv.supportsHeaders()
  1803  
  1804  	c.mu.Lock()
  1805  	// Leaf Nodes do not do echo or verbose or pedantic.
  1806  	c.opts.Verbose = false
  1807  	c.opts.Echo = false
  1808  	c.opts.Pedantic = false
  1809  	// This inbound connection will be marked as supporting headers if this server
  1810  	// support headers and the remote has sent in the CONNECT protocol that it does
  1811  	// support headers too.
  1812  	c.headers = supportHeaders && proto.Headers
  1813  	// If the compression level is still not set, set it based on what has been
  1814  	// given to us in the CONNECT protocol.
  1815  	if c.leaf.compression == _EMPTY_ {
  1816  		// But if proto.Compression is _EMPTY_, set it to CompressionNotSupported
  1817  		if proto.Compression == _EMPTY_ {
  1818  			c.leaf.compression = CompressionNotSupported
  1819  		} else {
  1820  			c.leaf.compression = proto.Compression
  1821  		}
  1822  	}
  1823  
  1824  	// Remember the remote server.
  1825  	c.leaf.remoteServer = proto.Name
  1826  	// Remember the remote account name
  1827  	c.leaf.remoteAccName = proto.RemoteAccount
  1828  
  1829  	// If the other side has declared itself a hub, so we will take on the spoke role.
  1830  	if proto.Hub {
  1831  		c.leaf.isSpoke = true
  1832  	}
  1833  
  1834  	// The soliciting side is part of a cluster.
  1835  	if proto.Cluster != _EMPTY_ {
  1836  		c.leaf.remoteCluster = proto.Cluster
  1837  	}
  1838  
  1839  	c.leaf.remoteDomain = proto.Domain
  1840  
  1841  	// When a leaf solicits a connection to a hub, the perms that it will use on the soliciting leafnode's
  1842  	// behalf are correct for them, but inside the hub need to be reversed since data is flowing in the opposite direction.
  1843  	if !c.isSolicitedLeafNode() && c.perms != nil {
  1844  		sp, pp := c.perms.sub, c.perms.pub
  1845  		c.perms.sub, c.perms.pub = pp, sp
  1846  		if c.opts.Import != nil {
  1847  			c.darray = c.opts.Import.Deny
  1848  		} else {
  1849  			c.darray = nil
  1850  		}
  1851  	}
  1852  
  1853  	// Set the Ping timer
  1854  	c.setFirstPingTimer()
  1855  
  1856  	// If we received pub deny permissions from the other end, merge with existing ones.
  1857  	c.mergeDenyPermissions(pub, proto.DenyPub)
  1858  
  1859  	c.mu.Unlock()
  1860  
  1861  	// Register the cluster, even if empty, as long as we are acting as a hub.
  1862  	if !proto.Hub {
  1863  		c.acc.registerLeafNodeCluster(proto.Cluster)
  1864  	}
  1865  
  1866  	// Add in the leafnode here since we passed through auth at this point.
  1867  	s.addLeafNodeConnection(c, proto.Name, proto.Cluster, true)
  1868  
  1869  	// If we have permissions bound to this leafnode we need to send then back to the
  1870  	// origin server for local enforcement.
  1871  	s.sendPermsAndAccountInfo(c)
  1872  
  1873  	// Create and initialize the smap since we know our bound account now.
  1874  	// This will send all registered subs too.
  1875  	s.initLeafNodeSmapAndSendSubs(c)
  1876  
  1877  	// Announce the account connect event for a leaf node.
  1878  	// This will no-op as needed.
  1879  	s.sendLeafNodeConnect(c.acc)
  1880  
  1881  	return nil
  1882  }
  1883  
  1884  // Returns the remote cluster name. This is set only once so does not require a lock.
  1885  func (c *client) remoteCluster() string {
  1886  	if c.leaf == nil {
  1887  		return _EMPTY_
  1888  	}
  1889  	return c.leaf.remoteCluster
  1890  }
  1891  
  1892  // Sends back an info block to the soliciting leafnode to let it know about
  1893  // its permission settings for local enforcement.
  1894  func (s *Server) sendPermsAndAccountInfo(c *client) {
  1895  	// Copy
  1896  	info := s.copyLeafNodeInfo()
  1897  	c.mu.Lock()
  1898  	info.CID = c.cid
  1899  	info.Import = c.opts.Import
  1900  	info.Export = c.opts.Export
  1901  	info.RemoteAccount = c.acc.Name
  1902  	info.ConnectInfo = true
  1903  	c.enqueueProto(generateInfoJSON(info))
  1904  	c.mu.Unlock()
  1905  }
  1906  
  1907  // Snapshot the current subscriptions from the sublist into our smap which
  1908  // we will keep updated from now on.
  1909  // Also send the registered subscriptions.
  1910  func (s *Server) initLeafNodeSmapAndSendSubs(c *client) {
  1911  	acc := c.acc
  1912  	if acc == nil {
  1913  		c.Debugf("Leafnode does not have an account bound")
  1914  		return
  1915  	}
  1916  	// Collect all account subs here.
  1917  	_subs := [1024]*subscription{}
  1918  	subs := _subs[:0]
  1919  	ims := []string{}
  1920  
  1921  	// Hold the client lock otherwise there can be a race and miss some subs.
  1922  	c.mu.Lock()
  1923  	defer c.mu.Unlock()
  1924  
  1925  	acc.mu.RLock()
  1926  	accName := acc.Name
  1927  	accNTag := acc.nameTag
  1928  
  1929  	// To make printing look better when no friendly name present.
  1930  	if accNTag != _EMPTY_ {
  1931  		accNTag = "/" + accNTag
  1932  	}
  1933  
  1934  	// If we are solicited we only send interest for local clients.
  1935  	if c.isSpokeLeafNode() {
  1936  		acc.sl.localSubs(&subs, true)
  1937  	} else {
  1938  		acc.sl.All(&subs)
  1939  	}
  1940  
  1941  	// Check if we have an existing service import reply.
  1942  	siReply := copyBytes(acc.siReply)
  1943  
  1944  	// Since leaf nodes only send on interest, if the bound
  1945  	// account has import services we need to send those over.
  1946  	for isubj := range acc.imports.services {
  1947  		if c.isSpokeLeafNode() && !c.canSubscribe(isubj) {
  1948  			c.Debugf("Not permitted to import service %q on behalf of %s%s", isubj, accName, accNTag)
  1949  			continue
  1950  		}
  1951  		ims = append(ims, isubj)
  1952  	}
  1953  	// Likewise for mappings.
  1954  	for _, m := range acc.mappings {
  1955  		if c.isSpokeLeafNode() && !c.canSubscribe(m.src) {
  1956  			c.Debugf("Not permitted to import mapping %q on behalf of %s%s", m.src, accName, accNTag)
  1957  			continue
  1958  		}
  1959  		ims = append(ims, m.src)
  1960  	}
  1961  
  1962  	// Create a unique subject that will be used for loop detection.
  1963  	lds := acc.lds
  1964  	acc.mu.RUnlock()
  1965  
  1966  	// Check if we have to create the LDS.
  1967  	if lds == _EMPTY_ {
  1968  		lds = leafNodeLoopDetectionSubjectPrefix + nuid.Next()
  1969  		acc.mu.Lock()
  1970  		acc.lds = lds
  1971  		acc.mu.Unlock()
  1972  	}
  1973  
  1974  	// Now check for gateway interest. Leafnodes will put this into
  1975  	// the proper mode to propagate, but they are not held in the account.
  1976  	gwsa := [16]*client{}
  1977  	gws := gwsa[:0]
  1978  	s.getOutboundGatewayConnections(&gws)
  1979  	for _, cgw := range gws {
  1980  		cgw.mu.Lock()
  1981  		gw := cgw.gw
  1982  		cgw.mu.Unlock()
  1983  		if gw != nil {
  1984  			if ei, _ := gw.outsim.Load(accName); ei != nil {
  1985  				if e := ei.(*outsie); e != nil && e.sl != nil {
  1986  					e.sl.All(&subs)
  1987  				}
  1988  			}
  1989  		}
  1990  	}
  1991  
  1992  	applyGlobalRouting := s.gateway.enabled
  1993  	if c.isSpokeLeafNode() {
  1994  		// Add a fake subscription for this solicited leafnode connection
  1995  		// so that we can send back directly for mapped GW replies.
  1996  		c.srv.gwLeafSubs.Insert(&subscription{client: c, subject: []byte(gwReplyPrefix + ">")})
  1997  	}
  1998  
  1999  	// Now walk the results and add them to our smap
  2000  	rc := c.leaf.remoteCluster
  2001  	c.leaf.smap = make(map[string]int32)
  2002  	for _, sub := range subs {
  2003  		// Check perms regardless of role.
  2004  		if c.perms != nil && !c.canSubscribe(string(sub.subject)) {
  2005  			c.Debugf("Not permitted to subscribe to %q on behalf of %s%s", sub.subject, accName, accNTag)
  2006  			continue
  2007  		}
  2008  		// We ignore ourselves here.
  2009  		// Also don't add the subscription if it has a origin cluster and the
  2010  		// cluster name matches the one of the client we are sending to.
  2011  		if c != sub.client && (sub.origin == nil || (bytesToString(sub.origin) != rc)) {
  2012  			count := int32(1)
  2013  			if len(sub.queue) > 0 && sub.qw > 0 {
  2014  				count = sub.qw
  2015  			}
  2016  			c.leaf.smap[keyFromSub(sub)] += count
  2017  			if c.leaf.tsub == nil {
  2018  				c.leaf.tsub = make(map[*subscription]struct{})
  2019  			}
  2020  			c.leaf.tsub[sub] = struct{}{}
  2021  		}
  2022  	}
  2023  	// FIXME(dlc) - We need to update appropriately on an account claims update.
  2024  	for _, isubj := range ims {
  2025  		c.leaf.smap[isubj]++
  2026  	}
  2027  	// If we have gateways enabled we need to make sure the other side sends us responses
  2028  	// that have been augmented from the original subscription.
  2029  	// TODO(dlc) - Should we lock this down more?
  2030  	if applyGlobalRouting {
  2031  		c.leaf.smap[oldGWReplyPrefix+"*.>"]++
  2032  		c.leaf.smap[gwReplyPrefix+">"]++
  2033  	}
  2034  	// Detect loops by subscribing to a specific subject and checking
  2035  	// if this sub is coming back to us.
  2036  	c.leaf.smap[lds]++
  2037  
  2038  	// Check if we need to add an existing siReply to our map.
  2039  	// This will be a prefix so add on the wildcard.
  2040  	if siReply != nil {
  2041  		wcsub := append(siReply, '>')
  2042  		c.leaf.smap[string(wcsub)]++
  2043  	}
  2044  	// Queue all protocols. There is no max pending limit for LN connection,
  2045  	// so we don't need chunking. The writes will happen from the writeLoop.
  2046  	var b bytes.Buffer
  2047  	for key, n := range c.leaf.smap {
  2048  		c.writeLeafSub(&b, key, n)
  2049  	}
  2050  	if b.Len() > 0 {
  2051  		c.enqueueProto(b.Bytes())
  2052  	}
  2053  	if c.leaf.tsub != nil {
  2054  		// Clear the tsub map after 5 seconds.
  2055  		c.leaf.tsubt = time.AfterFunc(5*time.Second, func() {
  2056  			c.mu.Lock()
  2057  			if c.leaf != nil {
  2058  				c.leaf.tsub = nil
  2059  				c.leaf.tsubt = nil
  2060  			}
  2061  			c.mu.Unlock()
  2062  		})
  2063  	}
  2064  }
  2065  
  2066  // updateInterestForAccountOnGateway called from gateway code when processing RS+ and RS-.
  2067  func (s *Server) updateInterestForAccountOnGateway(accName string, sub *subscription, delta int32) {
  2068  	acc, err := s.LookupAccount(accName)
  2069  	if acc == nil || err != nil {
  2070  		s.Debugf("No or bad account for %q, failed to update interest from gateway", accName)
  2071  		return
  2072  	}
  2073  	acc.updateLeafNodes(sub, delta)
  2074  }
  2075  
  2076  // updateLeafNodes will make sure to update the account smap for the subscription.
  2077  // Will also forward to all leaf nodes as needed.
  2078  func (acc *Account) updateLeafNodes(sub *subscription, delta int32) {
  2079  	if acc == nil || sub == nil {
  2080  		return
  2081  	}
  2082  
  2083  	// We will do checks for no leafnodes and same cluster here inline and under the
  2084  	// general account read lock.
  2085  	// If we feel we need to update the leafnodes we will do that out of line to avoid
  2086  	// blocking routes or GWs.
  2087  
  2088  	acc.mu.RLock()
  2089  	// First check if we even have leafnodes here.
  2090  	if acc.nleafs == 0 {
  2091  		acc.mu.RUnlock()
  2092  		return
  2093  	}
  2094  
  2095  	// Is this a loop detection subject.
  2096  	isLDS := bytes.HasPrefix(sub.subject, []byte(leafNodeLoopDetectionSubjectPrefix))
  2097  
  2098  	// Capture the cluster even if its empty.
  2099  	var cluster string
  2100  	if sub.origin != nil {
  2101  		cluster = bytesToString(sub.origin)
  2102  	}
  2103  
  2104  	// If we have an isolated cluster we can return early, as long as it is not a loop detection subject.
  2105  	// Empty clusters will return false for the check.
  2106  	if !isLDS && acc.isLeafNodeClusterIsolated(cluster) {
  2107  		acc.mu.RUnlock()
  2108  		return
  2109  	}
  2110  
  2111  	// We can release the general account lock.
  2112  	acc.mu.RUnlock()
  2113  
  2114  	// We can hold the list lock here to avoid having to copy a large slice.
  2115  	acc.lmu.RLock()
  2116  	defer acc.lmu.RUnlock()
  2117  
  2118  	// Do this once.
  2119  	subject := string(sub.subject)
  2120  
  2121  	// Walk the connected leafnodes.
  2122  	for _, ln := range acc.lleafs {
  2123  		if ln == sub.client {
  2124  			continue
  2125  		}
  2126  		// Check to make sure this sub does not have an origin cluster that matches the leafnode.
  2127  		ln.mu.Lock()
  2128  		// If skipped, make sure that we still let go the "$LDS." subscription that allows
  2129  		// the detection of loops as long as different cluster.
  2130  		clusterDifferent := cluster != ln.remoteCluster()
  2131  		if (isLDS && clusterDifferent) || ((cluster == _EMPTY_ || clusterDifferent) && (delta <= 0 || ln.canSubscribe(subject))) {
  2132  			ln.updateSmap(sub, delta, isLDS)
  2133  		}
  2134  		ln.mu.Unlock()
  2135  	}
  2136  }
  2137  
  2138  // This will make an update to our internal smap and determine if we should send out
  2139  // an interest update to the remote side.
  2140  // Lock should be held.
  2141  func (c *client) updateSmap(sub *subscription, delta int32, isLDS bool) {
  2142  	if c.leaf.smap == nil {
  2143  		return
  2144  	}
  2145  
  2146  	// If we are solicited make sure this is a local client or a non-solicited leaf node
  2147  	skind := sub.client.kind
  2148  	updateClient := skind == CLIENT || skind == SYSTEM || skind == JETSTREAM || skind == ACCOUNT
  2149  	if !isLDS && c.isSpokeLeafNode() && !(updateClient || (skind == LEAF && !sub.client.isSpokeLeafNode())) {
  2150  		return
  2151  	}
  2152  
  2153  	// For additions, check if that sub has just been processed during initLeafNodeSmapAndSendSubs
  2154  	if delta > 0 && c.leaf.tsub != nil {
  2155  		if _, present := c.leaf.tsub[sub]; present {
  2156  			delete(c.leaf.tsub, sub)
  2157  			if len(c.leaf.tsub) == 0 {
  2158  				c.leaf.tsub = nil
  2159  				c.leaf.tsubt.Stop()
  2160  				c.leaf.tsubt = nil
  2161  			}
  2162  			return
  2163  		}
  2164  	}
  2165  
  2166  	key := keyFromSub(sub)
  2167  	n, ok := c.leaf.smap[key]
  2168  	if delta < 0 && !ok {
  2169  		return
  2170  	}
  2171  
  2172  	// We will update if its a queue, if count is zero (or negative), or we were 0 and are N > 0.
  2173  	update := sub.queue != nil || (n <= 0 && n+delta > 0) || (n > 0 && n+delta <= 0)
  2174  	n += delta
  2175  	if n > 0 {
  2176  		c.leaf.smap[key] = n
  2177  	} else {
  2178  		delete(c.leaf.smap, key)
  2179  	}
  2180  	if update {
  2181  		c.sendLeafNodeSubUpdate(key, n)
  2182  	}
  2183  }
  2184  
  2185  // Used to force add subjects to the subject map.
  2186  func (c *client) forceAddToSmap(subj string) {
  2187  	c.mu.Lock()
  2188  	defer c.mu.Unlock()
  2189  
  2190  	if c.leaf.smap == nil {
  2191  		return
  2192  	}
  2193  	n := c.leaf.smap[subj]
  2194  	if n != 0 {
  2195  		return
  2196  	}
  2197  	// Place into the map since it was not there.
  2198  	c.leaf.smap[subj] = 1
  2199  	c.sendLeafNodeSubUpdate(subj, 1)
  2200  }
  2201  
  2202  // Used to force remove a subject from the subject map.
  2203  func (c *client) forceRemoveFromSmap(subj string) {
  2204  	c.mu.Lock()
  2205  	defer c.mu.Unlock()
  2206  
  2207  	if c.leaf.smap == nil {
  2208  		return
  2209  	}
  2210  	n := c.leaf.smap[subj]
  2211  	if n == 0 {
  2212  		return
  2213  	}
  2214  	n--
  2215  	if n == 0 {
  2216  		// Remove is now zero
  2217  		delete(c.leaf.smap, subj)
  2218  		c.sendLeafNodeSubUpdate(subj, 0)
  2219  	} else {
  2220  		c.leaf.smap[subj] = n
  2221  	}
  2222  }
  2223  
  2224  // Send the subscription interest change to the other side.
  2225  // Lock should be held.
  2226  func (c *client) sendLeafNodeSubUpdate(key string, n int32) {
  2227  	// If we are a spoke, we need to check if we are allowed to send this subscription over to the hub.
  2228  	if c.isSpokeLeafNode() {
  2229  		checkPerms := true
  2230  		if len(key) > 0 && (key[0] == '$' || key[0] == '_') {
  2231  			if strings.HasPrefix(key, leafNodeLoopDetectionSubjectPrefix) ||
  2232  				strings.HasPrefix(key, oldGWReplyPrefix) ||
  2233  				strings.HasPrefix(key, gwReplyPrefix) {
  2234  				checkPerms = false
  2235  			}
  2236  		}
  2237  		if checkPerms && !c.canSubscribe(key) {
  2238  			return
  2239  		}
  2240  	}
  2241  	// If we are here we can send over to the other side.
  2242  	_b := [64]byte{}
  2243  	b := bytes.NewBuffer(_b[:0])
  2244  	c.writeLeafSub(b, key, n)
  2245  	c.enqueueProto(b.Bytes())
  2246  }
  2247  
  2248  // Helper function to build the key.
  2249  func keyFromSub(sub *subscription) string {
  2250  	var sb strings.Builder
  2251  	sb.Grow(len(sub.subject) + len(sub.queue) + 1)
  2252  	sb.Write(sub.subject)
  2253  	if sub.queue != nil {
  2254  		// Just make the key subject spc group, e.g. 'foo bar'
  2255  		sb.WriteByte(' ')
  2256  		sb.Write(sub.queue)
  2257  	}
  2258  	return sb.String()
  2259  }
  2260  
  2261  // Lock should be held.
  2262  func (c *client) writeLeafSub(w *bytes.Buffer, key string, n int32) {
  2263  	if key == _EMPTY_ {
  2264  		return
  2265  	}
  2266  	if n > 0 {
  2267  		w.WriteString("LS+ " + key)
  2268  		// Check for queue semantics, if found write n.
  2269  		if strings.Contains(key, " ") {
  2270  			w.WriteString(" ")
  2271  			var b [12]byte
  2272  			var i = len(b)
  2273  			for l := n; l > 0; l /= 10 {
  2274  				i--
  2275  				b[i] = digits[l%10]
  2276  			}
  2277  			w.Write(b[i:])
  2278  			if c.trace {
  2279  				arg := fmt.Sprintf("%s %d", key, n)
  2280  				c.traceOutOp("LS+", []byte(arg))
  2281  			}
  2282  		} else if c.trace {
  2283  			c.traceOutOp("LS+", []byte(key))
  2284  		}
  2285  	} else {
  2286  		w.WriteString("LS- " + key)
  2287  		if c.trace {
  2288  			c.traceOutOp("LS-", []byte(key))
  2289  		}
  2290  	}
  2291  	w.WriteString(CR_LF)
  2292  }
  2293  
  2294  // processLeafSub will process an inbound sub request for the remote leaf node.
  2295  func (c *client) processLeafSub(argo []byte) (err error) {
  2296  	// Indicate activity.
  2297  	c.in.subs++
  2298  
  2299  	srv := c.srv
  2300  	if srv == nil {
  2301  		return nil
  2302  	}
  2303  
  2304  	// Copy so we do not reference a potentially large buffer
  2305  	arg := make([]byte, len(argo))
  2306  	copy(arg, argo)
  2307  
  2308  	args := splitArg(arg)
  2309  	sub := &subscription{client: c}
  2310  
  2311  	switch len(args) {
  2312  	case 1:
  2313  		sub.queue = nil
  2314  	case 3:
  2315  		sub.queue = args[1]
  2316  		sub.qw = int32(parseSize(args[2]))
  2317  	default:
  2318  		return fmt.Errorf("processLeafSub Parse Error: '%s'", arg)
  2319  	}
  2320  	sub.subject = args[0]
  2321  
  2322  	c.mu.Lock()
  2323  	if c.isClosed() {
  2324  		c.mu.Unlock()
  2325  		return nil
  2326  	}
  2327  
  2328  	acc := c.acc
  2329  	// Check if we have a loop.
  2330  	ldsPrefix := bytes.HasPrefix(sub.subject, []byte(leafNodeLoopDetectionSubjectPrefix))
  2331  
  2332  	if ldsPrefix && bytesToString(sub.subject) == acc.getLDSubject() {
  2333  		c.mu.Unlock()
  2334  		c.handleLeafNodeLoop(true)
  2335  		return nil
  2336  	}
  2337  
  2338  	// Check permissions if applicable. (but exclude the $LDS, $GR and _GR_)
  2339  	checkPerms := true
  2340  	if sub.subject[0] == '$' || sub.subject[0] == '_' {
  2341  		if ldsPrefix ||
  2342  			bytes.HasPrefix(sub.subject, []byte(oldGWReplyPrefix)) ||
  2343  			bytes.HasPrefix(sub.subject, []byte(gwReplyPrefix)) {
  2344  			checkPerms = false
  2345  		}
  2346  	}
  2347  
  2348  	// If we are a hub check that we can publish to this subject.
  2349  	if checkPerms {
  2350  		subj := string(sub.subject)
  2351  		if subjectIsLiteral(subj) && !c.pubAllowedFullCheck(subj, true, true) {
  2352  			c.mu.Unlock()
  2353  			c.leafSubPermViolation(sub.subject)
  2354  			c.Debugf(fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject))
  2355  			return nil
  2356  		}
  2357  	}
  2358  
  2359  	// Check if we have a maximum on the number of subscriptions.
  2360  	if c.subsAtLimit() {
  2361  		c.mu.Unlock()
  2362  		c.maxSubsExceeded()
  2363  		return nil
  2364  	}
  2365  
  2366  	// If we have an origin cluster associated mark that in the sub.
  2367  	if rc := c.remoteCluster(); rc != _EMPTY_ {
  2368  		sub.origin = []byte(rc)
  2369  	}
  2370  
  2371  	// Like Routes, we store local subs by account and subject and optionally queue name.
  2372  	// If we have a queue it will have a trailing weight which we do not want.
  2373  	if sub.queue != nil {
  2374  		sub.sid = arg[:len(arg)-len(args[2])-1]
  2375  	} else {
  2376  		sub.sid = arg
  2377  	}
  2378  	key := bytesToString(sub.sid)
  2379  	osub := c.subs[key]
  2380  	updateGWs := false
  2381  	delta := int32(1)
  2382  	if osub == nil {
  2383  		c.subs[key] = sub
  2384  		// Now place into the account sl.
  2385  		if err := acc.sl.Insert(sub); err != nil {
  2386  			delete(c.subs, key)
  2387  			c.mu.Unlock()
  2388  			c.Errorf("Could not insert subscription: %v", err)
  2389  			c.sendErr("Invalid Subscription")
  2390  			return nil
  2391  		}
  2392  		updateGWs = srv.gateway.enabled
  2393  	} else if sub.queue != nil {
  2394  		// For a queue we need to update the weight.
  2395  		delta = sub.qw - atomic.LoadInt32(&osub.qw)
  2396  		atomic.StoreInt32(&osub.qw, sub.qw)
  2397  		acc.sl.UpdateRemoteQSub(osub)
  2398  	}
  2399  	spoke := c.isSpokeLeafNode()
  2400  	c.mu.Unlock()
  2401  
  2402  	// Only add in shadow subs if a new sub or qsub.
  2403  	if osub == nil {
  2404  		if err := c.addShadowSubscriptions(acc, sub, true); err != nil {
  2405  			c.Errorf(err.Error())
  2406  		}
  2407  	}
  2408  
  2409  	// If we are not solicited, treat leaf node subscriptions similar to a
  2410  	// client subscription, meaning we forward them to routes, gateways and
  2411  	// other leaf nodes as needed.
  2412  	if !spoke {
  2413  		// If we are routing add to the route map for the associated account.
  2414  		srv.updateRouteSubscriptionMap(acc, sub, delta)
  2415  		if updateGWs {
  2416  			srv.gatewayUpdateSubInterest(acc.Name, sub, delta)
  2417  		}
  2418  	}
  2419  	// Now check on leafnode updates for other leaf nodes. We understand solicited
  2420  	// and non-solicited state in this call so we will do the right thing.
  2421  	acc.updateLeafNodes(sub, delta)
  2422  
  2423  	return nil
  2424  }
  2425  
  2426  // If the leafnode is a solicited, set the connect delay based on default
  2427  // or private option (for tests). Sends the error to the other side, log and
  2428  // close the connection.
  2429  func (c *client) handleLeafNodeLoop(sendErr bool) {
  2430  	accName, delay := c.setLeafConnectDelayIfSoliciting(leafNodeReconnectDelayAfterLoopDetected)
  2431  	errTxt := fmt.Sprintf("Loop detected for leafnode account=%q. Delaying attempt to reconnect for %v", accName, delay)
  2432  	if sendErr {
  2433  		c.sendErr(errTxt)
  2434  	}
  2435  
  2436  	c.Errorf(errTxt)
  2437  	// If we are here with "sendErr" false, it means that this is the server
  2438  	// that received the error. The other side will have closed the connection,
  2439  	// but does not hurt to close here too.
  2440  	c.closeConnection(ProtocolViolation)
  2441  }
  2442  
  2443  // processLeafUnsub will process an inbound unsub request for the remote leaf node.
  2444  func (c *client) processLeafUnsub(arg []byte) error {
  2445  	// Indicate any activity, so pub and sub or unsubs.
  2446  	c.in.subs++
  2447  
  2448  	acc := c.acc
  2449  	srv := c.srv
  2450  
  2451  	c.mu.Lock()
  2452  	if c.isClosed() {
  2453  		c.mu.Unlock()
  2454  		return nil
  2455  	}
  2456  
  2457  	updateGWs := false
  2458  	spoke := c.isSpokeLeafNode()
  2459  	// We store local subs by account and subject and optionally queue name.
  2460  	// LS- will have the arg exactly as the key.
  2461  	sub, ok := c.subs[string(arg)]
  2462  	c.mu.Unlock()
  2463  
  2464  	if ok {
  2465  		c.unsubscribe(acc, sub, true, true)
  2466  		updateGWs = srv.gateway.enabled
  2467  	}
  2468  
  2469  	if !spoke {
  2470  		// If we are routing subtract from the route map for the associated account.
  2471  		srv.updateRouteSubscriptionMap(acc, sub, -1)
  2472  		// Gateways
  2473  		if updateGWs {
  2474  			srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
  2475  		}
  2476  	}
  2477  	// Now check on leafnode updates for other leaf nodes.
  2478  	acc.updateLeafNodes(sub, -1)
  2479  	return nil
  2480  }
  2481  
  2482  func (c *client) processLeafHeaderMsgArgs(arg []byte) error {
  2483  	// Unroll splitArgs to avoid runtime/heap issues
  2484  	a := [MAX_MSG_ARGS][]byte{}
  2485  	args := a[:0]
  2486  	start := -1
  2487  	for i, b := range arg {
  2488  		switch b {
  2489  		case ' ', '\t', '\r', '\n':
  2490  			if start >= 0 {
  2491  				args = append(args, arg[start:i])
  2492  				start = -1
  2493  			}
  2494  		default:
  2495  			if start < 0 {
  2496  				start = i
  2497  			}
  2498  		}
  2499  	}
  2500  	if start >= 0 {
  2501  		args = append(args, arg[start:])
  2502  	}
  2503  
  2504  	c.pa.arg = arg
  2505  	switch len(args) {
  2506  	case 0, 1, 2:
  2507  		return fmt.Errorf("processLeafHeaderMsgArgs Parse Error: '%s'", args)
  2508  	case 3:
  2509  		c.pa.reply = nil
  2510  		c.pa.queues = nil
  2511  		c.pa.hdb = args[1]
  2512  		c.pa.hdr = parseSize(args[1])
  2513  		c.pa.szb = args[2]
  2514  		c.pa.size = parseSize(args[2])
  2515  	case 4:
  2516  		c.pa.reply = args[1]
  2517  		c.pa.queues = nil
  2518  		c.pa.hdb = args[2]
  2519  		c.pa.hdr = parseSize(args[2])
  2520  		c.pa.szb = args[3]
  2521  		c.pa.size = parseSize(args[3])
  2522  	default:
  2523  		// args[1] is our reply indicator. Should be + or | normally.
  2524  		if len(args[1]) != 1 {
  2525  			return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
  2526  		}
  2527  		switch args[1][0] {
  2528  		case '+':
  2529  			c.pa.reply = args[2]
  2530  		case '|':
  2531  			c.pa.reply = nil
  2532  		default:
  2533  			return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
  2534  		}
  2535  		// Grab header size.
  2536  		c.pa.hdb = args[len(args)-2]
  2537  		c.pa.hdr = parseSize(c.pa.hdb)
  2538  
  2539  		// Grab size.
  2540  		c.pa.szb = args[len(args)-1]
  2541  		c.pa.size = parseSize(c.pa.szb)
  2542  
  2543  		// Grab queue names.
  2544  		if c.pa.reply != nil {
  2545  			c.pa.queues = args[3 : len(args)-2]
  2546  		} else {
  2547  			c.pa.queues = args[2 : len(args)-2]
  2548  		}
  2549  	}
  2550  	if c.pa.hdr < 0 {
  2551  		return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Header Size: '%s'", arg)
  2552  	}
  2553  	if c.pa.size < 0 {
  2554  		return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Size: '%s'", args)
  2555  	}
  2556  	if c.pa.hdr > c.pa.size {
  2557  		return fmt.Errorf("processLeafHeaderMsgArgs Header Size larger then TotalSize: '%s'", arg)
  2558  	}
  2559  
  2560  	// Common ones processed after check for arg length
  2561  	c.pa.subject = args[0]
  2562  
  2563  	return nil
  2564  }
  2565  
  2566  func (c *client) processLeafMsgArgs(arg []byte) error {
  2567  	// Unroll splitArgs to avoid runtime/heap issues
  2568  	a := [MAX_MSG_ARGS][]byte{}
  2569  	args := a[:0]
  2570  	start := -1
  2571  	for i, b := range arg {
  2572  		switch b {
  2573  		case ' ', '\t', '\r', '\n':
  2574  			if start >= 0 {
  2575  				args = append(args, arg[start:i])
  2576  				start = -1
  2577  			}
  2578  		default:
  2579  			if start < 0 {
  2580  				start = i
  2581  			}
  2582  		}
  2583  	}
  2584  	if start >= 0 {
  2585  		args = append(args, arg[start:])
  2586  	}
  2587  
  2588  	c.pa.arg = arg
  2589  	switch len(args) {
  2590  	case 0, 1:
  2591  		return fmt.Errorf("processLeafMsgArgs Parse Error: '%s'", args)
  2592  	case 2:
  2593  		c.pa.reply = nil
  2594  		c.pa.queues = nil
  2595  		c.pa.szb = args[1]
  2596  		c.pa.size = parseSize(args[1])
  2597  	case 3:
  2598  		c.pa.reply = args[1]
  2599  		c.pa.queues = nil
  2600  		c.pa.szb = args[2]
  2601  		c.pa.size = parseSize(args[2])
  2602  	default:
  2603  		// args[1] is our reply indicator. Should be + or | normally.
  2604  		if len(args[1]) != 1 {
  2605  			return fmt.Errorf("processLeafMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
  2606  		}
  2607  		switch args[1][0] {
  2608  		case '+':
  2609  			c.pa.reply = args[2]
  2610  		case '|':
  2611  			c.pa.reply = nil
  2612  		default:
  2613  			return fmt.Errorf("processLeafMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
  2614  		}
  2615  		// Grab size.
  2616  		c.pa.szb = args[len(args)-1]
  2617  		c.pa.size = parseSize(c.pa.szb)
  2618  
  2619  		// Grab queue names.
  2620  		if c.pa.reply != nil {
  2621  			c.pa.queues = args[3 : len(args)-1]
  2622  		} else {
  2623  			c.pa.queues = args[2 : len(args)-1]
  2624  		}
  2625  	}
  2626  	if c.pa.size < 0 {
  2627  		return fmt.Errorf("processLeafMsgArgs Bad or Missing Size: '%s'", args)
  2628  	}
  2629  
  2630  	// Common ones processed after check for arg length
  2631  	c.pa.subject = args[0]
  2632  
  2633  	return nil
  2634  }
  2635  
  2636  // processInboundLeafMsg is called to process an inbound msg from a leaf node.
  2637  func (c *client) processInboundLeafMsg(msg []byte) {
  2638  	// Update statistics
  2639  	// The msg includes the CR_LF, so pull back out for accounting.
  2640  	c.in.msgs++
  2641  	c.in.bytes += int32(len(msg) - LEN_CR_LF)
  2642  
  2643  	srv, acc, subject := c.srv, c.acc, string(c.pa.subject)
  2644  
  2645  	// Mostly under testing scenarios.
  2646  	if srv == nil || acc == nil {
  2647  		return
  2648  	}
  2649  
  2650  	// Match the subscriptions. We will use our own L1 map if
  2651  	// it's still valid, avoiding contention on the shared sublist.
  2652  	var r *SublistResult
  2653  	var ok bool
  2654  
  2655  	genid := atomic.LoadUint64(&c.acc.sl.genid)
  2656  	if genid == c.in.genid && c.in.results != nil {
  2657  		r, ok = c.in.results[subject]
  2658  	} else {
  2659  		// Reset our L1 completely.
  2660  		c.in.results = make(map[string]*SublistResult)
  2661  		c.in.genid = genid
  2662  	}
  2663  
  2664  	// Go back to the sublist data structure.
  2665  	if !ok {
  2666  		r = c.acc.sl.Match(subject)
  2667  		c.in.results[subject] = r
  2668  		// Prune the results cache. Keeps us from unbounded growth. Random delete.
  2669  		if len(c.in.results) > maxResultCacheSize {
  2670  			n := 0
  2671  			for subj := range c.in.results {
  2672  				delete(c.in.results, subj)
  2673  				if n++; n > pruneSize {
  2674  					break
  2675  				}
  2676  			}
  2677  		}
  2678  	}
  2679  
  2680  	// Collect queue names if needed.
  2681  	var qnames [][]byte
  2682  
  2683  	// Check for no interest, short circuit if so.
  2684  	// This is the fanout scale.
  2685  	if len(r.psubs)+len(r.qsubs) > 0 {
  2686  		flag := pmrNoFlag
  2687  		// If we have queue subs in this cluster, then if we run in gateway
  2688  		// mode and the remote gateways have queue subs, then we need to
  2689  		// collect the queue groups this message was sent to so that we
  2690  		// exclude them when sending to gateways.
  2691  		if len(r.qsubs) > 0 && c.srv.gateway.enabled &&
  2692  			atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 {
  2693  			flag |= pmrCollectQueueNames
  2694  		}
  2695  		// If this is a mapped subject that means the mapped interest
  2696  		// is what got us here, but this might not have a queue designation
  2697  		// If that is the case, make sure we ignore to process local queue subscribers.
  2698  		if len(c.pa.mapped) > 0 && len(c.pa.queues) == 0 {
  2699  			flag |= pmrIgnoreEmptyQueueFilter
  2700  		}
  2701  		_, qnames = c.processMsgResults(acc, r, msg, nil, c.pa.subject, c.pa.reply, flag)
  2702  	}
  2703  
  2704  	// Now deal with gateways
  2705  	if c.srv.gateway.enabled {
  2706  		c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, qnames)
  2707  	}
  2708  }
  2709  
  2710  // Handles a subscription permission violation.
  2711  // See leafPermViolation() for details.
  2712  func (c *client) leafSubPermViolation(subj []byte) {
  2713  	c.leafPermViolation(false, subj)
  2714  }
  2715  
  2716  // Common function to process publish or subscribe leafnode permission violation.
  2717  // Sends the permission violation error to the remote, logs it and closes the connection.
  2718  // If this is from a server soliciting, the reconnection will be delayed.
  2719  func (c *client) leafPermViolation(pub bool, subj []byte) {
  2720  	if c.isSpokeLeafNode() {
  2721  		// For spokes these are no-ops since the hub server told us our permissions.
  2722  		// We just need to not send these over to the other side since we will get cutoff.
  2723  		return
  2724  	}
  2725  	// FIXME(dlc) ?
  2726  	c.setLeafConnectDelayIfSoliciting(leafNodeReconnectAfterPermViolation)
  2727  	var action string
  2728  	if pub {
  2729  		c.sendErr(fmt.Sprintf("Permissions Violation for Publish to %q", subj))
  2730  		action = "Publish"
  2731  	} else {
  2732  		c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q", subj))
  2733  		action = "Subscription"
  2734  	}
  2735  	c.Errorf("%s Violation on %q - Check other side configuration", action, subj)
  2736  	// TODO: add a new close reason that is more appropriate?
  2737  	c.closeConnection(ProtocolViolation)
  2738  }
  2739  
  2740  // Invoked from generic processErr() for LEAF connections.
  2741  func (c *client) leafProcessErr(errStr string) {
  2742  	// Check if we got a cluster name collision.
  2743  	if strings.Contains(errStr, ErrLeafNodeHasSameClusterName.Error()) {
  2744  		_, delay := c.setLeafConnectDelayIfSoliciting(leafNodeReconnectDelayAfterClusterNameSame)
  2745  		c.Errorf("Leafnode connection dropped with same cluster name error. Delaying attempt to reconnect for %v", delay)
  2746  		return
  2747  	}
  2748  
  2749  	// We will look for Loop detected error coming from the other side.
  2750  	// If we solicit, set the connect delay.
  2751  	if !strings.Contains(errStr, "Loop detected") {
  2752  		return
  2753  	}
  2754  	c.handleLeafNodeLoop(false)
  2755  }
  2756  
  2757  // If this leaf connection solicits, sets the connect delay to the given value,
  2758  // or the one from the server option's LeafNode.connDelay if one is set (for tests).
  2759  // Returns the connection's account name and delay.
  2760  func (c *client) setLeafConnectDelayIfSoliciting(delay time.Duration) (string, time.Duration) {
  2761  	c.mu.Lock()
  2762  	if c.isSolicitedLeafNode() {
  2763  		if s := c.srv; s != nil {
  2764  			if srvdelay := s.getOpts().LeafNode.connDelay; srvdelay != 0 {
  2765  				delay = srvdelay
  2766  			}
  2767  		}
  2768  		c.leaf.remote.setConnectDelay(delay)
  2769  	}
  2770  	accName := c.acc.Name
  2771  	c.mu.Unlock()
  2772  	return accName, delay
  2773  }
  2774  
  2775  // For the given remote Leafnode configuration, this function returns
  2776  // if TLS is required, and if so, will return a clone of the TLS Config
  2777  // (since some fields will be changed during handshake), the TLS server
  2778  // name that is remembered, and the TLS timeout.
  2779  func (c *client) leafNodeGetTLSConfigForSolicit(remote *leafNodeCfg) (bool, *tls.Config, string, float64) {
  2780  	var (
  2781  		tlsConfig  *tls.Config
  2782  		tlsName    string
  2783  		tlsTimeout float64
  2784  	)
  2785  
  2786  	remote.RLock()
  2787  	defer remote.RUnlock()
  2788  
  2789  	tlsRequired := remote.TLS || remote.TLSConfig != nil
  2790  	if tlsRequired {
  2791  		if remote.TLSConfig != nil {
  2792  			tlsConfig = remote.TLSConfig.Clone()
  2793  		} else {
  2794  			tlsConfig = &tls.Config{MinVersion: tls.VersionTLS12}
  2795  		}
  2796  		tlsName = remote.tlsName
  2797  		tlsTimeout = remote.TLSTimeout
  2798  		if tlsTimeout == 0 {
  2799  			tlsTimeout = float64(TLS_TIMEOUT / time.Second)
  2800  		}
  2801  	}
  2802  
  2803  	return tlsRequired, tlsConfig, tlsName, tlsTimeout
  2804  }
  2805  
  2806  // Initiates the LeafNode Websocket connection by:
  2807  // - doing the TLS handshake if needed
  2808  // - sending the HTTP request
  2809  // - waiting for the HTTP response
  2810  //
  2811  // Since some bufio reader is used to consume the HTTP response, this function
  2812  // returns the slice of buffered bytes (if any) so that the readLoop that will
  2813  // be started after that consume those first before reading from the socket.
  2814  // The boolean
  2815  //
  2816  // Lock held on entry.
  2817  func (c *client) leafNodeSolicitWSConnection(opts *Options, rURL *url.URL, remote *leafNodeCfg) ([]byte, ClosedState, error) {
  2818  	remote.RLock()
  2819  	compress := remote.Websocket.Compression
  2820  	// By default the server will mask outbound frames, but it can be disabled with this option.
  2821  	noMasking := remote.Websocket.NoMasking
  2822  	remote.RUnlock()
  2823  	// Will do the client-side TLS handshake if needed.
  2824  	tlsRequired, err := c.leafClientHandshakeIfNeeded(remote, opts)
  2825  	if err != nil {
  2826  		// 0 will indicate that the connection was already closed
  2827  		return nil, 0, err
  2828  	}
  2829  
  2830  	// For http request, we need the passed URL to contain either http or https scheme.
  2831  	scheme := "http"
  2832  	if tlsRequired {
  2833  		scheme = "https"
  2834  	}
  2835  	// We will use the `/leafnode` path to tell the accepting WS server that it should
  2836  	// create a LEAF connection, not a CLIENT.
  2837  	// In case we use the user's URL path in the future, make sure we append the user's
  2838  	// path to our `/leafnode` path.
  2839  	lpath := leafNodeWSPath
  2840  	if curPath := rURL.EscapedPath(); curPath != _EMPTY_ {
  2841  		if curPath[0] == '/' {
  2842  			curPath = curPath[1:]
  2843  		}
  2844  		lpath = path.Join(curPath, lpath)
  2845  	} else {
  2846  		lpath = lpath[1:]
  2847  	}
  2848  	ustr := fmt.Sprintf("%s://%s/%s", scheme, rURL.Host, lpath)
  2849  	u, _ := url.Parse(ustr)
  2850  	req := &http.Request{
  2851  		Method:     "GET",
  2852  		URL:        u,
  2853  		Proto:      "HTTP/1.1",
  2854  		ProtoMajor: 1,
  2855  		ProtoMinor: 1,
  2856  		Header:     make(http.Header),
  2857  		Host:       u.Host,
  2858  	}
  2859  	wsKey, err := wsMakeChallengeKey()
  2860  	if err != nil {
  2861  		return nil, WriteError, err
  2862  	}
  2863  
  2864  	req.Header["Upgrade"] = []string{"websocket"}
  2865  	req.Header["Connection"] = []string{"Upgrade"}
  2866  	req.Header["Sec-WebSocket-Key"] = []string{wsKey}
  2867  	req.Header["Sec-WebSocket-Version"] = []string{"13"}
  2868  	if compress {
  2869  		req.Header.Add("Sec-WebSocket-Extensions", wsPMCReqHeaderValue)
  2870  	}
  2871  	if noMasking {
  2872  		req.Header.Add(wsNoMaskingHeader, wsNoMaskingValue)
  2873  	}
  2874  	if err := req.Write(c.nc); err != nil {
  2875  		return nil, WriteError, err
  2876  	}
  2877  
  2878  	var resp *http.Response
  2879  
  2880  	br := bufio.NewReaderSize(c.nc, MAX_CONTROL_LINE_SIZE)
  2881  	c.nc.SetReadDeadline(time.Now().Add(DEFAULT_LEAFNODE_INFO_WAIT))
  2882  	resp, err = http.ReadResponse(br, req)
  2883  	if err == nil &&
  2884  		(resp.StatusCode != 101 ||
  2885  			!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
  2886  			!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
  2887  			resp.Header.Get("Sec-Websocket-Accept") != wsAcceptKey(wsKey)) {
  2888  
  2889  		err = fmt.Errorf("invalid websocket connection")
  2890  	}
  2891  	// Check compression extension...
  2892  	if err == nil && c.ws.compress {
  2893  		// Check that not only permessage-deflate extension is present, but that
  2894  		// we also have server and client no context take over.
  2895  		srvCompress, noCtxTakeover := wsPMCExtensionSupport(resp.Header, false)
  2896  
  2897  		// If server does not support compression, then simply disable it in our side.
  2898  		if !srvCompress {
  2899  			c.ws.compress = false
  2900  		} else if !noCtxTakeover {
  2901  			err = fmt.Errorf("compression negotiation error")
  2902  		}
  2903  	}
  2904  	// Same for no masking...
  2905  	if err == nil && noMasking {
  2906  		// Check if server accepts no masking
  2907  		if resp.Header.Get(wsNoMaskingHeader) != wsNoMaskingValue {
  2908  			// Nope, need to mask our writes as any client would do.
  2909  			c.ws.maskwrite = true
  2910  		}
  2911  	}
  2912  	if resp != nil {
  2913  		resp.Body.Close()
  2914  	}
  2915  	if err != nil {
  2916  		return nil, ReadError, err
  2917  	}
  2918  	c.Debugf("Leafnode compression=%v masking=%v", c.ws.compress, c.ws.maskwrite)
  2919  
  2920  	var preBuf []byte
  2921  	// We have to slurp whatever is in the bufio reader and pass that to the readloop.
  2922  	if n := br.Buffered(); n != 0 {
  2923  		preBuf, _ = br.Peek(n)
  2924  	}
  2925  	return preBuf, 0, nil
  2926  }
  2927  
  2928  const connectProcessTimeout = 2 * time.Second
  2929  
  2930  // This is invoked for remote LEAF remote connections after processing the INFO
  2931  // protocol.
  2932  func (s *Server) leafNodeResumeConnectProcess(c *client) {
  2933  	clusterName := s.ClusterName()
  2934  
  2935  	c.mu.Lock()
  2936  	if c.isClosed() {
  2937  		c.mu.Unlock()
  2938  		return
  2939  	}
  2940  	if err := c.sendLeafConnect(clusterName, c.headers); err != nil {
  2941  		c.mu.Unlock()
  2942  		c.closeConnection(WriteError)
  2943  		return
  2944  	}
  2945  
  2946  	// Spin up the write loop.
  2947  	s.startGoRoutine(func() { c.writeLoop() })
  2948  
  2949  	// timeout leafNodeFinishConnectProcess
  2950  	c.ping.tmr = time.AfterFunc(connectProcessTimeout, func() {
  2951  		c.mu.Lock()
  2952  		// check if leafNodeFinishConnectProcess was called and prevent later leafNodeFinishConnectProcess
  2953  		if !c.flags.setIfNotSet(connectProcessFinished) {
  2954  			c.mu.Unlock()
  2955  			return
  2956  		}
  2957  		clearTimer(&c.ping.tmr)
  2958  		closed := c.isClosed()
  2959  		c.mu.Unlock()
  2960  		if !closed {
  2961  			c.sendErrAndDebug("Stale Leaf Node Connection - Closing")
  2962  			c.closeConnection(StaleConnection)
  2963  		}
  2964  	})
  2965  	c.mu.Unlock()
  2966  	c.Debugf("Remote leafnode connect msg sent")
  2967  }
  2968  
  2969  // This is invoked for remote LEAF connections after processing the INFO
  2970  // protocol and leafNodeResumeConnectProcess.
  2971  // This will send LS+ the CONNECT protocol and register the leaf node.
  2972  func (s *Server) leafNodeFinishConnectProcess(c *client) {
  2973  	c.mu.Lock()
  2974  	if !c.flags.setIfNotSet(connectProcessFinished) {
  2975  		c.mu.Unlock()
  2976  		return
  2977  	}
  2978  	if c.isClosed() {
  2979  		c.mu.Unlock()
  2980  		s.removeLeafNodeConnection(c)
  2981  		return
  2982  	}
  2983  	remote := c.leaf.remote
  2984  	// Check if we will need to send the system connect event.
  2985  	remote.RLock()
  2986  	sendSysConnectEvent := remote.Hub
  2987  	remote.RUnlock()
  2988  
  2989  	// Capture account before releasing lock
  2990  	acc := c.acc
  2991  	// cancel connectProcessTimeout
  2992  	clearTimer(&c.ping.tmr)
  2993  	c.mu.Unlock()
  2994  
  2995  	// Make sure we register with the account here.
  2996  	if err := c.registerWithAccount(acc); err != nil {
  2997  		if err == ErrTooManyAccountConnections {
  2998  			c.maxAccountConnExceeded()
  2999  			return
  3000  		} else if err == ErrLeafNodeLoop {
  3001  			c.handleLeafNodeLoop(true)
  3002  			return
  3003  		}
  3004  		c.Errorf("Registering leaf with account %s resulted in error: %v", acc.Name, err)
  3005  		c.closeConnection(ProtocolViolation)
  3006  		return
  3007  	}
  3008  	s.addLeafNodeConnection(c, _EMPTY_, _EMPTY_, false)
  3009  	s.initLeafNodeSmapAndSendSubs(c)
  3010  	if sendSysConnectEvent {
  3011  		s.sendLeafNodeConnect(acc)
  3012  	}
  3013  
  3014  	// The above functions are not atomically under the client
  3015  	// lock doing those operations. It is possible - since we
  3016  	// have started the read/write loops - that the connection
  3017  	// is closed before or in between. This would leave the
  3018  	// closed LN connection possible registered with the account
  3019  	// and/or the server's leafs map. So check if connection
  3020  	// is closed, and if so, manually cleanup.
  3021  	c.mu.Lock()
  3022  	closed := c.isClosed()
  3023  	if !closed {
  3024  		c.setFirstPingTimer()
  3025  	}
  3026  	c.mu.Unlock()
  3027  	if closed {
  3028  		s.removeLeafNodeConnection(c)
  3029  		if prev := acc.removeClient(c); prev == 1 {
  3030  			s.decActiveAccounts()
  3031  		}
  3032  	}
  3033  }