github.com/nats-io/nats-server/v2@v2.11.0-preview.2/server/leafnode.go

     1  // Copyright 2019-2024 The NATS Authors
     2  // Licensed under the Apache License, Version 2.0 (the "License");
     3  // you may not use this file except in compliance with the License.
     4  // You may obtain a copy of the License at
     5  //
     6  // http://www.apache.org/licenses/LICENSE-2.0
     7  //
     8  // Unless required by applicable law or agreed to in writing, software
     9  // distributed under the License is distributed on an "AS IS" BASIS,
    10  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package server
    15  
    16  import (
    17  	"bufio"
    18  	"bytes"
    19  	"crypto/tls"
    20  	"encoding/base64"
    21  	"encoding/json"
    22  	"fmt"
    23  	"math/rand"
    24  	"net"
    25  	"net/http"
    26  	"net/url"
    27  	"os"
    28  	"path"
    29  	"reflect"
    30  	"regexp"
    31  	"runtime"
    32  	"strconv"
    33  	"strings"
    34  	"sync"
    35  	"sync/atomic"
    36  	"time"
    37  
    38  	"github.com/klauspost/compress/s2"
    39  	"github.com/nats-io/jwt/v2"
    40  	"github.com/nats-io/nkeys"
    41  	"github.com/nats-io/nuid"
    42  )
    43  
    44  const (
    45  	// Warning when user configures leafnode TLS insecure
    46  	leafnodeTLSInsecureWarning = "TLS certificate chain and hostname of solicited leafnodes will not be verified. DO NOT USE IN PRODUCTION!"
    47  
    48  	// When a loop is detected, delay the reconnect of the solicited connection.
    49  	leafNodeReconnectDelayAfterLoopDetected = 30 * time.Second
    50  
    51  	// When a server receives a message causing a permission violation, the
    52  	// connection is closed and it won't attempt to reconnect for that long.
    53  	leafNodeReconnectAfterPermViolation = 30 * time.Second
    54  
    55  	// When we have the same cluster name as the hub.
    56  	leafNodeReconnectDelayAfterClusterNameSame = 30 * time.Second
    57  
    58  	// Prefix for loop detection subject
    59  	leafNodeLoopDetectionSubjectPrefix = "$LDS."
    60  
    61  	// Path added to URL to indicate to WS server that the connection is a
    62  	// LEAF connection as opposed to a CLIENT.
    63  	leafNodeWSPath = "/leafnode"
    64  
    65  	// This is the time the server will wait, when receiving a CONNECT,
    66  	// before closing the connection if the required minimum version is not met.
    67  	leafNodeWaitBeforeClose = 5 * time.Second
    68  )
    69  
    70  type leaf struct {
    71  	// Holds the remote configuration and auth info for solicited connections.
    72  	remote *leafNodeCfg
    73  	// isSpoke tells us what role we are playing.
    74  	// Used when we receive a connection but the other side tells us they are a hub.
    75  	isSpoke bool
    76  	// remoteCluster is when we are a hub but the spoke leafnode is part of a cluster.
    77  	remoteCluster string
    78  	// remoteServer holds onto the remote server's name or ID.
    79  	remoteServer string
    80  	// domain name of remote server
    81  	remoteDomain string
    82  	// account name of remote server
    83  	remoteAccName string
    84  	// Used to suppress sub and unsub interest. Same as routes but our audience
    85  	// here is tied to this leaf node. This will hold all subscriptions except this
    86  	// leaf node's own. This represents all the interest we want to send to the other side.
    87  	smap map[string]int32
    88  	// This map will contain all the subscriptions that have been added to the smap
    89  	// during initLeafNodeSmapAndSendSubs. It is short lived and is there to avoid
    90  	// a race between processing of a sub, where the sub is added to the account sublist
    91  	// but updateSmap has not been called on that "thread", while in the LN readloop,
    92  	// when processing CONNECT, initLeafNodeSmapAndSendSubs is invoked and adds
    93  	// this subscription to the smap. When processing of the sub then calls updateSmap,
    94  	// we would add it a second time to the smap, causing a later unsub to suppress the LS-.
    95  	tsub  map[*subscription]struct{}
    96  	tsubt *time.Timer
    97  	// Selected compression mode, which may be different from the server configured mode.
    98  	compression string
    99  	// This is for GW map replies.
   100  	gwSub *subscription
   101  }
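
// A minimal sketch of the interest counting that smap implements (hypothetical
// helpers, not part of this file): only a 0->1 transition should emit an LS+
// to the other side, and only a 1->0 transition should emit an LS-.
//
//	func addInterest(smap map[string]int32, subj string) (sendLSPlus bool) {
//		smap[subj]++
//		return smap[subj] == 1 // first interest on this subject
//	}
//
//	func removeInterest(smap map[string]int32, subj string) (sendLSMinus bool) {
//		n, ok := smap[subj]
//		if !ok {
//			return false
//		}
//		if n <= 1 {
//			delete(smap, subj)
//			return true // last interest dropped
//		}
//		smap[subj] = n - 1
//		return false
//	}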
   102  
   103  // Used for remote (solicited) leafnodes.
   104  type leafNodeCfg struct {
   105  	sync.RWMutex
   106  	*RemoteLeafOpts
   107  	urls      []*url.URL
   108  	curURL    *url.URL
   109  	tlsName   string
   110  	username  string
   111  	password  string
   112  	perms     *Permissions
   113  	connDelay time.Duration // Delay before a connect, could be used while detecting a loop condition, etc.
   114  }
   115  
   116  // Check to see if this is a solicited leafnode. We do special processing for solicited.
   117  func (c *client) isSolicitedLeafNode() bool {
   118  	return c.kind == LEAF && c.leaf.remote != nil
   119  }
   120  
   121  // Returns true if this is a solicited leafnode that is not configured to be treated as a hub,
   122  // or an accepted connection where the other side has declared itself to be the hub.
   123  func (c *client) isSpokeLeafNode() bool {
   124  	return c.kind == LEAF && c.leaf.isSpoke
   125  }
   126  
   127  func (c *client) isHubLeafNode() bool {
   128  	return c.kind == LEAF && !c.leaf.isSpoke
   129  }
   130  
   131  // This will spin up go routines to solicit the remote leaf node connections.
   132  func (s *Server) solicitLeafNodeRemotes(remotes []*RemoteLeafOpts) {
   133  	sysAccName := _EMPTY_
   134  	sAcc := s.SystemAccount()
   135  	if sAcc != nil {
   136  		sysAccName = sAcc.Name
   137  	}
   138  	addRemote := func(r *RemoteLeafOpts, isSysAccRemote bool) *leafNodeCfg {
   139  		s.mu.Lock()
   140  		remote := newLeafNodeCfg(r)
   141  		creds := remote.Credentials
   142  		accName := remote.LocalAccount
   143  		s.leafRemoteCfgs = append(s.leafRemoteCfgs, remote)
   144  		// Print a notice if the system account remote restricts permissions.
   145  		if isSysAccRemote {
   146  			if len(remote.DenyExports) > 0 {
   147  				s.Noticef("Remote for System Account uses restricted export permissions")
   148  			}
   149  			if len(remote.DenyImports) > 0 {
   150  				s.Noticef("Remote for System Account uses restricted import permissions")
   151  			}
   152  		}
   153  		s.mu.Unlock()
   154  		if creds != _EMPTY_ {
   155  			contents, err := os.ReadFile(creds)
   156  			defer wipeSlice(contents)
   157  			if err != nil {
   158  				s.Errorf("Error reading LeafNode Remote Credentials file %q: %v", creds, err)
   159  			} else if items := credsRe.FindAllSubmatch(contents, -1); len(items) < 2 {
   160  				s.Errorf("LeafNode Remote Credentials file %q malformed", creds)
   161  			} else if _, err := nkeys.FromSeed(items[1][1]); err != nil {
   162  				s.Errorf("LeafNode Remote Credentials file %q has malformed seed", creds)
   163  			} else if uc, err := jwt.DecodeUserClaims(string(items[0][1])); err != nil {
   164  				s.Errorf("LeafNode Remote Credentials file %q has malformed user jwt", creds)
   165  			} else if isSysAccRemote {
   166  				if !uc.Permissions.Pub.Empty() || !uc.Permissions.Sub.Empty() || uc.Permissions.Resp != nil {
   167  					s.Noticef("LeafNode Remote for System Account uses credentials file %q with restricted permissions", creds)
   168  				}
   169  			} else {
   170  				if !uc.Permissions.Pub.Empty() || !uc.Permissions.Sub.Empty() || uc.Permissions.Resp != nil {
   171  					s.Noticef("LeafNode Remote for Account %s uses credentials file %q with restricted permissions", accName, creds)
   172  				}
   173  			}
   174  		}
   175  		return remote
   176  	}
   177  	for _, r := range remotes {
   178  		remote := addRemote(r, r.LocalAccount == sysAccName)
   179  		s.startGoRoutine(func() { s.connectToRemoteLeafNode(remote, true) })
   180  	}
   181  }
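
// The remotes solicited above typically come from a configuration block such
// as the following (hostnames, account names and file paths are illustrative):
//
//	leafnodes {
//	    remotes = [
//	        {
//	            url: "nats-leaf://hub.example.com:7422"
//	            account: "AGG"                     # local account to bind to
//	            credentials: "/etc/nats/agg.creds" # parsed with credsRe below
//	        }
//	    ]
//	}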
   182  
   183  func (s *Server) remoteLeafNodeStillValid(remote *leafNodeCfg) bool {
   184  	for _, ri := range s.getOpts().LeafNode.Remotes {
   185  		// FIXME(dlc) - What about auth changes?
   186  		if reflect.DeepEqual(ri.URLs, remote.URLs) {
   187  			return true
   188  		}
   189  	}
   190  	return false
   191  }
   192  
   193  // Ensure that leafnode is properly configured.
   194  func validateLeafNode(o *Options) error {
   195  	if err := validateLeafNodeAuthOptions(o); err != nil {
   196  		return err
   197  	}
   198  
   199  	// Users can bind to any local account; if it's empty we will assume the $G account.
   200  	for _, r := range o.LeafNode.Remotes {
   201  		if r.LocalAccount == _EMPTY_ {
   202  			r.LocalAccount = globalAccountName
   203  		}
   204  	}
   205  
   206  	// In local config mode, check that leafnode configuration refers to accounts that exist.
   207  	if len(o.TrustedOperators) == 0 {
   208  		accNames := map[string]struct{}{}
   209  		for _, a := range o.Accounts {
   210  			accNames[a.Name] = struct{}{}
   211  		}
   212  		// global account is always created
   213  		accNames[DEFAULT_GLOBAL_ACCOUNT] = struct{}{}
   214  		// in the context of leaf nodes, empty account means global account
   215  		accNames[_EMPTY_] = struct{}{}
   216  		// system account either exists or, if not disabled, will be created
   217  		if o.SystemAccount == _EMPTY_ && !o.NoSystemAccount {
   218  			accNames[DEFAULT_SYSTEM_ACCOUNT] = struct{}{}
   219  		}
   220  		checkAccountExists := func(accName string, cfgType string) error {
   221  			if _, ok := accNames[accName]; !ok {
   222  				return fmt.Errorf("cannot find local account %q specified in leafnode %s", accName, cfgType)
   223  			}
   224  			return nil
   225  		}
   226  		if err := checkAccountExists(o.LeafNode.Account, "authorization"); err != nil {
   227  			return err
   228  		}
   229  		for _, lu := range o.LeafNode.Users {
   230  			if lu.Account == nil { // means global account
   231  				continue
   232  			}
   233  			if err := checkAccountExists(lu.Account.Name, "authorization"); err != nil {
   234  				return err
   235  			}
   236  		}
   237  		for _, r := range o.LeafNode.Remotes {
   238  			if err := checkAccountExists(r.LocalAccount, "remote"); err != nil {
   239  				return err
   240  			}
   241  		}
   242  	} else {
   243  		if len(o.LeafNode.Users) != 0 {
   244  			return fmt.Errorf("operator mode does not allow specifying users in leafnode config")
   245  		}
   246  		for _, r := range o.LeafNode.Remotes {
   247  			if !nkeys.IsValidPublicAccountKey(r.LocalAccount) {
   248  				return fmt.Errorf(
   249  					"operator mode requires account nkeys in remotes. " +
   250  						"Please add an `account` key to each remote in your `leafnodes` section, to assign it to an account. " +
   251  						"Each account value should be a 56 character public key, starting with the letter 'A'")
   252  			}
   253  		}
   254  		if o.LeafNode.Port != 0 && o.LeafNode.Account != "" && !nkeys.IsValidPublicAccountKey(o.LeafNode.Account) {
   255  			return fmt.Errorf("operator mode and non account nkeys are incompatible")
   256  		}
   257  	}
   258  
   259  	// Validate compression settings
   260  	if o.LeafNode.Compression.Mode != _EMPTY_ {
   261  		if err := validateAndNormalizeCompressionOption(&o.LeafNode.Compression, CompressionS2Auto); err != nil {
   262  			return err
   263  		}
   264  	}
   265  
   266  	// If a remote has a websocket scheme, all need to have it.
   267  	for _, rcfg := range o.LeafNode.Remotes {
   268  		if len(rcfg.URLs) >= 2 {
   269  			firstIsWS, ok := isWSURL(rcfg.URLs[0]), true
   270  			for i := 1; i < len(rcfg.URLs); i++ {
   271  				u := rcfg.URLs[i]
   272  				if isWS := isWSURL(u); isWS && !firstIsWS || !isWS && firstIsWS {
   273  					ok = false
   274  					break
   275  				}
   276  			}
   277  			if !ok {
   278  				return fmt.Errorf("remote leaf node configuration cannot have a mix of websocket and non-websocket urls: %q", redactURLList(rcfg.URLs))
   279  			}
   280  		}
   281  		// Validate compression settings
   282  		if rcfg.Compression.Mode != _EMPTY_ {
   283  			if err := validateAndNormalizeCompressionOption(&rcfg.Compression, CompressionS2Auto); err != nil {
   284  				return err
   285  			}
   286  		}
   287  	}
   288  
   289  	if o.LeafNode.Port == 0 {
   290  		return nil
   291  	}
   292  
   293  	// If MinVersion is defined, check that it is valid.
   294  	if mv := o.LeafNode.MinVersion; mv != _EMPTY_ {
   295  		if err := checkLeafMinVersionConfig(mv); err != nil {
   296  			return err
   297  		}
   298  	}
   299  
   300  	// The checks below will be done only when detecting that we are configured
   301  	// with gateways. So if an option validation needs to be done regardless,
   302  	// it MUST be done before this point!
   303  
   304  	if o.Gateway.Name == _EMPTY_ && o.Gateway.Port == 0 {
   305  		return nil
   306  	}
   307  	// If we are here we have both leaf nodes and gateways defined, make sure there
   308  	// is a system account defined.
   309  	if o.SystemAccount == _EMPTY_ {
   310  		return fmt.Errorf("leaf nodes and gateways (both being defined) require a system account to also be configured")
   311  	}
   312  	if err := validatePinnedCerts(o.LeafNode.TLSPinnedCerts); err != nil {
   313  		return fmt.Errorf("leafnode: %v", err)
   314  	}
   315  	return nil
   316  }
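
// Regarding the websocket check above: a remote must be all-websocket or
// all-non-websocket. A valid all-websocket remote could look like this
// (hosts illustrative; the server appends the "/leafnode" path itself, see
// leafNodeWSPath):
//
//	leafnodes {
//	    remotes = [
//	        {
//	            urls: ["ws://hub1.example.com:8080", "ws://hub2.example.com:8080"]
//	            account: "A"
//	        }
//	    ]
//	}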
   317  
   318  func checkLeafMinVersionConfig(mv string) error {
   319  	if ok, err := versionAtLeastCheckError(mv, 2, 8, 0); !ok || err != nil {
   320  		if err != nil {
   321  			return fmt.Errorf("invalid leafnode's minimum version: %v", err)
   322  		} else {
   323  			return fmt.Errorf("the minimum version should be at least 2.8.0")
   324  		}
   325  	}
   326  	return nil
   327  }
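
// Illustrative accept-side configuration rejecting leafnodes older than a
// given release (version value illustrative; it must be at least 2.8.0 as
// enforced above):
//
//	leafnodes {
//	    port: 7422
//	    min_version: "2.9.0"
//	}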
   328  
   329  // Used to validate user names in LeafNode configuration.
   330  // - rejects mix of single and multiple users.
   331  // - rejects duplicate user names.
   332  func validateLeafNodeAuthOptions(o *Options) error {
   333  	if len(o.LeafNode.Users) == 0 {
   334  		return nil
   335  	}
   336  	if o.LeafNode.Username != _EMPTY_ {
   337  		return fmt.Errorf("can not have a single user/pass and a users array")
   338  	}
   339  	if o.LeafNode.Nkey != _EMPTY_ {
   340  		return fmt.Errorf("can not have a single nkey and a users array")
   341  	}
   342  	users := map[string]struct{}{}
   343  	for _, u := range o.LeafNode.Users {
   344  		if _, exists := users[u.Username]; exists {
   345  			return fmt.Errorf("duplicate user %q detected in leafnode authorization", u.Username)
   346  		}
   347  		users[u.Username] = struct{}{}
   348  	}
   349  	return nil
   350  }
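
// A users array that passes the validation above (names and passwords
// illustrative); note that it cannot be combined with a single top-level
// user/password or nkey:
//
//	leafnodes {
//	    port: 7422
//	    authorization {
//	        users = [
//	            { user: "spoke-a", password: "s3cret-a", account: "A" }
//	            { user: "spoke-b", password: "s3cret-b", account: "B" }
//	        ]
//	    }
//	}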
   351  
   352  // Update remote LeafNode TLS configurations after a config reload.
   353  func (s *Server) updateRemoteLeafNodesTLSConfig(opts *Options) {
   354  	max := len(opts.LeafNode.Remotes)
   355  	if max == 0 {
   356  		return
   357  	}
   358  
   359  	s.mu.RLock()
   360  	defer s.mu.RUnlock()
   361  
   362  	// Changes in the list of remote leaf nodes are not supported.
   363  	// However, make sure that we don't go over the arrays.
   364  	if len(s.leafRemoteCfgs) < max {
   365  		max = len(s.leafRemoteCfgs)
   366  	}
   367  	for i := 0; i < max; i++ {
   368  		ro := opts.LeafNode.Remotes[i]
   369  		cfg := s.leafRemoteCfgs[i]
   370  		if ro.TLSConfig != nil {
   371  			cfg.Lock()
   372  			cfg.TLSConfig = ro.TLSConfig.Clone()
   373  			cfg.TLSHandshakeFirst = ro.TLSHandshakeFirst
   374  			cfg.Unlock()
   375  		}
   376  	}
   377  }
   378  
   379  func (s *Server) reConnectToRemoteLeafNode(remote *leafNodeCfg) {
   380  	delay := s.getOpts().LeafNode.ReconnectInterval
   381  	select {
   382  	case <-time.After(delay):
   383  	case <-s.quitCh:
   384  		s.grWG.Done()
   385  		return
   386  	}
   387  	s.connectToRemoteLeafNode(remote, false)
   388  }
   389  
   390  // Creates a leafNodeCfg object that wraps the RemoteLeafOpts.
   391  func newLeafNodeCfg(remote *RemoteLeafOpts) *leafNodeCfg {
   392  	cfg := &leafNodeCfg{
   393  		RemoteLeafOpts: remote,
   394  		urls:           make([]*url.URL, 0, len(remote.URLs)),
   395  	}
   396  	if len(remote.DenyExports) > 0 || len(remote.DenyImports) > 0 {
   397  		perms := &Permissions{}
   398  		if len(remote.DenyExports) > 0 {
   399  			perms.Publish = &SubjectPermission{Deny: remote.DenyExports}
   400  		}
   401  		if len(remote.DenyImports) > 0 {
   402  			perms.Subscribe = &SubjectPermission{Deny: remote.DenyImports}
   403  		}
   404  		cfg.perms = perms
   405  	}
   406  	// Start with the URLs that are configured. We will add to this
   407  	// array when receiving async leafnode INFOs.
   408  	cfg.urls = append(cfg.urls, cfg.URLs...)
   409  	// If allowed to randomize, do it on our copy of URLs
   410  	if !remote.NoRandomize {
   411  		rand.Shuffle(len(cfg.urls), func(i, j int) {
   412  			cfg.urls[i], cfg.urls[j] = cfg.urls[j], cfg.urls[i]
   413  		})
   414  	}
   415  	// If we are TLS make sure we save off a proper servername if possible.
   416  	// Do the same for user/password since we may need them to connect to
   417  	// a bare URL that we get from the INFO protocol.
   418  	for _, u := range cfg.urls {
   419  		cfg.saveTLSHostname(u)
   420  		cfg.saveUserPassword(u)
   421  		// If the url(s) have the "wss://" scheme, and we don't have a TLS
   422  		// config, mark that we should be using TLS anyway.
   423  		if !cfg.TLS && isWSSURL(u) {
   424  			cfg.TLS = true
   425  		}
   426  	}
   427  	return cfg
   428  }
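
// The deny lists handled above map deny_exports to a publish deny and
// deny_imports to a subscribe deny. An illustrative remote (subjects
// hypothetical):
//
//	remotes = [
//	    {
//	        url: "nats-leaf://hub.example.com:7422"
//	        account: "A"
//	        deny_exports: ["secrets.>"]  # never published to the other side
//	        deny_imports: ["internal.>"] # never subscribed to from the other side
//	    }
//	]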
   429  
   430  // Will pick a URL from the list of available URLs.
   431  func (cfg *leafNodeCfg) pickNextURL() *url.URL {
   432  	cfg.Lock()
   433  	defer cfg.Unlock()
   434  	// If the current URL is the first in the list and we have more than
   435  	// one URL, then move that one to the end of the list.
   436  	if cfg.curURL != nil && len(cfg.urls) > 1 && urlsAreEqual(cfg.curURL, cfg.urls[0]) {
   437  		first := cfg.urls[0]
   438  		copy(cfg.urls, cfg.urls[1:])
   439  		cfg.urls[len(cfg.urls)-1] = first
   440  	}
   441  	cfg.curURL = cfg.urls[0]
   442  	return cfg.curURL
   443  }
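
// Rotation sketch: with urls = [A, B, C], successive pickNextURL calls return
// A, B, C, A, ... because the URL that was just used (still at urls[0]) is
// moved to the back before urls[0] is selected again.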
   444  
   445  // Returns the current URL
   446  func (cfg *leafNodeCfg) getCurrentURL() *url.URL {
   447  	cfg.RLock()
   448  	defer cfg.RUnlock()
   449  	return cfg.curURL
   450  }
   451  
   452  // Returns how long the server should wait before attempting
   453  // to solicit a remote leafnode connection.
   454  func (cfg *leafNodeCfg) getConnectDelay() time.Duration {
   455  	cfg.RLock()
   456  	delay := cfg.connDelay
   457  	cfg.RUnlock()
   458  	return delay
   459  }
   460  
   461  // Sets the connect delay.
   462  func (cfg *leafNodeCfg) setConnectDelay(delay time.Duration) {
   463  	cfg.Lock()
   464  	cfg.connDelay = delay
   465  	cfg.Unlock()
   466  }
   467  
   468  // Ensure that non-exported options (used in tests) have
   469  // been properly set.
   470  func (s *Server) setLeafNodeNonExportedOptions() {
   471  	opts := s.getOpts()
   472  	s.leafNodeOpts.dialTimeout = opts.LeafNode.dialTimeout
   473  	if s.leafNodeOpts.dialTimeout == 0 {
   474  		// Use same timeouts as routes for now.
   475  		s.leafNodeOpts.dialTimeout = DEFAULT_ROUTE_DIAL
   476  	}
   477  	s.leafNodeOpts.resolver = opts.LeafNode.resolver
   478  	if s.leafNodeOpts.resolver == nil {
   479  		s.leafNodeOpts.resolver = net.DefaultResolver
   480  	}
   481  }
   482  
   483  const sharedSysAccDelay = 250 * time.Millisecond
   484  
   485  func (s *Server) connectToRemoteLeafNode(remote *leafNodeCfg, firstConnect bool) {
   486  	defer s.grWG.Done()
   487  
   488  	if remote == nil || len(remote.URLs) == 0 {
   489  		s.Debugf("Empty remote leafnode definition, nothing to connect")
   490  		return
   491  	}
   492  
   493  	opts := s.getOpts()
   494  	reconnectDelay := opts.LeafNode.ReconnectInterval
   495  	s.mu.Lock()
   496  	dialTimeout := s.leafNodeOpts.dialTimeout
   497  	resolver := s.leafNodeOpts.resolver
   498  	var isSysAcc bool
   499  	if s.eventsEnabled() {
   500  		isSysAcc = remote.LocalAccount == s.sys.account.Name
   501  	}
   502  	s.mu.Unlock()
   503  
   504  	// If we are sharing a system account and are not in standalone mode, delay to gather some info prior to connecting.
   505  	if firstConnect && isSysAcc && !s.standAloneMode() {
   506  		s.Debugf("Will delay first leafnode connect to shared system account due to clustering")
   507  		remote.setConnectDelay(sharedSysAccDelay)
   508  	}
   509  
   510  	if connDelay := remote.getConnectDelay(); connDelay > 0 {
   511  		select {
   512  		case <-time.After(connDelay):
   513  		case <-s.quitCh:
   514  			return
   515  		}
   516  		remote.setConnectDelay(0)
   517  	}
   518  
   519  	var conn net.Conn
   520  
   521  	const connErrFmt = "Error trying to connect as leafnode to remote server %q (attempt %v): %v"
   522  
   523  	attempts := 0
   524  	for s.isRunning() && s.remoteLeafNodeStillValid(remote) {
   525  		rURL := remote.pickNextURL()
   526  		url, err := s.getRandomIP(resolver, rURL.Host, nil)
   527  		if err == nil {
   528  			var ipStr string
   529  			if url != rURL.Host {
   530  				ipStr = fmt.Sprintf(" (%s)", url)
   531  			}
   532  			// Some tests may want to prevent remotes from connecting.
   533  			if s.isLeafConnectDisabled() {
   534  				s.Debugf("Will not attempt to connect to remote server on %q%s, leafnodes currently disabled", rURL.Host, ipStr)
   535  				err = ErrLeafNodeDisabled
   536  			} else {
   537  				s.Debugf("Trying to connect as leafnode to remote server on %q%s", rURL.Host, ipStr)
   538  				conn, err = natsDialTimeout("tcp", url, dialTimeout)
   539  			}
   540  		}
   541  		if err != nil {
   542  			jitter := time.Duration(rand.Int63n(int64(reconnectDelay)))
   543  			delay := reconnectDelay + jitter
   544  			attempts++
   545  			if s.shouldReportConnectErr(firstConnect, attempts) {
   546  				s.Errorf(connErrFmt, rURL.Host, attempts, err)
   547  			} else {
   548  				s.Debugf(connErrFmt, rURL.Host, attempts, err)
   549  			}
   550  			select {
   551  			case <-s.quitCh:
   552  				return
   553  			case <-time.After(delay):
   554  				// Check if we should migrate any JetStream assets while this remote is down.
   555  				s.checkJetStreamMigrate(remote)
   556  				continue
   557  			}
   558  		}
   559  		if !s.remoteLeafNodeStillValid(remote) {
   560  			conn.Close()
   561  			return
   562  		}
   563  
   564  		// We have a connection here to a remote server.
   565  		// Go ahead and create our leaf node and return.
   566  		s.createLeafNode(conn, rURL, remote, nil)
   567  
   568  		// Clear any observer states if we had them.
   569  		s.clearObserverState(remote)
   570  
   571  		return
   572  	}
   573  }
   574  
   575  // This will clear any observer state such that stream or consumer assets on this server can become leaders again.
   576  func (s *Server) clearObserverState(remote *leafNodeCfg) {
   577  	s.mu.RLock()
   578  	accName := remote.LocalAccount
   579  	s.mu.RUnlock()
   580  
   581  	acc, err := s.LookupAccount(accName)
   582  	if err != nil {
   583  		s.Warnf("Error looking up account [%s] while clearing JetStream observer state on a leafnode", accName)
   584  		return
   585  	}
   586  
   587  	// Walk all streams looking for any clustered stream, skip otherwise.
   588  	for _, mset := range acc.streams() {
   589  		node := mset.raftNode()
   590  		if node == nil {
   591  			// Not R>1
   592  			continue
   593  		}
   594  		// Check consumers
   595  		for _, o := range mset.getConsumers() {
   596  			if n := o.raftNode(); n != nil {
   597  				// Ensure we can become a leader again.
   598  				n.SetObserver(false)
   599  			}
   600  		}
   601  		// Ensure we can become a leader again.
   602  		node.SetObserver(false)
   603  	}
   604  }
   605  
   606  // Check to see if we should migrate any assets from this account.
   607  func (s *Server) checkJetStreamMigrate(remote *leafNodeCfg) {
   608  	s.mu.RLock()
   609  	accName, shouldMigrate := remote.LocalAccount, remote.JetStreamClusterMigrate
   610  	s.mu.RUnlock()
   611  
   612  	if !shouldMigrate {
   613  		return
   614  	}
   615  
   616  	acc, err := s.LookupAccount(accName)
   617  	if err != nil {
   618  		s.Warnf("Error looking up account [%s] checking for JetStream migration on a leafnode", accName)
   619  		return
   620  	}
   621  
   622  	// Walk all streams looking for any clustered stream, skip otherwise.
   623  	// If we are the leader, force a stepdown.
   624  	for _, mset := range acc.streams() {
   625  		node := mset.raftNode()
   626  		if node == nil {
   627  			// Not R>1
   628  			continue
   629  		}
   630  		// Collect any consumers
   631  		for _, o := range mset.getConsumers() {
   632  			if n := o.raftNode(); n != nil {
   633  				if n.Leader() {
   634  					n.StepDown()
   635  				}
   636  				// Ensure we can not become a leader while in this state.
   637  				n.SetObserver(true)
   638  			}
   639  		}
   640  		// Stepdown if this stream was leader.
   641  		if node.Leader() {
   642  			node.StepDown()
   643  		}
   644  		// Ensure we can not become a leader while in this state.
   645  		node.SetObserver(true)
   646  	}
   647  }
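
// Migration on disconnect is opt-in per remote. A sketch of a configuration
// that would set JetStreamClusterMigrate (assuming the
// jetstream_cluster_migrate key maps to that option; values illustrative):
//
//	leafnodes {
//	    remotes = [
//	        {
//	            url: "nats-leaf://hub.example.com:7422"
//	            account: "JS"
//	            jetstream_cluster_migrate: true
//	        }
//	    ]
//	}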
   648  
   649  // Helper for checking if connecting to remote leafnodes is currently disabled (used in tests).
   650  func (s *Server) isLeafConnectDisabled() bool {
   651  	s.mu.RLock()
   652  	defer s.mu.RUnlock()
   653  	return s.leafDisableConnect
   654  }
   655  
   656  // Save off the tlsName for when we use TLS and mix hostnames and IPs. IPs usually
   657  // come from the server we connect to.
   658  //
   659  // We used to save the name only if there was a TLSConfig or scheme equal to "tls".
   660  // However, this was causing failures for users that did not set the scheme (and
   661  // their remote connections did not have a tls{} block).
   662  // We now save the host name regardless in case the remote returns an INFO indicating
   663  // that TLS is required.
   664  func (cfg *leafNodeCfg) saveTLSHostname(u *url.URL) {
   665  	if cfg.tlsName == _EMPTY_ && net.ParseIP(u.Hostname()) == nil {
   666  		cfg.tlsName = u.Hostname()
   667  	}
   668  }
   669  
   670  // Save off the username/password for when we connect using a bare URL
   671  // that we get from the INFO protocol.
   672  func (cfg *leafNodeCfg) saveUserPassword(u *url.URL) {
   673  	if cfg.username == _EMPTY_ && u.User != nil {
   674  		cfg.username = u.User.Username()
   675  		cfg.password, _ = u.User.Password()
   676  	}
   677  }
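
// For example (illustrative URL), with a configured remote URL of
// "nats-leaf://user:s3cret@hub.example.com:7422", the user/password saved here
// can be reused when an async INFO later advertises a bare "host:port" URL.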
   678  
   679  // This starts the leafnode accept loop in a go routine, unless it
   680  // is detected that the server has already been shut down.
   681  func (s *Server) startLeafNodeAcceptLoop() {
   682  	// Snapshot server options.
   683  	opts := s.getOpts()
   684  
   685  	port := opts.LeafNode.Port
   686  	if port == -1 {
   687  		port = 0
   688  	}
   689  
   690  	if s.isShuttingDown() {
   691  		return
   692  	}
   693  
   694  	s.mu.Lock()
   695  	hp := net.JoinHostPort(opts.LeafNode.Host, strconv.Itoa(port))
   696  	l, e := natsListen("tcp", hp)
   697  	s.leafNodeListenerErr = e
   698  	if e != nil {
   699  		s.mu.Unlock()
   700  		s.Fatalf("Error listening on leafnode port: %d - %v", opts.LeafNode.Port, e)
   701  		return
   702  	}
   703  
   704  	s.Noticef("Listening for leafnode connections on %s",
   705  		net.JoinHostPort(opts.LeafNode.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port)))
   706  
   707  	tlsRequired := opts.LeafNode.TLSConfig != nil
   708  	tlsVerify := tlsRequired && opts.LeafNode.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert
   709  	// Do not set compression in this Info object; it would possibly cause
   710  	// issues when sending an asynchronous INFO to the remote.
   711  	info := Info{
   712  		ID:            s.info.ID,
   713  		Name:          s.info.Name,
   714  		Version:       s.info.Version,
   715  		GitCommit:     gitCommit,
   716  		GoVersion:     runtime.Version(),
   717  		AuthRequired:  true,
   718  		TLSRequired:   tlsRequired,
   719  		TLSVerify:     tlsVerify,
   720  		MaxPayload:    s.info.MaxPayload, // TODO(dlc) - Allow override?
   721  		Headers:       s.supportsHeaders(),
   722  		JetStream:     opts.JetStream,
   723  		Domain:        opts.JetStreamDomain,
   724  		Proto:         s.getServerProto(),
   725  		InfoOnConnect: true,
   726  	}
   727  	// If we have selected a random port...
   728  	if port == 0 {
   729  		// Write resolved port back to options.
   730  		opts.LeafNode.Port = l.Addr().(*net.TCPAddr).Port
   731  	}
   732  
   733  	s.leafNodeInfo = info
   734  	// Possibly override Host/Port and set IP based on LeafNode.Advertise
   735  	if err := s.setLeafNodeInfoHostPortAndIP(); err != nil {
   736  		s.Fatalf("Error setting leafnode INFO with LeafNode.Advertise value of %s, err=%v", opts.LeafNode.Advertise, err)
   737  		l.Close()
   738  		s.mu.Unlock()
   739  		return
   740  	}
   741  	s.leafURLsMap[s.leafNodeInfo.IP]++
   742  	s.generateLeafNodeInfoJSON()
   743  
   744  	// Setup state that can enable shutdown
   745  	s.leafNodeListener = l
   746  
   747  	// As of now, a server that does not have remotes configured would
   748  	// never solicit a connection, so we should not have to warn if
   749  	// InsecureSkipVerify is set in the main LeafNodes config (since
   750  	// this TLS setting matters only when soliciting a connection).
   751  	// Still, warn if insecure is set in any LeafNode block.
   752  	// We need to check remotes, even if tls is not required on accept.
   753  	warn := tlsRequired && opts.LeafNode.TLSConfig.InsecureSkipVerify
   754  	if !warn {
   755  		for _, r := range opts.LeafNode.Remotes {
   756  			if r.TLSConfig != nil && r.TLSConfig.InsecureSkipVerify {
   757  				warn = true
   758  				break
   759  			}
   760  		}
   761  	}
   762  	if warn {
   763  		s.Warnf(leafnodeTLSInsecureWarning)
   764  	}
   765  	go s.acceptConnections(l, "Leafnode", func(conn net.Conn) { s.createLeafNode(conn, nil, nil, nil) }, nil)
   766  	s.mu.Unlock()
   767  }
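
// A minimal accept-side configuration for the loop above (7422 is the
// conventional leafnode port; certificate paths illustrative):
//
//	leafnodes {
//	    host: "0.0.0.0"
//	    port: 7422
//	    tls {
//	        cert_file: "/etc/nats/leaf-cert.pem"
//	        key_file:  "/etc/nats/leaf-key.pem"
//	    }
//	}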
   768  
   769  // RegEx to match a creds file with user JWT and Seed.
   770  var credsRe = regexp.MustCompile(`\s*(?:(?:[-]{3,}[^\n]*[-]{3,}\n)(.+)(?:\n\s*[-]{3,}[^\n]*[-]{3,}\n))`)
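
// The regex above expects the standard creds file layout: two dash-delimited
// blocks, the user JWT first and the nkey seed second (contents abbreviated):
//
//	-----BEGIN NATS USER JWT-----
//	eyJ0eXAiOiJKV1QiLCJhbGciOiJlZDI1NTE5LW5rZXkifQ...
//	------END NATS USER JWT------
//
//	-----BEGIN USER NKEY SEED-----
//	SUAG...
//	------END USER NKEY SEED------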
   771  
   772  // clusterName is provided as an argument to avoid lock ordering issues with the locked client c.
   773  // Lock should be held entering here.
   774  func (c *client) sendLeafConnect(clusterName string, headers bool) error {
   775  	// We support basic user/pass and operator based user JWT with signatures.
   776  	cinfo := leafConnectInfo{
   777  		Version:       VERSION,
   778  		ID:            c.srv.info.ID,
   779  		Domain:        c.srv.info.Domain,
   780  		Name:          c.srv.info.Name,
   781  		Hub:           c.leaf.remote.Hub,
   782  		Cluster:       clusterName,
   783  		Headers:       headers,
   784  		JetStream:     c.acc.jetStreamConfigured(),
   785  		DenyPub:       c.leaf.remote.DenyImports,
   786  		Compression:   c.leaf.compression,
   787  		RemoteAccount: c.acc.GetName(),
   788  		Proto:         c.srv.getServerProto(),
   789  	}
   790  
   791  	// If a signature callback is specified, this takes precedence over anything else.
   792  	if cb := c.leaf.remote.SignatureCB; cb != nil {
   793  		nonce := c.nonce
   794  		c.mu.Unlock()
   795  		jwt, sigraw, err := cb(nonce)
   796  		c.mu.Lock()
   797  		if err == nil && c.isClosed() {
   798  			err = ErrConnectionClosed
   799  		}
   800  		if err != nil {
   801  			c.Errorf("Error signing the nonce: %v", err)
   802  			return err
   803  		}
   804  		sig := base64.RawURLEncoding.EncodeToString(sigraw)
   805  		cinfo.JWT, cinfo.Sig = jwt, sig
   806  
   807  	} else if creds := c.leaf.remote.Credentials; creds != _EMPTY_ {
   808  		// Otherwise, check for a credentials file; it takes precedence over nkey and user/password.
   809  		c.Debugf("Authenticating with credentials file %q", c.leaf.remote.Credentials)
   810  		contents, err := os.ReadFile(creds)
   811  		if err != nil {
   812  			c.Errorf("%v", err)
   813  			return err
   814  		}
   815  		defer wipeSlice(contents)
   816  		items := credsRe.FindAllSubmatch(contents, -1)
   817  		if len(items) < 2 {
   818  			c.Errorf("Credentials file malformed")
   819  			return fmt.Errorf("credentials file malformed")
   820  		}
   821  		// First result should be the user JWT.
   822  		// We copy here so that the file containing the seed will be wiped appropriately.
   823  		raw := items[0][1]
   824  		tmp := make([]byte, len(raw))
   825  		copy(tmp, raw)
   826  		// Seed is second item.
   827  		kp, err := nkeys.FromSeed(items[1][1])
   828  		if err != nil {
   829  			c.Errorf("Credentials file has malformed seed")
   830  			return err
   831  		}
   832  		// Wipe our key on exit.
   833  		defer kp.Wipe()
   834  
   835  		sigraw, _ := kp.Sign(c.nonce)
   836  		sig := base64.RawURLEncoding.EncodeToString(sigraw)
   837  		cinfo.JWT = bytesToString(tmp)
   838  		cinfo.Sig = sig
   839  	} else if nkey := c.leaf.remote.Nkey; nkey != _EMPTY_ {
   840  		kp, err := nkeys.FromSeed([]byte(nkey))
   841  		if err != nil {
   842  			c.Errorf("Remote nkey has malformed seed")
   843  			return err
   844  		}
   845  		// Wipe our key on exit.
   846  		defer kp.Wipe()
   847  		sigraw, _ := kp.Sign(c.nonce)
   848  		sig := base64.RawURLEncoding.EncodeToString(sigraw)
   849  		pkey, _ := kp.PublicKey()
   850  		cinfo.Nkey = pkey
   851  		cinfo.Sig = sig
   852  	} else if userInfo := c.leaf.remote.curURL.User; userInfo != nil {
   853  		cinfo.User = userInfo.Username()
   854  		cinfo.Pass, _ = userInfo.Password()
   855  	} else if c.leaf.remote.username != _EMPTY_ {
   856  		cinfo.User = c.leaf.remote.username
   857  		cinfo.Pass = c.leaf.remote.password
   858  	}
   859  	b, err := json.Marshal(cinfo)
   860  	if err != nil {
   861  		c.Errorf("Error marshaling CONNECT to remote leafnode: %v\n", err)
   862  		return err
   863  	}
   864  	// Although this call is made before the writeLoop is created,
   865  	// we don't really need to send in place. The protocol will be
   866  	// sent out by the writeLoop.
   867  	c.enqueueProto([]byte(fmt.Sprintf(ConProto, b)))
   868  	return nil
   869  }
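
// On the wire, the CONNECT enqueued above is a single line such as the
// following (values abbreviated and JSON field names illustrative; the exact
// tags are defined on leafConnectInfo):
//
//	CONNECT {"version":"2.11.0","name":"leaf1","cluster":"spoke","headers":true,"jwt":"eyJ0...","sig":"a1b2..."}\r\n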
   870  
   871  // Makes a deep copy of the LeafNode Info structure.
   872  // The server lock is held on entry.
   873  func (s *Server) copyLeafNodeInfo() *Info {
   874  	clone := s.leafNodeInfo
   875  	// Copy the array of urls.
   876  	if len(s.leafNodeInfo.LeafNodeURLs) > 0 {
   877  		clone.LeafNodeURLs = append([]string(nil), s.leafNodeInfo.LeafNodeURLs...)
   878  	}
   879  	return &clone
   880  }
   881  
   882  // Adds to the Info structure a LeafNode URL that we get when a route connects.
   883  // Regenerates the JSON byte array so that it can be sent to LeafNode connections.
   884  // Returns a boolean indicating if the URL was added or not.
   885  // Server lock is held on entry
   886  func (s *Server) addLeafNodeURL(urlStr string) bool {
   887  	if s.leafURLsMap.addUrl(urlStr) {
   888  		s.generateLeafNodeInfoJSON()
   889  		return true
   890  	}
   891  	return false
   892  }
   893  
   894  // Removes from the Info structure the LeafNode URL of the route that is disconnecting.
   895  // Regenerates the JSON byte array so that it can be sent to LeafNode connections.
   896  // Returns a boolean indicating if the URL was removed or not.
   897  // Server lock is held on entry.
   898  func (s *Server) removeLeafNodeURL(urlStr string) bool {
   899  	// Don't need to do this if we are removing the route connection because
   900  	// we are shutting down...
   901  	if s.isShuttingDown() {
   902  		return false
   903  	}
   904  	if s.leafURLsMap.removeUrl(urlStr) {
   905  		s.generateLeafNodeInfoJSON()
   906  		return true
   907  	}
   908  	return false
   909  }
   910  
   911  // Server lock is held on entry
   912  func (s *Server) generateLeafNodeInfoJSON() {
   913  	s.leafNodeInfo.Cluster = s.cachedClusterName()
   914  	s.leafNodeInfo.LeafNodeURLs = s.leafURLsMap.getAsStringSlice()
   915  	s.leafNodeInfo.WSConnectURLs = s.websocket.connectURLsMap.getAsStringSlice()
   916  	s.leafNodeInfoJSON = generateInfoJSON(&s.leafNodeInfo)
   917  }
   918  
   919  // Sends an async INFO protocol so that the connected servers can update
   920  // their list of LeafNode URLs.
   921  func (s *Server) sendAsyncLeafNodeInfo() {
   922  	for _, c := range s.leafs {
   923  		c.mu.Lock()
   924  		c.enqueueProto(s.leafNodeInfoJSON)
   925  		c.mu.Unlock()
   926  	}
   927  }
   928  
   929  // Called when an inbound leafnode connection is accepted or we create one for a solicited leafnode.
   930  func (s *Server) createLeafNode(conn net.Conn, rURL *url.URL, remote *leafNodeCfg, ws *websocket) *client {
   931  	// Snapshot server options.
   932  	opts := s.getOpts()
   933  
   934  	maxPay := int32(opts.MaxPayload)
   935  	maxSubs := int32(opts.MaxSubs)
   936  	// For system, maxSubs of 0 means unlimited, so re-adjust here.
   937  	if maxSubs == 0 {
   938  		maxSubs = -1
   939  	}
   940  	now := time.Now().UTC()
   941  
   942  	c := &client{srv: s, nc: conn, kind: LEAF, opts: defaultOpts, mpay: maxPay, msubs: maxSubs, start: now, last: now}
   943  	// Do not update the smap here; we need to do it in initLeafNodeSmapAndSendSubs.
   944  	c.leaf = &leaf{}
   945  
   946  	// For accepted LN connections, ws will be != nil if it was accepted
   947  	// through the Websocket port.
   948  	c.ws = ws
   949  
   950  	// For a remote, check if the scheme starts with "ws"; if so, we will initiate
   951  	// a remote Leaf Node connection as a websocket connection.
   952  	if remote != nil && rURL != nil && isWSURL(rURL) {
   953  		remote.RLock()
   954  		c.ws = &websocket{compress: remote.Websocket.Compression, maskwrite: !remote.Websocket.NoMasking}
   955  		remote.RUnlock()
   956  	}
   957  
   958  	// Determines if we are soliciting the connection or not.
   959  	var solicited bool
   960  	var acc *Account
   961  	var remoteSuffix string
   962  	if remote != nil {
   963  		// For now, if lookup fails, we will constantly try
   964  		// to recreate this LN connection.
   965  		lacc := remote.LocalAccount
   966  		var err error
   967  		acc, err = s.LookupAccount(lacc)
   968  		if err != nil {
   969  			// An account not existing can happen with the nats/http account resolver when the account
   970  			// has not yet been pushed, or when the request failed for other reasons.
   971  			// remote needs to be set or retry won't happen
   972  			c.leaf.remote = remote
   973  			c.closeConnection(MissingAccount)
   974  			s.Errorf("Unable to lookup account %s for solicited leafnode connection: %v", lacc, err)
   975  			return nil
   976  		}
   977  		remoteSuffix = fmt.Sprintf(" for account: %s", acc.traceLabel())
   978  	}
   979  
   980  	c.mu.Lock()
   981  	c.initClient()
   982  	c.Noticef("Leafnode connection created%s %s", remoteSuffix, c.opts.Name)
   983  
   984  	var tlsFirst bool
   985  	if remote != nil {
   986  		solicited = true
   987  		remote.Lock()
   988  		c.leaf.remote = remote
   989  		c.setPermissions(remote.perms)
   990  		if !c.leaf.remote.Hub {
   991  			c.leaf.isSpoke = true
   992  		}
   993  		tlsFirst = remote.TLSHandshakeFirst
   994  		remote.Unlock()
   995  		c.acc = acc
   996  	} else {
   997  		c.flags.set(expectConnect)
   998  		if ws != nil {
   999  			c.Debugf("Leafnode compression=%v", c.ws.compress)
  1000  		}
  1001  	}
  1002  	c.mu.Unlock()
  1003  
  1004  	var nonce [nonceLen]byte
  1005  	var info *Info
  1006  
  1007  	// Grab this before the client lock below.
  1008  	if !solicited {
  1009  		// Grab server variables
  1010  		s.mu.Lock()
  1011  		info = s.copyLeafNodeInfo()
  1012  		// For tests that want to simulate old servers, do not set the compression
  1013  		// on the INFO protocol if configured with CompressionNotSupported.
  1014  		if cm := opts.LeafNode.Compression.Mode; cm != CompressionNotSupported {
  1015  			info.Compression = cm
  1016  		}
  1017  		s.generateNonce(nonce[:])
  1018  		s.mu.Unlock()
  1019  	}
  1020  
  1021  	// Grab lock
  1022  	c.mu.Lock()
  1023  
  1024  	var preBuf []byte
  1025  	if solicited {
  1026  		// For a websocket connection, we need to send an HTTP request,
  1027  		// and get the response before starting the readLoop to get
  1028  		// the INFO, etc.
  1029  		if c.isWebsocket() {
  1030  			var err error
  1031  			var closeReason ClosedState
  1032  
  1033  			preBuf, closeReason, err = c.leafNodeSolicitWSConnection(opts, rURL, remote)
  1034  			if err != nil {
  1035  				c.Errorf("Error soliciting websocket connection: %v", err)
  1036  				c.mu.Unlock()
  1037  				if closeReason != 0 {
  1038  					c.closeConnection(closeReason)
  1039  				}
  1040  				return nil
  1041  			}
  1042  		} else {
  1043  			// If configured to do TLS handshake first
  1044  			if tlsFirst {
  1045  				if _, err := c.leafClientHandshakeIfNeeded(remote, opts); err != nil {
  1046  					c.mu.Unlock()
  1047  					return nil
  1048  				}
  1049  			}
  1050  			// We need to wait for the info, but not for too long.
  1051  			c.nc.SetReadDeadline(time.Now().Add(DEFAULT_LEAFNODE_INFO_WAIT))
  1052  		}
  1053  
  1054  		// We will process the INFO from the readloop and finish by
  1055  		// sending the CONNECT and finish registration later.
  1056  	} else {
  1057  		// Send our info to the other side.
  1058  		// Remember the nonce we sent here for signatures, etc.
  1059  		c.nonce = make([]byte, nonceLen)
  1060  		copy(c.nonce, nonce[:])
  1061  		info.Nonce = bytesToString(c.nonce)
  1062  		info.CID = c.cid
  1063  		proto := generateInfoJSON(info)
  1064  		if !opts.LeafNode.TLSHandshakeFirst {
  1065  			// We have to send from this go routine because we may
  1066  			// have to block for the TLS handshake before we start our
  1067  			// writeLoop go routine. The other side needs to receive
  1068  			// this before it can initiate the TLS handshake.
  1069  			c.sendProtoNow(proto)
  1070  
  1071  			// The above call could have marked the connection as closed (due to TCP error).
  1072  			if c.isClosed() {
  1073  				c.mu.Unlock()
  1074  				c.closeConnection(WriteError)
  1075  				return nil
  1076  			}
  1077  		}
  1078  
  1079  		// Check to see if we need to spin up TLS.
  1080  		if !c.isWebsocket() && info.TLSRequired {
  1081  			// Perform server-side TLS handshake.
  1082  			if err := c.doTLSServerHandshake(tlsHandshakeLeaf, opts.LeafNode.TLSConfig, opts.LeafNode.TLSTimeout, opts.LeafNode.TLSPinnedCerts); err != nil {
  1083  				c.mu.Unlock()
  1084  				return nil
  1085  			}
  1086  		}
  1087  
  1088  		// If the user wants the TLS handshake to occur first, now that it is
  1089  		// done, send the INFO protocol.
  1090  		if opts.LeafNode.TLSHandshakeFirst {
  1091  			c.sendProtoNow(proto)
  1092  			if c.isClosed() {
  1093  				c.mu.Unlock()
  1094  				c.closeConnection(WriteError)
  1095  				return nil
  1096  			}
  1097  		}
  1098  
  1099  		// Leaf nodes will always require a CONNECT to let us know
  1100  		// when we are properly bound to an account.
  1101  		//
  1102  		// If compression is configured, we can't set the authTimer here because
  1103  		// it would cause the parser to fail any incoming protocol that is not a
  1104  		// CONNECT (and we need to exchange INFO protocols for compression
  1105  		// negotiation). So instead, use the ping timer until we are done with
  1106  		// negotiation and can set the auth timer.
  1107  		timeout := secondsToDuration(opts.LeafNode.AuthTimeout)
  1108  		if needsCompression(opts.LeafNode.Compression.Mode) {
  1109  			c.ping.tmr = time.AfterFunc(timeout, func() {
  1110  				c.authTimeout()
  1111  			})
  1112  		} else {
  1113  			c.setAuthTimer(timeout)
  1114  		}
  1115  	}
  1116  
  1117  	// Keep track in case server is shutdown before we can successfully register.
  1118  	if !s.addToTempClients(c.cid, c) {
  1119  		c.mu.Unlock()
  1120  		c.setNoReconnect()
  1121  		c.closeConnection(ServerShutdown)
  1122  		return nil
  1123  	}
  1124  
  1125  	// Spin up the read loop.
  1126  	s.startGoRoutine(func() { c.readLoop(preBuf) })
  1127  
  1128  	// We will spin the write loop for solicited connections only
  1129  	// when processing the INFO and after switching to TLS if needed.
  1130  	if !solicited {
  1131  		s.startGoRoutine(func() { c.writeLoop() })
  1132  	}
  1133  
  1134  	c.mu.Unlock()
  1135  
  1136  	return c
  1137  }
  1138  
  1139  // Will perform the client-side TLS handshake if needed. Assumes that this
  1140  // is called by the solicit side (remote will be non-nil). Returns `true`
  1141  // if TLS is required, `false` otherwise.
  1142  // Lock held on entry.
  1143  func (c *client) leafClientHandshakeIfNeeded(remote *leafNodeCfg, opts *Options) (bool, error) {
  1144  	// Check if TLS is required and gather TLS config variables.
  1145  	tlsRequired, tlsConfig, tlsName, tlsTimeout := c.leafNodeGetTLSConfigForSolicit(remote)
  1146  	if !tlsRequired {
  1147  		return false, nil
  1148  	}
  1149  
  1150  	// If TLS is required, perform the handshake.
  1151  	// Get the URL that was used to connect to the remote server.
  1152  	rURL := remote.getCurrentURL()
  1153  
  1154  	// Perform the client-side TLS handshake.
  1155  	if resetTLSName, err := c.doTLSClientHandshake(tlsHandshakeLeaf, rURL, tlsConfig, tlsName, tlsTimeout, opts.LeafNode.TLSPinnedCerts); err != nil {
  1156  		// Check if we need to reset the remote's TLS name.
  1157  		if resetTLSName {
  1158  			remote.Lock()
  1159  			remote.tlsName = _EMPTY_
  1160  			remote.Unlock()
  1161  		}
  1162  		return false, err
  1163  	}
  1164  	return true, nil
  1165  }
  1166  
  1167  func (c *client) processLeafnodeInfo(info *Info) {
  1168  	c.mu.Lock()
  1169  	if c.leaf == nil || c.isClosed() {
  1170  		c.mu.Unlock()
  1171  		return
  1172  	}
  1173  	s := c.srv
  1174  	opts := s.getOpts()
  1175  	remote := c.leaf.remote
  1176  	didSolicit := remote != nil
  1177  	firstINFO := !c.flags.isSet(infoReceived)
  1178  
  1179  	// In case of websocket, the TLS handshake has been already done.
  1180  	// So check only for non websocket connections and for configurations
  1181  	// where the TLS Handshake was not done first.
  1182  	if didSolicit && !c.flags.isSet(handshakeComplete) && !c.isWebsocket() && !remote.TLSHandshakeFirst {
  1183  		// If the server requires TLS, we need to set this in the remote
  1184  		// otherwise if there is no TLS configuration block for the remote,
  1185  		// the solicit side will not attempt to perform the TLS handshake.
  1186  		if firstINFO && info.TLSRequired {
  1187  			remote.TLS = true
  1188  		}
  1189  		if _, err := c.leafClientHandshakeIfNeeded(remote, opts); err != nil {
  1190  			c.mu.Unlock()
  1191  			return
  1192  		}
  1193  	}
  1194  
  1195  	// Check for compression, unless already done.
  1196  	if firstINFO && !c.flags.isSet(compressionNegotiated) {
  1197  		// Prevent from getting back here.
  1198  		c.flags.set(compressionNegotiated)
  1199  
  1200  		var co *CompressionOpts
  1201  		if !didSolicit {
  1202  			co = &opts.LeafNode.Compression
  1203  		} else {
  1204  			co = &remote.Compression
  1205  		}
  1206  		if needsCompression(co.Mode) {
  1207  			// Release client lock since following function will need server lock.
  1208  			c.mu.Unlock()
  1209  			compress, err := s.negotiateLeafCompression(c, didSolicit, info.Compression, co)
  1210  			if err != nil {
  1211  				c.sendErrAndErr(err.Error())
  1212  				c.closeConnection(ProtocolViolation)
  1213  				return
  1214  			}
  1215  			if compress {
  1216  				// Done for now, will get back another INFO protocol...
  1217  				return
  1218  			}
  1219  			// No compression because one side either does not want it or cannot do it, so proceed.
  1220  			c.mu.Lock()
  1221  			// Check that the connection did not close if the lock was released.
  1222  			if c.isClosed() {
  1223  				c.mu.Unlock()
  1224  				return
  1225  			}
  1226  		} else {
  1227  			// Coming from an old server, the Compression field would be the empty
  1228  			// string. For servers that are configured with CompressionNotSupported,
  1229  			// this makes them behave as old servers.
  1230  			if info.Compression == _EMPTY_ || co.Mode == CompressionNotSupported {
  1231  				c.leaf.compression = CompressionNotSupported
  1232  			} else {
  1233  				c.leaf.compression = CompressionOff
  1234  			}
  1235  		}
  1236  		// The accepting side does not normally process an INFO protocol during
  1237  		// the initial connection handshake. So we keep it consistent by returning
  1238  		// if we are not soliciting.
  1239  		if !didSolicit {
  1240  			// If we had created the ping timer instead of the auth timer, we will
  1241  			// clear the ping timer and set the auth timer now that the compression
  1242  			// negotiation is done.
  1243  			if info.Compression != _EMPTY_ && c.ping.tmr != nil {
  1244  				clearTimer(&c.ping.tmr)
  1245  				c.setAuthTimer(secondsToDuration(opts.LeafNode.AuthTimeout))
  1246  			}
  1247  			c.mu.Unlock()
  1248  			return
  1249  		}
  1250  		// Fall through and process the INFO protocol as usual.
  1251  	}
  1252  
  1253  	// Note: For now, only the initial INFO has a nonce. We
  1254  	// will probably do auto key rotation at some point.
  1255  	if firstINFO {
  1256  		// Mark that the INFO protocol has been received.
  1257  		c.flags.set(infoReceived)
  1258  		// Prevent connecting to non leafnode port. Need to do this only for
  1259  		// the first INFO, not for async INFO updates...
  1260  		//
  1261  		// Content of INFO sent by the server when accepting a tcp connection.
  1262  		// -------------------------------------------------------------------
  1263  		// Listen Port Of | CID | ClientConnectURLs | LeafNodeURLs | Gateway |
  1264  		// -------------------------------------------------------------------
  1265  		//      CLIENT    |  X* |        X**        |              |         |
  1266  		//      ROUTE     |     |        X**        |      X***    |         |
  1267  		//     GATEWAY    |     |                   |              |    X    |
  1268  		//     LEAFNODE   |  X  |                   |       X      |         |
  1269  		// -------------------------------------------------------------------
  1270  		// *   Not on older servers.
  1271  		// **  Not if "no advertise" is enabled.
  1272  		// *** Not if leafnode's "no advertise" is enabled.
  1273  		//
  1274  		// As seen from above, a solicited LeafNode connection should receive
  1275  		// from the remote server an INFO with CID and LeafNodeURLs. Anything
  1276  		// else should be considered an attempt to connect to a wrong port.
  1277  		if didSolicit && (info.CID == 0 || info.LeafNodeURLs == nil) {
  1278  			c.mu.Unlock()
  1279  			c.Errorf(ErrConnectedToWrongPort.Error())
  1280  			c.closeConnection(WrongPort)
  1281  			return
  1282  		}
  1283  		// Capture a nonce here.
  1284  		c.nonce = []byte(info.Nonce)
  1285  		if info.TLSRequired && didSolicit {
  1286  			remote.TLS = true
  1287  		}
  1288  		supportsHeaders := c.srv.supportsHeaders()
  1289  		c.headers = supportsHeaders && info.Headers
  1290  
  1291  		// Remember the remote server.
  1292  		// Pre 2.2.0 servers are not sending their server name.
  1293  		// In that case, use info.ID, which, for those servers, matches
  1294  		// the content of the field `Name` in the leafnode CONNECT protocol.
  1295  		if info.Name == _EMPTY_ {
  1296  			c.leaf.remoteServer = info.ID
  1297  		} else {
  1298  			c.leaf.remoteServer = info.Name
  1299  		}
  1300  		c.leaf.remoteDomain = info.Domain
  1301  		c.leaf.remoteCluster = info.Cluster
  1302  		// We send the protocol version in the INFO protocol.
  1303  		// Keep track of it, so we know if this connection supports message
  1304  		// tracing for instance.
  1305  		c.opts.Protocol = info.Proto
  1306  	}
  1307  
  1308  	// For both the initial INFO and async INFO protocols, possibly
  1309  	// update our list of remote leafnode URLs we can connect to.
  1310  	if didSolicit && (len(info.LeafNodeURLs) > 0 || len(info.WSConnectURLs) > 0) {
  1311  		// Consider the incoming array as the most up-to-date
  1312  		// representation of the remote cluster's list of URLs.
  1313  		c.updateLeafNodeURLs(info)
  1314  	}
  1315  
  1316  	// Check to see if we have permissions updates here.
  1317  	if info.Import != nil || info.Export != nil {
  1318  		perms := &Permissions{
  1319  			Publish:   info.Export,
  1320  			Subscribe: info.Import,
  1321  		}
  1322  		// Check if we have local deny clauses that we need to merge.
  1323  		if remote := c.leaf.remote; remote != nil {
  1324  			if len(remote.DenyExports) > 0 {
  1325  				if perms.Publish == nil {
  1326  					perms.Publish = &SubjectPermission{}
  1327  				}
  1328  				perms.Publish.Deny = append(perms.Publish.Deny, remote.DenyExports...)
  1329  			}
  1330  			if len(remote.DenyImports) > 0 {
  1331  				if perms.Subscribe == nil {
  1332  					perms.Subscribe = &SubjectPermission{}
  1333  				}
  1334  				perms.Subscribe.Deny = append(perms.Subscribe.Deny, remote.DenyImports...)
  1335  			}
  1336  		}
  1337  		c.setPermissions(perms)
  1338  	}
  1339  
  1340  	var resumeConnect bool
  1341  
  1342  	// If this is a remote connection and this is the first INFO protocol,
  1343  	// then we need to finish the connect process by sending CONNECT, etc..
  1344  	if firstINFO && didSolicit {
  1345  		// Clear deadline that was set in createLeafNode while waiting for the INFO.
  1346  		c.nc.SetDeadline(time.Time{})
  1347  		resumeConnect = true
  1348  	} else if !firstINFO && didSolicit {
  1349  		c.leaf.remoteAccName = info.RemoteAccount
  1350  	}
  1351  
  1352  	// Check if we have the remote account information and if so make sure it's stored.
  1353  	if info.RemoteAccount != _EMPTY_ {
  1354  		s.leafRemoteAccounts.Store(c.acc.Name, info.RemoteAccount)
  1355  	}
  1356  	c.mu.Unlock()
  1357  
  1358  	finishConnect := info.ConnectInfo
  1359  	if resumeConnect && s != nil {
  1360  		s.leafNodeResumeConnectProcess(c)
  1361  		if !info.InfoOnConnect {
  1362  			finishConnect = true
  1363  		}
  1364  	}
  1365  	if finishConnect {
  1366  		s.leafNodeFinishConnectProcess(c)
  1367  	}
  1368  }
  1369  
  1370  func (s *Server) negotiateLeafCompression(c *client, didSolicit bool, infoCompression string, co *CompressionOpts) (bool, error) {
  1371  	// Negotiate the appropriate compression mode (or no compression)
  1372  	cm, err := selectCompressionMode(co.Mode, infoCompression)
  1373  	if err != nil {
  1374  		return false, err
  1375  	}
  1376  	c.mu.Lock()
  1377  	// For "auto" mode, set the initial compression mode based on RTT
  1378  	if cm == CompressionS2Auto {
  1379  		if c.rttStart.IsZero() {
  1380  			c.rtt = computeRTT(c.start)
  1381  		}
  1382  		cm = selectS2AutoModeBasedOnRTT(c.rtt, co.RTTThresholds)
  1383  	}
  1384  	// Keep track of the negotiated compression mode.
  1385  	c.leaf.compression = cm
  1386  	cid := c.cid
  1387  	var nonce string
  1388  	if !didSolicit {
  1389  		nonce = bytesToString(c.nonce)
  1390  	}
  1391  	c.mu.Unlock()
  1392  
  1393  	if !needsCompression(cm) {
  1394  		return false, nil
  1395  	}
  1396  
  1397  	// If we end-up doing compression...
  1398  
  1399  	// Generate an INFO with the chosen compression mode.
  1400  	s.mu.Lock()
  1401  	info := s.copyLeafNodeInfo()
  1402  	info.Compression, info.CID, info.Nonce = compressionModeForInfoProtocol(co, cm), cid, nonce
  1403  	infoProto := generateInfoJSON(info)
  1404  	s.mu.Unlock()
  1405  
  1406  	// If we solicited, then send this INFO protocol BEFORE switching
  1407  	// to compression writer. However, if we did not, we send it after.
  1408  	c.mu.Lock()
  1409  	if didSolicit {
  1410  		c.enqueueProto(infoProto)
  1411  		// Make sure it is completely flushed (the pending bytes go to
  1412  		// 0) before proceeding.
  1413  		for c.out.pb > 0 && !c.isClosed() {
  1414  			c.flushOutbound()
  1415  		}
  1416  	}
  1417  	// This is to notify the readLoop that it should switch to a
  1418  	// (de)compression reader.
  1419  	c.in.flags.set(switchToCompression)
  1420  	// Create the compress writer before queueing the INFO protocol for
  1421  	// a leafnode that did not solicit. This makes sure that the proto
  1422  	// is sent with compression on.
  1423  	c.out.cw = s2.NewWriter(nil, s2WriterOptions(cm)...)
  1424  	if !didSolicit {
  1425  		c.enqueueProto(infoProto)
  1426  	}
  1427  	c.mu.Unlock()
  1428  	return true, nil
  1429  }
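
        // A sketch of the "auto" selection above, assuming illustrative RTT
        // thresholds of [10ms, 50ms, 100ms] (the actual values come from
        // co.RTTThresholds and are configurable):
        //
        //   RTT < 10ms  -> s2_uncompressed
        //   RTT < 50ms  -> s2_fast
        //   RTT < 100ms -> s2_better
        //   otherwise   -> s2_best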
  1430  
  1431  // When getting a leaf node INFO protocol, use the provided
  1432  // array of urls to update the list of possible endpoints.
  1433  func (c *client) updateLeafNodeURLs(info *Info) {
  1434  	cfg := c.leaf.remote
  1435  	cfg.Lock()
  1436  	defer cfg.Unlock()
  1437  
  1438  	// We have ensured that if a remote has a WS scheme, then all of its URLs are.
  1439  	// So check if the first is WS, then add WS URLs, otherwise add non-WS ones.
  1440  	if len(cfg.URLs) > 0 && isWSURL(cfg.URLs[0]) {
  1441  		// It does not really matter if we use "ws://" or "wss://" here since
  1442  		// we will have already marked that the remote should use TLS anyway.
  1443  		// But use proper scheme for log statements, etc...
  1444  		proto := wsSchemePrefix
  1445  		if cfg.TLS {
  1446  			proto = wsSchemePrefixTLS
  1447  		}
  1448  		c.doUpdateLNURLs(cfg, proto, info.WSConnectURLs)
  1449  		return
  1450  	}
  1451  	c.doUpdateLNURLs(cfg, "nats-leaf", info.LeafNodeURLs)
  1452  }
  1453  
  1454  func (c *client) doUpdateLNURLs(cfg *leafNodeCfg, scheme string, URLs []string) {
  1455  	cfg.urls = make([]*url.URL, 0, 1+len(URLs))
  1456  	// Add the ones we receive in the protocol
  1457  	for _, surl := range URLs {
  1458  		url, err := url.Parse(fmt.Sprintf("%s://%s", scheme, surl))
  1459  		if err != nil {
  1460  			// As per below, the URLs we receive should not have contained user info, so this should be safe to log.
  1461  			c.Errorf("Error parsing url %q: %v", surl, err)
  1462  			continue
  1463  		}
  1464  		// Do not add if it's the same as what we already have configured.
  1465  		var dup bool
  1466  		for _, u := range cfg.URLs {
  1467  			// URLs that we receive never have user info, but the
  1468  			// ones that were configured may have. Simply compare
  1469  			// host and port to decide if they are equal or not.
  1470  			if url.Host == u.Host && url.Port() == u.Port() {
  1471  				dup = true
  1472  				break
  1473  			}
  1474  		}
  1475  		if !dup {
  1476  			cfg.urls = append(cfg.urls, url)
  1477  			cfg.saveTLSHostname(url)
  1478  		}
  1479  	}
  1480  	// Add the configured one
  1481  	cfg.urls = append(cfg.urls, cfg.URLs...)
  1482  }
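
        // For illustration (hypothetical hosts): with a configured URL of
        // "nats-leaf://hub1:7422" and a received list of ["hub1:7422", "hub2:7422"],
        // the merged cfg.urls becomes [hub2:7422, hub1:7422]: the received
        // "hub1:7422" is dropped as a duplicate and the configured entry is
        // appended last.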
  1483  
  1484  // Similar to setInfoHostPortAndGenerateJSON, but for leafNodeInfo.
  1485  func (s *Server) setLeafNodeInfoHostPortAndIP() error {
  1486  	opts := s.getOpts()
  1487  	if opts.LeafNode.Advertise != _EMPTY_ {
  1488  		advHost, advPort, err := parseHostPort(opts.LeafNode.Advertise, opts.LeafNode.Port)
  1489  		if err != nil {
  1490  			return err
  1491  		}
  1492  		s.leafNodeInfo.Host = advHost
  1493  		s.leafNodeInfo.Port = advPort
  1494  	} else {
  1495  		s.leafNodeInfo.Host = opts.LeafNode.Host
  1496  		s.leafNodeInfo.Port = opts.LeafNode.Port
  1497  		// If the host is "0.0.0.0" or "::" we need to resolve to a public IP.
  1498  		// This will return at most 1 IP.
  1499  		hostIsIPAny, ips, err := s.getNonLocalIPsIfHostIsIPAny(s.leafNodeInfo.Host, false)
  1500  		if err != nil {
  1501  			return err
  1502  		}
  1503  		if hostIsIPAny {
  1504  			if len(ips) == 0 {
  1505  				s.Errorf("Could not find any non-local IP for leafnode's listen specification %q",
  1506  					s.leafNodeInfo.Host)
  1507  			} else {
  1508  				// Take the first from the list...
  1509  				s.leafNodeInfo.Host = ips[0]
  1510  			}
  1511  		}
  1512  	}
  1513  	// Use just host:port for the IP
  1514  	s.leafNodeInfo.IP = net.JoinHostPort(s.leafNodeInfo.Host, strconv.Itoa(s.leafNodeInfo.Port))
  1515  	if opts.LeafNode.Advertise != _EMPTY_ {
  1516  		s.Noticef("Advertise address for leafnode is set to %s", s.leafNodeInfo.IP)
  1517  	}
  1518  	return nil
  1519  }
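
        // Example with hypothetical values: "advertise: leaf.example.com:7422" yields
        // Host "leaf.example.com", Port 7422, and IP "leaf.example.com:7422", while
        // "listen: 0.0.0.0:7422" without advertise replaces the host with the first
        // non-local IP found, e.g. "192.168.1.10", giving IP "192.168.1.10:7422".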
  1520  
  1521  // Add the connection to the map of leaf nodes.
  1522  // If `checkForDup` is true (invoked when a leafnode is accepted), then we check
  1523  // if a connection already exists for the same server name and account.
  1524  // That can happen when the remote is attempting to reconnect while the accepting
  1525  // side did not detect the connection as broken yet.
  1526  // But it can also happen when there is a misconfiguration and the remote is
  1527  // creating two (or more) connections that bind to the same account on the accept
  1528  // side.
  1529  // When a duplicate is found, the new connection is accepted and the old is closed
  1530  // (this solves the stale connection situation). An error is returned to help the
  1531  // remote detect the misconfiguration when the duplicate is the result of that
  1532  // misconfiguration.
  1533  func (s *Server) addLeafNodeConnection(c *client, srvName, clusterName string, checkForDup bool) {
  1534  	var accName string
  1535  	c.mu.Lock()
  1536  	cid := c.cid
  1537  	acc := c.acc
  1538  	if acc != nil {
  1539  		accName = acc.Name
  1540  	}
  1541  	myRemoteDomain := c.leaf.remoteDomain
  1542  	mySrvName := c.leaf.remoteServer
  1543  	remoteAccName := c.leaf.remoteAccName
  1544  	myClustName := c.leaf.remoteCluster
  1545  	solicited := c.leaf.remote != nil
  1546  	c.mu.Unlock()
  1547  
  1548  	var old *client
  1549  	s.mu.Lock()
  1550  	// We check for empty because in some tests we may send an empty CONNECT{}.
  1551  	if checkForDup && srvName != _EMPTY_ {
  1552  		for _, ol := range s.leafs {
  1553  			ol.mu.Lock()
  1554  			// We only care here about non-solicited leafnodes. This function
  1555  			// is more about replacing stale connections than detecting loops.
  1556  			// We have code for loop detection elsewhere, which also delays
  1557  			// the attempt to reconnect.
  1558  			if !ol.isSolicitedLeafNode() && ol.leaf.remoteServer == srvName &&
  1559  				ol.leaf.remoteCluster == clusterName && ol.acc.Name == accName &&
  1560  				remoteAccName != _EMPTY_ && ol.leaf.remoteAccName == remoteAccName {
  1561  				old = ol
  1562  			}
  1563  			ol.mu.Unlock()
  1564  			if old != nil {
  1565  				break
  1566  			}
  1567  		}
  1568  	}
  1569  	// Store new connection in the map
  1570  	s.leafs[cid] = c
  1571  	s.mu.Unlock()
  1572  	s.removeFromTempClients(cid)
  1573  
  1574  	// If applicable, evict the old one.
  1575  	if old != nil {
  1576  		old.sendErrAndErr(DuplicateRemoteLeafnodeConnection.String())
  1577  		old.closeConnection(DuplicateRemoteLeafnodeConnection)
  1578  		c.Warnf("Replacing connection from same server")
  1579  	}
  1580  
  1581  	srvDecorated := func() string {
  1582  		if myClustName == _EMPTY_ {
  1583  			return mySrvName
  1584  		}
  1585  		return fmt.Sprintf("%s/%s", mySrvName, myClustName)
  1586  	}
  1587  
  1588  	opts := s.getOpts()
  1589  	sysAcc := s.SystemAccount()
  1590  	js := s.getJetStream()
  1591  	var meta *raft
  1592  	if js != nil {
  1593  		if mg := js.getMetaGroup(); mg != nil {
  1594  			meta = mg.(*raft)
  1595  		}
  1596  	}
  1597  	blockMappingOutgoing := false
  1598  	// Deny (non-domain) JetStream API traffic unless the system account is shared,
  1599  	// domain names are identical and extending is not disabled.
  1600  
  1601  	// Check if backwards compatibility has been enabled and needs to be acted on
  1602  	forceSysAccDeny := false
  1603  	if len(opts.JsAccDefaultDomain) > 0 {
  1604  		if acc == sysAcc {
  1605  			for _, d := range opts.JsAccDefaultDomain {
  1606  				if d == _EMPTY_ {
  1607  					// Extending JetStream via leaf node is mutually exclusive with a domain mapping to the empty/default domain.
  1608  					// As soon as one mapping to "" is found, disable the ability to extend JS via a leaf node.
  1609  					c.Noticef("Not extending remote JetStream domain %q due to presence of empty default domain", myRemoteDomain)
  1610  					forceSysAccDeny = true
  1611  					break
  1612  				}
  1613  			}
  1614  		} else if domain, ok := opts.JsAccDefaultDomain[accName]; ok && domain == _EMPTY_ {
  1615  			// For backwards compatibility with old setups that do not have a domain name set.
  1616  			c.Debugf("Skipping deny %q for account %q due to default domain", jsAllAPI, accName)
  1617  			return
  1618  		}
  1619  	}
  1620  
  1621  	// If the server has JS disabled, it may still be part of a JetStream deployment that could be extended.
  1622  	// This is signaled either by JS being disabled and a domain being set,
  1623  	// or, in cases where no domain name exists, by an extension hint being set.
  1624  	// However, this is only relevant in mixed setups.
  1625  	//
  1626  	// If the system account connects but default domains are present, JetStream can't be extended.
  1627  	if opts.JetStreamDomain != myRemoteDomain || (!opts.JetStream && (opts.JetStreamDomain == _EMPTY_ && opts.JetStreamExtHint != jsWillExtend)) ||
  1628  		sysAcc == nil || acc == nil || forceSysAccDeny {
  1629  		// If domain names mismatch always deny. This applies to system accounts as well as non system accounts.
  1630  		// Not having a system account, account or JetStream disabled is considered a mismatch as well.
  1631  		if acc != nil && acc == sysAcc {
  1632  			c.Noticef("System account connected from %s", srvDecorated())
  1633  			c.Noticef("JetStream not extended, domains differ")
  1634  			c.mergeDenyPermissionsLocked(both, denyAllJs)
  1635  			// When a remote with a system account is present in a server, unless otherwise disabled, the server will be
  1636  			// started in observer mode. Now that it is clear that this is not used, turn the observer mode off.
  1637  			if solicited && meta != nil && meta.IsObserver() {
  1638  				meta.setObserver(false, extNotExtended)
  1639  				c.Debugf("Turning JetStream metadata controller Observer Mode off")
  1640  				// Take note that the domain was not extended so we avoid this state on the next startup.
  1641  				writePeerState(js.config.StoreDir, meta.currentPeerState())
  1642  				// The meta controller can't be leader yet.
  1643  				// Yet it is possible that, due to observer mode, every server has already stopped campaigning.
  1644  				// Therefore this server needs to be explicitly kicked into campaigning.
  1645  				meta.Campaign()
  1646  			}
  1647  		} else {
  1648  			c.Noticef("JetStream using domains: local %q, remote %q", opts.JetStreamDomain, myRemoteDomain)
  1649  			c.mergeDenyPermissionsLocked(both, denyAllClientJs)
  1650  		}
  1651  		blockMappingOutgoing = true
  1652  	} else if acc == sysAcc {
  1653  		// system account and same domain
  1654  		s.sys.client.Noticef("Extending JetStream domain %q as System Account connected from server %s",
  1655  			myRemoteDomain, srvDecorated())
  1656  		// In an extension use case, pin leadership to the servers that remotes connect to.
  1657  		// Therefore, servers with a remote that are not already in observer mode need to be put into it.
  1658  		if solicited && meta != nil && !meta.IsObserver() {
  1659  			meta.setObserver(true, extExtended)
  1660  			c.Debugf("Turning JetStream metadata controller Observer Mode on - System Account Connected")
  1661  			// Take note that the domain was extended so this state is restored on the next startup.
  1662  			writePeerState(js.config.StoreDir, meta.currentPeerState())
  1663  			// If this server is the leader already, step down so a new leader can be elected (that is not an observer)
  1664  			meta.StepDown()
  1665  		}
  1666  	} else {
  1667  		// This deny is needed in all cases (system account shared or not).
  1668  		// If the system account is shared, jsAllAPI traffic will go through the system account.
  1669  		// So in order to prevent duplicate delivery (from system and actual account) suppress it on the account.
  1670  		// If the system account is NOT shared, jsAllAPI traffic has no business crossing this leafnode connection.
  1671  		c.Debugf("Adding deny %+v for account %q", denyAllClientJs, accName)
  1672  		c.mergeDenyPermissionsLocked(both, denyAllClientJs)
  1673  	}
  1674  	// If we have a specified JetStream domain we will want to add a mapping to
  1675  	// allow cross-domain access for each non-system account.
  1676  	if opts.JetStreamDomain != _EMPTY_ && opts.JetStream && acc != nil && acc != sysAcc {
  1677  		for src, dest := range generateJSMappingTable(opts.JetStreamDomain) {
  1678  			if err := acc.AddMapping(src, dest); err != nil {
  1679  				c.Debugf("Error adding JetStream domain mapping: %s", err.Error())
  1680  			} else {
  1681  				c.Debugf("Adding JetStream Domain Mapping %q -> %s to account %q", src, dest, accName)
  1682  			}
  1683  		}
  1684  		if blockMappingOutgoing {
  1685  			src := fmt.Sprintf(jsDomainAPI, opts.JetStreamDomain)
  1686  			// Make sure that messages intended for this domain do not leave the cluster via this leaf node connection.
  1687  			// This is a guard against a misconfiguration with two identical domain names and will only cover some
  1688  			// forms of this issue, not all of them.
  1689  			// This guards against a hub and a spoke having the same domain name.
  1690  			// But not two spokes having the same one and the request coming from the hub.
  1691  			c.mergeDenyPermissionsLocked(pub, []string{src})
  1692  			c.Debugf("Adding deny %q for outgoing messages to account %q", src, accName)
  1693  		}
  1694  	}
  1695  }
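
        // Illustrative sketch of the mapping logic above for a local domain "hub"
        // (the exact subjects come from generateJSMappingTable): per-account mappings
        // route domain-qualified API requests such as "$JS.hub.API.>" to the local
        // "$JS.API.>" handlers, and when blockMappingOutgoing is set, the added pub
        // deny keeps that domain's API traffic from leaving via this leafnode.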
  1696  
  1697  func (s *Server) removeLeafNodeConnection(c *client) {
  1698  	c.mu.Lock()
  1699  	cid := c.cid
  1700  	if c.leaf != nil {
  1701  		if c.leaf.tsubt != nil {
  1702  			c.leaf.tsubt.Stop()
  1703  			c.leaf.tsubt = nil
  1704  		}
  1705  		if c.leaf.gwSub != nil {
  1706  			s.gwLeafSubs.Remove(c.leaf.gwSub)
  1707  			// We need to set this to nil for GC to release the connection
  1708  			c.leaf.gwSub = nil
  1709  		}
  1710  	}
  1711  	c.mu.Unlock()
  1712  	s.mu.Lock()
  1713  	delete(s.leafs, cid)
  1714  	s.mu.Unlock()
  1715  	s.removeFromTempClients(cid)
  1716  }
  1717  
  1718  // Connect information for solicited leafnodes.
  1719  type leafConnectInfo struct {
  1720  	Version   string   `json:"version,omitempty"`
  1721  	Nkey      string   `json:"nkey,omitempty"`
  1722  	JWT       string   `json:"jwt,omitempty"`
  1723  	Sig       string   `json:"sig,omitempty"`
  1724  	User      string   `json:"user,omitempty"`
  1725  	Pass      string   `json:"pass,omitempty"`
  1726  	ID        string   `json:"server_id,omitempty"`
  1727  	Domain    string   `json:"domain,omitempty"`
  1728  	Name      string   `json:"name,omitempty"`
  1729  	Hub       bool     `json:"is_hub,omitempty"`
  1730  	Cluster   string   `json:"cluster,omitempty"`
  1731  	Headers   bool     `json:"headers,omitempty"`
  1732  	JetStream bool     `json:"jetstream,omitempty"`
  1733  	DenyPub   []string `json:"deny_pub,omitempty"`
  1734  
  1735  	// There was an existing field called:
  1736  	// >> Comp bool `json:"compression,omitempty"`
  1737  	// that has never been used. With support for compression, we now need
  1738  	// a field that is a string. So we use a different json tag:
  1739  	Compression string `json:"compress_mode,omitempty"`
  1740  
  1741  	// Just used to detect wrong connection attempts.
  1742  	Gateway string `json:"gateway,omitempty"`
  1743  
  1744  	// Tells the accept side which account the remote is binding to.
  1745  	RemoteAccount string `json:"remote_account,omitempty"`
  1746  
  1747  	// The accept side of a LEAF connection, unlike ROUTER and GATEWAY, receives
  1748  	// only the CONNECT protocol, and no INFO. So we need to send the protocol
  1749  	// version as part of the CONNECT. It will indicate if a connection supports
  1750  	// some features, such as message tracing.
  1751  	// We use `protocol` as the JSON tag, so this is automatically unmarshaled
  1752  	// in the low level process CONNECT.
  1753  	Proto int `json:"protocol,omitempty"`
  1754  }
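
        // A CONNECT payload from a solicited leafnode might look like this
        // (illustrative values only):
        //
        //   CONNECT {"version":"2.11.0","server_id":"NABC...","name":"spoke-1",
        //     "cluster":"spoke","headers":true,"jetstream":true,
        //     "compress_mode":"s2_fast","remote_account":"APP","protocol":1}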
  1755  
  1756  // processLeafNodeConnect will process the inbound connect args.
  1757  // Once we are here we are bound to an account, so we can send any interest that
  1758  // we would have to the other side.
  1759  func (c *client) processLeafNodeConnect(s *Server, arg []byte, lang string) error {
  1760  	// Way to detect clients that incorrectly connect to the leafnode listen
  1761  	// port. Clients provide "lang" in the CONNECT protocol while LEAFNODEs don't.
  1762  	if lang != _EMPTY_ {
  1763  		c.sendErrAndErr(ErrClientConnectedToLeafNodePort.Error())
  1764  		c.closeConnection(WrongPort)
  1765  		return ErrClientConnectedToLeafNodePort
  1766  	}
  1767  
  1768  	// Unmarshal as a leaf node connect protocol
  1769  	proto := &leafConnectInfo{}
  1770  	if err := json.Unmarshal(arg, proto); err != nil {
  1771  		return err
  1772  	}
  1773  
  1774  	// Check for cluster name collisions.
  1775  	if cn := s.cachedClusterName(); cn != _EMPTY_ && proto.Cluster != _EMPTY_ && proto.Cluster == cn {
  1776  		c.sendErrAndErr(ErrLeafNodeHasSameClusterName.Error())
  1777  		c.closeConnection(ClusterNamesIdentical)
  1778  		return ErrLeafNodeHasSameClusterName
  1779  	}
  1780  
  1781  	// Reject if this has Gateway which means that it would be from a gateway
  1782  	// connection that incorrectly connects to the leafnode port.
  1783  	if proto.Gateway != _EMPTY_ {
  1784  		errTxt := fmt.Sprintf("Rejecting connection from gateway %q on the leafnode port", proto.Gateway)
  1785  		c.Errorf(errTxt)
  1786  		c.sendErr(errTxt)
  1787  		c.closeConnection(WrongGateway)
  1788  		return ErrWrongGateway
  1789  	}
  1790  
  1791  	if mv := s.getOpts().LeafNode.MinVersion; mv != _EMPTY_ {
  1792  		major, minor, update, _ := versionComponents(mv)
  1793  		if !versionAtLeast(proto.Version, major, minor, update) {
  1794  			// We are going to send back an INFO because otherwise recent
  1795  			// versions of the remote server would simply break the connection
  1796  			// after 2 seconds if not receiving it. Instead, we want the
  1797  			// other side to just "stall" until we finish waiting for the holding
  1798  			// period and close the connection below.
  1799  			s.sendPermsAndAccountInfo(c)
  1800  			c.sendErrAndErr(fmt.Sprintf("connection rejected since minimum version required is %q", mv))
  1801  			select {
  1802  			case <-c.srv.quitCh:
  1803  			case <-time.After(leafNodeWaitBeforeClose):
  1804  			}
  1805  			c.closeConnection(MinimumVersionRequired)
  1806  			return ErrMinimumVersionRequired
  1807  		}
  1808  	}
  1809  
  1810  	// Check if this server supports headers.
  1811  	supportHeaders := c.srv.supportsHeaders()
  1812  
  1813  	c.mu.Lock()
  1814  	// Leaf Nodes do not do echo or verbose or pedantic.
  1815  	c.opts.Verbose = false
  1816  	c.opts.Echo = false
  1817  	c.opts.Pedantic = false
  1818  	// This inbound connection will be marked as supporting headers if this server
  1819  	// supports headers and the remote has sent in the CONNECT protocol that it
  1820  	// supports headers too.
  1821  	c.headers = supportHeaders && proto.Headers
  1822  	// If the compression level is still not set, set it based on what has been
  1823  	// given to us in the CONNECT protocol.
  1824  	if c.leaf.compression == _EMPTY_ {
  1825  		// But if proto.Compression is _EMPTY_, set it to CompressionNotSupported
  1826  		if proto.Compression == _EMPTY_ {
  1827  			c.leaf.compression = CompressionNotSupported
  1828  		} else {
  1829  			c.leaf.compression = proto.Compression
  1830  		}
  1831  	}
  1832  
  1833  	// Remember the remote server.
  1834  	c.leaf.remoteServer = proto.Name
  1835  	// Remember the remote account name
  1836  	c.leaf.remoteAccName = proto.RemoteAccount
  1837  
  1838  	// If the other side has declared itself a hub, we will take on the spoke role.
  1839  	if proto.Hub {
  1840  		c.leaf.isSpoke = true
  1841  	}
  1842  
  1843  	// The soliciting side is part of a cluster.
  1844  	if proto.Cluster != _EMPTY_ {
  1845  		c.leaf.remoteCluster = proto.Cluster
  1846  	}
  1847  
  1848  	c.leaf.remoteDomain = proto.Domain
  1849  
  1850  	// When a leaf solicits a connection to a hub, the perms it will use on the soliciting leafnode's
  1851  	// behalf are correct from the leaf's perspective, but inside the hub they need to be reversed since data flows in the opposite direction.
  1852  	if !c.isSolicitedLeafNode() && c.perms != nil {
  1853  		sp, pp := c.perms.sub, c.perms.pub
  1854  		c.perms.sub, c.perms.pub = pp, sp
  1855  		if c.opts.Import != nil {
  1856  			c.darray = c.opts.Import.Deny
  1857  		} else {
  1858  			c.darray = nil
  1859  		}
  1860  	}
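
        	// Rough example of the swap above: a user permission that lets the remote
        	// publish on "data.>" describes the spoke's outbound traffic; on this
        	// accepting side the same permission must govern the subscribe direction,
        	// which is why sub and pub are exchanged.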
  1861  
  1862  	// Set the Ping timer
  1863  	c.setFirstPingTimer()
  1864  
  1865  	// If we received pub deny permissions from the other end, merge with existing ones.
  1866  	c.mergeDenyPermissions(pub, proto.DenyPub)
  1867  
  1868  	c.mu.Unlock()
  1869  
  1870  	// Register the cluster, even if empty, as long as we are acting as a hub.
  1871  	if !proto.Hub {
  1872  		c.acc.registerLeafNodeCluster(proto.Cluster)
  1873  	}
  1874  
  1875  	// Add in the leafnode here since we passed through auth at this point.
  1876  	s.addLeafNodeConnection(c, proto.Name, proto.Cluster, true)
  1877  
  1878  	// If we have permissions bound to this leafnode we need to send them back to the
  1879  	// origin server for local enforcement.
  1880  	s.sendPermsAndAccountInfo(c)
  1881  
  1882  	// Create and initialize the smap since we know our bound account now.
  1883  	// This will send all registered subs too.
  1884  	s.initLeafNodeSmapAndSendSubs(c)
  1885  
  1886  	// Announce the account connect event for a leaf node.
  1887  	// This will no-op as needed.
  1888  	s.sendLeafNodeConnect(c.acc)
  1889  
  1890  	return nil
  1891  }
  1892  
  1893  // Returns the remote cluster name. This is set only once so does not require a lock.
  1894  func (c *client) remoteCluster() string {
  1895  	if c.leaf == nil {
  1896  		return _EMPTY_
  1897  	}
  1898  	return c.leaf.remoteCluster
  1899  }
  1900  
  1901  // Sends back an info block to the soliciting leafnode to let it know about
  1902  // its permission settings for local enforcement.
  1903  func (s *Server) sendPermsAndAccountInfo(c *client) {
  1904  	// Copy
  1905  	info := s.copyLeafNodeInfo()
  1906  	c.mu.Lock()
  1907  	info.CID = c.cid
  1908  	info.Import = c.opts.Import
  1909  	info.Export = c.opts.Export
  1910  	info.RemoteAccount = c.acc.Name
  1911  	info.ConnectInfo = true
  1912  	c.enqueueProto(generateInfoJSON(info))
  1913  	c.mu.Unlock()
  1914  }
  1915  
  1916  // Snapshot the current subscriptions from the sublist into our smap which
  1917  // we will keep updated from now on.
  1918  // Also send the registered subscriptions.
  1919  func (s *Server) initLeafNodeSmapAndSendSubs(c *client) {
  1920  	acc := c.acc
  1921  	if acc == nil {
  1922  		c.Debugf("Leafnode does not have an account bound")
  1923  		return
  1924  	}
  1925  	// Collect all account subs here.
  1926  	_subs := [1024]*subscription{}
  1927  	subs := _subs[:0]
  1928  	ims := []string{}
  1929  
  1930  	// Hold the client lock, otherwise there can be a race and we could miss some subs.
  1931  	c.mu.Lock()
  1932  	defer c.mu.Unlock()
  1933  
  1934  	acc.mu.RLock()
  1935  	accName := acc.Name
  1936  	accNTag := acc.nameTag
  1937  
  1938  	// To make printing look better when no friendly name is present.
  1939  	if accNTag != _EMPTY_ {
  1940  		accNTag = "/" + accNTag
  1941  	}
  1942  
  1943  	// If we are solicited we only send interest for local clients.
  1944  	if c.isSpokeLeafNode() {
  1945  		acc.sl.localSubs(&subs, true)
  1946  	} else {
  1947  		acc.sl.All(&subs)
  1948  	}
  1949  
  1950  	// Check if we have an existing service import reply.
  1951  	siReply := copyBytes(acc.siReply)
  1952  
  1953  	// Since leaf nodes only send on interest, if the bound
  1954  	// account has import services we need to send those over.
  1955  	for isubj := range acc.imports.services {
  1956  		if c.isSpokeLeafNode() && !c.canSubscribe(isubj) {
  1957  			c.Debugf("Not permitted to import service %q on behalf of %s%s", isubj, accName, accNTag)
  1958  			continue
  1959  		}
  1960  		ims = append(ims, isubj)
  1961  	}
  1962  	// Likewise for mappings.
  1963  	for _, m := range acc.mappings {
  1964  		if c.isSpokeLeafNode() && !c.canSubscribe(m.src) {
  1965  			c.Debugf("Not permitted to import mapping %q on behalf of %s%s", m.src, accName, accNTag)
  1966  			continue
  1967  		}
  1968  		ims = append(ims, m.src)
  1969  	}
  1970  
  1971  	// Create a unique subject that will be used for loop detection.
  1972  	lds := acc.lds
  1973  	acc.mu.RUnlock()
  1974  
  1975  	// Check if we have to create the LDS.
  1976  	if lds == _EMPTY_ {
  1977  		lds = leafNodeLoopDetectionSubjectPrefix + nuid.Next()
  1978  		acc.mu.Lock()
  1979  		acc.lds = lds
  1980  		acc.mu.Unlock()
  1981  	}
  1982  
  1983  	// Now check for gateway interest. Leafnodes will put this into
  1984  	// the proper mode to propagate, but they are not held in the account.
  1985  	gwsa := [16]*client{}
  1986  	gws := gwsa[:0]
  1987  	s.getOutboundGatewayConnections(&gws)
  1988  	for _, cgw := range gws {
  1989  		cgw.mu.Lock()
  1990  		gw := cgw.gw
  1991  		cgw.mu.Unlock()
  1992  		if gw != nil {
  1993  			if ei, _ := gw.outsim.Load(accName); ei != nil {
  1994  				if e := ei.(*outsie); e != nil && e.sl != nil {
  1995  					e.sl.All(&subs)
  1996  				}
  1997  			}
  1998  		}
  1999  	}
  2000  
  2001  	applyGlobalRouting := s.gateway.enabled
  2002  	if c.isSpokeLeafNode() {
  2003  		// Add a fake subscription for this solicited leafnode connection
  2004  		// so that we can send back directly for mapped GW replies.
  2005  		// We need to keep track of this subscription so it can be removed
  2006  		// when the connection is closed so that the GC can release it.
  2007  		c.leaf.gwSub = &subscription{client: c, subject: []byte(gwReplyPrefix + ">")}
  2008  		c.srv.gwLeafSubs.Insert(c.leaf.gwSub)
  2009  	}
  2010  
  2011  	// Now walk the results and add them to our smap
  2012  	rc := c.leaf.remoteCluster
  2013  	c.leaf.smap = make(map[string]int32)
  2014  	for _, sub := range subs {
  2015  		// Check perms regardless of role.
  2016  		if c.perms != nil && !c.canSubscribe(string(sub.subject)) {
  2017  			c.Debugf("Not permitted to subscribe to %q on behalf of %s%s", sub.subject, accName, accNTag)
  2018  			continue
  2019  		}
  2020  		// We ignore ourselves here.
  2021  		// Also don't add the subscription if it has an origin cluster and the
  2022  		// cluster name matches that of the client we are sending to.
  2023  		if c != sub.client && (sub.origin == nil || (bytesToString(sub.origin) != rc)) {
  2024  			count := int32(1)
  2025  			if len(sub.queue) > 0 && sub.qw > 0 {
  2026  				count = sub.qw
  2027  			}
  2028  			c.leaf.smap[keyFromSub(sub)] += count
  2029  			if c.leaf.tsub == nil {
  2030  				c.leaf.tsub = make(map[*subscription]struct{})
  2031  			}
  2032  			c.leaf.tsub[sub] = struct{}{}
  2033  		}
  2034  	}
  2035  	// FIXME(dlc) - We need to update appropriately on an account claims update.
  2036  	for _, isubj := range ims {
  2037  		c.leaf.smap[isubj]++
  2038  	}
  2039  	// If we have gateways enabled we need to make sure the other side sends us responses
  2040  	// that have been augmented from the original subscription.
  2041  	// TODO(dlc) - Should we lock this down more?
  2042  	if applyGlobalRouting {
  2043  		c.leaf.smap[oldGWReplyPrefix+"*.>"]++
  2044  		c.leaf.smap[gwReplyPrefix+">"]++
  2045  	}
  2046  	// Detect loops by subscribing to a specific subject and checking
  2047  	// if this sub is coming back to us.
  2048  	c.leaf.smap[lds]++
  2049  
  2050  	// Check if we need to add an existing siReply to our map.
  2051  	// This will be a prefix so add on the wildcard.
  2052  	if siReply != nil {
  2053  		wcsub := append(siReply, '>')
  2054  		c.leaf.smap[string(wcsub)]++
  2055  	}
  2056  	// Queue all protocols. There is no max pending limit for LN connections,
  2057  	// so we don't need chunking. The writes will happen from the writeLoop.
  2058  	var b bytes.Buffer
  2059  	for key, n := range c.leaf.smap {
  2060  		c.writeLeafSub(&b, key, n)
  2061  	}
  2062  	if b.Len() > 0 {
  2063  		c.enqueueProto(b.Bytes())
  2064  	}
  2065  	if c.leaf.tsub != nil {
  2066  		// Clear the tsub map after 5 seconds.
  2067  		c.leaf.tsubt = time.AfterFunc(5*time.Second, func() {
  2068  			c.mu.Lock()
  2069  			if c.leaf != nil {
  2070  				c.leaf.tsub = nil
  2071  				c.leaf.tsubt = nil
  2072  			}
  2073  			c.mu.Unlock()
  2074  		})
  2075  	}
  2076  }
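
        // For a feel of what gets queued above, an illustrative smap of
        //
        //   {"orders.>": 1, "jobs workers": 3, "$LDS.abc123": 1}
        //
        // produces the protocol lines "LS+ orders.>", "LS+ jobs workers 3" (queue
        // subs carry their weight) and "LS+ $LDS.abc123".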
  2077  
  2078  // updateInterestForAccountOnGateway called from gateway code when processing RS+ and RS-.
  2079  func (s *Server) updateInterestForAccountOnGateway(accName string, sub *subscription, delta int32) {
  2080  	acc, err := s.LookupAccount(accName)
  2081  	if acc == nil || err != nil {
  2082  		s.Debugf("No or bad account for %q, failed to update interest from gateway", accName)
  2083  		return
  2084  	}
  2085  	acc.updateLeafNodes(sub, delta)
  2086  }
  2087  
  2088  // updateLeafNodes will make sure to update the account smap for the subscription.
  2089  // Will also forward to all leaf nodes as needed.
  2090  func (acc *Account) updateLeafNodes(sub *subscription, delta int32) {
  2091  	if acc == nil || sub == nil {
  2092  		return
  2093  	}
  2094  
  2095  	// We will do checks for no leafnodes and same cluster here inline and under the
  2096  	// general account read lock.
  2097  	// If we feel we need to update the leafnodes we will do that out of line to avoid
  2098  	// blocking routes or GWs.
  2099  
  2100  	acc.mu.RLock()
  2101  	// First check if we even have leafnodes here.
  2102  	if acc.nleafs == 0 {
  2103  		acc.mu.RUnlock()
  2104  		return
  2105  	}
  2106  
  2107  	// Is this a loop detection subject?
  2108  	isLDS := bytes.HasPrefix(sub.subject, []byte(leafNodeLoopDetectionSubjectPrefix))
  2109  
  2110  	// Capture the cluster even if it's empty.
  2111  	var cluster string
  2112  	if sub.origin != nil {
  2113  		cluster = bytesToString(sub.origin)
  2114  	}
  2115  
  2116  	// If we have an isolated cluster we can return early, as long as it is not a loop detection subject.
  2117  	// Empty clusters will return false for the check.
  2118  	if !isLDS && acc.isLeafNodeClusterIsolated(cluster) {
  2119  		acc.mu.RUnlock()
  2120  		return
  2121  	}
  2122  
  2123  	// We can release the general account lock.
  2124  	acc.mu.RUnlock()
  2125  
  2126  	// We can hold the list lock here to avoid having to copy a large slice.
  2127  	acc.lmu.RLock()
  2128  	defer acc.lmu.RUnlock()
  2129  
  2130  	// Do this once.
  2131  	subject := string(sub.subject)
  2132  
  2133  	// Walk the connected leafnodes.
  2134  	for _, ln := range acc.lleafs {
  2135  		if ln == sub.client {
  2136  			continue
  2137  		}
  2138  		// Check to make sure this sub does not have an origin cluster that matches the leafnode.
  2139  		ln.mu.Lock()
  2140  		// If skipped, make sure that we still let through the "$LDS." subscription that
  2141  		// allows the detection of loops, as long as the cluster is different.
  2142  		clusterDifferent := cluster != ln.remoteCluster()
  2143  		if (isLDS && clusterDifferent) || ((cluster == _EMPTY_ || clusterDifferent) && (delta <= 0 || ln.canSubscribe(subject))) {
  2144  			ln.updateSmap(sub, delta, isLDS)
  2145  		}
  2146  		ln.mu.Unlock()
  2147  	}
  2148  }
  2149  
  2150  // This will make an update to our internal smap and determine if we should send out
  2151  // an interest update to the remote side.
  2152  // Lock should be held.
  2153  func (c *client) updateSmap(sub *subscription, delta int32, isLDS bool) {
  2154  	if c.leaf.smap == nil {
  2155  		return
  2156  	}
  2157  
  2158  	// If we are solicited, make sure this is a local client or a non-solicited leaf node.
  2159  	skind := sub.client.kind
  2160  	updateClient := skind == CLIENT || skind == SYSTEM || skind == JETSTREAM || skind == ACCOUNT
  2161  	if !isLDS && c.isSpokeLeafNode() && !(updateClient || (skind == LEAF && !sub.client.isSpokeLeafNode())) {
  2162  		return
  2163  	}
  2164  
  2165  	// For additions, check if that sub has just been processed during initLeafNodeSmapAndSendSubs
  2166  	if delta > 0 && c.leaf.tsub != nil {
  2167  		if _, present := c.leaf.tsub[sub]; present {
  2168  			delete(c.leaf.tsub, sub)
  2169  			if len(c.leaf.tsub) == 0 {
  2170  				c.leaf.tsub = nil
  2171  				c.leaf.tsubt.Stop()
  2172  				c.leaf.tsubt = nil
  2173  			}
  2174  			return
  2175  		}
  2176  	}
  2177  
  2178  	key := keyFromSub(sub)
  2179  	n, ok := c.leaf.smap[key]
  2180  	if delta < 0 && !ok {
  2181  		return
  2182  	}
  2183  
  2184  	// We will update if it's a queue sub, or if the count transitions from <= 0 to > 0, or from > 0 to <= 0.
  2185  	update := sub.queue != nil || (n <= 0 && n+delta > 0) || (n > 0 && n+delta <= 0)
  2186  	n += delta
  2187  	if n > 0 {
  2188  		c.leaf.smap[key] = n
  2189  	} else {
  2190  		delete(c.leaf.smap, key)
  2191  	}
  2192  	if update {
  2193  		c.sendLeafNodeSubUpdate(key, n)
  2194  	}
  2195  }
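
        // Decision sketch for the update above (plain subs): n=0, delta=+1 sends
        // "LS+ key"; n=1, delta=+1 only bumps the counter (no protocol traffic);
        // n=1, delta=-1 sends "LS- key". Queue subs always send, since the remote
        // side needs the updated weight.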
  2196  
  2197  // Used to force add subjects to the subject map.
  2198  func (c *client) forceAddToSmap(subj string) {
  2199  	c.mu.Lock()
  2200  	defer c.mu.Unlock()
  2201  
  2202  	if c.leaf.smap == nil {
  2203  		return
  2204  	}
  2205  	n := c.leaf.smap[subj]
  2206  	if n != 0 {
  2207  		return
  2208  	}
  2209  	// Place into the map since it was not there.
  2210  	c.leaf.smap[subj] = 1
  2211  	c.sendLeafNodeSubUpdate(subj, 1)
  2212  }
  2213  
  2214  // Used to force remove a subject from the subject map.
  2215  func (c *client) forceRemoveFromSmap(subj string) {
  2216  	c.mu.Lock()
  2217  	defer c.mu.Unlock()
  2218  
  2219  	if c.leaf.smap == nil {
  2220  		return
  2221  	}
  2222  	n := c.leaf.smap[subj]
  2223  	if n == 0 {
  2224  		return
  2225  	}
  2226  	n--
  2227  	if n == 0 {
  2228  		// Remove since the count is now zero.
  2229  		delete(c.leaf.smap, subj)
  2230  		c.sendLeafNodeSubUpdate(subj, 0)
  2231  	} else {
  2232  		c.leaf.smap[subj] = n
  2233  	}
  2234  }
  2235  
  2236  // Send the subscription interest change to the other side.
  2237  // Lock should be held.
  2238  func (c *client) sendLeafNodeSubUpdate(key string, n int32) {
  2239  	// If we are a spoke, we need to check if we are allowed to send this subscription over to the hub.
  2240  	if c.isSpokeLeafNode() {
  2241  		checkPerms := true
  2242  		if len(key) > 0 && (key[0] == '$' || key[0] == '_') {
  2243  			if strings.HasPrefix(key, leafNodeLoopDetectionSubjectPrefix) ||
  2244  				strings.HasPrefix(key, oldGWReplyPrefix) ||
  2245  				strings.HasPrefix(key, gwReplyPrefix) {
  2246  				checkPerms = false
  2247  			}
  2248  		}
  2249  		if checkPerms && !c.canSubscribe(key) {
  2250  			return
  2251  		}
  2252  	}
  2253  	// If we are here we can send over to the other side.
  2254  	_b := [64]byte{}
  2255  	b := bytes.NewBuffer(_b[:0])
  2256  	c.writeLeafSub(b, key, n)
  2257  	c.enqueueProto(b.Bytes())
  2258  }
  2259  
  2260  // Helper function to build the key.
  2261  func keyFromSub(sub *subscription) string {
  2262  	var sb strings.Builder
  2263  	sb.Grow(len(sub.subject) + len(sub.queue) + 1)
  2264  	sb.Write(sub.subject)
  2265  	if sub.queue != nil {
  2266  		// Just make the key "subject<space>group", e.g. "foo bar".
  2267  		sb.WriteByte(' ')
  2268  		sb.Write(sub.queue)
  2269  	}
  2270  	return sb.String()
  2271  }
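
        // For example, a plain sub on "foo.bar" yields the key "foo.bar", while a
        // queue sub on "foo.bar" in group "workers" yields "foo.bar workers".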
  2272  
  2273  // Lock should be held.
  2274  func (c *client) writeLeafSub(w *bytes.Buffer, key string, n int32) {
  2275  	if key == _EMPTY_ {
  2276  		return
  2277  	}
  2278  	if n > 0 {
  2279  		w.WriteString("LS+ " + key)
  2280  		// Check for queue semantics, if found write n.
  2281  		if strings.Contains(key, " ") {
  2282  			w.WriteString(" ")
  2283  			var b [12]byte
  2284  			var i = len(b)
  2285  			for l := n; l > 0; l /= 10 {
  2286  				i--
  2287  				b[i] = digits[l%10]
  2288  			}
  2289  			w.Write(b[i:])
  2290  			if c.trace {
  2291  				arg := fmt.Sprintf("%s %d", key, n)
  2292  				c.traceOutOp("LS+", []byte(arg))
  2293  			}
  2294  		} else if c.trace {
  2295  			c.traceOutOp("LS+", []byte(key))
  2296  		}
  2297  	} else {
  2298  		w.WriteString("LS- " + key)
  2299  		if c.trace {
  2300  			c.traceOutOp("LS-", []byte(key))
  2301  		}
  2302  	}
  2303  	w.WriteString(CR_LF)
  2304  }
  2305  
  2306  // processLeafSub will process an inbound sub request for the remote leaf node.
  2307  func (c *client) processLeafSub(argo []byte) (err error) {
  2308  	// Indicate activity.
  2309  	c.in.subs++
  2310  
  2311  	srv := c.srv
  2312  	if srv == nil {
  2313  		return nil
  2314  	}
  2315  
  2316  	// Copy so we do not reference a potentially large buffer
  2317  	arg := make([]byte, len(argo))
  2318  	copy(arg, argo)
  2319  
  2320  	args := splitArg(arg)
  2321  	sub := &subscription{client: c}
  2322  
  2323  	switch len(args) {
  2324  	case 1:
  2325  		sub.queue = nil
  2326  	case 3:
  2327  		sub.queue = args[1]
  2328  		sub.qw = int32(parseSize(args[2]))
  2329  	default:
  2330  		return fmt.Errorf("processLeafSub Parse Error: '%s'", arg)
  2331  	}
  2332  	sub.subject = args[0]
  2333  
  2334  	c.mu.Lock()
  2335  	if c.isClosed() {
  2336  		c.mu.Unlock()
  2337  		return nil
  2338  	}
  2339  
  2340  	acc := c.acc
  2341  	// Check if we have a loop.
  2342  	ldsPrefix := bytes.HasPrefix(sub.subject, []byte(leafNodeLoopDetectionSubjectPrefix))
  2343  
  2344  	if ldsPrefix && bytesToString(sub.subject) == acc.getLDSubject() {
  2345  		c.mu.Unlock()
  2346  		c.handleLeafNodeLoop(true)
  2347  		return nil
  2348  	}
  2349  
  2350  	// Check permissions if applicable (but exclude $LDS, $GR and _GR_).
  2351  	checkPerms := true
  2352  	if sub.subject[0] == '$' || sub.subject[0] == '_' {
  2353  		if ldsPrefix ||
  2354  			bytes.HasPrefix(sub.subject, []byte(oldGWReplyPrefix)) ||
  2355  			bytes.HasPrefix(sub.subject, []byte(gwReplyPrefix)) {
  2356  			checkPerms = false
  2357  		}
  2358  	}
  2359  
  2360  	// If we are a hub check that we can publish to this subject.
  2361  	if checkPerms {
  2362  		subj := string(sub.subject)
  2363  		if subjectIsLiteral(subj) && !c.pubAllowedFullCheck(subj, true, true) {
  2364  			c.mu.Unlock()
  2365  			c.leafSubPermViolation(sub.subject)
  2366  			c.Debugf(fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject))
  2367  			return nil
  2368  		}
  2369  	}
  2370  
  2371  	// Check if we have a maximum on the number of subscriptions.
  2372  	if c.subsAtLimit() {
  2373  		c.mu.Unlock()
  2374  		c.maxSubsExceeded()
  2375  		return nil
  2376  	}
  2377  
  2378  	// If we have an origin cluster associated, mark that in the sub.
  2379  	if rc := c.remoteCluster(); rc != _EMPTY_ {
  2380  		sub.origin = []byte(rc)
  2381  	}
  2382  
  2383  	// Like Routes, we store local subs by account and subject and optionally queue name.
  2384  	// If we have a queue it will have a trailing weight which we do not want.
  2385  	if sub.queue != nil {
  2386  		sub.sid = arg[:len(arg)-len(args[2])-1]
  2387  	} else {
  2388  		sub.sid = arg
  2389  	}
  2390  	key := bytesToString(sub.sid)
  2391  	osub := c.subs[key]
  2392  	updateGWs := false
  2393  	delta := int32(1)
  2394  	if osub == nil {
  2395  		c.subs[key] = sub
  2396  		// Now place into the account sl.
  2397  		if err := acc.sl.Insert(sub); err != nil {
  2398  			delete(c.subs, key)
  2399  			c.mu.Unlock()
  2400  			c.Errorf("Could not insert subscription: %v", err)
  2401  			c.sendErr("Invalid Subscription")
  2402  			return nil
  2403  		}
  2404  		updateGWs = srv.gateway.enabled
  2405  	} else if sub.queue != nil {
  2406  		// For a queue we need to update the weight.
  2407  		delta = sub.qw - atomic.LoadInt32(&osub.qw)
  2408  		atomic.StoreInt32(&osub.qw, sub.qw)
  2409  		acc.sl.UpdateRemoteQSub(osub)
  2410  	}
  2411  	spoke := c.isSpokeLeafNode()
  2412  	c.mu.Unlock()
  2413  
  2414  	// Only add in shadow subs if a new sub or qsub.
  2415  	if osub == nil {
  2416  		if err := c.addShadowSubscriptions(acc, sub, true); err != nil {
  2417  			c.Errorf(err.Error())
  2418  		}
  2419  	}
  2420  
  2421  	// If we are not solicited, treat leaf node subscriptions similar to a
  2422  	// client subscription, meaning we forward them to routes, gateways and
  2423  	// other leaf nodes as needed.
  2424  	if !spoke {
  2425  		// If we are routing add to the route map for the associated account.
  2426  		srv.updateRouteSubscriptionMap(acc, sub, delta)
  2427  		if updateGWs {
  2428  			srv.gatewayUpdateSubInterest(acc.Name, sub, delta)
  2429  		}
  2430  	}
  2431  	// Now check on leafnode updates for other leaf nodes. We understand solicited
  2432  	// and non-solicited state in this call so we will do the right thing.
  2433  	acc.updateLeafNodes(sub, delta)
  2434  
  2435  	return nil
  2436  }
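
        // Parsing sketch for the protocol handled above: "LS+ foo.bar" produces a
        // plain sub (one arg), while "LS+ foo.bar workers 3" produces a queue sub
        // with queue "workers" and weight 3 (three args); any other arg count is a
        // parse error.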
  2437  
  2438  // If the leafnode is solicited, set the connect delay based on the default
  2439  // or private option (for tests). Send the error to the other side, log it and
  2440  // close the connection.
  2441  func (c *client) handleLeafNodeLoop(sendErr bool) {
  2442  	accName, delay := c.setLeafConnectDelayIfSoliciting(leafNodeReconnectDelayAfterLoopDetected)
  2443  	errTxt := fmt.Sprintf("Loop detected for leafnode account=%q. Delaying attempt to reconnect for %v", accName, delay)
  2444  	if sendErr {
  2445  		c.sendErr(errTxt)
  2446  	}
  2447  
  2448  	c.Errorf(errTxt)
  2449  	// If we are here with "sendErr" false, it means that this is the server
  2450  	// that received the error. The other side will have closed the connection,
  2451  	// but it does not hurt to close here too.
  2452  	c.closeConnection(ProtocolViolation)
  2453  }
  2454  
  2455  // processLeafUnsub will process an inbound unsub request for the remote leaf node.
  2456  func (c *client) processLeafUnsub(arg []byte) error {
  2457  	// Indicate activity; both subs and unsubs count here.
  2458  	c.in.subs++
  2459  
  2460  	acc := c.acc
  2461  	srv := c.srv
  2462  
  2463  	c.mu.Lock()
  2464  	if c.isClosed() {
  2465  		c.mu.Unlock()
  2466  		return nil
  2467  	}
  2468  
  2469  	updateGWs := false
  2470  	spoke := c.isSpokeLeafNode()
  2471  	// We store local subs by account and subject and optionally queue name.
  2472  	// LS- will have the arg exactly as the key.
  2473  	sub, ok := c.subs[string(arg)]
  2474  	c.mu.Unlock()
  2475  
  2476  	if ok {
  2477  		c.unsubscribe(acc, sub, true, true)
  2478  		updateGWs = srv.gateway.enabled
  2479  	}
  2480  
  2481  	if !spoke {
  2482  		// If we are routing subtract from the route map for the associated account.
  2483  		srv.updateRouteSubscriptionMap(acc, sub, -1)
  2484  		// Gateways
  2485  		if updateGWs {
  2486  			srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
  2487  		}
  2488  	}
  2489  	// Now check on leafnode updates for other leaf nodes.
  2490  	acc.updateLeafNodes(sub, -1)
  2491  	return nil
  2492  }
  2493  
  2494  func (c *client) processLeafHeaderMsgArgs(arg []byte) error {
  2495  	// Unroll splitArgs to avoid runtime/heap issues
  2496  	a := [MAX_MSG_ARGS][]byte{}
  2497  	args := a[:0]
  2498  	start := -1
  2499  	for i, b := range arg {
  2500  		switch b {
  2501  		case ' ', '\t', '\r', '\n':
  2502  			if start >= 0 {
  2503  				args = append(args, arg[start:i])
  2504  				start = -1
  2505  			}
  2506  		default:
  2507  			if start < 0 {
  2508  				start = i
  2509  			}
  2510  		}
  2511  	}
  2512  	if start >= 0 {
  2513  		args = append(args, arg[start:])
  2514  	}
  2515  
  2516  	c.pa.arg = arg
  2517  	switch len(args) {
  2518  	case 0, 1, 2:
  2519  		return fmt.Errorf("processLeafHeaderMsgArgs Parse Error: '%s'", args)
  2520  	case 3:
  2521  		c.pa.reply = nil
  2522  		c.pa.queues = nil
  2523  		c.pa.hdb = args[1]
  2524  		c.pa.hdr = parseSize(args[1])
  2525  		c.pa.szb = args[2]
  2526  		c.pa.size = parseSize(args[2])
  2527  	case 4:
  2528  		c.pa.reply = args[1]
  2529  		c.pa.queues = nil
  2530  		c.pa.hdb = args[2]
  2531  		c.pa.hdr = parseSize(args[2])
  2532  		c.pa.szb = args[3]
  2533  		c.pa.size = parseSize(args[3])
  2534  	default:
  2535  		// args[1] is our reply indicator. Should be + or | normally.
  2536  		if len(args[1]) != 1 {
  2537  			return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
  2538  		}
  2539  		switch args[1][0] {
  2540  		case '+':
  2541  			c.pa.reply = args[2]
  2542  		case '|':
  2543  			c.pa.reply = nil
  2544  		default:
  2545  			return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
  2546  		}
  2547  		// Grab header size.
  2548  		c.pa.hdb = args[len(args)-2]
  2549  		c.pa.hdr = parseSize(c.pa.hdb)
  2550  
  2551  		// Grab size.
  2552  		c.pa.szb = args[len(args)-1]
  2553  		c.pa.size = parseSize(c.pa.szb)
  2554  
  2555  		// Grab queue names.
  2556  		if c.pa.reply != nil {
  2557  			c.pa.queues = args[3 : len(args)-2]
  2558  		} else {
  2559  			c.pa.queues = args[2 : len(args)-2]
  2560  		}
  2561  	}
  2562  	if c.pa.hdr < 0 {
  2563  		return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Header Size: '%s'", arg)
  2564  	}
  2565  	if c.pa.size < 0 {
  2566  		return fmt.Errorf("processLeafHeaderMsgArgs Bad or Missing Size: '%s'", args)
  2567  	}
  2568  	if c.pa.hdr > c.pa.size {
  2569  		return fmt.Errorf("processLeafHeaderMsgArgs Header Size larger than TotalSize: '%s'", arg)
  2570  	}
  2571  
  2572  	// Common ones processed after check for arg length
  2573  	c.pa.subject = args[0]
  2574  
  2575  	return nil
  2576  }
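
        // Argument layouts handled above (illustrative values): "foo 12 56" is
        // subject, header size, total size; "foo reply 12 56" adds a reply; and
        // "foo + reply q1 q2 12 56" uses a reply indicator ('+' with a reply, '|'
        // without) followed by queue names, the header size and the total size.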
  2577  
  2578  func (c *client) processLeafMsgArgs(arg []byte) error {
  2579  	// Unroll splitArgs to avoid runtime/heap issues
  2580  	a := [MAX_MSG_ARGS][]byte{}
  2581  	args := a[:0]
  2582  	start := -1
  2583  	for i, b := range arg {
  2584  		switch b {
  2585  		case ' ', '\t', '\r', '\n':
  2586  			if start >= 0 {
  2587  				args = append(args, arg[start:i])
  2588  				start = -1
  2589  			}
  2590  		default:
  2591  			if start < 0 {
  2592  				start = i
  2593  			}
  2594  		}
  2595  	}
  2596  	if start >= 0 {
  2597  		args = append(args, arg[start:])
  2598  	}
  2599  
  2600  	c.pa.arg = arg
  2601  	switch len(args) {
  2602  	case 0, 1:
  2603  		return fmt.Errorf("processLeafMsgArgs Parse Error: '%s'", args)
  2604  	case 2:
  2605  		c.pa.reply = nil
  2606  		c.pa.queues = nil
  2607  		c.pa.szb = args[1]
  2608  		c.pa.size = parseSize(args[1])
  2609  	case 3:
  2610  		c.pa.reply = args[1]
  2611  		c.pa.queues = nil
  2612  		c.pa.szb = args[2]
  2613  		c.pa.size = parseSize(args[2])
  2614  	default:
  2615  		// args[1] is our reply indicator. Should be + or | normally.
  2616  		if len(args[1]) != 1 {
  2617  			return fmt.Errorf("processLeafMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
  2618  		}
  2619  		switch args[1][0] {
  2620  		case '+':
  2621  			c.pa.reply = args[2]
  2622  		case '|':
  2623  			c.pa.reply = nil
  2624  		default:
  2625  			return fmt.Errorf("processLeafMsgArgs Bad or Missing Reply Indicator: '%s'", args[1])
  2626  		}
  2627  		// Grab size.
  2628  		c.pa.szb = args[len(args)-1]
  2629  		c.pa.size = parseSize(c.pa.szb)
  2630  
  2631  		// Grab queue names.
  2632  		if c.pa.reply != nil {
  2633  			c.pa.queues = args[3 : len(args)-1]
  2634  		} else {
  2635  			c.pa.queues = args[2 : len(args)-1]
  2636  		}
  2637  	}
  2638  	if c.pa.size < 0 {
  2639  		return fmt.Errorf("processLeafMsgArgs Bad or Missing Size: '%s'", args)
  2640  	}
  2641  
  2642  	// Common ones processed after check for arg length
  2643  	c.pa.subject = args[0]
  2644  
  2645  	return nil
  2646  }
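
        // Same layouts as the header variant above, minus the header size, e.g.
        // "foo 56", "foo reply 56", or "foo | q1 q2 56".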
  2647  
  2648  // processInboundLeafMsg is called to process an inbound msg from a leaf node.
  2649  func (c *client) processInboundLeafMsg(msg []byte) {
  2650  	// Update statistics
  2651  	// The msg includes the CR_LF, so pull back out for accounting.
  2652  	c.in.msgs++
  2653  	c.in.bytes += int32(len(msg) - LEN_CR_LF)
  2654  
  2655  	srv, acc, subject := c.srv, c.acc, string(c.pa.subject)
  2656  
  2657  	// Mostly under testing scenarios.
  2658  	if srv == nil || acc == nil {
  2659  		return
  2660  	}
  2661  
  2662  	// Match the subscriptions. We will use our own L1 map if
  2663  	// it's still valid, avoiding contention on the shared sublist.
  2664  	var r *SublistResult
  2665  	var ok bool
  2666  
  2667  	genid := atomic.LoadUint64(&c.acc.sl.genid)
  2668  	if genid == c.in.genid && c.in.results != nil {
  2669  		r, ok = c.in.results[subject]
  2670  	} else {
  2671  		// Reset our L1 completely.
  2672  		c.in.results = make(map[string]*SublistResult)
  2673  		c.in.genid = genid
  2674  	}
  2675  
  2676  	// Go back to the sublist data structure.
  2677  	if !ok {
  2678  		r = c.acc.sl.Match(subject)
  2679  		c.in.results[subject] = r
  2680  		// Prune the results cache. Keeps us from unbounded growth. Random delete.
  2681  		if len(c.in.results) > maxResultCacheSize {
  2682  			n := 0
  2683  			for subj := range c.in.results {
  2684  				delete(c.in.results, subj)
  2685  				if n++; n > pruneSize {
  2686  					break
  2687  				}
  2688  			}
  2689  		}
  2690  	}
  2691  
  2692  	// Collect queue names if needed.
  2693  	var qnames [][]byte
  2694  
  2695  	// Check for no interest, short circuit if so.
  2696  	// This is the fanout scale.
  2697  	if len(r.psubs)+len(r.qsubs) > 0 {
  2698  		flag := pmrNoFlag
  2699  		// If we have queue subs in this cluster, then if we run in gateway
  2700  		// mode and the remote gateways have queue subs, then we need to
  2701  		// collect the queue groups this message was sent to so that we
  2702  		// exclude them when sending to gateways.
  2703  		if len(r.qsubs) > 0 && c.srv.gateway.enabled &&
  2704  			atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 {
  2705  			flag |= pmrCollectQueueNames
  2706  		}
  2707  		// If this is a mapped subject that means the mapped interest
  2708  		// is what got us here, but this might not have a queue designation
  2709  		// If that is the case, make sure we do not process local queue subscribers.
  2710  		if len(c.pa.mapped) > 0 && len(c.pa.queues) == 0 {
  2711  			flag |= pmrIgnoreEmptyQueueFilter
  2712  		}
  2713  		_, qnames = c.processMsgResults(acc, r, msg, nil, c.pa.subject, c.pa.reply, flag)
  2714  	}
  2715  
  2716  	// Now deal with gateways
  2717  	if c.srv.gateway.enabled {
  2718  		c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, qnames)
  2719  	}
  2720  }
  2721  
  2722  // Handles a subscription permission violation.
  2723  // See leafPermViolation() for details.
  2724  func (c *client) leafSubPermViolation(subj []byte) {
  2725  	c.leafPermViolation(false, subj)
  2726  }
  2727  
  2728  // Common function to process publish or subscribe leafnode permission violation.
  2729  // Sends the permission violation error to the remote, logs it and closes the connection.
  2730  // If this is from a server soliciting, the reconnection will be delayed.
  2731  func (c *client) leafPermViolation(pub bool, subj []byte) {
  2732  	if c.isSpokeLeafNode() {
  2733  		// For spokes these are no-ops since the hub server told us our permissions.
  2734  		// We just need to not send these over to the other side since we will get cut off.
  2735  		return
  2736  	}
  2737  	// FIXME(dlc) ?
  2738  	c.setLeafConnectDelayIfSoliciting(leafNodeReconnectAfterPermViolation)
  2739  	var action string
  2740  	if pub {
  2741  		c.sendErr(fmt.Sprintf("Permissions Violation for Publish to %q", subj))
  2742  		action = "Publish"
  2743  	} else {
  2744  		c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q", subj))
  2745  		action = "Subscription"
  2746  	}
  2747  	c.Errorf("%s Violation on %q - Check other side configuration", action, subj)
  2748  	// TODO: add a new close reason that is more appropriate?
  2749  	c.closeConnection(ProtocolViolation)
  2750  }
  2751  
  2752  // Invoked from generic processErr() for LEAF connections.
  2753  func (c *client) leafProcessErr(errStr string) {
  2754  	// Check if we got a cluster name collision.
  2755  	if strings.Contains(errStr, ErrLeafNodeHasSameClusterName.Error()) {
  2756  		_, delay := c.setLeafConnectDelayIfSoliciting(leafNodeReconnectDelayAfterClusterNameSame)
  2757  		c.Errorf("Leafnode connection dropped with same cluster name error. Delaying attempt to reconnect for %v", delay)
  2758  		return
  2759  	}
  2760  
  2761  	// We will look for a "Loop detected" error coming from the other side.
  2762  	// If we solicit, set the connect delay.
  2763  	if !strings.Contains(errStr, "Loop detected") {
  2764  		return
  2765  	}
  2766  	c.handleLeafNodeLoop(false)
  2767  }
  2768  
  2769  // If this leaf connection is solicited, sets the connect delay to the given value,
  2770  // or to the one from the server option's LeafNode.connDelay if set (for tests).
  2771  // Returns the connection's account name and delay.
  2772  func (c *client) setLeafConnectDelayIfSoliciting(delay time.Duration) (string, time.Duration) {
  2773  	c.mu.Lock()
  2774  	if c.isSolicitedLeafNode() {
  2775  		if s := c.srv; s != nil {
  2776  			if srvdelay := s.getOpts().LeafNode.connDelay; srvdelay != 0 {
  2777  				delay = srvdelay
  2778  			}
  2779  		}
  2780  		c.leaf.remote.setConnectDelay(delay)
  2781  	}
  2782  	accName := c.acc.Name
  2783  	c.mu.Unlock()
  2784  	return accName, delay
  2785  }
  2786  
  2787  // For the given remote leafnode configuration, this function returns
  2788  // whether TLS is required, and if so, returns a clone of the TLS Config
  2789  // (since some fields will be changed during the handshake), the remembered
  2790  // TLS server name, and the TLS timeout.
  2791  func (c *client) leafNodeGetTLSConfigForSolicit(remote *leafNodeCfg) (bool, *tls.Config, string, float64) {
  2792  	var (
  2793  		tlsConfig  *tls.Config
  2794  		tlsName    string
  2795  		tlsTimeout float64
  2796  	)
  2797  
  2798  	remote.RLock()
  2799  	defer remote.RUnlock()
  2800  
  2801  	tlsRequired := remote.TLS || remote.TLSConfig != nil
  2802  	if tlsRequired {
  2803  		if remote.TLSConfig != nil {
  2804  			tlsConfig = remote.TLSConfig.Clone()
  2805  		} else {
  2806  			tlsConfig = &tls.Config{MinVersion: tls.VersionTLS12}
  2807  		}
  2808  		tlsName = remote.tlsName
  2809  		tlsTimeout = remote.TLSTimeout
  2810  		if tlsTimeout == 0 {
  2811  			tlsTimeout = float64(TLS_TIMEOUT / time.Second)
  2812  		}
  2813  	}
  2814  
  2815  	return tlsRequired, tlsConfig, tlsName, tlsTimeout
  2816  }
  2817  
  2818  // Initiates the LeafNode Websocket connection by:
  2819  // - doing the TLS handshake if needed
  2820  // - sending the HTTP request
  2821  // - waiting for the HTTP response
  2822  //
  2823  // Since a bufio reader is used to consume the HTTP response, this function
  2824  // returns the slice of buffered bytes (if any) so that the readLoop that will
  2825  // be started after that consumes those first before reading from the socket.
  2826  // It also returns the ClosedState to report if an error occurred.
  2827  //
  2828  // Lock held on entry.
  2829  func (c *client) leafNodeSolicitWSConnection(opts *Options, rURL *url.URL, remote *leafNodeCfg) ([]byte, ClosedState, error) {
  2830  	remote.RLock()
  2831  	compress := remote.Websocket.Compression
  2832  	// By default the server will mask outbound frames, but it can be disabled with this option.
  2833  	noMasking := remote.Websocket.NoMasking
  2834  	remote.RUnlock()
  2835  	// Will do the client-side TLS handshake if needed.
  2836  	tlsRequired, err := c.leafClientHandshakeIfNeeded(remote, opts)
  2837  	if err != nil {
  2838  		// 0 will indicate that the connection was already closed
  2839  		return nil, 0, err
  2840  	}
  2841  
  2842  	// For the HTTP request, we need the passed URL to contain either the http or https scheme.
  2843  	scheme := "http"
  2844  	if tlsRequired {
  2845  		scheme = "https"
  2846  	}
  2847  	// We will use the `/leafnode` path to tell the accepting WS server that it should
  2848  	// create a LEAF connection, not a CLIENT.
  2849  	// If the user's URL contains a path, our `/leafnode` path is appended
  2850  	// to it (the user's path comes first).
  2851  	lpath := leafNodeWSPath
  2852  	if curPath := rURL.EscapedPath(); curPath != _EMPTY_ {
  2853  		if curPath[0] == '/' {
  2854  			curPath = curPath[1:]
  2855  		}
  2856  		lpath = path.Join(curPath, lpath)
  2857  	} else {
  2858  		lpath = lpath[1:]
  2859  	}
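        	// Editor's note: a remote URL of "wss://host/ws" yields lpath
        	// "ws/leafnode", while a URL without a path yields "leafnode";
        	// the leading slash is supplied by the format string below.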
  2860  	ustr := fmt.Sprintf("%s://%s/%s", scheme, rURL.Host, lpath)
  2861  	u, _ := url.Parse(ustr)
  2862  	req := &http.Request{
  2863  		Method:     "GET",
  2864  		URL:        u,
  2865  		Proto:      "HTTP/1.1",
  2866  		ProtoMajor: 1,
  2867  		ProtoMinor: 1,
  2868  		Header:     make(http.Header),
  2869  		Host:       u.Host,
  2870  	}
  2871  	wsKey, err := wsMakeChallengeKey()
  2872  	if err != nil {
  2873  		return nil, WriteError, err
  2874  	}
  2875  
  2876  	req.Header["Upgrade"] = []string{"websocket"}
  2877  	req.Header["Connection"] = []string{"Upgrade"}
  2878  	req.Header["Sec-WebSocket-Key"] = []string{wsKey}
  2879  	req.Header["Sec-WebSocket-Version"] = []string{"13"}
  2880  	if compress {
  2881  		req.Header.Add("Sec-WebSocket-Extensions", wsPMCReqHeaderValue)
  2882  	}
  2883  	if noMasking {
  2884  		req.Header.Add(wsNoMaskingHeader, wsNoMaskingValue)
  2885  	}
  2886  	if err := req.Write(c.nc); err != nil {
  2887  		return nil, WriteError, err
  2888  	}
  2889  
  2890  	var resp *http.Response
  2891  
  2892  	br := bufio.NewReaderSize(c.nc, MAX_CONTROL_LINE_SIZE)
  2893  	c.nc.SetReadDeadline(time.Now().Add(DEFAULT_LEAFNODE_INFO_WAIT))
  2894  	resp, err = http.ReadResponse(br, req)
  2895  	if err == nil &&
  2896  		(resp.StatusCode != 101 ||
  2897  			!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
  2898  			!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
  2899  			resp.Header.Get("Sec-Websocket-Accept") != wsAcceptKey(wsKey)) {
  2900  
  2901  		err = fmt.Errorf("invalid websocket connection")
  2902  	}
  2903  	// Check compression extension...
  2904  	if err == nil && c.ws.compress {
  2905  		// Check not only that the permessage-deflate extension is present,
  2906  		// but also that both server and client no-context-takeover are set.
  2907  		srvCompress, noCtxTakeover := wsPMCExtensionSupport(resp.Header, false)
  2908  
  2909  		// If the server does not support compression, simply disable it on our side.
  2910  		if !srvCompress {
  2911  			c.ws.compress = false
  2912  		} else if !noCtxTakeover {
  2913  			err = fmt.Errorf("compression negotiation error")
  2914  		}
  2915  	}
  2916  	// Same for no masking...
  2917  	if err == nil && noMasking {
  2918  		// Check if server accepts no masking
  2919  		if resp.Header.Get(wsNoMaskingHeader) != wsNoMaskingValue {
  2920  			// Nope, need to mask our writes as any client would do.
  2921  			c.ws.maskwrite = true
  2922  		}
  2923  	}
  2924  	if resp != nil {
  2925  		resp.Body.Close()
  2926  	}
  2927  	if err != nil {
  2928  		return nil, ReadError, err
  2929  	}
  2930  	c.Debugf("Leafnode compression=%v masking=%v", c.ws.compress, c.ws.maskwrite)
  2931  
  2932  	var preBuf []byte
  2933  	// We have to slurp whatever is in the bufio reader and pass that to the readloop.
  2934  	if n := br.Buffered(); n != 0 {
  2935  		preBuf, _ = br.Peek(n)
  2936  	}
  2937  	return preBuf, 0, nil
  2938  }
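
        // For reference (editor's note): the upgrade exchange above looks
        // roughly like this on the wire, shown here with the sample key/accept
        // pair from RFC 6455, and abridged (optional headers such as
        // User-Agent and the compression/no-masking extensions are omitted;
        // real values differ per connection):
        //
        //	GET /leafnode HTTP/1.1
        //	Host: 127.0.0.1:7422
        //	Upgrade: websocket
        //	Connection: Upgrade
        //	Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
        //	Sec-WebSocket-Version: 13
        //
        //	HTTP/1.1 101 Switching Protocols
        //	Upgrade: websocket
        //	Connection: Upgrade
        //	Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=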
  2939  
  2940  const connectProcessTimeout = 2 * time.Second
  2941  
  2942  // This is invoked for remote LEAF connections after processing the INFO
  2943  // protocol.
  2944  func (s *Server) leafNodeResumeConnectProcess(c *client) {
  2945  	clusterName := s.ClusterName()
  2946  
  2947  	c.mu.Lock()
  2948  	if c.isClosed() {
  2949  		c.mu.Unlock()
  2950  		return
  2951  	}
  2952  	if err := c.sendLeafConnect(clusterName, c.headers); err != nil {
  2953  		c.mu.Unlock()
  2954  		c.closeConnection(WriteError)
  2955  		return
  2956  	}
  2957  
  2958  	// Spin up the write loop.
  2959  	s.startGoRoutine(func() { c.writeLoop() })
  2960  
  2961  	// Timer firing if leafNodeFinishConnectProcess is not invoked in time.
  2962  	c.ping.tmr = time.AfterFunc(connectProcessTimeout, func() {
  2963  		c.mu.Lock()
  2964  		// If leafNodeFinishConnectProcess already ran, do nothing; otherwise set the flag to prevent it from running later.
  2965  		if !c.flags.setIfNotSet(connectProcessFinished) {
  2966  			c.mu.Unlock()
  2967  			return
  2968  		}
  2969  		clearTimer(&c.ping.tmr)
  2970  		closed := c.isClosed()
  2971  		c.mu.Unlock()
  2972  		if !closed {
  2973  			c.sendErrAndDebug("Stale Leaf Node Connection - Closing")
  2974  			c.closeConnection(StaleConnection)
  2975  		}
  2976  	})
  2977  	c.mu.Unlock()
  2978  	c.Debugf("Remote leafnode connect msg sent")
  2979  }
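
        // Editor's sketch: the stale-connection timer above and
        // leafNodeFinishConnectProcess below race on connectProcessFinished;
        // setIfNotSet succeeds only for the first caller, so exactly one side
        // proceeds. A minimal illustration of the flag semantics (assuming the
        // clientFlag type backing c.flags):
        func exampleConnectProcessFlag() {
        	var flags clientFlag
        	fmt.Println(flags.setIfNotSet(connectProcessFinished)) // true: first caller wins
        	fmt.Println(flags.setIfNotSet(connectProcessFinished)) // false: already set
        }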
  2980  
  2981  // This is invoked for remote LEAF connections after processing the INFO
  2982  // protocol and leafNodeResumeConnectProcess.
  2983  // This registers the leaf node with its account and sends our subscription interest (LS+ protocols).
  2984  func (s *Server) leafNodeFinishConnectProcess(c *client) {
  2985  	c.mu.Lock()
  2986  	if !c.flags.setIfNotSet(connectProcessFinished) {
  2987  		c.mu.Unlock()
  2988  		return
  2989  	}
  2990  	if c.isClosed() {
  2991  		c.mu.Unlock()
  2992  		s.removeLeafNodeConnection(c)
  2993  		return
  2994  	}
  2995  	remote := c.leaf.remote
  2996  	// Check if we will need to send the system connect event.
  2997  	remote.RLock()
  2998  	sendSysConnectEvent := remote.Hub
  2999  	remote.RUnlock()
  3000  
  3001  	// Capture account before releasing lock
  3002  	acc := c.acc
  3003  	// cancel connectProcessTimeout
  3004  	clearTimer(&c.ping.tmr)
  3005  	c.mu.Unlock()
  3006  
  3007  	// Make sure we register with the account here.
  3008  	if err := c.registerWithAccount(acc); err != nil {
  3009  		if err == ErrTooManyAccountConnections {
  3010  			c.maxAccountConnExceeded()
  3011  			return
  3012  		} else if err == ErrLeafNodeLoop {
  3013  			c.handleLeafNodeLoop(true)
  3014  			return
  3015  		}
  3016  		c.Errorf("Registering leaf with account %s resulted in error: %v", acc.Name, err)
  3017  		c.closeConnection(ProtocolViolation)
  3018  		return
  3019  	}
  3020  	s.addLeafNodeConnection(c, _EMPTY_, _EMPTY_, false)
  3021  	s.initLeafNodeSmapAndSendSubs(c)
  3022  	if sendSysConnectEvent {
  3023  		s.sendLeafNodeConnect(acc)
  3024  	}
  3025  
  3026  // The above functions are not performed atomically under the
  3027  // client lock. It is possible - since the read/write loops have
  3028  // already started - that the connection gets closed before or in
  3029  // between those calls. This would leave the closed LN connection
  3030  // possibly registered with the account and/or the server's leafs
  3031  // map. So check if the connection is closed, and if so, manually
  3032  // clean up.
  3033  	c.mu.Lock()
  3034  	closed := c.isClosed()
  3035  	if !closed {
  3036  		c.setFirstPingTimer()
  3037  	}
  3038  	c.mu.Unlock()
  3039  	if closed {
  3040  		s.removeLeafNodeConnection(c)
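        		// Editor's note: prev is the account's client count before this
        		// removal; 1 means this leaf was the last client, so the server's
        		// active-accounts count is decremented.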
  3041  		if prev := acc.removeClient(c); prev == 1 {
  3042  			s.decActiveAccounts()
  3043  		}
  3044  	}
  3045  }