github.com/nats-io/nats-server/v2@v2.11.0-preview.2/server/jetstream.go

     1  // Copyright 2019-2024 The NATS Authors
     2  // Licensed under the Apache License, Version 2.0 (the "License");
     3  // you may not use this file except in compliance with the License.
     4  // You may obtain a copy of the License at
     5  //
     6  // http://www.apache.org/licenses/LICENSE-2.0
     7  //
     8  // Unless required by applicable law or agreed to in writing, software
     9  // distributed under the License is distributed on an "AS IS" BASIS,
    10  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package server
    15  
    16  import (
    17  	"crypto/hmac"
    18  	"crypto/sha256"
    19  	"encoding/binary"
    20  	"encoding/hex"
    21  	"encoding/json"
    22  	"fmt"
    23  	"math"
    24  	"os"
    25  	"path/filepath"
    26  	"runtime/debug"
    27  	"strconv"
    28  	"strings"
    29  	"sync"
    30  	"sync/atomic"
    31  	"time"
    32  
    33  	"github.com/minio/highwayhash"
    34  	"github.com/nats-io/nats-server/v2/server/sysmem"
    35  	"github.com/nats-io/nats-server/v2/server/tpm"
    36  	"github.com/nats-io/nkeys"
    37  	"github.com/nats-io/nuid"
    38  )
    39  
    40  // JetStreamConfig determines this server's configuration.
    41  // MaxMemory and MaxStore are in bytes.
    42  type JetStreamConfig struct {
    43  	MaxMemory    int64         `json:"max_memory"`
    44  	MaxStore     int64         `json:"max_storage"`
    45  	StoreDir     string        `json:"store_dir,omitempty"`
    46  	SyncInterval time.Duration `json:"sync_interval,omitempty"`
    47  	SyncAlways   bool          `json:"sync_always,omitempty"`
    48  	Domain       string        `json:"domain,omitempty"`
    49  	CompressOK   bool          `json:"compress_ok,omitempty"`
    50  	UniqueTag    string        `json:"unique_tag,omitempty"`
    51  }
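        // For orientation (a hedged sketch, not taken from this file): this struct is
        // normally populated from a "jetstream" block in the server configuration, e.g.
        //
        //	jetstream {
        //	  store_dir: "/data/nats"
        //	  max_memory_store: 1GB
        //	  max_file_store: 10GB
        //	  domain: hub
        //	}
        //
        // The option names above follow the documented server config; the values are examples.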
    52  
    53  // Statistics about JetStream for this server.
    54  type JetStreamStats struct {
    55  	Memory         uint64            `json:"memory"`
    56  	Store          uint64            `json:"storage"`
    57  	ReservedMemory uint64            `json:"reserved_memory"`
    58  	ReservedStore  uint64            `json:"reserved_storage"`
    59  	Accounts       int               `json:"accounts"`
    60  	HAAssets       int               `json:"ha_assets"`
    61  	API            JetStreamAPIStats `json:"api"`
    62  }
    63  
    64  type JetStreamAccountLimits struct {
    65  	MaxMemory            int64 `json:"max_memory"`
    66  	MaxStore             int64 `json:"max_storage"`
    67  	MaxStreams           int   `json:"max_streams"`
    68  	MaxConsumers         int   `json:"max_consumers"`
    69  	MaxAckPending        int   `json:"max_ack_pending"`
    70  	MemoryMaxStreamBytes int64 `json:"memory_max_stream_bytes"`
    71  	StoreMaxStreamBytes  int64 `json:"storage_max_stream_bytes"`
    72  	MaxBytesRequired     bool  `json:"max_bytes_required"`
    73  }
    74  
    75  type JetStreamTier struct {
    76  	Memory         uint64                 `json:"memory"`
    77  	Store          uint64                 `json:"storage"`
    78  	ReservedMemory uint64                 `json:"reserved_memory"`
    79  	ReservedStore  uint64                 `json:"reserved_storage"`
    80  	Streams        int                    `json:"streams"`
    81  	Consumers      int                    `json:"consumers"`
    82  	Limits         JetStreamAccountLimits `json:"limits"`
    83  }
    84  
    85  // JetStreamAccountStats returns current statistics about the account's JetStream usage.
    86  type JetStreamAccountStats struct {
    87  	JetStreamTier                          // in case tiers are used, reflects totals with limits not set
    88  	Domain        string                   `json:"domain,omitempty"`
    89  	API           JetStreamAPIStats        `json:"api"`
    90  	Tiers         map[string]JetStreamTier `json:"tiers,omitempty"` // indexed by tier name
    91  }
    92  
    93  type JetStreamAPIStats struct {
    94  	Total    uint64 `json:"total"`
    95  	Errors   uint64 `json:"errors"`
    96  	Inflight uint64 `json:"inflight,omitempty"`
    97  }
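        // Illustrative wire form of JetStreamAccountStats (values are made up and the
        // optional "tiers" map is omitted). The embedded JetStreamTier fields are
        // flattened into the top-level object by encoding/json:
        //
        //	{
        //	  "memory": 0, "storage": 1048576,
        //	  "reserved_memory": 0, "reserved_storage": 0,
        //	  "streams": 2, "consumers": 5,
        //	  "limits": {
        //	    "max_memory": -1, "max_storage": -1, "max_streams": -1, "max_consumers": -1,
        //	    "max_ack_pending": -1, "memory_max_stream_bytes": 0,
        //	    "storage_max_stream_bytes": 0, "max_bytes_required": false
        //	  },
        //	  "domain": "hub",
        //	  "api": {"total": 10, "errors": 0}
        //	}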
    98  
    99  // This is for internal accounting for JetStream for this server.
   100  type jetStream struct {
   101  	// These are here first because of atomics on 32-bit systems.
   102  	apiInflight   int64
   103  	apiTotal      int64
   104  	apiErrors     int64
   105  	memReserved   int64
   106  	storeReserved int64
   107  	memUsed       int64
   108  	storeUsed     int64
   109  	clustered     int32
   110  	mu            sync.RWMutex
   111  	srv           *Server
   112  	config        JetStreamConfig
   113  	cluster       *jetStreamCluster
   114  	accounts      map[string]*jsAccount
   115  	apiSubs       *Sublist
   116  	started       time.Time
   117  
   118  	// System level request to purge a stream move
   119  	accountPurge *subscription
   120  
   121  	// Some bools regarding general state.
   122  	metaRecovering bool
   123  	standAlone     bool
   124  	oos            bool
   125  	shuttingDown   bool
   126  
   127  	// Atomic versions
   128  	disabled atomic.Bool
   129  }
   130  
   131  type remoteUsage struct {
   132  	tiers map[string]*jsaUsage // indexed by tier name
   133  	api   uint64
   134  	err   uint64
   135  }
   136  
   137  type jsaStorage struct {
   138  	total jsaUsage
   139  	local jsaUsage
   140  }
   141  
   142  // This represents a jetstream enabled account.
   143  // Worth noting that we include the jetstream pointer; this is because
   144  // in general we want to be very efficient when receiving messages on
   145  // an internal sub for a stream, so we will link directly to the stream
   146  // and walk backwards as needed vs multiple hash lookups and locks, etc.
   147  type jsAccount struct {
   148  	mu        sync.RWMutex
   149  	js        *jetStream
   150  	account   *Account
   151  	storeDir  string
   152  	inflight  sync.Map
   153  	streams   map[string]*stream
   154  	templates map[string]*streamTemplate
   155  	store     TemplateStore
   156  
   157  	// From server
   158  	sendq *ipQueue[*pubMsg]
   159  
   160  	// For limiting only running one checkAndSync at a time.
   161  	sync atomic.Bool
   162  
   163  	// Usage/limits related fields that will be protected by usageMu
   164  	usageMu    sync.RWMutex
   165  	limits     map[string]JetStreamAccountLimits // indexed by tierName
   166  	usage      map[string]*jsaStorage            // indexed by tierName
   167  	rusage     map[string]*remoteUsage           // indexed by node id
   168  	apiTotal   uint64
   169  	apiErrors  uint64
   170  	usageApi   uint64
   171  	usageErr   uint64
   172  	updatesPub string
   173  	updatesSub *subscription
   174  	lupdate    time.Time
   175  	utimer     *time.Timer
   176  }
   177  
   178  // Track general usage for this account.
   179  type jsaUsage struct {
   180  	mem   int64
   181  	store int64
   182  }
   183  
   184  // EnableJetStream will enable JetStream support on this server with the given configuration.
   185  // A nil configuration will dynamically choose the limits and temporary file storage directory.
   186  func (s *Server) EnableJetStream(config *JetStreamConfig) error {
   187  	if s.JetStreamEnabled() {
   188  		return fmt.Errorf("jetstream already enabled")
   189  	}
   190  
   191  	s.Noticef("Starting JetStream")
   192  	if config == nil || config.MaxMemory <= 0 || config.MaxStore <= 0 {
   193  		var storeDir, domain, uniqueTag string
   194  		var maxStore, maxMem int64
   195  		if config != nil {
   196  			storeDir, domain, uniqueTag = config.StoreDir, config.Domain, config.UniqueTag
   197  			maxStore, maxMem = config.MaxStore, config.MaxMemory
   198  		}
   199  		config = s.dynJetStreamConfig(storeDir, maxStore, maxMem)
   200  		if maxMem > 0 {
   201  			config.MaxMemory = maxMem
   202  		}
   203  		if domain != _EMPTY_ {
   204  			config.Domain = domain
   205  		}
   206  		if uniqueTag != _EMPTY_ {
   207  			config.UniqueTag = uniqueTag
   208  		}
   209  		s.Debugf("JetStream creating dynamic configuration - %s memory, %s disk", friendlyBytes(config.MaxMemory), friendlyBytes(config.MaxStore))
   210  	} else if config.StoreDir != _EMPTY_ {
   211  		config.StoreDir = filepath.Join(config.StoreDir, JetStreamStoreDir)
   212  	}
   213  
   214  	cfg := *config
   215  	if cfg.StoreDir == _EMPTY_ {
   216  		cfg.StoreDir = filepath.Join(os.TempDir(), JetStreamStoreDir)
   217  	}
   218  
   219  	// We will consistently place the 'jetstream' directory under the storedir that was handed to us. Prior to 2.2.3 though
   220  	// we could have a directory on disk without the 'jetstream' directory. This will check and fix if needed.
   221  	if err := s.checkStoreDir(&cfg); err != nil {
   222  		return err
   223  	}
   224  
   225  	return s.enableJetStream(cfg)
   226  }
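        // Illustrative usage when embedding the server (a sketch; the limits and the
        // directory below are assumptions for the example):
        //
        //	cfg := &JetStreamConfig{
        //		MaxMemory: 1 << 30, // 1GiB of memory-backed storage
        //		MaxStore:  8 << 30, // 8GiB of file-backed storage
        //		StoreDir:  "/data/nats",
        //	}
        //	if err := s.EnableJetStream(cfg); err != nil {
        //		// handle error
        //	}
        //
        // Passing nil, or non-positive MaxMemory/MaxStore, falls back to the dynamic
        // configuration path above.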
   227  
   228  // Function signature to generate a key encryption key.
   229  type keyGen func(context []byte) ([]byte, error)
   230  
   231  // Return a key generation function or nil if encryption is not enabled.
   232  // The keyGen type is defined above: keyGen func(context []byte) ([]byte, error).
   233  func (s *Server) jsKeyGen(jsKey, info string) keyGen {
   234  	if ek := jsKey; ek != _EMPTY_ {
   235  		return func(context []byte) ([]byte, error) {
   236  			h := hmac.New(sha256.New, []byte(ek))
   237  			if _, err := h.Write([]byte(info)); err != nil {
   238  				return nil, err
   239  			}
   240  			if _, err := h.Write(context); err != nil {
   241  				return nil, err
   242  			}
   243  			return h.Sum(nil), nil
   244  		}
   245  	}
   246  	return nil
   247  }
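        // For reference, the returned function derives per-context key material as
        // HMAC-SHA256(jsKey, info || context). A minimal caller sketch (the account name
        // and the "mystream" context are assumptions for the example):
        //
        //	if prf := s.jsKeyGen(s.getOpts().JetStreamKey, accName); prf != nil {
        //		if seed, err := prf([]byte("mystream")); err == nil {
        //			// seed then feeds genEncryptionKey for the configured cipher,
        //			// as decryptMeta does below.
        //			_ = seed
        //		}
        //	}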
   248  
   249  // Decode the encrypted metafile.
   250  func (s *Server) decryptMeta(sc StoreCipher, ekey, buf []byte, acc, context string) ([]byte, bool, error) {
   251  	if len(ekey) < minMetaKeySize {
   252  		return nil, false, errBadKeySize
   253  	}
   254  	var osc StoreCipher
   255  	switch sc {
   256  	case AES:
   257  		osc = ChaCha
   258  	case ChaCha:
   259  		osc = AES
   260  	}
   261  	type prfWithCipher struct {
   262  		keyGen
   263  		StoreCipher
   264  	}
   265  	var prfs []prfWithCipher
   266  	if prf := s.jsKeyGen(s.getOpts().JetStreamKey, acc); prf == nil {
   267  		return nil, false, errNoEncryption
   268  	} else {
   269  		// First of all, try our current encryption keys with both
   270  		// store cipher algorithms.
   271  		prfs = append(prfs, prfWithCipher{prf, sc})
   272  		prfs = append(prfs, prfWithCipher{prf, osc})
   273  	}
   274  	if prf := s.jsKeyGen(s.getOpts().JetStreamOldKey, acc); prf != nil {
   275  		// Then, if we have an old encryption key, try also with
   276  		// both store cipher algorithms.
   277  		prfs = append(prfs, prfWithCipher{prf, sc})
   278  		prfs = append(prfs, prfWithCipher{prf, osc})
   279  	}
   280  
   281  	for i, prf := range prfs {
   282  		rb, err := prf.keyGen([]byte(context))
   283  		if err != nil {
   284  			continue
   285  		}
   286  		kek, err := genEncryptionKey(prf.StoreCipher, rb)
   287  		if err != nil {
   288  			continue
   289  		}
   290  		ns := kek.NonceSize()
   291  		seed, err := kek.Open(nil, ekey[:ns], ekey[ns:], nil)
   292  		if err != nil {
   293  			continue
   294  		}
   295  		aek, err := genEncryptionKey(prf.StoreCipher, seed)
   296  		if err != nil {
   297  			continue
   298  		}
   299  		if aek.NonceSize() != kek.NonceSize() {
   300  			continue
   301  		}
   302  		plain, err := aek.Open(nil, buf[:ns], buf[ns:], nil)
   303  		if err != nil {
   304  			continue
   305  		}
   306  		return plain, i > 0, nil
   307  	}
   308  	return nil, false, fmt.Errorf("unable to recover keys")
   309  }
   310  
   311  // Check to make sure the directory has the jetstream directory.
   312  // We will have it properly configured here now regardless, so we need to look inside.
   313  func (s *Server) checkStoreDir(cfg *JetStreamConfig) error {
   314  	fis, _ := os.ReadDir(cfg.StoreDir)
   315  	// If we have nothing underneath us, could be just starting new, but if we see this we can check.
   316  	if len(fis) != 0 {
   317  		return nil
   318  	}
   319  	// Let's check the directory above. If it has our 'jetstream' directory but also other entries
   320  	// that we can identify as accounts then we can fix things up.
   321  	fis, _ = os.ReadDir(filepath.Dir(cfg.StoreDir))
   322  	// If there is just one entry, it is us ('jetstream') and all is ok.
   323  	if len(fis) == 1 {
   324  		return nil
   325  	}
   326  
   327  	haveJetstreamDir := false
   328  	for _, fi := range fis {
   329  		if fi.Name() == JetStreamStoreDir {
   330  			haveJetstreamDir = true
   331  			break
   332  		}
   333  	}
   334  
   335  	for _, fi := range fis {
   336  		// Skip the 'jetstream' directory.
   337  		if fi.Name() == JetStreamStoreDir {
   338  			continue
   339  		}
   340  		// Let's see if this is an account.
   341  		if accName := fi.Name(); accName != _EMPTY_ {
   342  			_, ok := s.accounts.Load(accName)
   343  			if !ok && s.AccountResolver() != nil && nkeys.IsValidPublicAccountKey(accName) {
   344  				// Account is not local but matches the NKEY account public key,
   345  				// this is enough indication to move this directory, no need to
   346  				// fetch the account.
   347  				ok = true
   348  			}
   349  			// If this seems to be an account go ahead and move the directory. This will include all assets
   350  			// like streams and consumers.
   351  			if ok {
   352  				if !haveJetstreamDir {
   353  					err := os.Mkdir(filepath.Join(filepath.Dir(cfg.StoreDir), JetStreamStoreDir), defaultDirPerms)
   354  					if err != nil {
   355  						return err
   356  					}
   357  					haveJetstreamDir = true
   358  				}
   359  				old := filepath.Join(filepath.Dir(cfg.StoreDir), fi.Name())
   360  				new := filepath.Join(cfg.StoreDir, fi.Name())
   361  				s.Noticef("JetStream relocated account %q to %q", old, new)
   362  				if err := os.Rename(old, new); err != nil {
   363  					return err
   364  				}
   365  			}
   366  		}
   367  	}
   368  
   369  	return nil
   370  }
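        // Layout sketch for the relocation above (paths are illustrative): with a
        // configured store_dir of "/data", a pre-2.2.3 layout such as
        //
        //	/data/$G/streams/...
        //
        // is moved under the expected
        //
        //	/data/jetstream/$G/streams/...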
   371  
   372  // This function sets/updates the jetstream encryption key and cipher based
   373  // on options. If the TPM options have been specified, a key is generated
   374  // and sealed by the TPM.
   375  func (s *Server) initJetStreamEncryption() (err error) {
   376  	opts := s.getOpts()
   377  
   378  	// The TPM settings and other encryption settings are mutually exclusive.
   379  	if opts.JetStreamKey != _EMPTY_ && opts.JetStreamTpm.KeysFile != _EMPTY_ {
   380  		return fmt.Errorf("JetStream encryption key may not be used with TPM options")
   381  	}
   382  	// If we are using the standard method to set the encryption key, just return and carry on.
   383  	if opts.JetStreamKey != _EMPTY_ {
   384  		return nil
   385  	}
   386  	// If the TPM options are not used then no encryption has been configured, so just return.
   387  	if opts.JetStreamTpm.KeysFile == _EMPTY_ {
   388  		return nil
   389  	}
   390  
   391  	if opts.JetStreamTpm.Pcr == 0 {
   392  		// Default PCR to use in the TPM. Values can be 0-23, and most platforms
   393  		// reserve values 0-12 for the OS, boot locker, disc encryption, etc.
   394  		// 16 used for debugging. In sticking to NATS tradition, we'll use 22
   395  		// as the default with the option being configurable.
   396  		opts.JetStreamTpm.Pcr = 22
   397  	}
   398  
   399  	// Using the TPM to generate or get the encryption key and update the encryption options.
   400  	opts.JetStreamKey, err = tpm.LoadJetStreamEncryptionKeyFromTPM(opts.JetStreamTpm.SrkPassword,
   401  		opts.JetStreamTpm.KeysFile, opts.JetStreamTpm.KeyPassword, opts.JetStreamTpm.Pcr)
   402  
   403  	return err
   404  }
   405  
   406  // enableJetStream will start up the JetStream subsystem.
   407  func (s *Server) enableJetStream(cfg JetStreamConfig) error {
   408  	js := &jetStream{srv: s, config: cfg, accounts: make(map[string]*jsAccount), apiSubs: NewSublistNoCache()}
   409  	s.gcbMu.Lock()
   410  	if s.gcbOutMax = s.getOpts().JetStreamMaxCatchup; s.gcbOutMax == 0 {
   411  		s.gcbOutMax = defaultMaxTotalCatchupOutBytes
   412  	}
   413  	s.gcbMu.Unlock()
   414  
   415  	s.js.Store(js)
   416  
   417  	// FIXME(dlc) - Allow memory only operation?
   418  	if stat, err := os.Stat(cfg.StoreDir); os.IsNotExist(err) {
   419  		if err := os.MkdirAll(cfg.StoreDir, defaultDirPerms); err != nil {
   420  			return fmt.Errorf("could not create storage directory - %v", err)
   421  		}
   422  	} else {
   423  		// Make sure it's a directory and that we can write to it.
   424  		if stat == nil || !stat.IsDir() {
   425  			return fmt.Errorf("storage directory is not a directory")
   426  		}
   427  		tmpfile, err := os.CreateTemp(cfg.StoreDir, "_test_")
   428  		if err != nil {
   429  			return fmt.Errorf("storage directory is not writable")
   430  		}
   431  		tmpfile.Close()
   432  		os.Remove(tmpfile.Name())
   433  	}
   434  
   435  	if err := s.initJetStreamEncryption(); err != nil {
   436  		return err
   437  	}
   438  
   439  	// JetStream is an internal service so we need to make sure we have a system account.
   440  	// This system account will export the JetStream service endpoints.
   441  	if s.SystemAccount() == nil {
   442  		s.SetDefaultSystemAccount()
   443  	}
   444  
   445  	opts := s.getOpts()
   446  	if !opts.DisableJetStreamBanner {
   447  		s.Noticef("    _ ___ _____ ___ _____ ___ ___   _   __  __")
   448  		s.Noticef(" _ | | __|_   _/ __|_   _| _ \\ __| /_\\ |  \\/  |")
   449  		s.Noticef("| || | _|  | | \\__ \\ | | |   / _| / _ \\| |\\/| |")
   450  		s.Noticef(" \\__/|___| |_| |___/ |_| |_|_\\___/_/ \\_\\_|  |_|")
   451  		s.Noticef("")
   452  		s.Noticef("         https://docs.nats.io/jetstream")
   453  		s.Noticef("")
   454  	}
   455  	s.Noticef("---------------- JETSTREAM ----------------")
   456  	s.Noticef("  Max Memory:      %s", friendlyBytes(cfg.MaxMemory))
   457  	s.Noticef("  Max Storage:     %s", friendlyBytes(cfg.MaxStore))
   458  	s.Noticef("  Store Directory: \"%s\"", cfg.StoreDir)
   459  	if cfg.Domain != _EMPTY_ {
   460  		s.Noticef("  Domain:          %s", cfg.Domain)
   461  	}
   462  
   463  	if ek := opts.JetStreamKey; ek != _EMPTY_ {
   464  		s.Noticef("  Encryption:      %s", opts.JetStreamCipher)
   465  	}
   466  	if opts.JetStreamTpm.KeysFile != _EMPTY_ {
   467  		s.Noticef("  TPM File:        %q, Pcr: %d", opts.JetStreamTpm.KeysFile,
   468  			opts.JetStreamTpm.Pcr)
   469  	}
   470  	s.Noticef("-------------------------------------------")
   471  
   472  	// Setup our internal subscriptions.
   473  	if err := s.setJetStreamExportSubs(); err != nil {
   474  		return fmt.Errorf("setting up internal jetstream subscriptions failed: %v", err)
   475  	}
   476  
   477  	// Setup our internal system exports.
   478  	s.Debugf("  Exports:")
   479  	s.Debugf("     %s", jsAllAPI)
   480  	s.setupJetStreamExports()
   481  
   482  	standAlone, canExtend := s.standAloneMode(), s.canExtendOtherDomain()
   483  	if standAlone && canExtend && s.getOpts().JetStreamExtHint != jsWillExtend {
   484  		canExtend = false
   485  		s.Noticef("A standalone server started in clustered mode does not support extending domains")
   486  		s.Noticef(`Manually disable standalone mode by setting the JetStream Option "extension_hint: %s"`, jsWillExtend)
   487  	}
   488  
   489  	// Indicate if we will be standalone for checking resource reservations, etc.
   490  	js.setJetStreamStandAlone(standAlone && !canExtend)
   491  
   492  	// Enable accounts and restore state before starting clustering.
   493  	if err := s.enableJetStreamAccounts(); err != nil {
   494  		return err
   495  	}
   496  
   497  	// If we are in clustered mode go ahead and start the meta controller.
   498  	if !standAlone || canExtend {
   499  		if err := s.enableJetStreamClustering(); err != nil {
   500  			return err
   501  		}
   502  	}
   503  
   504  	// Mark when we are up and running.
   505  	js.setStarted()
   506  
   507  	return nil
   508  }
   509  
   510  const jsNoExtend = "no_extend"
   511  const jsWillExtend = "will_extend"
   512  
   513  // This will check if we have a solicited leafnode that shares the system account
   514  // and that extension is not manually disabled.
   515  func (s *Server) canExtendOtherDomain() bool {
   516  	opts := s.getOpts()
   517  	sysAcc := s.SystemAccount().GetName()
   518  	for _, r := range opts.LeafNode.Remotes {
   519  		if r.LocalAccount == sysAcc {
   520  			for _, denySub := range r.DenyImports {
   521  				if subjectIsSubsetMatch(denySub, raftAllSubj) {
   522  					return false
   523  				}
   524  			}
   525  			return true
   526  		}
   527  	}
   528  	return false
   529  }
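        // Config sketch of the case detected above (a hedged example; the URL and the
        // system account name "SYS" are assumptions): a solicited leafnode remote bound
        // to the local system account, with no deny rule covering the raft subjects.
        //
        //	leafnodes {
        //	  remotes [
        //	    { url: "nats-leaf://hub:7422", account: SYS }
        //	  ]
        //	}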
   530  
   531  func (s *Server) updateJetStreamInfoStatus(enabled bool) {
   532  	s.mu.Lock()
   533  	s.info.JetStream = enabled
   534  	s.mu.Unlock()
   535  }
   536  
   537  // restartJetStream will try to re-enable JetStream during a reload if it had been disabled during runtime.
   538  func (s *Server) restartJetStream() error {
   539  	opts := s.getOpts()
   540  	cfg := JetStreamConfig{
   541  		StoreDir:     opts.StoreDir,
   542  		SyncInterval: opts.SyncInterval,
   543  		SyncAlways:   opts.SyncAlways,
   544  		MaxMemory:    opts.JetStreamMaxMemory,
   545  		MaxStore:     opts.JetStreamMaxStore,
   546  		Domain:       opts.JetStreamDomain,
   547  	}
   548  	s.Noticef("Restarting JetStream")
   549  	err := s.EnableJetStream(&cfg)
   550  	if err != nil {
   551  		s.Warnf("Can't start JetStream: %v", err)
   552  		return s.DisableJetStream()
   553  	}
   554  	s.updateJetStreamInfoStatus(true)
   555  	return nil
   556  }
   557  
   558  // checkJetStreamExports will check if we have the JS exports setup
   559  // on the system account, and if not go ahead and set them up.
   560  func (s *Server) checkJetStreamExports() {
   561  	if sacc := s.SystemAccount(); sacc != nil {
   562  		sacc.mu.RLock()
   563  		se := sacc.getServiceExport(jsAllAPI)
   564  		sacc.mu.RUnlock()
   565  		if se == nil {
   566  			s.setupJetStreamExports()
   567  		}
   568  	}
   569  }
   570  
   571  func (s *Server) setupJetStreamExports() {
   572  	// Setup our internal system export.
   573  	if err := s.SystemAccount().AddServiceExport(jsAllAPI, nil); err != nil {
   574  		s.Warnf("Error setting up jetstream service exports: %v", err)
   575  	}
   576  }
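        // For context: jsAllAPI is the wildcard covering the whole JetStream API subject
        // space ($JS.API.>), so a single service export on the system account is enough
        // for accounts to import the entire API.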
   577  
   578  func (s *Server) jetStreamOOSPending() (wasPending bool) {
   579  	if js := s.getJetStream(); js != nil {
   580  		js.mu.Lock()
   581  		wasPending = js.oos
   582  		js.oos = true
   583  		js.mu.Unlock()
   584  	}
   585  	return wasPending
   586  }
   587  
   588  func (s *Server) setJetStreamDisabled() {
   589  	if js := s.getJetStream(); js != nil {
   590  		js.disabled.Store(true)
   591  	}
   592  }
   593  
   594  func (s *Server) handleOutOfSpace(mset *stream) {
   595  	if s.JetStreamEnabled() && !s.jetStreamOOSPending() {
   596  		var stream string
   597  		if mset != nil {
   598  			stream = mset.name()
   599  			s.Errorf("JetStream out of %s resources, will be DISABLED", mset.Store().Type())
   600  		} else {
   601  			s.Errorf("JetStream out of resources, will be DISABLED")
   602  		}
   603  
   604  		go s.DisableJetStream()
   605  
   606  		adv := &JSServerOutOfSpaceAdvisory{
   607  			TypedEvent: TypedEvent{
   608  				Type: JSServerOutOfStorageAdvisoryType,
   609  				ID:   nuid.Next(),
   610  				Time: time.Now().UTC(),
   611  			},
   612  			Server:   s.Name(),
   613  			ServerID: s.ID(),
   614  			Stream:   stream,
   615  			Cluster:  s.cachedClusterName(),
   616  			Domain:   s.getOpts().JetStreamDomain,
   617  		}
   618  		s.publishAdvisory(nil, JSAdvisoryServerOutOfStorage, adv)
   619  	}
   620  }
   621  
   622  // DisableJetStream will turn off JetStream and, in clustered mode, signal
   623  // the metacontroller to remove us from the peer list.
   624  func (s *Server) DisableJetStream() error {
   625  	if !s.JetStreamEnabled() {
   626  		return nil
   627  	}
   628  
   629  	s.setJetStreamDisabled()
   630  
   631  	if s.JetStreamIsClustered() {
   632  		isLeader := s.JetStreamIsLeader()
   633  		js, cc := s.getJetStreamCluster()
   634  		if js == nil {
   635  			s.shutdownJetStream()
   636  			return nil
   637  		}
   638  		js.mu.RLock()
   639  		meta := cc.meta
   640  		js.mu.RUnlock()
   641  
   642  		if meta != nil {
   643  			if isLeader {
   644  				s.Warnf("JetStream initiating meta leader transfer")
   645  				meta.StepDown()
   646  				select {
   647  				case <-s.quitCh:
   648  					return nil
   649  				case <-time.After(2 * time.Second):
   650  				}
   651  				if !s.JetStreamIsCurrent() {
   652  					s.Warnf("JetStream timeout waiting for meta leader transfer")
   653  				}
   654  			}
   655  			meta.Delete()
   656  		}
   657  	}
   658  
   659  	// Update our info status.
   660  	s.updateJetStreamInfoStatus(false)
   661  
   662  	// Normal shutdown.
   663  	s.shutdownJetStream()
   664  
   665  	// Shut down the RAFT groups.
   666  	s.shutdownRaftNodes()
   667  
   668  	return nil
   669  }
   670  
   671  func (s *Server) enableJetStreamAccounts() error {
   672  	// If we have no accounts configured then set up imports on the global account.
   673  	if s.globalAccountOnly() {
   674  		gacc := s.GlobalAccount()
   675  		gacc.mu.Lock()
   676  		if len(gacc.jsLimits) == 0 {
   677  			gacc.jsLimits = defaultJSAccountTiers
   678  		}
   679  		gacc.mu.Unlock()
   680  		if err := s.configJetStream(gacc); err != nil {
   681  			return err
   682  		}
   683  	} else if err := s.configAllJetStreamAccounts(); err != nil {
   684  		return fmt.Errorf("Error enabling jetstream on configured accounts: %v", err)
   685  	}
   686  	return nil
   687  }
   688  
   689  // enableAllJetStreamServiceImportsAndMappings turns on all service imports and mappings for jetstream for this account.
   690  func (a *Account) enableAllJetStreamServiceImportsAndMappings() error {
   691  	a.mu.RLock()
   692  	s := a.srv
   693  	a.mu.RUnlock()
   694  
   695  	if s == nil {
   696  		return fmt.Errorf("jetstream account not registered")
   697  	}
   698  
   699  	if !a.serviceImportExists(jsAllAPI) {
   700  		// Capture si so we can turn on implicit sharing with JetStream layer.
   701  		// Make sure to set "to", otherwise we will incur a performance slowdown.
   702  		si, err := a.addServiceImport(s.SystemAccount(), jsAllAPI, jsAllAPI, nil)
   703  		if err != nil {
   704  			return fmt.Errorf("Error setting up jetstream service imports for account: %v", err)
   705  		}
   706  		a.mu.Lock()
   707  		si.share = true
   708  		a.mu.Unlock()
   709  	}
   710  
   711  	// Check if we have a Domain specified.
   712  	// If so add in a subject mapping that will allow local connected clients to reach us here as well.
   713  	if opts := s.getOpts(); opts.JetStreamDomain != _EMPTY_ {
   714  		mappings := generateJSMappingTable(opts.JetStreamDomain)
   715  		a.mu.RLock()
   716  		for _, m := range a.mappings {
   717  			delete(mappings, m.src)
   718  		}
   719  		a.mu.RUnlock()
   720  		for src, dest := range mappings {
   721  			if err := a.AddMapping(src, dest); err != nil {
   722  				s.Errorf("Error adding JetStream domain mapping: %v", err)
   723  			}
   724  		}
   725  	}
   726  
   727  	return nil
   728  }
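        // For context (hedged): with a domain such as "hub" configured, the generated
        // mappings let locally connected clients use the domain-scoped form of the API,
        // e.g. requests to "$JS.hub.API.>" resolve to the canonical "$JS.API.>" subjects.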
   729  
   730  // enableJetStreamInfoServiceImportOnly will enable the single service import responder.
   731  // Should we do them all regardless?
   732  func (a *Account) enableJetStreamInfoServiceImportOnly() error {
   733  	// Check if this import would be overshadowed. This can happen when accounts
   734  	// are importing from another account for JS access.
   735  	if a.serviceImportShadowed(JSApiAccountInfo) {
   736  		return nil
   737  	}
   738  
   739  	return a.enableAllJetStreamServiceImportsAndMappings()
   740  }
   741  
   742  func (s *Server) configJetStream(acc *Account) error {
   743  	if acc == nil {
   744  		return nil
   745  	}
   746  	acc.mu.RLock()
   747  	jsLimits := acc.jsLimits
   748  	acc.mu.RUnlock()
   749  	if jsLimits != nil {
   750  		// Check if already enabled. This can be during a reload.
   751  		if acc.JetStreamEnabled() {
   752  			if err := acc.enableAllJetStreamServiceImportsAndMappings(); err != nil {
   753  				return err
   754  			}
   755  			if err := acc.UpdateJetStreamLimits(jsLimits); err != nil {
   756  				return err
   757  			}
   758  		} else {
   759  			if err := acc.EnableJetStream(jsLimits); err != nil {
   760  				return err
   761  			}
   762  			if s.gateway.enabled {
   763  				s.switchAccountToInterestMode(acc.GetName())
   764  			}
   765  		}
   766  	} else if acc != s.SystemAccount() {
   767  		if acc.JetStreamEnabled() {
   768  			acc.DisableJetStream()
   769  		}
   770  		// We will set up basic service imports to respond to
   771  		// requests if JS is enabled for this account.
   772  		if err := acc.enableJetStreamInfoServiceImportOnly(); err != nil {
   773  			return err
   774  		}
   775  	}
   776  	return nil
   777  }
   778  
   779  // configAllJetStreamAccounts walks all configured accounts and turns on jetstream if requested.
   780  func (s *Server) configAllJetStreamAccounts() error {
   781  	// Check to see if system account has been enabled. We could arrive here via reload and
   782  	// a non-default system account.
   783  	s.checkJetStreamExports()
   784  
   785  	// Bail if server not enabled. If it was enabled and a reload turns it off
   786  	// that will be handled elsewhere.
   787  	js := s.getJetStream()
   788  	if js == nil {
   789  		return nil
   790  	}
   791  
   792  	// Snapshot into our own list. Might not be needed.
   793  	s.mu.RLock()
   794  	if s.sys != nil {
   795  		// clustered stream removal will perform this cleanup as well
   796  		// this is mainly for initial cleanup
   797  		saccName := s.sys.account.Name
   798  		accStoreDirs, _ := os.ReadDir(js.config.StoreDir)
   799  		for _, acc := range accStoreDirs {
   800  			if accName := acc.Name(); accName != saccName {
   801  				// no op if not empty
   802  				accDir := filepath.Join(js.config.StoreDir, accName)
   803  				os.Remove(filepath.Join(accDir, streamsDir))
   804  				os.Remove(accDir)
   805  			}
   806  		}
   807  	}
   808  
   809  	var jsAccounts []*Account
   810  	s.accounts.Range(func(k, v any) bool {
   811  		jsAccounts = append(jsAccounts, v.(*Account))
   812  		return true
   813  	})
   814  	accounts := &s.accounts
   815  	s.mu.RUnlock()
   816  
   817  	// Process any jetstream enabled accounts here. These will be accounts we are
   818  	// already aware of at startup etc.
   819  	for _, acc := range jsAccounts {
   820  		if err := s.configJetStream(acc); err != nil {
   821  			return err
   822  		}
   823  	}
   824  
   825  	// Now walk all the storage we have and resolve any accounts that we did not process already.
   826  	// This is important in resolver/operator models.
   827  	fis, _ := os.ReadDir(js.config.StoreDir)
   828  	for _, fi := range fis {
   829  		if accName := fi.Name(); accName != _EMPTY_ {
   830  			// Only load up ones not already loaded since they are processed above.
   831  			if _, ok := accounts.Load(accName); !ok {
   832  				if acc, err := s.lookupAccount(accName); err == nil && acc != nil {
   833  					if err := s.configJetStream(acc); err != nil {
   834  						return err
   835  					}
   836  				}
   837  			}
   838  		}
   839  	}
   840  
   841  	return nil
   842  }
   843  
   844  // Mark our started time.
   845  func (js *jetStream) setStarted() {
   846  	js.mu.Lock()
   847  	defer js.mu.Unlock()
   848  	js.started = time.Now()
   849  }
   850  
   851  func (js *jetStream) isEnabled() bool {
   852  	if js == nil {
   853  		return false
   854  	}
   855  	return !js.disabled.Load()
   856  }
   857  
   858  // Mark that we will be in standalone mode.
   859  func (js *jetStream) setJetStreamStandAlone(isStandAlone bool) {
   860  	if js == nil {
   861  		return
   862  	}
   863  	js.mu.Lock()
   864  	defer js.mu.Unlock()
   865  	if js.standAlone = isStandAlone; js.standAlone {
   866  		// Update our server atomic.
   867  		js.srv.isMetaLeader.Store(true)
   868  		js.accountPurge, _ = js.srv.systemSubscribe(JSApiAccountPurge, _EMPTY_, false, nil, js.srv.jsLeaderAccountPurgeRequest)
   869  	} else if js.accountPurge != nil {
   870  		js.srv.sysUnsubscribe(js.accountPurge)
   871  	}
   872  }
   873  
   874  // JetStreamEnabled reports if jetstream is enabled for this server.
   875  func (s *Server) JetStreamEnabled() bool {
   876  	return s.getJetStream().isEnabled()
   877  }
   878  
   879  // JetStreamEnabledForDomain will report if any servers have JetStream enabled within this domain.
   880  func (s *Server) JetStreamEnabledForDomain() bool {
   881  	if s.JetStreamEnabled() {
   882  		return true
   883  	}
   884  
   885  	var jsFound bool
   886  	// If we are here we do not have JetStream enabled for ourselves, but we need to check all connected servers.
   887  	// TODO(dlc) - Could optimize and memoize this.
   888  	s.nodeToInfo.Range(func(k, v any) bool {
   889  		// This should not be dependent on online status, so only check js.
   890  		if v.(nodeInfo).js {
   891  			jsFound = true
   892  			return false
   893  		}
   894  		return true
   895  	})
   896  
   897  	return jsFound
   898  }
   899  
   900  // Will signal that all pull requests for consumers on this server are now invalid.
   901  func (s *Server) signalPullConsumers() {
   902  	js := s.getJetStream()
   903  	if js == nil {
   904  		return
   905  	}
   906  
   907  	js.mu.RLock()
   908  	defer js.mu.RUnlock()
   909  
   910  	// In case we have stale pending requests.
   911  	const hdr = "NATS/1.0 409 Server Shutdown\r\n" + JSPullRequestPendingMsgs + ": %d\r\n" + JSPullRequestPendingBytes + ": %d\r\n\r\n"
   912  	var didSend bool
   913  
   914  	for _, jsa := range js.accounts {
   915  		jsa.mu.RLock()
   916  		for _, stream := range jsa.streams {
   917  			stream.mu.RLock()
   918  			for _, o := range stream.consumers {
   919  				o.mu.RLock()
   920  				// Only signal on R1.
   921  				if o.cfg.Replicas <= 1 {
   922  					for reply, wr := range o.pendingRequests() {
   923  						shdr := fmt.Sprintf(hdr, wr.n, wr.b)
   924  						o.outq.send(newJSPubMsg(reply, _EMPTY_, _EMPTY_, []byte(shdr), nil, nil, 0))
   925  						didSend = true
   926  					}
   927  				}
   928  				o.mu.RUnlock()
   929  			}
   930  			stream.mu.RUnlock()
   931  		}
   932  		jsa.mu.RUnlock()
   933  	}
   934  	// Give time for migration information to make it out of our server.
   935  	if didSend {
   936  		time.Sleep(50 * time.Millisecond)
   937  	}
   938  }
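        // The 409 status sent above renders roughly as follows, assuming the usual
        // Nats-Pending-* header names behind the JSPullRequestPending* constants
        // (counts are illustrative):
        //
        //	NATS/1.0 409 Server Shutdown
        //	Nats-Pending-Messages: 1
        //	Nats-Pending-Bytes: 128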
   939  
   940  // Helper for determining if we are shutting down.
   941  func (js *jetStream) isShuttingDown() bool {
   942  	js.mu.RLock()
   943  	defer js.mu.RUnlock()
   944  	return js.shuttingDown
   945  }
   946  
   947  // Shutdown jetstream for this server.
   948  func (s *Server) shutdownJetStream() {
   949  	js := s.getJetStream()
   950  	if js == nil {
   951  		return
   952  	}
   953  
   954  	s.Noticef("Initiating JetStream Shutdown...")
   955  	defer s.Noticef("JetStream Shutdown")
   956  
   957  	// If we have folks blocked on sync requests, unblock.
   958  	// Send 1 is enough, but use select in case they were all present.
   959  	select {
   960  	case s.syncOutSem <- struct{}{}:
   961  	default:
   962  	}
   963  
   964  	var _a [512]*Account
   965  	accounts := _a[:0]
   966  
   967  	js.mu.Lock()
   968  	// Collect accounts.
   969  	for _, jsa := range js.accounts {
   970  		if a := jsa.acc(); a != nil {
   971  			accounts = append(accounts, a)
   972  		}
   973  	}
   974  	accPurgeSub := js.accountPurge
   975  	js.accountPurge = nil
   976  	// Signal we are shutting down.
   977  	js.shuttingDown = true
   978  	js.mu.Unlock()
   979  
   980  	if accPurgeSub != nil {
   981  		s.sysUnsubscribe(accPurgeSub)
   982  	}
   983  
   984  	for _, a := range accounts {
   985  		a.removeJetStream()
   986  	}
   987  
   988  	s.js.Store(nil)
   989  
   990  	js.mu.Lock()
   991  	js.accounts = nil
   992  
   993  	var qch chan struct{}
   994  
   995  	if cc := js.cluster; cc != nil {
   996  		if cc.qch != nil {
   997  			qch = cc.qch
   998  			cc.qch = nil
   999  		}
  1000  		js.stopUpdatesSub()
  1001  		if cc.c != nil {
  1002  			cc.c.closeConnection(ClientClosed)
  1003  			cc.c = nil
  1004  		}
  1005  		cc.meta = nil
  1006  	}
  1007  	js.mu.Unlock()
  1008  
  1009  	// If we were clustered, signal the cluster monitor goroutine.
  1010  	// We will wait for a bit for it to close.
  1011  	// Do this without the lock.
  1012  	if qch != nil {
  1013  		select {
  1014  		case qch <- struct{}{}:
  1015  			select {
  1016  			case <-qch:
  1017  			case <-time.After(2 * time.Second):
  1018  				s.Warnf("Did not receive signal for successful shutdown of cluster routine")
  1019  			}
  1020  		default:
  1021  		}
  1022  	}
  1023  }
  1024  
  1025  // JetStreamConfig will return the current config. Useful if the system
  1026  // created a dynamic configuration. A copy is returned.
  1027  func (s *Server) JetStreamConfig() *JetStreamConfig {
  1028  	var c *JetStreamConfig
  1029  	if js := s.getJetStream(); js != nil {
  1030  		copy := js.config
  1031  		c = &(copy)
  1032  	}
  1033  	return c
  1034  }
  1035  
  1036  // StoreDir returns the current JetStream directory.
  1037  func (s *Server) StoreDir() string {
  1038  	js := s.getJetStream()
  1039  	if js == nil {
  1040  		return _EMPTY_
  1041  	}
  1042  	return js.config.StoreDir
  1043  }
  1044  
  1045  // JetStreamNumAccounts returns the number of enabled accounts this server is tracking.
  1046  func (s *Server) JetStreamNumAccounts() int {
  1047  	js := s.getJetStream()
  1048  	if js == nil {
  1049  		return 0
  1050  	}
  1051  	js.mu.Lock()
  1052  	defer js.mu.Unlock()
  1053  	return len(js.accounts)
  1054  }
  1055  
  1056  // JetStreamReservedResources returns the reserved resources if JetStream is enabled.
  1057  func (s *Server) JetStreamReservedResources() (int64, int64, error) {
  1058  	js := s.getJetStream()
  1059  	if js == nil {
  1060  		return -1, -1, NewJSNotEnabledForAccountError()
  1061  	}
  1062  	js.mu.RLock()
  1063  	defer js.mu.RUnlock()
  1064  	return js.memReserved, js.storeReserved, nil
  1065  }
  1066  
  1067  func (s *Server) getJetStream() *jetStream {
  1068  	return s.js.Load()
  1069  }
  1070  
  1071  func (a *Account) assignJetStreamLimits(limits map[string]JetStreamAccountLimits) {
  1072  	a.mu.Lock()
  1073  	a.jsLimits = limits
  1074  	a.mu.Unlock()
  1075  }
  1076  
  1077  // EnableJetStream will enable JetStream on this account with the defined limits.
  1078  // This is a helper for JetStreamEnableAccount.
  1079  func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) error {
  1080  	a.mu.RLock()
  1081  	s := a.srv
  1082  	a.mu.RUnlock()
  1083  
  1084  	if s == nil {
  1085  		return fmt.Errorf("jetstream account not registered")
  1086  	}
  1087  
  1088  	if s.SystemAccount() == a {
  1089  		return fmt.Errorf("jetstream can not be enabled on the system account")
  1090  	}
  1091  
  1092  	s.mu.RLock()
  1093  	if s.sys == nil {
  1094  		s.mu.RUnlock()
  1095  		return ErrServerNotRunning
  1096  	}
  1097  	sendq := s.sys.sendq
  1098  	s.mu.RUnlock()
  1099  
  1100  	// No limits means we dynamically set up limits.
  1101  	// We also place limits here so we know that the account is configured for JetStream.
  1102  	if len(limits) == 0 {
  1103  		limits = defaultJSAccountTiers
  1104  	}
  1105  
  1106  	a.assignJetStreamLimits(limits)
  1107  
  1108  	js := s.getJetStream()
  1109  	if js == nil {
  1110  		return NewJSNotEnabledError()
  1111  	}
  1112  
  1113  	js.mu.Lock()
  1114  	if jsa, ok := js.accounts[a.Name]; ok {
  1115  		a.mu.Lock()
  1116  		a.js = jsa
  1117  		a.mu.Unlock()
  1118  		js.mu.Unlock()
  1119  		return a.enableAllJetStreamServiceImportsAndMappings()
  1120  	}
  1121  
  1122  	// Check the limits against existing reservations.
  1123  	if err := js.sufficientResources(limits); err != nil {
  1124  		js.mu.Unlock()
  1125  		return err
  1126  	}
  1127  
  1128  	sysNode := s.Node()
  1129  
  1130  	jsa := &jsAccount{js: js, account: a, limits: limits, streams: make(map[string]*stream), sendq: sendq, usage: make(map[string]*jsaStorage)}
  1131  	jsa.storeDir = filepath.Join(js.config.StoreDir, a.Name)
  1132  
  1133  	// A single server does not need to do the account updates at this point.
  1134  	if js.cluster != nil || !s.standAloneMode() {
  1135  		jsa.usageMu.Lock()
  1136  		jsa.utimer = time.AfterFunc(usageTick, jsa.sendClusterUsageUpdateTimer)
  1137  		// Cluster mode updates to resource usage. System internal prevents echoes.
  1138  		jsa.updatesPub = fmt.Sprintf(jsaUpdatesPubT, a.Name, sysNode)
  1139  		jsa.updatesSub, _ = s.sysSubscribe(fmt.Sprintf(jsaUpdatesSubT, a.Name), jsa.remoteUpdateUsage)
  1140  		jsa.usageMu.Unlock()
  1141  	}
  1142  
  1143  	js.accounts[a.Name] = jsa
  1144  	// Stamp inside account as well. Needs to be done under js's lock.
  1145  	a.mu.Lock()
  1146  	a.js = jsa
  1147  	a.mu.Unlock()
  1148  	js.mu.Unlock()
  1149  
  1150  	// Create the proper imports here.
  1151  	if err := a.enableAllJetStreamServiceImportsAndMappings(); err != nil {
  1152  		return err
  1153  	}
  1154  
  1155  	s.Debugf("Enabled JetStream for account %q", a.Name)
  1156  	if l, ok := limits[_EMPTY_]; ok {
  1157  		s.Debugf("  Max Memory:      %s", friendlyBytes(l.MaxMemory))
  1158  		s.Debugf("  Max Storage:     %s", friendlyBytes(l.MaxStore))
  1159  	} else {
  1160  		for t, l := range limits {
  1161  			s.Debugf("  Tier: %s", t)
  1162  			s.Debugf("    Max Memory:      %s", friendlyBytes(l.MaxMemory))
  1163  			s.Debugf("    Max Storage:     %s", friendlyBytes(l.MaxStore))
  1164  		}
  1165  	}
  1166  
  1167  	// Clean up any old snapshots that were orphaned while staging.
  1168  	os.RemoveAll(filepath.Join(js.config.StoreDir, snapStagingDir))
  1169  
  1170  	sdir := filepath.Join(jsa.storeDir, streamsDir)
  1171  	if _, err := os.Stat(sdir); os.IsNotExist(err) {
  1172  		if err := os.MkdirAll(sdir, defaultDirPerms); err != nil {
  1173  			return fmt.Errorf("could not create storage streams directory - %v", err)
  1174  		}
  1175  		// Just need to make sure we can write to the directory.
  1176  		// Remove the directory; it will be created again later if needed.
  1177  		os.RemoveAll(sdir)
  1178  		// When empty, remove the parent directory, which may have been created as well.
  1179  		os.Remove(jsa.storeDir)
  1180  	} else {
  1181  		// Restore any state here.
  1182  		s.Debugf("Recovering JetStream state for account %q", a.Name)
  1183  	}
  1184  
  1185  	// Check templates first since message sets will need proper ownership.
  1186  	// FIXME(dlc) - Make this consistent.
  1187  	tdir := filepath.Join(jsa.storeDir, tmplsDir)
  1188  	if stat, err := os.Stat(tdir); err == nil && stat.IsDir() {
  1189  		key := sha256.Sum256([]byte("templates"))
  1190  		hh, err := highwayhash.New64(key[:])
  1191  		if err != nil {
  1192  			return err
  1193  		}
  1194  		fis, _ := os.ReadDir(tdir)
  1195  		for _, fi := range fis {
  1196  			metafile := filepath.Join(tdir, fi.Name(), JetStreamMetaFile)
  1197  			metasum := filepath.Join(tdir, fi.Name(), JetStreamMetaFileSum)
  1198  			buf, err := os.ReadFile(metafile)
  1199  			if err != nil {
  1200  				s.Warnf("  Error reading StreamTemplate metafile %q: %v", metasum, err)
  1201  				continue
  1202  			}
  1203  			if _, err := os.Stat(metasum); os.IsNotExist(err) {
  1204  				s.Warnf("  Missing StreamTemplate checksum for %q", metasum)
  1205  				continue
  1206  			}
  1207  			sum, err := os.ReadFile(metasum)
  1208  			if err != nil {
  1209  				s.Warnf("  Error reading StreamTemplate checksum %q: %v", metasum, err)
  1210  				continue
  1211  			}
  1212  			hh.Reset()
  1213  			hh.Write(buf)
  1214  			checksum := hex.EncodeToString(hh.Sum(nil))
  1215  			if checksum != string(sum) {
  1216  				s.Warnf("  StreamTemplate checksums do not match %q vs %q", sum, checksum)
  1217  				continue
  1218  			}
  1219  			var cfg StreamTemplateConfig
  1220  			if err := json.Unmarshal(buf, &cfg); err != nil {
  1221  				s.Warnf("  Error unmarshalling StreamTemplate metafile: %v", err)
  1222  				continue
  1223  			}
  1224  			cfg.Config.Name = _EMPTY_
  1225  			if _, err := a.addStreamTemplate(&cfg); err != nil {
  1226  				s.Warnf("  Error recreating StreamTemplate %q: %v", cfg.Name, err)
  1227  				continue
  1228  			}
  1229  		}
  1230  	}
  1231  
  1232  	// Collect consumers, do after all streams.
  1233  	type ce struct {
  1234  		mset *stream
  1235  		odir string
  1236  	}
  1237  	var consumers []*ce
  1238  
  1239  	// Collect any interest policy streams to check for
  1240  	// https://github.com/nats-io/nats-server/issues/3612
  1241  	var ipstreams []*stream
  1242  
  1243  	// Remember if we should be encrypted and what cipher we think we should use.
  1244  	encrypted := s.getOpts().JetStreamKey != _EMPTY_
  1245  	plaintext := true
  1246  	sc := s.getOpts().JetStreamCipher
  1247  
  1248  	// Now recover the streams.
  1249  	fis, _ := os.ReadDir(sdir)
  1250  	for _, fi := range fis {
  1251  		mdir := filepath.Join(sdir, fi.Name())
  1252  		// Check for partially deleted streams. They are marked with "." prefix.
  1253  		if strings.HasPrefix(fi.Name(), tsep) {
  1254  			go os.RemoveAll(mdir)
  1255  			continue
  1256  		}
  1257  		key := sha256.Sum256([]byte(fi.Name()))
  1258  		hh, err := highwayhash.New64(key[:])
  1259  		if err != nil {
  1260  			return err
  1261  		}
  1262  		metafile := filepath.Join(mdir, JetStreamMetaFile)
  1263  		metasum := filepath.Join(mdir, JetStreamMetaFileSum)
  1264  		if _, err := os.Stat(metafile); os.IsNotExist(err) {
  1265  			s.Warnf("  Missing stream metafile for %q", metafile)
  1266  			continue
  1267  		}
  1268  		buf, err := os.ReadFile(metafile)
  1269  		if err != nil {
  1270  			s.Warnf("  Error reading metafile %q: %v", metafile, err)
  1271  			continue
  1272  		}
  1273  		if _, err := os.Stat(metasum); os.IsNotExist(err) {
  1274  			s.Warnf("  Missing stream checksum file %q", metasum)
  1275  			continue
  1276  		}
  1277  		sum, err := os.ReadFile(metasum)
  1278  		if err != nil {
  1279  			s.Warnf("  Error reading Stream metafile checksum %q: %v", metasum, err)
  1280  			continue
  1281  		}
  1282  		hh.Write(buf)
  1283  		checksum := hex.EncodeToString(hh.Sum(nil))
  1284  		if checksum != string(sum) {
  1285  			s.Warnf("  Stream metafile %q: checksums do not match %q vs %q", metafile, sum, checksum)
  1286  			continue
  1287  		}
  1288  
  1289  		// Track if we are converting ciphers.
  1290  		var convertingCiphers bool
  1291  
  1292  		// Check if we are encrypted.
  1293  		keyFile := filepath.Join(mdir, JetStreamMetaFileKey)
  1294  		keyBuf, err := os.ReadFile(keyFile)
  1295  		if err == nil {
  1296  			s.Debugf("  Stream metafile is encrypted, reading encrypted keyfile")
  1297  			if len(keyBuf) < minMetaKeySize {
  1298  				s.Warnf("  Bad stream encryption key length of %d", len(keyBuf))
  1299  				continue
  1300  			}
  1301  			// Decode the buffer before proceeding.
  1302  			var nbuf []byte
  1303  			nbuf, convertingCiphers, err = s.decryptMeta(sc, keyBuf, buf, a.Name, fi.Name())
  1304  			if err != nil {
  1305  				s.Warnf("  Error decrypting our stream metafile: %v", err)
  1306  				continue
  1307  			}
  1308  			buf = nbuf
  1309  			plaintext = false
  1310  		}
  1311  
  1312  		var cfg FileStreamInfo
  1313  		if err := json.Unmarshal(buf, &cfg); err != nil {
  1314  			s.Warnf("  Error unmarshalling stream metafile %q: %v", metafile, err)
  1315  			continue
  1316  		}
  1317  
  1318  		if cfg.Template != _EMPTY_ {
  1319  			if err := jsa.addStreamNameToTemplate(cfg.Template, cfg.Name); err != nil {
  1320  				s.Warnf("  Error adding stream %q to template %q: %v", cfg.Name, cfg.Template, err)
  1321  			}
  1322  		}
  1323  
  1324  		// We had a bug that set a default dedupe window on mirrors, despite that not being a valid config.
  1325  		fixCfgMirrorWithDedupWindow(&cfg.StreamConfig)
  1326  
  1327  		// We had a bug that could allow subjects with leading or trailing spaces. We check for that here
  1328  		// and will patch them on the fly for now, and we will warn about them.
  1329  		var hadSubjErr bool
  1330  		for i, subj := range cfg.StreamConfig.Subjects {
  1331  			if !IsValidSubject(subj) {
  1332  				s.Warnf("  Detected bad subject %q while adding stream %q, will attempt to repair", subj, cfg.Name)
  1333  				if nsubj := strings.TrimSpace(subj); IsValidSubject(nsubj) {
  1334  					s.Warnf("  Bad subject %q repaired to %q", subj, nsubj)
  1335  					cfg.StreamConfig.Subjects[i] = nsubj
  1336  				} else {
  1337  					s.Warnf("  Error recreating stream %q: %v", cfg.Name, "invalid subject")
  1338  					hadSubjErr = true
  1339  					break
  1340  				}
  1341  			}
  1342  		}
  1343  		if hadSubjErr {
  1344  			continue
  1345  		}
  1346  
  1347  		// The other possible bug is assigning subjects to mirrors, so check for that and patch as well.
  1348  		if cfg.StreamConfig.Mirror != nil && len(cfg.StreamConfig.Subjects) > 0 {
  1349  			s.Warnf("  Detected subjects on a mirrored stream %q, will remove", cfg.Name)
  1350  			cfg.StreamConfig.Subjects = nil
  1351  		}
  1352  
  1353  		s.Noticef("  Starting restore for stream '%s > %s'", a.Name, cfg.StreamConfig.Name)
  1354  		rt := time.Now()
  1355  
  1356  		// Log if we are converting from plaintext to encrypted.
  1357  		if encrypted {
  1358  			if plaintext {
  1359  				s.Noticef("  Encrypting stream '%s > %s'", a.Name, cfg.StreamConfig.Name)
  1360  			} else if convertingCiphers {
  1361  				s.Noticef("  Converting to %s for stream '%s > %s'", sc, a.Name, cfg.StreamConfig.Name)
  1362  				// Remove the key file to have system regenerate with the new cipher.
  1363  				os.Remove(keyFile)
  1364  			}
  1365  		}
  1366  
  1367  		// Add in the stream.
  1368  		mset, err := a.addStream(&cfg.StreamConfig)
  1369  		if err != nil {
  1370  			s.Warnf("  Error recreating stream %q: %v", cfg.Name, err)
  1371  			// If we removed a keyfile from above make sure to put it back.
  1372  			if convertingCiphers {
  1373  				err := os.WriteFile(keyFile, keyBuf, defaultFilePerms)
  1374  				if err != nil {
  1375  					s.Warnf("  Error replacing meta keyfile for stream %q: %v", cfg.Name, err)
  1376  				}
  1377  			}
  1378  			continue
  1379  		}
  1380  		if !cfg.Created.IsZero() {
  1381  			mset.setCreatedTime(cfg.Created)
  1382  		}
  1383  
  1384  		state := mset.state()
  1385  		s.Noticef("  Restored %s messages for stream '%s > %s' in %v",
  1386  			comma(int64(state.Msgs)), mset.accName(), mset.name(), time.Since(rt).Round(time.Millisecond))
  1387  
  1388  		// Collect to check for dangling messages.
  1389  		// TODO(dlc) - Can be removed eventually.
  1390  		if cfg.StreamConfig.Retention == InterestPolicy {
  1391  			ipstreams = append(ipstreams, mset)
  1392  		}
  1393  
  1394  		// Now do the consumers.
  1395  		odir := filepath.Join(sdir, fi.Name(), consumerDir)
  1396  		consumers = append(consumers, &ce{mset, odir})
  1397  	}
  1398  
  1399  	for _, e := range consumers {
  1400  		ofis, _ := os.ReadDir(e.odir)
  1401  		if len(ofis) > 0 {
  1402  			s.Noticef("  Recovering %d consumers for stream - '%s > %s'", len(ofis), e.mset.accName(), e.mset.name())
  1403  		}
  1404  		for _, ofi := range ofis {
  1405  			metafile := filepath.Join(e.odir, ofi.Name(), JetStreamMetaFile)
  1406  			metasum := filepath.Join(e.odir, ofi.Name(), JetStreamMetaFileSum)
  1407  			if _, err := os.Stat(metafile); os.IsNotExist(err) {
  1408  				s.Warnf("    Missing consumer metafile %q", metafile)
  1409  				continue
  1410  			}
  1411  			buf, err := os.ReadFile(metafile)
  1412  			if err != nil {
  1413  				s.Warnf("    Error reading consumer metafile %q: %v", metafile, err)
  1414  				continue
  1415  			}
  1416  			if _, err := os.Stat(metasum); os.IsNotExist(err) {
  1417  				s.Warnf("    Missing consumer checksum for %q", metasum)
  1418  				continue
  1419  			}
  1420  
  1421  			// Check if we are encrypted.
  1422  			if key, err := os.ReadFile(filepath.Join(e.odir, ofi.Name(), JetStreamMetaFileKey)); err == nil {
  1423  				s.Debugf("  Consumer metafile is encrypted, reading encrypted keyfile")
  1424  				// Decode the buffer before proceeding.
  1425  				ctxName := e.mset.name() + tsep + ofi.Name()
  1426  				nbuf, _, err := s.decryptMeta(sc, key, buf, a.Name, ctxName)
  1427  				if err != nil {
  1428  					s.Warnf("  Error decrypting our consumer metafile: %v", err)
  1429  					continue
  1430  				}
  1431  				buf = nbuf
  1432  			}
  1433  
  1434  			var cfg FileConsumerInfo
  1435  			if err := json.Unmarshal(buf, &cfg); err != nil {
  1436  				s.Warnf("    Error unmarshalling consumer metafile %q: %v", metafile, err)
  1437  				continue
  1438  			}
  1439  			isEphemeral := !isDurableConsumer(&cfg.ConsumerConfig)
  1440  			if isEphemeral {
  1441  				// This is an ephemeral consumer and this could fail on restart until
  1442  				// the consumer can reconnect. We will create it as a durable and switch it.
  1443  				cfg.ConsumerConfig.Durable = ofi.Name()
  1444  			}
  1445  			obs, err := e.mset.addConsumerWithAssignment(&cfg.ConsumerConfig, _EMPTY_, nil, true, ActionCreateOrUpdate)
  1446  			if err != nil {
  1447  				s.Warnf("    Error adding consumer %q: %v", cfg.Name, err)
  1448  				continue
  1449  			}
  1450  			if isEphemeral {
  1451  				obs.switchToEphemeral()
  1452  			}
  1453  			if !cfg.Created.IsZero() {
  1454  				obs.setCreatedTime(cfg.Created)
  1455  			}
  1456  			if err != nil {
  1457  				s.Warnf("    Error restoring consumer %q state: %v", cfg.Name, err)
  1458  			}
  1459  		}
  1460  	}
  1461  
  1462  	// Make sure to cleanup any old remaining snapshots.
  1463  	os.RemoveAll(filepath.Join(jsa.storeDir, snapsDir))
  1464  
  1465  	// Check interest policy streams for auto cleanup.
  1466  	for _, mset := range ipstreams {
  1467  		mset.checkForOrphanMsgs()
  1468  		mset.checkConsumerReplication()
  1469  	}
  1470  
  1471  	s.Debugf("JetStream state for account %q recovered", a.Name)
  1472  
  1473  	return nil
  1474  }
  1475  
  1476  // Return whether we require MaxBytes to be set and, if > 0, the upper limit that exists for stream size.
  1477  // Both limits are independent of each other.
  1478  func (a *Account) maxBytesLimits(cfg *StreamConfig) (bool, int64) {
  1479  	a.mu.RLock()
  1480  	jsa := a.js
  1481  	a.mu.RUnlock()
  1482  	if jsa == nil {
  1483  		return false, 0
  1484  	}
  1485  	jsa.usageMu.RLock()
  1486  	selectedLimits, _, ok := jsa.selectLimits(cfg)
  1487  	jsa.usageMu.RUnlock()
  1488  	if !ok {
  1489  		return false, 0
  1490  	}
  1491  	maxStreamBytes := int64(0)
  1492  	if cfg.Storage == MemoryStorage {
  1493  		maxStreamBytes = selectedLimits.MemoryMaxStreamBytes
  1494  	} else {
  1495  		maxStreamBytes = selectedLimits.StoreMaxStreamBytes
  1496  	}
  1497  	return selectedLimits.MaxBytesRequired, maxStreamBytes
  1498  }
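        // Example reading of the result (illustrative): for a file-backed stream whose
        // selected tier has StoreMaxStreamBytes = 1GB and MaxBytesRequired = true, the
        // caller is expected to set MaxBytes explicitly and keep it at or below 1GB.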
  1499  
  1500  // numStreams will return how many streams we have.
  1501  func (a *Account) numStreams() int {
  1502  	a.mu.RLock()
  1503  	jsa := a.js
  1504  	a.mu.RUnlock()
  1505  	if jsa == nil {
  1506  		return 0
  1507  	}
  1508  	jsa.mu.Lock()
  1509  	n := len(jsa.streams)
  1510  	jsa.mu.Unlock()
  1511  	return n
  1512  }
  1513  
  1514  // streams will return all known streams.
  1515  func (a *Account) streams() []*stream {
  1516  	return a.filteredStreams(_EMPTY_)
  1517  }
  1518  
  1519  func (a *Account) filteredStreams(filter string) []*stream {
  1520  	a.mu.RLock()
  1521  	jsa := a.js
  1522  	a.mu.RUnlock()
  1523  
  1524  	if jsa == nil {
  1525  		return nil
  1526  	}
  1527  
  1528  	jsa.mu.RLock()
  1529  	defer jsa.mu.RUnlock()
  1530  
  1531  	var msets []*stream
  1532  	for _, mset := range jsa.streams {
  1533  		if filter != _EMPTY_ {
  1534  			for _, subj := range mset.cfg.Subjects {
  1535  				if SubjectsCollide(filter, subj) {
  1536  					msets = append(msets, mset)
  1537  					break
  1538  				}
  1539  			}
  1540  		} else {
  1541  			msets = append(msets, mset)
  1542  		}
  1543  	}
  1544  
  1545  	return msets
  1546  }
  1547  
  1548  // lookupStream will lookup a stream by name.
  1549  func (a *Account) lookupStream(name string) (*stream, error) {
  1550  	a.mu.RLock()
  1551  	jsa := a.js
  1552  	a.mu.RUnlock()
  1553  
  1554  	if jsa == nil {
  1555  		return nil, NewJSNotEnabledForAccountError()
  1556  	}
  1557  	jsa.mu.RLock()
  1558  	defer jsa.mu.RUnlock()
  1559  
  1560  	mset, ok := jsa.streams[name]
  1561  	if !ok {
  1562  		return nil, NewJSStreamNotFoundError()
  1563  	}
  1564  	return mset, nil
  1565  }
  1566  
  1567  // UpdateJetStreamLimits will update the account limits for a JetStream enabled account.
  1568  func (a *Account) UpdateJetStreamLimits(limits map[string]JetStreamAccountLimits) error {
  1569  	a.mu.RLock()
  1570  	s, jsa := a.srv, a.js
  1571  	a.mu.RUnlock()
  1572  
  1573  	if s == nil {
  1574  		return fmt.Errorf("jetstream account not registered")
  1575  	}
  1576  	js := s.getJetStream()
  1577  	if js == nil {
  1578  		return NewJSNotEnabledError()
  1579  	}
  1580  	if jsa == nil {
  1581  		return NewJSNotEnabledForAccountError()
  1582  	}
  1583  
  1584  	if len(limits) == 0 {
  1585  		limits = defaultJSAccountTiers
  1586  	}
  1587  
  1588  	// Calculate the delta between what we have and what we want.
  1589  	jsa.usageMu.RLock()
  1590  	dl := diffCheckedLimits(jsa.limits, limits)
  1591  	jsa.usageMu.RUnlock()
  1592  
  1593  	js.mu.Lock()
  1594  	// Check the limits against existing reservations.
  1595  	if err := js.sufficientResources(dl); err != nil {
  1596  		js.mu.Unlock()
  1597  		return err
  1598  	}
  1599  	js.mu.Unlock()
  1600  
  1601  	// Update
  1602  	jsa.usageMu.Lock()
  1603  	jsa.limits = limits
  1604  	jsa.usageMu.Unlock()
  1605  
  1606  	return nil
  1607  }
  1608  
  1609  func diffCheckedLimits(a, b map[string]JetStreamAccountLimits) map[string]JetStreamAccountLimits {
  1610  	diff := map[string]JetStreamAccountLimits{}
  1611  	for t, la := range a {
  1612  		// in a, not in b will return 0
  1613  		lb := b[t]
  1614  		diff[t] = JetStreamAccountLimits{
  1615  			MaxMemory: lb.MaxMemory - la.MaxMemory,
  1616  			MaxStore:  lb.MaxStore - la.MaxStore,
  1617  		}
  1618  	}
  1619  	for t, lb := range b {
  1620  		if la, ok := a[t]; !ok {
  1621  			// only in b not in a. (in a and b already covered)
  1622  			diff[t] = JetStreamAccountLimits{
  1623  				MaxMemory: lb.MaxMemory - la.MaxMemory,
  1624  				MaxStore:  lb.MaxStore - la.MaxStore,
  1625  			}
  1626  		}
  1627  	}
  1628  	return diff
  1629  }
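
        // For illustration (hypothetical tiers and sizes):
        //
        //	old := map[string]JetStreamAccountLimits{"R1": {MaxMemory: 1 << 30}}
        //	want := map[string]JetStreamAccountLimits{"R1": {MaxMemory: 2 << 30}, "R3": {MaxStore: 5 << 30}}
        //	delta := diffCheckedLimits(old, want)
        //	// delta["R1"].MaxMemory == 1<<30 and delta["R3"].MaxStore == 5<<30, which is
        //	// the delta UpdateJetStreamLimits checks with sufficientResources.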
  1630  
  1631  // Return reserved bytes for memory and store for this account on this server.
  1632  // Lock should be held.
  1633  func (jsa *jsAccount) reservedStorage(tier string) (mem, store uint64) {
  1634  	for _, mset := range jsa.streams {
  1635  		cfg := &mset.cfg
  1636  		if tier == _EMPTY_ || tier == tierName(cfg) && cfg.MaxBytes > 0 {
  1637  			switch cfg.Storage {
  1638  			case FileStorage:
  1639  				store += uint64(cfg.MaxBytes)
  1640  			case MemoryStorage:
  1641  				mem += uint64(cfg.MaxBytes)
  1642  			}
  1643  		}
  1644  	}
  1645  	return mem, store
  1646  }
  1647  
  1648  // Return reserved bytes for memory and store for this account in clustered mode.
  1649  // js lock should be held.
  1650  func reservedStorage(sas map[string]*streamAssignment, tier string) (mem, store uint64) {
  1651  	for _, sa := range sas {
  1652  		cfg := sa.Config
  1653  		if tier == _EMPTY_ || tier == tierName(cfg) && cfg.MaxBytes > 0 {
  1654  			switch cfg.Storage {
  1655  			case FileStorage:
  1656  				store += uint64(cfg.MaxBytes)
  1657  			case MemoryStorage:
  1658  				mem += uint64(cfg.MaxBytes)
  1659  			}
  1660  		}
  1661  	}
  1662  	return mem, store
  1663  }
  1664  
  1665  // JetStreamUsage reports on JetStream usage and limits for an account.
  1666  func (a *Account) JetStreamUsage() JetStreamAccountStats {
  1667  	a.mu.RLock()
  1668  	jsa, aname := a.js, a.Name
  1669  	accJsLimits := a.jsLimits
  1670  	a.mu.RUnlock()
  1671  
  1672  	var stats JetStreamAccountStats
  1673  	if jsa != nil {
  1674  		js := jsa.js
  1675  		js.mu.RLock()
  1676  		cc := js.cluster
  1677  		singleServer := cc == nil
  1678  		jsa.mu.RLock()
  1679  		jsa.usageMu.RLock()
  1680  		stats.Memory, stats.Store = jsa.storageTotals()
  1681  		stats.Domain = js.config.Domain
  1682  		stats.API = JetStreamAPIStats{
  1683  			Total:  jsa.apiTotal,
  1684  			Errors: jsa.apiErrors,
  1685  		}
  1686  		if singleServer {
  1687  			stats.ReservedMemory, stats.ReservedStore = jsa.reservedStorage(_EMPTY_)
  1688  		} else {
  1689  			stats.ReservedMemory, stats.ReservedStore = reservedStorage(cc.streams[aname], _EMPTY_)
  1690  		}
  1691  		l, defaultTier := jsa.limits[_EMPTY_]
  1692  		if defaultTier {
  1693  			stats.Limits = l
  1694  		} else {
  1695  			skipped := 0
  1696  			stats.Tiers = make(map[string]JetStreamTier)
  1697  			for t, total := range jsa.usage {
  1698  				if _, ok := jsa.limits[t]; !ok && (*total) == (jsaStorage{}) {
  1699  					// skip tiers not present that don't contain a count
  1700  					// In case this shows an empty stream, that tier will be added when iterating over streams
  1701  					skipped++
  1702  				} else {
  1703  					tier := JetStreamTier{
  1704  						Memory: uint64(total.total.mem),
  1705  						Store:  uint64(total.total.store),
  1706  						Limits: jsa.limits[t],
  1707  					}
  1708  					if singleServer {
  1709  						tier.ReservedMemory, tier.ReservedStore = jsa.reservedStorage(t)
  1710  					} else {
  1711  						tier.ReservedMemory, tier.ReservedStore = reservedStorage(cc.streams[aname], t)
  1712  					}
  1713  					stats.Tiers[t] = tier
  1714  				}
  1715  			}
  1716  			if len(accJsLimits) != len(jsa.usage)-skipped {
  1717  				// insert unused limits
  1718  				for t, lim := range accJsLimits {
  1719  					if _, ok := stats.Tiers[t]; !ok {
  1720  						tier := JetStreamTier{Limits: lim}
  1721  						if singleServer {
  1722  							tier.ReservedMemory, tier.ReservedStore = jsa.reservedStorage(t)
  1723  						} else {
  1724  							tier.ReservedMemory, tier.ReservedStore = reservedStorage(cc.streams[aname], t)
  1725  						}
  1726  						stats.Tiers[t] = tier
  1727  					}
  1728  				}
  1729  			}
  1730  		}
  1731  		jsa.usageMu.RUnlock()
  1732  
  1733  		// Clustered
  1734  		if cc := js.cluster; cc != nil {
  1735  			sas := cc.streams[aname]
  1736  			if defaultTier {
  1737  				stats.Streams = len(sas)
  1738  				stats.ReservedMemory, stats.ReservedStore = reservedStorage(sas, _EMPTY_)
  1739  			}
  1740  			for _, sa := range sas {
  1741  				stats.Consumers += len(sa.consumers)
  1742  				if !defaultTier {
  1743  					tier := tierName(sa.Config)
  1744  					u, ok := stats.Tiers[tier]
  1745  					if !ok {
  1746  						u = JetStreamTier{}
  1747  					}
  1748  					u.Streams++
  1749  					stats.Streams++
  1750  					u.Consumers += len(sa.consumers)
  1751  					stats.Tiers[tier] = u
  1752  				}
  1753  			}
  1754  		} else {
  1755  			if defaultTier {
  1756  				stats.Streams = len(jsa.streams)
  1757  			}
  1758  			for _, mset := range jsa.streams {
  1759  				consCount := mset.numConsumers()
  1760  				stats.Consumers += consCount
  1761  				if !defaultTier {
  1762  					u, ok := stats.Tiers[mset.tier]
  1763  					if !ok {
  1764  						u = JetStreamTier{}
  1765  					}
  1766  					u.Streams++
  1767  					stats.Streams++
  1768  					u.Consumers += consCount
  1769  					stats.Tiers[mset.tier] = u
  1770  				}
  1771  			}
  1772  		}
  1773  		jsa.mu.RUnlock()
  1774  		js.mu.RUnlock()
  1775  	}
  1776  	return stats
  1777  }
  1778  
  1779  // DisableJetStream will disable JetStream for this account.
  1780  func (a *Account) DisableJetStream() error {
  1781  	return a.removeJetStream()
  1782  }
  1783  
  1784  // removeJetStream is called when JetStream has been disabled for this account.
  1785  func (a *Account) removeJetStream() error {
  1786  	a.mu.Lock()
  1787  	s := a.srv
  1788  	a.js = nil
  1789  	a.mu.Unlock()
  1790  
  1791  	if s == nil {
  1792  		return fmt.Errorf("jetstream account not registered")
  1793  	}
  1794  
  1795  	js := s.getJetStream()
  1796  	if js == nil {
  1797  		return NewJSNotEnabledForAccountError()
  1798  	}
  1799  
  1800  	return js.disableJetStream(js.lookupAccount(a))
  1801  }
  1802  
  1803  // Disable JetStream for the account.
  1804  func (js *jetStream) disableJetStream(jsa *jsAccount) error {
  1805  	if jsa == nil || jsa.account == nil {
  1806  		return NewJSNotEnabledForAccountError()
  1807  	}
  1808  
  1809  	js.mu.Lock()
  1810  	delete(js.accounts, jsa.account.Name)
  1811  	js.mu.Unlock()
  1812  
  1813  	jsa.delete()
  1814  
  1815  	return nil
  1816  }
  1817  
  1818  // jetStreamConfigured reports whether the account has JetStream configured, regardless of this
  1819  // server's JetStream status.
  1820  func (a *Account) jetStreamConfigured() bool {
  1821  	if a == nil {
  1822  		return false
  1823  	}
  1824  	a.mu.RLock()
  1825  	defer a.mu.RUnlock()
  1826  	return len(a.jsLimits) > 0
  1827  }
  1828  
  1829  // JetStreamEnabled is a helper to determine if jetstream is enabled for an account.
  1830  func (a *Account) JetStreamEnabled() bool {
  1831  	if a == nil {
  1832  		return false
  1833  	}
  1834  	a.mu.RLock()
  1835  	enabled := a.js != nil
  1836  	a.mu.RUnlock()
  1837  	return enabled
  1838  }
  1839  
  1840  func (jsa *jsAccount) remoteUpdateUsage(sub *subscription, c *client, _ *Account, subject, _ string, msg []byte) {
  1841  	// jsa.js.srv is immutable and guaranteed to not be nil, so no lock needed.
  1842  	s := jsa.js.srv
  1843  
  1844  	jsa.usageMu.Lock()
  1845  	defer jsa.usageMu.Unlock()
  1846  
  1847  	if len(msg) < minUsageUpdateLen {
  1848  		s.Warnf("Ignoring remote usage update with size too short")
  1849  		return
  1850  	}
  1851  	var rnode string
  1852  	if li := strings.LastIndexByte(subject, btsep); li > 0 && li < len(subject) {
  1853  		rnode = subject[li+1:]
  1854  	}
  1855  	if rnode == _EMPTY_ {
  1856  		s.Warnf("Received remote usage update with no remote node")
  1857  		return
  1858  	}
  1859  	rUsage, ok := jsa.rusage[rnode]
  1860  	if !ok {
  1861  		if jsa.rusage == nil {
  1862  			jsa.rusage = make(map[string]*remoteUsage)
  1863  		}
  1864  		rUsage = &remoteUsage{tiers: make(map[string]*jsaUsage)}
  1865  		jsa.rusage[rnode] = rUsage
  1866  	}
  1867  	updateTotal := func(tierName string, memUsed, storeUsed int64) {
  1868  		total, ok := jsa.usage[tierName]
  1869  		if !ok {
  1870  			total = &jsaStorage{}
  1871  			jsa.usage[tierName] = total
  1872  		}
  1873  		// Update the usage for this remote.
  1874  		if usage := rUsage.tiers[tierName]; usage != nil {
  1875  			// Decrement our old values.
  1876  			total.total.mem -= usage.mem
  1877  			total.total.store -= usage.store
  1878  			usage.mem, usage.store = memUsed, storeUsed
  1879  		} else {
  1880  			rUsage.tiers[tierName] = &jsaUsage{memUsed, storeUsed}
  1881  		}
  1882  		total.total.mem += memUsed
  1883  		total.total.store += storeUsed
  1884  	}
  1885  
  1886  	var le = binary.LittleEndian
  1887  	apiTotal, apiErrors := le.Uint64(msg[16:]), le.Uint64(msg[24:])
  1888  	memUsed, storeUsed := int64(le.Uint64(msg[0:])), int64(le.Uint64(msg[8:]))
  1889  
  1890  	// We later extended the data structure to support multiple tiers
  1891  	var excessRecordCnt uint32
  1892  	var tierName string
  1893  
  1894  	if len(msg) >= usageMultiTiersLen {
  1895  		excessRecordCnt = le.Uint32(msg[minUsageUpdateLen:])
  1896  		length := le.Uint64(msg[minUsageUpdateLen+4:])
  1897  		// Need to protect past this point in case this is wrong.
  1898  		if uint64(len(msg)) < usageMultiTiersLen+length {
  1899  			s.Warnf("Received corrupt remote usage update")
  1900  			return
  1901  		}
  1902  		tierName = string(msg[usageMultiTiersLen : usageMultiTiersLen+length])
  1903  		msg = msg[usageMultiTiersLen+length:]
  1904  	}
  1905  	updateTotal(tierName, memUsed, storeUsed)
  1906  	for ; excessRecordCnt > 0 && len(msg) >= usageRecordLen; excessRecordCnt-- {
  1907  		memUsed, storeUsed := int64(le.Uint64(msg[0:])), int64(le.Uint64(msg[8:]))
  1908  		length := le.Uint64(msg[16:])
  1909  		if uint64(len(msg)) < usageRecordLen+length {
  1910  			s.Warnf("Received corrupt remote usage update on excess record")
  1911  			return
  1912  		}
  1913  		tierName = string(msg[usageRecordLen : usageRecordLen+length])
  1914  		msg = msg[usageRecordLen+length:]
  1915  		updateTotal(tierName, memUsed, storeUsed)
  1916  	}
  1917  	jsa.apiTotal -= rUsage.api
  1918  	jsa.apiErrors -= rUsage.err
  1919  	rUsage.api = apiTotal
  1920  	rUsage.err = apiErrors
  1921  	jsa.apiTotal += apiTotal
  1922  	jsa.apiErrors += apiErrors
  1923  }
  1924  
  1925  // When we detect a skew of some sort this will verify the usage reporting is correct.
  1926  // No locks should be held.
  1927  func (jsa *jsAccount) checkAndSyncUsage(tierName string, storeType StorageType) {
  1928  	// This will run in a separate go routine, so check that we are only running once.
  1929  	if !jsa.sync.CompareAndSwap(false, true) {
  1930  		return
  1931  	}
  1932  	defer jsa.sync.Store(false)
  1933  
  1934  	// Hold the account read lock and the usage lock while we calculate.
  1935  	// We scope by tier and storage type, but if, say, R3 File has 200 streams this
  1936  	// could show a pause. Testing with > 100 non-active streams showed 80-200ns or so.
  1937  	// It should also be rare that this gets called.
  1938  	jsa.mu.RLock()
  1939  	defer jsa.mu.RUnlock()
  1940  	js := jsa.js
  1941  	if js == nil {
  1942  		return
  1943  	}
  1944  	s := js.srv
  1945  
  1946  	// We need to collect the stream stores before we acquire the usage lock since in storeUpdates the
  1947  	// stream lock could be held if deletions are inline with storing a new message, e.g. via limits.
  1948  	var stores []StreamStore
  1949  	for _, mset := range jsa.streams {
  1950  		mset.mu.RLock()
  1951  		if mset.tier == tierName && mset.stype == storeType && mset.store != nil {
  1952  			stores = append(stores, mset.store)
  1953  		}
  1954  		mset.mu.RUnlock()
  1955  	}
  1956  
  1957  	// Now range and qualify, hold usage lock to prevent updates.
  1958  	jsa.usageMu.Lock()
  1959  	defer jsa.usageMu.Unlock()
  1960  
  1961  	usage, ok := jsa.usage[tierName]
  1962  	if !ok {
  1963  		return
  1964  	}
  1965  
  1966  	// Collect current total for all stream stores that matched.
  1967  	var total int64
  1968  	var state StreamState
  1969  	for _, store := range stores {
  1970  		store.FastState(&state)
  1971  		total += int64(state.Bytes)
  1972  	}
  1973  
  1974  	var needClusterUpdate bool
  1975  	// If we do not match on our calculations compute delta and adjust.
  1976  	if storeType == MemoryStorage {
  1977  		if total != usage.local.mem {
  1978  			s.Warnf("MemStore usage drift of %v vs %v detected for account %q",
  1979  				friendlyBytes(total), friendlyBytes(usage.local.mem), jsa.account.GetName())
  1980  			delta := total - usage.local.mem
  1981  			usage.local.mem += delta
  1982  			usage.total.mem += delta
  1983  			atomic.AddInt64(&js.memUsed, delta)
  1984  			needClusterUpdate = true
  1985  		}
  1986  	} else {
  1987  		if total != usage.local.store {
  1988  			s.Warnf("FileStore usage drift of %v vs %v detected for account %q",
  1989  				friendlyBytes(total), friendlyBytes(usage.local.store), jsa.account.GetName())
  1990  			delta := total - usage.local.store
  1991  			usage.local.store += delta
  1992  			usage.total.store += delta
  1993  			atomic.AddInt64(&js.storeUsed, delta)
  1994  			needClusterUpdate = true
  1995  		}
  1996  	}
  1997  
  1998  	// Publish our local updates if in clustered mode.
  1999  	if needClusterUpdate && js.isClusteredNoLock() {
  2000  		jsa.sendClusterUsageUpdate()
  2001  	}
  2002  }
  2003  
  2004  // Updates accounting on in-use memory and storage. This is called locally
  2005  // by the lower storage layers.
  2006  func (jsa *jsAccount) updateUsage(tierName string, storeType StorageType, delta int64) {
  2007  	// jsa.js is immutable and cannot be nil, so ok w/o lock.
  2008  	js := jsa.js
  2009  	// updateUsage() may be invoked under the mset's lock, so we can't take
  2010  	// the js lock to check if we are clustered. So use this function, which makes
  2011  	// use of an atomic to do the check without producing data race reports.
  2012  	isClustered := js.isClusteredNoLock()
  2013  
  2014  	var needsCheck bool
  2015  	jsa.usageMu.Lock()
  2016  	s, ok := jsa.usage[tierName]
  2017  	if !ok {
  2018  		s = &jsaStorage{}
  2019  		jsa.usage[tierName] = s
  2020  	}
  2021  	if storeType == MemoryStorage {
  2022  		s.local.mem += delta
  2023  		s.total.mem += delta
  2024  		atomic.AddInt64(&js.memUsed, delta)
  2025  		needsCheck = s.local.mem < 0
  2026  	} else {
  2027  		s.local.store += delta
  2028  		s.total.store += delta
  2029  		atomic.AddInt64(&js.storeUsed, delta)
  2030  		needsCheck = s.local.store < 0
  2031  	}
  2032  	// Publish our local updates if in clustered mode.
  2033  	if isClustered {
  2034  		jsa.sendClusterUsageUpdate()
  2035  	}
  2036  	jsa.usageMu.Unlock()
  2037  
  2038  	if needsCheck {
  2039  		// We could be holding the stream lock from up in the stack, and this
  2040  		// will want the jsa lock, which would violate locking order.
  2041  		// So do this in a Go routine. The function will check if it is already running.
  2042  		go jsa.checkAndSyncUsage(tierName, storeType)
  2043  	}
  2044  }
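
        // For example, when a file-backed R1 stream stores a message, the store layer calls
        // updateUsage("R1", FileStorage, delta) with the bytes added, and the matching negative
        // delta when the message is later removed. If a local total ever goes negative, the
        // asynchronous checkAndSyncUsage pass above reconciles it against the stream stores.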
  2045  
  2046  var usageTick = 1500 * time.Millisecond
  2047  
  2048  func (jsa *jsAccount) sendClusterUsageUpdateTimer() {
  2049  	jsa.usageMu.Lock()
  2050  	defer jsa.usageMu.Unlock()
  2051  	jsa.sendClusterUsageUpdate()
  2052  	if jsa.utimer != nil {
  2053  		jsa.utimer.Reset(usageTick)
  2054  	}
  2055  }
  2056  
  2057  // For usage fields.
  2058  const (
  2059  	minUsageUpdateLen    = 32
  2060  	stackUsageUpdate     = 72
  2061  	usageRecordLen       = 24
  2062  	usageMultiTiersLen   = 44
  2063  	apiStatsAndNumTiers  = 20
  2064  	minUsageUpdateWindow = 250 * time.Millisecond
  2065  )
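
        // Sketch of the usage update payload produced by sendClusterUsageUpdate below and
        // consumed by remoteUpdateUsage above (all values little-endian). The first record:
        //
        //	le.PutUint64(b[0:], mem)        // local memory usage
        //	le.PutUint64(b[8:], store)      // local store usage
        //	le.PutUint64(b[16:], apiTotal)  // API requests
        //	le.PutUint64(b[24:], apiErrors) // API errors
        //	le.PutUint32(b[32:], numExtra)  // count of additional tier records
        //	le.PutUint64(b[36:], uint64(len(tier)))
        //	copy(b[44:], tier)              // 44 == usageMultiTiersLen
        //
        // Each additional tier record is mem (8), store (8), name length (8) and the tier
        // name itself, i.e. usageRecordLen plus the name.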
  2066  
  2067  // Send updates to our account usage for this server.
  2068  // jsa.usageMu lock should be held.
  2069  func (jsa *jsAccount) sendClusterUsageUpdate() {
  2070  	// These values are absolute so we can limit send rates.
  2071  	now := time.Now()
  2072  	if now.Sub(jsa.lupdate) < minUsageUpdateWindow {
  2073  		return
  2074  	}
  2075  	jsa.lupdate = now
  2076  
  2077  	lenUsage := len(jsa.usage)
  2078  	if lenUsage == 0 {
  2079  		return
  2080  	}
  2081  	// every base record contains mem/store/len(tier) as well as the tier name
  2082  	l := usageRecordLen * lenUsage
  2083  	for tier := range jsa.usage {
  2084  		l += len(tier)
  2085  	}
  2086  	// first record contains api/usage errors as well as count for extra base records
  2087  	l += apiStatsAndNumTiers
  2088  
  2089  	var raw [stackUsageUpdate]byte
  2090  	var b []byte
  2091  	if l > stackUsageUpdate {
  2092  		b = make([]byte, l)
  2093  	} else {
  2094  		b = raw[:l]
  2095  	}
  2096  
  2097  	var i int
  2098  	var le = binary.LittleEndian
  2099  	for tier, usage := range jsa.usage {
  2100  		le.PutUint64(b[i+0:], uint64(usage.local.mem))
  2101  		le.PutUint64(b[i+8:], uint64(usage.local.store))
  2102  		if i == 0 {
  2103  			le.PutUint64(b[16:], jsa.usageApi)
  2104  			le.PutUint64(b[24:], jsa.usageErr)
  2105  			le.PutUint32(b[32:], uint32(len(jsa.usage)-1))
  2106  			le.PutUint64(b[36:], uint64(len(tier)))
  2107  			copy(b[usageMultiTiersLen:], tier)
  2108  			i = usageMultiTiersLen + len(tier)
  2109  		} else {
  2110  			le.PutUint64(b[i+16:], uint64(len(tier)))
  2111  			copy(b[i+usageRecordLen:], tier)
  2112  			i += usageRecordLen + len(tier)
  2113  		}
  2114  	}
  2115  	jsa.sendq.push(newPubMsg(nil, jsa.updatesPub, _EMPTY_, nil, nil, b, noCompression, false, false))
  2116  }
  2117  
  2118  func (js *jetStream) wouldExceedLimits(storeType StorageType, sz int) bool {
  2119  	var (
  2120  		total *int64
  2121  		max   int64
  2122  	)
  2123  	if storeType == MemoryStorage {
  2124  		total, max = &js.memUsed, js.config.MaxMemory
  2125  	} else {
  2126  		total, max = &js.storeUsed, js.config.MaxStore
  2127  	}
  2128  	return atomic.LoadInt64(total) > (max + int64(sz))
  2129  }
  2130  
  2131  func (js *jetStream) limitsExceeded(storeType StorageType) bool {
  2132  	return js.wouldExceedLimits(storeType, 0)
  2133  }
  2134  
  2135  func tierName(cfg *StreamConfig) string {
  2136  	// TODO (mh) this is where we could select based off a placement tag as well "qos:tier"
  2137  	replicas := cfg.Replicas
  2138  	if replicas == 0 {
  2139  		replicas = 1
  2140  	}
  2141  	return fmt.Sprintf("R%d", replicas)
  2142  }
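
        // For example:
        //
        //	tierName(&StreamConfig{Replicas: 3}) // "R3"
        //	tierName(&StreamConfig{})            // "R1", replicas of 0 are treated as 1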
  2143  
  2144  func isSameTier(cfgA, cfgB *StreamConfig) bool {
  2145  	// TODO (mh) this is where we could select based off a placement tag as well "qos:tier"
  2146  	return cfgA.Replicas == cfgB.Replicas
  2147  }
  2148  
  2149  func (jsa *jsAccount) jetStreamAndClustered() (*jetStream, bool) {
  2150  	jsa.mu.RLock()
  2151  	js := jsa.js
  2152  	jsa.mu.RUnlock()
  2153  	return js, js.isClustered()
  2154  }
  2155  
  2156  // jsa.usageMu read lock should be held.
  2157  func (jsa *jsAccount) selectLimits(cfg *StreamConfig) (JetStreamAccountLimits, string, bool) {
  2158  	if selectedLimits, ok := jsa.limits[_EMPTY_]; ok {
  2159  		return selectedLimits, _EMPTY_, true
  2160  	}
  2161  	tier := tierName(cfg)
  2162  	if selectedLimits, ok := jsa.limits[tier]; ok {
  2163  		return selectedLimits, tier, true
  2164  	}
  2165  	return JetStreamAccountLimits{}, _EMPTY_, false
  2166  }
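
        // For illustration: with tiered limits keyed by "R1" and "R3", a config with
        // Replicas: 3 selects the "R3" entry; with a single untiered entry under the
        // empty key, that entry is returned for every config.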
  2167  
  2168  // Lock should be held.
  2169  func (jsa *jsAccount) countStreams(tier string, cfg *StreamConfig) int {
  2170  	streams := len(jsa.streams)
  2171  	if tier != _EMPTY_ {
  2172  		streams = 0
  2173  		for _, sa := range jsa.streams {
  2174  			if isSameTier(&sa.cfg, cfg) {
  2175  				streams++
  2176  			}
  2177  		}
  2178  	}
  2179  	return streams
  2180  }
  2181  
  2182  // jsa.usageMu read lock (at least) should be held.
  2183  func (jsa *jsAccount) storageTotals() (uint64, uint64) {
  2184  	mem := uint64(0)
  2185  	store := uint64(0)
  2186  	for _, sa := range jsa.usage {
  2187  		mem += uint64(sa.total.mem)
  2188  		store += uint64(sa.total.store)
  2189  	}
  2190  	return mem, store
  2191  }
  2192  
  2193  func (jsa *jsAccount) limitsExceeded(storeType StorageType, tierName string, replicas int) (bool, *ApiError) {
  2194  	return jsa.wouldExceedLimits(storeType, tierName, replicas, _EMPTY_, nil, nil)
  2195  }
  2196  
  2197  func (jsa *jsAccount) wouldExceedLimits(storeType StorageType, tierName string, replicas int, subj string, hdr, msg []byte) (bool, *ApiError) {
  2198  	jsa.usageMu.RLock()
  2199  	defer jsa.usageMu.RUnlock()
  2200  
  2201  	selectedLimits, ok := jsa.limits[tierName]
  2202  	if !ok {
  2203  		return true, NewJSNoLimitsError()
  2204  	}
  2205  	inUse := jsa.usage[tierName]
  2206  	if inUse == nil {
  2207  		// Imply totals of 0
  2208  		return false, nil
  2209  	}
  2210  	r := int64(replicas)
  2211  	// Make sure replicas is correct.
  2212  	if r < 1 {
  2213  		r = 1
  2214  	}
  2215  	// This is for limits. If we have no tier, consider all to be flat, vs tiers like R3 where we want to scale limit by replication.
  2216  	lr := r
  2217  	if tierName == _EMPTY_ {
  2218  		lr = 1
  2219  	}
  2220  
  2221  	// Since tiers are flat we need to scale limit up by replicas when checking.
  2222  	if storeType == MemoryStorage {
  2223  		totalMem := inUse.total.mem + (int64(memStoreMsgSize(subj, hdr, msg)) * r)
  2224  		if selectedLimits.MemoryMaxStreamBytes > 0 && totalMem > selectedLimits.MemoryMaxStreamBytes*lr {
  2225  			return true, nil
  2226  		}
  2227  		if selectedLimits.MaxMemory >= 0 && totalMem > selectedLimits.MaxMemory*lr {
  2228  			return true, nil
  2229  		}
  2230  	} else {
  2231  		totalStore := inUse.total.store + (int64(fileStoreMsgSize(subj, hdr, msg)) * r)
  2232  		if selectedLimits.StoreMaxStreamBytes > 0 && totalStore > selectedLimits.StoreMaxStreamBytes*lr {
  2233  			return true, nil
  2234  		}
  2235  		if selectedLimits.MaxStore >= 0 && totalStore > selectedLimits.MaxStore*lr {
  2236  			return true, nil
  2237  		}
  2238  	}
  2239  
  2240  	return false, nil
  2241  }
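
        // For illustration (hypothetical numbers): with an "R3" tier limit of MaxStore: 1GB,
        // the check above adds three times the file-store size of the new message to the
        // tier's flat usage total and compares it against 1GB * 3, since an R3 stream's
        // usage is accounted once per replica.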
  2242  
  2243  // Check account limits.
  2244  // Read Lock should be held
  2245  func (js *jetStream) checkAccountLimits(selected *JetStreamAccountLimits, config *StreamConfig, currentRes int64) error {
  2246  	return js.checkLimits(selected, config, false, currentRes, 0)
  2247  }
  2248  
  2249  // Check account and server limits.
  2250  // Read Lock should be held
  2251  func (js *jetStream) checkAllLimits(selected *JetStreamAccountLimits, config *StreamConfig, currentRes, maxBytesOffset int64) error {
  2252  	return js.checkLimits(selected, config, true, currentRes, maxBytesOffset)
  2253  }
  2254  
  2255  // Check if a new proposed msg set would exceed our account limits.
  2256  // Lock should be held.
  2257  func (js *jetStream) checkLimits(selected *JetStreamAccountLimits, config *StreamConfig, checkServer bool, currentRes, maxBytesOffset int64) error {
  2258  	// Check MaxConsumers
  2259  	if config.MaxConsumers > 0 && selected.MaxConsumers > 0 && config.MaxConsumers > selected.MaxConsumers {
  2260  		return NewJSMaximumConsumersLimitError()
  2261  	}
  2262  	// stream limit is checked separately on stream create only!
  2263  	// Check storage, memory or disk.
  2264  	return js.checkBytesLimits(selected, config.MaxBytes, config.Storage, checkServer, currentRes, maxBytesOffset)
  2265  }
  2266  
  2267  // Check if additional bytes will exceed our account limits and optionally the server itself.
  2268  // Read Lock should be held.
  2269  func (js *jetStream) checkBytesLimits(selectedLimits *JetStreamAccountLimits, addBytes int64, storage StorageType, checkServer bool, currentRes, maxBytesOffset int64) error {
  2270  	if addBytes < 0 {
  2271  		addBytes = 1
  2272  	}
  2273  	totalBytes := addBytes + maxBytesOffset
  2274  
  2275  	switch storage {
  2276  	case MemoryStorage:
  2277  		// Account limits defined.
  2278  		if selectedLimits.MaxMemory >= 0 && currentRes+totalBytes > selectedLimits.MaxMemory {
  2279  			return NewJSMemoryResourcesExceededError()
  2280  		}
  2281  		// Check if this server can handle request.
  2282  		if checkServer && js.memReserved+addBytes > js.config.MaxMemory {
  2283  			return NewJSMemoryResourcesExceededError()
  2284  		}
  2285  	case FileStorage:
  2286  		// Account limits defined.
  2287  		if selectedLimits.MaxStore >= 0 && currentRes+totalBytes > selectedLimits.MaxStore {
  2288  			return NewJSStorageResourcesExceededError()
  2289  		}
  2290  		// Check if this server can handle request.
  2291  		if checkServer && js.storeReserved+addBytes > js.config.MaxStore {
  2292  			return NewJSStorageResourcesExceededError()
  2293  		}
  2294  	}
  2295  
  2296  	return nil
  2297  }
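
        // For illustration (hypothetical numbers): with an account MaxStore of 10GB, a
        // current reservation of 8GB and a new file-backed stream asking for MaxBytes of
        // 4GB, currentRes+totalBytes is 12GB > 10GB and checkBytesLimits returns
        // NewJSStorageResourcesExceededError().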
  2298  
  2299  func (jsa *jsAccount) acc() *Account {
  2300  	return jsa.account
  2301  }
  2302  
  2303  // Delete the JetStream resources.
  2304  func (jsa *jsAccount) delete() {
  2305  	var streams []*stream
  2306  	var ts []string
  2307  
  2308  	jsa.mu.Lock()
  2309  	// The update timer and subs need to be protected by usageMu lock
  2310  	jsa.usageMu.Lock()
  2311  	if jsa.utimer != nil {
  2312  		jsa.utimer.Stop()
  2313  		jsa.utimer = nil
  2314  	}
  2315  	if jsa.updatesSub != nil && jsa.js.srv != nil {
  2316  		s := jsa.js.srv
  2317  		s.sysUnsubscribe(jsa.updatesSub)
  2318  		jsa.updatesSub = nil
  2319  	}
  2320  	jsa.usageMu.Unlock()
  2321  
  2322  	for _, ms := range jsa.streams {
  2323  		streams = append(streams, ms)
  2324  	}
  2325  	acc := jsa.account
  2326  	for _, t := range jsa.templates {
  2327  		ts = append(ts, t.Name)
  2328  	}
  2329  	jsa.templates = nil
  2330  	jsa.mu.Unlock()
  2331  
  2332  	for _, mset := range streams {
  2333  		mset.stop(false, false)
  2334  	}
  2335  
  2336  	for _, t := range ts {
  2337  		acc.deleteStreamTemplate(t)
  2338  	}
  2339  }
  2340  
  2341  // Lookup the jetstream account for a given account.
  2342  func (js *jetStream) lookupAccount(a *Account) *jsAccount {
  2343  	if a == nil {
  2344  		return nil
  2345  	}
  2346  	js.mu.RLock()
  2347  	jsa := js.accounts[a.Name]
  2348  	js.mu.RUnlock()
  2349  	return jsa
  2350  }
  2351  
  2352  // Report on JetStream stats and usage for this server.
  2353  func (js *jetStream) usageStats() *JetStreamStats {
  2354  	var stats JetStreamStats
  2355  	js.mu.RLock()
  2356  	stats.Accounts = len(js.accounts)
  2357  	stats.ReservedMemory = uint64(js.memReserved)
  2358  	stats.ReservedStore = uint64(js.storeReserved)
  2359  	s := js.srv
  2360  	js.mu.RUnlock()
  2361  	stats.API.Total = uint64(atomic.LoadInt64(&js.apiTotal))
  2362  	stats.API.Errors = uint64(atomic.LoadInt64(&js.apiErrors))
  2363  	stats.API.Inflight = uint64(atomic.LoadInt64(&js.apiInflight))
  2364  	// Make sure we do not report negative.
  2365  	used := atomic.LoadInt64(&js.memUsed)
  2366  	if used < 0 {
  2367  		used = 0
  2368  	}
  2369  	stats.Memory = uint64(used)
  2370  	used = atomic.LoadInt64(&js.storeUsed)
  2371  	if used < 0 {
  2372  		used = 0
  2373  	}
  2374  	stats.Store = uint64(used)
  2375  	stats.HAAssets = s.numRaftNodes()
  2376  	return &stats
  2377  }
  2378  
  2379  // Check to see if we have enough system resources for this account.
  2380  // Lock should be held.
  2381  func (js *jetStream) sufficientResources(limits map[string]JetStreamAccountLimits) error {
  2382  	// If we are clustered we do not really know how many resources will be ultimately available.
  2383  	// This needs to be handled out of band.
  2384  	// If we are a single server, we can make decisions here.
  2385  	if limits == nil || !js.standAlone {
  2386  		return nil
  2387  	}
  2388  
  2389  	totalMaxBytes := func(limits map[string]JetStreamAccountLimits) (int64, int64) {
  2390  		totalMaxMemory := int64(0)
  2391  		totalMaxStore := int64(0)
  2392  		for _, l := range limits {
  2393  			if l.MaxMemory > 0 {
  2394  				totalMaxMemory += l.MaxMemory
  2395  			}
  2396  			if l.MaxStore > 0 {
  2397  				totalMaxStore += l.MaxStore
  2398  			}
  2399  		}
  2400  		return totalMaxMemory, totalMaxStore
  2401  	}
  2402  
  2403  	totalMaxMemory, totalMaxStore := totalMaxBytes(limits)
  2404  
  2405  	// Reserved is now specific to the MaxBytes for streams.
  2406  	if js.memReserved+totalMaxMemory > js.config.MaxMemory {
  2407  		return NewJSMemoryResourcesExceededError()
  2408  	}
  2409  	if js.storeReserved+totalMaxStore > js.config.MaxStore {
  2410  		return NewJSStorageResourcesExceededError()
  2411  	}
  2412  
  2413  	// Since we know we are in single-server mode if we get here, check the account reservations.
  2414  	var storeReserved, memReserved int64
  2415  	for _, jsa := range js.accounts {
  2416  		if jsa.account.IsExpired() {
  2417  			continue
  2418  		}
  2419  		jsa.usageMu.RLock()
  2420  		maxMemory, maxStore := totalMaxBytes(jsa.limits)
  2421  		jsa.usageMu.RUnlock()
  2422  		memReserved += maxMemory
  2423  		storeReserved += maxStore
  2424  	}
  2425  
  2426  	if memReserved+totalMaxMemory > js.config.MaxMemory {
  2427  		return NewJSMemoryResourcesExceededError()
  2428  	}
  2429  	if storeReserved+totalMaxStore > js.config.MaxStore {
  2430  		return NewJSStorageResourcesExceededError()
  2431  	}
  2432  
  2433  	return nil
  2434  }
  2435  
  2436  // This will reserve the stream resources requested.
  2437  // This is keyed off of the stream's MaxBytes.
  2438  func (js *jetStream) reserveStreamResources(cfg *StreamConfig) {
  2439  	if cfg == nil || cfg.MaxBytes <= 0 {
  2440  		return
  2441  	}
  2442  
  2443  	js.mu.Lock()
  2444  	switch cfg.Storage {
  2445  	case MemoryStorage:
  2446  		js.memReserved += cfg.MaxBytes
  2447  	case FileStorage:
  2448  		js.storeReserved += cfg.MaxBytes
  2449  	}
  2450  	s, clustered := js.srv, !js.standAlone
  2451  	js.mu.Unlock()
  2452  	// If clustered send an update to the system immediately.
  2453  	if clustered {
  2454  		s.sendStatszUpdate()
  2455  	}
  2456  }
  2457  
  2458  // Release reserved resources held by a stream.
  2459  func (js *jetStream) releaseStreamResources(cfg *StreamConfig) {
  2460  	if cfg == nil || cfg.MaxBytes <= 0 {
  2461  		return
  2462  	}
  2463  
  2464  	js.mu.Lock()
  2465  	switch cfg.Storage {
  2466  	case MemoryStorage:
  2467  		js.memReserved -= cfg.MaxBytes
  2468  	case FileStorage:
  2469  		js.storeReserved -= cfg.MaxBytes
  2470  	}
  2471  	s, clustered := js.srv, !js.standAlone
  2472  	js.mu.Unlock()
  2473  	// If clustered send an update to the system immediately.
  2474  	if clustered {
  2475  		s.sendStatszUpdate()
  2476  	}
  2477  }
  2478  
  2479  const (
  2480  	// JetStreamStoreDir is the prefix we use.
  2481  	JetStreamStoreDir = "jetstream"
  2482  	// JetStreamMaxStoreDefault is the default disk storage limit. 1TB
  2483  	JetStreamMaxStoreDefault = 1024 * 1024 * 1024 * 1024
  2484  	// JetStreamMaxMemDefault is only used when we can't determine system memory. 256MB
  2485  	JetStreamMaxMemDefault = 1024 * 1024 * 256
  2486  	// snapshot staging for restores.
  2487  	snapStagingDir = ".snap-staging"
  2488  )
  2489  
  2490  // Dynamically create a config with a tmp based directory (repeatable) and 75% of system memory.
  2491  func (s *Server) dynJetStreamConfig(storeDir string, maxStore, maxMem int64) *JetStreamConfig {
  2492  	jsc := &JetStreamConfig{}
  2493  	if storeDir != _EMPTY_ {
  2494  		jsc.StoreDir = filepath.Join(storeDir, JetStreamStoreDir)
  2495  	} else {
  2496  		// Create one in tmp directory, but make it consistent for restarts.
  2497  		jsc.StoreDir = filepath.Join(os.TempDir(), "nats", JetStreamStoreDir)
  2498  	}
  2499  
  2500  	opts := s.getOpts()
  2501  
  2502  	// Sync options.
  2503  	jsc.SyncInterval = opts.SyncInterval
  2504  	jsc.SyncAlways = opts.SyncAlways
  2505  
  2506  	if opts.maxStoreSet && maxStore >= 0 {
  2507  		jsc.MaxStore = maxStore
  2508  	} else {
  2509  		jsc.MaxStore = diskAvailable(jsc.StoreDir)
  2510  	}
  2511  
  2512  	if opts.maxMemSet && maxMem >= 0 {
  2513  		jsc.MaxMemory = maxMem
  2514  	} else {
  2515  		// Estimate to 75% of total memory if we can determine system memory.
  2516  		if sysMem := sysmem.Memory(); sysMem > 0 {
  2517  			// Check if we have been limited with GOMEMLIMIT and if lower use that value.
  2518  			if gml := debug.SetMemoryLimit(-1); gml != math.MaxInt64 && gml < sysMem {
  2519  				s.Debugf("JetStream detected GOMEMLIMIT of %v", friendlyBytes(gml))
  2520  				sysMem = gml
  2521  			}
  2522  			jsc.MaxMemory = sysMem / 4 * 3
  2523  		} else {
  2524  			jsc.MaxMemory = JetStreamMaxMemDefault
  2525  		}
  2526  	}
  2527  
  2528  	return jsc
  2529  }
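
        // As a rough sketch (assuming a host with 32GB of RAM, no GOMEMLIMIT and no explicit
        // limits set): the derived config stores data under <store_dir>/jetstream, caps
        // memory at 24GB (75% of system memory) and caps disk at whatever diskAvailable
        // reports for that directory.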
  2530  
  2531  // Helper function.
  2532  func (a *Account) checkForJetStream() (*Server, *jsAccount, error) {
  2533  	a.mu.RLock()
  2534  	s := a.srv
  2535  	jsa := a.js
  2536  	a.mu.RUnlock()
  2537  
  2538  	if s == nil || jsa == nil {
  2539  		return nil, nil, NewJSNotEnabledForAccountError()
  2540  	}
  2541  
  2542  	return s, jsa, nil
  2543  }
  2544  
  2545  // StreamTemplateConfig allows a configuration to auto-create streams based on this template when a message
  2546  // is received that matches. Each new stream is created using this config as its template.
  2547  type StreamTemplateConfig struct {
  2548  	Name       string        `json:"name"`
  2549  	Config     *StreamConfig `json:"config"`
  2550  	MaxStreams uint32        `json:"max_streams"`
  2551  }
  2552  
  2553  // StreamTemplateInfo
  2554  type StreamTemplateInfo struct {
  2555  	Config  *StreamTemplateConfig `json:"config"`
  2556  	Streams []string              `json:"streams"`
  2557  }
  2558  
  2559  // streamTemplate
  2560  type streamTemplate struct {
  2561  	mu  sync.Mutex
  2562  	tc  *client
  2563  	jsa *jsAccount
  2564  	*StreamTemplateConfig
  2565  	streams []string
  2566  }
  2567  
  2568  func (t *StreamTemplateConfig) deepCopy() *StreamTemplateConfig {
  2569  	copy := *t
  2570  	cfg := *t.Config
  2571  	copy.Config = &cfg
  2572  	return &copy
  2573  }
  2574  
  2575  // addStreamTemplate will add a stream template to this account that allows auto-creation of streams.
  2576  func (a *Account) addStreamTemplate(tc *StreamTemplateConfig) (*streamTemplate, error) {
  2577  	s, jsa, err := a.checkForJetStream()
  2578  	if err != nil {
  2579  		return nil, err
  2580  	}
  2581  	if tc.Config.Name != "" {
  2582  		return nil, fmt.Errorf("template config name should be empty")
  2583  	}
  2584  	if len(tc.Name) > JSMaxNameLen {
  2585  		return nil, fmt.Errorf("template name is too long, maximum allowed is %d", JSMaxNameLen)
  2586  	}
  2587  
  2588  	// FIXME(dlc) - Hacky
  2589  	tcopy := tc.deepCopy()
  2590  	tcopy.Config.Name = "_"
  2591  	cfg, apiErr := s.checkStreamCfg(tcopy.Config, a)
  2592  	if apiErr != nil {
  2593  		return nil, apiErr
  2594  	}
  2595  	tcopy.Config = &cfg
  2596  	t := &streamTemplate{
  2597  		StreamTemplateConfig: tcopy,
  2598  		tc:                   s.createInternalJetStreamClient(),
  2599  		jsa:                  jsa,
  2600  	}
  2601  	t.tc.registerWithAccount(a)
  2602  
  2603  	jsa.mu.Lock()
  2604  	if jsa.templates == nil {
  2605  		jsa.templates = make(map[string]*streamTemplate)
  2606  		// Create the appropriate store
  2607  		if cfg.Storage == FileStorage {
  2608  			jsa.store = newTemplateFileStore(jsa.storeDir)
  2609  		} else {
  2610  			jsa.store = newTemplateMemStore()
  2611  		}
  2612  	} else if _, ok := jsa.templates[tcopy.Name]; ok {
  2613  		jsa.mu.Unlock()
  2614  		return nil, fmt.Errorf("template with name %q already exists", tcopy.Name)
  2615  	}
  2616  	jsa.templates[tcopy.Name] = t
  2617  	jsa.mu.Unlock()
  2618  
  2619  	// FIXME(dlc) - we can not overlap subjects between templates. Need to have test.
  2620  
  2621  	// Setup the internal subscriptions to trap the messages.
  2622  	if err := t.createTemplateSubscriptions(); err != nil {
  2623  		return nil, err
  2624  	}
  2625  	if err := jsa.store.Store(t); err != nil {
  2626  		t.delete()
  2627  		return nil, err
  2628  	}
  2629  	return t, nil
  2630  }
  2631  
  2632  func (t *streamTemplate) createTemplateSubscriptions() error {
  2633  	if t == nil {
  2634  		return fmt.Errorf("no template")
  2635  	}
  2636  	if t.tc == nil {
  2637  		return fmt.Errorf("template not enabled")
  2638  	}
  2639  	c := t.tc
  2640  	if !c.srv.EventsEnabled() {
  2641  		return ErrNoSysAccount
  2642  	}
  2643  	sid := 1
  2644  	for _, subject := range t.Config.Subjects {
  2645  		// Now create the subscription
  2646  		if _, err := c.processSub([]byte(subject), nil, []byte(strconv.Itoa(sid)), t.processInboundTemplateMsg, false); err != nil {
  2647  			c.acc.deleteStreamTemplate(t.Name)
  2648  			return err
  2649  		}
  2650  		sid++
  2651  	}
  2652  	return nil
  2653  }
  2654  
  2655  func (t *streamTemplate) processInboundTemplateMsg(_ *subscription, pc *client, acc *Account, subject, reply string, msg []byte) {
  2656  	if t == nil || t.jsa == nil {
  2657  		return
  2658  	}
  2659  	jsa := t.jsa
  2660  	cn := canonicalName(subject)
  2661  
  2662  	jsa.mu.Lock()
  2663  	// If we already are registered then we can just return here.
  2664  	if _, ok := jsa.streams[cn]; ok {
  2665  		jsa.mu.Unlock()
  2666  		return
  2667  	}
  2668  	jsa.mu.Unlock()
  2669  
  2670  	// Check if we are at the maximum and grab some variables.
  2671  	t.mu.Lock()
  2672  	c := t.tc
  2673  	cfg := *t.Config
  2674  	cfg.Template = t.Name
  2675  	atLimit := len(t.streams) >= int(t.MaxStreams)
  2676  	if !atLimit {
  2677  		t.streams = append(t.streams, cn)
  2678  	}
  2679  	t.mu.Unlock()
  2680  
  2681  	if atLimit {
  2682  		c.RateLimitWarnf("JetStream could not create stream for account %q on subject %q, at limit", acc.Name, subject)
  2683  		return
  2684  	}
  2685  
  2686  	// We need to create the stream here.
  2687  	// Change the config from the template and only use literal subject.
  2688  	cfg.Name = cn
  2689  	cfg.Subjects = []string{subject}
  2690  	mset, err := acc.addStream(&cfg)
  2691  	if err != nil {
  2692  		acc.validateStreams(t)
  2693  		c.RateLimitWarnf("JetStream could not create stream for account %q on subject %q: %v", acc.Name, subject, err)
  2694  		return
  2695  	}
  2696  
  2697  	// Process this message directly by invoking mset.
  2698  	mset.processInboundJetStreamMsg(nil, pc, acc, subject, reply, msg)
  2699  }
  2700  
  2701  // lookupStreamTemplate looks up the named stream template.
  2702  func (a *Account) lookupStreamTemplate(name string) (*streamTemplate, error) {
  2703  	_, jsa, err := a.checkForJetStream()
  2704  	if err != nil {
  2705  		return nil, err
  2706  	}
  2707  	jsa.mu.Lock()
  2708  	defer jsa.mu.Unlock()
  2709  	if jsa.templates == nil {
  2710  		return nil, fmt.Errorf("template not found")
  2711  	}
  2712  	t, ok := jsa.templates[name]
  2713  	if !ok {
  2714  		return nil, fmt.Errorf("template not found")
  2715  	}
  2716  	return t, nil
  2717  }
  2718  
  2719  // This function will check all named streams and make sure they are valid.
  2720  func (a *Account) validateStreams(t *streamTemplate) {
  2721  	t.mu.Lock()
  2722  	var vstreams []string
  2723  	for _, sname := range t.streams {
  2724  		if _, err := a.lookupStream(sname); err == nil {
  2725  			vstreams = append(vstreams, sname)
  2726  		}
  2727  	}
  2728  	t.streams = vstreams
  2729  	t.mu.Unlock()
  2730  }
  2731  
  2732  func (t *streamTemplate) delete() error {
  2733  	if t == nil {
  2734  		return fmt.Errorf("nil stream template")
  2735  	}
  2736  
  2737  	t.mu.Lock()
  2738  	jsa := t.jsa
  2739  	c := t.tc
  2740  	t.tc = nil
  2741  	defer func() {
  2742  		if c != nil {
  2743  			c.closeConnection(ClientClosed)
  2744  		}
  2745  	}()
  2746  	t.mu.Unlock()
  2747  
  2748  	if jsa == nil {
  2749  		return NewJSNotEnabledForAccountError()
  2750  	}
  2751  
  2752  	jsa.mu.Lock()
  2753  	if jsa.templates == nil {
  2754  		jsa.mu.Unlock()
  2755  		return fmt.Errorf("template not found")
  2756  	}
  2757  	if _, ok := jsa.templates[t.Name]; !ok {
  2758  		jsa.mu.Unlock()
  2759  		return fmt.Errorf("template not found")
  2760  	}
  2761  	delete(jsa.templates, t.Name)
  2762  	acc := jsa.account
  2763  	jsa.mu.Unlock()
  2764  
  2765  	// Remove streams associated with this template.
  2766  	var streams []*stream
  2767  	t.mu.Lock()
  2768  	for _, name := range t.streams {
  2769  		if mset, err := acc.lookupStream(name); err == nil {
  2770  			streams = append(streams, mset)
  2771  		}
  2772  	}
  2773  	t.mu.Unlock()
  2774  
  2775  	if jsa.store != nil {
  2776  		if err := jsa.store.Delete(t); err != nil {
  2777  			return fmt.Errorf("error deleting template from store: %v", err)
  2778  		}
  2779  	}
  2780  
  2781  	var lastErr error
  2782  	for _, mset := range streams {
  2783  		if err := mset.delete(); err != nil {
  2784  			lastErr = err
  2785  		}
  2786  	}
  2787  	return lastErr
  2788  }
  2789  
  2790  func (a *Account) deleteStreamTemplate(name string) error {
  2791  	t, err := a.lookupStreamTemplate(name)
  2792  	if err != nil {
  2793  		return NewJSStreamTemplateNotFoundError()
  2794  	}
  2795  	return t.delete()
  2796  }
  2797  
  2798  func (a *Account) templates() []*streamTemplate {
  2799  	var ts []*streamTemplate
  2800  	_, jsa, err := a.checkForJetStream()
  2801  	if err != nil {
  2802  		return nil
  2803  	}
  2804  
  2805  	jsa.mu.Lock()
  2806  	for _, t := range jsa.templates {
  2807  		// FIXME(dlc) - Copy?
  2808  		ts = append(ts, t)
  2809  	}
  2810  	jsa.mu.Unlock()
  2811  
  2812  	return ts
  2813  }
  2814  
  2815  // Will add a stream to a template; this is for recovery.
  2816  func (jsa *jsAccount) addStreamNameToTemplate(tname, mname string) error {
  2817  	if jsa.templates == nil {
  2818  		return fmt.Errorf("template not found")
  2819  	}
  2820  	t, ok := jsa.templates[tname]
  2821  	if !ok {
  2822  		return fmt.Errorf("template not found")
  2823  	}
  2824  	// We found template.
  2825  	t.mu.Lock()
  2826  	t.streams = append(t.streams, mname)
  2827  	t.mu.Unlock()
  2828  	return nil
  2829  }
  2830  
  2831  // This will check if a template owns this stream.
  2832  // jsAccount lock should be held
  2833  func (jsa *jsAccount) checkTemplateOwnership(tname, sname string) bool {
  2834  	if jsa.templates == nil {
  2835  		return false
  2836  	}
  2837  	t, ok := jsa.templates[tname]
  2838  	if !ok {
  2839  		return false
  2840  	}
  2841  	// We found template, make sure we are in streams.
  2842  	for _, streamName := range t.streams {
  2843  		if sname == streamName {
  2844  			return true
  2845  		}
  2846  	}
  2847  	return false
  2848  }
  2849  
  2850  type Number interface {
  2851  	int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 | float32 | float64
  2852  }
  2853  
  2854  // friendlyBytes returns a string with the given byte count
  2855  // represented as a size, such as 1KB, 10MB, etc...
  2856  func friendlyBytes[T Number](bytes T) string {
  2857  	fbytes := float64(bytes)
  2858  	base := 1024
  2859  	pre := []string{"K", "M", "G", "T", "P", "E"}
  2860  	if fbytes < float64(base) {
  2861  		return fmt.Sprintf("%v B", fbytes)
  2862  	}
  2863  	exp := int(math.Log(fbytes) / math.Log(float64(base)))
  2864  	index := exp - 1
  2865  	return fmt.Sprintf("%.2f %sB", fbytes/math.Pow(float64(base), float64(exp)), pre[index])
  2866  }
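
        // For example:
        //
        //	friendlyBytes(512)  // "512 B"
        //	friendlyBytes(1536) // "1.50 KB"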
  2867  
  2868  func isValidName(name string) bool {
  2869  	if name == _EMPTY_ {
  2870  		return false
  2871  	}
  2872  	return !strings.ContainsAny(name, " \t\r\n\f.*>")
  2873  }
  2874  
  2875  // CanonicalName will replace all token separators '.' with '_'.
  2876  // This can be used when naming streams or consumers with multi-token subjects.
  2877  func canonicalName(name string) string {
  2878  	return strings.ReplaceAll(name, ".", "_")
  2879  }
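
        // For example:
        //
        //	canonicalName("orders.us.east") // "orders_us_east"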
  2880  
  2881  // To throttle the out of resources errors.
  2882  func (s *Server) resourcesExceededError() {
  2883  	var didAlert bool
  2884  
  2885  	s.rerrMu.Lock()
  2886  	if now := time.Now(); now.Sub(s.rerrLast) > 10*time.Second {
  2887  		s.Errorf("JetStream resource limits exceeded for server")
  2888  		s.rerrLast = now
  2889  		didAlert = true
  2890  	}
  2891  	s.rerrMu.Unlock()
  2892  
  2893  	// If we are meta leader we should relinquish that here.
  2894  	if didAlert {
  2895  		if js := s.getJetStream(); js != nil {
  2896  			js.mu.RLock()
  2897  			if cc := js.cluster; cc != nil && cc.isLeader() {
  2898  				cc.meta.StepDown()
  2899  			}
  2900  			js.mu.RUnlock()
  2901  		}
  2902  	}
  2903  }
  2904  
  2905  // For validating options.
  2906  func validateJetStreamOptions(o *Options) error {
  2907  	// In non-operator mode, the account names need to be configured.
  2908  	if len(o.JsAccDefaultDomain) > 0 {
  2909  		if len(o.TrustedOperators) == 0 {
  2910  			for a, domain := range o.JsAccDefaultDomain {
  2911  				found := false
  2912  				if isReservedAccount(a) {
  2913  					found = true
  2914  				} else {
  2915  					for _, acc := range o.Accounts {
  2916  						if a == acc.GetName() {
  2917  							if len(acc.jsLimits) > 0 && domain != _EMPTY_ {
  2918  								return fmt.Errorf("default_js_domain contains account name %q with enabled JetStream", a)
  2919  							}
  2920  							found = true
  2921  							break
  2922  						}
  2923  					}
  2924  				}
  2925  				if !found {
  2926  					return fmt.Errorf("in non operator mode, `default_js_domain` references non existing account %q", a)
  2927  				}
  2928  			}
  2929  		} else {
  2930  			for a := range o.JsAccDefaultDomain {
  2931  				if !nkeys.IsValidPublicAccountKey(a) {
  2932  					return fmt.Errorf("default_js_domain contains account name %q, which is not a valid public account nkey", a)
  2933  				}
  2934  			}
  2935  		}
  2936  		for a, d := range o.JsAccDefaultDomain {
  2937  			sacc := DEFAULT_SYSTEM_ACCOUNT
  2938  			if o.SystemAccount != _EMPTY_ {
  2939  				sacc = o.SystemAccount
  2940  			}
  2941  			if a == sacc {
  2942  				return fmt.Errorf("system account %q can not be in default_js_domain", a)
  2943  			}
  2944  			if d == _EMPTY_ {
  2945  				continue
  2946  			}
  2947  			if sub := fmt.Sprintf(jsDomainAPI, d); !IsValidSubject(sub) {
  2948  				return fmt.Errorf("default_js_domain contains account %q with invalid domain name %q", a, d)
  2949  			}
  2950  		}
  2951  	}
  2952  	if o.JetStreamDomain != _EMPTY_ {
  2953  		if subj := fmt.Sprintf(jsDomainAPI, o.JetStreamDomain); !IsValidSubject(subj) {
  2954  			return fmt.Errorf("invalid domain name: derived %q is not a valid subject", subj)
  2955  		}
  2956  
  2957  		if !isValidName(o.JetStreamDomain) {
  2958  			return fmt.Errorf("invalid domain name: may not contain ., * or >")
  2959  		}
  2960  	}
  2961  	// If not clustered no checks needed past here.
  2962  	if !o.JetStream || o.Cluster.Port == 0 {
  2963  		return nil
  2964  	}
  2965  	if o.ServerName == _EMPTY_ {
  2966  		return fmt.Errorf("jetstream cluster requires `server_name` to be set")
  2967  	}
  2968  	if o.Cluster.Name == _EMPTY_ {
  2969  		return fmt.Errorf("jetstream cluster requires `cluster.name` to be set")
  2970  	}
  2971  
  2972  	h := strings.ToLower(o.JetStreamExtHint)
  2973  	switch h {
  2974  	case jsWillExtend, jsNoExtend, _EMPTY_:
  2975  		o.JetStreamExtHint = h
  2976  	default:
  2977  		return fmt.Errorf("expected 'no_extend' for string value, got '%s'", h)
  2978  	}
  2979  
  2980  	if o.JetStreamMaxCatchup < 0 {
  2981  		return fmt.Errorf("jetstream max catchup cannot be negative")
  2982  	}
  2983  	return nil
  2984  }
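
        // For illustration, a non-operator-mode config that passes the checks above might
        // look like this (hypothetical account and domain names):
        //
        //	accounts: { HUB: { jetstream: enabled }, LEAF: {} }
        //	default_js_domain: { LEAF: "hub" }
        //
        // LEAF has no JetStream limits of its own and "hub" forms a valid domain-scoped
        // JS API subject, so validation succeeds.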
  2985  
  2986  // We had a bug that set a default de-dupe window on mirrors, despite that not being a valid config.
  2987  func fixCfgMirrorWithDedupWindow(cfg *StreamConfig) {
  2988  	if cfg == nil || cfg.Mirror == nil {
  2989  		return
  2990  	}
  2991  	if cfg.Duplicates != 0 {
  2992  		cfg.Duplicates = 0
  2993  	}
  2994  }