get.pme.sh/pnats@v0.0.0-20240304004023-26bb5a137ed0/server/jetstream_cluster_2_test.go (about)

     1  // Copyright 2020-2023 The NATS Authors
     2  // Licensed under the Apache License, Version 2.0 (the "License");
     3  // you may not use this file except in compliance with the License.
     4  // You may obtain a copy of the License at
     5  //
     6  // http://www.apache.org/licenses/LICENSE-2.0
     7  //
     8  // Unless required by applicable law or agreed to in writing, software
     9  // distributed under the License is distributed on an "AS IS" BASIS,
    10  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  //go:build !skip_js_tests && !skip_js_cluster_tests_2
    15  // +build !skip_js_tests,!skip_js_cluster_tests_2
    16  
    17  package server
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	crand "crypto/rand"
    23  	"encoding/binary"
    24  	"encoding/hex"
    25  	"encoding/json"
    26  	"errors"
    27  	"fmt"
    28  	"math/rand"
    29  	"os"
    30  	"path/filepath"
    31  	"reflect"
    32  	"runtime"
    33  	"strconv"
    34  	"strings"
    35  	"sync"
    36  	"sync/atomic"
    37  	"testing"
    38  	"time"
    39  
    40  	"github.com/nats-io/nats.go"
    41  )
    42  
    43  func TestJetStreamClusterJSAPIImport(t *testing.T) {
    44  	c := createJetStreamClusterWithTemplate(t, jsClusterImportsTempl, "C1", 3)
    45  	defer c.shutdown()
    46  
    47  	// Client based API - This will connect to the non-js account which imports JS.
    48  	// Connect below does an AccountInfo call.
    49  	s := c.randomNonLeader()
    50  	nc, js := jsClientConnect(t, s)
    51  	defer nc.Close()
    52  
    53  	if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2}); err != nil {
    54  		t.Fatalf("Unexpected error: %v", err)
    55  	}
    56  	// Note if this was ephemeral we would need to setup export/import for that subject.
    57  	sub, err := js.SubscribeSync("TEST", nats.Durable("dlc"))
    58  	if err != nil {
    59  		t.Fatalf("Unexpected error: %v", err)
    60  	}
    61  	// Make sure we can look up both.
    62  	if _, err := js.StreamInfo("TEST"); err != nil {
    63  		t.Fatalf("Unexpected error: %v", err)
    64  	}
    65  	if _, err := sub.ConsumerInfo(); err != nil {
    66  		t.Fatalf("Unexpected error: %v", err)
    67  	}
    68  	// Names list..
    69  	var names []string
    70  	for name := range js.StreamNames() {
    71  		names = append(names, name)
    72  	}
    73  	if len(names) != 1 {
    74  		t.Fatalf("Expected only 1 stream but got %d", len(names))
    75  	}
    76  
    77  	// Now send to stream.
    78  	if _, err := js.Publish("TEST", []byte("OK")); err != nil {
    79  		t.Fatalf("Unexpected publish error: %v", err)
    80  	}
    81  
    82  	sub, err = js.PullSubscribe("TEST", "tr")
    83  	if err != nil {
    84  		t.Fatalf("Unexpected error: %v", err)
    85  	}
    86  	msgs := fetchMsgs(t, sub, 1, 5*time.Second)
    87  
    88  	m := msgs[0]
    89  	if m.Subject != "TEST" {
    90  		t.Fatalf("Expected subject of %q, got %q", "TEST", m.Subject)
    91  	}
    92  	if m.Header != nil {
    93  		t.Fatalf("Expected no header on the message, got: %v", m.Header)
    94  	}
    95  	meta, err := m.Metadata()
    96  	if err != nil {
    97  		t.Fatalf("Unexpected error: %v", err)
    98  	}
    99  	if meta.Sequence.Consumer != 1 || meta.Sequence.Stream != 1 || meta.NumDelivered != 1 || meta.NumPending != 0 {
   100  		t.Fatalf("Bad meta: %+v", meta)
   101  	}
   102  
   103  	js.Publish("TEST", []byte("Second"))
   104  	js.Publish("TEST", []byte("Third"))
   105  
   106  	checkFor(t, time.Second, 15*time.Millisecond, func() error {
   107  		ci, err := js.ConsumerInfo("TEST", "tr")
   108  		if err != nil {
   109  			return fmt.Errorf("Error getting consumer info: %v", err)
   110  		}
   111  		if ci.NumPending != 2 {
   112  			return fmt.Errorf("NumPending still not 1: %v", ci.NumPending)
   113  		}
   114  		return nil
   115  	})
   116  
   117  	// Ack across accounts.
   118  	m, err = nc.Request("$JS.API.CONSUMER.MSG.NEXT.TEST.tr", []byte("+NXT"), 2*time.Second)
   119  	if err != nil {
   120  		t.Fatalf("Unexpected error: %v", err)
   121  	}
   122  	meta, err = m.Metadata()
   123  	if err != nil {
   124  		t.Fatalf("Unexpected error: %v", err)
   125  	}
   126  	if meta.Sequence.Consumer != 2 || meta.Sequence.Stream != 2 || meta.NumDelivered != 1 || meta.NumPending != 1 {
   127  		t.Fatalf("Bad meta: %+v", meta)
   128  	}
   129  
   130  	// AckNext
   131  	_, err = nc.Request(m.Reply, []byte("+NXT"), 2*time.Second)
   132  	if err != nil {
   133  		t.Fatalf("Unexpected error: %v", err)
   134  	}
   135  }
   136  
   137  func TestJetStreamClusterMultiRestartBug(t *testing.T) {
   138  	c := createJetStreamClusterExplicit(t, "R3", 3)
   139  	defer c.shutdown()
   140  
   141  	// Client based API
   142  	nc, js := jsClientConnect(t, c.randomServer())
   143  	defer nc.Close()
   144  
   145  	_, err := js.AddStream(&nats.StreamConfig{
   146  		Name:     "TEST",
   147  		Subjects: []string{"foo", "bar"},
   148  		Replicas: 3,
   149  	})
   150  	if err != nil {
   151  		t.Fatalf("Unexpected error: %v", err)
   152  	}
   153  
   154  	// Send in 10000 messages.
   155  	msg, toSend := make([]byte, 4*1024), 10000
   156  	crand.Read(msg)
   157  
   158  	for i := 0; i < toSend; i++ {
   159  		if _, err = js.Publish("foo", msg); err != nil {
   160  			t.Fatalf("Unexpected publish error: %v", err)
   161  		}
   162  	}
   163  
   164  	checkFor(t, 10*time.Second, 250*time.Millisecond, func() error {
   165  		si, err := js.StreamInfo("TEST")
   166  		if err != nil {
   167  			return fmt.Errorf("Unexpected error: %v", err)
   168  		}
   169  		if si.State.Msgs != uint64(toSend) {
   170  			return fmt.Errorf("Expected to have %d messages, got %d", toSend, si.State.Msgs)
   171  		}
   172  		return nil
   173  	})
   174  
   175  	// For this bug, we will stop and remove the complete state from one server.
   176  	s := c.randomServer()
   177  	opts := s.getOpts()
   178  	s.Shutdown()
   179  	removeDir(t, opts.StoreDir)
   180  
   181  	// Then restart it.
   182  	c.restartAll()
   183  	c.waitOnAllCurrent()
   184  	c.waitOnStreamLeader("$G", "TEST")
   185  
   186  	s = c.serverByName(s.Name())
   187  	c.waitOnStreamCurrent(s, "$G", "TEST")
   188  
   189  	// Now restart them all..
   190  	c.stopAll()
   191  	c.restartAll()
   192  	c.waitOnLeader()
   193  	c.waitOnStreamLeader("$G", "TEST")
   194  
   195  	// Create new client.
   196  	nc, js = jsClientConnect(t, c.randomServer())
   197  	defer nc.Close()
   198  
   199  	// Make sure the replicas are current.
   200  	js2, err := nc.JetStream(nats.MaxWait(250 * time.Millisecond))
   201  	if err != nil {
   202  		t.Fatalf("Unexpected error: %v", err)
   203  	}
   204  	checkFor(t, 20*time.Second, 250*time.Millisecond, func() error {
   205  		si, _ := js2.StreamInfo("TEST")
   206  		if si == nil || si.Cluster == nil {
   207  			return fmt.Errorf("No stream info or cluster")
   208  		}
   209  		for _, pi := range si.Cluster.Replicas {
   210  			if !pi.Current {
   211  				return fmt.Errorf("Peer not current: %+v", pi)
   212  			}
   213  		}
   214  		return nil
   215  	})
   216  }
   217  
   218  func TestJetStreamClusterServerLimits(t *testing.T) {
   219  	// 2MB memory, 8MB disk
   220  	c := createJetStreamClusterWithTemplate(t, jsClusterLimitsTempl, "R3L", 3)
   221  	defer c.shutdown()
   222  
   223  	// Client based API
   224  	nc, js := jsClientConnect(t, c.randomServer())
   225  	defer nc.Close()
   226  
   227  	msg, toSend := make([]byte, 4*1024), 5000
   228  	crand.Read(msg)
   229  
   230  	// Memory first.
   231  	max_mem := uint64(2*1024*1024) + uint64(len(msg))
   232  
   233  	_, err := js.AddStream(&nats.StreamConfig{
   234  		Name:     "TM",
   235  		Replicas: 3,
   236  		Storage:  nats.MemoryStorage,
   237  	})
   238  	if err != nil {
   239  		t.Fatalf("Unexpected error: %v", err)
   240  	}
   241  	// Grab stream leader.
   242  	sl := c.streamLeader("ONE", "TM")
   243  	require_True(t, sl != nil)
   244  
   245  	for i := 0; i < toSend; i++ {
   246  		if _, err = js.Publish("TM", msg); err != nil {
   247  			break
   248  		}
   249  	}
   250  	if err == nil || !strings.HasPrefix(err.Error(), "nats: insufficient resources") {
   251  		t.Fatalf("Expected a ErrJetStreamResourcesExceeded error, got %v", err)
   252  	}
   253  
   254  	// Since we have run all servers out of resources, no leader will be elected to respond to stream info.
   255  	// So check manually.
   256  	acc, err := sl.LookupAccount("ONE")
   257  	require_NoError(t, err)
   258  	mset, err := acc.lookupStream("TM")
   259  	require_NoError(t, err)
   260  	if state := mset.state(); state.Bytes > max_mem {
   261  		t.Fatalf("Expected bytes of %v to not be greater then %v",
   262  			friendlyBytes(int64(state.Bytes)),
   263  			friendlyBytes(int64(max_mem)),
   264  		)
   265  	}
   266  
   267  	c.waitOnLeader()
   268  
   269  	// Now disk.
   270  	max_disk := uint64(8*1024*1024) + uint64(len(msg))
   271  
   272  	_, err = js.AddStream(&nats.StreamConfig{
   273  		Name:     "TF",
   274  		Replicas: 3,
   275  	})
   276  	if err != nil {
   277  		t.Fatalf("Unexpected error: %v", err)
   278  	}
   279  	// Grab stream leader.
   280  	sl = c.streamLeader("ONE", "TF")
   281  	require_True(t, sl != nil)
   282  
   283  	for i := 0; i < toSend; i++ {
   284  		if _, err = js.Publish("TF", msg); err != nil {
   285  			break
   286  		}
   287  	}
   288  	if err == nil || !strings.HasPrefix(err.Error(), "nats: insufficient resources") {
   289  		t.Fatalf("Expected a ErrJetStreamResourcesExceeded error, got %v", err)
   290  	}
   291  
   292  	// Since we have run all servers out of resources, no leader will be elected to respond to stream info.
   293  	// So check manually.
   294  	acc, err = sl.LookupAccount("ONE")
   295  	require_NoError(t, err)
   296  	mset, err = acc.lookupStream("TF")
   297  	require_NoError(t, err)
   298  	if state := mset.state(); state.Bytes > max_disk {
   299  		t.Fatalf("Expected bytes of %v to not be greater then %v",
   300  			friendlyBytes(int64(state.Bytes)),
   301  			friendlyBytes(int64(max_disk)),
   302  		)
   303  	}
   304  }
   305  
   306  func TestJetStreamClusterAccountLoadFailure(t *testing.T) {
   307  	c := createJetStreamClusterWithTemplate(t, jsClusterLimitsTempl, "R3L", 3)
   308  	defer c.shutdown()
   309  
   310  	// Client based API
   311  	nc, js := jsClientConnect(t, c.leader())
   312  	defer nc.Close()
   313  
   314  	// Remove the "ONE" account from non-leader
   315  	s := c.randomNonLeader()
   316  	s.mu.Lock()
   317  	s.accounts.Delete("ONE")
   318  	s.mu.Unlock()
   319  
   320  	_, err := js.AddStream(&nats.StreamConfig{Name: "F", Replicas: 3})
   321  	if err == nil || !strings.Contains(err.Error(), "account not found") {
   322  		t.Fatalf("Expected an 'account not found' error but got %v", err)
   323  	}
   324  }
   325  
   326  func TestJetStreamClusterAckPendingWithExpired(t *testing.T) {
   327  	c := createJetStreamClusterExplicit(t, "R3", 3)
   328  	defer c.shutdown()
   329  
   330  	// Client based API
   331  	nc, js := jsClientConnect(t, c.randomServer())
   332  	defer nc.Close()
   333  
   334  	_, err := js.AddStream(&nats.StreamConfig{
   335  		Name:     "TEST",
   336  		Subjects: []string{"foo", "bar"},
   337  		Replicas: 3,
   338  		MaxAge:   500 * time.Millisecond,
   339  	})
   340  	if err != nil {
   341  		t.Fatalf("Unexpected error: %v", err)
   342  	}
   343  
   344  	// Send in 100 messages.
   345  	msg, toSend := make([]byte, 256), 100
   346  	crand.Read(msg)
   347  
   348  	for i := 0; i < toSend; i++ {
   349  		if _, err = js.Publish("foo", msg); err != nil {
   350  			t.Fatalf("Unexpected publish error: %v", err)
   351  		}
   352  	}
   353  
   354  	sub, err := js.SubscribeSync("foo")
   355  	if err != nil {
   356  		t.Fatalf("Unexpected error: %v", err)
   357  	}
   358  
   359  	checkSubsPending(t, sub, toSend)
   360  	ci, err := sub.ConsumerInfo()
   361  	if err != nil {
   362  		t.Fatalf("Unexpected error: %v", err)
   363  	}
   364  	if ci.NumAckPending != toSend {
   365  		t.Fatalf("Expected %d to be pending, got %d", toSend, ci.NumAckPending)
   366  	}
   367  
   368  	// Wait for messages to expire.
   369  	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
   370  		si, err := js.StreamInfo("TEST")
   371  		if err != nil {
   372  			t.Fatalf("Unexpected error: %v", err)
   373  		}
   374  		if si.State.Msgs != 0 {
   375  			return fmt.Errorf("Expected 0 msgs, got state: %+v", si.State)
   376  		}
   377  		return nil
   378  	})
   379  
   380  	// Once expired these messages can not be redelivered so should not be considered ack pending at this point.
   381  	// Now ack..
   382  	ci, err = sub.ConsumerInfo()
   383  	if err != nil {
   384  		t.Fatalf("Unexpected error: %v", err)
   385  	}
   386  	if ci.NumAckPending != 0 {
   387  		t.Fatalf("Expected nothing to be ack pending, got %d", ci.NumAckPending)
   388  	}
   389  }
   390  
   391  func TestJetStreamClusterAckPendingWithMaxRedelivered(t *testing.T) {
   392  	c := createJetStreamClusterExplicit(t, "R3", 3)
   393  	defer c.shutdown()
   394  
   395  	// Client based API
   396  	nc, js := jsClientConnect(t, c.randomServer())
   397  	defer nc.Close()
   398  
   399  	_, err := js.AddStream(&nats.StreamConfig{
   400  		Name:     "TEST",
   401  		Subjects: []string{"foo", "bar"},
   402  		Replicas: 3,
   403  	})
   404  	if err != nil {
   405  		t.Fatalf("Unexpected error: %v", err)
   406  	}
   407  
   408  	// Send in 100 messages.
   409  	msg, toSend := []byte("HELLO"), 100
   410  
   411  	for i := 0; i < toSend; i++ {
   412  		if _, err = js.Publish("foo", msg); err != nil {
   413  			t.Fatalf("Unexpected publish error: %v", err)
   414  		}
   415  	}
   416  
   417  	sub, err := js.SubscribeSync("foo",
   418  		nats.MaxDeliver(2),
   419  		nats.Durable("dlc"),
   420  		nats.AckWait(10*time.Millisecond),
   421  		nats.MaxAckPending(50),
   422  	)
   423  	if err != nil {
   424  		t.Fatalf("Unexpected error: %v", err)
   425  	}
   426  
   427  	checkSubsPending(t, sub, toSend*2)
   428  
   429  	checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
   430  		ci, err := sub.ConsumerInfo()
   431  		if err != nil {
   432  			return err
   433  		}
   434  		if ci.NumAckPending != 0 {
   435  			return fmt.Errorf("Expected nothing to be ack pending, got %d", ci.NumAckPending)
   436  		}
   437  		return nil
   438  	})
   439  }
   440  
   441  func TestJetStreamClusterMixedMode(t *testing.T) {
   442  	for _, test := range []struct {
   443  		name string
   444  		tmpl string
   445  	}{
   446  		{"multi-account", jsClusterLimitsTempl},
   447  		{"global-account", jsMixedModeGlobalAccountTempl},
   448  	} {
   449  		t.Run(test.name, func(t *testing.T) {
   450  
   451  			c := createMixedModeCluster(t, test.tmpl, "MM5", _EMPTY_, 3, 2, true)
   452  			defer c.shutdown()
   453  
   454  			// Client based API - Non-JS server.
   455  			nc, js := jsClientConnect(t, c.serverByName("S-5"))
   456  			defer nc.Close()
   457  
   458  			_, err := js.AddStream(&nats.StreamConfig{
   459  				Name:     "TEST",
   460  				Subjects: []string{"foo", "bar"},
   461  				Replicas: 3,
   462  			})
   463  			if err != nil {
   464  				t.Fatalf("Unexpected error: %v", err)
   465  			}
   466  
   467  			ml := c.leader()
   468  			if ml == nil {
   469  				t.Fatalf("No metaleader")
   470  			}
   471  
   472  			// Make sure we are tracking only the JS peers.
   473  			checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
   474  				peers := ml.JetStreamClusterPeers()
   475  				if len(peers) == 3 {
   476  					return nil
   477  				}
   478  				return fmt.Errorf("Not correct number of peers, expected %d, got %d", 3, len(peers))
   479  			})
   480  
   481  			// Grab the underlying raft structure and make sure the system adjusts its cluster set size.
   482  			meta := ml.getJetStream().getMetaGroup().(*raft)
   483  			checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
   484  				ps := meta.currentPeerState()
   485  				if len(ps.knownPeers) != 3 {
   486  					return fmt.Errorf("Expected known peers to be 3, but got %+v", ps.knownPeers)
   487  				}
   488  				if ps.clusterSize < 3 {
   489  					return fmt.Errorf("Expected cluster size to be 3, but got %+v", ps)
   490  				}
   491  				return nil
   492  			})
   493  		})
   494  	}
   495  }
   496  
   497  func TestJetStreamClusterLeafnodeSpokes(t *testing.T) {
   498  	c := createJetStreamCluster(t, jsClusterTempl, "HUB", _EMPTY_, 3, 22020, false)
   499  	defer c.shutdown()
   500  
   501  	lnc1 := c.createLeafNodesWithStartPortAndDomain("R1", 3, 22110, _EMPTY_)
   502  	defer lnc1.shutdown()
   503  
   504  	lnc2 := c.createLeafNodesWithStartPortAndDomain("R2", 3, 22120, _EMPTY_)
   505  	defer lnc2.shutdown()
   506  
   507  	lnc3 := c.createLeafNodesWithStartPortAndDomain("R3", 3, 22130, _EMPTY_)
   508  	defer lnc3.shutdown()
   509  
   510  	// Wait on all peers.
   511  	c.waitOnPeerCount(12)
   512  
   513  	// Make sure shrinking works.
   514  	lnc3.shutdown()
   515  	c.waitOnPeerCount(9)
   516  
   517  	lnc3 = c.createLeafNodesWithStartPortAndDomain("LNC3", 3, 22130, _EMPTY_)
   518  	defer lnc3.shutdown()
   519  
   520  	c.waitOnPeerCount(12)
   521  }
   522  
   523  func TestJetStreamClusterLeafNodeDenyNoDupe(t *testing.T) {
   524  	tmpl := strings.Replace(jsClusterAccountsTempl, "store_dir:", "domain: CORE, store_dir:", 1)
   525  	c := createJetStreamCluster(t, tmpl, "CORE", _EMPTY_, 3, 18033, true)
   526  	defer c.shutdown()
   527  
   528  	tmpl = strings.Replace(jsClusterTemplWithSingleLeafNode, "store_dir:", "domain: SPOKE, store_dir:", 1)
   529  	ln := c.createLeafNodeWithTemplate("LN-SPOKE", tmpl)
   530  	defer ln.Shutdown()
   531  
   532  	checkLeafNodeConnectedCount(t, ln, 2)
   533  
   534  	// Now disconnect our leafnode connections by restarting the server we are connected to..
   535  	for _, s := range c.servers {
   536  		if s.ClusterName() != c.name {
   537  			continue
   538  		}
   539  		if nln := s.NumLeafNodes(); nln > 0 {
   540  			s.Shutdown()
   541  			c.restartServer(s)
   542  		}
   543  	}
   544  	// Make sure we are back connected.
   545  	checkLeafNodeConnectedCount(t, ln, 2)
   546  
   547  	// Now grab leaf varz and make sure we have no dupe deny clauses.
   548  	vz, err := ln.Varz(nil)
   549  	if err != nil {
   550  		t.Fatalf("Unexpected error: %v", err)
   551  	}
   552  	// Grab the correct remote.
   553  	for _, remote := range vz.LeafNode.Remotes {
   554  		if remote.LocalAccount == ln.SystemAccount().Name {
   555  			if remote.Deny != nil && len(remote.Deny.Exports) > 3 { // denyAll := []string{jscAllSubj, raftAllSubj, jsAllAPI}
   556  				t.Fatalf("Dupe entries found: %+v", remote.Deny)
   557  			}
   558  			break
   559  		}
   560  	}
   561  }
   562  
   563  // Multiple JS domains.
   564  func TestJetStreamClusterSingleLeafNodeWithoutSharedSystemAccount(t *testing.T) {
   565  	c := createJetStreamCluster(t, strings.Replace(jsClusterAccountsTempl, "store_dir", "domain: CORE, store_dir", 1), "HUB", _EMPTY_, 3, 14333, true)
   566  	defer c.shutdown()
   567  
   568  	ln := c.createSingleLeafNodeNoSystemAccount()
   569  	defer ln.Shutdown()
   570  
   571  	// The setup here has a single leafnode server with two accounts. One has JS, the other does not.
   572  	// We want to to test the following.
   573  	// 1. For the account without JS, we simply will pass through to the HUB. Meaning since our local account
   574  	//    does not have it, we simply inherit the hub's by default.
   575  	// 2. For the JS enabled account, we are isolated and use our local one only.
   576  
   577  	// Check behavior of the account without JS.
   578  	// Normally this should fail since our local account is not enabled. However, since we are bridging
   579  	// via the leafnode we expect this to work here.
   580  	nc, js := jsClientConnectEx(t, ln, "CORE", nats.UserInfo("n", "p"))
   581  	defer nc.Close()
   582  
   583  	si, err := js.AddStream(&nats.StreamConfig{
   584  		Name:     "TEST",
   585  		Subjects: []string{"foo"},
   586  		Replicas: 2,
   587  	})
   588  	if err != nil {
   589  		t.Fatalf("Unexpected error: %v", err)
   590  	}
   591  	if si.Cluster == nil || si.Cluster.Name != "HUB" {
   592  		t.Fatalf("Expected stream to be placed in %q", "HUB")
   593  	}
   594  	// Do some other API calls.
   595  	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "C1", AckPolicy: nats.AckExplicitPolicy})
   596  	if err != nil {
   597  		t.Fatalf("Unexpected error: %v", err)
   598  	}
   599  	seen := 0
   600  	for name := range js.StreamNames() {
   601  		seen++
   602  		if name != "TEST" {
   603  			t.Fatalf("Expected only %q but got %q", "TEST", name)
   604  		}
   605  	}
   606  	if seen != 1 {
   607  		t.Fatalf("Expected only 1 stream, got %d", seen)
   608  	}
   609  	if _, err := js.StreamInfo("TEST"); err != nil {
   610  		t.Fatalf("Unexpected error: %v", err)
   611  	}
   612  	if err := js.PurgeStream("TEST"); err != nil {
   613  		t.Fatalf("Unexpected purge error: %v", err)
   614  	}
   615  	if err := js.DeleteConsumer("TEST", "C1"); err != nil {
   616  		t.Fatalf("Unexpected error: %v", err)
   617  	}
   618  	if _, err := js.UpdateStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"bar"}, Replicas: 2}); err != nil {
   619  		t.Fatalf("Unexpected error: %v", err)
   620  	}
   621  	if err := js.DeleteStream("TEST"); err != nil {
   622  		t.Fatalf("Unexpected error: %v", err)
   623  	}
   624  
   625  	// Now check the enabled account.
   626  	// Check the enabled account only talks to its local JS domain by default.
   627  	nc, js = jsClientConnect(t, ln, nats.UserInfo("y", "p"))
   628  	defer nc.Close()
   629  
   630  	sub, err := nc.SubscribeSync(JSAdvisoryStreamCreatedPre + ".>")
   631  	if err != nil {
   632  		t.Fatalf("Unexpected error: %v", err)
   633  	}
   634  
   635  	si, err = js.AddStream(&nats.StreamConfig{
   636  		Name:     "TEST",
   637  		Subjects: []string{"foo"},
   638  	})
   639  	if err != nil {
   640  		t.Fatalf("Unexpected error: %v", err)
   641  	}
   642  	if si.Cluster != nil {
   643  		t.Fatalf("Expected no cluster designation for stream since created on single LN server")
   644  	}
   645  
   646  	// Wait for a bit and make sure we only get one of these.
   647  	// The HUB domain should be cut off by default.
   648  	time.Sleep(250 * time.Millisecond)
   649  	checkSubsPending(t, sub, 1)
   650  	// Drain.
   651  	for _, err := sub.NextMsg(0); err == nil; _, err = sub.NextMsg(0) {
   652  	}
   653  
   654  	// Now try to talk to the HUB JS domain through a new context that uses a different mapped subject.
   655  	// This is similar to how we let users cross JS domains between accounts as well.
   656  	js, err = nc.JetStream(nats.APIPrefix("$JS.HUB.API"))
   657  	if err != nil {
   658  		t.Fatalf("Unexpected error getting JetStream context: %v", err)
   659  	}
   660  	// This should fail here with jetstream not enabled.
   661  	if _, err := js.AccountInfo(); err != nats.ErrJetStreamNotEnabled {
   662  		t.Fatalf("Unexpected error: %v", err)
   663  	}
   664  
   665  	// Now add in a mapping to the connected account in the HUB.
   666  	// This aligns with the APIPrefix context above and works across leafnodes.
   667  	// TODO(dlc) - Should we have a mapping section for leafnode solicit?
   668  	c.addSubjectMapping("ONE", "$JS.HUB.API.>", "$JS.API.>")
   669  
   670  	// Now it should work.
   671  	if _, err := js.AccountInfo(); err != nil {
   672  		t.Fatalf("Unexpected error: %v", err)
   673  	}
   674  
   675  	// Make sure we can add a stream, etc.
   676  	si, err = js.AddStream(&nats.StreamConfig{
   677  		Name:     "TEST22",
   678  		Subjects: []string{"bar"},
   679  		Replicas: 2,
   680  	})
   681  	if err != nil {
   682  		t.Fatalf("Unexpected error: %v", err)
   683  	}
   684  	if si.Cluster == nil || si.Cluster.Name != "HUB" {
   685  		t.Fatalf("Expected stream to be placed in %q", "HUB")
   686  	}
   687  
   688  	jsLocal, err := nc.JetStream()
   689  	if err != nil {
   690  		t.Fatalf("Unexpected error getting JetStream context: %v", err)
   691  	}
   692  
   693  	// Create a mirror on the local leafnode for stream TEST22.
   694  	_, err = jsLocal.AddStream(&nats.StreamConfig{
   695  		Name: "M",
   696  		Mirror: &nats.StreamSource{
   697  			Name:     "TEST22",
   698  			External: &nats.ExternalStream{APIPrefix: "$JS.HUB.API"},
   699  		},
   700  	})
   701  	if err != nil {
   702  		t.Fatalf("Unexpected error: %v", err)
   703  	}
   704  
   705  	// Publish a message to the HUB's TEST22 stream.
   706  	if _, err := js.Publish("bar", []byte("OK")); err != nil {
   707  		t.Fatalf("Unexpected publish error: %v", err)
   708  	}
   709  	// Make sure the message arrives in our mirror.
   710  	checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
   711  		si, err := jsLocal.StreamInfo("M")
   712  		if err != nil {
   713  			return fmt.Errorf("Could not get stream info: %v", err)
   714  		}
   715  		if si.State.Msgs != 1 {
   716  			return fmt.Errorf("Expected 1 msg, got state: %+v", si.State)
   717  		}
   718  		return nil
   719  	})
   720  
   721  	// Now do the reverse and create a sourced stream in the HUB from our local stream on leafnode.
   722  	// Inside the HUB we need to be able to find our local leafnode JetStream assets, so we need
   723  	// a mapping in the LN server to allow this to work. Normally this will just be in server config.
   724  	acc, err := ln.LookupAccount("JSY")
   725  	if err != nil {
   726  		c.t.Fatalf("Unexpected error on %v: %v", ln, err)
   727  	}
   728  	if err := acc.AddMapping("$JS.LN.API.>", "$JS.API.>"); err != nil {
   729  		c.t.Fatalf("Error adding mapping: %v", err)
   730  	}
   731  
   732  	// js is the HUB JetStream context here.
   733  	_, err = js.AddStream(&nats.StreamConfig{
   734  		Name: "S",
   735  		Sources: []*nats.StreamSource{{
   736  			Name:     "M",
   737  			External: &nats.ExternalStream{APIPrefix: "$JS.LN.API"},
   738  		}},
   739  	})
   740  	if err != nil {
   741  		t.Fatalf("Unexpected error: %v", err)
   742  	}
   743  
   744  	// Make sure the message arrives in our sourced stream.
   745  	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
   746  		si, err := js.StreamInfo("S")
   747  		if err != nil {
   748  			return fmt.Errorf("Could not get stream info: %v", err)
   749  		}
   750  		if si.State.Msgs != 1 {
   751  			return fmt.Errorf("Expected 1 msg, got state: %+v", si.State)
   752  		}
   753  		return nil
   754  	})
   755  }
   756  
   757  // JetStream Domains
   758  func TestJetStreamClusterDomains(t *testing.T) {
   759  	// This adds in domain config option to template.
   760  	tmpl := strings.Replace(jsClusterAccountsTempl, "store_dir:", "domain: CORE, store_dir:", 1)
   761  	c := createJetStreamCluster(t, tmpl, "CORE", _EMPTY_, 3, 12232, true)
   762  	defer c.shutdown()
   763  
   764  	// This leafnode is a single server with no domain but sharing the system account.
   765  	// This extends the CORE domain through this leafnode.
   766  	ln := c.createLeafNodeWithTemplate("LN-SYS",
   767  		strings.ReplaceAll(jsClusterTemplWithSingleLeafNode, "store_dir:", "extension_hint: will_extend, domain: CORE, store_dir:"))
   768  	defer ln.Shutdown()
   769  
   770  	checkLeafNodeConnectedCount(t, ln, 2)
   771  
   772  	// This shows we have extended this system.
   773  	c.waitOnPeerCount(4)
   774  	if ml := c.leader(); ml == ln {
   775  		t.Fatalf("Detected a meta-leader in the leafnode: %s", ml)
   776  	}
   777  
   778  	// Now create another LN but with a domain defined.
   779  	tmpl = strings.Replace(jsClusterTemplWithSingleLeafNode, "store_dir:", "domain: SPOKE, store_dir:", 1)
   780  	spoke := c.createLeafNodeWithTemplate("LN-SPOKE", tmpl)
   781  	defer spoke.Shutdown()
   782  
   783  	checkLeafNodeConnectedCount(t, spoke, 2)
   784  
   785  	// Should be the same, should not extend the CORE domain.
   786  	c.waitOnPeerCount(4)
   787  
   788  	// The domain signals to the system that we are our own JetStream domain and should not extend CORE.
   789  	// We want to check to make sure we have all the deny properly setup.
   790  	spoke.mu.Lock()
   791  	// var hasDE, hasDI bool
   792  	for _, ln := range spoke.leafs {
   793  		ln.mu.Lock()
   794  		remote := ln.leaf.remote
   795  		ln.mu.Unlock()
   796  		remote.RLock()
   797  		if remote.RemoteLeafOpts.LocalAccount == "$SYS" {
   798  			for _, s := range denyAllJs {
   799  				if r := ln.perms.pub.deny.Match(s); len(r.psubs) != 1 {
   800  					t.Fatalf("Expected to have deny permission for %s", s)
   801  				}
   802  				if r := ln.perms.sub.deny.Match(s); len(r.psubs) != 1 {
   803  					t.Fatalf("Expected to have deny permission for %s", s)
   804  				}
   805  			}
   806  		}
   807  		remote.RUnlock()
   808  	}
   809  	spoke.mu.Unlock()
   810  
   811  	// Now do some operations.
   812  	// Check the enabled account only talks to its local JS domain by default.
   813  	nc, js := jsClientConnect(t, spoke)
   814  	defer nc.Close()
   815  
   816  	si, err := js.AddStream(&nats.StreamConfig{
   817  		Name:     "TEST",
   818  		Subjects: []string{"foo"},
   819  	})
   820  	if err != nil {
   821  		t.Fatalf("Unexpected error: %v", err)
   822  	}
   823  	if si.Cluster != nil {
   824  		t.Fatalf("Expected no cluster designation for stream since created on single LN server")
   825  	}
   826  
   827  	// Now try to talk to the CORE JS domain through a new context that uses a different mapped subject.
   828  	jsCore, err := nc.JetStream(nats.APIPrefix("$JS.CORE.API"))
   829  	if err != nil {
   830  		t.Fatalf("Unexpected error getting JetStream context: %v", err)
   831  	}
   832  	if _, err := jsCore.AccountInfo(); err != nil {
   833  		t.Fatalf("Unexpected error: %v", err)
   834  	}
   835  	// Make sure we can add a stream, etc.
   836  	si, err = jsCore.AddStream(&nats.StreamConfig{
   837  		Name:     "TEST22",
   838  		Subjects: []string{"bar"},
   839  		Replicas: 2,
   840  	})
   841  	if err != nil {
   842  		t.Fatalf("Unexpected error: %v", err)
   843  	}
   844  	if si.Cluster == nil || si.Cluster.Name != "CORE" {
   845  		t.Fatalf("Expected stream to be placed in %q, got %q", "CORE", si.Cluster.Name)
   846  	}
   847  
   848  	jsLocal, err := nc.JetStream()
   849  	if err != nil {
   850  		t.Fatalf("Unexpected error getting JetStream context: %v", err)
   851  	}
   852  
   853  	// Create a mirror on our local leafnode for stream TEST22.
   854  	_, err = jsLocal.AddStream(&nats.StreamConfig{
   855  		Name: "M",
   856  		Mirror: &nats.StreamSource{
   857  			Name:     "TEST22",
   858  			External: &nats.ExternalStream{APIPrefix: "$JS.CORE.API"},
   859  		},
   860  	})
   861  	if err != nil {
   862  		t.Fatalf("Unexpected error: %v", err)
   863  	}
   864  
   865  	// Publish a message to the CORE's TEST22 stream.
   866  	if _, err := jsCore.Publish("bar", []byte("OK")); err != nil {
   867  		t.Fatalf("Unexpected publish error: %v", err)
   868  	}
   869  
   870  	// Make sure the message arrives in our mirror.
   871  	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
   872  		si, err := jsLocal.StreamInfo("M")
   873  		if err != nil {
   874  			return fmt.Errorf("Could not get stream info: %v", err)
   875  		}
   876  		if si.State.Msgs != 1 {
   877  			return fmt.Errorf("Expected 1 msg, got state: %+v", si.State)
   878  		}
   879  		return nil
   880  	})
   881  
   882  	// jsCore is the CORE JetStream domain.
   883  	// Create a sourced stream in the CORE that is sourced from our mirror stream in our leafnode.
   884  	_, err = jsCore.AddStream(&nats.StreamConfig{
   885  		Name: "S",
   886  		Sources: []*nats.StreamSource{{
   887  			Name:     "M",
   888  			External: &nats.ExternalStream{APIPrefix: "$JS.SPOKE.API"},
   889  		}},
   890  	})
   891  	if err != nil {
   892  		t.Fatalf("Unexpected error: %v", err)
   893  	}
   894  
   895  	// Make sure the message arrives in our sourced stream.
   896  	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
   897  		si, err := jsCore.StreamInfo("S")
   898  		if err != nil {
   899  			return fmt.Errorf("Could not get stream info: %v", err)
   900  		}
   901  		if si.State.Msgs != 1 {
   902  			return fmt.Errorf("Expected 1 msg, got state: %+v", si.State)
   903  		}
   904  		return nil
   905  	})
   906  
   907  	// Now connect directly to the CORE cluster and make sure we can operate there.
   908  	nc, jsLocal = jsClientConnect(t, c.randomServer())
   909  	defer nc.Close()
   910  
   911  	// Create the js contexts again.
   912  	jsSpoke, err := nc.JetStream(nats.APIPrefix("$JS.SPOKE.API"))
   913  	if err != nil {
   914  		t.Fatalf("Unexpected error getting JetStream context: %v", err)
   915  	}
   916  
   917  	// Publish a message to the CORE's TEST22 stream.
   918  	if _, err := jsLocal.Publish("bar", []byte("OK")); err != nil {
   919  		t.Fatalf("Unexpected publish error: %v", err)
   920  	}
   921  
   922  	// Make sure the message arrives in our mirror.
   923  	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
   924  		si, err := jsSpoke.StreamInfo("M")
   925  		if err != nil {
   926  			return fmt.Errorf("Could not get stream info: %v", err)
   927  		}
   928  		if si.State.Msgs != 2 {
   929  			return fmt.Errorf("Expected 2 msgs, got state: %+v", si.State)
   930  		}
   931  		return nil
   932  	})
   933  
   934  	// Make sure the message arrives in our sourced stream.
   935  	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
   936  		si, err := jsLocal.StreamInfo("S")
   937  		if err != nil {
   938  			return fmt.Errorf("Could not get stream info: %v", err)
   939  		}
   940  		if si.State.Msgs != 2 {
   941  			return fmt.Errorf("Expected 2 msgs, got state: %+v", si.State)
   942  		}
   943  		return nil
   944  	})
   945  
   946  	// We are connected to the CORE domain/system. Create a JetStream context referencing ourselves.
   947  	jsCore, err = nc.JetStream(nats.APIPrefix("$JS.CORE.API"))
   948  	if err != nil {
   949  		t.Fatalf("Unexpected error getting JetStream context: %v", err)
   950  	}
   951  
   952  	si, err = jsCore.StreamInfo("S")
   953  	if err != nil {
   954  		t.Fatalf("Could not get stream info: %v", err)
   955  	}
   956  	if si.State.Msgs != 2 {
   957  		t.Fatalf("Expected 2 msgs, got state: %+v", si.State)
   958  	}
   959  }
   960  
   961  func TestJetStreamClusterDomainsWithNoJSHub(t *testing.T) {
   962  	// Create our hub cluster with no JetStream defined.
   963  	c := createMixedModeCluster(t, jsClusterAccountsTempl, "NOJS5", _EMPTY_, 0, 5, false)
   964  	defer c.shutdown()
   965  
   966  	ln := c.createSingleLeafNodeNoSystemAccountAndEnablesJetStream()
   967  	defer ln.Shutdown()
   968  
   969  	lnd := c.createSingleLeafNodeNoSystemAccountAndEnablesJetStreamWithDomain("SPOKE", "nojs")
   970  	defer lnd.Shutdown()
   971  
   972  	// Client based API - Connected to the core cluster with no JS but account has JS.
   973  	s := c.randomServer()
   974  	// Make sure the JS interest from the LNs has made it to this server.
   975  	checkSubInterest(t, s, "NOJS", "$JS.SPOKE.API.INFO", time.Second)
   976  	nc, _ := jsClientConnect(t, s, nats.UserInfo("nojs", "p"))
   977  	defer nc.Close()
   978  
   979  	cfg := &nats.StreamConfig{
   980  		Name:     "TEST",
   981  		Subjects: []string{"foo"},
   982  	}
   983  	req, err := json.Marshal(cfg)
   984  	if err != nil {
   985  		t.Fatalf("Unexpected error: %v", err)
   986  	}
   987  
   988  	// Do by hand to make sure we only get one response.
   989  	sis := fmt.Sprintf(strings.ReplaceAll(JSApiStreamCreateT, JSApiPrefix, "$JS.SPOKE.API"), "TEST")
   990  	rs := nats.NewInbox()
   991  	sub, _ := nc.SubscribeSync(rs)
   992  	nc.PublishRequest(sis, rs, req)
   993  	// Wait for response.
   994  	checkSubsPending(t, sub, 1)
   995  	// Double check to make sure we only have 1.
   996  	if nr, _, err := sub.Pending(); err != nil || nr != 1 {
   997  		t.Fatalf("Expected 1 response, got %d and %v", nr, err)
   998  	}
   999  	resp, err := sub.NextMsg(time.Second)
  1000  	require_NoError(t, err)
  1001  
  1002  	// This StreamInfo should *not* have a domain set.
  1003  	// Do by hand until this makes it to the Go client.
  1004  	var si StreamInfo
  1005  	if err = json.Unmarshal(resp.Data, &si); err != nil {
  1006  		t.Fatalf("Unexpected error: %v", err)
  1007  	}
  1008  	if si.Domain != _EMPTY_ {
  1009  		t.Fatalf("Expected to have NO domain set but got %q", si.Domain)
  1010  	}
  1011  
  1012  	// Now let's create a stream specifically on the SPOKE domain.
  1013  	js, err := nc.JetStream(nats.Domain("SPOKE"))
  1014  	if err != nil {
  1015  		t.Fatalf("Unexpected error getting JetStream context: %v", err)
  1016  	}
  1017  	_, err = js.AddStream(&nats.StreamConfig{
  1018  		Name:     "TEST22",
  1019  		Subjects: []string{"bar"},
  1020  	})
  1021  	if err != nil {
  1022  		t.Fatalf("Unexpected error: %v", err)
  1023  	}
  1024  
  1025  	// Now lookup by hand to check domain.
  1026  	resp, err = nc.Request("$JS.SPOKE.API.STREAM.INFO.TEST22", nil, time.Second)
  1027  	if err != nil {
  1028  		t.Fatalf("Unexpected error: %v", err)
  1029  	}
  1030  	if err = json.Unmarshal(resp.Data, &si); err != nil {
  1031  		t.Fatalf("Unexpected error: %v", err)
  1032  	}
  1033  	if si.Domain != "SPOKE" {
  1034  		t.Fatalf("Expected to have domain set to %q but got %q", "SPOKE", si.Domain)
  1035  	}
  1036  }
  1037  
  1038  // Issue #2205
  1039  func TestJetStreamClusterDomainsAndAPIResponses(t *testing.T) {
  1040  	// This adds in domain config option to template.
  1041  	tmpl := strings.Replace(jsClusterAccountsTempl, "store_dir:", "domain: CORE, store_dir:", 1)
  1042  	c := createJetStreamCluster(t, tmpl, "CORE", _EMPTY_, 3, 12232, true)
  1043  	defer c.shutdown()
  1044  
  1045  	// Now create spoke LN cluster.
  1046  	tmpl = strings.Replace(jsClusterTemplWithLeafNode, "store_dir:", "domain: SPOKE, store_dir:", 1)
  1047  	lnc := c.createLeafNodesWithTemplateAndStartPort(tmpl, "SPOKE", 5, 23913)
  1048  	defer lnc.shutdown()
  1049  
  1050  	lnc.waitOnClusterReady()
  1051  
  1052  	// Make the physical connection to the CORE.
  1053  	nc, js := jsClientConnect(t, c.randomServer())
  1054  	defer nc.Close()
  1055  
  1056  	_, err := js.AddStream(&nats.StreamConfig{
  1057  		Name:     "TEST",
  1058  		Subjects: []string{"foo"},
  1059  		Replicas: 2,
  1060  	})
  1061  	if err != nil {
  1062  		t.Fatalf("Unexpected error: %v", err)
  1063  	}
  1064  
  1065  	// Now create JS domain context and try to do same in LN cluster.
  1066  	// The issue referenced above details a bug where we can not receive a positive response.
  1067  	nc, _ = jsClientConnect(t, c.randomServer())
  1068  	defer nc.Close()
  1069  
  1070  	jsSpoke, err := nc.JetStream(nats.APIPrefix("$JS.SPOKE.API"))
  1071  	if err != nil {
  1072  		t.Fatalf("Unexpected error getting JetStream context: %v", err)
  1073  	}
  1074  
  1075  	si, err := jsSpoke.AddStream(&nats.StreamConfig{
  1076  		Name:     "TEST",
  1077  		Subjects: []string{"foo"},
  1078  		Replicas: 2,
  1079  	})
  1080  	if err != nil {
  1081  		t.Fatalf("Unexpected error: %v", err)
  1082  	}
  1083  	if si.Cluster.Name != "SPOKE" {
  1084  		t.Fatalf("Expected %q as the cluster, got %q", "SPOKE", si.Cluster.Name)
  1085  	}
  1086  }
  1087  
  1088  // Issue #2202
  1089  func TestJetStreamClusterDomainsAndSameNameSources(t *testing.T) {
  1090  	tmpl := strings.Replace(jsClusterAccountsTempl, "store_dir:", "domain: CORE, store_dir:", 1)
  1091  	c := createJetStreamCluster(t, tmpl, "CORE", _EMPTY_, 3, 9323, true)
  1092  	defer c.shutdown()
  1093  
  1094  	tmpl = strings.Replace(jsClusterTemplWithSingleLeafNode, "store_dir:", "domain: SPOKE-1, store_dir:", 1)
  1095  	spoke1 := c.createLeafNodeWithTemplate("LN-SPOKE-1", tmpl)
  1096  	defer spoke1.Shutdown()
  1097  
  1098  	tmpl = strings.Replace(jsClusterTemplWithSingleLeafNode, "store_dir:", "domain: SPOKE-2, store_dir:", 1)
  1099  	spoke2 := c.createLeafNodeWithTemplate("LN-SPOKE-2", tmpl)
  1100  	defer spoke2.Shutdown()
  1101  
  1102  	checkLeafNodeConnectedCount(t, spoke1, 2)
  1103  	checkLeafNodeConnectedCount(t, spoke2, 2)
  1104  
  1105  	subjFor := func(s *Server) string {
  1106  		switch s {
  1107  		case spoke1:
  1108  			return "foo"
  1109  		case spoke2:
  1110  			return "bar"
  1111  		}
  1112  		return "TEST"
  1113  	}
  1114  
  1115  	// Create the same name stream in both spoke domains.
  1116  	for _, s := range []*Server{spoke1, spoke2} {
  1117  		nc, js := jsClientConnect(t, s)
  1118  		defer nc.Close()
  1119  		_, err := js.AddStream(&nats.StreamConfig{
  1120  			Name:     "TEST",
  1121  			Subjects: []string{subjFor(s)},
  1122  		})
  1123  		if err != nil {
  1124  			t.Fatalf("Unexpected error: %v", err)
  1125  		}
  1126  		nc.Close()
  1127  	}
  1128  
  1129  	// Now connect to the hub and create a sourced stream from both leafnode streams.
  1130  	nc, js := jsClientConnect(t, c.randomServer())
  1131  	defer nc.Close()
  1132  
  1133  	_, err := js.AddStream(&nats.StreamConfig{
  1134  		Name: "S",
  1135  		Sources: []*nats.StreamSource{
  1136  			{
  1137  				Name:     "TEST",
  1138  				External: &nats.ExternalStream{APIPrefix: "$JS.SPOKE-1.API"},
  1139  			},
  1140  			{
  1141  				Name:     "TEST",
  1142  				External: &nats.ExternalStream{APIPrefix: "$JS.SPOKE-2.API"},
  1143  			},
  1144  		},
  1145  	})
  1146  	if err != nil {
  1147  		t.Fatalf("Unexpected error: %v", err)
  1148  	}
  1149  
  1150  	// Publish a message to each spoke stream and we will check that our sourced stream gets both.
  1151  	for _, s := range []*Server{spoke1, spoke2} {
  1152  		nc, js := jsClientConnect(t, s)
  1153  		defer nc.Close()
  1154  		js.Publish(subjFor(s), []byte("DOUBLE TROUBLE"))
  1155  		si, err := js.StreamInfo("TEST")
  1156  		if err != nil {
  1157  			t.Fatalf("Unexpected error: %v", err)
  1158  		}
  1159  		if si.State.Msgs != 1 {
  1160  			t.Fatalf("Expected 1 msg, got %d", si.State.Msgs)
  1161  		}
  1162  		nc.Close()
  1163  	}
  1164  
  1165  	// Now make sure we have 2 msgs in our sourced stream.
  1166  	checkFor(t, 2*time.Second, 50*time.Millisecond, func() error {
  1167  		si, err := js.StreamInfo("S")
  1168  		require_NoError(t, err)
  1169  		if si.State.Msgs != 2 {
  1170  			return fmt.Errorf("Expected 2 msgs, got %d", si.State.Msgs)
  1171  		}
  1172  		return nil
  1173  	})
  1174  
  1175  	// Make sure we can see our external information.
  1176  	// This not in the Go client yet so manual for now.
  1177  	resp, err := nc.Request(fmt.Sprintf(JSApiStreamInfoT, "S"), nil, time.Second)
  1178  	if err != nil {
  1179  		t.Fatalf("Unexpected error: %v", err)
  1180  	}
  1181  	var ssi StreamInfo
  1182  	if err = json.Unmarshal(resp.Data, &ssi); err != nil {
  1183  		t.Fatalf("Unexpected error: %v", err)
  1184  	}
  1185  	if len(ssi.Sources) != 2 {
  1186  		t.Fatalf("Expected 2 source streams, got %d", len(ssi.Sources))
  1187  	}
  1188  	if ssi.Sources[0].External == nil {
  1189  		t.Fatalf("Expected a non-nil external designation")
  1190  	}
  1191  	pre := ssi.Sources[0].External.ApiPrefix
  1192  	if pre != "$JS.SPOKE-1.API" && pre != "$JS.SPOKE-2.API" {
  1193  		t.Fatalf("Expected external api of %q, got %q", "$JS.SPOKE-[1|2].API", ssi.Sources[0].External.ApiPrefix)
  1194  	}
  1195  
  1196  	// Also create a mirror.
  1197  	_, err = js.AddStream(&nats.StreamConfig{
  1198  		Name: "M",
  1199  		Mirror: &nats.StreamSource{
  1200  			Name:     "TEST",
  1201  			External: &nats.ExternalStream{APIPrefix: "$JS.SPOKE-1.API"},
  1202  		},
  1203  	})
  1204  	if err != nil {
  1205  		t.Fatalf("Unexpected error: %v", err)
  1206  	}
  1207  	resp, err = nc.Request(fmt.Sprintf(JSApiStreamInfoT, "M"), nil, time.Second)
  1208  	if err != nil {
  1209  		t.Fatalf("Unexpected error: %v", err)
  1210  	}
  1211  	if err = json.Unmarshal(resp.Data, &ssi); err != nil {
  1212  		t.Fatalf("Unexpected error: %v", err)
  1213  	}
  1214  	if ssi.Mirror == nil || ssi.Mirror.External == nil {
  1215  		t.Fatalf("Expected a non-nil external designation for our mirror")
  1216  	}
  1217  	if ssi.Mirror.External.ApiPrefix != "$JS.SPOKE-1.API" {
  1218  		t.Fatalf("Expected external api of %q, got %q", "$JS.SPOKE-1.API", ssi.Sources[0].External.ApiPrefix)
  1219  	}
  1220  }
  1221  
  1222  // When a leafnode enables JS on an account that is not enabled on the remote cluster account this should fail
  1223  // Accessing a jet stream in a different availability domain requires the client provide a damain name, or
  1224  // the server having set up appropriate defaults (default_js_domain. tested in leafnode_test.go)
  1225  func TestJetStreamClusterSingleLeafNodeEnablingJetStream(t *testing.T) {
  1226  	tmpl := strings.Replace(jsClusterAccountsTempl, "store_dir:", "domain: HUB, store_dir:", 1)
  1227  	c := createJetStreamCluster(t, tmpl, "HUB", _EMPTY_, 3, 11322, true)
  1228  	defer c.shutdown()
  1229  
  1230  	ln := c.createSingleLeafNodeNoSystemAccountAndEnablesJetStream()
  1231  	defer ln.Shutdown()
  1232  
  1233  	// Check that we have JS in the $G account on the leafnode.
  1234  	nc, js := jsClientConnect(t, ln)
  1235  	defer nc.Close()
  1236  
  1237  	if _, err := js.AccountInfo(); err != nil {
  1238  		t.Fatalf("Unexpected error: %v", err)
  1239  	}
  1240  
  1241  	// Connect our client to the "nojs" account in the cluster but make sure JS works since its enabled via the leafnode.
  1242  	s := c.randomServer()
  1243  	nc, js = jsClientConnect(t, s, nats.UserInfo("nojs", "p"))
  1244  	defer nc.Close()
  1245  	_, err := js.AccountInfo()
  1246  	// error is context deadline exceeded as the local account has no js and can't reach the remote one
  1247  	require_True(t, err == context.DeadlineExceeded)
  1248  }
  1249  
  1250  func TestJetStreamClusterLeafNodesWithoutJS(t *testing.T) {
  1251  	tmpl := strings.Replace(jsClusterAccountsTempl, "store_dir:", "domain: HUB, store_dir:", 1)
  1252  	c := createJetStreamCluster(t, tmpl, "HUB", _EMPTY_, 3, 11233, true)
  1253  	defer c.shutdown()
  1254  
  1255  	testJS := func(s *Server, domain string, doDomainAPI bool) {
  1256  		nc, js := jsClientConnect(t, s)
  1257  		defer nc.Close()
  1258  		if doDomainAPI {
  1259  			var err error
  1260  			apiPre := fmt.Sprintf("$JS.%s.API", domain)
  1261  			if js, err = nc.JetStream(nats.APIPrefix(apiPre)); err != nil {
  1262  				t.Fatalf("Unexpected error getting JetStream context: %v", err)
  1263  			}
  1264  		}
  1265  		ai, err := js.AccountInfo()
  1266  		if err != nil {
  1267  			t.Fatalf("Unexpected error: %v", err)
  1268  		}
  1269  		if ai.Domain != domain {
  1270  			t.Fatalf("Expected domain of %q, got %q", domain, ai.Domain)
  1271  		}
  1272  	}
  1273  
  1274  	ln := c.createLeafNodeWithTemplate("LN-SYS-S-NOJS", jsClusterTemplWithSingleLeafNodeNoJS)
  1275  	defer ln.Shutdown()
  1276  
  1277  	checkLeafNodeConnectedCount(t, ln, 2)
  1278  
  1279  	// Check that we can access JS in the $G account on the cluster through the leafnode.
  1280  	testJS(ln, "HUB", true)
  1281  	ln.Shutdown()
  1282  
  1283  	// Now create a leafnode cluster with No JS and make sure that works.
  1284  	lnc := c.createLeafNodesNoJS("LN-SYS-C-NOJS", 3)
  1285  	defer lnc.shutdown()
  1286  
  1287  	testJS(lnc.randomServer(), "HUB", true)
  1288  	lnc.shutdown()
  1289  
  1290  	// Do mixed mode but with a JS config block that specifies domain and just sets it to disabled.
  1291  	// This is the preferred method for mixed mode, always define JS server config block just disable
  1292  	// in those you do not want it running.
  1293  	// e.g. jetstream: {domain: "SPOKE", enabled: false}
  1294  	tmpl = strings.Replace(jsClusterTemplWithLeafNode, "store_dir:", "domain: SPOKE, store_dir:", 1)
  1295  	lncm := c.createLeafNodesWithTemplateMixedMode(tmpl, "SPOKE", 3, 2, true)
  1296  	defer lncm.shutdown()
  1297  
  1298  	// Now grab a non-JS server, last two are non-JS.
  1299  	sl := lncm.servers[0]
  1300  	testJS(sl, "SPOKE", false)
  1301  
  1302  	// Test that mappings work as well and we can access the hub.
  1303  	testJS(sl, "HUB", true)
  1304  }
  1305  
  1306  func TestJetStreamClusterLeafNodesWithSameDomainNames(t *testing.T) {
  1307  	tmpl := strings.Replace(jsClusterAccountsTempl, "store_dir:", "domain: HUB, store_dir:", 1)
  1308  	c := createJetStreamCluster(t, tmpl, "HUB", _EMPTY_, 3, 11233, true)
  1309  	defer c.shutdown()
  1310  
  1311  	tmpl = strings.Replace(jsClusterTemplWithLeafNode, "store_dir:", "domain: HUB, store_dir:", 1)
  1312  	lnc := c.createLeafNodesWithTemplateAndStartPort(tmpl, "SPOKE", 3, 11311)
  1313  	defer lnc.shutdown()
  1314  
  1315  	c.waitOnPeerCount(6)
  1316  }
  1317  
  1318  func TestJetStreamClusterLeafDifferentAccounts(t *testing.T) {
  1319  	c := createJetStreamCluster(t, jsClusterAccountsTempl, "HUB", _EMPTY_, 2, 23133, false)
  1320  	defer c.shutdown()
  1321  
  1322  	ln := c.createLeafNodesWithStartPortAndDomain("LN", 2, 22110, _EMPTY_)
  1323  	defer ln.shutdown()
  1324  
  1325  	// Wait on all peers.
  1326  	c.waitOnPeerCount(4)
  1327  
  1328  	nc, js := jsClientConnect(t, ln.randomServer())
  1329  	defer nc.Close()
  1330  
  1331  	// Make sure we can properly identify the right account when the leader received the request.
  1332  	// We need to map the client info header to the new account once received by the hub.
  1333  	if _, err := js.AccountInfo(); err != nil {
  1334  		t.Fatalf("Unexpected error: %v", err)
  1335  	}
  1336  }
  1337  
  1338  func TestJetStreamClusterStreamInfoDeletedDetails(t *testing.T) {
  1339  	c := createJetStreamClusterExplicit(t, "R2", 2)
  1340  	defer c.shutdown()
  1341  
  1342  	// Client based API
  1343  	nc, js := jsClientConnect(t, c.randomServer())
  1344  	defer nc.Close()
  1345  
  1346  	_, err := js.AddStream(&nats.StreamConfig{
  1347  		Name:     "TEST",
  1348  		Subjects: []string{"foo"},
  1349  		Replicas: 1,
  1350  	})
  1351  	if err != nil {
  1352  		t.Fatalf("Unexpected error: %v", err)
  1353  	}
  1354  
  1355  	// Send in 10 messages.
  1356  	msg, toSend := []byte("HELLO"), 10
  1357  
  1358  	for i := 0; i < toSend; i++ {
  1359  		if _, err = js.Publish("foo", msg); err != nil {
  1360  			t.Fatalf("Unexpected publish error: %v", err)
  1361  		}
  1362  	}
  1363  
  1364  	// Now remove some messages.
  1365  	deleteMsg := func(seq uint64) {
  1366  		if err := js.DeleteMsg("TEST", seq); err != nil {
  1367  			t.Fatalf("Unexpected error: %v", err)
  1368  		}
  1369  	}
  1370  	deleteMsg(2)
  1371  	deleteMsg(4)
  1372  	deleteMsg(6)
  1373  
  1374  	// Need to do these via direct server request for now.
  1375  	resp, err := nc.Request(fmt.Sprintf(JSApiStreamInfoT, "TEST"), nil, time.Second)
  1376  	if err != nil {
  1377  		t.Fatalf("Unexpected error: %v", err)
  1378  	}
  1379  	var si StreamInfo
  1380  	if err = json.Unmarshal(resp.Data, &si); err != nil {
  1381  		t.Fatalf("Unexpected error: %v", err)
  1382  	}
  1383  	if si.State.NumDeleted != 3 {
  1384  		t.Fatalf("Expected %d deleted, got %d", 3, si.State.NumDeleted)
  1385  	}
  1386  	if len(si.State.Deleted) != 0 {
  1387  		t.Fatalf("Expected not deleted details, but got %+v", si.State.Deleted)
  1388  	}
  1389  
  1390  	// Now request deleted details.
  1391  	req := JSApiStreamInfoRequest{DeletedDetails: true}
  1392  	b, _ := json.Marshal(req)
  1393  
  1394  	resp, err = nc.Request(fmt.Sprintf(JSApiStreamInfoT, "TEST"), b, time.Second)
  1395  	if err != nil {
  1396  		t.Fatalf("Unexpected error: %v", err)
  1397  	}
  1398  	if err = json.Unmarshal(resp.Data, &si); err != nil {
  1399  		t.Fatalf("Unexpected error: %v", err)
  1400  	}
  1401  
  1402  	if si.State.NumDeleted != 3 {
  1403  		t.Fatalf("Expected %d deleted, got %d", 3, si.State.NumDeleted)
  1404  	}
  1405  	if len(si.State.Deleted) != 3 {
  1406  		t.Fatalf("Expected deleted details, but got %+v", si.State.Deleted)
  1407  	}
  1408  }
  1409  
  1410  func TestJetStreamClusterMirrorAndSourceExpiration(t *testing.T) {
  1411  	c := createJetStreamClusterExplicit(t, "MSE", 3)
  1412  	defer c.shutdown()
  1413  
  1414  	// Client for API requests.
  1415  	nc, js := jsClientConnect(t, c.randomServer())
  1416  	defer nc.Close()
  1417  
  1418  	// Origin
  1419  	if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST"}); err != nil {
  1420  		t.Fatalf("Unexpected error: %v", err)
  1421  	}
  1422  
  1423  	bi := 1
  1424  	sendBatch := func(n int) {
  1425  		t.Helper()
  1426  		// Send a batch to a given subject.
  1427  		for i := 0; i < n; i++ {
  1428  			msg := fmt.Sprintf("ID: %d", bi)
  1429  			bi++
  1430  			if _, err := js.PublishAsync("TEST", []byte(msg)); err != nil {
  1431  				t.Fatalf("Unexpected publish error: %v", err)
  1432  			}
  1433  		}
  1434  	}
  1435  
  1436  	checkStream := func(stream string, num uint64) {
  1437  		t.Helper()
  1438  		checkFor(t, 5*time.Second, 50*time.Millisecond, func() error {
  1439  			si, err := js.StreamInfo(stream)
  1440  			if err != nil {
  1441  				return err
  1442  			}
  1443  			if si.State.Msgs != num {
  1444  				return fmt.Errorf("Expected %d msgs, got %d", num, si.State.Msgs)
  1445  			}
  1446  			return nil
  1447  		})
  1448  	}
  1449  
  1450  	checkSource := func(num uint64) { t.Helper(); checkStream("S", num) }
  1451  	checkMirror := func(num uint64) { t.Helper(); checkStream("M", num) }
  1452  	checkTest := func(num uint64) { t.Helper(); checkStream("TEST", num) }
  1453  
  1454  	var err error
  1455  
  1456  	_, err = js.AddStream(&nats.StreamConfig{
  1457  		Name:     "M",
  1458  		Mirror:   &nats.StreamSource{Name: "TEST"},
  1459  		Replicas: 2,
  1460  		MaxAge:   500 * time.Millisecond,
  1461  	})
  1462  	if err != nil {
  1463  		t.Fatalf("Unexpected error: %v", err)
  1464  	}
  1465  
  1466  	// We want this to not be same as TEST leader for this test.
  1467  	sl := c.streamLeader("$G", "TEST")
  1468  	for ss := sl; ss == sl; {
  1469  		_, err = js.AddStream(&nats.StreamConfig{
  1470  			Name:     "S",
  1471  			Sources:  []*nats.StreamSource{{Name: "TEST"}},
  1472  			Replicas: 2,
  1473  			MaxAge:   500 * time.Millisecond,
  1474  		})
  1475  		if err != nil {
  1476  			t.Fatalf("Unexpected error: %v", err)
  1477  		}
  1478  		if ss = c.streamLeader("$G", "S"); ss == sl {
  1479  			// Delete and retry.
  1480  			js.DeleteStream("S")
  1481  		}
  1482  	}
  1483  
  1484  	sendBatch(100)
  1485  	checkTest(100)
  1486  	checkMirror(100)
  1487  	checkSource(100)
  1488  
  1489  	// Make sure they expire.
  1490  	checkMirror(0)
  1491  	checkSource(0)
  1492  
  1493  	// Now stop the server housing the leader of the source stream.
  1494  	sl.Shutdown()
  1495  	c.restartServer(sl)
  1496  	checkClusterFormed(t, c.servers...)
  1497  	c.waitOnStreamLeader("$G", "S")
  1498  	c.waitOnStreamLeader("$G", "M")
  1499  
  1500  	// Make sure can process correctly after we have expired all of the messages.
  1501  	sendBatch(100)
  1502  	// Need to check both in parallel.
  1503  	scheck, mcheck := uint64(0), uint64(0)
  1504  	checkFor(t, 10*time.Second, 50*time.Millisecond, func() error {
  1505  		if scheck != 100 {
  1506  			if si, _ := js.StreamInfo("S"); si != nil {
  1507  				scheck = si.State.Msgs
  1508  			}
  1509  		}
  1510  		if mcheck != 100 {
  1511  			if si, _ := js.StreamInfo("M"); si != nil {
  1512  				mcheck = si.State.Msgs
  1513  			}
  1514  		}
  1515  		if scheck == 100 && mcheck == 100 {
  1516  			return nil
  1517  		}
  1518  		return fmt.Errorf("Both not at 100 yet, S=%d, M=%d", scheck, mcheck)
  1519  	})
  1520  
  1521  	checkTest(200)
  1522  }
  1523  
  1524  func TestJetStreamClusterMirrorAndSourceSubLeaks(t *testing.T) {
  1525  	c := createJetStreamClusterExplicit(t, "MSL", 3)
  1526  	defer c.shutdown()
  1527  
  1528  	// Client for API requests.
  1529  	nc, js := jsClientConnect(t, c.randomServer())
  1530  	defer nc.Close()
  1531  
  1532  	startSubs := c.stableTotalSubs()
  1533  
  1534  	var ss []*nats.StreamSource
  1535  
  1536  	// Create 10 origin streams
  1537  	for i := 0; i < 10; i++ {
  1538  		sn := fmt.Sprintf("ORDERS-%d", i+1)
  1539  		ss = append(ss, &nats.StreamSource{Name: sn})
  1540  		if _, err := js.AddStream(&nats.StreamConfig{Name: sn}); err != nil {
  1541  			t.Fatalf("Unexpected error: %v", err)
  1542  		}
  1543  	}
  1544  
  1545  	// Create mux'd stream that sources all of the origin streams.
  1546  	_, err := js.AddStream(&nats.StreamConfig{
  1547  		Name:     "MUX",
  1548  		Replicas: 2,
  1549  		Sources:  ss,
  1550  	})
  1551  	if err != nil {
  1552  		t.Fatalf("Unexpected error: %v", err)
  1553  	}
  1554  
  1555  	// Now create a mirror of the mux stream.
  1556  	_, err = js.AddStream(&nats.StreamConfig{
  1557  		Name:     "MIRROR",
  1558  		Replicas: 2,
  1559  		Mirror:   &nats.StreamSource{Name: "MUX"},
  1560  	})
  1561  	if err != nil {
  1562  		t.Fatalf("Unexpected error: %v", err)
  1563  	}
  1564  
  1565  	// Get stable subs count.
  1566  	afterSubs := c.stableTotalSubs()
  1567  
  1568  	js.DeleteStream("MIRROR")
  1569  	js.DeleteStream("MUX")
  1570  
  1571  	for _, si := range ss {
  1572  		js.DeleteStream(si.Name)
  1573  	}
  1574  
  1575  	// Some subs take longer to settle out so we give ourselves a small buffer.
  1576  	// There will be 1 sub for client on each server (such as _INBOX.IvVJ2DOXUotn4RUSZZCFvp.*)
  1577  	// and 2 or 3 subs such as `_R_.xxxxx.>` on each server, so a total of 12 subs.
  1578  	if deleteSubs := c.stableTotalSubs(); deleteSubs > startSubs+12 {
  1579  		t.Fatalf("Expected subs to return to %d from a high of %d, but got %d", startSubs, afterSubs, deleteSubs)
  1580  	}
  1581  }
  1582  
  1583  func TestJetStreamClusterCreateConcurrentDurableConsumers(t *testing.T) {
  1584  	c := createJetStreamClusterExplicit(t, "MSL", 3)
  1585  	defer c.shutdown()
  1586  
  1587  	// Client for API requests.
  1588  	nc, js := jsClientConnect(t, c.randomServer())
  1589  	defer nc.Close()
  1590  
  1591  	// Create origin stream, must be R > 1
  1592  	if _, err := js.AddStream(&nats.StreamConfig{Name: "ORDERS", Replicas: 3}); err != nil {
  1593  		t.Fatalf("Unexpected error: %v", err)
  1594  	}
  1595  
  1596  	if _, err := js.QueueSubscribeSync("ORDERS", "wq", nats.Durable("shared")); err != nil {
  1597  		t.Fatalf("Unexpected error: %v", err)
  1598  	}
  1599  
  1600  	// Now try to create durables concurrently.
  1601  	start := make(chan struct{})
  1602  	var wg sync.WaitGroup
  1603  	created := uint32(0)
  1604  	errs := make(chan error, 10)
  1605  
  1606  	for i := 0; i < 10; i++ {
  1607  		wg.Add(1)
  1608  
  1609  		go func() {
  1610  			defer wg.Done()
  1611  			<-start
  1612  			_, err := js.QueueSubscribeSync("ORDERS", "wq", nats.Durable("shared"))
  1613  			if err == nil {
  1614  				atomic.AddUint32(&created, 1)
  1615  			} else if !strings.Contains(err.Error(), "consumer name already") {
  1616  				errs <- err
  1617  			}
  1618  		}()
  1619  	}
  1620  
  1621  	close(start)
  1622  	wg.Wait()
  1623  
  1624  	if lc := atomic.LoadUint32(&created); lc != 10 {
  1625  		t.Fatalf("Expected all 10 to be created, got %d", lc)
  1626  	}
  1627  	if len(errs) > 0 {
  1628  		t.Fatalf("Failed to create some sub: %v", <-errs)
  1629  	}
  1630  }
  1631  
  1632  // https://github.com/nats-io/nats-server/issues/2144
  1633  func TestJetStreamClusterUpdateStreamToExisting(t *testing.T) {
  1634  	c := createJetStreamClusterExplicit(t, "MSL", 3)
  1635  	defer c.shutdown()
  1636  
  1637  	// Client for API requests.
  1638  	nc, js := jsClientConnect(t, c.randomServer())
  1639  	defer nc.Close()
  1640  
  1641  	_, err := js.AddStream(&nats.StreamConfig{
  1642  		Name:     "ORDERS1",
  1643  		Replicas: 3,
  1644  		Subjects: []string{"foo"},
  1645  	})
  1646  	if err != nil {
  1647  		t.Fatalf("Unexpected error: %v", err)
  1648  	}
  1649  
  1650  	_, err = js.AddStream(&nats.StreamConfig{
  1651  		Name:     "ORDERS2",
  1652  		Replicas: 3,
  1653  		Subjects: []string{"bar"},
  1654  	})
  1655  	if err != nil {
  1656  		t.Fatalf("Unexpected error: %v", err)
  1657  	}
  1658  
  1659  	_, err = js.UpdateStream(&nats.StreamConfig{
  1660  		Name:     "ORDERS2",
  1661  		Replicas: 3,
  1662  		Subjects: []string{"foo"},
  1663  	})
  1664  	if err == nil {
  1665  		t.Fatalf("Expected an error but got none")
  1666  	}
  1667  }
  1668  
  1669  func TestJetStreamClusterCrossAccountInterop(t *testing.T) {
  1670  	template := `
  1671  	listen: 127.0.0.1:-1
  1672  	server_name: %s
  1673  	jetstream: {max_mem_store: 256MB, max_file_store: 2GB, domain: HUB, store_dir: '%s'}
  1674  
  1675  	cluster {
  1676  		name: %s
  1677  		listen: 127.0.0.1:%d
  1678  		routes = [%s]
  1679  	}
  1680  
  1681  	accounts {
  1682  		JS {
  1683  			jetstream: enabled
  1684  			users = [ { user: "rip", pass: "pass" } ]
  1685  			exports [
  1686  				{ service: "$JS.API.CONSUMER.INFO.>" }
  1687  				{ service: "$JS.HUB.API.CONSUMER.>", response: stream }
  1688  				{ stream: "M.SYNC.>" } # For the mirror
  1689  			]
  1690  		}
  1691  		IA {
  1692  			jetstream: enabled
  1693  			users = [ { user: "dlc", pass: "pass" } ]
  1694  			imports [
  1695  				{ service: { account: JS, subject: "$JS.API.CONSUMER.INFO.TEST.DLC"}, to: "FROM.DLC" }
  1696  				{ service: { account: JS, subject: "$JS.HUB.API.CONSUMER.>"}, to: "js.xacc.API.CONSUMER.>" }
  1697  				{ stream: { account: JS, subject: "M.SYNC.>"} }
  1698  			]
  1699  		}
  1700  		$SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] }
  1701  	}
  1702  	`
  1703  
  1704  	c := createJetStreamClusterWithTemplate(t, template, "HUB", 3)
  1705  	defer c.shutdown()
  1706  
  1707  	// Create the stream and the consumer under the JS/rip user.
  1708  	s := c.randomServer()
  1709  	nc, js := jsClientConnect(t, s, nats.UserInfo("rip", "pass"))
  1710  	defer nc.Close()
  1711  
  1712  	if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Replicas: 2}); err != nil {
  1713  		t.Fatalf("Unexpected error: %v", err)
  1714  	}
  1715  	_, err := js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "DLC", AckPolicy: nats.AckExplicitPolicy})
  1716  	if err != nil {
  1717  		t.Fatalf("Unexpected error: %v", err)
  1718  	}
  1719  
  1720  	// Also create a stream via the domain qualified API.
  1721  	js, err = nc.JetStream(nats.APIPrefix("$JS.HUB.API"))
  1722  	if err != nil {
  1723  		t.Fatalf("Unexpected error getting JetStream context: %v", err)
  1724  	}
  1725  	if _, err := js.AddStream(&nats.StreamConfig{Name: "ORDERS", Replicas: 2}); err != nil {
  1726  		t.Fatalf("Unexpected error: %v", err)
  1727  	}
  1728  
  1729  	// Now we want to access the consumer info from IA/dlc.
  1730  	nc2, js2 := jsClientConnect(t, c.randomServer(), nats.UserInfo("dlc", "pass"))
  1731  	defer nc2.Close()
  1732  
  1733  	if _, err := nc2.Request("FROM.DLC", nil, time.Second); err != nil {
  1734  		t.Fatalf("Unexpected error: %v", err)
  1735  	}
  1736  
  1737  	// Make sure domain mappings etc work across accounts.
  1738  	// Setup a mirror.
  1739  	_, err = js2.AddStream(&nats.StreamConfig{
  1740  		Name: "MIRROR",
  1741  		Mirror: &nats.StreamSource{
  1742  			Name: "ORDERS",
  1743  			External: &nats.ExternalStream{
  1744  				APIPrefix:     "js.xacc.API",
  1745  				DeliverPrefix: "M.SYNC",
  1746  			},
  1747  		},
  1748  	})
  1749  	if err != nil {
  1750  		t.Fatalf("Unexpected error: %v", err)
  1751  	}
  1752  
  1753  	// Send 10 messages..
  1754  	msg, toSend := []byte("Hello mapped domains"), 10
  1755  	for i := 0; i < toSend; i++ {
  1756  		if _, err = js.Publish("ORDERS", msg); err != nil {
  1757  			t.Fatalf("Unexpected publish error: %v", err)
  1758  		}
  1759  	}
  1760  
  1761  	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
  1762  		si, err := js2.StreamInfo("MIRROR")
  1763  		if err != nil {
  1764  			return fmt.Errorf("Unexpected error: %v", err)
  1765  		}
  1766  		if si.State.Msgs != 10 {
  1767  			return fmt.Errorf("Expected 10 msgs, got state: %+v", si.State)
  1768  		}
  1769  		return nil
  1770  	})
  1771  }
  1772  
  1773  // https://github.com/nats-io/nats-server/issues/2242
  1774  func TestJetStreamClusterMsgIdDuplicateBug(t *testing.T) {
  1775  	c := createJetStreamClusterExplicit(t, "MSL", 3)
  1776  	defer c.shutdown()
  1777  
  1778  	// Client for API requests.
  1779  	nc, js := jsClientConnect(t, c.randomServer())
  1780  	defer nc.Close()
  1781  
  1782  	_, err := js.AddStream(&nats.StreamConfig{
  1783  		Name:     "TEST",
  1784  		Subjects: []string{"foo"},
  1785  		Replicas: 2,
  1786  	})
  1787  	if err != nil {
  1788  		t.Fatalf("Unexpected error: %v", err)
  1789  	}
  1790  
  1791  	sendMsgID := func(id string) (*nats.PubAck, error) {
  1792  		t.Helper()
  1793  		m := nats.NewMsg("foo")
  1794  		m.Header.Add(JSMsgId, id)
  1795  		m.Data = []byte("HELLO WORLD")
  1796  		return js.PublishMsg(m)
  1797  	}
  1798  
  1799  	if _, err := sendMsgID("1"); err != nil {
  1800  		t.Fatalf("Unexpected error: %v", err)
  1801  	}
  1802  	// This should fail with duplicate detected.
  1803  	if pa, _ := sendMsgID("1"); pa == nil || !pa.Duplicate {
  1804  		t.Fatalf("Expected duplicate but got none: %+v", pa)
  1805  	}
  1806  	// This should be fine.
  1807  	if _, err := sendMsgID("2"); err != nil {
  1808  		t.Fatalf("Unexpected error: %v", err)
  1809  	}
  1810  }
  1811  
  1812  func TestJetStreamClusterNilMsgWithHeaderThroughSourcedStream(t *testing.T) {
  1813  	tmpl := strings.Replace(jsClusterAccountsTempl, "store_dir:", "domain: HUB, store_dir:", 1)
  1814  	c := createJetStreamCluster(t, tmpl, "HUB", _EMPTY_, 3, 12232, true)
  1815  	defer c.shutdown()
  1816  
  1817  	tmpl = strings.Replace(jsClusterTemplWithSingleLeafNode, "store_dir:", "domain: SPOKE, store_dir:", 1)
  1818  	spoke := c.createLeafNodeWithTemplate("SPOKE", tmpl)
  1819  	defer spoke.Shutdown()
  1820  
  1821  	checkLeafNodeConnectedCount(t, spoke, 2)
  1822  
  1823  	// Client for API requests.
  1824  	nc, js := jsClientConnect(t, spoke)
  1825  	defer nc.Close()
  1826  
  1827  	_, err := js.AddStream(&nats.StreamConfig{
  1828  		Name:     "TEST",
  1829  		Subjects: []string{"foo"},
  1830  	})
  1831  	if err != nil {
  1832  		t.Fatalf("Unexpected error: %v", err)
  1833  	}
  1834  
  1835  	jsHub, err := nc.JetStream(nats.APIPrefix("$JS.HUB.API"))
  1836  	if err != nil {
  1837  		t.Fatalf("Unexpected error getting JetStream context: %v", err)
  1838  	}
  1839  
  1840  	_, err = jsHub.AddStream(&nats.StreamConfig{
  1841  		Name:     "S",
  1842  		Replicas: 2,
  1843  		Sources: []*nats.StreamSource{{
  1844  			Name:     "TEST",
  1845  			External: &nats.ExternalStream{APIPrefix: "$JS.SPOKE.API"},
  1846  		}},
  1847  	})
  1848  	if err != nil {
  1849  		t.Fatalf("Unexpected error: %v", err)
  1850  	}
  1851  
  1852  	// Now send a message to the origin stream with nil body and a header.
  1853  	m := nats.NewMsg("foo")
  1854  	m.Header.Add("X-Request-ID", "e9a639b4-cecb-4fbe-8376-1ef511ae1f8d")
  1855  	m.Data = []byte("HELLO WORLD")
  1856  
  1857  	if _, err = jsHub.PublishMsg(m); err != nil {
  1858  		t.Fatalf("Unexpected error: %v", err)
  1859  	}
  1860  
  1861  	sub, err := jsHub.SubscribeSync("foo", nats.BindStream("S"))
  1862  	if err != nil {
  1863  		t.Fatalf("Unexpected error: %v", err)
  1864  	}
  1865  	msg, err := sub.NextMsg(time.Second)
  1866  	if err != nil {
  1867  		t.Fatalf("Unexpected error: %v", err)
  1868  	}
  1869  	if string(msg.Data) != "HELLO WORLD" {
  1870  		t.Fatalf("Message corrupt? Expecting %q got %q", "HELLO WORLD", msg.Data)
  1871  	}
  1872  }
  1873  
  1874  // Make sure varz reports the server usage not replicated usage etc.
  1875  func TestJetStreamClusterVarzReporting(t *testing.T) {
  1876  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  1877  	defer c.shutdown()
  1878  
  1879  	s := c.randomServer()
  1880  	nc, js := jsClientConnect(t, s)
  1881  	defer nc.Close()
  1882  
  1883  	_, err := js.AddStream(&nats.StreamConfig{
  1884  		Name:     "TEST",
  1885  		Replicas: 3,
  1886  	})
  1887  	if err != nil {
  1888  		t.Fatalf("Unexpected error: %v", err)
  1889  	}
  1890  
  1891  	// ~100k per message.
  1892  	msg := []byte(strings.Repeat("A", 99_960))
  1893  	msz := fileStoreMsgSize("TEST", nil, msg)
  1894  	total := msz * 10
  1895  
  1896  	for i := 0; i < 10; i++ {
  1897  		if _, err := js.Publish("TEST", msg); err != nil {
  1898  			t.Fatalf("Unexpected publish error: %v", err)
  1899  		}
  1900  	}
  1901  	// To show the bug we need this to allow remote usage to replicate.
  1902  	time.Sleep(2 * usageTick)
  1903  
  1904  	v, err := s.Varz(nil)
  1905  	if err != nil {
  1906  		t.Fatalf("Unexpected error: %v", err)
  1907  	}
  1908  
  1909  	if v.JetStream.Stats.Store > total {
  1910  		t.Fatalf("Single server varz JetStream store usage should be <= %d, got %d", total, v.JetStream.Stats.Store)
  1911  	}
  1912  
  1913  	info, err := js.AccountInfo()
  1914  	if err != nil {
  1915  		t.Fatalf("Unexpected error: %v", err)
  1916  	}
  1917  	if info.Store < total*3 {
  1918  		t.Fatalf("Expected account information to show usage ~%d, got %d", total*3, info.Store)
  1919  	}
  1920  }
  1921  
  1922  func TestJetStreamClusterPurgeBySequence(t *testing.T) {
  1923  	for _, st := range []StorageType{FileStorage, MemoryStorage} {
  1924  		t.Run(st.String(), func(t *testing.T) {
  1925  
  1926  			c := createJetStreamClusterExplicit(t, "JSC", 3)
  1927  			defer c.shutdown()
  1928  
  1929  			nc, js := jsClientConnect(t, c.randomServer())
  1930  			defer nc.Close()
  1931  
  1932  			cfg := StreamConfig{
  1933  				Name:       "KV",
  1934  				Subjects:   []string{"kv.*.*"},
  1935  				Storage:    st,
  1936  				Replicas:   2,
  1937  				MaxMsgsPer: 5,
  1938  			}
  1939  			req, err := json.Marshal(cfg)
  1940  			if err != nil {
  1941  				t.Fatalf("Unexpected error: %v", err)
  1942  			}
  1943  			nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second)
  1944  			for i := 0; i < 20; i++ {
  1945  				if _, err = js.Publish("kv.myapp.username", []byte(fmt.Sprintf("value %d", i))); err != nil {
  1946  					t.Fatalf("request failed: %s", err)
  1947  				}
  1948  			}
  1949  			for i := 0; i < 20; i++ {
  1950  				if _, err = js.Publish("kv.myapp.password", []byte(fmt.Sprintf("value %d", i))); err != nil {
  1951  					t.Fatalf("request failed: %s", err)
  1952  				}
  1953  			}
  1954  			expectSequences := func(t *testing.T, subject string, seq ...int) {
  1955  				sub, err := js.SubscribeSync(subject)
  1956  				if err != nil {
  1957  					t.Fatalf("sub failed: %s", err)
  1958  				}
  1959  				defer sub.Unsubscribe()
  1960  				for _, i := range seq {
  1961  					msg, err := sub.NextMsg(time.Second)
  1962  					if err != nil {
  1963  						t.Fatalf("didn't get message: %s", err)
  1964  					}
  1965  					meta, err := msg.Metadata()
  1966  					if err != nil {
  1967  						t.Fatalf("didn't get metadata: %s", err)
  1968  					}
  1969  					if meta.Sequence.Stream != uint64(i) {
  1970  						t.Fatalf("expected sequence %d got %d", i, meta.Sequence.Stream)
  1971  					}
  1972  				}
  1973  			}
  1974  			expectSequences(t, "kv.myapp.username", 16, 17, 18, 19, 20)
  1975  			expectSequences(t, "kv.myapp.password", 36, 37, 38, 39, 40)
  1976  
  1977  			// delete up to but not including 18 of username...
  1978  			jr, _ := json.Marshal(&JSApiStreamPurgeRequest{Subject: "kv.myapp.username", Sequence: 18})
  1979  			_, err = nc.Request(fmt.Sprintf(JSApiStreamPurgeT, "KV"), jr, time.Second)
  1980  			if err != nil {
  1981  				t.Fatalf("request failed: %s", err)
  1982  			}
  1983  			// 18 should still be there
  1984  			expectSequences(t, "kv.myapp.username", 18, 19, 20)
  1985  		})
  1986  	}
  1987  }
  1988  
  1989  func TestJetStreamClusterMaxConsumers(t *testing.T) {
  1990  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  1991  	defer c.shutdown()
  1992  
  1993  	nc, js := jsClientConnect(t, c.randomServer())
  1994  	defer nc.Close()
  1995  
  1996  	cfg := &nats.StreamConfig{
  1997  		Name:         "MAXC",
  1998  		Storage:      nats.MemoryStorage,
  1999  		Subjects:     []string{"in.maxc.>"},
  2000  		MaxConsumers: 1,
  2001  	}
  2002  	if _, err := js.AddStream(cfg); err != nil {
  2003  		t.Fatalf("Unexpected error: %v", err)
  2004  	}
  2005  	si, err := js.StreamInfo("MAXC")
  2006  	if err != nil {
  2007  		t.Fatalf("Unexpected error: %v", err)
  2008  	}
  2009  	if si.Config.MaxConsumers != 1 {
  2010  		t.Fatalf("Expected max of 1, got %d", si.Config.MaxConsumers)
  2011  	}
  2012  	// Make sure we get the right error.
  2013  	// This should succeed.
  2014  	if _, err := js.SubscribeSync("in.maxc.foo"); err != nil {
  2015  		t.Fatalf("Unexpected error: %v", err)
  2016  	}
  2017  	if _, err := js.SubscribeSync("in.maxc.bar"); err == nil {
  2018  		t.Fatalf("Eexpected error but got none")
  2019  	}
  2020  }
  2021  
  2022  func TestJetStreamClusterMaxConsumersMultipleConcurrentRequests(t *testing.T) {
  2023  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  2024  	defer c.shutdown()
  2025  
  2026  	nc, js := jsClientConnect(t, c.randomServer())
  2027  	defer nc.Close()
  2028  
  2029  	cfg := &nats.StreamConfig{
  2030  		Name:         "MAXCC",
  2031  		Storage:      nats.MemoryStorage,
  2032  		Subjects:     []string{"in.maxcc.>"},
  2033  		MaxConsumers: 1,
  2034  		Replicas:     3,
  2035  	}
  2036  	if _, err := js.AddStream(cfg); err != nil {
  2037  		t.Fatalf("Unexpected error: %v", err)
  2038  	}
  2039  	si, err := js.StreamInfo("MAXCC")
  2040  	if err != nil {
  2041  		t.Fatalf("Unexpected error: %v", err)
  2042  	}
  2043  	if si.Config.MaxConsumers != 1 {
  2044  		t.Fatalf("Expected max of 1, got %d", si.Config.MaxConsumers)
  2045  	}
  2046  
  2047  	startCh := make(chan bool)
  2048  	var wg sync.WaitGroup
  2049  	wg.Add(10)
  2050  	for n := 0; n < 10; n++ {
  2051  		nc, js := jsClientConnect(t, c.randomServer())
  2052  		defer nc.Close()
  2053  		go func(js nats.JetStreamContext) {
  2054  			defer wg.Done()
  2055  			<-startCh
  2056  			js.SubscribeSync("in.maxcc.foo")
  2057  		}(js)
  2058  	}
  2059  	// Wait for Go routines.
  2060  	time.Sleep(250 * time.Millisecond)
  2061  
  2062  	close(startCh)
  2063  	wg.Wait()
  2064  
  2065  	var names []string
  2066  	for n := range js.ConsumerNames("MAXCC") {
  2067  		names = append(names, n)
  2068  	}
  2069  	if nc := len(names); nc > 1 {
  2070  		t.Fatalf("Expected only 1 consumer, got %d", nc)
  2071  	}
  2072  }
  2073  
  2074  func TestJetStreamClusterAccountMaxStreamsAndConsumersMultipleConcurrentRequests(t *testing.T) {
  2075  	tmpl := `
  2076  	listen: 127.0.0.1:-1
  2077  	server_name: %s
  2078  	jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'}
  2079  
  2080  	leaf {
  2081  		listen: 127.0.0.1:-1
  2082  	}
  2083  
  2084  	cluster {
  2085  		name: %s
  2086  		listen: 127.0.0.1:%d
  2087  		routes = [%s]
  2088  	}
  2089  
  2090  	accounts {
  2091  		A {
  2092  			jetstream {
  2093  				max_file: 9663676416
  2094  				max_streams: 2
  2095  				max_consumers: 1
  2096  			}
  2097  			users = [ { user: "a", pass: "pwd" } ]
  2098  		}
  2099  		$SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] }
  2100  	}
  2101  	`
  2102  	c := createJetStreamClusterWithTemplate(t, tmpl, "JSC", 3)
  2103  	defer c.shutdown()
  2104  
  2105  	nc, js := jsClientConnect(t, c.randomServer(), nats.UserInfo("a", "pwd"))
  2106  	defer nc.Close()
  2107  
  2108  	cfg := &nats.StreamConfig{
  2109  		Name:     "MAXCC",
  2110  		Storage:  nats.MemoryStorage,
  2111  		Subjects: []string{"in.maxcc.>"},
  2112  		Replicas: 3,
  2113  	}
  2114  	if _, err := js.AddStream(cfg); err != nil {
  2115  		t.Fatalf("Unexpected error: %v", err)
  2116  	}
  2117  	si, err := js.StreamInfo("MAXCC")
  2118  	if err != nil {
  2119  		t.Fatalf("Unexpected error: %v", err)
  2120  	}
  2121  	if si.Config.MaxConsumers != -1 {
  2122  		t.Fatalf("Expected max of -1, got %d", si.Config.MaxConsumers)
  2123  	}
  2124  
  2125  	startCh := make(chan bool)
  2126  	var wg sync.WaitGroup
  2127  	wg.Add(10)
  2128  	for n := 0; n < 10; n++ {
  2129  		nc, js := jsClientConnect(t, c.randomServer(), nats.UserInfo("a", "pwd"))
  2130  		defer nc.Close()
  2131  		go func(js nats.JetStreamContext, idx int) {
  2132  			defer wg.Done()
  2133  			<-startCh
  2134  			// Test adding new streams
  2135  			js.AddStream(&nats.StreamConfig{
  2136  				Name:     fmt.Sprintf("OTHER_%d", idx),
  2137  				Replicas: 3,
  2138  			})
  2139  			// Test adding consumers to MAXCC stream
  2140  			js.SubscribeSync("in.maxcc.foo", nats.BindStream("MAXCC"))
  2141  		}(js, n)
  2142  	}
  2143  	// Wait for Go routines.
  2144  	time.Sleep(250 * time.Millisecond)
  2145  
  2146  	close(startCh)
  2147  	wg.Wait()
  2148  
  2149  	var names []string
  2150  	for n := range js.StreamNames() {
  2151  		names = append(names, n)
  2152  	}
  2153  	if nc := len(names); nc > 2 {
  2154  		t.Fatalf("Expected only 2 streams, got %d", nc)
  2155  	}
  2156  	names = names[:0]
  2157  	for n := range js.ConsumerNames("MAXCC") {
  2158  		names = append(names, n)
  2159  	}
  2160  	if nc := len(names); nc > 1 {
  2161  		t.Fatalf("Expected only 1 consumer, got %d", nc)
  2162  	}
  2163  }
  2164  
  2165  func TestJetStreamClusterPanicDecodingConsumerState(t *testing.T) {
  2166  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  2167  	defer c.shutdown()
  2168  
  2169  	rch := make(chan struct{}, 2)
  2170  	nc, js := jsClientConnect(t, c.randomServer(),
  2171  		nats.ReconnectWait(50*time.Millisecond),
  2172  		nats.MaxReconnects(-1),
  2173  		nats.ReconnectHandler(func(_ *nats.Conn) {
  2174  			rch <- struct{}{}
  2175  		}),
  2176  	)
  2177  	defer nc.Close()
  2178  
  2179  	if _, err := js.AddStream(&nats.StreamConfig{
  2180  		Name:      "TEST",
  2181  		Subjects:  []string{"ORDERS.*"},
  2182  		Storage:   nats.FileStorage,
  2183  		Replicas:  3,
  2184  		Retention: nats.WorkQueuePolicy,
  2185  		Discard:   nats.DiscardNew,
  2186  		MaxMsgs:   -1,
  2187  		MaxAge:    time.Hour * 24 * 365,
  2188  	}); err != nil {
  2189  		t.Fatalf("Error creating stream: %v", err)
  2190  	}
  2191  
  2192  	sub, err := js.PullSubscribe("ORDERS.created", "durable", nats.MaxAckPending(1000))
  2193  
  2194  	if err != nil {
  2195  		t.Fatalf("Error creating pull subscriber: %v", err)
  2196  	}
  2197  
  2198  	sendMsg := func(subject string) {
  2199  		t.Helper()
  2200  		if _, err := js.Publish(subject, []byte("msg")); err != nil {
  2201  			t.Fatalf("Error on publish: %v", err)
  2202  		}
  2203  	}
  2204  
  2205  	for i := 0; i < 100; i++ {
  2206  		sendMsg("ORDERS.something")
  2207  		sendMsg("ORDERS.created")
  2208  	}
  2209  
  2210  	for total := 0; total != 100; {
  2211  		msgs, err := sub.Fetch(100-total, nats.MaxWait(2*time.Second))
  2212  		if err != nil {
  2213  			t.Fatalf("Failed to fetch message: %v", err)
  2214  		}
  2215  		for _, m := range msgs {
  2216  			m.AckSync()
  2217  			total++
  2218  		}
  2219  	}
  2220  
  2221  	c.stopAll()
  2222  	c.restartAllSamePorts()
  2223  	c.waitOnStreamLeader("$G", "TEST")
  2224  	c.waitOnConsumerLeader("$G", "TEST", "durable")
  2225  
  2226  	select {
  2227  	case <-rch:
  2228  	case <-time.After(2 * time.Second):
  2229  		t.Fatal("Did not reconnect")
  2230  	}
  2231  
  2232  	for i := 0; i < 100; i++ {
  2233  		sendMsg("ORDERS.something")
  2234  		sendMsg("ORDERS.created")
  2235  	}
  2236  
  2237  	for total := 0; total != 100; {
  2238  		msgs, err := sub.Fetch(100-total, nats.MaxWait(2*time.Second))
  2239  		if err != nil {
  2240  			t.Fatalf("Error on fetch: %v", err)
  2241  		}
  2242  		for _, m := range msgs {
  2243  			m.AckSync()
  2244  			total++
  2245  		}
  2246  	}
  2247  }
  2248  
  2249  // Had a report of leaked subs with pull subscribers.
  2250  func TestJetStreamClusterPullConsumerLeakedSubs(t *testing.T) {
  2251  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  2252  	defer c.shutdown()
  2253  
  2254  	nc, js := jsClientConnect(t, c.randomServer())
  2255  	defer nc.Close()
  2256  
  2257  	if _, err := js.AddStream(&nats.StreamConfig{
  2258  		Name:      "TEST",
  2259  		Subjects:  []string{"Domains.*"},
  2260  		Replicas:  1,
  2261  		Retention: nats.InterestPolicy,
  2262  	}); err != nil {
  2263  		t.Fatalf("Error creating stream: %v", err)
  2264  	}
  2265  
  2266  	sub, err := js.PullSubscribe("Domains.Domain", "Domains-Api", nats.MaxAckPending(20_000))
  2267  	if err != nil {
  2268  		t.Fatalf("Error creating pull subscriber: %v", err)
  2269  	}
  2270  	defer sub.Unsubscribe()
  2271  
  2272  	// Load up a bunch of requests.
  2273  	numRequests := 20
  2274  	for i := 0; i < numRequests; i++ {
  2275  		js.PublishAsync("Domains.Domain", []byte("QUESTION"))
  2276  	}
  2277  	select {
  2278  	case <-js.PublishAsyncComplete():
  2279  	case <-time.After(5 * time.Second):
  2280  		t.Fatalf("Did not receive completion signal")
  2281  	}
  2282  	numSubs := c.stableTotalSubs()
  2283  
  2284  	// With batch of 1 we do not see any issues, so set to 10.
  2285  	// Currently Go client uses auto unsub based on the batch size.
  2286  	for i := 0; i < numRequests/10; i++ {
  2287  		msgs, err := sub.Fetch(10)
  2288  		if err != nil {
  2289  			t.Fatalf("Unexpected error: %v", err)
  2290  		}
  2291  		for _, m := range msgs {
  2292  			m.AckSync()
  2293  		}
  2294  	}
  2295  
  2296  	// Make sure the stream is empty..
  2297  	si, err := js.StreamInfo("TEST")
  2298  	if err != nil {
  2299  		t.Fatalf("Unexpected error: %v", err)
  2300  	}
  2301  	if si.State.Msgs != 0 {
  2302  		t.Fatalf("Stream should be empty, got %+v", si)
  2303  	}
  2304  
  2305  	// Make sure we did not leak any subs.
  2306  	if numSubsAfter := c.stableTotalSubs(); numSubsAfter != numSubs {
  2307  		t.Fatalf("Subs leaked: %d before, %d after", numSubs, numSubsAfter)
  2308  	}
  2309  }
  2310  
  2311  func TestJetStreamClusterPushConsumerQueueGroup(t *testing.T) {
  2312  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  2313  	defer c.shutdown()
  2314  
  2315  	nc, js := jsClientConnect(t, c.randomServer())
  2316  	defer nc.Close()
  2317  
  2318  	if _, err := js.AddStream(&nats.StreamConfig{
  2319  		Name:     "TEST",
  2320  		Subjects: []string{"foo"},
  2321  		Replicas: 3,
  2322  	}); err != nil {
  2323  		t.Fatalf("Error creating stream: %v", err)
  2324  	}
  2325  
  2326  	js.Publish("foo", []byte("QG"))
  2327  
  2328  	// Do consumer by hand for now.
  2329  	inbox := nats.NewInbox()
  2330  	obsReq := CreateConsumerRequest{
  2331  		Stream: "TEST",
  2332  		Config: ConsumerConfig{
  2333  			Durable:        "dlc",
  2334  			DeliverSubject: inbox,
  2335  			DeliverGroup:   "22",
  2336  			AckPolicy:      AckNone,
  2337  		},
  2338  	}
  2339  	req, err := json.Marshal(obsReq)
  2340  	if err != nil {
  2341  		t.Fatalf("Unexpected error: %v", err)
  2342  	}
  2343  	resp, err := nc.Request(fmt.Sprintf(JSApiDurableCreateT, "TEST", "dlc"), req, 2*time.Second)
  2344  	if err != nil {
  2345  		t.Fatalf("Unexpected error: %v", err)
  2346  	}
  2347  	var ccResp JSApiConsumerCreateResponse
  2348  	if err = json.Unmarshal(resp.Data, &ccResp); err != nil {
  2349  		t.Fatalf("Unexpected error: %v", err)
  2350  	}
  2351  	if ccResp.Error != nil {
  2352  		t.Fatalf("Unexpected error, got %+v", ccResp.Error)
  2353  	}
  2354  
  2355  	sub, _ := nc.SubscribeSync(inbox)
  2356  	if _, err := sub.NextMsg(100 * time.Millisecond); err == nil {
  2357  		t.Fatalf("Expected a timeout, we should not get messages here")
  2358  	}
  2359  	qsub, _ := nc.QueueSubscribeSync(inbox, "22")
  2360  	checkSubsPending(t, qsub, 1)
  2361  
  2362  	// Test deleting the plain sub has not affect.
  2363  	sub.Unsubscribe()
  2364  	js.Publish("foo", []byte("QG"))
  2365  	checkSubsPending(t, qsub, 2)
  2366  
  2367  	qsub.Unsubscribe()
  2368  	qsub2, _ := nc.QueueSubscribeSync(inbox, "22")
  2369  	js.Publish("foo", []byte("QG"))
  2370  	checkSubsPending(t, qsub2, 1)
  2371  
  2372  	// Catch all sub.
  2373  	sub, _ = nc.SubscribeSync(inbox)
  2374  	qsub2.Unsubscribe() // Should be no more interest.
  2375  	// Send another, make sure we do not see the message flow here.
  2376  	js.Publish("foo", []byte("QG"))
  2377  	if _, err := sub.NextMsg(100 * time.Millisecond); err == nil {
  2378  		t.Fatalf("Expected a timeout, we should not get messages here")
  2379  	}
  2380  }
  2381  
  2382  func TestJetStreamClusterConsumerLastActiveReporting(t *testing.T) {
  2383  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  2384  	defer c.shutdown()
  2385  
  2386  	// Client based API
  2387  	nc, js := jsClientConnect(t, c.randomServer())
  2388  	defer nc.Close()
  2389  
  2390  	cfg := &nats.StreamConfig{Name: "foo", Replicas: 2}
  2391  	if _, err := js.AddStream(cfg); err != nil {
  2392  		t.Fatalf("Unexpected error: %v", err)
  2393  	}
  2394  
  2395  	sendMsg := func() {
  2396  		t.Helper()
  2397  		if _, err := js.Publish("foo", []byte("OK")); err != nil {
  2398  			t.Fatalf("Unexpected publish error: %v", err)
  2399  		}
  2400  	}
  2401  
  2402  	sub, err := js.SubscribeSync("foo", nats.Durable("dlc"))
  2403  	if err != nil {
  2404  		t.Fatalf("Unexpected error: %v", err)
  2405  	}
  2406  
  2407  	// TODO(dlc) - Do by hand for now until Go client has this.
  2408  	consumerInfo := func(name string) *ConsumerInfo {
  2409  		t.Helper()
  2410  		resp, err := nc.Request(fmt.Sprintf(JSApiConsumerInfoT, "foo", name), nil, time.Second)
  2411  		if err != nil {
  2412  			t.Fatalf("Unexpected error: %v", err)
  2413  		}
  2414  		var cinfo JSApiConsumerInfoResponse
  2415  		if err := json.Unmarshal(resp.Data, &cinfo); err != nil {
  2416  			t.Fatalf("Unexpected error: %v", err)
  2417  		}
  2418  		if cinfo.ConsumerInfo == nil || cinfo.Error != nil {
  2419  			t.Fatalf("Got a bad response %+v", cinfo)
  2420  		}
  2421  		return cinfo.ConsumerInfo
  2422  	}
  2423  
  2424  	if ci := consumerInfo("dlc"); ci.Delivered.Last != nil || ci.AckFloor.Last != nil {
  2425  		t.Fatalf("Expected last to be nil by default, got %+v", ci)
  2426  	}
  2427  
  2428  	checkTimeDiff := func(t1, t2 *time.Time) {
  2429  		t.Helper()
  2430  		// Compare on a seconds level
  2431  		rt1, rt2 := t1.UTC().Round(time.Second), t2.UTC().Round(time.Second)
  2432  		if rt1 != rt2 {
  2433  			d := rt1.Sub(rt2)
  2434  			if d > time.Second || d < -time.Second {
  2435  				t.Fatalf("Times differ too much, expected %v got %v", rt1, rt2)
  2436  			}
  2437  		}
  2438  	}
  2439  
  2440  	checkDelivered := func(name string) {
  2441  		t.Helper()
  2442  		now := time.Now()
  2443  		ci := consumerInfo(name)
  2444  		if ci.Delivered.Last == nil {
  2445  			t.Fatalf("Expected delivered last to not be nil after activity, got %+v", ci.Delivered)
  2446  		}
  2447  		checkTimeDiff(&now, ci.Delivered.Last)
  2448  	}
  2449  
  2450  	checkLastAck := func(name string, m *nats.Msg) {
  2451  		t.Helper()
  2452  		now := time.Now()
  2453  		if err := m.AckSync(); err != nil {
  2454  			t.Fatalf("Unexpected error: %v", err)
  2455  		}
  2456  		ci := consumerInfo(name)
  2457  		if ci.AckFloor.Last == nil {
  2458  			t.Fatalf("Expected ack floor last to not be nil after ack, got %+v", ci.AckFloor)
  2459  		}
  2460  		// Compare on a seconds level
  2461  		checkTimeDiff(&now, ci.AckFloor.Last)
  2462  	}
  2463  
  2464  	checkAck := func(name string) {
  2465  		t.Helper()
  2466  		m, err := sub.NextMsg(time.Second)
  2467  		if err != nil {
  2468  			t.Fatalf("Unexpected error: %v", err)
  2469  		}
  2470  		checkLastAck(name, m)
  2471  	}
  2472  
  2473  	// Push
  2474  	sendMsg()
  2475  	checkSubsPending(t, sub, 1)
  2476  	checkDelivered("dlc")
  2477  	checkAck("dlc")
  2478  
  2479  	// Check pull.
  2480  	sub, err = js.PullSubscribe("foo", "rip")
  2481  	if err != nil {
  2482  		t.Fatalf("Unexpected error: %v", err)
  2483  	}
  2484  	sendMsg()
  2485  	// Should still be nil since pull.
  2486  	if ci := consumerInfo("rip"); ci.Delivered.Last != nil || ci.AckFloor.Last != nil {
  2487  		t.Fatalf("Expected last to be nil by default, got %+v", ci)
  2488  	}
  2489  	msgs, err := sub.Fetch(1)
  2490  	if err != nil || len(msgs) == 0 {
  2491  		t.Fatalf("Unexpected error: %v", err)
  2492  	}
  2493  	checkDelivered("rip")
  2494  	checkLastAck("rip", msgs[0])
  2495  
  2496  	// Now test to make sure this state is held correctly across a cluster.
  2497  	ci := consumerInfo("rip")
  2498  	nc.Request(fmt.Sprintf(JSApiConsumerLeaderStepDownT, "foo", "rip"), nil, time.Second)
  2499  	c.waitOnConsumerLeader("$G", "foo", "rip")
  2500  	nci := consumerInfo("rip")
  2501  	if nci.Delivered.Last == nil {
  2502  		t.Fatalf("Expected delivered last to not be nil, got %+v", nci.Delivered)
  2503  	}
  2504  	if nci.AckFloor.Last == nil {
  2505  		t.Fatalf("Expected ack floor last to not be nil, got %+v", nci.AckFloor)
  2506  	}
  2507  
  2508  	checkTimeDiff(ci.Delivered.Last, nci.Delivered.Last)
  2509  	checkTimeDiff(ci.AckFloor.Last, nci.AckFloor.Last)
  2510  }
  2511  
  2512  func TestJetStreamClusterRaceOnRAFTCreate(t *testing.T) {
  2513  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  2514  	defer c.shutdown()
  2515  
  2516  	srv := c.servers[0]
  2517  	nc, err := nats.Connect(srv.ClientURL())
  2518  	if err != nil {
  2519  		t.Fatal(err)
  2520  	}
  2521  	defer nc.Close()
  2522  
  2523  	js, err := nc.JetStream()
  2524  	if err != nil {
  2525  		t.Fatal(err)
  2526  	}
  2527  
  2528  	if _, err := js.AddStream(&nats.StreamConfig{
  2529  		Name:     "TEST",
  2530  		Subjects: []string{"foo"},
  2531  		Replicas: 3,
  2532  	}); err != nil {
  2533  		t.Fatalf("Error creating stream: %v", err)
  2534  	}
  2535  
  2536  	c.waitOnStreamLeader(globalAccountName, "TEST")
  2537  
  2538  	js, err = nc.JetStream(nats.MaxWait(2 * time.Second))
  2539  	if err != nil {
  2540  		t.Fatal(err)
  2541  	}
  2542  
  2543  	size := 10
  2544  	wg := sync.WaitGroup{}
  2545  	wg.Add(size)
  2546  	for i := 0; i < size; i++ {
  2547  		go func() {
  2548  			defer wg.Done()
  2549  			// We don't care about possible failures here, we just want
  2550  			// parallel creation of a consumer.
  2551  			js.PullSubscribe("foo", "shared")
  2552  		}()
  2553  	}
  2554  	wg.Wait()
  2555  }
  2556  
  2557  func TestJetStreamClusterDeadlockOnVarz(t *testing.T) {
  2558  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  2559  	defer c.shutdown()
  2560  
  2561  	srv := c.servers[0]
  2562  	nc, err := nats.Connect(srv.ClientURL())
  2563  	if err != nil {
  2564  		t.Fatal(err)
  2565  	}
  2566  	defer nc.Close()
  2567  
  2568  	js, err := nc.JetStream()
  2569  	if err != nil {
  2570  		t.Fatal(err)
  2571  	}
  2572  
  2573  	size := 10
  2574  	wg := sync.WaitGroup{}
  2575  	wg.Add(size)
  2576  	ch := make(chan struct{})
  2577  	for i := 0; i < size; i++ {
  2578  		go func(i int) {
  2579  			defer wg.Done()
  2580  			<-ch
  2581  			js.AddStream(&nats.StreamConfig{
  2582  				Name:     fmt.Sprintf("TEST%d", i),
  2583  				Subjects: []string{"foo"},
  2584  				Replicas: 3,
  2585  			})
  2586  		}(i)
  2587  	}
  2588  
  2589  	close(ch)
  2590  	for i := 0; i < 10; i++ {
  2591  		srv.Varz(nil)
  2592  		time.Sleep(time.Millisecond)
  2593  	}
  2594  	wg.Wait()
  2595  }
  2596  
  2597  // Issue #2397
  2598  func TestJetStreamClusterStreamCatchupNoState(t *testing.T) {
  2599  	c := createJetStreamClusterExplicit(t, "R2S", 2)
  2600  	defer c.shutdown()
  2601  
  2602  	// Client based API
  2603  	nc, js := jsClientConnect(t, c.randomServer())
  2604  	defer nc.Close()
  2605  
  2606  	_, err := js.AddStream(&nats.StreamConfig{
  2607  		Name:     "TEST",
  2608  		Subjects: []string{"foo.*"},
  2609  		Replicas: 2,
  2610  	})
  2611  	if err != nil {
  2612  		t.Fatalf("Unexpected error: %v", err)
  2613  	}
  2614  
  2615  	// Hold onto servers.
  2616  	sl := c.streamLeader("$G", "TEST")
  2617  	if sl == nil {
  2618  		t.Fatalf("Did not get a server")
  2619  	}
  2620  	nsl := c.randomNonStreamLeader("$G", "TEST")
  2621  	if nsl == nil {
  2622  		t.Fatalf("Did not get a server")
  2623  	}
  2624  	// Grab low level stream and raft node.
  2625  	mset, err := nsl.GlobalAccount().lookupStream("TEST")
  2626  	if err != nil {
  2627  		t.Fatalf("Unexpected error: %v", err)
  2628  	}
  2629  	node := mset.raftNode()
  2630  	if node == nil {
  2631  		t.Fatalf("Could not get stream group name")
  2632  	}
  2633  	gname := node.Group()
  2634  
  2635  	numRequests := 100
  2636  	for i := 0; i < numRequests; i++ {
  2637  		// This will force a snapshot which will prune the normal log.
  2638  		// We will remove the snapshot to simulate the error condition.
  2639  		if i == 10 {
  2640  			if err := node.InstallSnapshot(mset.stateSnapshot()); err != nil {
  2641  				t.Fatalf("Error installing snapshot: %v", err)
  2642  			}
  2643  		}
  2644  		_, err := js.Publish("foo.created", []byte("REQ"))
  2645  		require_NoError(t, err)
  2646  	}
  2647  
  2648  	config := nsl.JetStreamConfig()
  2649  	if config == nil {
  2650  		t.Fatalf("No config")
  2651  	}
  2652  	lconfig := sl.JetStreamConfig()
  2653  	if lconfig == nil {
  2654  		t.Fatalf("No config")
  2655  	}
  2656  
  2657  	nc.Close()
  2658  	c.stopAll()
  2659  	// Remove all state by truncating for the non-leader.
  2660  	for _, fn := range []string{"1.blk", "1.idx", "1.fss"} {
  2661  		fname := filepath.Join(config.StoreDir, "$G", "streams", "TEST", "msgs", fn)
  2662  		fd, err := os.OpenFile(fname, os.O_RDWR, defaultFilePerms)
  2663  		if err != nil {
  2664  			continue
  2665  		}
  2666  		fd.Truncate(0)
  2667  		fd.Close()
  2668  	}
  2669  	// For both make sure we have no raft snapshots.
  2670  	snapDir := filepath.Join(lconfig.StoreDir, "$SYS", "_js_", gname, "snapshots")
  2671  	os.RemoveAll(snapDir)
  2672  	snapDir = filepath.Join(config.StoreDir, "$SYS", "_js_", gname, "snapshots")
  2673  	os.RemoveAll(snapDir)
  2674  
  2675  	// Now restart.
  2676  	c.restartAll()
  2677  	for _, cs := range c.servers {
  2678  		c.waitOnStreamCurrent(cs, "$G", "TEST")
  2679  	}
  2680  
  2681  	nc, js = jsClientConnect(t, c.randomServer())
  2682  	defer nc.Close()
  2683  
  2684  	c.waitOnStreamLeader("$G", "TEST")
  2685  
  2686  	_, err = js.Publish("foo.created", []byte("ZZZ"))
  2687  	require_NoError(t, err)
  2688  
  2689  	si, err := js.StreamInfo("TEST")
  2690  	require_NoError(t, err)
  2691  
  2692  	if si.State.LastSeq != 101 {
  2693  		t.Fatalf("bad state after restart: %+v", si.State)
  2694  	}
  2695  }
  2696  
  2697  // Issue #2525
  2698  func TestJetStreamClusterLargeHeaders(t *testing.T) {
  2699  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  2700  	defer c.shutdown()
  2701  
  2702  	nc, js := jsClientConnect(t, c.randomServer())
  2703  	defer nc.Close()
  2704  
  2705  	_, err := js.AddStream(&nats.StreamConfig{
  2706  		Name:     "TEST",
  2707  		Subjects: []string{"foo"},
  2708  		Replicas: 3,
  2709  	})
  2710  	if err != nil {
  2711  		t.Fatalf("add stream failed: %s", err)
  2712  	}
  2713  
  2714  	// We use u16 to encode msg header len. Make sure we do the right thing when > 65k.
  2715  	data := make([]byte, 8*1024)
  2716  	crand.Read(data)
  2717  	val := hex.EncodeToString(data)[:8*1024]
  2718  	m := nats.NewMsg("foo")
  2719  	for i := 1; i <= 10; i++ {
  2720  		m.Header.Add(fmt.Sprintf("LargeHeader-%d", i), val)
  2721  	}
  2722  	m.Data = []byte("Hello Large Headers!")
  2723  	if _, err = js.PublishMsg(m); err == nil {
  2724  		t.Fatalf("Expected an error but got none")
  2725  	}
  2726  }
  2727  
  2728  func TestJetStreamClusterFlowControlRequiresHeartbeats(t *testing.T) {
  2729  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  2730  	defer c.shutdown()
  2731  
  2732  	nc, js := jsClientConnect(t, c.randomServer())
  2733  	defer nc.Close()
  2734  
  2735  	if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST"}); err != nil {
  2736  		t.Fatalf("Unexpected error: %v", err)
  2737  	}
  2738  
  2739  	if _, err := js.AddConsumer("TEST", &nats.ConsumerConfig{
  2740  		Durable:        "dlc",
  2741  		DeliverSubject: nats.NewInbox(),
  2742  		FlowControl:    true,
  2743  	}); err == nil || IsNatsErr(err, JSConsumerWithFlowControlNeedsHeartbeats) {
  2744  		t.Fatalf("Unexpected error: %v", err)
  2745  	}
  2746  }
  2747  
  2748  func TestJetStreamClusterMixedModeColdStartPrune(t *testing.T) {
  2749  	// Purposely make this unbalanced. Without changes this will never form a quorum to elect the meta-leader.
  2750  	c := createMixedModeCluster(t, jsMixedModeGlobalAccountTempl, "MMCS5", _EMPTY_, 3, 4, false)
  2751  	defer c.shutdown()
  2752  
  2753  	// Make sure we report cluster size.
  2754  	checkClusterSize := func(s *Server) {
  2755  		t.Helper()
  2756  		jsi, err := s.Jsz(nil)
  2757  		if err != nil {
  2758  			t.Fatalf("Unexpected error: %v", err)
  2759  		}
  2760  		if jsi.Meta == nil {
  2761  			t.Fatalf("Expected a cluster info")
  2762  		}
  2763  		if jsi.Meta.Size != 3 {
  2764  			t.Fatalf("Expected cluster size to be adjusted to %d, but got %d", 3, jsi.Meta.Size)
  2765  		}
  2766  	}
  2767  
  2768  	checkClusterSize(c.leader())
  2769  	checkClusterSize(c.randomNonLeader())
  2770  }
  2771  
  2772  func TestJetStreamClusterMirrorAndSourceCrossNonNeighboringDomain(t *testing.T) {
  2773  	storeDir1 := t.TempDir()
  2774  	conf1 := createConfFile(t, []byte(fmt.Sprintf(`
  2775  		listen: 127.0.0.1:-1
  2776  		jetstream: {max_mem_store: 256MB, max_file_store: 256MB, domain: domain1, store_dir: '%s'}
  2777  		accounts {
  2778  			A:{   jetstream: enable, users:[ {user:a1,password:a1}]},
  2779  			SYS:{ users:[ {user:s1,password:s1}]},
  2780  		}
  2781  		system_account = SYS
  2782  		no_auth_user: a1
  2783  		leafnodes: {
  2784  			listen: 127.0.0.1:-1
  2785  		}
  2786  	`, storeDir1)))
  2787  	s1, _ := RunServerWithConfig(conf1)
  2788  	defer s1.Shutdown()
  2789  	storeDir2 := t.TempDir()
  2790  	conf2 := createConfFile(t, []byte(fmt.Sprintf(`
  2791  		listen: 127.0.0.1:-1
  2792  		jetstream: {max_mem_store: 256MB, max_file_store: 256MB, domain: domain2, store_dir: '%s'}
  2793  		accounts {
  2794  			A:{   jetstream: enable, users:[ {user:a1,password:a1}]},
  2795  			SYS:{ users:[ {user:s1,password:s1}]},
  2796  		}
  2797  		system_account = SYS
  2798  		no_auth_user: a1
  2799  		leafnodes:{
  2800  			remotes:[{ url:nats://a1:a1@127.0.0.1:%d, account: A},
  2801  					 { url:nats://s1:s1@127.0.0.1:%d, account: SYS}]
  2802  		}
  2803  	`, storeDir2, s1.opts.LeafNode.Port, s1.opts.LeafNode.Port)))
  2804  	s2, _ := RunServerWithConfig(conf2)
  2805  	defer s2.Shutdown()
  2806  	storeDir3 := t.TempDir()
  2807  	conf3 := createConfFile(t, []byte(fmt.Sprintf(`
  2808  		listen: 127.0.0.1:-1
  2809  		jetstream: {max_mem_store: 256MB, max_file_store: 256MB, domain: domain3, store_dir: '%s'}
  2810  		accounts {
  2811  			A:{   jetstream: enable, users:[ {user:a1,password:a1}]},
  2812  			SYS:{ users:[ {user:s1,password:s1}]},
  2813  		}
  2814  		system_account = SYS
  2815  		no_auth_user: a1
  2816  		leafnodes:{
  2817  			remotes:[{ url:nats://a1:a1@127.0.0.1:%d, account: A},
  2818  					 { url:nats://s1:s1@127.0.0.1:%d, account: SYS}]
  2819  		}
  2820  	`, storeDir3, s1.opts.LeafNode.Port, s1.opts.LeafNode.Port)))
  2821  	s3, _ := RunServerWithConfig(conf3)
  2822  	defer s3.Shutdown()
  2823  
  2824  	checkLeafNodeConnectedCount(t, s1, 4)
  2825  	checkLeafNodeConnectedCount(t, s2, 2)
  2826  	checkLeafNodeConnectedCount(t, s3, 2)
  2827  
  2828  	c2 := natsConnect(t, s2.ClientURL())
  2829  	defer c2.Close()
  2830  	js2, err := c2.JetStream(nats.Domain("domain2"))
  2831  	require_NoError(t, err)
  2832  	ai2, err := js2.AccountInfo()
  2833  	require_NoError(t, err)
  2834  	require_Equal(t, ai2.Domain, "domain2")
  2835  	_, err = js2.AddStream(&nats.StreamConfig{
  2836  		Name:     "disk",
  2837  		Storage:  nats.FileStorage,
  2838  		Subjects: []string{"disk"},
  2839  	})
  2840  	require_NoError(t, err)
  2841  	_, err = js2.Publish("disk", nil)
  2842  	require_NoError(t, err)
  2843  	si, err := js2.StreamInfo("disk")
  2844  	require_NoError(t, err)
  2845  	require_True(t, si.State.Msgs == 1)
  2846  
  2847  	c3 := natsConnect(t, s3.ClientURL())
  2848  	defer c3.Close()
  2849  	js3, err := c3.JetStream(nats.Domain("domain3"))
  2850  	require_NoError(t, err)
  2851  	ai3, err := js3.AccountInfo()
  2852  	require_NoError(t, err)
  2853  	require_Equal(t, ai3.Domain, "domain3")
  2854  
  2855  	_, err = js3.AddStream(&nats.StreamConfig{
  2856  		Name:    "stream-mirror",
  2857  		Storage: nats.FileStorage,
  2858  		Mirror: &nats.StreamSource{
  2859  			Name:     "disk",
  2860  			External: &nats.ExternalStream{APIPrefix: "$JS.domain2.API"},
  2861  		},
  2862  	})
  2863  	require_NoError(t, err)
  2864  
  2865  	_, err = js3.AddStream(&nats.StreamConfig{
  2866  		Name:    "stream-source",
  2867  		Storage: nats.FileStorage,
  2868  		Sources: []*nats.StreamSource{{
  2869  			Name:     "disk",
  2870  			External: &nats.ExternalStream{APIPrefix: "$JS.domain2.API"},
  2871  		}},
  2872  	})
  2873  	require_NoError(t, err)
  2874  	checkFor(t, 10*time.Second, 250*time.Millisecond, func() error {
  2875  		if si, _ := js3.StreamInfo("stream-mirror"); si.State.Msgs != 1 {
  2876  			return fmt.Errorf("Expected 1 msg for mirror, got %d", si.State.Msgs)
  2877  		}
  2878  		if si, _ := js3.StreamInfo("stream-source"); si.State.Msgs != 1 {
  2879  			return fmt.Errorf("Expected 1 msg for source, got %d", si.State.Msgs)
  2880  		}
  2881  		return nil
  2882  	})
  2883  }
  2884  
  2885  func TestJetStreamClusterSeal(t *testing.T) {
  2886  	s := RunBasicJetStreamServer(t)
  2887  	defer s.Shutdown()
  2888  
  2889  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  2890  	defer c.shutdown()
  2891  
  2892  	// Need to be done by hand until makes its way to Go client.
  2893  	createStream := func(t *testing.T, nc *nats.Conn, cfg *StreamConfig) *JSApiStreamCreateResponse {
  2894  		t.Helper()
  2895  		req, err := json.Marshal(cfg)
  2896  		require_NoError(t, err)
  2897  		resp, err := nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second)
  2898  		require_NoError(t, err)
  2899  		var scResp JSApiStreamCreateResponse
  2900  		err = json.Unmarshal(resp.Data, &scResp)
  2901  		require_NoError(t, err)
  2902  		return &scResp
  2903  	}
  2904  
  2905  	updateStream := func(t *testing.T, nc *nats.Conn, cfg *StreamConfig) *JSApiStreamUpdateResponse {
  2906  		t.Helper()
  2907  		req, err := json.Marshal(cfg)
  2908  		require_NoError(t, err)
  2909  		resp, err := nc.Request(fmt.Sprintf(JSApiStreamUpdateT, cfg.Name), req, time.Second)
  2910  		require_NoError(t, err)
  2911  		var scResp JSApiStreamUpdateResponse
  2912  		err = json.Unmarshal(resp.Data, &scResp)
  2913  		require_NoError(t, err)
  2914  		return &scResp
  2915  	}
  2916  
  2917  	testSeal := func(t *testing.T, s *Server, replicas int) {
  2918  		nc, js := jsClientConnect(t, s)
  2919  		defer nc.Close()
  2920  		// Should not be able to create a stream that starts sealed.
  2921  		scr := createStream(t, nc, &StreamConfig{Name: "SEALED", Replicas: replicas, Storage: MemoryStorage, Sealed: true})
  2922  		if scr.Error == nil {
  2923  			t.Fatalf("Expected an error but got none")
  2924  		}
  2925  		// Create our stream.
  2926  		scr = createStream(t, nc, &StreamConfig{Name: "SEALED", Replicas: replicas, MaxAge: time.Minute, Storage: MemoryStorage})
  2927  		if scr.Error != nil {
  2928  			t.Fatalf("Unexpected error: %v", scr.Error)
  2929  		}
  2930  		for i := 0; i < 100; i++ {
  2931  			js.Publish("SEALED", []byte("OK"))
  2932  		}
  2933  		// Update to sealed.
  2934  		sur := updateStream(t, nc, &StreamConfig{Name: "SEALED", Replicas: replicas, MaxAge: time.Minute, Storage: MemoryStorage, Sealed: true})
  2935  		if sur.Error != nil {
  2936  			t.Fatalf("Unexpected error: %v", sur.Error)
  2937  		}
  2938  
  2939  		// Grab stream info and make sure its reflected as sealed.
  2940  		resp, err := nc.Request(fmt.Sprintf(JSApiStreamInfoT, "SEALED"), nil, time.Second)
  2941  		require_NoError(t, err)
  2942  		var sir JSApiStreamInfoResponse
  2943  		err = json.Unmarshal(resp.Data, &sir)
  2944  		require_NoError(t, err)
  2945  		if sir.Error != nil {
  2946  			t.Fatalf("Unexpected error: %v", sir.Error)
  2947  		}
  2948  		si := sir.StreamInfo
  2949  		if !si.Config.Sealed {
  2950  			t.Fatalf("Expected the stream to be marked sealed, got %+v\n", si.Config)
  2951  		}
  2952  		// Make sure we also updated any max age and moved to discard new.
  2953  		if si.Config.MaxAge != 0 {
  2954  			t.Fatalf("Expected MaxAge to be cleared, got %v", si.Config.MaxAge)
  2955  		}
  2956  		if si.Config.Discard != DiscardNew {
  2957  			t.Fatalf("Expected DiscardPolicy to be set to new, got %v", si.Config.Discard)
  2958  		}
  2959  		// Also make sure we set denyDelete and denyPurge.
  2960  		if !si.Config.DenyDelete {
  2961  			t.Fatalf("Expected the stream to be marked as DenyDelete, got %+v\n", si.Config)
  2962  		}
  2963  		if !si.Config.DenyPurge {
  2964  			t.Fatalf("Expected the stream to be marked as DenyPurge, got %+v\n", si.Config)
  2965  		}
  2966  		if si.Config.AllowRollup {
  2967  			t.Fatalf("Expected the stream to be marked as not AllowRollup, got %+v\n", si.Config)
  2968  		}
  2969  
  2970  		// Sealing is not reversible, so make sure we get an error trying to undo.
  2971  		sur = updateStream(t, nc, &StreamConfig{Name: "SEALED", Replicas: replicas, Storage: MemoryStorage, Sealed: false})
  2972  		if sur.Error == nil {
  2973  			t.Fatalf("Expected an error but got none")
  2974  		}
  2975  
  2976  		// Now test operations like publish a new msg, delete, purge etc all fail.
  2977  		if _, err := js.Publish("SEALED", []byte("OK")); err == nil {
  2978  			t.Fatalf("Expected a publish to fail")
  2979  		}
  2980  		if err := js.DeleteMsg("SEALED", 1); err == nil {
  2981  			t.Fatalf("Expected a delete to fail")
  2982  		}
  2983  		if err := js.PurgeStream("SEALED"); err == nil {
  2984  			t.Fatalf("Expected a purge to fail")
  2985  		}
  2986  		if err := js.DeleteStream("SEALED"); err != nil {
  2987  			t.Fatalf("Expected a delete to succeed, got %v", err)
  2988  		}
  2989  	}
  2990  
  2991  	t.Run("Single", func(t *testing.T) { testSeal(t, s, 1) })
  2992  	t.Run("Clustered", func(t *testing.T) { testSeal(t, c.randomServer(), 3) })
  2993  }
  2994  
  2995  // Issue #2568
  2996  func TestJetStreamClusteredStreamCreateIdempotent(t *testing.T) {
  2997  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  2998  	defer c.shutdown()
  2999  
  3000  	nc, _ := jsClientConnect(t, c.randomServer())
  3001  	defer nc.Close()
  3002  
  3003  	cfg := &StreamConfig{
  3004  		Name:       "AUDIT",
  3005  		Storage:    MemoryStorage,
  3006  		Subjects:   []string{"foo"},
  3007  		Replicas:   3,
  3008  		DenyDelete: true,
  3009  		DenyPurge:  true,
  3010  	}
  3011  	addStream(t, nc, cfg)
  3012  	addStream(t, nc, cfg)
  3013  }
  3014  
  3015  func TestJetStreamClusterRollupsRequirePurge(t *testing.T) {
  3016  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  3017  	defer c.shutdown()
  3018  
  3019  	nc, _ := jsClientConnect(t, c.randomServer())
  3020  	defer nc.Close()
  3021  
  3022  	cfg := &StreamConfig{
  3023  		Name:        "SENSORS",
  3024  		Storage:     FileStorage,
  3025  		Subjects:    []string{"sensor.*.temp"},
  3026  		MaxMsgsPer:  10,
  3027  		AllowRollup: true,
  3028  		DenyPurge:   true,
  3029  		Replicas:    2,
  3030  	}
  3031  
  3032  	j, err := json.Marshal(cfg)
  3033  	require_NoError(t, err)
  3034  	resp, err := nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), j, time.Second)
  3035  	require_NoError(t, err)
  3036  
  3037  	var cr JSApiStreamCreateResponse
  3038  	err = json.Unmarshal(resp.Data, &cr)
  3039  	require_NoError(t, err)
  3040  	if cr.Error == nil || cr.Error.Description != "roll-ups require the purge permission" {
  3041  		t.Fatalf("unexpected error: %v", cr.Error)
  3042  	}
  3043  }
  3044  
  3045  func TestJetStreamClusterRollups(t *testing.T) {
  3046  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  3047  	defer c.shutdown()
  3048  
  3049  	nc, js := jsClientConnect(t, c.randomServer())
  3050  	defer nc.Close()
  3051  
  3052  	cfg := &StreamConfig{
  3053  		Name:        "SENSORS",
  3054  		Storage:     FileStorage,
  3055  		Subjects:    []string{"sensor.*.temp"},
  3056  		MaxMsgsPer:  10,
  3057  		AllowRollup: true,
  3058  		Replicas:    2,
  3059  	}
  3060  	addStream(t, nc, cfg)
  3061  
  3062  	var bt [16]byte
  3063  	var le = binary.LittleEndian
  3064  
  3065  	// Generate 1000 random measurements for 10 sensors
  3066  	for i := 0; i < 1000; i++ {
  3067  		id, temp := strconv.Itoa(rand.Intn(9)+1), rand.Int31n(42)+60 // 60-102 degrees.
  3068  		le.PutUint16(bt[0:], uint16(temp))
  3069  		js.PublishAsync(fmt.Sprintf("sensor.%v.temp", id), bt[:])
  3070  	}
  3071  	select {
  3072  	case <-js.PublishAsyncComplete():
  3073  	case <-time.After(5 * time.Second):
  3074  		t.Fatalf("Did not receive completion signal")
  3075  	}
  3076  
  3077  	// Grab random sensor and do a rollup by averaging etc.
  3078  	sensor := fmt.Sprintf("sensor.%v.temp", strconv.Itoa(rand.Intn(9)+1))
  3079  	sub, err := js.SubscribeSync(sensor)
  3080  	require_NoError(t, err)
  3081  
  3082  	var total, samples int
  3083  	for m, err := sub.NextMsg(time.Second); err == nil; m, err = sub.NextMsg(time.Second) {
  3084  		total += int(le.Uint16(m.Data))
  3085  		samples++
  3086  	}
  3087  	sub.Unsubscribe()
  3088  	avg := uint16(total / samples)
  3089  	le.PutUint16(bt[0:], avg)
  3090  
  3091  	rollup := nats.NewMsg(sensor)
  3092  	rollup.Data = bt[:]
  3093  	rollup.Header.Set(JSMsgRollup, JSMsgRollupSubject)
  3094  	_, err = js.PublishMsg(rollup)
  3095  	require_NoError(t, err)
  3096  	sub, err = js.SubscribeSync(sensor)
  3097  	require_NoError(t, err)
  3098  	// Make sure only 1 left.
  3099  	checkSubsPending(t, sub, 1)
  3100  	sub.Unsubscribe()
  3101  
  3102  	// Now do all.
  3103  	rollup.Header.Set(JSMsgRollup, JSMsgRollupAll)
  3104  	_, err = js.PublishMsg(rollup)
  3105  	require_NoError(t, err)
  3106  	// Same thing as above should hold true.
  3107  	sub, err = js.SubscribeSync(sensor)
  3108  	require_NoError(t, err)
  3109  	// Make sure only 1 left.
  3110  	checkSubsPending(t, sub, 1)
  3111  	sub.Unsubscribe()
  3112  
  3113  	// Also should only be 1 msgs in total stream left with JSMsgRollupAll
  3114  	si, err := js.StreamInfo("SENSORS")
  3115  	require_NoError(t, err)
  3116  	if si.State.Msgs != 1 {
  3117  		t.Fatalf("Expected only 1 msg left after rollup all, got %+v", si.State)
  3118  	}
  3119  }
  3120  
  3121  func TestJetStreamClusterRollupSubjectAndWatchers(t *testing.T) {
  3122  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  3123  	defer c.shutdown()
  3124  
  3125  	nc, js := jsClientConnect(t, c.randomServer())
  3126  	defer nc.Close()
  3127  
  3128  	cfg := &StreamConfig{
  3129  		Name:        "KVW",
  3130  		Storage:     FileStorage,
  3131  		Subjects:    []string{"kv.*"},
  3132  		MaxMsgsPer:  10,
  3133  		AllowRollup: true,
  3134  		Replicas:    2,
  3135  	}
  3136  	addStream(t, nc, cfg)
  3137  
  3138  	sub, err := js.SubscribeSync("kv.*")
  3139  	if err != nil {
  3140  		t.Fatalf("Unexpected error: %v", err)
  3141  	}
  3142  
  3143  	send := func(key, value string) {
  3144  		t.Helper()
  3145  		_, err := js.Publish("kv."+key, []byte(value))
  3146  		require_NoError(t, err)
  3147  	}
  3148  
  3149  	rollup := func(key, value string) {
  3150  		t.Helper()
  3151  		m := nats.NewMsg("kv." + key)
  3152  		m.Data = []byte(value)
  3153  		m.Header.Set(JSMsgRollup, JSMsgRollupSubject)
  3154  		_, err := js.PublishMsg(m)
  3155  		require_NoError(t, err)
  3156  	}
  3157  
  3158  	expectUpdate := func(key, value string, seq uint64) {
  3159  		t.Helper()
  3160  		m, err := sub.NextMsg(time.Second)
  3161  		require_NoError(t, err)
  3162  		if m.Subject != "kv."+key {
  3163  			t.Fatalf("Keys don't match: %q vs %q", m.Subject[3:], key)
  3164  		}
  3165  		if string(m.Data) != value {
  3166  			t.Fatalf("Values don't match: %q vs %q", m.Data, value)
  3167  		}
  3168  		meta, err := m.Metadata()
  3169  		require_NoError(t, err)
  3170  		if meta.Sequence.Consumer != seq {
  3171  			t.Fatalf("Sequences don't match: %v vs %v", meta.Sequence.Consumer, value)
  3172  		}
  3173  	}
  3174  
  3175  	rollup("name", "derek")
  3176  	expectUpdate("name", "derek", 1)
  3177  	rollup("age", "22")
  3178  	expectUpdate("age", "22", 2)
  3179  
  3180  	send("name", "derek")
  3181  	expectUpdate("name", "derek", 3)
  3182  	send("age", "22")
  3183  	expectUpdate("age", "22", 4)
  3184  	send("age", "33")
  3185  	expectUpdate("age", "33", 5)
  3186  	send("name", "ivan")
  3187  	expectUpdate("name", "ivan", 6)
  3188  	send("name", "rip")
  3189  	expectUpdate("name", "rip", 7)
  3190  	rollup("age", "50")
  3191  	expectUpdate("age", "50", 8)
  3192  }
  3193  
  3194  func TestJetStreamClusterAppendOnly(t *testing.T) {
  3195  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  3196  	defer c.shutdown()
  3197  
  3198  	nc, js := jsClientConnect(t, c.randomServer())
  3199  	defer nc.Close()
  3200  
  3201  	cfg := &StreamConfig{
  3202  		Name:       "AUDIT",
  3203  		Storage:    MemoryStorage,
  3204  		Subjects:   []string{"foo"},
  3205  		Replicas:   3,
  3206  		DenyDelete: true,
  3207  		DenyPurge:  true,
  3208  	}
  3209  	si := addStream(t, nc, cfg)
  3210  	if !si.Config.DenyDelete || !si.Config.DenyPurge {
  3211  		t.Fatalf("Expected DenyDelete and DenyPurge to be set, got %+v", si.Config)
  3212  	}
  3213  	for i := 0; i < 10; i++ {
  3214  		js.Publish("foo", []byte("ok"))
  3215  	}
  3216  	// Delete should not be allowed.
  3217  	if err := js.DeleteMsg("AUDIT", 1); err == nil {
  3218  		t.Fatalf("Expected an error for delete but got none")
  3219  	}
  3220  	if err := js.PurgeStream("AUDIT"); err == nil {
  3221  		t.Fatalf("Expected an error for purge but got none")
  3222  	}
  3223  
  3224  	cfg.DenyDelete = false
  3225  	cfg.DenyPurge = false
  3226  
  3227  	req, err := json.Marshal(cfg)
  3228  	require_NoError(t, err)
  3229  	rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamUpdateT, cfg.Name), req, time.Second)
  3230  	require_NoError(t, err)
  3231  	var resp JSApiStreamCreateResponse
  3232  	err = json.Unmarshal(rmsg.Data, &resp)
  3233  	require_NoError(t, err)
  3234  	if resp.Error == nil {
  3235  		t.Fatalf("Expected an error")
  3236  	}
  3237  }
  3238  
  3239  // Related to #2642
  3240  func TestJetStreamClusterStreamUpdateSyncBug(t *testing.T) {
  3241  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  3242  	defer c.shutdown()
  3243  
  3244  	// Client based API
  3245  	s := c.randomServer()
  3246  	nc, js := jsClientConnect(t, s)
  3247  	defer nc.Close()
  3248  
  3249  	cfg := &nats.StreamConfig{
  3250  		Name:     "TEST",
  3251  		Subjects: []string{"foo", "bar"},
  3252  		Replicas: 3,
  3253  	}
  3254  
  3255  	if _, err := js.AddStream(cfg); err != nil {
  3256  		t.Fatalf("Unexpected error: %v", err)
  3257  	}
  3258  
  3259  	msg, toSend := []byte("OK"), 100
  3260  	for i := 0; i < toSend; i++ {
  3261  		if _, err := js.PublishAsync("foo", msg); err != nil {
  3262  			t.Fatalf("Unexpected publish error: %v", err)
  3263  		}
  3264  	}
  3265  	select {
  3266  	case <-js.PublishAsyncComplete():
  3267  	case <-time.After(5 * time.Second):
  3268  		t.Fatalf("Did not receive completion signal")
  3269  	}
  3270  
  3271  	cfg.Subjects = []string{"foo", "bar", "baz"}
  3272  	if _, err := js.UpdateStream(cfg); err != nil {
  3273  		t.Fatalf("Unexpected error: %v", err)
  3274  	}
  3275  
  3276  	// Shutdown a server. The bug is that the update wiped the sync subject used to catchup a stream that has the RAFT layer snapshotted.
  3277  	nsl := c.randomNonStreamLeader("$G", "TEST")
  3278  	nsl.Shutdown()
  3279  	// make sure a leader exists
  3280  	c.waitOnStreamLeader("$G", "TEST")
  3281  
  3282  	for i := 0; i < toSend*4; i++ {
  3283  		if _, err := js.PublishAsync("foo", msg); err != nil {
  3284  			t.Fatalf("Unexpected publish error: %v", err)
  3285  		}
  3286  	}
  3287  	select {
  3288  	case <-js.PublishAsyncComplete():
  3289  	case <-time.After(5 * time.Second):
  3290  		t.Fatalf("Did not receive completion signal")
  3291  	}
  3292  
  3293  	// Throw in deletes as well.
  3294  	for seq := uint64(200); seq < uint64(300); seq += 4 {
  3295  		if err := js.DeleteMsg("TEST", seq); err != nil {
  3296  			t.Fatalf("Unexpected error: %v", err)
  3297  		}
  3298  	}
  3299  
  3300  	// We need to snapshot to force upper layer catchup vs RAFT layer.
  3301  	c.waitOnAllCurrent()
  3302  	mset, err := c.streamLeader("$G", "TEST").GlobalAccount().lookupStream("TEST")
  3303  	if err != nil {
  3304  		t.Fatalf("Expected to find a stream for %q", "TEST")
  3305  	}
  3306  	if err := mset.raftNode().InstallSnapshot(mset.stateSnapshot()); err != nil {
  3307  		t.Fatalf("Unexpected error: %v", err)
  3308  	}
  3309  
  3310  	c.waitOnAllCurrent()
  3311  	nsl = c.restartServer(nsl)
  3312  	c.waitOnStreamLeader("$G", "TEST")
  3313  	c.waitOnStreamCurrent(nsl, "$G", "TEST")
  3314  
  3315  	mset, _ = nsl.GlobalAccount().lookupStream("TEST")
  3316  	cloneState := mset.state()
  3317  
  3318  	mset, _ = c.streamLeader("$G", "TEST").GlobalAccount().lookupStream("TEST")
  3319  	leaderState := mset.state()
  3320  
  3321  	if !reflect.DeepEqual(cloneState, leaderState) {
  3322  		t.Fatalf("States do not match: %+v vs %+v", cloneState, leaderState)
  3323  	}
  3324  }
  3325  
  3326  // Issue #2666
  3327  func TestJetStreamClusterKVMultipleConcurrentCreate(t *testing.T) {
  3328  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  3329  	defer c.shutdown()
  3330  
  3331  	// Client based API
  3332  	nc, js := jsClientConnect(t, c.randomServer())
  3333  	defer nc.Close()
  3334  
  3335  	kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "TEST", History: 1, TTL: 150 * time.Millisecond, Replicas: 3})
  3336  	if err != nil {
  3337  		t.Fatalf("Unexpected error: %v", err)
  3338  	}
  3339  
  3340  	startCh := make(chan bool)
  3341  	var wg sync.WaitGroup
  3342  
  3343  	for n := 0; n < 5; n++ {
  3344  		wg.Add(1)
  3345  		go func() {
  3346  			defer wg.Done()
  3347  			<-startCh
  3348  			if r, err := kv.Create("name", []byte("dlc")); err == nil {
  3349  				if _, err = kv.Update("name", []byte("rip"), r); err != nil {
  3350  					t.Log("Unexpected Update error: ", err)
  3351  				}
  3352  			}
  3353  		}()
  3354  	}
  3355  	// Wait for Go routines to start.
  3356  	time.Sleep(100 * time.Millisecond)
  3357  	close(startCh)
  3358  	wg.Wait()
  3359  	// Just make sure its there and picks up the phone.
  3360  	if _, err := js.StreamInfo("KV_TEST"); err != nil {
  3361  		t.Fatalf("Unexpected error: %v", err)
  3362  	}
  3363  
  3364  	// Now make sure we do ok when servers are restarted and we need to deal with dangling clfs state.
  3365  	// First non-leader.
  3366  	rs := c.randomNonStreamLeader("$G", "KV_TEST")
  3367  	rs.Shutdown()
  3368  	rs = c.restartServer(rs)
  3369  	c.waitOnStreamCurrent(rs, "$G", "KV_TEST")
  3370  
  3371  	if _, err := kv.Put("name", []byte("ik")); err != nil {
  3372  		t.Fatalf("Unexpected error: %v", err)
  3373  	}
  3374  
  3375  	// Now the actual leader.
  3376  	sl := c.streamLeader("$G", "KV_TEST")
  3377  	sl.Shutdown()
  3378  	sl = c.restartServer(sl)
  3379  	c.waitOnStreamLeader("$G", "KV_TEST")
  3380  	c.waitOnStreamCurrent(sl, "$G", "KV_TEST")
  3381  
  3382  	if _, err := kv.Put("name", []byte("mh")); err != nil {
  3383  		t.Fatalf("Unexpected error: %v", err)
  3384  	}
  3385  	time.Sleep(time.Second)
  3386  }
  3387  
  3388  func TestJetStreamClusterAccountInfoForSystemAccount(t *testing.T) {
  3389  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  3390  	defer c.shutdown()
  3391  
  3392  	// Client based API
  3393  	nc, js := jsClientConnect(t, c.randomServer(), nats.UserInfo("admin", "s3cr3t!"))
  3394  	defer nc.Close()
  3395  
  3396  	_, err := js.AccountInfo()
  3397  	require_Error(t, err, nats.ErrJetStreamNotEnabledForAccount)
  3398  }
  3399  
  3400  func TestJetStreamClusterListFilter(t *testing.T) {
  3401  	s := RunBasicJetStreamServer(t)
  3402  	defer s.Shutdown()
  3403  
  3404  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  3405  	defer c.shutdown()
  3406  
  3407  	testList := func(t *testing.T, srv *Server, r int) {
  3408  		nc, js := jsClientConnect(t, srv)
  3409  		defer nc.Close()
  3410  
  3411  		_, err := js.AddStream(&nats.StreamConfig{
  3412  			Name:     "ONE",
  3413  			Subjects: []string{"one.>"},
  3414  			Replicas: r,
  3415  		})
  3416  		require_NoError(t, err)
  3417  
  3418  		_, err = js.AddStream(&nats.StreamConfig{
  3419  			Name:     "TWO",
  3420  			Subjects: []string{"two.>"},
  3421  			Replicas: r,
  3422  		})
  3423  		require_NoError(t, err)
  3424  
  3425  		resp, err := nc.Request(JSApiStreamList, []byte("{}"), time.Second)
  3426  		require_NoError(t, err)
  3427  
  3428  		list := &JSApiStreamListResponse{}
  3429  		err = json.Unmarshal(resp.Data, list)
  3430  		require_NoError(t, err)
  3431  
  3432  		if len(list.Streams) != 2 {
  3433  			t.Fatalf("Expected 2 responses got %d", len(list.Streams))
  3434  		}
  3435  
  3436  		resp, err = nc.Request(JSApiStreamList, []byte(`{"subject":"two.x"}`), time.Second)
  3437  		require_NoError(t, err)
  3438  		list = &JSApiStreamListResponse{}
  3439  		err = json.Unmarshal(resp.Data, list)
  3440  		require_NoError(t, err)
  3441  		if len(list.Streams) != 1 {
  3442  			t.Fatalf("Expected 1 response got %d", len(list.Streams))
  3443  		}
  3444  		if list.Streams[0].Config.Name != "TWO" {
  3445  			t.Fatalf("Expected stream TWO in result got %#v", list.Streams[0])
  3446  		}
  3447  	}
  3448  
  3449  	t.Run("Single", func(t *testing.T) { testList(t, s, 1) })
  3450  	t.Run("Clustered", func(t *testing.T) { testList(t, c.randomServer(), 3) })
  3451  }
  3452  
  3453  func TestJetStreamClusterConsumerUpdates(t *testing.T) {
  3454  	s := RunBasicJetStreamServer(t)
  3455  	defer s.Shutdown()
  3456  
  3457  	c := createJetStreamClusterExplicit(t, "JSC", 5)
  3458  	defer c.shutdown()
  3459  
  3460  	testConsumerUpdate := func(t *testing.T, s *Server, replicas int) {
  3461  		nc, js := jsClientConnect(t, s)
  3462  		defer nc.Close()
  3463  
  3464  		// Create a stream.
  3465  		_, err := js.AddStream(&nats.StreamConfig{
  3466  			Name:     "TEST",
  3467  			Subjects: []string{"foo", "bar"},
  3468  			Replicas: replicas,
  3469  		})
  3470  		require_NoError(t, err)
  3471  
  3472  		for i := 0; i < 100; i++ {
  3473  			js.PublishAsync("foo", []byte("OK"))
  3474  		}
  3475  
  3476  		cfg := &nats.ConsumerConfig{
  3477  			Durable:        "dlc",
  3478  			Description:    "Update TEST",
  3479  			FilterSubject:  "foo",
  3480  			DeliverSubject: "d.foo",
  3481  			AckPolicy:      nats.AckExplicitPolicy,
  3482  			AckWait:        time.Minute,
  3483  			MaxDeliver:     5,
  3484  			MaxAckPending:  50,
  3485  		}
  3486  		_, err = js.AddConsumer("TEST", cfg)
  3487  		require_NoError(t, err)
  3488  
  3489  		// Update delivery subject, which worked before, but upon review had issues unless replica count == clustered size.
  3490  		cfg.DeliverSubject = "d.bar"
  3491  		_, err = js.AddConsumer("TEST", cfg)
  3492  		require_NoError(t, err)
  3493  
  3494  		// Bind deliver subject.
  3495  		sub, err := nc.SubscribeSync("d.bar")
  3496  		require_NoError(t, err)
  3497  		defer sub.Unsubscribe()
  3498  
  3499  		ncfg := *cfg
  3500  		// Deliver Subject
  3501  		ncfg.DeliverSubject = "d.baz"
  3502  		// Description
  3503  		cfg.Description = "New Description"
  3504  		_, err = js.UpdateConsumer("TEST", cfg)
  3505  		require_NoError(t, err)
  3506  
  3507  		// MaxAckPending
  3508  		checkSubsPending(t, sub, 50)
  3509  		cfg.MaxAckPending = 75
  3510  		_, err = js.UpdateConsumer("TEST", cfg)
  3511  		require_NoError(t, err)
  3512  		checkSubsPending(t, sub, 75)
  3513  
  3514  		// Drain sub, do not ack first ten though so we can test shortening AckWait.
  3515  		for i := 0; i < 100; i++ {
  3516  			m, err := sub.NextMsg(time.Second)
  3517  			require_NoError(t, err)
  3518  			if i >= 10 {
  3519  				m.AckSync()
  3520  			}
  3521  		}
  3522  
  3523  		// AckWait
  3524  		checkSubsPending(t, sub, 0)
  3525  		cfg.AckWait = 200 * time.Millisecond
  3526  		_, err = js.UpdateConsumer("TEST", cfg)
  3527  		require_NoError(t, err)
  3528  		checkSubsPending(t, sub, 10)
  3529  
  3530  		// Rate Limit
  3531  		cfg.RateLimit = 8 * 1024
  3532  		_, err = js.UpdateConsumer("TEST", cfg)
  3533  		require_NoError(t, err)
  3534  
  3535  		cfg.RateLimit = 0
  3536  		_, err = js.UpdateConsumer("TEST", cfg)
  3537  		require_NoError(t, err)
  3538  
  3539  		// These all should fail.
  3540  		ncfg = *cfg
  3541  		ncfg.DeliverPolicy = nats.DeliverLastPolicy
  3542  		_, err = js.UpdateConsumer("TEST", &ncfg)
  3543  		require_Error(t, err)
  3544  
  3545  		ncfg = *cfg
  3546  		ncfg.OptStartSeq = 22
  3547  		_, err = js.UpdateConsumer("TEST", &ncfg)
  3548  		require_Error(t, err)
  3549  
  3550  		ncfg = *cfg
  3551  		now := time.Now()
  3552  		ncfg.OptStartTime = &now
  3553  		_, err = js.UpdateConsumer("TEST", &ncfg)
  3554  		require_Error(t, err)
  3555  
  3556  		ncfg = *cfg
  3557  		ncfg.AckPolicy = nats.AckAllPolicy
  3558  		_, err = js.UpdateConsumer("TEST", &ncfg)
  3559  		require_Error(t, err)
  3560  
  3561  		ncfg = *cfg
  3562  		ncfg.ReplayPolicy = nats.ReplayOriginalPolicy
  3563  		_, err = js.UpdateConsumer("TEST", &ncfg)
  3564  		require_Error(t, err)
  3565  
  3566  		ncfg = *cfg
  3567  		ncfg.Heartbeat = time.Second
  3568  		_, err = js.UpdateConsumer("TEST", &ncfg)
  3569  		require_Error(t, err)
  3570  
  3571  		ncfg = *cfg
  3572  		ncfg.FlowControl = true
  3573  		_, err = js.UpdateConsumer("TEST", &ncfg)
  3574  		require_Error(t, err)
  3575  
  3576  	}
  3577  
  3578  	t.Run("Single", func(t *testing.T) { testConsumerUpdate(t, s, 1) })
  3579  	t.Run("Clustered", func(t *testing.T) { testConsumerUpdate(t, c.randomServer(), 2) })
  3580  }
  3581  
  3582  func TestJetStreamClusterConsumerMaxDeliverUpdate(t *testing.T) {
  3583  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  3584  	defer c.shutdown()
  3585  
  3586  	nc, js := jsClientConnect(t, c.randomServer())
  3587  	defer nc.Close()
  3588  
  3589  	_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"foo"}, Replicas: 3})
  3590  	require_NoError(t, err)
  3591  
  3592  	maxDeliver := 2
  3593  	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{
  3594  		Durable:       "ard",
  3595  		AckPolicy:     nats.AckExplicitPolicy,
  3596  		FilterSubject: "foo",
  3597  		MaxDeliver:    maxDeliver,
  3598  	})
  3599  	require_NoError(t, err)
  3600  
  3601  	sub, err := js.PullSubscribe("foo", "ard")
  3602  	require_NoError(t, err)
  3603  
  3604  	checkMaxDeliver := func() {
  3605  		t.Helper()
  3606  		for i := 0; i <= maxDeliver; i++ {
  3607  			msgs, err := sub.Fetch(2, nats.MaxWait(100*time.Millisecond))
  3608  			if i < maxDeliver {
  3609  				require_NoError(t, err)
  3610  				require_Len(t, 1, len(msgs))
  3611  				_ = msgs[0].Nak()
  3612  			} else {
  3613  				require_Error(t, err, nats.ErrTimeout)
  3614  			}
  3615  		}
  3616  	}
  3617  
  3618  	_, err = js.Publish("foo", []byte("Hello"))
  3619  	require_NoError(t, err)
  3620  	checkMaxDeliver()
  3621  
  3622  	// update maxDeliver
  3623  	maxDeliver++
  3624  	_, err = js.UpdateConsumer("TEST", &nats.ConsumerConfig{
  3625  		Durable:       "ard",
  3626  		AckPolicy:     nats.AckExplicitPolicy,
  3627  		FilterSubject: "foo",
  3628  		MaxDeliver:    maxDeliver,
  3629  	})
  3630  	require_NoError(t, err)
  3631  
  3632  	_, err = js.Publish("foo", []byte("Hello"))
  3633  	require_NoError(t, err)
  3634  	checkMaxDeliver()
  3635  }
  3636  
  3637  func TestJetStreamClusterAccountReservations(t *testing.T) {
  3638  	c := createJetStreamClusterWithTemplate(t, jsClusterMaxBytesAccountLimitTempl, "C1", 3)
  3639  	defer c.shutdown()
  3640  
  3641  	s := c.randomServer()
  3642  	nc, js := jsClientConnect(t, s)
  3643  	defer nc.Close()
  3644  	accMax := 3
  3645  
  3646  	errResources := errors.New("nats: insufficient storage resources available")
  3647  
  3648  	test := func(t *testing.T, replica int) {
  3649  		mb := int64((1+accMax)-replica) * 1024 * 1024 * 1024 // GB, corrected for replication factor
  3650  		_, err := js.AddStream(&nats.StreamConfig{Name: "S1", Subjects: []string{"s1"}, MaxBytes: mb, Replicas: replica})
  3651  		require_NoError(t, err)
  3652  
  3653  		_, err = js.AddStream(&nats.StreamConfig{Name: "S2", Subjects: []string{"s2"}, MaxBytes: 1024, Replicas: replica})
  3654  		require_Error(t, err, errResources)
  3655  
  3656  		_, err = js.UpdateStream(&nats.StreamConfig{Name: "S1", Subjects: []string{"s1"}, MaxBytes: mb / 2, Replicas: replica})
  3657  		require_NoError(t, err)
  3658  
  3659  		_, err = js.AddStream(&nats.StreamConfig{Name: "S2", Subjects: []string{"s2"}, MaxBytes: mb / 2, Replicas: replica})
  3660  		require_NoError(t, err)
  3661  
  3662  		_, err = js.AddStream(&nats.StreamConfig{Name: "S3", Subjects: []string{"s3"}, MaxBytes: 1024, Replicas: replica})
  3663  		require_Error(t, err, errResources)
  3664  
  3665  		_, err = js.UpdateStream(&nats.StreamConfig{Name: "S2", Subjects: []string{"s2"}, MaxBytes: mb/2 + 1, Replicas: replica})
  3666  		require_Error(t, err, errResources)
  3667  
  3668  		require_NoError(t, js.DeleteStream("S1"))
  3669  		require_NoError(t, js.DeleteStream("S2"))
  3670  	}
  3671  	test(t, 3)
  3672  	test(t, 1)
  3673  }
  3674  
  3675  func TestJetStreamClusterConcurrentAccountLimits(t *testing.T) {
  3676  	c := createJetStreamClusterWithTemplate(t, jsClusterMaxBytesAccountLimitTempl, "cluster", 3)
  3677  	defer c.shutdown()
  3678  
  3679  	startCh := make(chan bool)
  3680  	var wg sync.WaitGroup
  3681  	var swg sync.WaitGroup
  3682  	failCount := int32(0)
  3683  
  3684  	start := func(name string) {
  3685  		wg.Add(1)
  3686  		defer wg.Done()
  3687  
  3688  		s := c.randomServer()
  3689  		nc, js := jsClientConnect(t, s)
  3690  		defer nc.Close()
  3691  
  3692  		swg.Done()
  3693  		<-startCh
  3694  
  3695  		_, err := js.AddStream(&nats.StreamConfig{
  3696  			Name:     name,
  3697  			Replicas: 3,
  3698  			MaxBytes: 1024 * 1024 * 1024,
  3699  		})
  3700  		if err != nil {
  3701  			atomic.AddInt32(&failCount, 1)
  3702  			require_Equal(t, err.Error(), "nats: insufficient storage resources available")
  3703  		}
  3704  	}
  3705  
  3706  	swg.Add(2)
  3707  	go start("foo")
  3708  	go start("bar")
  3709  	swg.Wait()
  3710  	// Now start both at same time.
  3711  	close(startCh)
  3712  	wg.Wait()
  3713  	require_True(t, failCount == 1)
  3714  }
  3715  
  3716  func TestJetStreamClusterBalancedPlacement(t *testing.T) {
  3717  	c := createJetStreamClusterWithTemplate(t, jsClusterMaxBytesTempl, "CB", 5)
  3718  	defer c.shutdown()
  3719  
  3720  	nc, js := jsClientConnect(t, c.randomServer())
  3721  	defer nc.Close()
  3722  
  3723  	// We have 10GB (2GB X 5) available.
  3724  	// Use MaxBytes for ease of test (used works too) and place 5 1GB streams with R=2.
  3725  	for i := 1; i <= 5; i++ {
  3726  		_, err := js.AddStream(&nats.StreamConfig{
  3727  			Name:     fmt.Sprintf("S-%d", i),
  3728  			Replicas: 2,
  3729  			MaxBytes: 1 * 1024 * 1024 * 1024,
  3730  		})
  3731  		require_NoError(t, err)
  3732  	}
  3733  	// Make sure the next one fails properly.
  3734  	_, err := js.AddStream(&nats.StreamConfig{
  3735  		Name:     "FAIL",
  3736  		Replicas: 2,
  3737  		MaxBytes: 1 * 1024 * 1024 * 1024,
  3738  	})
  3739  	require_Contains(t, err.Error(), "no suitable peers for placement", "insufficient storage")
  3740  }
  3741  
  3742  func TestJetStreamClusterConsumerPendingBug(t *testing.T) {
  3743  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  3744  	defer c.shutdown()
  3745  
  3746  	nc, js := jsClientConnect(t, c.randomServer())
  3747  	defer nc.Close()
  3748  
  3749  	nc2, js2 := jsClientConnect(t, c.randomServer())
  3750  	defer nc2.Close()
  3751  
  3752  	_, err := js.AddStream(&nats.StreamConfig{Name: "foo", Replicas: 3})
  3753  	require_NoError(t, err)
  3754  
  3755  	startCh, doneCh := make(chan bool), make(chan error)
  3756  	go func() {
  3757  		<-startCh
  3758  		_, err := js2.AddConsumer("foo", &nats.ConsumerConfig{
  3759  			Durable:        "dlc",
  3760  			FilterSubject:  "foo",
  3761  			DeliverSubject: "x",
  3762  		})
  3763  		doneCh <- err
  3764  	}()
  3765  
  3766  	n := 10_000
  3767  	for i := 0; i < n; i++ {
  3768  		nc.Publish("foo", []byte("ok"))
  3769  		if i == 222 {
  3770  			startCh <- true
  3771  		}
  3772  	}
  3773  	// Wait for them to all be there.
  3774  	checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
  3775  		si, err := js.StreamInfo("foo")
  3776  		require_NoError(t, err)
  3777  		if si.State.Msgs != uint64(n) {
  3778  			return fmt.Errorf("Not received all messages")
  3779  		}
  3780  		return nil
  3781  	})
  3782  
  3783  	select {
  3784  	case err := <-doneCh:
  3785  		if err != nil {
  3786  			t.Fatalf("Error creating consumer: %v", err)
  3787  		}
  3788  	case <-time.After(5 * time.Second):
  3789  		t.Fatalf("Timed out?")
  3790  	}
  3791  	checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
  3792  		ci, err := js.ConsumerInfo("foo", "dlc")
  3793  		require_NoError(t, err)
  3794  		if ci.NumPending != uint64(n) {
  3795  			return fmt.Errorf("Expected NumPending to be %d, got %d", n, ci.NumPending)
  3796  		}
  3797  		return nil
  3798  	})
  3799  }
  3800  
  3801  func TestJetStreamClusterPullPerf(t *testing.T) {
  3802  	skip(t)
  3803  
  3804  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  3805  	defer c.shutdown()
  3806  
  3807  	nc, js := jsClientConnect(t, c.randomServer())
  3808  	defer nc.Close()
  3809  
  3810  	js.AddStream(&nats.StreamConfig{Name: "f22"})
  3811  	defer js.DeleteStream("f22")
  3812  
  3813  	n, msg := 1_000_000, []byte(strings.Repeat("A", 1000))
  3814  	for i := 0; i < n; i++ {
  3815  		js.PublishAsync("f22", msg)
  3816  	}
  3817  	select {
  3818  	case <-js.PublishAsyncComplete():
  3819  	case <-time.After(10 * time.Second):
  3820  		t.Fatalf("Did not receive completion signal")
  3821  	}
  3822  
  3823  	si, err := js.StreamInfo("f22")
  3824  	require_NoError(t, err)
  3825  
  3826  	fmt.Printf("msgs: %d, total_bytes: %v\n", si.State.Msgs, friendlyBytes(int64(si.State.Bytes)))
  3827  
  3828  	// OrderedConsumer - fastest push based.
  3829  	start := time.Now()
  3830  	received, done := 0, make(chan bool)
  3831  	_, err = js.Subscribe("f22", func(m *nats.Msg) {
  3832  		received++
  3833  		if received >= n {
  3834  			done <- true
  3835  		}
  3836  	}, nats.OrderedConsumer())
  3837  	require_NoError(t, err)
  3838  
  3839  	// Wait to receive all messages.
  3840  	select {
  3841  	case <-done:
  3842  	case <-time.After(30 * time.Second):
  3843  		t.Fatalf("Did not receive all of our messages")
  3844  	}
  3845  
  3846  	tt := time.Since(start)
  3847  	fmt.Printf("Took %v to receive %d msgs\n", tt, n)
  3848  	fmt.Printf("%.0f msgs/s\n", float64(n)/tt.Seconds())
  3849  	fmt.Printf("%.0f mb/s\n\n", float64(si.State.Bytes/(1024*1024))/tt.Seconds())
  3850  
  3851  	// Now do pull based, this is custom for now.
  3852  	// Current nats.PullSubscribe maxes at about 1/2 the performance even with large batches.
  3853  	_, err = js.AddConsumer("f22", &nats.ConsumerConfig{
  3854  		Durable:       "dlc",
  3855  		AckPolicy:     nats.AckAllPolicy,
  3856  		MaxAckPending: 1000,
  3857  	})
  3858  	require_NoError(t, err)
  3859  
  3860  	r := 0
  3861  	_, err = nc.Subscribe("xx", func(m *nats.Msg) {
  3862  		r++
  3863  		if r >= n {
  3864  			done <- true
  3865  		}
  3866  		if r%750 == 0 {
  3867  			m.AckSync()
  3868  		}
  3869  	})
  3870  	require_NoError(t, err)
  3871  
  3872  	// Simulate an non-ending request.
  3873  	req := &JSApiConsumerGetNextRequest{Batch: n, Expires: 60 * time.Second}
  3874  	jreq, err := json.Marshal(req)
  3875  	require_NoError(t, err)
  3876  
  3877  	start = time.Now()
  3878  	rsubj := fmt.Sprintf(JSApiRequestNextT, "f22", "dlc")
  3879  	err = nc.PublishRequest(rsubj, "xx", jreq)
  3880  	require_NoError(t, err)
  3881  
  3882  	// Wait to receive all messages.
  3883  	select {
  3884  	case <-done:
  3885  	case <-time.After(60 * time.Second):
  3886  		t.Fatalf("Did not receive all of our messages")
  3887  	}
  3888  
  3889  	tt = time.Since(start)
  3890  	fmt.Printf("Took %v to receive %d msgs\n", tt, n)
  3891  	fmt.Printf("%.0f msgs/s\n", float64(n)/tt.Seconds())
  3892  	fmt.Printf("%.0f mb/s\n\n", float64(si.State.Bytes/(1024*1024))/tt.Seconds())
  3893  }
  3894  
  3895  // Test that we get the right signaling when a consumer leader change occurs for any pending requests.
  3896  func TestJetStreamClusterPullConsumerLeaderChange(t *testing.T) {
  3897  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  3898  	defer c.shutdown()
  3899  
  3900  	nc, js := jsClientConnect(t, c.randomServer())
  3901  	defer nc.Close()
  3902  
  3903  	_, err := js.AddStream(&nats.StreamConfig{
  3904  		Name:     "TEST",
  3905  		Replicas: 3,
  3906  		Subjects: []string{"foo"},
  3907  	})
  3908  	require_NoError(t, err)
  3909  
  3910  	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{
  3911  		Durable:       "dlc",
  3912  		AckPolicy:     nats.AckExplicitPolicy,
  3913  		FilterSubject: "foo",
  3914  	})
  3915  	require_NoError(t, err)
  3916  
  3917  	rsubj := fmt.Sprintf(JSApiRequestNextT, "TEST", "dlc")
  3918  	sub, err := nc.SubscribeSync("reply")
  3919  	require_NoError(t, err)
  3920  	defer sub.Unsubscribe()
  3921  
  3922  	drainSub := func() {
  3923  		t.Helper()
  3924  		for _, err := sub.NextMsg(0); err == nil; _, err = sub.NextMsg(0) {
  3925  		}
  3926  		checkSubsPending(t, sub, 0)
  3927  	}
  3928  
  3929  	// Queue up a request that can live for a bit.
  3930  	req := &JSApiConsumerGetNextRequest{Expires: 2 * time.Second}
  3931  	jreq, err := json.Marshal(req)
  3932  	require_NoError(t, err)
  3933  	err = nc.PublishRequest(rsubj, "reply", jreq)
  3934  	require_NoError(t, err)
  3935  	// Make sure request is recorded and replicated.
  3936  	time.Sleep(100 * time.Millisecond)
  3937  	checkSubsPending(t, sub, 0)
  3938  
  3939  	// Now have consumer leader change and make sure we get signaled that our request is not valid.
  3940  	_, err = nc.Request(fmt.Sprintf(JSApiConsumerLeaderStepDownT, "TEST", "dlc"), nil, time.Second)
  3941  	require_NoError(t, err)
  3942  	c.waitOnConsumerLeader("$G", "TEST", "dlc")
  3943  	checkSubsPending(t, sub, 1)
  3944  	m, err := sub.NextMsg(0)
  3945  	require_NoError(t, err)
  3946  	// Make sure this is an alert that tells us our request is no longer valid.
  3947  	if m.Header.Get("Status") != "409" {
  3948  		t.Fatalf("Expected a 409 status code, got %q", m.Header.Get("Status"))
  3949  	}
  3950  	checkSubsPending(t, sub, 0)
  3951  
  3952  	// Add a few messages to the stream to fulfill a request.
  3953  	for i := 0; i < 10; i++ {
  3954  		_, err := js.Publish("foo", []byte("HELLO"))
  3955  		require_NoError(t, err)
  3956  	}
  3957  	req = &JSApiConsumerGetNextRequest{Batch: 10, Expires: 10 * time.Second}
  3958  	jreq, err = json.Marshal(req)
  3959  	require_NoError(t, err)
  3960  	err = nc.PublishRequest(rsubj, "reply", jreq)
  3961  	require_NoError(t, err)
  3962  	checkSubsPending(t, sub, 10)
  3963  	drainSub()
  3964  
  3965  	// Now do a leader change again, make sure we do not get anything about that request.
  3966  	_, err = nc.Request(fmt.Sprintf(JSApiConsumerLeaderStepDownT, "TEST", "dlc"), nil, time.Second)
  3967  	require_NoError(t, err)
  3968  	c.waitOnConsumerLeader("$G", "TEST", "dlc")
  3969  	time.Sleep(100 * time.Millisecond)
  3970  	checkSubsPending(t, sub, 0)
  3971  
  3972  	// Make sure we do not get anything if we expire, etc.
  3973  	req = &JSApiConsumerGetNextRequest{Batch: 10, Expires: 250 * time.Millisecond}
  3974  	jreq, err = json.Marshal(req)
  3975  	require_NoError(t, err)
  3976  	err = nc.PublishRequest(rsubj, "reply", jreq)
  3977  	require_NoError(t, err)
  3978  	// Let it expire.
  3979  	time.Sleep(350 * time.Millisecond)
  3980  	checkSubsPending(t, sub, 1)
  3981  
  3982  	// Now do a leader change again, make sure we do not get anything about that request.
  3983  	_, err = nc.Request(fmt.Sprintf(JSApiConsumerLeaderStepDownT, "TEST", "dlc"), nil, time.Second)
  3984  	require_NoError(t, err)
  3985  	c.waitOnConsumerLeader("$G", "TEST", "dlc")
  3986  	checkSubsPending(t, sub, 1)
  3987  }
  3988  
  3989  func TestJetStreamClusterEphemeralPullConsumerServerShutdown(t *testing.T) {
  3990  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  3991  	defer c.shutdown()
  3992  
  3993  	nc, js := jsClientConnect(t, c.randomServer())
  3994  	defer nc.Close()
  3995  
  3996  	_, err := js.AddStream(&nats.StreamConfig{
  3997  		Name:     "TEST",
  3998  		Replicas: 2,
  3999  		Subjects: []string{"foo"},
  4000  	})
  4001  	require_NoError(t, err)
  4002  
  4003  	ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{
  4004  		AckPolicy:     nats.AckExplicitPolicy,
  4005  		FilterSubject: "foo",
  4006  	})
  4007  	require_NoError(t, err)
  4008  
  4009  	rsubj := fmt.Sprintf(JSApiRequestNextT, "TEST", ci.Name)
  4010  	sub, err := nc.SubscribeSync("reply")
  4011  	require_NoError(t, err)
  4012  	defer sub.Unsubscribe()
  4013  
  4014  	// Queue up a request that can live for a bit.
  4015  	req := &JSApiConsumerGetNextRequest{Expires: 2 * time.Second}
  4016  	jreq, err := json.Marshal(req)
  4017  	require_NoError(t, err)
  4018  	err = nc.PublishRequest(rsubj, "reply", jreq)
  4019  	require_NoError(t, err)
  4020  	// Make sure request is recorded and replicated.
  4021  	time.Sleep(100 * time.Millisecond)
  4022  	checkSubsPending(t, sub, 0)
  4023  
  4024  	// Now shutdown the server where this ephemeral lives.
  4025  	c.consumerLeader("$G", "TEST", ci.Name).Shutdown()
  4026  	checkSubsPending(t, sub, 1)
  4027  	m, err := sub.NextMsg(0)
  4028  	require_NoError(t, err)
  4029  	// Make sure this is an alert that tells us our request is no longer valid.
  4030  	if m.Header.Get("Status") != "409" {
  4031  		t.Fatalf("Expected a 409 status code, got %q", m.Header.Get("Status"))
  4032  	}
  4033  }
  4034  
  4035  func TestJetStreamClusterNAKBackoffs(t *testing.T) {
  4036  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  4037  	defer c.shutdown()
  4038  
  4039  	nc, js := jsClientConnect(t, c.randomServer())
  4040  	defer nc.Close()
  4041  
  4042  	_, err := js.AddStream(&nats.StreamConfig{
  4043  		Name:     "TEST",
  4044  		Replicas: 2,
  4045  		Subjects: []string{"foo"},
  4046  	})
  4047  	require_NoError(t, err)
  4048  
  4049  	_, err = js.Publish("foo", []byte("NAK"))
  4050  	require_NoError(t, err)
  4051  
  4052  	sub, err := js.SubscribeSync("foo", nats.Durable("dlc"), nats.AckWait(5*time.Second), nats.ManualAck())
  4053  	require_NoError(t, err)
  4054  	defer sub.Unsubscribe()
  4055  
  4056  	checkSubsPending(t, sub, 1)
  4057  	m, err := sub.NextMsg(0)
  4058  	require_NoError(t, err)
  4059  
  4060  	// Default nak will redeliver almost immediately.
  4061  	// We can now add a parse duration string after whitespace to the NAK proto.
  4062  	start := time.Now()
  4063  	dnak := []byte(fmt.Sprintf("%s 200ms", AckNak))
  4064  	m.Respond(dnak)
  4065  	checkSubsPending(t, sub, 1)
  4066  	elapsed := time.Since(start)
  4067  	if elapsed < 200*time.Millisecond {
  4068  		t.Fatalf("Took too short to redeliver, expected ~200ms but got %v", elapsed)
  4069  	}
  4070  	if elapsed > time.Second {
  4071  		t.Fatalf("Took too long to redeliver, expected ~200ms but got %v", elapsed)
  4072  	}
  4073  
  4074  	// Now let's delay and make sure that is honored when a new consumer leader takes over.
  4075  	m, err = sub.NextMsg(0)
  4076  	require_NoError(t, err)
  4077  	dnak = []byte(fmt.Sprintf("%s 1s", AckNak))
  4078  	start = time.Now()
  4079  	m.Respond(dnak)
  4080  	// Wait for NAK state to propagate.
  4081  	time.Sleep(100 * time.Millisecond)
  4082  	// Ask leader to stepdown.
  4083  	_, err = nc.Request(fmt.Sprintf(JSApiConsumerLeaderStepDownT, "TEST", "dlc"), nil, time.Second)
  4084  	require_NoError(t, err)
  4085  	c.waitOnConsumerLeader("$G", "TEST", "dlc")
  4086  	checkSubsPending(t, sub, 1)
  4087  	elapsed = time.Since(start)
  4088  	if elapsed < time.Second {
  4089  		t.Fatalf("Took too short to redeliver, expected ~1s but got %v", elapsed)
  4090  	}
  4091  	if elapsed > 3*time.Second {
  4092  		t.Fatalf("Took too long to redeliver, expected ~1s but got %v", elapsed)
  4093  	}
  4094  
  4095  	// Test json version.
  4096  	delay, err := json.Marshal(&ConsumerNakOptions{Delay: 20 * time.Millisecond})
  4097  	require_NoError(t, err)
  4098  	dnak = []byte(fmt.Sprintf("%s  %s", AckNak, delay))
  4099  	m, err = sub.NextMsg(0)
  4100  	require_NoError(t, err)
  4101  	start = time.Now()
  4102  	m.Respond(dnak)
  4103  	checkSubsPending(t, sub, 1)
  4104  	elapsed = time.Since(start)
  4105  	if elapsed < 20*time.Millisecond {
  4106  		t.Fatalf("Took too short to redeliver, expected ~20ms but got %v", elapsed)
  4107  	}
  4108  	if elapsed > 100*time.Millisecond {
  4109  		t.Fatalf("Took too long to redeliver, expected ~20ms but got %v", elapsed)
  4110  	}
  4111  }
  4112  
  4113  func TestJetStreamClusterRedeliverBackoffs(t *testing.T) {
  4114  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  4115  	defer c.shutdown()
  4116  
  4117  	nc, js := jsClientConnect(t, c.randomServer())
  4118  	defer nc.Close()
  4119  
  4120  	_, err := js.AddStream(&nats.StreamConfig{
  4121  		Name:     "TEST",
  4122  		Replicas: 2,
  4123  		Subjects: []string{"foo", "bar"},
  4124  	})
  4125  	require_NoError(t, err)
  4126  
  4127  	// Produce some messages on bar so that when we create the consumer
  4128  	// on "foo", we don't have a 1:1 between consumer/stream sequence.
  4129  	for i := 0; i < 10; i++ {
  4130  		js.Publish("bar", []byte("msg"))
  4131  	}
  4132  
  4133  	// Test when BackOff is configured and AckWait and MaxDeliver are as well.
  4134  	// Currently the BackOff will override AckWait, but we want MaxDeliver to be set to be at least len(BackOff)+1.
  4135  	ccReq := &CreateConsumerRequest{
  4136  		Stream: "TEST",
  4137  		Config: ConsumerConfig{
  4138  			Durable:        "dlc",
  4139  			FilterSubject:  "foo",
  4140  			DeliverSubject: "x",
  4141  			AckPolicy:      AckExplicit,
  4142  			AckWait:        30 * time.Second,
  4143  			MaxDeliver:     2,
  4144  			BackOff:        []time.Duration{25 * time.Millisecond, 100 * time.Millisecond, 250 * time.Millisecond},
  4145  		},
  4146  	}
  4147  	req, err := json.Marshal(ccReq)
  4148  	require_NoError(t, err)
  4149  	resp, err := nc.Request(fmt.Sprintf(JSApiDurableCreateT, "TEST", "dlc"), req, time.Second)
  4150  	require_NoError(t, err)
  4151  	var ccResp JSApiConsumerCreateResponse
  4152  	err = json.Unmarshal(resp.Data, &ccResp)
  4153  	require_NoError(t, err)
  4154  	if ccResp.Error == nil || ccResp.Error.ErrCode != 10116 {
  4155  		t.Fatalf("Expected an error when MaxDeliver is <= len(BackOff), got %+v", ccResp.Error)
  4156  	}
  4157  
  4158  	// Set MaxDeliver to 6.
  4159  	ccReq.Config.MaxDeliver = 6
  4160  	req, err = json.Marshal(ccReq)
  4161  	require_NoError(t, err)
  4162  	resp, err = nc.Request(fmt.Sprintf(JSApiDurableCreateT, "TEST", "dlc"), req, time.Second)
  4163  	require_NoError(t, err)
  4164  	ccResp.Error = nil
  4165  	err = json.Unmarshal(resp.Data, &ccResp)
  4166  	require_NoError(t, err)
  4167  	if ccResp.Error != nil {
  4168  		t.Fatalf("Unexpected error: %+v", ccResp.Error)
  4169  	}
  4170  	if cfg := ccResp.ConsumerInfo.Config; cfg.AckWait != 25*time.Millisecond || cfg.MaxDeliver != 6 {
  4171  		t.Fatalf("Expected AckWait to be first BackOff (25ms) and MaxDeliver set to 6, got %+v", cfg)
  4172  	}
  4173  
  4174  	var received []time.Time
  4175  	var mu sync.Mutex
  4176  
  4177  	sub, err := nc.Subscribe("x", func(m *nats.Msg) {
  4178  		mu.Lock()
  4179  		received = append(received, time.Now())
  4180  		mu.Unlock()
  4181  	})
  4182  	require_NoError(t, err)
  4183  
  4184  	// Send a message.
  4185  	start := time.Now()
  4186  	_, err = js.Publish("foo", []byte("m22"))
  4187  	require_NoError(t, err)
  4188  
  4189  	checkFor(t, 5*time.Second, 500*time.Millisecond, func() error {
  4190  		mu.Lock()
  4191  		nr := len(received)
  4192  		mu.Unlock()
  4193  		if nr >= 6 {
  4194  			return nil
  4195  		}
  4196  		return fmt.Errorf("Only seen %d of 6", nr)
  4197  	})
  4198  	sub.Unsubscribe()
  4199  
  4200  	expected := ccReq.Config.BackOff
  4201  	// We expect the MaxDeliver to go until 6, so fill in two additional ones.
  4202  	expected = append(expected, 250*time.Millisecond, 250*time.Millisecond)
  4203  	for i, tr := range received[1:] {
  4204  		d := tr.Sub(start)
  4205  		// Adjust start for next calcs.
  4206  		start = start.Add(d)
  4207  		if d < expected[i] || d > expected[i]*2 {
  4208  			t.Fatalf("Timing is off for %d, expected ~%v, but got %v", i, expected[i], d)
  4209  		}
  4210  	}
  4211  }
  4212  
  4213  func TestJetStreamClusterConsumerUpgrade(t *testing.T) {
  4214  	s := RunBasicJetStreamServer(t)
  4215  	defer s.Shutdown()
  4216  
  4217  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  4218  	defer c.shutdown()
  4219  
  4220  	testUpdate := func(t *testing.T, s *Server) {
  4221  		nc, js := jsClientConnect(t, s)
  4222  		defer nc.Close()
  4223  		_, err := js.AddStream(&nats.StreamConfig{Name: "X"})
  4224  		require_NoError(t, err)
  4225  		_, err = js.Publish("X", []byte("OK"))
  4226  		require_NoError(t, err)
  4227  		// First create a consumer that is push based.
  4228  		_, err = js.AddConsumer("X", &nats.ConsumerConfig{Durable: "dlc", DeliverSubject: "Y"})
  4229  		require_NoError(t, err)
  4230  	}
  4231  
  4232  	t.Run("Single", func(t *testing.T) { testUpdate(t, s) })
  4233  	t.Run("Clustered", func(t *testing.T) { testUpdate(t, c.randomServer()) })
  4234  }
  4235  
  4236  func TestJetStreamClusterAddConsumerWithInfo(t *testing.T) {
  4237  	s := RunBasicJetStreamServer(t)
  4238  	defer s.Shutdown()
  4239  
  4240  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  4241  	defer c.shutdown()
  4242  
  4243  	testConsInfo := func(t *testing.T, s *Server) {
  4244  		nc, js := jsClientConnect(t, s)
  4245  		defer nc.Close()
  4246  
  4247  		_, err := js.AddStream(&nats.StreamConfig{
  4248  			Name:     "TEST",
  4249  			Subjects: []string{"foo"},
  4250  		})
  4251  		require_NoError(t, err)
  4252  
  4253  		for i := 0; i < 10; i++ {
  4254  			_, err = js.Publish("foo", []byte("msg"))
  4255  			require_NoError(t, err)
  4256  		}
  4257  
  4258  		for i := 0; i < 100; i++ {
  4259  			inbox := nats.NewInbox()
  4260  			sub := natsSubSync(t, nc, inbox)
  4261  
  4262  			ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{
  4263  				DeliverSubject: inbox,
  4264  				DeliverPolicy:  nats.DeliverAllPolicy,
  4265  				FilterSubject:  "foo",
  4266  				AckPolicy:      nats.AckExplicitPolicy,
  4267  			})
  4268  			require_NoError(t, err)
  4269  
  4270  			if ci.NumPending != 10 {
  4271  				t.Fatalf("Iter=%v - expected 10 messages pending on create, got %v", i+1, ci.NumPending)
  4272  			}
  4273  			js.DeleteConsumer("TEST", ci.Name)
  4274  			sub.Unsubscribe()
  4275  		}
  4276  	}
  4277  
  4278  	t.Run("Single", func(t *testing.T) { testConsInfo(t, s) })
  4279  	t.Run("Clustered", func(t *testing.T) { testConsInfo(t, c.randomServer()) })
  4280  }
  4281  
  4282  func TestJetStreamClusterStreamReplicaUpdates(t *testing.T) {
  4283  	c := createJetStreamClusterExplicit(t, "R7S", 7)
  4284  	defer c.shutdown()
  4285  
  4286  	// Client based API
  4287  	nc, js := jsClientConnect(t, c.randomServer())
  4288  	defer nc.Close()
  4289  
  4290  	// Start out at R1
  4291  	cfg := &nats.StreamConfig{
  4292  		Name:     "TEST",
  4293  		Subjects: []string{"foo"},
  4294  		Replicas: 1,
  4295  	}
  4296  	_, err := js.AddStream(cfg)
  4297  	require_NoError(t, err)
  4298  
  4299  	numMsgs := 1000
  4300  	for i := 0; i < numMsgs; i++ {
  4301  		js.PublishAsync("foo", []byte("HELLO WORLD"))
  4302  	}
  4303  	select {
  4304  	case <-js.PublishAsyncComplete():
  4305  	case <-time.After(5 * time.Second):
  4306  		t.Fatalf("Did not receive completion signal")
  4307  	}
  4308  
  4309  	updateReplicas := func(r int) {
  4310  		t.Helper()
  4311  		si, err := js.StreamInfo("TEST")
  4312  		require_NoError(t, err)
  4313  		leader := si.Cluster.Leader
  4314  
  4315  		cfg.Replicas = r
  4316  		_, err = js.UpdateStream(cfg)
  4317  		require_NoError(t, err)
  4318  		c.waitOnStreamLeader("$G", "TEST")
  4319  
  4320  		checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
  4321  			si, err = js.StreamInfo("TEST")
  4322  			require_NoError(t, err)
  4323  			if len(si.Cluster.Replicas) != r-1 {
  4324  				return fmt.Errorf("Expected %d replicas, got %d", r-1, len(si.Cluster.Replicas))
  4325  			}
  4326  			return nil
  4327  		})
  4328  
  4329  		// Make sure we kept same leader.
  4330  		if si.Cluster.Leader != leader {
  4331  			t.Fatalf("Leader changed, expected %q got %q", leader, si.Cluster.Leader)
  4332  		}
  4333  		// Make sure all are current.
  4334  		for _, r := range si.Cluster.Replicas {
  4335  			c.waitOnStreamCurrent(c.serverByName(r.Name), "$G", "TEST")
  4336  		}
  4337  		// Check msgs.
  4338  		if si.State.Msgs != uint64(numMsgs) {
  4339  			t.Fatalf("Expected %d msgs, got %d", numMsgs, si.State.Msgs)
  4340  		}
  4341  		// Make sure we have the right number of HA Assets running on the leader.
  4342  		s := c.serverByName(leader)
  4343  		jsi, err := s.Jsz(nil)
  4344  		require_NoError(t, err)
  4345  		nha := 1 // meta always present.
  4346  		if len(si.Cluster.Replicas) > 0 {
  4347  			nha++
  4348  		}
  4349  		if nha != jsi.HAAssets {
  4350  			t.Fatalf("Expected %d HA asset(s), but got %d", nha, jsi.HAAssets)
  4351  		}
  4352  	}
  4353  
  4354  	// Update from 1-3
  4355  	updateReplicas(3)
  4356  	// Update from 3-5
  4357  	updateReplicas(5)
  4358  	// Update from 5-3
  4359  	updateReplicas(3)
  4360  	// Update from 3-1
  4361  	updateReplicas(1)
  4362  }
  4363  
  4364  func TestJetStreamClusterStreamAndConsumerScaleUpAndDown(t *testing.T) {
  4365  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  4366  	defer c.shutdown()
  4367  
  4368  	// Client based API
  4369  	nc, js := jsClientConnect(t, c.randomServer())
  4370  	defer nc.Close()
  4371  
  4372  	// Start out at R3
  4373  	cfg := &nats.StreamConfig{
  4374  		Name:     "TEST",
  4375  		Subjects: []string{"foo"},
  4376  		Replicas: 3,
  4377  	}
  4378  	_, err := js.AddStream(cfg)
  4379  	require_NoError(t, err)
  4380  
  4381  	sub, err := js.SubscribeSync("foo", nats.Durable("cat"))
  4382  	require_NoError(t, err)
  4383  
  4384  	numMsgs := 10
  4385  	for i := 0; i < numMsgs; i++ {
  4386  		_, err := js.Publish("foo", []byte("HELLO WORLD"))
  4387  		require_NoError(t, err)
  4388  	}
  4389  	checkSubsPending(t, sub, numMsgs)
  4390  
  4391  	// Now ask leader to stepdown.
  4392  	rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST"), nil, time.Second)
  4393  	require_NoError(t, err)
  4394  
  4395  	var sdResp JSApiStreamLeaderStepDownResponse
  4396  	err = json.Unmarshal(rmsg.Data, &sdResp)
  4397  	require_NoError(t, err)
  4398  
  4399  	if sdResp.Error != nil || !sdResp.Success {
  4400  		t.Fatalf("Unexpected error: %+v", sdResp.Error)
  4401  	}
  4402  
  4403  	c.waitOnStreamLeader("$G", "TEST")
  4404  
  4405  	updateReplicas := func(r int) {
  4406  		t.Helper()
  4407  		cfg.Replicas = r
  4408  		_, err := js.UpdateStream(cfg)
  4409  		require_NoError(t, err)
  4410  		c.waitOnStreamLeader("$G", "TEST")
  4411  		c.waitOnConsumerLeader("$G", "TEST", "cat")
  4412  		ci, err := js.ConsumerInfo("TEST", "cat")
  4413  		require_NoError(t, err)
  4414  		if ci.Cluster.Leader == _EMPTY_ {
  4415  			t.Fatalf("Expected a consumer leader but got none in consumer info")
  4416  		}
  4417  		if len(ci.Cluster.Replicas)+1 != r {
  4418  			t.Fatalf("Expected consumer info to have %d peers, got %d", r, len(ci.Cluster.Replicas)+1)
  4419  		}
  4420  	}
  4421  
  4422  	// Capture leader, we want to make sure when we scale down this does not change.
  4423  	sl := c.streamLeader("$G", "TEST")
  4424  
  4425  	// Scale down to 1.
  4426  	updateReplicas(1)
  4427  
  4428  	if sl != c.streamLeader("$G", "TEST") {
  4429  		t.Fatalf("Expected same leader, but it changed")
  4430  	}
  4431  
  4432  	// Make sure we can still send to the stream.
  4433  	for i := 0; i < numMsgs; i++ {
  4434  		_, err := js.Publish("foo", []byte("HELLO WORLD"))
  4435  		require_NoError(t, err)
  4436  	}
  4437  
  4438  	si, err := js.StreamInfo("TEST")
  4439  	require_NoError(t, err)
  4440  	if si.State.Msgs != uint64(2*numMsgs) {
  4441  		t.Fatalf("Expected %d msgs, got %d", 3*numMsgs, si.State.Msgs)
  4442  	}
  4443  
  4444  	checkSubsPending(t, sub, 2*numMsgs)
  4445  
  4446  	// Now back up.
  4447  	updateReplicas(3)
  4448  
  4449  	// Send more.
  4450  	for i := 0; i < numMsgs; i++ {
  4451  		_, err := js.Publish("foo", []byte("HELLO WORLD"))
  4452  		require_NoError(t, err)
  4453  	}
  4454  
  4455  	si, err = js.StreamInfo("TEST")
  4456  	require_NoError(t, err)
  4457  	if si.State.Msgs != uint64(3*numMsgs) {
  4458  		t.Fatalf("Expected %d msgs, got %d", 3*numMsgs, si.State.Msgs)
  4459  	}
  4460  
  4461  	checkSubsPending(t, sub, 3*numMsgs)
  4462  
  4463  	// Make sure cluster replicas are current.
  4464  	checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
  4465  		si, err = js.StreamInfo("TEST")
  4466  		require_NoError(t, err)
  4467  		for _, r := range si.Cluster.Replicas {
  4468  			if !r.Current {
  4469  				return fmt.Errorf("Expected replica to be current: %+v", r)
  4470  			}
  4471  		}
  4472  		return nil
  4473  	})
  4474  
  4475  	checkState := func(s *Server) {
  4476  		t.Helper()
  4477  		checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
  4478  			mset, err := s.GlobalAccount().lookupStream("TEST")
  4479  			require_NoError(t, err)
  4480  			state := mset.state()
  4481  			if state.Msgs != uint64(3*numMsgs) || state.FirstSeq != 1 || state.LastSeq != 30 || state.Bytes != 1320 {
  4482  				return fmt.Errorf("Wrong state: %+v for server: %v", state, s)
  4483  			}
  4484  			return nil
  4485  		})
  4486  	}
  4487  
  4488  	// Now check each indidvidual stream on each server to make sure replication occurred.
  4489  	for _, s := range c.servers {
  4490  		checkState(s)
  4491  	}
  4492  }
  4493  
  4494  func TestJetStreamClusterInterestRetentionWithFilteredConsumersExtra(t *testing.T) {
  4495  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  4496  	defer c.shutdown()
  4497  
  4498  	subjectNameZero := "foo.bar"
  4499  	subjectNameOne := "foo.baz"
  4500  
  4501  	// Client based API
  4502  	nc, js := jsClientConnect(t, c.randomServer())
  4503  	defer nc.Close()
  4504  
  4505  	_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"foo.*"}, Retention: nats.InterestPolicy, Replicas: 3})
  4506  	require_NoError(t, err)
  4507  
  4508  	checkState := func(expected uint64) {
  4509  		t.Helper()
  4510  		checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
  4511  			si, err := js.StreamInfo("TEST")
  4512  			require_NoError(t, err)
  4513  			if si.State.Msgs != expected {
  4514  				return fmt.Errorf("Expected %d msgs, got %d", expected, si.State.Msgs)
  4515  			}
  4516  			return nil
  4517  		})
  4518  	}
  4519  
  4520  	subZero, err := js.PullSubscribe(subjectNameZero, "dlc-0")
  4521  	require_NoError(t, err)
  4522  
  4523  	subOne, err := js.PullSubscribe(subjectNameOne, "dlc-1")
  4524  	require_NoError(t, err)
  4525  
  4526  	msg := []byte("FILTERED")
  4527  	// Now send a bunch of messages
  4528  	for i := 0; i < 1000; i++ {
  4529  		_, err = js.PublishAsync(subjectNameZero, msg)
  4530  		require_NoError(t, err)
  4531  		_, err = js.PublishAsync(subjectNameOne, msg)
  4532  		require_NoError(t, err)
  4533  	}
  4534  
  4535  	// should be 2000 in total
  4536  	checkState(2000)
  4537  
  4538  	// fetch and acknowledge, count records to ensure no errors acknowledging
  4539  	getAndAckBatch := func(sub *nats.Subscription) {
  4540  		t.Helper()
  4541  		successCounter := 0
  4542  		msgs, err := sub.Fetch(1000)
  4543  		require_NoError(t, err)
  4544  
  4545  		for _, m := range msgs {
  4546  			err = m.AckSync()
  4547  			require_NoError(t, err)
  4548  			successCounter++
  4549  		}
  4550  		if successCounter != 1000 {
  4551  			t.Fatalf("Unexpected number of acknowledges %d for subscription %v", successCounter, sub)
  4552  		}
  4553  	}
  4554  
  4555  	// fetch records subscription zero
  4556  	getAndAckBatch(subZero)
  4557  	// fetch records for subscription one
  4558  	getAndAckBatch(subOne)
  4559  	// Make sure stream is zero.
  4560  	checkState(0)
  4561  }
  4562  
  4563  func TestJetStreamClusterStreamConsumersCount(t *testing.T) {
  4564  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  4565  	defer c.shutdown()
  4566  
  4567  	nc, js := jsClientConnect(t, c.randomServer())
  4568  	defer nc.Close()
  4569  
  4570  	sname := "TEST_STREAM_CONS_COUNT"
  4571  	_, err := js.AddStream(&nats.StreamConfig{Name: sname, Subjects: []string{"foo"}, Replicas: 3})
  4572  	require_NoError(t, err)
  4573  
  4574  	// Create some R1 consumers
  4575  	for i := 0; i < 10; i++ {
  4576  		inbox := nats.NewInbox()
  4577  		natsSubSync(t, nc, inbox)
  4578  		_, err = js.AddConsumer(sname, &nats.ConsumerConfig{DeliverSubject: inbox})
  4579  		require_NoError(t, err)
  4580  	}
  4581  
  4582  	// Now check that the consumer count in stream info/list is 10
  4583  	checkFor(t, 2*time.Second, 15*time.Millisecond, func() error {
  4584  		// Check stream info
  4585  		si, err := js.StreamInfo(sname)
  4586  		if err != nil {
  4587  			return fmt.Errorf("Error getting stream info: %v", err)
  4588  		}
  4589  		if n := si.State.Consumers; n != 10 {
  4590  			return fmt.Errorf("From StreamInfo, expecting 10 consumers, got %v", n)
  4591  		}
  4592  
  4593  		// Now from stream list
  4594  		for si := range js.StreamsInfo() {
  4595  			if n := si.State.Consumers; n != 10 {
  4596  				return fmt.Errorf("From StreamsInfo, expecting 10 consumers, got %v", n)
  4597  			}
  4598  		}
  4599  		return nil
  4600  	})
  4601  }
  4602  
  4603  func TestJetStreamClusterFilteredAndIdleConsumerNRGGrowth(t *testing.T) {
  4604  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  4605  	defer c.shutdown()
  4606  
  4607  	nc, js := jsClientConnect(t, c.randomServer())
  4608  	defer nc.Close()
  4609  
  4610  	sname := "TEST"
  4611  	_, err := js.AddStream(&nats.StreamConfig{Name: sname, Subjects: []string{"foo.*"}, Replicas: 3})
  4612  	require_NoError(t, err)
  4613  
  4614  	sub, err := js.SubscribeSync("foo.baz", nats.Durable("dlc"))
  4615  	require_NoError(t, err)
  4616  
  4617  	for i := 0; i < 10_000; i++ {
  4618  		js.PublishAsync("foo.bar", []byte("ok"))
  4619  	}
  4620  	select {
  4621  	case <-js.PublishAsyncComplete():
  4622  	case <-time.After(5 * time.Second):
  4623  		t.Fatalf("Did not receive completion signal")
  4624  	}
  4625  
  4626  	checkSubsPending(t, sub, 0)
  4627  
  4628  	// Grab consumer's underlying info and make sure NRG log not running away do to no-op skips on filtered consumer.
  4629  	// Need a non-leader for the consumer, they are only ones getting skip ops to keep delivered updated.
  4630  	cl := c.consumerLeader("$G", "TEST", "dlc")
  4631  	var s *Server
  4632  	for _, s = range c.servers {
  4633  		if s != cl {
  4634  			break
  4635  		}
  4636  	}
  4637  
  4638  	mset, err := s.GlobalAccount().lookupStream("TEST")
  4639  	require_NoError(t, err)
  4640  	o := mset.lookupConsumer("dlc")
  4641  	if o == nil {
  4642  		t.Fatalf("Error looking up consumer %q", "dlc")
  4643  	}
  4644  
  4645  	// compactNumMin from monitorConsumer is 8192 atm.
  4646  	const compactNumMin = 8192
  4647  	if entries, _ := o.raftNode().Size(); entries > compactNumMin {
  4648  		t.Fatalf("Expected <= %d entries, got %d", compactNumMin, entries)
  4649  	}
  4650  
  4651  	// Now make the consumer leader stepdown and make sure we have the proper snapshot.
  4652  	resp, err := nc.Request(fmt.Sprintf(JSApiConsumerLeaderStepDownT, "TEST", "dlc"), nil, time.Second)
  4653  	require_NoError(t, err)
  4654  
  4655  	var cdResp JSApiConsumerLeaderStepDownResponse
  4656  	if err := json.Unmarshal(resp.Data, &cdResp); err != nil {
  4657  		t.Fatalf("Unexpected error: %v", err)
  4658  	}
  4659  	if cdResp.Error != nil {
  4660  		t.Fatalf("Unexpected error: %+v", cdResp.Error)
  4661  	}
  4662  
  4663  	c.waitOnConsumerLeader("$G", "TEST", "dlc")
  4664  }
  4665  
  4666  func TestJetStreamClusterMirrorOrSourceNotActiveReporting(t *testing.T) {
  4667  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  4668  	defer c.shutdown()
  4669  
  4670  	nc, js := jsClientConnect(t, c.randomServer())
  4671  	defer nc.Close()
  4672  
  4673  	_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"foo"}, Replicas: 3})
  4674  	require_NoError(t, err)
  4675  
  4676  	si, err := js.AddStream(&nats.StreamConfig{
  4677  		Name:   "M",
  4678  		Mirror: &nats.StreamSource{Name: "TEST"},
  4679  	})
  4680  	require_NoError(t, err)
  4681  
  4682  	// We would previous calculate a large number if we actually never heard from the peer yet.
  4683  	// We want to make sure if we have never heard from the other side report -1 as Active.
  4684  	// It is possible if testing infra is slow that this could be legit, but should be pretty small.
  4685  	if si.Mirror.Active != -1 && si.Mirror.Active > 10*time.Millisecond {
  4686  		t.Fatalf("Expected an Active of -1, but got %v", si.Mirror.Active)
  4687  	}
  4688  }
  4689  
  4690  func TestJetStreamClusterStreamAdvisories(t *testing.T) {
  4691  	s := RunBasicJetStreamServer(t)
  4692  	defer s.Shutdown()
  4693  
  4694  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  4695  	defer c.shutdown()
  4696  
  4697  	checkAdv := func(t *testing.T, sub *nats.Subscription, expectedPrefixes ...string) {
  4698  		t.Helper()
  4699  		seen := make([]bool, len(expectedPrefixes))
  4700  		for i := 0; i < len(expectedPrefixes); i++ {
  4701  			msg := natsNexMsg(t, sub, time.Second)
  4702  			var gotOne bool
  4703  			for j, pfx := range expectedPrefixes {
  4704  				if !seen[j] && strings.HasPrefix(msg.Subject, pfx) {
  4705  					seen[j] = true
  4706  					gotOne = true
  4707  					break
  4708  				}
  4709  			}
  4710  			if !gotOne {
  4711  				t.Fatalf("Expected one of prefixes %q, got %q", expectedPrefixes, msg.Subject)
  4712  			}
  4713  		}
  4714  	}
  4715  
  4716  	// Used to keep stream names pseudo unique. t.Name() has slashes in it which caused problems.
  4717  	var testN int
  4718  
  4719  	checkAdvisories := func(t *testing.T, s *Server, replicas int) {
  4720  
  4721  		nc, js := jsClientConnect(t, s)
  4722  		defer nc.Close()
  4723  
  4724  		testN++
  4725  		streamName := "TEST_ADVISORIES_" + fmt.Sprintf("%d", testN)
  4726  
  4727  		sub := natsSubSync(t, nc, "$JS.EVENT.ADVISORY.STREAM.*."+streamName)
  4728  
  4729  		si, err := js.AddStream(&nats.StreamConfig{
  4730  			Name:     streamName,
  4731  			Storage:  nats.FileStorage,
  4732  			Replicas: replicas,
  4733  		})
  4734  		require_NoError(t, err)
  4735  		advisories := []string{JSAdvisoryStreamCreatedPre}
  4736  		if replicas > 1 {
  4737  			advisories = append(advisories, JSAdvisoryStreamLeaderElectedPre)
  4738  		}
  4739  		checkAdv(t, sub, advisories...)
  4740  
  4741  		si.Config.MaxMsgs = 1000
  4742  		_, err = js.UpdateStream(&si.Config)
  4743  		require_NoError(t, err)
  4744  		checkAdv(t, sub, JSAdvisoryStreamUpdatedPre)
  4745  
  4746  		snapreq := &JSApiStreamSnapshotRequest{
  4747  			DeliverSubject: nats.NewInbox(),
  4748  			ChunkSize:      512,
  4749  		}
  4750  		var snapshot []byte
  4751  		done := make(chan bool)
  4752  		nc.Subscribe(snapreq.DeliverSubject, func(m *nats.Msg) {
  4753  			// EOF
  4754  			if len(m.Data) == 0 {
  4755  				done <- true
  4756  				return
  4757  			}
  4758  			// Could be writing to a file here too.
  4759  			snapshot = append(snapshot, m.Data...)
  4760  			// Flow ack
  4761  			m.Respond(nil)
  4762  		})
  4763  
  4764  		req, _ := json.Marshal(snapreq)
  4765  		rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamSnapshotT, streamName), req, time.Second)
  4766  		if err != nil {
  4767  			t.Fatalf("Unexpected error on snapshot request: %v", err)
  4768  		}
  4769  
  4770  		var snapresp JSApiStreamSnapshotResponse
  4771  		json.Unmarshal(rmsg.Data, &snapresp)
  4772  		if snapresp.Error != nil {
  4773  			t.Fatalf("Did not get correct error response: %+v", snapresp.Error)
  4774  		}
  4775  
  4776  		// Wait to receive the snapshot.
  4777  		select {
  4778  		case <-done:
  4779  		case <-time.After(5 * time.Second):
  4780  			t.Fatalf("Did not receive our snapshot in time")
  4781  		}
  4782  
  4783  		checkAdv(t, sub, JSAdvisoryStreamSnapshotCreatePre)
  4784  		checkAdv(t, sub, JSAdvisoryStreamSnapshotCompletePre)
  4785  
  4786  		err = js.DeleteStream(streamName)
  4787  		require_NoError(t, err)
  4788  		checkAdv(t, sub, JSAdvisoryStreamDeletedPre)
  4789  
  4790  		state := *snapresp.State
  4791  		config := *snapresp.Config
  4792  		resreq := &JSApiStreamRestoreRequest{
  4793  			Config: config,
  4794  			State:  state,
  4795  		}
  4796  		req, _ = json.Marshal(resreq)
  4797  		rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamRestoreT, streamName), req, 5*time.Second)
  4798  		if err != nil {
  4799  			t.Fatalf("Unexpected error: %v", err)
  4800  		}
  4801  		var resresp JSApiStreamRestoreResponse
  4802  		json.Unmarshal(rmsg.Data, &resresp)
  4803  		if resresp.Error != nil {
  4804  			t.Fatalf("Got an unexpected error response: %+v", resresp.Error)
  4805  		}
  4806  
  4807  		// Send our snapshot back in to restore the stream.
  4808  		// Can be any size message.
  4809  		var chunk [1024]byte
  4810  		for r := bytes.NewReader(snapshot); ; {
  4811  			n, err := r.Read(chunk[:])
  4812  			if err != nil {
  4813  				break
  4814  			}
  4815  			nc.Request(resresp.DeliverSubject, chunk[:n], time.Second)
  4816  		}
  4817  		rmsg, err = nc.Request(resresp.DeliverSubject, nil, time.Second)
  4818  		if err != nil {
  4819  			t.Fatalf("Unexpected error: %v", err)
  4820  		}
  4821  		resresp.Error = nil
  4822  		json.Unmarshal(rmsg.Data, &resresp)
  4823  		if resresp.Error != nil {
  4824  			t.Fatalf("Got an unexpected error response: %+v", resresp.Error)
  4825  		}
  4826  
  4827  		checkAdv(t, sub, JSAdvisoryStreamRestoreCreatePre)
  4828  		// At this point, the stream_created advisory may be sent before
  4829  		// or after the restore_complete advisory because they are sent
  4830  		// using different "send queues". That is, the restore uses the
  4831  		// server's event queue while the stream_created is sent from
  4832  		// the stream's own send queue.
  4833  		advisories = append(advisories, JSAdvisoryStreamRestoreCompletePre)
  4834  		checkAdv(t, sub, advisories...)
  4835  	}
  4836  
  4837  	t.Run("Single", func(t *testing.T) { checkAdvisories(t, s, 1) })
  4838  	t.Run("Clustered_R1", func(t *testing.T) { checkAdvisories(t, c.randomServer(), 1) })
  4839  	t.Run("Clustered_R3", func(t *testing.T) { checkAdvisories(t, c.randomServer(), 3) })
  4840  }
  4841  
  4842  // If the config files have duplicate routes this can have the metagroup estimate a size for the system
  4843  // which prevents reaching quorum and electing a meta-leader.
  4844  func TestJetStreamClusterDuplicateRoutesDisruptJetStreamMetaGroup(t *testing.T) {
  4845  	tmpl := `
  4846  	listen: 127.0.0.1:-1
  4847  	server_name: %s
  4848  	jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'}
  4849  
  4850  	cluster {
  4851  		name: RR
  4852  		listen: 127.0.0.1:%d
  4853  		routes = [
  4854  			nats-route://127.0.0.1:%d
  4855  			nats-route://127.0.0.1:%d
  4856  			nats-route://127.0.0.1:%d
  4857  			# These will be dupes
  4858  			nats-route://127.0.0.1:%d
  4859  			nats-route://127.0.0.1:%d
  4860  			nats-route://127.0.0.1:%d
  4861  		]
  4862  	}
  4863  
  4864  	# For access to system account.
  4865  	accounts { $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } }
  4866  `
  4867  
  4868  	c := &cluster{servers: make([]*Server, 0, 3), opts: make([]*Options, 0, 3), name: "RR", t: t}
  4869  
  4870  	rports := []int{22208, 22209, 22210}
  4871  	for i, p := range rports {
  4872  		sname, sd := fmt.Sprintf("S%d", i+1), t.TempDir()
  4873  		cf := fmt.Sprintf(tmpl, sname, sd, p, rports[0], rports[1], rports[2], rports[0], rports[1], rports[2])
  4874  		s, o := RunServerWithConfig(createConfFile(t, []byte(cf)))
  4875  		c.servers, c.opts = append(c.servers, s), append(c.opts, o)
  4876  	}
  4877  	defer c.shutdown()
  4878  
  4879  	checkClusterFormed(t, c.servers...)
  4880  	c.waitOnClusterReady()
  4881  }
  4882  
  4883  func TestJetStreamClusterDuplicateMsgIdsOnCatchupAndLeaderTakeover(t *testing.T) {
  4884  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  4885  	defer c.shutdown()
  4886  
  4887  	nc, js := jsClientConnect(t, c.randomServer())
  4888  	defer nc.Close()
  4889  
  4890  	_, err := js.AddStream(&nats.StreamConfig{
  4891  		Name:     "TEST",
  4892  		Replicas: 3,
  4893  	})
  4894  	require_NoError(t, err)
  4895  
  4896  	// Shutdown a non-leader.
  4897  	nc.Close()
  4898  	sr := c.randomNonStreamLeader("$G", "TEST")
  4899  	sr.Shutdown()
  4900  
  4901  	nc, js = jsClientConnect(t, c.randomServer())
  4902  	defer nc.Close()
  4903  
  4904  	m := nats.NewMsg("TEST")
  4905  	m.Data = []byte("OK")
  4906  
  4907  	n := 10
  4908  	for i := 0; i < n; i++ {
  4909  		m.Header.Set(JSMsgId, strconv.Itoa(i))
  4910  		_, err := js.PublishMsg(m)
  4911  		require_NoError(t, err)
  4912  	}
  4913  
  4914  	m.Header.Set(JSMsgId, "8")
  4915  	pa, err := js.PublishMsg(m)
  4916  	require_NoError(t, err)
  4917  	if !pa.Duplicate {
  4918  		t.Fatalf("Expected msg to be a duplicate")
  4919  	}
  4920  
  4921  	// Now force a snapshot, want to test catchup above RAFT layer.
  4922  	sl := c.streamLeader("$G", "TEST")
  4923  	mset, err := sl.GlobalAccount().lookupStream("TEST")
  4924  	require_NoError(t, err)
  4925  	if node := mset.raftNode(); node == nil {
  4926  		t.Fatalf("Could not get stream group name")
  4927  	} else if err := node.InstallSnapshot(mset.stateSnapshot()); err != nil {
  4928  		t.Fatalf("Error installing snapshot: %v", err)
  4929  	}
  4930  
  4931  	// Now restart
  4932  	sr = c.restartServer(sr)
  4933  	c.waitOnStreamCurrent(sr, "$G", "TEST")
  4934  	c.waitOnStreamLeader("$G", "TEST")
  4935  
  4936  	// Now make them the leader.
  4937  	for sr != c.streamLeader("$G", "TEST") {
  4938  		nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST"), nil, time.Second)
  4939  		c.waitOnStreamLeader("$G", "TEST")
  4940  	}
  4941  
  4942  	// Make sure this gets rejected.
  4943  	pa, err = js.PublishMsg(m)
  4944  	require_NoError(t, err)
  4945  	if !pa.Duplicate {
  4946  		t.Fatalf("Expected msg to be a duplicate")
  4947  	}
  4948  }
  4949  
  4950  func TestJetStreamClusterConsumerLeaderChangeDeadlock(t *testing.T) {
  4951  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  4952  	defer c.shutdown()
  4953  
  4954  	nc, js := jsClientConnect(t, c.randomServer())
  4955  	defer nc.Close()
  4956  
  4957  	// Create a stream and durable with ack explicit
  4958  	_, err := js.AddStream(&nats.StreamConfig{Name: "test", Subjects: []string{"foo"}, Replicas: 3})
  4959  	require_NoError(t, err)
  4960  	_, err = js.AddConsumer("test", &nats.ConsumerConfig{
  4961  		Durable:        "test",
  4962  		DeliverSubject: "bar",
  4963  		AckPolicy:      nats.AckExplicitPolicy,
  4964  		AckWait:        250 * time.Millisecond,
  4965  	})
  4966  	require_NoError(t, err)
  4967  
  4968  	// Wait for a leader
  4969  	c.waitOnConsumerLeader("$G", "test", "test")
  4970  	cl := c.consumerLeader("$G", "test", "test")
  4971  
  4972  	// Publish a message
  4973  	_, err = js.Publish("foo", []byte("msg"))
  4974  	require_NoError(t, err)
  4975  
  4976  	// Create nats consumer on "bar" and don't ack it
  4977  	sub := natsSubSync(t, nc, "bar")
  4978  	natsNexMsg(t, sub, time.Second)
  4979  	// Wait for redeliveries, to make sure it is in the redelivery map
  4980  	natsNexMsg(t, sub, time.Second)
  4981  	natsNexMsg(t, sub, time.Second)
  4982  
  4983  	mset, err := cl.GlobalAccount().lookupStream("test")
  4984  	require_NoError(t, err)
  4985  	require_True(t, mset != nil)
  4986  
  4987  	// There are parts in the code (for instance when signaling to consumers
  4988  	// that there are new messages) where we get the mset lock and iterate
  4989  	// over the consumers and get consumer lock. We are going to do that
  4990  	// in a go routine while we send a consumer step down request from
  4991  	// another go routine. We will watch for possible deadlock and if
  4992  	// found report it.
  4993  	ch := make(chan struct{})
  4994  	doneCh := make(chan struct{})
  4995  	go func() {
  4996  		defer close(doneCh)
  4997  		for {
  4998  			mset.mu.Lock()
  4999  			for _, o := range mset.consumers {
  5000  				o.mu.Lock()
  5001  				time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond)
  5002  				o.mu.Unlock()
  5003  			}
  5004  			mset.mu.Unlock()
  5005  			select {
  5006  			case <-ch:
  5007  				return
  5008  			default:
  5009  			}
  5010  		}
  5011  	}()
  5012  
  5013  	// Now cause a leader changes
  5014  	for i := 0; i < 5; i++ {
  5015  		m, err := nc.Request("$JS.API.CONSUMER.LEADER.STEPDOWN.test.test", nil, 2*time.Second)
  5016  		// Ignore error here and check for deadlock below
  5017  		if err != nil {
  5018  			break
  5019  		}
  5020  		// if there is a message, check that it is success
  5021  		var resp JSApiConsumerLeaderStepDownResponse
  5022  		err = json.Unmarshal(m.Data, &resp)
  5023  		require_NoError(t, err)
  5024  		require_True(t, resp.Success)
  5025  		c.waitOnConsumerLeader("$G", "test", "test")
  5026  	}
  5027  
  5028  	close(ch)
  5029  	select {
  5030  	case <-doneCh:
  5031  		// OK!
  5032  	case <-time.After(2 * time.Second):
  5033  		buf := make([]byte, 1000000)
  5034  		n := runtime.Stack(buf, true)
  5035  		t.Fatalf("Suspected deadlock, printing current stack. The test suite may timeout and will also dump the stack\n%s\n", buf[:n])
  5036  	}
  5037  }
  5038  
  5039  // We were compacting to keep the raft log manageable but not snapshotting, which meant that restarted
  5040  // servers could complain about no snapshot and could not sync after that condition.
  5041  // Changes also address https://github.com/nats-io/nats-server/issues/2936
  5042  func TestJetStreamClusterMemoryConsumerCompactVsSnapshot(t *testing.T) {
  5043  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  5044  	defer c.shutdown()
  5045  
  5046  	nc, js := jsClientConnect(t, c.randomServer())
  5047  	defer nc.Close()
  5048  
  5049  	// Create a stream and durable with ack explicit
  5050  	_, err := js.AddStream(&nats.StreamConfig{
  5051  		Name:     "test",
  5052  		Storage:  nats.MemoryStorage,
  5053  		Replicas: 3,
  5054  	})
  5055  	require_NoError(t, err)
  5056  
  5057  	_, err = js.AddConsumer("test", &nats.ConsumerConfig{
  5058  		Durable:   "d",
  5059  		AckPolicy: nats.AckExplicitPolicy,
  5060  	})
  5061  	require_NoError(t, err)
  5062  
  5063  	// Bring a non-leader down.
  5064  	s := c.randomNonConsumerLeader("$G", "test", "d")
  5065  	s.Shutdown()
  5066  
  5067  	// In case that was also mete or stream leader.
  5068  	c.waitOnLeader()
  5069  	c.waitOnStreamLeader("$G", "test")
  5070  	// In case we were connected there.
  5071  	nc.Close()
  5072  	nc, js = jsClientConnect(t, c.randomServer())
  5073  	defer nc.Close()
  5074  
  5075  	// Generate some state.
  5076  	for i := 0; i < 2000; i++ {
  5077  		_, err := js.PublishAsync("test", nil)
  5078  		require_NoError(t, err)
  5079  	}
  5080  	select {
  5081  	case <-js.PublishAsyncComplete():
  5082  	case <-time.After(5 * time.Second):
  5083  		t.Fatalf("Did not receive completion signal")
  5084  	}
  5085  
  5086  	sub, err := js.PullSubscribe("test", "d")
  5087  	require_NoError(t, err)
  5088  
  5089  	for i := 0; i < 2; i++ {
  5090  		for _, m := range fetchMsgs(t, sub, 1000, 5*time.Second) {
  5091  			m.AckSync()
  5092  		}
  5093  	}
  5094  
  5095  	// Restart our downed server.
  5096  	s = c.restartServer(s)
  5097  	c.checkClusterFormed()
  5098  	c.waitOnServerCurrent(s)
  5099  
  5100  	checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
  5101  		ci, err := js.ConsumerInfo("test", "d")
  5102  		require_NoError(t, err)
  5103  		for _, r := range ci.Cluster.Replicas {
  5104  			if !r.Current || r.Lag != 0 {
  5105  				return fmt.Errorf("Replica not current: %+v", r)
  5106  			}
  5107  		}
  5108  		return nil
  5109  	})
  5110  }
  5111  
  5112  func TestJetStreamClusterMemoryConsumerInterestRetention(t *testing.T) {
  5113  	c := createJetStreamClusterExplicit(t, "R3S", 3)
  5114  	defer c.shutdown()
  5115  
  5116  	nc, js := jsClientConnect(t, c.randomServer())
  5117  	defer nc.Close()
  5118  
  5119  	_, err := js.AddStream(&nats.StreamConfig{
  5120  		Name:      "test",
  5121  		Storage:   nats.MemoryStorage,
  5122  		Retention: nats.InterestPolicy,
  5123  		Replicas:  3,
  5124  	})
  5125  	require_NoError(t, err)
  5126  
  5127  	sub, err := js.SubscribeSync("test", nats.Durable("dlc"))
  5128  	require_NoError(t, err)
  5129  
  5130  	for i := 0; i < 1000; i++ {
  5131  		_, err := js.PublishAsync("test", nil)
  5132  		require_NoError(t, err)
  5133  	}
  5134  	select {
  5135  	case <-js.PublishAsyncComplete():
  5136  	case <-time.After(5 * time.Second):
  5137  		t.Fatalf("Did not receive completion signal")
  5138  	}
  5139  
  5140  	toAck := 100
  5141  	for i := 0; i < toAck; i++ {
  5142  		m, err := sub.NextMsg(time.Second)
  5143  		require_NoError(t, err)
  5144  		m.AckSync()
  5145  	}
  5146  
  5147  	checkFor(t, time.Second, 15*time.Millisecond, func() error {
  5148  		si, err := js.StreamInfo("test")
  5149  		if err != nil {
  5150  			return err
  5151  		}
  5152  		if n := si.State.Msgs; n != 900 {
  5153  			return fmt.Errorf("Waiting for msgs count to be 900, got %v", n)
  5154  		}
  5155  		return nil
  5156  	})
  5157  
  5158  	si, err := js.StreamInfo("test")
  5159  	require_NoError(t, err)
  5160  
  5161  	ci, err := sub.ConsumerInfo()
  5162  	require_NoError(t, err)
  5163  
  5164  	// Make sure acks are not only replicated but processed in a way to remove messages from the replica streams.
  5165  	_, err = nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "test"), nil, time.Second)
  5166  	require_NoError(t, err)
  5167  	c.waitOnStreamLeader("$G", "test")
  5168  
  5169  	_, err = nc.Request(fmt.Sprintf(JSApiConsumerLeaderStepDownT, "test", "dlc"), nil, time.Second)
  5170  	require_NoError(t, err)
  5171  	c.waitOnConsumerLeader("$G", "test", "dlc")
  5172  
  5173  	nsi, err := js.StreamInfo("test")
  5174  	require_NoError(t, err)
  5175  	if !reflect.DeepEqual(nsi.State, si.State) {
  5176  		t.Fatalf("Stream states do not match: %+v vs %+v", si.State, nsi.State)
  5177  	}
  5178  
  5179  	nci, err := sub.ConsumerInfo()
  5180  	require_NoError(t, err)
  5181  
  5182  	// Last may be skewed a very small amount.
  5183  	ci.AckFloor.Last, nci.AckFloor.Last = nil, nil
  5184  	if nci.AckFloor != ci.AckFloor {
  5185  		t.Fatalf("Consumer AckFloors are not the same: %+v vs %+v", ci.AckFloor, nci.AckFloor)
  5186  	}
  5187  }
  5188  
  5189  func TestJetStreamClusterDeleteAndRestoreAndRestart(t *testing.T) {
  5190  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  5191  	defer c.shutdown()
  5192  
  5193  	nc, js := jsClientConnect(t, c.randomServer())
  5194  	defer nc.Close()
  5195  
  5196  	_, err := js.AddStream(&nats.StreamConfig{Name: "TEST"})
  5197  	require_NoError(t, err)
  5198  
  5199  	for i := 0; i < 10; i++ {
  5200  		_, err := js.Publish("TEST", []byte("OK"))
  5201  		require_NoError(t, err)
  5202  	}
  5203  	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy})
  5204  	require_NoError(t, err)
  5205  
  5206  	require_NoError(t, js.DeleteConsumer("TEST", "dlc"))
  5207  	require_NoError(t, js.DeleteStream("TEST"))
  5208  
  5209  	_, err = js.AddStream(&nats.StreamConfig{Name: "TEST"})
  5210  	require_NoError(t, err)
  5211  
  5212  	for i := 0; i < 22; i++ {
  5213  		_, err := js.Publish("TEST", []byte("OK"))
  5214  		require_NoError(t, err)
  5215  	}
  5216  
  5217  	sub, err := js.SubscribeSync("TEST", nats.Durable("dlc"), nats.Description("SECOND"))
  5218  	require_NoError(t, err)
  5219  
  5220  	for i := 0; i < 5; i++ {
  5221  		m, err := sub.NextMsg(time.Second)
  5222  		require_NoError(t, err)
  5223  		m.AckSync()
  5224  	}
  5225  
  5226  	// Now restart.
  5227  	sl := c.streamLeader("$G", "TEST")
  5228  	sl.Shutdown()
  5229  	sl = c.restartServer(sl)
  5230  	c.waitOnStreamLeader("$G", "TEST")
  5231  	c.waitOnConsumerLeader("$G", "TEST", "dlc")
  5232  
  5233  	nc, js = jsClientConnect(t, c.randomServer())
  5234  	defer nc.Close()
  5235  
  5236  	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
  5237  		si, err := js.StreamInfo("TEST")
  5238  		require_NoError(t, err)
  5239  		if si.State.Msgs != 22 {
  5240  			return fmt.Errorf("State is not correct after restart, expected 22 msgs, got %d", si.State.Msgs)
  5241  		}
  5242  		return nil
  5243  	})
  5244  
  5245  	ci, err := js.ConsumerInfo("TEST", "dlc")
  5246  	require_NoError(t, err)
  5247  
  5248  	if ci.AckFloor.Consumer != 5 {
  5249  		t.Fatalf("Bad ack floor: %+v", ci.AckFloor)
  5250  	}
  5251  
  5252  	// Now delete and make sure consumer does not come back.
  5253  	// First add a delete something else.
  5254  	_, err = js.AddStream(&nats.StreamConfig{Name: "TEST2"})
  5255  	require_NoError(t, err)
  5256  	require_NoError(t, js.DeleteStream("TEST2"))
  5257  	// Now the consumer.
  5258  	require_NoError(t, js.DeleteConsumer("TEST", "dlc"))
  5259  
  5260  	sl.Shutdown()
  5261  	c.restartServer(sl)
  5262  	c.waitOnStreamLeader("$G", "TEST")
  5263  
  5264  	// In rare circumstances this could be recovered and then quickly deleted.
  5265  	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
  5266  		if _, err := js.ConsumerInfo("TEST", "dlc"); err == nil {
  5267  			return fmt.Errorf("Not cleaned up yet")
  5268  		}
  5269  		return nil
  5270  	})
  5271  }
  5272  
  5273  func TestJetStreamClusterMirrorSourceLoop(t *testing.T) {
  5274  	test := func(t *testing.T, s *Server, replicas int) {
  5275  		nc, js := jsClientConnect(t, s)
  5276  		defer nc.Close()
  5277  		// Create a source/mirror loop
  5278  		_, err := js.AddStream(&nats.StreamConfig{
  5279  			Name:     "1",
  5280  			Subjects: []string{"foo", "bar"},
  5281  			Replicas: replicas,
  5282  			Sources:  []*nats.StreamSource{{Name: "DECOY"}, {Name: "2"}},
  5283  		})
  5284  		require_NoError(t, err)
  5285  		_, err = js.AddStream(&nats.StreamConfig{
  5286  			Name:     "DECOY",
  5287  			Subjects: []string{"baz"},
  5288  			Replicas: replicas,
  5289  			Sources:  []*nats.StreamSource{{Name: "NOTTHERE"}},
  5290  		})
  5291  		require_NoError(t, err)
  5292  		_, err = js.AddStream(&nats.StreamConfig{
  5293  			Name:     "2",
  5294  			Replicas: replicas,
  5295  			Sources:  []*nats.StreamSource{{Name: "3"}},
  5296  		})
  5297  		require_NoError(t, err)
  5298  		_, err = js.AddStream(&nats.StreamConfig{
  5299  			Name:     "3",
  5300  			Replicas: replicas,
  5301  			Sources:  []*nats.StreamSource{{Name: "1"}},
  5302  		})
  5303  		require_Error(t, err)
  5304  		require_Equal(t, err.Error(), "nats: detected cycle")
  5305  	}
  5306  
  5307  	t.Run("Single", func(t *testing.T) {
  5308  		s := RunBasicJetStreamServer(t)
  5309  		defer s.Shutdown()
  5310  		test(t, s, 1)
  5311  	})
  5312  	t.Run("Clustered", func(t *testing.T) {
  5313  		c := createJetStreamClusterExplicit(t, "JSC", 5)
  5314  		defer c.shutdown()
  5315  		test(t, c.randomServer(), 2)
  5316  	})
  5317  }
  5318  
  5319  func TestJetStreamClusterMirrorDeDupWindow(t *testing.T) {
  5320  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  5321  	defer c.shutdown()
  5322  
  5323  	nc, js := jsClientConnect(t, c.randomServer())
  5324  	defer nc.Close()
  5325  
  5326  	si, err := js.AddStream(&nats.StreamConfig{
  5327  		Name:     "S",
  5328  		Subjects: []string{"foo"},
  5329  		Replicas: 3,
  5330  	})
  5331  	require_NoError(t, err)
  5332  	require_True(t, si.Cluster != nil)
  5333  	require_True(t, si.Config.Replicas == 3)
  5334  	require_True(t, len(si.Cluster.Replicas) == 2)
  5335  
  5336  	send := func(count int) {
  5337  		t.Helper()
  5338  		for i := 0; i < count; i++ {
  5339  			_, err := js.Publish("foo", []byte("msg"))
  5340  			require_NoError(t, err)
  5341  		}
  5342  	}
  5343  
  5344  	// Send 100 messages
  5345  	send(100)
  5346  
  5347  	// Now create a valid one.
  5348  	si, err = js.AddStream(&nats.StreamConfig{
  5349  		Name:     "M",
  5350  		Replicas: 3,
  5351  		Mirror:   &nats.StreamSource{Name: "S"},
  5352  	})
  5353  	require_NoError(t, err)
  5354  	require_True(t, si.Cluster != nil)
  5355  	require_True(t, si.Config.Replicas == 3)
  5356  	require_True(t, len(si.Cluster.Replicas) == 2)
  5357  
  5358  	check := func(expected int) {
  5359  		t.Helper()
  5360  		// Wait for all messages to be in mirror
  5361  		checkFor(t, 15*time.Second, 50*time.Millisecond, func() error {
  5362  			si, err := js.StreamInfo("M")
  5363  			if err != nil {
  5364  				return err
  5365  			}
  5366  			if n := si.State.Msgs; int(n) != expected {
  5367  				return fmt.Errorf("Expected %v msgs, got %v", expected, n)
  5368  			}
  5369  			return nil
  5370  		})
  5371  	}
  5372  	check(100)
  5373  
  5374  	// Restart cluster
  5375  	nc.Close()
  5376  	c.stopAll()
  5377  	c.restartAll()
  5378  	c.waitOnLeader()
  5379  	c.waitOnStreamLeader(globalAccountName, "S")
  5380  	c.waitOnStreamLeader(globalAccountName, "M")
  5381  
  5382  	nc, js = jsClientConnect(t, c.randomServer())
  5383  	defer nc.Close()
  5384  
  5385  	si, err = js.StreamInfo("M")
  5386  	require_NoError(t, err)
  5387  	require_True(t, si.Cluster != nil)
  5388  	require_True(t, si.Config.Replicas == 3)
  5389  	require_True(t, len(si.Cluster.Replicas) == 2)
  5390  
  5391  	// Send 100 messages
  5392  	send(100)
  5393  	check(200)
  5394  }
  5395  
  5396  func TestJetStreamClusterNewHealthz(t *testing.T) {
  5397  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  5398  	defer c.shutdown()
  5399  
  5400  	nc, js := jsClientConnect(t, c.randomServer())
  5401  	defer nc.Close()
  5402  
  5403  	_, err := js.AddStream(&nats.StreamConfig{
  5404  		Name:     "R1",
  5405  		Subjects: []string{"foo"},
  5406  		Replicas: 1,
  5407  	})
  5408  	require_NoError(t, err)
  5409  
  5410  	_, err = js.AddStream(&nats.StreamConfig{
  5411  		Name:     "R3",
  5412  		Subjects: []string{"bar"},
  5413  		Replicas: 3,
  5414  	})
  5415  	require_NoError(t, err)
  5416  
  5417  	// Create subscribers (durable and ephemeral for each)
  5418  	fsube, err := js.SubscribeSync("foo")
  5419  	require_NoError(t, err)
  5420  	fsubd, err := js.SubscribeSync("foo", nats.Durable("d"))
  5421  	require_NoError(t, err)
  5422  
  5423  	_, err = js.SubscribeSync("bar")
  5424  	require_NoError(t, err)
  5425  	bsubd, err := js.SubscribeSync("bar", nats.Durable("d"))
  5426  	require_NoError(t, err)
  5427  
  5428  	for i := 0; i < 20; i++ {
  5429  		_, err = js.Publish("foo", []byte("foo"))
  5430  		require_NoError(t, err)
  5431  	}
  5432  	checkSubsPending(t, fsube, 20)
  5433  	checkSubsPending(t, fsubd, 20)
  5434  
  5435  	// Select the server where we know the R1 stream is running.
  5436  	sl := c.streamLeader("$G", "R1")
  5437  	sl.Shutdown()
  5438  
  5439  	// Do same on R3 so that sl has to recover some things before healthz should be good.
  5440  	c.waitOnStreamLeader("$G", "R3")
  5441  
  5442  	for i := 0; i < 10; i++ {
  5443  		_, err = js.Publish("bar", []byte("bar"))
  5444  		require_NoError(t, err)
  5445  	}
  5446  	// Ephemeral is skipped, might have been on the downed server.
  5447  	checkSubsPending(t, bsubd, 10)
  5448  
  5449  	sl = c.restartServer(sl)
  5450  	c.waitOnServerHealthz(sl)
  5451  }
  5452  
  5453  func TestJetStreamClusterConsumerOverrides(t *testing.T) {
  5454  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  5455  	defer c.shutdown()
  5456  
  5457  	nc, js := jsClientConnect(t, c.randomServer())
  5458  	defer nc.Close()
  5459  
  5460  	_, err := js.AddStream(&nats.StreamConfig{
  5461  		Name:     "TEST",
  5462  		Subjects: []string{"foo"},
  5463  		Replicas: 3,
  5464  	})
  5465  	require_NoError(t, err)
  5466  
  5467  	// Test replica override.
  5468  	// Make sure we can not go "wider" than the parent stream.
  5469  	ccReq := CreateConsumerRequest{
  5470  		Stream: "TEST",
  5471  		Config: ConsumerConfig{
  5472  			Durable:   "d",
  5473  			AckPolicy: AckExplicit,
  5474  			Replicas:  5,
  5475  		},
  5476  	}
  5477  	req, err := json.Marshal(ccReq)
  5478  	require_NoError(t, err)
  5479  
  5480  	ci, err := nc.Request(fmt.Sprintf(JSApiDurableCreateT, "TEST", "d"), req, time.Second)
  5481  	require_NoError(t, err)
  5482  
  5483  	var resp JSApiConsumerCreateResponse
  5484  	err = json.Unmarshal(ci.Data, &resp)
  5485  	require_NoError(t, err)
  5486  
  5487  	if resp.Error == nil || !IsNatsErr(resp.Error, JSConsumerReplicasExceedsStream) {
  5488  		t.Fatalf("Expected an error when replicas > parent stream, got %+v", resp.Error)
  5489  	}
  5490  
  5491  	// Durables inherit the replica count from the stream, so make sure we can override that.
  5492  	ccReq.Config.Replicas = 1
  5493  	req, err = json.Marshal(ccReq)
  5494  	require_NoError(t, err)
  5495  
  5496  	ci, err = nc.Request(fmt.Sprintf(JSApiDurableCreateT, "TEST", "d"), req, time.Second)
  5497  	require_NoError(t, err)
  5498  
  5499  	resp.Error = nil
  5500  	err = json.Unmarshal(ci.Data, &resp)
  5501  	require_NoError(t, err)
  5502  	require_True(t, resp.Error == nil)
  5503  
  5504  	checkCount := func(durable string, expected int) {
  5505  		t.Helper()
  5506  		count := 0
  5507  		for _, s := range c.servers {
  5508  			if mset, err := s.GlobalAccount().lookupStream("TEST"); err == nil {
  5509  				if o := mset.lookupConsumer(durable); o != nil {
  5510  					count++
  5511  				}
  5512  			}
  5513  		}
  5514  		if count != expected {
  5515  			t.Fatalf("Expected %d consumers in cluster, got %d", expected, count)
  5516  		}
  5517  	}
  5518  	checkCount("d", 1)
  5519  
  5520  	// Now override storage and force storage to memory based.
  5521  	ccReq.Config.MemoryStorage = true
  5522  	ccReq.Config.Durable = "m"
  5523  	ccReq.Config.Replicas = 3
  5524  
  5525  	req, err = json.Marshal(ccReq)
  5526  	require_NoError(t, err)
  5527  
  5528  	ci, err = nc.Request(fmt.Sprintf(JSApiDurableCreateT, "TEST", "m"), req, time.Second)
  5529  	require_NoError(t, err)
  5530  
  5531  	resp.Error = nil
  5532  	err = json.Unmarshal(ci.Data, &resp)
  5533  	require_NoError(t, err)
  5534  	require_True(t, resp.Error == nil)
  5535  
  5536  	checkCount("m", 3)
  5537  
  5538  	// Make sure memory setting is for both consumer raft log and consumer store.
  5539  	s := c.consumerLeader("$G", "TEST", "m")
  5540  	require_True(t, s != nil)
  5541  	mset, err := s.GlobalAccount().lookupStream("TEST")
  5542  	require_NoError(t, err)
  5543  	o := mset.lookupConsumer("m")
  5544  	require_True(t, o != nil)
  5545  
  5546  	st := o.store.Type()
  5547  	n := o.raftNode()
  5548  	require_True(t, n != nil)
  5549  	rn := n.(*raft)
  5550  	rn.RLock()
  5551  	wal := rn.wal
  5552  	rn.RUnlock()
  5553  	require_True(t, wal.Type() == MemoryStorage)
  5554  	require_True(t, st == MemoryStorage)
  5555  
  5556  	// Now make sure we account properly for the consumers.
  5557  	// Add in normal here first.
  5558  	_, err = js.SubscribeSync("foo", nats.Durable("d22"))
  5559  	require_NoError(t, err)
  5560  
  5561  	si, err := js.StreamInfo("TEST")
  5562  	require_NoError(t, err)
  5563  	require_True(t, si.State.Consumers == 3)
  5564  
  5565  	err = js.DeleteConsumer("TEST", "d")
  5566  	require_NoError(t, err)
  5567  
  5568  	// Also make sure the stream leader direct store reports same with mixed and matched.
  5569  	s = c.streamLeader("$G", "TEST")
  5570  	require_True(t, s != nil)
  5571  	mset, err = s.GlobalAccount().lookupStream("TEST")
  5572  	require_NoError(t, err)
  5573  
  5574  	state := mset.Store().State()
  5575  	require_True(t, state.Consumers == 2)
  5576  
  5577  	// Fast state version as well.
  5578  	fstate := mset.stateWithDetail(false)
  5579  	require_True(t, fstate.Consumers == 2)
  5580  
  5581  	// Make sure delete accounting works too.
  5582  	err = js.DeleteConsumer("TEST", "m")
  5583  	require_NoError(t, err)
  5584  
  5585  	state = mset.Store().State()
  5586  	require_True(t, state.Consumers == 1)
  5587  
  5588  	// Fast state version as well.
  5589  	fstate = mset.stateWithDetail(false)
  5590  	require_True(t, fstate.Consumers == 1)
  5591  }
  5592  
  5593  func TestJetStreamClusterStreamRepublish(t *testing.T) {
  5594  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  5595  	defer c.shutdown()
  5596  
  5597  	nc, js := jsClientConnect(t, c.randomServer())
  5598  	defer nc.Close()
  5599  
  5600  	// Do by hand for now.
  5601  	cfg := &StreamConfig{
  5602  		Name:     "RP",
  5603  		Storage:  MemoryStorage,
  5604  		Subjects: []string{"foo", "bar", "baz"},
  5605  		Replicas: 3,
  5606  		RePublish: &RePublish{
  5607  			Source:      ">",
  5608  			Destination: "RP.>",
  5609  		},
  5610  	}
  5611  
  5612  	addStream(t, nc, cfg)
  5613  
  5614  	sub, err := nc.SubscribeSync("RP.>")
  5615  	require_NoError(t, err)
  5616  
  5617  	msg, toSend := []byte("OK TO REPUBLISH?"), 100
  5618  	for i := 0; i < toSend; i++ {
  5619  		_, err = js.PublishAsync("foo", msg)
  5620  		require_NoError(t, err)
  5621  		_, err = js.PublishAsync("bar", msg)
  5622  		require_NoError(t, err)
  5623  		_, err = js.PublishAsync("baz", msg)
  5624  		require_NoError(t, err)
  5625  	}
  5626  	select {
  5627  	case <-js.PublishAsyncComplete():
  5628  	case <-time.After(5 * time.Second):
  5629  		t.Fatalf("Did not receive completion signal")
  5630  	}
  5631  
  5632  	checkSubsPending(t, sub, toSend*3)
  5633  
  5634  	lseq := map[string]int{
  5635  		"foo": 0,
  5636  		"bar": 0,
  5637  		"baz": 0,
  5638  	}
  5639  
  5640  	for i := 1; i <= toSend; i++ {
  5641  		m, err := sub.NextMsg(time.Second)
  5642  		require_NoError(t, err)
  5643  		// Grab info from Header
  5644  		require_True(t, m.Header.Get(JSStream) == "RP")
  5645  		// Make sure sequence is correct.
  5646  		seq, err := strconv.Atoi(m.Header.Get(JSSequence))
  5647  		require_NoError(t, err)
  5648  		require_True(t, seq == i)
  5649  		// Make sure timestamp is correct
  5650  		ts, err := time.Parse(time.RFC3339Nano, m.Header.Get(JSTimeStamp))
  5651  		require_NoError(t, err)
  5652  		origMsg, err := js.GetMsg("RP", uint64(seq))
  5653  		require_NoError(t, err)
  5654  		require_True(t, ts == origMsg.Time)
  5655  		// Make sure last sequence matches last seq we received on this subject.
  5656  		last, err := strconv.Atoi(m.Header.Get(JSLastSequence))
  5657  		require_NoError(t, err)
  5658  		require_True(t, last == lseq[m.Subject])
  5659  		lseq[m.Subject] = seq
  5660  	}
  5661  }
  5662  
  5663  func TestJetStreamClusterConsumerDeliverNewNotConsumingBeforeStepDownOrRestart(t *testing.T) {
  5664  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  5665  	defer c.shutdown()
  5666  
  5667  	nc, js := jsClientConnect(t, c.randomServer())
  5668  	defer nc.Close()
  5669  
  5670  	_, err := js.AddStream(&nats.StreamConfig{
  5671  		Name:     "TEST",
  5672  		Subjects: []string{"foo"},
  5673  		Replicas: 3,
  5674  	})
  5675  	require_NoError(t, err)
  5676  
  5677  	inbox := nats.NewInbox()
  5678  	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{
  5679  		DeliverSubject: inbox,
  5680  		Durable:        "dur",
  5681  		AckPolicy:      nats.AckExplicitPolicy,
  5682  		DeliverPolicy:  nats.DeliverNewPolicy,
  5683  		FilterSubject:  "foo",
  5684  	})
  5685  	require_NoError(t, err)
  5686  
  5687  	c.waitOnConsumerLeader(globalAccountName, "TEST", "dur")
  5688  
  5689  	for i := 0; i < 10; i++ {
  5690  		sendStreamMsg(t, nc, "foo", "msg")
  5691  	}
  5692  
  5693  	checkCount := func(expected int) {
  5694  		t.Helper()
  5695  		checkFor(t, 2*time.Second, 15*time.Millisecond, func() error {
  5696  			ci, err := js.ConsumerInfo("TEST", "dur")
  5697  			if err != nil {
  5698  				return err
  5699  			}
  5700  			if n := int(ci.NumPending); n != expected {
  5701  				return fmt.Errorf("Expected %v pending, got %v", expected, n)
  5702  			}
  5703  			return nil
  5704  		})
  5705  	}
  5706  	checkCount(10)
  5707  
  5708  	resp, err := nc.Request(fmt.Sprintf(JSApiConsumerLeaderStepDownT, "TEST", "dur"), nil, time.Second)
  5709  	if err != nil {
  5710  		t.Fatalf("Unexpected error: %v", err)
  5711  	}
  5712  	var cdResp JSApiConsumerLeaderStepDownResponse
  5713  	if err := json.Unmarshal(resp.Data, &cdResp); err != nil {
  5714  		t.Fatalf("Unexpected error: %v", err)
  5715  	}
  5716  	if cdResp.Error != nil {
  5717  		t.Fatalf("Unexpected error: %+v", cdResp.Error)
  5718  	}
  5719  
  5720  	c.waitOnConsumerLeader(globalAccountName, "TEST", "dur")
  5721  	checkCount(10)
  5722  
  5723  	// Check also servers restart
  5724  	nc.Close()
  5725  	c.stopAll()
  5726  	c.restartAll()
  5727  
  5728  	c.waitOnConsumerLeader(globalAccountName, "TEST", "dur")
  5729  
  5730  	nc, js = jsClientConnect(t, c.randomServer())
  5731  	defer nc.Close()
  5732  
  5733  	checkCount(10)
  5734  
  5735  	// Make sure messages can be consumed
  5736  	sub := natsSubSync(t, nc, inbox)
  5737  	for i := 0; i < 10; i++ {
  5738  		msg, err := sub.NextMsg(time.Second)
  5739  		if err != nil {
  5740  			t.Fatalf("i=%v next msg error: %v", i, err)
  5741  		}
  5742  		msg.AckSync()
  5743  	}
  5744  	checkCount(0)
  5745  }
  5746  
  5747  func TestJetStreamClusterConsumerDeliverNewMaxRedeliveriesAndServerRestart(t *testing.T) {
  5748  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  5749  	defer c.shutdown()
  5750  
  5751  	nc, js := jsClientConnect(t, c.randomServer())
  5752  	defer nc.Close()
  5753  
  5754  	_, err := js.AddStream(&nats.StreamConfig{
  5755  		Name:     "TEST",
  5756  		Subjects: []string{"foo.*"},
  5757  		Replicas: 3,
  5758  	})
  5759  	require_NoError(t, err)
  5760  
  5761  	inbox := nats.NewInbox()
  5762  	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{
  5763  		DeliverSubject: inbox,
  5764  		Durable:        "dur",
  5765  		AckPolicy:      nats.AckExplicitPolicy,
  5766  		DeliverPolicy:  nats.DeliverNewPolicy,
  5767  		MaxDeliver:     3,
  5768  		AckWait:        250 * time.Millisecond,
  5769  		FilterSubject:  "foo.bar",
  5770  	})
  5771  	require_NoError(t, err)
  5772  
  5773  	c.waitOnConsumerLeader(globalAccountName, "TEST", "dur")
  5774  
  5775  	sendStreamMsg(t, nc, "foo.bar", "msg")
  5776  
  5777  	sub := natsSubSync(t, nc, inbox)
  5778  	for i := 0; i < 3; i++ {
  5779  		natsNexMsg(t, sub, time.Second)
  5780  	}
  5781  	// Now check that there is no more redeliveries
  5782  	if msg, err := sub.NextMsg(300 * time.Millisecond); err != nats.ErrTimeout {
  5783  		t.Fatalf("Expected timeout, got msg=%+v err=%v", msg, err)
  5784  	}
  5785  
  5786  	// Check server restart
  5787  	nc.Close()
  5788  	c.stopAll()
  5789  	c.restartAll()
  5790  
  5791  	c.waitOnConsumerLeader(globalAccountName, "TEST", "dur")
  5792  
  5793  	nc, _ = jsClientConnect(t, c.randomServer())
  5794  	defer nc.Close()
  5795  
  5796  	sub = natsSubSync(t, nc, inbox)
  5797  	// We should not have messages being redelivered.
  5798  	if msg, err := sub.NextMsg(300 * time.Millisecond); err != nats.ErrTimeout {
  5799  		t.Fatalf("Expected timeout, got msg=%+v err=%v", msg, err)
  5800  	}
  5801  }
  5802  
  5803  func TestJetStreamClusterNoRestartAdvisories(t *testing.T) {
  5804  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  5805  	defer c.shutdown()
  5806  
  5807  	nc, js := jsClientConnect(t, c.randomServer())
  5808  	defer nc.Close()
  5809  
  5810  	_, err := js.AddStream(&nats.StreamConfig{
  5811  		Name:     "TEST",
  5812  		Subjects: []string{"foo"},
  5813  		Replicas: 3,
  5814  	})
  5815  	require_NoError(t, err)
  5816  
  5817  	_, err = js.AddStream(&nats.StreamConfig{
  5818  		Name:     "TEST2",
  5819  		Subjects: []string{"bar"},
  5820  		Replicas: 1,
  5821  	})
  5822  	require_NoError(t, err)
  5823  
  5824  	// Create 10 consumers
  5825  	for i := 0; i < 10; i++ {
  5826  		dur := fmt.Sprintf("dlc-%d", i)
  5827  		_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: dur, AckPolicy: nats.AckExplicitPolicy})
  5828  		require_NoError(t, err)
  5829  	}
  5830  
  5831  	msg := bytes.Repeat([]byte("Z"), 1024)
  5832  	for i := 0; i < 1000; i++ {
  5833  		js.PublishAsync("foo", msg)
  5834  		js.PublishAsync("bar", msg)
  5835  	}
  5836  	select {
  5837  	case <-js.PublishAsyncComplete():
  5838  	case <-time.After(5 * time.Second):
  5839  		t.Fatalf("Did not receive completion signal")
  5840  	}
  5841  
  5842  	// Add state to a consumer.
  5843  	sub, err := js.PullSubscribe("foo", "dlc-2")
  5844  	require_NoError(t, err)
  5845  	for _, m := range fetchMsgs(t, sub, 5, time.Second) {
  5846  		m.AckSync()
  5847  	}
  5848  	nc.Close()
  5849  
  5850  	// Required to show the bug.
  5851  	c.leader().JetStreamSnapshotMeta()
  5852  
  5853  	nc, _ = jsClientConnect(t, c.consumerLeader("$G", "TEST", "dlc-2"))
  5854  	defer nc.Close()
  5855  
  5856  	sub, err = nc.SubscribeSync("$JS.EVENT.ADVISORY.API")
  5857  	require_NoError(t, err)
  5858  
  5859  	// Shutdown and Restart.
  5860  	s := c.randomNonConsumerLeader("$G", "TEST", "dlc-2")
  5861  	s.Shutdown()
  5862  	s = c.restartServer(s)
  5863  	c.waitOnServerHealthz(s)
  5864  
  5865  	checkSubsPending(t, sub, 0)
  5866  
  5867  	nc, _ = jsClientConnect(t, c.randomNonStreamLeader("$G", "TEST"))
  5868  	defer nc.Close()
  5869  
  5870  	sub, err = nc.SubscribeSync("$JS.EVENT.ADVISORY.STREAM.UPDATED.>")
  5871  	require_NoError(t, err)
  5872  
  5873  	s = c.streamLeader("$G", "TEST2")
  5874  	s.Shutdown()
  5875  	s = c.restartServer(s)
  5876  	c.waitOnServerHealthz(s)
  5877  
  5878  	checkSubsPending(t, sub, 0)
  5879  }
  5880  
  5881  func TestJetStreamClusterR1StreamPlacementNoReservation(t *testing.T) {
  5882  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  5883  	defer c.shutdown()
  5884  
  5885  	nc, js := jsClientConnect(t, c.randomServer())
  5886  	defer nc.Close()
  5887  
  5888  	sp := make(map[string]int)
  5889  	for i := 0; i < 100; i++ {
  5890  		sname := fmt.Sprintf("T-%d", i)
  5891  		_, err := js.AddStream(&nats.StreamConfig{
  5892  			Name: sname,
  5893  		})
  5894  		require_NoError(t, err)
  5895  		sp[c.streamLeader("$G", sname).Name()]++
  5896  	}
  5897  
  5898  	for serverName, num := range sp {
  5899  		if num > 60 {
  5900  			t.Fatalf("Streams not distributed, expected ~30-35 but got %d for server %q", num, serverName)
  5901  		}
  5902  	}
  5903  }
  5904  
  5905  func TestJetStreamClusterConsumerAndStreamNamesWithPathSeparators(t *testing.T) {
  5906  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  5907  	defer c.shutdown()
  5908  
  5909  	nc, js := jsClientConnect(t, c.randomServer())
  5910  	defer nc.Close()
  5911  
  5912  	_, err := js.AddStream(&nats.StreamConfig{Name: "usr/bin"})
  5913  	require_Error(t, err, NewJSStreamNameContainsPathSeparatorsError(), nats.ErrInvalidStreamName)
  5914  	_, err = js.AddStream(&nats.StreamConfig{Name: `Documents\readme.txt`})
  5915  	require_Error(t, err, NewJSStreamNameContainsPathSeparatorsError(), nats.ErrInvalidStreamName)
  5916  
  5917  	// Now consumers.
  5918  	_, err = js.AddStream(&nats.StreamConfig{Name: "T"})
  5919  	require_NoError(t, err)
  5920  
  5921  	_, err = js.AddConsumer("T", &nats.ConsumerConfig{Durable: "a/b", AckPolicy: nats.AckExplicitPolicy})
  5922  	require_Error(t, err, NewJSConsumerNameContainsPathSeparatorsError(), nats.ErrInvalidConsumerName)
  5923  
  5924  	_, err = js.AddConsumer("T", &nats.ConsumerConfig{Durable: `a\b`, AckPolicy: nats.AckExplicitPolicy})
  5925  	require_Error(t, err, NewJSConsumerNameContainsPathSeparatorsError(), nats.ErrInvalidConsumerName)
  5926  }
  5927  
  5928  func TestJetStreamClusterFilteredMirrors(t *testing.T) {
  5929  	c := createJetStreamClusterExplicit(t, "MSR", 3)
  5930  	defer c.shutdown()
  5931  
  5932  	// Client for API requests.
  5933  	nc, js := jsClientConnect(t, c.randomServer())
  5934  	defer nc.Close()
  5935  
  5936  	// Origin
  5937  	_, err := js.AddStream(&nats.StreamConfig{
  5938  		Name:     "TEST",
  5939  		Subjects: []string{"foo", "bar", "baz"},
  5940  	})
  5941  	require_NoError(t, err)
  5942  
  5943  	msg := bytes.Repeat([]byte("Z"), 3)
  5944  	for i := 0; i < 100; i++ {
  5945  		js.PublishAsync("foo", msg)
  5946  		js.PublishAsync("bar", msg)
  5947  		js.PublishAsync("baz", msg)
  5948  	}
  5949  	select {
  5950  	case <-js.PublishAsyncComplete():
  5951  	case <-time.After(5 * time.Second):
  5952  		t.Fatalf("Did not receive completion signal")
  5953  	}
  5954  
  5955  	// Create Mirror now.
  5956  	_, err = js.AddStream(&nats.StreamConfig{
  5957  		Name:   "M",
  5958  		Mirror: &nats.StreamSource{Name: "TEST", FilterSubject: "foo"},
  5959  	})
  5960  	require_NoError(t, err)
  5961  
  5962  	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
  5963  		si, err := js.StreamInfo("M")
  5964  		require_NoError(t, err)
  5965  		if si.State.Msgs != 100 {
  5966  			return fmt.Errorf("Expected 100 msgs, got state: %+v", si.State)
  5967  		}
  5968  		return nil
  5969  	})
  5970  
  5971  	sub, err := js.PullSubscribe("foo", "d", nats.BindStream("M"))
  5972  	require_NoError(t, err)
  5973  
  5974  	// Make sure we only have "foo" and that sequence numbers preserved.
  5975  	sseq, dseq := uint64(1), uint64(1)
  5976  	for _, m := range fetchMsgs(t, sub, 100, 5*time.Second) {
  5977  		require_True(t, m.Subject == "foo")
  5978  		meta, err := m.Metadata()
  5979  		require_NoError(t, err)
  5980  		require_True(t, meta.Sequence.Consumer == dseq)
  5981  		dseq++
  5982  		require_True(t, meta.Sequence.Stream == sseq)
  5983  		sseq += 3
  5984  	}
  5985  }
  5986  
  5987  // Test for making sure we error on same cluster name.
  5988  func TestJetStreamClusterSameClusterLeafNodes(t *testing.T) {
  5989  	c := createJetStreamCluster(t, jsClusterAccountsTempl, "SAME", _EMPTY_, 3, 11233, true)
  5990  	defer c.shutdown()
  5991  
  5992  	// Do by hand since by default we check for connections.
  5993  	tmpl := c.createLeafSolicit(jsClusterTemplWithLeafNode)
  5994  	lc := createJetStreamCluster(t, tmpl, "SAME", "S-", 2, 22111, false)
  5995  	defer lc.shutdown()
  5996  
  5997  	time.Sleep(200 * time.Millisecond)
  5998  
  5999  	// Make sure no leafnodes are connected.
  6000  	for _, s := range lc.servers {
  6001  		checkLeafNodeConnectedCount(t, s, 0)
  6002  	}
  6003  }
  6004  
  6005  // https://github.com/nats-io/nats-server/issues/3178
  6006  func TestJetStreamClusterLeafNodeSPOFMigrateLeaders(t *testing.T) {
  6007  	tmpl := strings.Replace(jsClusterTempl, "store_dir:", "domain: REMOTE, store_dir:", 1)
  6008  	c := createJetStreamClusterWithTemplate(t, tmpl, "HUB", 2)
  6009  	defer c.shutdown()
  6010  
  6011  	tmpl = strings.Replace(jsClusterTemplWithLeafNode, "store_dir:", "domain: CORE, store_dir:", 1)
  6012  	lnc := c.createLeafNodesWithTemplateAndStartPort(tmpl, "LNC", 2, 22110)
  6013  	defer lnc.shutdown()
  6014  
  6015  	lnc.waitOnClusterReady()
  6016  
  6017  	// Place JS assets in LN, and we will do a pull consumer from the HUB.
  6018  	nc, js := jsClientConnect(t, lnc.randomServer())
  6019  	defer nc.Close()
  6020  
  6021  	si, err := js.AddStream(&nats.StreamConfig{
  6022  		Name:     "TEST",
  6023  		Subjects: []string{"foo"},
  6024  		Replicas: 2,
  6025  	})
  6026  	require_NoError(t, err)
  6027  	require_True(t, si.Cluster.Name == "LNC")
  6028  
  6029  	for i := 0; i < 100; i++ {
  6030  		js.PublishAsync("foo", []byte("HELLO"))
  6031  	}
  6032  	select {
  6033  	case <-js.PublishAsyncComplete():
  6034  	case <-time.After(5 * time.Second):
  6035  		t.Fatalf("Did not receive completion signal")
  6036  	}
  6037  
  6038  	// Create the consumer.
  6039  	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "d", AckPolicy: nats.AckExplicitPolicy})
  6040  	require_NoError(t, err)
  6041  
  6042  	nc, _ = jsClientConnect(t, c.randomServer())
  6043  	defer nc.Close()
  6044  
  6045  	dsubj := "$JS.CORE.API.CONSUMER.MSG.NEXT.TEST.d"
  6046  	// Grab directly using domain based subject but from the HUB cluster.
  6047  	_, err = nc.Request(dsubj, nil, time.Second)
  6048  	require_NoError(t, err)
  6049  
  6050  	// Now we will force the consumer leader's server to drop and stall leafnode connections.
  6051  	cl := lnc.consumerLeader("$G", "TEST", "d")
  6052  	cl.setJetStreamMigrateOnRemoteLeaf()
  6053  	cl.closeAndDisableLeafnodes()
  6054  
  6055  	// Now make sure we can eventually get a message again.
  6056  	checkFor(t, 5*time.Second, 500*time.Millisecond, func() error {
  6057  		_, err = nc.Request(dsubj, nil, 500*time.Millisecond)
  6058  		return err
  6059  	})
  6060  
  6061  	nc, _ = jsClientConnect(t, lnc.randomServer())
  6062  	defer nc.Close()
  6063  
  6064  	// Now make sure the consumer, or any other asset, can not become a leader on this node while the leafnode
  6065  	// is disconnected.
  6066  	csd := fmt.Sprintf(JSApiConsumerLeaderStepDownT, "TEST", "d")
  6067  	for i := 0; i < 10; i++ {
  6068  		nc.Request(csd, nil, time.Second)
  6069  		lnc.waitOnConsumerLeader(globalAccountName, "TEST", "d")
  6070  		if lnc.consumerLeader(globalAccountName, "TEST", "d") == cl {
  6071  			t.Fatalf("Consumer leader should not migrate to server without a leafnode connection")
  6072  		}
  6073  	}
  6074  
  6075  	// Now make sure once leafnode is back we can have leaders on this server.
  6076  	cl.reEnableLeafnodes()
  6077  	checkLeafNodeConnectedCount(t, cl, 2)
  6078  
  6079  	// Make sure we can migrate back to this server now that we are connected.
  6080  	checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
  6081  		nc.Request(csd, nil, time.Second)
  6082  		lnc.waitOnConsumerLeader(globalAccountName, "TEST", "d")
  6083  		if lnc.consumerLeader(globalAccountName, "TEST", "d") == cl {
  6084  			return nil
  6085  		}
  6086  		return fmt.Errorf("Not this server yet")
  6087  	})
  6088  }
  6089  
  6090  func TestJetStreamClusterStreamCatchupWithTruncateAndPriorSnapshot(t *testing.T) {
  6091  	c := createJetStreamClusterExplicit(t, "R3F", 3)
  6092  	defer c.shutdown()
  6093  
  6094  	// Client based API
  6095  	s := c.randomServer()
  6096  	nc, js := jsClientConnect(t, s)
  6097  	defer nc.Close()
  6098  
  6099  	_, err := js.AddStream(&nats.StreamConfig{
  6100  		Name:     "TEST",
  6101  		Subjects: []string{"foo"},
  6102  		Replicas: 3,
  6103  	})
  6104  	require_NoError(t, err)
  6105  
  6106  	// Shutdown a replica
  6107  	rs := c.randomNonStreamLeader("$G", "TEST")
  6108  	rs.Shutdown()
  6109  	if s == rs {
  6110  		nc.Close()
  6111  		s = c.randomServer()
  6112  		nc, js = jsClientConnect(t, s)
  6113  		defer nc.Close()
  6114  	}
  6115  
  6116  	msg, toSend := []byte("OK"), 100
  6117  	for i := 0; i < toSend; i++ {
  6118  		_, err := js.PublishAsync("foo", msg)
  6119  		require_NoError(t, err)
  6120  	}
  6121  	select {
  6122  	case <-js.PublishAsyncComplete():
  6123  	case <-time.After(2 * time.Second):
  6124  		t.Fatalf("Did not receive completion signal")
  6125  	}
  6126  
  6127  	sl := c.streamLeader("$G", "TEST")
  6128  	mset, err := sl.GlobalAccount().lookupStream("TEST")
  6129  	require_NoError(t, err)
  6130  
  6131  	// Force snapshot
  6132  	require_NoError(t, mset.raftNode().InstallSnapshot(mset.stateSnapshot()))
  6133  
  6134  	// Now truncate the store on purpose.
  6135  	err = mset.store.Truncate(50)
  6136  	require_NoError(t, err)
  6137  
  6138  	// Restart Server.
  6139  	rs = c.restartServer(rs)
  6140  
  6141  	// Make sure we can become current.
  6142  	// With bug we would fail here.
  6143  	c.waitOnStreamCurrent(rs, "$G", "TEST")
  6144  }
  6145  
  6146  func TestJetStreamClusterNoOrphanedDueToNoConnection(t *testing.T) {
  6147  	orgEventsHBInterval := eventsHBInterval
  6148  	eventsHBInterval = 500 * time.Millisecond
  6149  	defer func() { eventsHBInterval = orgEventsHBInterval }()
  6150  
  6151  	c := createJetStreamClusterExplicit(t, "R3F", 3)
  6152  	defer c.shutdown()
  6153  
  6154  	s := c.randomServer()
  6155  	nc, js := jsClientConnect(t, s)
  6156  	defer nc.Close()
  6157  
  6158  	_, err := js.AddStream(&nats.StreamConfig{
  6159  		Name:     "TEST",
  6160  		Subjects: []string{"foo"},
  6161  		Replicas: 3,
  6162  	})
  6163  	require_NoError(t, err)
  6164  
  6165  	checkSysServers := func() {
  6166  		t.Helper()
  6167  		checkFor(t, 2*time.Second, 15*time.Millisecond, func() error {
  6168  			for _, s := range c.servers {
  6169  				s.mu.RLock()
  6170  				num := len(s.sys.servers)
  6171  				s.mu.RUnlock()
  6172  				if num != 2 {
  6173  					return fmt.Errorf("Expected server %q to have 2 servers, got %v", s, num)
  6174  				}
  6175  			}
  6176  			return nil
  6177  		})
  6178  	}
  6179  
  6180  	checkSysServers()
  6181  	nc.Close()
  6182  
  6183  	s.mu.RLock()
  6184  	val := (s.sys.orphMax / eventsHBInterval) + 2
  6185  	s.mu.RUnlock()
  6186  	time.Sleep(val * eventsHBInterval)
  6187  	checkSysServers()
  6188  }
  6189  
  6190  func TestJetStreamClusterStreamResetOnExpirationDuringPeerDownAndRestartWithLeaderChange(t *testing.T) {
  6191  	c := createJetStreamClusterExplicit(t, "R3F", 3)
  6192  	defer c.shutdown()
  6193  
  6194  	s := c.randomServer()
  6195  	nc, js := jsClientConnect(t, s)
  6196  	defer nc.Close()
  6197  
  6198  	_, err := js.AddStream(&nats.StreamConfig{
  6199  		Name:     "TEST",
  6200  		Subjects: []string{"foo"},
  6201  		Replicas: 3,
  6202  		MaxAge:   time.Second,
  6203  	})
  6204  	require_NoError(t, err)
  6205  
  6206  	n := 100
  6207  	for i := 0; i < n; i++ {
  6208  		js.PublishAsync("foo", []byte("NORESETPLS"))
  6209  	}
  6210  	select {
  6211  	case <-js.PublishAsyncComplete():
  6212  	case <-time.After(5 * time.Second):
  6213  		t.Fatalf("Did not receive completion signal")
  6214  	}
  6215  
  6216  	// Shutdown a non-leader before expiration.
  6217  	nsl := c.randomNonStreamLeader("$G", "TEST")
  6218  	nsl.Shutdown()
  6219  
  6220  	// Wait for all messages to expire.
  6221  	checkFor(t, 5*time.Second, time.Second, func() error {
  6222  		si, err := js.StreamInfo("TEST")
  6223  		require_NoError(t, err)
  6224  		if si.State.Msgs == 0 {
  6225  			return nil
  6226  		}
  6227  		return fmt.Errorf("Wanted 0 messages, got %d", si.State.Msgs)
  6228  	})
  6229  
  6230  	// Now restart the non-leader server, twice. First time clears raft,
  6231  	// second will not have any index state or raft to tell it what is first sequence.
  6232  	nsl = c.restartServer(nsl)
  6233  	c.checkClusterFormed()
  6234  	c.waitOnServerCurrent(nsl)
  6235  
  6236  	// Now clear raft WAL.
  6237  	mset, err := nsl.GlobalAccount().lookupStream("TEST")
  6238  	require_NoError(t, err)
  6239  	require_NoError(t, mset.raftNode().InstallSnapshot(mset.stateSnapshot()))
  6240  
  6241  	nsl.Shutdown()
  6242  	nsl = c.restartServer(nsl)
  6243  	c.checkClusterFormed()
  6244  	c.waitOnServerCurrent(nsl)
  6245  
  6246  	// We will now check this server directly.
  6247  	mset, err = nsl.GlobalAccount().lookupStream("TEST")
  6248  	require_NoError(t, err)
  6249  
  6250  	if state := mset.state(); state.FirstSeq != uint64(n+1) || state.LastSeq != uint64(n) {
  6251  		t.Fatalf("Expected first sequence of %d, got %d", n+1, state.FirstSeq)
  6252  	}
  6253  
  6254  	si, err := js.StreamInfo("TEST")
  6255  	require_NoError(t, err)
  6256  	if state := si.State; state.FirstSeq != uint64(n+1) || state.LastSeq != uint64(n) {
  6257  		t.Fatalf("Expected first sequence of %d, got %d", n+1, state.FirstSeq)
  6258  	}
  6259  
  6260  	// Now move the leader there and double check, but above test is sufficient.
  6261  	checkFor(t, 10*time.Second, 250*time.Millisecond, func() error {
  6262  		_, err = nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST"), nil, time.Second)
  6263  		require_NoError(t, err)
  6264  		c.waitOnStreamLeader("$G", "TEST")
  6265  		if c.streamLeader("$G", "TEST") == nsl {
  6266  			return nil
  6267  		}
  6268  		return fmt.Errorf("No correct leader yet")
  6269  	})
  6270  
  6271  	if state := mset.state(); state.FirstSeq != uint64(n+1) || state.LastSeq != uint64(n) {
  6272  		t.Fatalf("Expected first sequence of %d, got %d", n+1, state.FirstSeq)
  6273  	}
  6274  
  6275  	si, err = js.StreamInfo("TEST")
  6276  	require_NoError(t, err)
  6277  	if state := si.State; state.FirstSeq != uint64(n+1) || state.LastSeq != uint64(n) {
  6278  		t.Fatalf("Expected first sequence of %d, got %d", n+1, state.FirstSeq)
  6279  	}
  6280  }
  6281  
  6282  func TestJetStreamClusterPullConsumerMaxWaiting(t *testing.T) {
  6283  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  6284  	defer c.shutdown()
  6285  
  6286  	nc, js := jsClientConnect(t, c.randomServer())
  6287  	defer nc.Close()
  6288  
  6289  	_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"test.*"}})
  6290  	require_NoError(t, err)
  6291  
  6292  	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{
  6293  		Durable:    "dur",
  6294  		AckPolicy:  nats.AckExplicitPolicy,
  6295  		MaxWaiting: 10,
  6296  	})
  6297  	require_NoError(t, err)
  6298  
  6299  	// Cannot be updated.
  6300  	_, err = js.UpdateConsumer("TEST", &nats.ConsumerConfig{
  6301  		Durable:    "dur",
  6302  		AckPolicy:  nats.AckExplicitPolicy,
  6303  		MaxWaiting: 1,
  6304  	})
  6305  	if !strings.Contains(err.Error(), "can not be updated") {
  6306  		t.Fatalf(`expected "cannot be updated" error, got %s`, err)
  6307  	}
  6308  }
  6309  
  6310  func TestJetStreamClusterEncryptedDoubleSnapshotBug(t *testing.T) {
  6311  	c := createJetStreamClusterWithTemplate(t, jsClusterEncryptedTempl, "JSC", 3)
  6312  	defer c.shutdown()
  6313  
  6314  	nc, js := jsClientConnect(t, c.randomServer())
  6315  	defer nc.Close()
  6316  
  6317  	_, err := js.AddStream(&nats.StreamConfig{
  6318  		Name:     "TEST",
  6319  		Subjects: []string{"foo"},
  6320  		MaxAge:   time.Second,
  6321  		Replicas: 3,
  6322  	})
  6323  	require_NoError(t, err)
  6324  
  6325  	numMsgs := 50
  6326  	for i := 0; i < numMsgs; i++ {
  6327  		js.PublishAsync("foo", []byte("SNAP"))
  6328  	}
  6329  	select {
  6330  	case <-js.PublishAsyncComplete():
  6331  	case <-time.After(5 * time.Second):
  6332  		t.Fatalf("Did not receive completion signal")
  6333  	}
  6334  
  6335  	// Perform a snapshot on a follower.
  6336  	nl := c.randomNonStreamLeader("$G", "TEST")
  6337  	mset, err := nl.GlobalAccount().lookupStream("TEST")
  6338  	require_NoError(t, err)
  6339  	err = mset.raftNode().InstallSnapshot(mset.stateSnapshot())
  6340  	require_NoError(t, err)
  6341  
  6342  	_, err = js.Publish("foo", []byte("SNAP2"))
  6343  	require_NoError(t, err)
  6344  
  6345  	for _, seq := range []uint64{1, 11, 22, 51} {
  6346  		js.DeleteMsg("TEST", seq)
  6347  	}
  6348  
  6349  	err = mset.raftNode().InstallSnapshot(mset.stateSnapshot())
  6350  	require_NoError(t, err)
  6351  
  6352  	_, err = js.Publish("foo", []byte("SNAP3"))
  6353  	require_NoError(t, err)
  6354  }
  6355  
  6356  func TestJetStreamClusterRePublishUpdateSupported(t *testing.T) {
  6357  	test := func(t *testing.T, s *Server, stream string, replicas int) {
  6358  		nc, js := jsClientConnect(t, s)
  6359  		defer nc.Close()
  6360  
  6361  		cfg := &nats.StreamConfig{
  6362  			Name:     stream,
  6363  			Storage:  nats.MemoryStorage,
  6364  			Replicas: replicas,
  6365  			Subjects: []string{"foo.>"},
  6366  		}
  6367  		_, err := js.AddStream(cfg)
  6368  		require_NoError(t, err)
  6369  
  6370  		expectUpdate := func() {
  6371  			t.Helper()
  6372  
  6373  			req, err := json.Marshal(cfg)
  6374  			require_NoError(t, err)
  6375  			rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamUpdateT, cfg.Name), req, time.Second)
  6376  			require_NoError(t, err)
  6377  			var resp JSApiStreamCreateResponse
  6378  			err = json.Unmarshal(rmsg.Data, &resp)
  6379  			require_NoError(t, err)
  6380  			if resp.Type != JSApiStreamUpdateResponseType {
  6381  				t.Fatalf("Invalid response type %s expected %s", resp.Type, JSApiStreamUpdateResponseType)
  6382  			}
  6383  			if IsNatsErr(resp.Error, JSStreamInvalidConfigF) {
  6384  				t.Fatalf("Expected no error regarding config error, got %+v", resp.Error)
  6385  			}
  6386  		}
  6387  
  6388  		expectRepublished := func(expectedRepub bool) {
  6389  			t.Helper()
  6390  
  6391  			nc, js := jsClientConnect(t, s)
  6392  			defer nc.Close()
  6393  
  6394  			// Create a subscriber for foo.> so that we can see
  6395  			// our published message being echoed back to us.
  6396  			sf, err := nc.SubscribeSync("foo.>")
  6397  			require_NoError(t, err)
  6398  			defer sf.Unsubscribe()
  6399  
  6400  			// Create a subscriber for bar.> so that we can see
  6401  			// any potentially republished messages.
  6402  			sb, err := nc.SubscribeSync("bar.>")
  6403  			require_NoError(t, err)
  6404  			defer sf.Unsubscribe()
  6405  
  6406  			// Publish a message, it will hit the foo.> stream and
  6407  			// may potentially be republished to the bar.> stream.
  6408  			_, err = js.Publish("foo."+stream, []byte("HELLO!"))
  6409  			require_NoError(t, err)
  6410  
  6411  			// Wait for a little while so that we have enough time
  6412  			// to determine whether it's going to arrive on one or
  6413  			// both streams.
  6414  			checkSubsPending(t, sf, 1)
  6415  			if expectedRepub {
  6416  				checkSubsPending(t, sb, 1)
  6417  			} else {
  6418  				checkSubsPending(t, sb, 0)
  6419  			}
  6420  		}
  6421  
  6422  		// At this point there's no republish config, so we should
  6423  		// only receive our published message on foo.>.
  6424  		expectRepublished(false)
  6425  
  6426  		// Add a republish config so that everything on foo.> also
  6427  		// gets republished to bar.>.
  6428  		cfg.RePublish = &nats.RePublish{
  6429  			Source:      "foo.>",
  6430  			Destination: "bar.>",
  6431  		}
  6432  		expectUpdate()
  6433  		expectRepublished(true)
  6434  
  6435  		// Now take the republish config away again, so we should go
  6436  		// back to only getting them on foo.>.
  6437  		cfg.RePublish = nil
  6438  		expectUpdate()
  6439  		expectRepublished(false)
  6440  	}
  6441  
  6442  	t.Run("Single", func(t *testing.T) {
  6443  		s := RunBasicJetStreamServer(t)
  6444  		defer s.Shutdown()
  6445  
  6446  		test(t, s, "single", 1)
  6447  	})
  6448  	t.Run("Clustered", func(t *testing.T) {
  6449  		c := createJetStreamClusterExplicit(t, "JSC", 3)
  6450  		defer c.shutdown()
  6451  
  6452  		test(t, c.randomNonLeader(), "clustered", 3)
  6453  	})
  6454  }
  6455  
  6456  func TestJetStreamClusterDirectGetFromLeafnode(t *testing.T) {
  6457  	tmpl := strings.Replace(jsClusterAccountsTempl, "store_dir:", "domain: CORE, store_dir:", 1)
  6458  	c := createJetStreamCluster(t, tmpl, "CORE", _EMPTY_, 3, 19022, true)
  6459  	defer c.shutdown()
  6460  
  6461  	tmpl = strings.Replace(jsClusterTemplWithSingleLeafNode, "store_dir:", "domain: SPOKE, store_dir:", 1)
  6462  	ln := c.createLeafNodeWithTemplate("LN-SPOKE", tmpl)
  6463  	defer ln.Shutdown()
  6464  
  6465  	checkLeafNodeConnectedCount(t, ln, 2)
  6466  
  6467  	nc, js := jsClientConnect(t, c.randomServer())
  6468  	defer nc.Close()
  6469  
  6470  	kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "KV"})
  6471  	require_NoError(t, err)
  6472  
  6473  	_, err = kv.PutString("age", "22")
  6474  	require_NoError(t, err)
  6475  
  6476  	// Now connect to the ln and make sure we can do a domain direct get.
  6477  	nc, _ = jsClientConnect(t, ln)
  6478  	defer nc.Close()
  6479  
  6480  	js, err = nc.JetStream(nats.Domain("CORE"))
  6481  	require_NoError(t, err)
  6482  
  6483  	kv, err = js.KeyValue("KV")
  6484  	require_NoError(t, err)
  6485  
  6486  	entry, err := kv.Get("age")
  6487  	require_NoError(t, err)
  6488  	require_True(t, string(entry.Value()) == "22")
  6489  }
  6490  
  6491  func TestJetStreamClusterUnknownReplicaOnClusterRestart(t *testing.T) {
  6492  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  6493  	defer c.shutdown()
  6494  
  6495  	nc, js := jsClientConnect(t, c.randomServer())
  6496  	defer nc.Close()
  6497  
  6498  	_, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"foo"}, Replicas: 3})
  6499  	require_NoError(t, err)
  6500  
  6501  	c.waitOnStreamLeader(globalAccountName, "TEST")
  6502  	lname := c.streamLeader(globalAccountName, "TEST").Name()
  6503  	sendStreamMsg(t, nc, "foo", "msg1")
  6504  
  6505  	nc.Close()
  6506  	c.stopAll()
  6507  	// Restart the leader...
  6508  	for _, s := range c.servers {
  6509  		if s.Name() == lname {
  6510  			c.restartServer(s)
  6511  		}
  6512  	}
  6513  	// And one of the other servers
  6514  	for _, s := range c.servers {
  6515  		if s.Name() != lname {
  6516  			c.restartServer(s)
  6517  			break
  6518  		}
  6519  	}
  6520  	c.waitOnStreamLeader(globalAccountName, "TEST")
  6521  
  6522  	nc, js = jsClientConnect(t, c.randomServer())
  6523  	defer nc.Close()
  6524  	sendStreamMsg(t, nc, "foo", "msg2")
  6525  
  6526  	si, err := js.StreamInfo("TEST")
  6527  	require_NoError(t, err)
  6528  	if len(si.Cluster.Replicas) != 2 {
  6529  		t.Fatalf("Leader is %s - expected 2 peers, got %+v", si.Cluster.Leader, si.Cluster.Replicas[0])
  6530  	}
  6531  	// However, since the leader does not know the name of the server
  6532  	// we should report an "unknown" name.
  6533  	var ok bool
  6534  	for _, r := range si.Cluster.Replicas {
  6535  		if strings.Contains(r.Name, "unknown") {
  6536  			// Check that it has no lag reported, and the it is not current.
  6537  			if r.Current {
  6538  				t.Fatal("Expected non started node to be marked as not current")
  6539  			}
  6540  			if r.Lag != 0 {
  6541  				t.Fatalf("Expected lag to not be set, was %v", r.Lag)
  6542  			}
  6543  			if r.Active != 0 {
  6544  				t.Fatalf("Expected active to not be set, was: %v", r.Active)
  6545  			}
  6546  			ok = true
  6547  			break
  6548  		}
  6549  	}
  6550  	if !ok {
  6551  		t.Fatalf("Should have had an unknown server name, did not: %+v - %+v", si.Cluster.Replicas[0], si.Cluster.Replicas[1])
  6552  	}
  6553  }
  6554  
  6555  func TestJetStreamClusterSnapshotBeforePurgeAndCatchup(t *testing.T) {
  6556  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  6557  	defer c.shutdown()
  6558  
  6559  	nc, js := jsClientConnect(t, c.randomServer())
  6560  	defer nc.Close()
  6561  
  6562  	_, err := js.AddStream(&nats.StreamConfig{
  6563  		Name:     "TEST",
  6564  		Subjects: []string{"foo"},
  6565  		MaxAge:   5 * time.Second,
  6566  		Replicas: 3,
  6567  	})
  6568  	require_NoError(t, err)
  6569  
  6570  	sl := c.streamLeader("$G", "TEST")
  6571  	nl := c.randomNonStreamLeader("$G", "TEST")
  6572  
  6573  	// Make sure we do not get disconnected when shutting the non-leader down.
  6574  	nc, js = jsClientConnect(t, sl)
  6575  	defer nc.Close()
  6576  
  6577  	send1k := func() {
  6578  		t.Helper()
  6579  		for i := 0; i < 1000; i++ {
  6580  			js.PublishAsync("foo", []byte("SNAP"))
  6581  		}
  6582  		select {
  6583  		case <-js.PublishAsyncComplete():
  6584  		case <-time.After(5 * time.Second):
  6585  			t.Fatalf("Did not receive completion signal")
  6586  		}
  6587  	}
  6588  
  6589  	// Send first 1000 to everyone.
  6590  	send1k()
  6591  
  6592  	// Now shutdown a non-leader.
  6593  	c.waitOnStreamCurrent(nl, "$G", "TEST")
  6594  	nl.Shutdown()
  6595  
  6596  	// Send another 1000.
  6597  	send1k()
  6598  
  6599  	// Force snapshot on the leader.
  6600  	mset, err := sl.GlobalAccount().lookupStream("TEST")
  6601  	require_NoError(t, err)
  6602  	err = mset.raftNode().InstallSnapshot(mset.stateSnapshot())
  6603  	require_NoError(t, err)
  6604  
  6605  	// Purge
  6606  	err = js.PurgeStream("TEST")
  6607  	require_NoError(t, err)
  6608  
  6609  	// Send another 1000.
  6610  	send1k()
  6611  
  6612  	// We want to make sure we do not send unnecessary skip msgs when we know we do not have all of these messages.
  6613  	nc, _ = jsClientConnect(t, sl, nats.UserInfo("admin", "s3cr3t!"))
  6614  	defer nc.Close()
  6615  	sub, err := nc.SubscribeSync("$JSC.R.>")
  6616  	require_NoError(t, err)
  6617  
  6618  	// Now restart non-leader.
  6619  	nl = c.restartServer(nl)
  6620  	c.waitOnStreamCurrent(nl, "$G", "TEST")
  6621  
  6622  	// Grab state directly from non-leader.
  6623  	mset, err = nl.GlobalAccount().lookupStream("TEST")
  6624  	require_NoError(t, err)
  6625  
  6626  	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
  6627  		if state := mset.state(); state.FirstSeq != 2001 || state.LastSeq != 3000 {
  6628  			return fmt.Errorf("Incorrect state: %+v", state)
  6629  		}
  6630  		return nil
  6631  	})
  6632  
  6633  	// Make sure we only sent 1002 sync catchup msgs.
  6634  	// This is for the new messages, the delete range, and the EOF.
  6635  	nmsgs, _, _ := sub.Pending()
  6636  	if nmsgs != 1002 {
  6637  		t.Fatalf("Expected only 1002 sync catchup msgs to be sent signaling eof, but got %d", nmsgs)
  6638  	}
  6639  }
  6640  
  6641  func TestJetStreamClusterStreamResetWithLargeFirstSeq(t *testing.T) {
  6642  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  6643  	defer c.shutdown()
  6644  
  6645  	nc, js := jsClientConnect(t, c.randomServer())
  6646  	defer nc.Close()
  6647  
  6648  	cfg := &nats.StreamConfig{
  6649  		Name:     "TEST",
  6650  		Subjects: []string{"foo"},
  6651  		MaxAge:   5 * time.Second,
  6652  		Replicas: 1,
  6653  	}
  6654  	_, err := js.AddStream(cfg)
  6655  	require_NoError(t, err)
  6656  
  6657  	// Fake a very large first seq.
  6658  	sl := c.streamLeader("$G", "TEST")
  6659  	mset, err := sl.GlobalAccount().lookupStream("TEST")
  6660  	require_NoError(t, err)
  6661  
  6662  	mset.mu.Lock()
  6663  	mset.store.Compact(1_000_000)
  6664  	mset.mu.Unlock()
  6665  	// Restart
  6666  	sl.Shutdown()
  6667  	sl = c.restartServer(sl)
  6668  	c.waitOnStreamLeader("$G", "TEST")
  6669  
  6670  	nc, js = jsClientConnect(t, c.randomServer())
  6671  	defer nc.Close()
  6672  
  6673  	// Make sure we have the correct state after restart.
  6674  	si, err := js.StreamInfo("TEST")
  6675  	require_NoError(t, err)
  6676  	require_True(t, si.State.FirstSeq == 1_000_000)
  6677  
  6678  	// Now add in 10,000 messages.
  6679  	num := 10_000
  6680  	for i := 0; i < num; i++ {
  6681  		js.PublishAsync("foo", []byte("SNAP"))
  6682  	}
  6683  	select {
  6684  	case <-js.PublishAsyncComplete():
  6685  	case <-time.After(5 * time.Second):
  6686  		t.Fatalf("Did not receive completion signal")
  6687  	}
  6688  
  6689  	si, err = js.StreamInfo("TEST")
  6690  	require_NoError(t, err)
  6691  	require_True(t, si.State.FirstSeq == 1_000_000)
  6692  	require_True(t, si.State.LastSeq == uint64(1_000_000+num-1))
  6693  
  6694  	// We want to make sure we do not send unnecessary skip msgs when we know we do not have all of these messages.
  6695  	ncs, _ := jsClientConnect(t, sl, nats.UserInfo("admin", "s3cr3t!"))
  6696  	defer nc.Close()
  6697  	sub, err := ncs.SubscribeSync("$JSC.R.>")
  6698  	require_NoError(t, err)
  6699  
  6700  	// Now scale up to R3.
  6701  	cfg.Replicas = 3
  6702  	_, err = js.UpdateStream(cfg)
  6703  	require_NoError(t, err)
  6704  	nl := c.randomNonStreamLeader("$G", "TEST")
  6705  	c.waitOnStreamCurrent(nl, "$G", "TEST")
  6706  
  6707  	// Make sure we only sent the number of catchup msgs we expected.
  6708  	checkFor(t, 5*time.Second, 50*time.Millisecond, func() error {
  6709  		if nmsgs, _, _ := sub.Pending(); nmsgs != (cfg.Replicas-1)*(num+1) {
  6710  			return fmt.Errorf("expected %d catchup msgs, but got %d", (cfg.Replicas-1)*(num+1), nmsgs)
  6711  		}
  6712  		return nil
  6713  	})
  6714  }
  6715  
  6716  func TestJetStreamClusterStreamCatchupInteriorNilMsgs(t *testing.T) {
  6717  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  6718  	defer c.shutdown()
  6719  
  6720  	nc, js := jsClientConnect(t, c.randomServer())
  6721  	defer nc.Close()
  6722  
  6723  	cfg := &nats.StreamConfig{
  6724  		Name:     "TEST",
  6725  		Subjects: []string{"foo"},
  6726  	}
  6727  	_, err := js.AddStream(cfg)
  6728  	require_NoError(t, err)
  6729  
  6730  	num := 100
  6731  	for l := 0; l < 5; l++ {
  6732  		for i := 0; i < num-1; i++ {
  6733  			js.PublishAsync("foo", []byte("SNAP"))
  6734  		}
  6735  		// Blank msg.
  6736  		js.PublishAsync("foo", nil)
  6737  	}
  6738  	select {
  6739  	case <-js.PublishAsyncComplete():
  6740  	case <-time.After(5 * time.Second):
  6741  		t.Fatalf("Did not receive completion signal")
  6742  	}
  6743  
  6744  	// Make sure we have the correct state after restart.
  6745  	si, err := js.StreamInfo("TEST")
  6746  	require_NoError(t, err)
  6747  	require_True(t, si.State.Msgs == 500)
  6748  
  6749  	// Now scale up to R3.
  6750  	cfg.Replicas = 3
  6751  	_, err = js.UpdateStream(cfg)
  6752  	require_NoError(t, err)
  6753  	nl := c.randomNonStreamLeader("$G", "TEST")
  6754  	c.waitOnStreamCurrent(nl, "$G", "TEST")
  6755  
  6756  	mset, err := nl.GlobalAccount().lookupStream("TEST")
  6757  	require_NoError(t, err)
  6758  
  6759  	mset.mu.RLock()
  6760  	state := mset.store.State()
  6761  	mset.mu.RUnlock()
  6762  	require_True(t, state.Msgs == 500)
  6763  }
  6764  
  6765  type captureCatchupWarnLogger struct {
  6766  	DummyLogger
  6767  	ch chan string
  6768  }
  6769  
  6770  func (l *captureCatchupWarnLogger) Warnf(format string, args ...interface{}) {
  6771  	msg := fmt.Sprintf(format, args...)
  6772  	if strings.Contains(msg, "simulate error") {
  6773  		select {
  6774  		case l.ch <- msg:
  6775  		default:
  6776  		}
  6777  	}
  6778  }
  6779  
  6780  type catchupMockStore struct {
  6781  	StreamStore
  6782  	ch chan uint64
  6783  }
  6784  
  6785  func (s catchupMockStore) LoadMsg(seq uint64, sm *StoreMsg) (*StoreMsg, error) {
  6786  	s.ch <- seq
  6787  	return s.StreamStore.LoadMsg(seq, sm)
  6788  }
  6789  
  6790  func TestJetStreamClusterLeaderAbortsCatchupOnFollowerError(t *testing.T) {
  6791  	c := createJetStreamClusterExplicit(t, "JSC", 3)
  6792  	defer c.shutdown()
  6793  
  6794  	nc, js := jsClientConnect(t, c.randomServer())
  6795  	defer nc.Close()
  6796  
  6797  	cfg := &nats.StreamConfig{
  6798  		Name:     "TEST",
  6799  		Subjects: []string{"foo"},
  6800  		Replicas: 3,
  6801  	}
  6802  	_, err := js.AddStream(cfg)
  6803  	require_NoError(t, err)
  6804  
  6805  	c.waitOnStreamLeader(globalAccountName, "TEST")
  6806  
  6807  	payload := string(make([]byte, 1024))
  6808  	total := 100
  6809  	for i := 0; i < total; i++ {
  6810  		sendStreamMsg(t, nc, "foo", payload)
  6811  	}
  6812  
  6813  	c.waitOnAllCurrent()
  6814  
  6815  	// Get the stream leader
  6816  	leader := c.streamLeader(globalAccountName, "TEST")
  6817  	mset, err := leader.GlobalAccount().lookupStream("TEST")
  6818  	require_NoError(t, err)
  6819  
  6820  	var syncSubj string
  6821  	mset.mu.RLock()
  6822  	if mset.syncSub != nil {
  6823  		syncSubj = string(mset.syncSub.subject)
  6824  	}
  6825  	mset.mu.RUnlock()
  6826  
  6827  	if syncSubj == _EMPTY_ {
  6828  		t.Fatal("Did not find the sync request subject")
  6829  	}
  6830  
  6831  	// Setup the logger on the leader to make sure we capture the error and print
  6832  	// and also stop the runCatchup.
  6833  	l := &captureCatchupWarnLogger{ch: make(chan string, 10)}
  6834  	leader.SetLogger(l, false, false)
  6835  
  6836  	// Set a fake message store that will allow us to verify
  6837  	// a few things.
  6838  	mset.mu.Lock()
  6839  	orgMS := mset.store
  6840  	ms := catchupMockStore{StreamStore: mset.store, ch: make(chan uint64)}
  6841  	mset.store = ms
  6842  	mset.mu.Unlock()
  6843  
  6844  	// Need the system account to simulate the sync request that we are going to send.
  6845  	sysNC := natsConnect(t, c.randomServer().ClientURL(), nats.UserInfo("admin", "s3cr3t!"))
  6846  	defer sysNC.Close()
  6847  	// Setup a subscription to receive the messages sent by the leader.
  6848  	sub := natsSubSync(t, sysNC, nats.NewInbox())
  6849  	req := &streamSyncRequest{
  6850  		FirstSeq: 1,
  6851  		LastSeq:  uint64(total),
  6852  		Peer:     "bozo", // Should be one of the node name, but does not matter here
  6853  	}
  6854  	b, _ := json.Marshal(req)
  6855  	// Send the sync request and use our sub's subject for destination where leader
  6856  	// needs to send messages to.
  6857  	natsPubReq(t, sysNC, syncSubj, sub.Subject, b)
  6858  
  6859  	// The mock store is blocked loading the first message, so we need to consume
  6860  	// the sequence before being able to receive the message in our sub.
  6861  	if seq := <-ms.ch; seq != 1 {
  6862  		t.Fatalf("Expected sequence to be 1, got %v", seq)
  6863  	}
  6864  
  6865  	// Now consume and the leader should report the error and terminate runCatchup
  6866  	msg := natsNexMsg(t, sub, time.Second)
  6867  	msg.Respond([]byte("simulate error"))
  6868  
  6869  	select {
  6870  	case <-l.ch:
  6871  		// OK
  6872  	case <-time.After(time.Second):
  6873  		t.Fatal("Did not get the expected error")
  6874  	}
  6875  
  6876  	// The mock store should be blocked in seq==2 now, but after consuming, it should
  6877  	// abort the runCatchup.
  6878  	if seq := <-ms.ch; seq != 2 {
  6879  		t.Fatalf("Expected sequence to be 2, got %v", seq)
  6880  	}
  6881  	// We may have some more messages loaded as a race between when the sub will
  6882  	// indicate that the catchup should stop and the part where we send messages
  6883  	// in the batch, but we should likely not have sent all messages.
  6884  	loaded := 0
  6885  	for done := false; !done; {
  6886  		select {
  6887  		case <-ms.ch:
  6888  			loaded++
  6889  		case <-time.After(250 * time.Millisecond):
  6890  			done = true
  6891  		}
  6892  	}
  6893  	if loaded > 10 {
  6894  		t.Fatalf("Too many messages were sent after detecting remote is done: %v", loaded)
  6895  	}
  6896  
  6897  	ch := make(chan string, 1)
  6898  	mset.mu.Lock()
  6899  	mset.store = orgMS
  6900  	leader.sysUnsubscribe(mset.syncSub)
  6901  	mset.syncSub = nil
  6902  	leader.systemSubscribe(syncSubj, _EMPTY_, false, mset.sysc, func(_ *subscription, _ *client, _ *Account, _, reply string, msg []byte) {
  6903  		var sreq streamSyncRequest
  6904  		if err := json.Unmarshal(msg, &sreq); err != nil {
  6905  			return
  6906  		}
  6907  		select {
  6908  		case ch <- reply:
  6909  		default:
  6910  		}
  6911  	})
  6912  	mset.mu.Unlock()
  6913  	syncRepl := natsSubSync(t, sysNC, nats.NewInbox()+".>")
  6914  	// Make sure our sub is propagated
  6915  	time.Sleep(250 * time.Millisecond)
  6916  
  6917  	if v := leader.gcbTotal(); v != 0 {
  6918  		t.Fatalf("Expected gcbTotal to be 0, got %v", v)
  6919  	}
  6920  
  6921  	buf := make([]byte, 1_000_000)
  6922  	n := runtime.Stack(buf, true)
  6923  	if bytes.Contains(buf[:n], []byte("runCatchup")) {
  6924  		t.Fatalf("Looks like runCatchup is still running:\n%s", buf[:n])
  6925  	}
  6926  
  6927  	mset.mu.Lock()
  6928  	var state StreamState
  6929  	mset.store.FastState(&state)
  6930  	snapshot := &streamSnapshot{
  6931  		Msgs:     state.Msgs,
  6932  		Bytes:    state.Bytes,
  6933  		FirstSeq: state.FirstSeq,
  6934  		LastSeq:  state.LastSeq + 1,
  6935  	}
  6936  	b, _ = json.Marshal(snapshot)
  6937  	mset.node.SendSnapshot(b)
  6938  	mset.mu.Unlock()
  6939  
  6940  	var sreqSubj string
  6941  	select {
  6942  	case sreqSubj = <-ch:
  6943  	case <-time.After(time.Second):
  6944  		t.Fatal("Did not receive sync request")
  6945  	}
  6946  
  6947  	// Now send a message with a wrong sequence and expect to receive an error.
  6948  	em := encodeStreamMsg("foo", _EMPTY_, nil, []byte("fail"), 102, time.Now().UnixNano())
  6949  	leader.sendInternalMsgLocked(sreqSubj, syncRepl.Subject, nil, em)
  6950  	msg = natsNexMsg(t, syncRepl, time.Second)
  6951  	if len(msg.Data) == 0 {
  6952  		t.Fatal("Expected err response from the remote")
  6953  	}
  6954  }
  6955  
  6956  func TestJetStreamClusterStreamDirectGetNotTooSoon(t *testing.T) {
  6957  	c := createJetStreamClusterExplicit(t, "R3F", 3)
  6958  	defer c.shutdown()
  6959  
  6960  	// Client based API
  6961  	s := c.randomServer()
  6962  	nc, _ := jsClientConnect(t, s)
  6963  	defer nc.Close()
  6964  
  6965  	// Do by hand for now.
  6966  	cfg := &StreamConfig{
  6967  		Name:        "TEST",
  6968  		Storage:     FileStorage,
  6969  		Subjects:    []string{"foo"},
  6970  		Replicas:    3,
  6971  		MaxMsgsPer:  1,
  6972  		AllowDirect: true,
  6973  	}
  6974  	addStream(t, nc, cfg)
  6975  	sendStreamMsg(t, nc, "foo", "bar")
  6976  
  6977  	getSubj := fmt.Sprintf(JSDirectGetLastBySubjectT, "TEST", "foo")
  6978  
  6979  	// Make sure we get all direct subs.
  6980  	checkForDirectSubs := func() {
  6981  		t.Helper()
  6982  		checkFor(t, 10*time.Second, 250*time.Millisecond, func() error {
  6983  			for _, s := range c.servers {
  6984  				mset, err := s.GlobalAccount().lookupStream("TEST")
  6985  				if err != nil {
  6986  					return err
  6987  				}
  6988  				mset.mu.RLock()
  6989  				hasBoth := mset.directSub != nil && mset.lastBySub != nil
  6990  				mset.mu.RUnlock()
  6991  				if !hasBoth {
  6992  					return fmt.Errorf("%v does not have both direct subs registered", s)
  6993  				}
  6994  			}
  6995  			return nil
  6996  		})
  6997  	}
  6998  
  6999  	_, err := nc.Request(getSubj, nil, time.Second)
  7000  	require_NoError(t, err)
  7001  
  7002  	checkForDirectSubs()
  7003  
  7004  	// We want to make sure that when starting up we do not listen until we have a leader.
  7005  	nc.Close()
  7006  	c.stopAll()
  7007  
  7008  	// Start just one..
  7009  	s, opts := RunServerWithConfig(c.opts[0].ConfigFile)
  7010  	c.servers[0] = s
  7011  	c.opts[0] = opts
  7012  
  7013  	nc, _ = jsClientConnect(t, s)
  7014  	defer nc.Close()
  7015  
  7016  	_, err = nc.Request(getSubj, nil, time.Second)
  7017  	require_Error(t, err, nats.ErrTimeout)
  7018  
  7019  	// Now start all and make sure they all eventually have subs for direct access.
  7020  	c.restartAll()
  7021  	c.waitOnStreamLeader("$G", "TEST")
  7022  
  7023  	_, err = nc.Request(getSubj, nil, time.Second)
  7024  	require_NoError(t, err)
  7025  
  7026  	checkForDirectSubs()
  7027  }
  7028  
  7029  func TestJetStreamClusterStaleReadsOnRestart(t *testing.T) {
  7030  	c := createJetStreamClusterExplicit(t, "R3F", 3)
  7031  	defer c.shutdown()
  7032  
  7033  	// Client based API
  7034  	s := c.randomServer()
  7035  	nc, js := jsClientConnect(t, s)
  7036  	defer nc.Close()
  7037  
  7038  	_, err := js.AddStream(&nats.StreamConfig{
  7039  		Name:     "TEST",
  7040  		Subjects: []string{"foo", "bar", "baz"},
  7041  		Replicas: 3,
  7042  	})
  7043  	require_NoError(t, err)
  7044  
  7045  	_, err = js.Publish("foo", nil)
  7046  	require_NoError(t, err)
  7047  
  7048  	sl := c.streamLeader("$G", "TEST")
  7049  
  7050  	r1 := c.randomNonStreamLeader("$G", "TEST")
  7051  	r1.Shutdown()
  7052  
  7053  	nc.Close()
  7054  	nc, js = jsClientConnect(t, c.randomServer())
  7055  	defer nc.Close()
  7056  
  7057  	_, err = js.Publish("bar", nil)
  7058  	require_NoError(t, err)
  7059  
  7060  	_, err = js.Publish("baz", nil)
  7061  	require_NoError(t, err)
  7062  
  7063  	r2 := c.randomNonStreamLeader("$G", "TEST")
  7064  	r2.Shutdown()
  7065  
  7066  	sl.Shutdown()
  7067  
  7068  	c.restartServer(r2)
  7069  	c.restartServer(r1)
  7070  
  7071  	c.waitOnStreamLeader("$G", "TEST")
  7072  
  7073  	nc.Close()
  7074  	nc, js = jsClientConnect(t, c.randomServer())
  7075  	defer nc.Close()
  7076  
  7077  	_, err = js.Publish("foo", nil)
  7078  	require_NoError(t, err)
  7079  
  7080  	_, err = js.Publish("bar", nil)
  7081  	require_NoError(t, err)
  7082  
  7083  	_, err = js.Publish("baz", nil)
  7084  	require_NoError(t, err)
  7085  
  7086  	c.restartServer(sl)
  7087  	c.waitOnAllCurrent()
  7088  	c.waitOnStreamLeader("$G", "TEST")
  7089  
  7090  	// Grab expected from leader.
  7091  	var state StreamState
  7092  	sl = c.streamLeader("$G", "TEST")
  7093  	mset, err := sl.GlobalAccount().lookupStream("TEST")
  7094  	require_NoError(t, err)
  7095  	mset.store.FastState(&state)
  7096  
  7097  	checkFor(t, 10*time.Second, 200*time.Millisecond, func() error {
  7098  		for _, s := range c.servers {
  7099  			if s.Running() {
  7100  				mset, err := s.GlobalAccount().lookupStream("TEST")
  7101  				if err != nil {
  7102  					return err
  7103  				}
  7104  				var fs StreamState
  7105  				mset.store.FastState(&fs)
  7106  				if !reflect.DeepEqual(fs, state) {
  7107  					return fmt.Errorf("States do not match, expected %+v but got %+v", state, fs)
  7108  				}
  7109  			}
  7110  		}
  7111  		return nil
  7112  	})
  7113  }
  7114  
  7115  func TestJetStreamClusterReplicasChangeStreamInfo(t *testing.T) {
  7116  	c := createJetStreamClusterExplicit(t, "R3F", 3)
  7117  	defer c.shutdown()
  7118  
  7119  	s := c.randomServer()
  7120  	nc, js := jsClientConnect(t, s)
  7121  	defer nc.Close()
  7122  
  7123  	numStreams := 1
  7124  	msgsPerStream := 10
  7125  	for i := 0; i < numStreams; i++ {
  7126  		sname := fmt.Sprintf("TEST_%v", i)
  7127  		_, err := js.AddStream(&nats.StreamConfig{
  7128  			Name:     sname,
  7129  			Replicas: 3,
  7130  		})
  7131  		require_NoError(t, err)
  7132  		for j := 0; j < msgsPerStream; j++ {
  7133  			sendStreamMsg(t, nc, sname, "msg")
  7134  		}
  7135  	}
  7136  
  7137  	checkStreamInfo := func(js nats.JetStreamContext) {
  7138  		t.Helper()
  7139  		checkFor(t, 20*time.Second, 15*time.Millisecond, func() error {
  7140  			for i := 0; i < numStreams; i++ {
  7141  				si, err := js.StreamInfo(fmt.Sprintf("TEST_%v", i))
  7142  				if err != nil {
  7143  					return err
  7144  				}
  7145  				if si.State.Msgs != uint64(msgsPerStream) || si.State.FirstSeq != 1 || si.State.LastSeq != uint64(msgsPerStream) {
  7146  					return fmt.Errorf("Invalid stream info for %s: %+v", si.Config.Name, si.State)
  7147  				}
  7148  			}
  7149  			return nil
  7150  		})
  7151  	}
  7152  	checkStreamInfo(js)
  7153  
  7154  	// Update replicas down to 1
  7155  	for i := 0; i < numStreams; i++ {
  7156  		sname := fmt.Sprintf("TEST_%v", i)
  7157  		_, err := js.UpdateStream(&nats.StreamConfig{
  7158  			Name:     sname,
  7159  			Replicas: 1,
  7160  		})
  7161  		require_NoError(t, err)
  7162  	}
  7163  	checkStreamInfo(js)
  7164  
  7165  	// Back up to 3
  7166  	for i := 0; i < numStreams; i++ {
  7167  		sname := fmt.Sprintf("TEST_%v", i)
  7168  		_, err := js.UpdateStream(&nats.StreamConfig{
  7169  			Name:     sname,
  7170  			Replicas: 3,
  7171  		})
  7172  		require_NoError(t, err)
  7173  		c.waitOnStreamLeader(globalAccountName, sname)
  7174  		for _, s := range c.servers {
  7175  			c.waitOnStreamCurrent(s, globalAccountName, sname)
  7176  		}
  7177  	}
  7178  	checkStreamInfo(js)
  7179  
  7180  	// Now shutdown the cluster and restart it
  7181  	nc.Close()
  7182  	c.stopAll()
  7183  	c.restartAll()
  7184  
  7185  	for i := 0; i < numStreams; i++ {
  7186  		sname := fmt.Sprintf("TEST_%v", i)
  7187  		c.waitOnStreamLeader(globalAccountName, sname)
  7188  		for _, s := range c.servers {
  7189  			c.waitOnStreamCurrent(s, globalAccountName, sname)
  7190  		}
  7191  	}
  7192  
  7193  	nc, js = jsClientConnect(t, c.randomServer())
  7194  	defer nc.Close()
  7195  
  7196  	checkStreamInfo(js)
  7197  }
  7198  
  7199  func TestJetStreamClusterMaxOutstandingCatchup(t *testing.T) {
  7200  	c := createJetStreamClusterExplicit(t, "MCB", 3)
  7201  	defer c.shutdown()
  7202  
  7203  	for _, s := range c.servers {
  7204  		s.gcbMu.RLock()
  7205  		v := s.gcbOutMax
  7206  		s.gcbMu.RUnlock()
  7207  		if v != defaultMaxTotalCatchupOutBytes {
  7208  			t.Fatalf("Server %v, expected max_outstanding_catchup to be %v, got %v", s, defaultMaxTotalCatchupOutBytes, v)
  7209  		}
  7210  	}
  7211  
  7212  	c.shutdown()
  7213  
  7214  	tmpl := `
  7215  		listen: 127.0.0.1:-1
  7216  		server_name: %s
  7217  		jetstream: { max_outstanding_catchup: 1KB, domain: ngs, max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'}
  7218  		leaf: { listen: 127.0.0.1:-1 }
  7219  		cluster {
  7220  			name: %s
  7221  			listen: 127.0.0.1:%d
  7222  			routes = [%s]
  7223  		}
  7224  	`
  7225  	c = createJetStreamClusterWithTemplate(t, tmpl, "MCB", 3)
  7226  	defer c.shutdown()
  7227  
  7228  	for _, s := range c.servers {
  7229  		s.gcbMu.RLock()
  7230  		v := s.gcbOutMax
  7231  		s.gcbMu.RUnlock()
  7232  		if v != 1024 {
  7233  			t.Fatalf("Server %v, expected max_outstanding_catchup to be 1KB, got %v", s, v)
  7234  		}
  7235  	}
  7236  
  7237  	nc, js := jsClientConnect(t, c.randomServer())
  7238  	defer nc.Close()
  7239  
  7240  	_, err := js.AddStream(&nats.StreamConfig{
  7241  		Name:     "TEST",
  7242  		Subjects: []string{"foo"},
  7243  		Replicas: 3,
  7244  	})
  7245  	require_NoError(t, err)
  7246  
  7247  	// Close client now and will create new one
  7248  	nc.Close()
  7249  
  7250  	c.waitOnStreamLeader(globalAccountName, "TEST")
  7251  
  7252  	follower := c.randomNonStreamLeader(globalAccountName, "TEST")
  7253  	follower.Shutdown()
  7254  
  7255  	c.waitOnStreamLeader(globalAccountName, "TEST")
  7256  
  7257  	// Create new connection in case we would have been connected to follower.
  7258  	nc, _ = jsClientConnect(t, c.randomServer())
  7259  	defer nc.Close()
  7260  
  7261  	payload := string(make([]byte, 2048))
  7262  	for i := 0; i < 1000; i++ {
  7263  		sendStreamMsg(t, nc, "foo", payload)
  7264  	}
  7265  
  7266  	// Cause snapshots on leader
  7267  	mset, err := c.streamLeader(globalAccountName, "TEST").GlobalAccount().lookupStream("TEST")
  7268  	require_NoError(t, err)
  7269  	err = mset.raftNode().InstallSnapshot(mset.stateSnapshot())
  7270  	require_NoError(t, err)
  7271  
  7272  	// Resart server and it should be able to catchup
  7273  	follower = c.restartServer(follower)
  7274  	c.waitOnStreamCurrent(follower, globalAccountName, "TEST")
  7275  
  7276  	// Config reload not supported
  7277  	s := c.servers[0]
  7278  	cfile := s.getOpts().ConfigFile
  7279  	content, err := os.ReadFile(cfile)
  7280  	require_NoError(t, err)
  7281  	conf := string(content)
  7282  	conf = strings.ReplaceAll(conf, "max_outstanding_catchup: 1KB,", "max_outstanding_catchup: 1MB,")
  7283  	err = os.WriteFile(cfile, []byte(conf), 0644)
  7284  	require_NoError(t, err)
  7285  	err = s.Reload()
  7286  	require_Error(t, err, fmt.Errorf("config reload not supported for JetStreamMaxCatchup: old=1024, new=1048576"))
  7287  }
  7288  
  7289  func TestJetStreamClusterCompressedStreamMessages(t *testing.T) {
  7290  	c := createJetStreamClusterExplicit(t, "R3F", 3)
  7291  	defer c.shutdown()
  7292  
  7293  	s := c.randomServer()
  7294  	nc, js := jsClientConnect(t, s)
  7295  	defer nc.Close()
  7296  
  7297  	_, err := js.AddStream(&nats.StreamConfig{
  7298  		Name:     "TEST",
  7299  		Subjects: []string{"foo"},
  7300  		Replicas: 3,
  7301  	})
  7302  	require_NoError(t, err)
  7303  
  7304  	// 32k (compress threshold ~4k)
  7305  	toSend, msg := 10_000, []byte(strings.Repeat("ABCD", 8*1024))
  7306  	for i := 0; i < toSend; i++ {
  7307  		js.PublishAsync("foo", msg)
  7308  	}
  7309  	select {
  7310  	case <-js.PublishAsyncComplete():
  7311  	case <-time.After(5 * time.Second):
  7312  		t.Fatalf("Did not receive completion signal")
  7313  	}
  7314  }
  7315  
  7316  //
  7317  // DO NOT ADD NEW TESTS IN THIS FILE
  7318  // Add at the end of jetstream_cluster_<n>_test.go, with <n> being the highest value.
  7319  //