get.pme.sh/pnats@v0.0.0-20240304004023-26bb5a137ed0/server/consumer.go

// Copyright 2019-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package server

import (
	"bytes"
	"encoding/binary"
	"encoding/json"
	"errors"
	"fmt"
	"math/rand"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"get.pme.sh/pnats/server/avl"
	"github.com/nats-io/nuid"
	"golang.org/x/time/rate"
)

// Headers sent with Request Timeout
const (
	JSPullRequestPendingMsgs  = "Nats-Pending-Messages"
	JSPullRequestPendingBytes = "Nats-Pending-Bytes"
)

// Headers sent when batch size was completed, but there were remaining bytes.
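// For example, fmt.Sprintf(JsPullRequestRemainingBytesT, JSPullRequestPendingMsgs, 3, JSPullRequestPendingBytes, 1024)
// renders the status:
//
//	NATS/1.0 409 Batch Completed
//	Nats-Pending-Messages: 3
//	Nats-Pending-Bytes: 1024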
const JsPullRequestRemainingBytesT = "NATS/1.0 409 Batch Completed\r\n%s: %d\r\n%s: %d\r\n\r\n"

type ConsumerInfo struct {
	Stream         string          `json:"stream_name"`
	Name           string          `json:"name"`
	Created        time.Time       `json:"created"`
	Config         *ConsumerConfig `json:"config,omitempty"`
	Delivered      SequenceInfo    `json:"delivered"`
	AckFloor       SequenceInfo    `json:"ack_floor"`
	NumAckPending  int             `json:"num_ack_pending"`
	NumRedelivered int             `json:"num_redelivered"`
	NumWaiting     int             `json:"num_waiting"`
	NumPending     uint64          `json:"num_pending"`
	Cluster        *ClusterInfo    `json:"cluster,omitempty"`
	PushBound      bool            `json:"push_bound,omitempty"`
	Paused         bool            `json:"paused,omitempty"`
	PauseRemaining time.Duration   `json:"pause_remaining,omitempty"`
	// TimeStamp indicates when the info was gathered
	TimeStamp time.Time `json:"ts"`
}

type ConsumerConfig struct {
	// Durable is deprecated. All consumers should have names, picked by clients.
	Durable         string          `json:"durable_name,omitempty"`
	Name            string          `json:"name,omitempty"`
	Description     string          `json:"description,omitempty"`
	DeliverPolicy   DeliverPolicy   `json:"deliver_policy"`
	OptStartSeq     uint64          `json:"opt_start_seq,omitempty"`
	OptStartTime    *time.Time      `json:"opt_start_time,omitempty"`
	AckPolicy       AckPolicy       `json:"ack_policy"`
	AckWait         time.Duration   `json:"ack_wait,omitempty"`
	MaxDeliver      int             `json:"max_deliver,omitempty"`
	BackOff         []time.Duration `json:"backoff,omitempty"`
	FilterSubject   string          `json:"filter_subject,omitempty"`
	FilterSubjects  []string        `json:"filter_subjects,omitempty"`
	ReplayPolicy    ReplayPolicy    `json:"replay_policy"`
	RateLimit       uint64          `json:"rate_limit_bps,omitempty"` // Bits per sec
	SampleFrequency string          `json:"sample_freq,omitempty"`
	MaxWaiting      int             `json:"max_waiting,omitempty"`
	MaxAckPending   int             `json:"max_ack_pending,omitempty"`
	Heartbeat       time.Duration   `json:"idle_heartbeat,omitempty"`
	FlowControl     bool            `json:"flow_control,omitempty"`
	HeadersOnly     bool            `json:"headers_only,omitempty"`

	// Pull based options.
	MaxRequestBatch    int           `json:"max_batch,omitempty"`
	MaxRequestExpires  time.Duration `json:"max_expires,omitempty"`
	MaxRequestMaxBytes int           `json:"max_bytes,omitempty"`

	// Push based consumers.
	DeliverSubject string `json:"deliver_subject,omitempty"`
	DeliverGroup   string `json:"deliver_group,omitempty"`

	// Ephemeral inactivity threshold.
	InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`

	// Generally inherited from the parent stream and other markers, but can now be configured directly.
	Replicas int `json:"num_replicas"`
	// Force memory storage.
	MemoryStorage bool `json:"mem_storage,omitempty"`

	// Don't add to general clients.
	Direct bool `json:"direct,omitempty"`

	// Metadata is additional metadata for the Consumer.
	Metadata map[string]string `json:"metadata,omitempty"`

	// PauseUntil is for suspending the consumer until the deadline.
	PauseUntil *time.Time `json:"pause_until,omitempty"`
}

// SequenceInfo has both the consumer and the stream sequence and last activity.
type SequenceInfo struct {
	Consumer uint64     `json:"consumer_seq"`
	Stream   uint64     `json:"stream_seq"`
	Last     *time.Time `json:"last_active,omitempty"`
}

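// CreateConsumerRequest is the JSON request body for consumer creation.
// Its Action field marshals to "create", "update", or "" (create-or-update),
// as implemented by ConsumerAction.MarshalJSON below.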
type CreateConsumerRequest struct {
	Stream string         `json:"stream_name"`
	Config ConsumerConfig `json:"config"`
	Action ConsumerAction `json:"action"`
}

type ConsumerAction int

const (
	ActionCreateOrUpdate ConsumerAction = iota
	ActionUpdate
	ActionCreate
)

const (
	actionUpdateJSONString         = `"update"`
	actionCreateJSONString         = `"create"`
	actionCreateOrUpdateJSONString = `""`
)

var (
	actionUpdateJSONBytes         = []byte(actionUpdateJSONString)
	actionCreateJSONBytes         = []byte(actionCreateJSONString)
	actionCreateOrUpdateJSONBytes = []byte(actionCreateOrUpdateJSONString)
)

func (a ConsumerAction) String() string {
	switch a {
	case ActionCreateOrUpdate:
		return actionCreateOrUpdateJSONString
	case ActionCreate:
		return actionCreateJSONString
	case ActionUpdate:
		return actionUpdateJSONString
	}
	return actionCreateOrUpdateJSONString
}

func (a ConsumerAction) MarshalJSON() ([]byte, error) {
	switch a {
	case ActionCreate:
		return actionCreateJSONBytes, nil
	case ActionUpdate:
		return actionUpdateJSONBytes, nil
	case ActionCreateOrUpdate:
		return actionCreateOrUpdateJSONBytes, nil
	default:
		return nil, fmt.Errorf("cannot marshal %v", a)
	}
}

func (a *ConsumerAction) UnmarshalJSON(data []byte) error {
	switch string(data) {
	case actionCreateJSONString:
		*a = ActionCreate
	case actionUpdateJSONString:
		*a = ActionUpdate
	case actionCreateOrUpdateJSONString:
		*a = ActionCreateOrUpdate
	default:
		return fmt.Errorf("unknown consumer action: %v", string(data))
	}
	return nil
}

// ConsumerNakOptions is for optional NAK values, e.g. delay.
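// A client requests a delayed redelivery by publishing the options as JSON
// after the NAK token, e.g. `-NAK {"delay": 1000000000}` to redeliver after
// one second (Delay is a time.Duration, so the JSON value is in nanoseconds).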
type ConsumerNakOptions struct {
	Delay time.Duration `json:"delay"`
}

// DeliverPolicy determines how the consumer should select the first message to deliver.
type DeliverPolicy int

const (
	// DeliverAll will be the default so can be omitted from the request.
	DeliverAll DeliverPolicy = iota
	// DeliverLast will start the consumer with the last sequence received.
	DeliverLast
	// DeliverNew will only deliver new messages that are sent after the consumer is created.
	DeliverNew
	// DeliverByStartSequence will look for a defined starting sequence to start.
	DeliverByStartSequence
	// DeliverByStartTime will select the first message with a timestamp >= StartTime.
	DeliverByStartTime
	// DeliverLastPerSubject will start the consumer with the last message for all subjects received.
	DeliverLastPerSubject
)

func (dp DeliverPolicy) String() string {
	switch dp {
	case DeliverAll:
		return "all"
	case DeliverLast:
		return "last"
	case DeliverNew:
		return "new"
	case DeliverByStartSequence:
		return "by_start_sequence"
	case DeliverByStartTime:
		return "by_start_time"
	case DeliverLastPerSubject:
		return "last_per_subject"
	default:
		return "undefined"
	}
}

// AckPolicy determines how the consumer should acknowledge delivered messages.
type AckPolicy int

const (
	// AckNone requires no acks for delivered messages.
	AckNone AckPolicy = iota
	// AckAll, when acking a sequence number, implicitly acks all sequences below it as well.
	AckAll
	// AckExplicit requires ack or nack for all messages.
	AckExplicit
)

func (a AckPolicy) String() string {
	switch a {
	case AckNone:
		return "none"
	case AckAll:
		return "all"
	default:
		return "explicit"
	}
}

// ReplayPolicy determines how the consumer should replay messages it already has queued in the stream.
type ReplayPolicy int

const (
	// ReplayInstant will replay messages as fast as possible.
	ReplayInstant ReplayPolicy = iota
	// ReplayOriginal will maintain the same timing as the messages were received.
	ReplayOriginal
)

func (r ReplayPolicy) String() string {
	switch r {
	case ReplayInstant:
		return replayInstantPolicyJSONString
	default:
		return replayOriginalPolicyJSONString
	}
}

// OK
const OK = "+OK"

// Ack responses. Note that a nil or empty payload is the same as AckAck.
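// Clients publish one of the tokens below to a delivered message's reply
// subject to acknowledge (or reject) that message.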
var (
	// Ack
	AckAck = []byte("+ACK") // nil or no payload to ack subject also means ACK
	AckOK  = []byte(OK)     // deprecated, but +OK meant ack as well.

	// Nack
	AckNak = []byte("-NAK")
	// Progress indicator
	AckProgress = []byte("+WPI")
	// Ack + Deliver the next message(s).
	AckNext = []byte("+NXT")
	// Terminate delivery of the message.
	AckTerm = []byte("+TERM")

	// Reasons to supply when terminating messages using limits.
	ackTermLimitsReason        = "Message deleted by stream limits"
	ackTermUnackedLimitsReason = "Unacknowledged message was deleted"
)

// Calculate the accurate replica count for the consumer config against the parent stream config.
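// For example, a legacy R0 ephemeral on an R3 limits-retention stream runs
// with 1 replica, while any other R0 consumer (or one asking for more replicas
// than the stream has) inherits the stream's replica count.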
func (consCfg ConsumerConfig) replicas(strCfg *StreamConfig) int {
	if consCfg.Replicas == 0 || consCfg.Replicas > strCfg.Replicas {
		if !isDurableConsumer(&consCfg) && strCfg.Retention == LimitsPolicy && consCfg.Replicas == 0 {
			// Matches old-school ephemerals only, where the replica count is 0.
			return 1
		}
		return strCfg.Replicas
	}
	return consCfg.Replicas
}

// Consumer is a jetstream consumer.
type consumer struct {
	// Atomic used to notify that we want to process an ack.
	// This will be checked in checkPending to abort processing
	// and let the ack be processed with priority.
	awl               int64
	leader            atomic.Bool
	mu                sync.RWMutex
	js                *jetStream
	mset              *stream
	acc               *Account
	srv               *Server
	client            *client
	sysc              *client
	sid               int
	name              string
	stream            string
	sseq              uint64         // next stream sequence
	subjf             subjectFilters // subject filters and their sequences
	dseq              uint64         // delivered consumer sequence
	adflr             uint64         // ack delivery floor
	asflr             uint64         // ack store floor
	npc               int64          // Num Pending Count
	npf               uint64         // Num Pending Floor Sequence
	dsubj             string
	qgroup            string
	lss               *lastSeqSkipList
	rlimit            *rate.Limiter
	reqSub            *subscription
	ackSub            *subscription
	ackReplyT         string
	ackSubj           string
	nextMsgSubj       string
	nextMsgReqs       *ipQueue[*nextMsgReq]
	maxp              int
	pblimit           int
	maxpb             int
	pbytes            int
	fcsz              int
	fcid              string
	fcSub             *subscription
	outq              *jsOutQ
	pending           map[uint64]*Pending
	ptmr              *time.Timer
	rdq               []uint64
	rdqi              avl.SequenceSet
	rdc               map[uint64]uint64
	maxdc             uint64
	waiting           *waitQueue
	cfg               ConsumerConfig
	ici               *ConsumerInfo
	store             ConsumerStore
	active            bool
	replay            bool
	dtmr              *time.Timer
	uptmr             *time.Timer // Unpause timer
	gwdtmr            *time.Timer
	dthresh           time.Duration
	mch               chan struct{} // Message channel
	qch               chan struct{} // Quit channel
	inch              chan bool     // Interest change channel
	sfreq             int32
	ackEventT         string
	nakEventT         string
	deliveryExcEventT string
	created           time.Time
	ldt               time.Time
	lat               time.Time
	closed            bool

	// Clustered.
	ca        *consumerAssignment
	node      RaftNode
	infoSub   *subscription
	lqsent    time.Time
	prm       map[string]struct{}
	prOk      bool
	uch       chan struct{}
	retention RetentionPolicy

	monitorWg sync.WaitGroup
	inMonitor bool

	// R>1 proposals
	pch   chan struct{}
	phead *proposal
	ptail *proposal

	// Ack queue
	ackMsgs *ipQueue[*jsAckMsg]

	// For stream signaling when multiple filters are set.
	sigSubs []*subscription
}

// A single subject filter.
type subjectFilter struct {
	subject          string
	nextSeq          uint64
	currentSeq       uint64
	pmsg             *jsPubMsg
	err              error
	hasWildcard      bool
	tokenizedSubject []string
}

type subjectFilters []*subjectFilter

// subjects is a helper function used for updating consumers.
// It is not, and should not be, used in the hot path.
func (s subjectFilters) subjects() []string {
	subjects := make([]string, 0, len(s))
	for _, filter := range s {
		subjects = append(subjects, filter.subject)
	}
	return subjects
}

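// proposal is a node in the singly linked list of pending replication
// proposals tracked via the consumer's phead/ptail fields above.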
type proposal struct {
	data []byte
	next *proposal
}

const (
	// JsAckWaitDefault is the default AckWait, only applicable on explicit ack policy consumers.
	JsAckWaitDefault = 30 * time.Second
	// JsDeleteWaitTimeDefault is the default amount of time we will wait for non-durable
	// consumers to be in an inactive state before deleting them.
	JsDeleteWaitTimeDefault = 5 * time.Second
	// JsFlowControlMaxPending specifies default pending bytes during flow control that can be
	// outstanding.
	JsFlowControlMaxPending = 32 * 1024 * 1024
	// JsDefaultMaxAckPending is set for consumers with explicit ack that do not set the max ack pending.
	JsDefaultMaxAckPending = 1000
)

// Helper function to set consumer config defaults from above.
func setConsumerConfigDefaults(config *ConsumerConfig, streamCfg *StreamConfig, lim *JSLimitOpts, accLim *JetStreamAccountLimits) {
	// Set to default if not specified.
	if config.DeliverSubject == _EMPTY_ && config.MaxWaiting == 0 {
		config.MaxWaiting = JSWaitQueueDefaultMax
	}
	// Setup proper default for ack wait if we are in explicit ack mode.
	if config.AckWait == 0 && (config.AckPolicy == AckExplicit || config.AckPolicy == AckAll) {
		config.AckWait = JsAckWaitDefault
	}
	// Setup default of -1, meaning no limit for MaxDeliver.
	if config.MaxDeliver == 0 {
		config.MaxDeliver = -1
	}
	// If BackOff was specified, that will override the AckWait and the MaxDeliver.
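	// For example, with a BackOff of [1s, 5s, 10s] the effective AckWait for
	// the first delivery attempt becomes 1s.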
	if len(config.BackOff) > 0 {
		config.AckWait = config.BackOff[0]
	}
	if config.MaxAckPending == 0 {
		config.MaxAckPending = streamCfg.ConsumerLimits.MaxAckPending
	}
	if config.InactiveThreshold == 0 {
		config.InactiveThreshold = streamCfg.ConsumerLimits.InactiveThreshold
	}
	// Set proper default for max ack pending if we are ack explicit and none has been set.
	if (config.AckPolicy == AckExplicit || config.AckPolicy == AckAll) && config.MaxAckPending == 0 {
		accPending := JsDefaultMaxAckPending
		if lim.MaxAckPending > 0 && lim.MaxAckPending < accPending {
			accPending = lim.MaxAckPending
		}
		if accLim.MaxAckPending > 0 && accLim.MaxAckPending < accPending {
			accPending = accLim.MaxAckPending
		}
		config.MaxAckPending = accPending
	}
	// If applicable, set the max request batch size.
	if config.DeliverSubject == _EMPTY_ && config.MaxRequestBatch == 0 && lim.MaxRequestBatch > 0 {
		config.MaxRequestBatch = lim.MaxRequestBatch
	}
}

// Check the consumer config. If we are recovering, don't check filter subjects.
func checkConsumerCfg(
	config *ConsumerConfig,
	srvLim *JSLimitOpts,
	cfg *StreamConfig,
	_ *Account,
	accLim *JetStreamAccountLimits,
	isRecovering bool,
) *ApiError {

	// Check if replicas is defined but exceeds parent stream.
	if config.Replicas > 0 && config.Replicas > cfg.Replicas {
		return NewJSConsumerReplicasExceedsStreamError()
	}
	// Check that it is not negative.
	if config.Replicas < 0 {
		return NewJSReplicasCountCannotBeNegativeError()
	}
	// If the stream is interest or workqueue retention make sure the replicas
	// match that of the stream. This is REQUIRED for now.
	if cfg.Retention == InterestPolicy || cfg.Retention == WorkQueuePolicy {
		// Only error here if not recovering.
		// We handle recovery in a different spot to allow the consumer to come up
		// if a previous version allowed it to be created; we do not want it to fail to come up.
		if !isRecovering && config.Replicas != 0 && config.Replicas != cfg.Replicas {
			return NewJSConsumerReplicasShouldMatchStreamError()
		}
	}

	// Check if we have a BackOff defined that MaxDeliver is within range etc.
	if lbo := len(config.BackOff); lbo > 0 && config.MaxDeliver <= lbo {
		return NewJSConsumerMaxDeliverBackoffError()
	}

	if len(config.Description) > JSMaxDescriptionLen {
		return NewJSConsumerDescriptionTooLongError(JSMaxDescriptionLen)
	}

	// For now expect a literal subject if it's not empty. Empty means work queue mode (pull mode).
	if config.DeliverSubject != _EMPTY_ {
		if !subjectIsLiteral(config.DeliverSubject) {
			return NewJSConsumerDeliverToWildcardsError()
		}
		if !IsValidSubject(config.DeliverSubject) {
			return NewJSConsumerInvalidDeliverSubjectError()
		}
		if deliveryFormsCycle(cfg, config.DeliverSubject) {
			return NewJSConsumerDeliverCycleError()
		}
		if config.MaxWaiting != 0 {
			return NewJSConsumerPushMaxWaitingError()
		}
		if config.MaxAckPending > 0 && config.AckPolicy == AckNone {
			return NewJSConsumerMaxPendingAckPolicyRequiredError()
		}
		if config.Heartbeat > 0 && config.Heartbeat < 100*time.Millisecond {
			return NewJSConsumerSmallHeartbeatError()
		}
	} else {
		// Pull mode with work queue retention from the stream requires an explicit ack.
		if config.AckPolicy == AckNone && cfg.Retention == WorkQueuePolicy {
			return NewJSConsumerPullRequiresAckError()
		}
		if config.RateLimit > 0 {
			return NewJSConsumerPullWithRateLimitError()
		}
		if config.MaxWaiting < 0 {
			return NewJSConsumerMaxWaitingNegativeError()
		}
		if config.Heartbeat > 0 {
			return NewJSConsumerHBRequiresPushError()
		}
		if config.FlowControl {
			return NewJSConsumerFCRequiresPushError()
		}
		if config.MaxRequestBatch < 0 {
			return NewJSConsumerMaxRequestBatchNegativeError()
		}
		if config.MaxRequestExpires != 0 && config.MaxRequestExpires < time.Millisecond {
			return NewJSConsumerMaxRequestExpiresToSmallError()
		}
		if srvLim.MaxRequestBatch > 0 && config.MaxRequestBatch > srvLim.MaxRequestBatch {
			return NewJSConsumerMaxRequestBatchExceededError(srvLim.MaxRequestBatch)
		}
	}
	if srvLim.MaxAckPending > 0 && config.MaxAckPending > srvLim.MaxAckPending {
		return NewJSConsumerMaxPendingAckExcessError(srvLim.MaxAckPending)
	}
	if accLim.MaxAckPending > 0 && config.MaxAckPending > accLim.MaxAckPending {
		return NewJSConsumerMaxPendingAckExcessError(accLim.MaxAckPending)
	}
	if cfg.ConsumerLimits.MaxAckPending > 0 && config.MaxAckPending > cfg.ConsumerLimits.MaxAckPending {
		return NewJSConsumerMaxPendingAckExcessError(cfg.ConsumerLimits.MaxAckPending)
	}
	if cfg.ConsumerLimits.InactiveThreshold > 0 && config.InactiveThreshold > cfg.ConsumerLimits.InactiveThreshold {
		return NewJSConsumerInactiveThresholdExcessError(cfg.ConsumerLimits.InactiveThreshold)
	}

	// Direct consumers need to be non-mapped ephemerals.
	if config.Direct {
		if config.DeliverSubject == _EMPTY_ {
			return NewJSConsumerDirectRequiresPushError()
		}
		if isDurableConsumer(config) {
			return NewJSConsumerDirectRequiresEphemeralError()
		}
	}

	// Do not allow specifying both FilterSubject and FilterSubjects,
	// as that's probably unintentional without any difference from passing
	// all filters in FilterSubjects.
	if config.FilterSubject != _EMPTY_ && len(config.FilterSubjects) > 0 {
		return NewJSConsumerDuplicateFilterSubjectsError()
	}

	if config.FilterSubject != _EMPTY_ && !IsValidSubject(config.FilterSubject) {
		return NewJSStreamInvalidConfigError(ErrBadSubject)
	}

	// We treat FilterSubjects: []string{""} as a misconfig, so we validate against it.
	for _, filter := range config.FilterSubjects {
		if filter == _EMPTY_ {
			return NewJSConsumerEmptyFilterError()
		}
	}
	subjectFilters := gatherSubjectFilters(config.FilterSubject, config.FilterSubjects)

	// Check that the subject filters do not overlap.
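	// For example, "foo.>" and "foo.*.bar" overlap, so that pair is rejected.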
	for outer, subject := range subjectFilters {
		if !IsValidSubject(subject) {
			return NewJSStreamInvalidConfigError(ErrBadSubject)
		}
		for inner, ssubject := range subjectFilters {
			if inner != outer && subjectIsSubsetMatch(subject, ssubject) {
				return NewJSConsumerOverlappingSubjectFiltersError()
			}
		}
	}

	// Helper functions to formulate similar errors.
	badStart := func(dp, start string) error {
		return fmt.Errorf("consumer delivery policy is deliver %s, but optional start %s is also set", dp, start)
	}
	notSet := func(dp, notSet string) error {
		return fmt.Errorf("consumer delivery policy is deliver %s, but optional %s is not set", dp, notSet)
	}

	// Check on start position conflicts.
	switch config.DeliverPolicy {
	case DeliverAll:
		if config.OptStartSeq > 0 {
			return NewJSConsumerInvalidPolicyError(badStart("all", "sequence"))
		}
		if config.OptStartTime != nil {
			return NewJSConsumerInvalidPolicyError(badStart("all", "time"))
		}
	case DeliverLast:
		if config.OptStartSeq > 0 {
			return NewJSConsumerInvalidPolicyError(badStart("last", "sequence"))
		}
		if config.OptStartTime != nil {
			return NewJSConsumerInvalidPolicyError(badStart("last", "time"))
		}
	case DeliverLastPerSubject:
		if config.OptStartSeq > 0 {
			return NewJSConsumerInvalidPolicyError(badStart("last per subject", "sequence"))
		}
		if config.OptStartTime != nil {
			return NewJSConsumerInvalidPolicyError(badStart("last per subject", "time"))
		}
		if config.FilterSubject == _EMPTY_ && len(config.FilterSubjects) == 0 {
			return NewJSConsumerInvalidPolicyError(notSet("last per subject", "filter subject"))
		}
	case DeliverNew:
		if config.OptStartSeq > 0 {
			return NewJSConsumerInvalidPolicyError(badStart("new", "sequence"))
		}
		if config.OptStartTime != nil {
			return NewJSConsumerInvalidPolicyError(badStart("new", "time"))
		}
	case DeliverByStartSequence:
		if config.OptStartSeq == 0 {
			return NewJSConsumerInvalidPolicyError(notSet("by start sequence", "start sequence"))
		}
		if config.OptStartTime != nil {
			return NewJSConsumerInvalidPolicyError(badStart("by start sequence", "time"))
		}
	case DeliverByStartTime:
		if config.OptStartTime == nil {
			return NewJSConsumerInvalidPolicyError(notSet("by start time", "start time"))
		}
		if config.OptStartSeq != 0 {
			return NewJSConsumerInvalidPolicyError(badStart("by start time", "start sequence"))
		}
	}

	if config.SampleFrequency != _EMPTY_ {
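		// SampleFrequency accepts values like "30" or "30%", both meaning 30 percent.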
		s := strings.TrimSuffix(config.SampleFrequency, "%")
		if sampleFreq, err := strconv.Atoi(s); err != nil || sampleFreq < 0 {
			return NewJSConsumerInvalidSamplingError(err)
		}
	}

	// We reject if flow control is set without heartbeats.
	if config.FlowControl && config.Heartbeat == 0 {
		return NewJSConsumerWithFlowControlNeedsHeartbeatsError()
	}

	if config.Durable != _EMPTY_ && config.Name != _EMPTY_ {
		if config.Name != config.Durable {
			return NewJSConsumerCreateDurableAndNameMismatchError()
		}
	}

	var metadataLen int
	for k, v := range config.Metadata {
		metadataLen += len(k) + len(v)
	}
	if metadataLen > JSMaxMetadataLen {
		return NewJSConsumerMetadataLengthError(fmt.Sprintf("%dKB", JSMaxMetadataLen/1024))
	}

	return nil
}

func (mset *stream) addConsumerWithAction(config *ConsumerConfig, action ConsumerAction) (*consumer, error) {
	return mset.addConsumerWithAssignment(config, _EMPTY_, nil, false, action)
}

func (mset *stream) addConsumer(config *ConsumerConfig) (*consumer, error) {
	return mset.addConsumerWithAction(config, ActionCreateOrUpdate)
}

func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname string, ca *consumerAssignment, isRecovering bool, action ConsumerAction) (*consumer, error) {
	// Check if this stream has closed.
	if mset.closed.Load() {
		return nil, NewJSStreamInvalidError()
	}

	mset.mu.RLock()
	s, jsa, tierName, cfg, acc := mset.srv, mset.jsa, mset.tier, mset.cfg, mset.acc
	retention := cfg.Retention
	mset.mu.RUnlock()

	// If we do not have the consumer currently assigned to us in cluster mode we will proceed but warn.
	// This can happen on startup with restored state where on meta replay we still do not have
	// the assignment. Running in single server mode this always returns true.
	if oname != _EMPTY_ && !jsa.consumerAssigned(mset.name(), oname) {
		s.Debugf("Consumer %q > %q does not seem to be assigned to this server", mset.name(), oname)
	}

	if config == nil {
		return nil, NewJSConsumerConfigRequiredError()
	}

	jsa.usageMu.RLock()
	selectedLimits, limitsFound := jsa.limits[tierName]
	jsa.usageMu.RUnlock()
	if !limitsFound {
		return nil, NewJSNoLimitsError()
	}

	srvLim := &s.getOpts().JetStreamLimits
	// Make sure we have sane defaults. Do so with the JS lock, otherwise a
	// badly timed meta snapshot can result in a race condition.
	mset.js.mu.Lock()
	setConsumerConfigDefaults(config, &mset.cfg, srvLim, &selectedLimits)
	mset.js.mu.Unlock()

	if err := checkConsumerCfg(config, srvLim, &cfg, acc, &selectedLimits, isRecovering); err != nil {
		return nil, err
	}

	sampleFreq := 0
	if config.SampleFrequency != _EMPTY_ {
		// Can't fail as checkConsumerCfg checks the correct format.
		sampleFreq, _ = strconv.Atoi(strings.TrimSuffix(config.SampleFrequency, "%"))
	}

	// Grab the client, account and server reference.
	c := mset.client
	if c == nil {
		return nil, NewJSStreamInvalidError()
	}
	var accName string
	c.mu.Lock()
	s, a := c.srv, c.acc
	if a != nil {
		accName = a.Name
	}
	c.mu.Unlock()

	// Hold mset lock here.
	mset.mu.Lock()
	if mset.client == nil || mset.store == nil || mset.consumers == nil {
		mset.mu.Unlock()
		return nil, NewJSStreamInvalidError()
	}

	// If this one is durable and already exists, we let that be OK as long as it only updates what is allowed to change.
	var cName string
	if isDurableConsumer(config) {
		cName = config.Durable
	} else if config.Name != _EMPTY_ {
		cName = config.Name
	}
	if cName != _EMPTY_ {
		if eo, ok := mset.consumers[cName]; ok {
			mset.mu.Unlock()
			if action == ActionCreate && !reflect.DeepEqual(*config, eo.config()) {
				return nil, NewJSConsumerAlreadyExistsError()
			}
			// Check for overlapping subjects.
			if mset.cfg.Retention == WorkQueuePolicy {
				subjects := gatherSubjectFilters(config.FilterSubject, config.FilterSubjects)
				if !mset.partitionUnique(cName, subjects) {
					return nil, NewJSConsumerWQConsumerNotUniqueError()
				}
			}
			err := eo.updateConfig(config)
			if err == nil {
				return eo, nil
			}
			return nil, NewJSConsumerCreateError(err, Unless(err))
		}
	}
	if action == ActionUpdate {
		mset.mu.Unlock()
		return nil, NewJSConsumerDoesNotExistError()
	}

	// Check for any limits. If the config for the consumer sets a limit we check against that,
	// but if not we use the value from the account limits. If the account limits are more
	// restrictive than the stream config we prefer the account limits, to handle cases where
	// account limits are updated during the lifecycle of the stream.
	maxc := mset.cfg.MaxConsumers
	if maxc <= 0 || (selectedLimits.MaxConsumers > 0 && selectedLimits.MaxConsumers < maxc) {
		maxc = selectedLimits.MaxConsumers
	}
	if maxc > 0 && mset.numPublicConsumers() >= maxc {
		mset.mu.Unlock()
		return nil, NewJSMaximumConsumersLimitError()
	}

	// Check on stream type conflicts with WorkQueues.
	if mset.cfg.Retention == WorkQueuePolicy && !config.Direct {
		// Force explicit acks here.
		if config.AckPolicy != AckExplicit {
			mset.mu.Unlock()
			return nil, NewJSConsumerWQRequiresExplicitAckError()
		}

		if len(mset.consumers) > 0 {
			subjects := gatherSubjectFilters(config.FilterSubject, config.FilterSubjects)
			if len(subjects) == 0 {
				mset.mu.Unlock()
				return nil, NewJSConsumerWQMultipleUnfilteredError()
			} else if !mset.partitionUnique(cName, subjects) {
				// Prior to v2.9.7, on a stream with WorkQueue policy, the servers
				// were not catching the error of having multiple consumers with
				// overlapping filter subjects depending on the scope, for instance
				// creating "foo.*.bar" and then "foo.>" was not detected, while
				// "foo.>" and then "foo.*.bar" would have been. Failing here
				// in recovery mode would leave the rejected consumer in a bad state,
				// so we will simply warn here, asking the user to remove this
				// consumer administratively. Otherwise, if this is the creation
				// of a new consumer, we will return the error.
				if isRecovering {
					s.Warnf("Consumer %q > %q has a filter subject that overlaps "+
						"with other consumers, which is not allowed for a stream "+
						"with WorkQueue policy, it should be administratively deleted",
						cfg.Name, cName)
				} else {
					// We have a partition but it is not unique amongst the others.
					mset.mu.Unlock()
					return nil, NewJSConsumerWQConsumerNotUniqueError()
				}
			}
		}
		if config.DeliverPolicy != DeliverAll {
			mset.mu.Unlock()
			return nil, NewJSConsumerWQConsumerNotDeliverAllError()
		}
	}

	// Set the name, which will be the durable name if set; otherwise we create one at random.
	o := &consumer{
		mset:      mset,
		js:        s.getJetStream(),
		acc:       a,
		srv:       s,
		client:    s.createInternalJetStreamClient(),
		sysc:      s.createInternalJetStreamClient(),
		cfg:       *config,
		dsubj:     config.DeliverSubject,
		outq:      mset.outq,
		active:    true,
		qch:       make(chan struct{}),
		uch:       make(chan struct{}, 1),
		mch:       make(chan struct{}, 1),
		sfreq:     int32(sampleFreq),
		maxdc:     uint64(config.MaxDeliver),
		maxp:      config.MaxAckPending,
		retention: retention,
		created:   time.Now().UTC(),
	}

	// Bind internal client to the user account.
	o.client.registerWithAccount(a)
	// Bind to the system account.
	o.sysc.registerWithAccount(s.SystemAccount())

	if isDurableConsumer(config) {
		if len(config.Durable) > JSMaxNameLen {
			mset.mu.Unlock()
			o.deleteWithoutAdvisory()
			return nil, NewJSConsumerNameTooLongError(JSMaxNameLen)
		}
		o.name = config.Durable
	} else if oname != _EMPTY_ {
		o.name = oname
	} else {
		if config.Name != _EMPTY_ {
			o.name = config.Name
		} else {
			// Legacy ephemeral auto-generated.
			for {
				o.name = createConsumerName()
				if _, ok := mset.consumers[o.name]; !ok {
					break
				}
			}
			config.Name = o.name
		}
	}
	// Create ackMsgs queue now that we have a consumer name.
	o.ackMsgs = newIPQueue[*jsAckMsg](s, fmt.Sprintf("[ACC:%s] consumer '%s' on stream '%s' ackMsgs", accName, o.name, mset.cfg.Name))

	// Create our request waiting queue.
	if o.isPullMode() {
		o.waiting = newWaitQueue(config.MaxWaiting)
		// Create our internal queue for next msg requests.
		o.nextMsgReqs = newIPQueue[*nextMsgReq](s, fmt.Sprintf("[ACC:%s] consumer '%s' on stream '%s' pull requests", accName, o.name, mset.cfg.Name))
	}

	// Already under lock; calling mset.name() would deadlock.
	o.stream = mset.cfg.Name
	o.ackEventT = JSMetricConsumerAckPre + "." + o.stream + "." + o.name
	o.nakEventT = JSAdvisoryConsumerMsgNakPre + "." + o.stream + "." + o.name
	o.deliveryExcEventT = JSAdvisoryConsumerMaxDeliveryExceedPre + "." + o.stream + "." + o.name

	if !isValidName(o.name) {
		mset.mu.Unlock()
		o.deleteWithoutAdvisory()
		return nil, NewJSConsumerBadDurableNameError()
	}

	// Setup our storage if not a direct consumer.
	if !config.Direct {
		store, err := mset.store.ConsumerStore(o.name, config)
		if err != nil {
			mset.mu.Unlock()
			o.deleteWithoutAdvisory()
			return nil, NewJSConsumerStoreFailedError(err)
		}
		o.store = store
	}

	subjects := gatherSubjectFilters(o.cfg.FilterSubject, o.cfg.FilterSubjects)
	for _, filter := range subjects {
		sub := &subjectFilter{
			subject:          filter,
			hasWildcard:      subjectHasWildcard(filter),
			tokenizedSubject: tokenizeSubjectIntoSlice(nil, filter),
		}
		o.subjf = append(o.subjf, sub)
	}

	if o.store != nil && o.store.HasState() {
		// Restore our saved state.
		o.mu.Lock()
		o.readStoredState(0)
		o.mu.Unlock()
	} else {
		// Select the starting sequence number.
		o.selectStartingSeqNo()
	}

	// Now register with mset and create the ack subscription.
	// Check if we already have this one registered.
	if eo, ok := mset.consumers[o.name]; ok {
		mset.mu.Unlock()
		if !o.isDurable() || !o.isPushMode() {
			o.name = _EMPTY_ // Prevent removal since same name.
			o.deleteWithoutAdvisory()
			return nil, NewJSConsumerNameExistError()
		}
		// If we are here we have already registered this durable. If it is still active that is an error.
		if eo.isActive() {
			o.name = _EMPTY_ // Prevent removal since same name.
			o.deleteWithoutAdvisory()
			return nil, NewJSConsumerExistingActiveError()
		}
		// Since we are here this means we have a potentially new durable so we should update here.
		// Check that the configs are the same.
		if !configsEqualSansDelivery(o.cfg, eo.cfg) {
			o.name = _EMPTY_ // Prevent removal since same name.
			o.deleteWithoutAdvisory()
			return nil, NewJSConsumerReplacementWithDifferentNameError()
		}
		// Once we are here we have a replacement push-based durable.
		eo.updateDeliverSubject(o.cfg.DeliverSubject)
		return eo, nil
	}

	// Set up the ack subscription for this consumer. Will use wildcard for all acks.
	// We will remember the template to generate replies with sequence numbers and use
	// that to scanf them back in.
	// Escape '%' in consumer and stream names, as `pre` is used as a template later
	// in consumer.ackReply(), resulting in erroneous formatting of the ack subject.
	mn := strings.ReplaceAll(mset.cfg.Name, "%", "%%")
	pre := fmt.Sprintf(jsAckT, mn, strings.ReplaceAll(o.name, "%", "%%"))
	o.ackReplyT = fmt.Sprintf("%s.%%d.%%d.%%d.%%d.%%d", pre)
	o.ackSubj = fmt.Sprintf("%s.*.*.*.*.*", pre)
	o.nextMsgSubj = fmt.Sprintf(JSApiRequestNextT, mn, o.name)
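	// The five %d fields in ackReplyT carry the delivery count, stream sequence,
	// consumer sequence, timestamp and pending count of each delivery, so full
	// reply subjects look like <pre>.<dcount>.<sseq>.<dseq>.<ts>.<pending>
	// (with pre presumably of the form $JS.ACK.<stream>.<consumer> via jsAckT).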

	// Check/update the inactive threshold.
	o.updateInactiveThreshold(&o.cfg)

	if o.isPushMode() {
		// Check if we are running only 1 replica and that the delivery subject has interest.
		// Check in place here for interest. Will set up properly in setLeader.
		if config.replicas(&mset.cfg) == 1 {
			r := o.acc.sl.Match(o.cfg.DeliverSubject)
			if !o.hasDeliveryInterest(len(r.psubs)+len(r.qsubs) > 0) {
				// Let the interest come to us eventually, but set up the delete timer.
				o.updateDeliveryInterest(false)
			}
		}
	}

	// Set our ca.
	if ca != nil {
		o.setConsumerAssignment(ca)
	}

	// Check if we have a rate limit set.
	if config.RateLimit != 0 {
		o.setRateLimit(config.RateLimit)
	}

	mset.setConsumer(o)
	mset.mu.Unlock()

	if config.Direct || (!s.JetStreamIsClustered() && s.standAloneMode()) {
		o.setLeader(true)
	}

	// This is always true in single server mode.
	if o.IsLeader() {
		// Send advisory.
		var suppress bool
		if !s.standAloneMode() && ca == nil {
			suppress = true
		} else if ca != nil {
			suppress = ca.responded
		}
		if !suppress {
			o.sendCreateAdvisory()
		}
	}

	return o, nil
}

// Updates the consumer `dthresh` delete timer duration and sets
// cfg.InactiveThreshold to JsDeleteWaitTimeDefault for ephemerals
// if not explicitly already specified by the user.
// Lock should be held.
func (o *consumer) updateInactiveThreshold(cfg *ConsumerConfig) {
	// Ephemerals will always have inactive thresholds.
	if !o.isDurable() && cfg.InactiveThreshold <= 0 {
		// Add in up to 1 sec of jitter above and beyond the default of 5s.
		o.dthresh = JsDeleteWaitTimeDefault + 100*time.Millisecond + time.Duration(rand.Int63n(900))*time.Millisecond
		// Only stamp config with default sans jitter.
		cfg.InactiveThreshold = JsDeleteWaitTimeDefault
	} else if cfg.InactiveThreshold > 0 {
		// Add in up to 1 sec of jitter if pull mode.
		if o.isPullMode() {
			o.dthresh = cfg.InactiveThreshold + 100*time.Millisecond + time.Duration(rand.Int63n(900))*time.Millisecond
		} else {
			o.dthresh = cfg.InactiveThreshold
		}
	} else if cfg.InactiveThreshold <= 0 {
		// We accept InactiveThreshold being set to 0 (for durables).
		o.dthresh = 0
	}
}

// Updates the paused state. If we are the leader and the pause deadline
// hasn't passed yet then we will start a timer to kick the consumer once
// that deadline is reached. Lock should be held.
func (o *consumer) updatePauseState(cfg *ConsumerConfig) {
	if o.uptmr != nil {
		stopAndClearTimer(&o.uptmr)
	}
	if !o.isLeader() {
		// Only the leader will run the timer as only the leader will run
		// loopAndGatherMsgs.
		return
	}
	if cfg.PauseUntil == nil || cfg.PauseUntil.IsZero() || cfg.PauseUntil.Before(time.Now()) {
		// Either the PauseUntil is unset (is effectively zero) or the
		// deadline has already passed, in which case there is nothing
		// to do.
		return
	}
	o.uptmr = time.AfterFunc(time.Until(*cfg.PauseUntil), func() {
		o.mu.Lock()
		defer o.mu.Unlock()

		stopAndClearTimer(&o.uptmr)
		o.sendPauseAdvisoryLocked(&o.cfg)
		o.signalNewMessages()
	})
}

func (o *consumer) consumerAssignment() *consumerAssignment {
	o.mu.RLock()
	defer o.mu.RUnlock()
	return o.ca
}

func (o *consumer) setConsumerAssignment(ca *consumerAssignment) {
	o.mu.Lock()
	defer o.mu.Unlock()

	o.ca = ca
	if ca == nil {
		return
	}
	// Set our node.
	o.node = ca.Group.node

	// Trigger update chan.
	select {
	case o.uch <- struct{}{}:
	default:
	}
}

func (o *consumer) updateC() <-chan struct{} {
	o.mu.RLock()
	defer o.mu.RUnlock()
	return o.uch
}

// checkQueueInterest will check on our interest's queue group status.
// Lock should be held.
func (o *consumer) checkQueueInterest() {
	if !o.active || o.cfg.DeliverSubject == _EMPTY_ {
		return
	}
	subj := o.dsubj
	if subj == _EMPTY_ {
		subj = o.cfg.DeliverSubject
	}

	if rr := o.acc.sl.Match(subj); len(rr.qsubs) > 0 {
		// Just grab the first one.
		if qsubs := rr.qsubs[0]; len(qsubs) > 0 {
			if sub := rr.qsubs[0][0]; len(sub.queue) > 0 {
				o.qgroup = string(sub.queue)
			}
		}
	}
}

// Clears our node if we have one, when we scale down to R1.
func (o *consumer) clearNode() {
	o.mu.Lock()
	defer o.mu.Unlock()
	if o.node != nil {
		o.node.Delete()
		o.node = nil
	}
}

// IsLeader will return whether we are the current leader.
func (o *consumer) IsLeader() bool {
	o.mu.RLock()
	defer o.mu.RUnlock()
	return o.isLeader()
}

// Lock should be held.
func (o *consumer) isLeader() bool {
	if o.node != nil {
		return o.node.Leader()
	}
	return true
}

func (o *consumer) setLeader(isLeader bool) {
	o.mu.RLock()
	mset, closed := o.mset, o.closed
	movingToClustered := o.node != nil && o.pch == nil
	wasLeader := o.leader.Swap(isLeader)
	o.mu.RUnlock()

	// If we are here we have a change in leader status.
	if isLeader {
		if closed || mset == nil {
			return
		}

		if wasLeader {
			// If we detect we are scaling up, make sure to create clustered routines and channels.
			if movingToClustered {
				o.mu.Lock()
				// We are moving from R1 to clustered.
				o.pch = make(chan struct{}, 1)
				go o.loopAndForwardProposals(o.qch)
				if o.phead != nil {
					select {
					case o.pch <- struct{}{}:
					default:
					}
				}
				o.mu.Unlock()
			}
			return
		}

		mset.mu.RLock()
		s, jsa, stream, lseq := mset.srv, mset.jsa, mset.cfg.Name, mset.lseq
		mset.mu.RUnlock()

		// Register as a leader with our parent stream.
		mset.setConsumerAsLeader(o)

		o.mu.Lock()
		o.rdq = nil
		o.rdqi.Empty()

		// Restore our saved state. During non-leader status we just update our underlying store.
		o.readStoredState(lseq)

		// Setup initial num pending.
		o.streamNumPending()

		// Cleanup lss when we take over in clustered mode.
		if o.hasSkipListPending() && o.sseq >= o.lss.resume {
			o.lss = nil
		}

		// Update the group on our starting sequence if we are starting but skipped some in the stream.
		if o.dseq == 1 && o.sseq > 1 {
			o.updateSkipped(o.sseq)
		}

		// Do info sub.
		if o.infoSub == nil && jsa != nil {
			isubj := fmt.Sprintf(clusterConsumerInfoT, jsa.acc(), stream, o.name)
			// Note: the way we subscribe below allows us to send requests to ourselves.
			o.infoSub, _ = s.systemSubscribe(isubj, _EMPTY_, false, o.sysc, o.handleClusterConsumerInfoRequest)
		}

		var err error
		if o.cfg.AckPolicy != AckNone {
			if o.ackSub, err = o.subscribeInternal(o.ackSubj, o.pushAck); err != nil {
				o.mu.Unlock()
				o.deleteWithoutAdvisory()
				return
			}
		}

		// Setup the internal sub for next message requests regardless.
		// Will error if wrong mode to provide feedback to users.
		if o.reqSub, err = o.subscribeInternal(o.nextMsgSubj, o.processNextMsgReq); err != nil {
			o.mu.Unlock()
			o.deleteWithoutAdvisory()
			return
		}

		// Check on flow control settings.
		if o.cfg.FlowControl {
			o.setMaxPendingBytes(JsFlowControlMaxPending)
			fcsubj := fmt.Sprintf(jsFlowControl, stream, o.name)
			if o.fcSub, err = o.subscribeInternal(fcsubj, o.processFlowControl); err != nil {
				o.mu.Unlock()
				o.deleteWithoutAdvisory()
				return
			}
		}

		// If push mode, register for notifications on interest.
		if o.isPushMode() {
			o.inch = make(chan bool, 8)
			o.acc.sl.registerNotification(o.cfg.DeliverSubject, o.cfg.DeliverGroup, o.inch)
			if o.active = <-o.inch; o.active {
				o.checkQueueInterest()
			}

			// Check gateways in case they are enabled.
			if s.gateway.enabled {
				if !o.active {
					o.active = s.hasGatewayInterest(o.acc.Name, o.cfg.DeliverSubject)
				}
				stopAndClearTimer(&o.gwdtmr)
				o.gwdtmr = time.AfterFunc(time.Second, func() { o.watchGWinterest() })
			}
		}

		if o.dthresh > 0 && (o.isPullMode() || !o.active) {
			// Pull consumer. We run the dtmr all the time for this one.
			stopAndClearTimer(&o.dtmr)
			o.dtmr = time.AfterFunc(o.dthresh, o.deleteNotActive)
		}

		// Update the consumer pause tracking.
		o.updatePauseState(&o.cfg)

		// If we are not in ReplayInstant mode mark us as in replay state until resolved.
		if o.cfg.ReplayPolicy != ReplayInstant {
			o.replay = true
		}

		// Recreate quit channel.
		o.qch = make(chan struct{})
		qch := o.qch
		node := o.node
		if node != nil && o.pch == nil {
			o.pch = make(chan struct{}, 1)
		}
		pullMode := o.isPullMode()
		o.mu.Unlock()

		// Snapshot initial info.
		o.infoWithSnap(true)

		// These are the labels we will use to annotate our goroutines.
		labels := pprofLabels{
			"type":     "consumer",
			"account":  mset.accName(),
			"stream":   mset.name(),
			"consumer": o.name,
		}

		// Now start up Go routine to deliver msgs.
		go func() {
			setGoRoutineLabels(labels)
			o.loopAndGatherMsgs(qch)
		}()

		// Now start up Go routine to process acks.
		go func() {
			setGoRoutineLabels(labels)
			o.processInboundAcks(qch)
		}()

		if pullMode {
			// Now start up Go routine to process inbound next message requests.
			go func() {
				setGoRoutineLabels(labels)
				o.processInboundNextMsgReqs(qch)
			}()
		}

		// If we are R>1 spin up our proposal loop.
		if node != nil {
			// Determine if we can send pending requests info to the group.
			// They must be on server versions >= 2.7.1.
			o.checkAndSetPendingRequestsOk()
			o.checkPendingRequests()
			go func() {
				setGoRoutineLabels(labels)
				o.loopAndForwardProposals(qch)
			}()
		}

	} else {
		// Shutdown the go routines and the subscriptions.
		o.mu.Lock()
		if o.qch != nil {
			close(o.qch)
			o.qch = nil
		}
		// Stop any inactivity timers. Should only be running on leaders.
		stopAndClearTimer(&o.dtmr)
		// Stop any unpause timers. Should only be running on leaders.
		stopAndClearTimer(&o.uptmr)
		// Make sure to clear out any re-deliver queues.
		stopAndClearTimer(&o.ptmr)
		o.rdq = nil
		o.rdqi.Empty()
		o.pending = nil
		// OK if they are nil; we protect inside unsubscribe().
		o.unsubscribe(o.ackSub)
		o.unsubscribe(o.reqSub)
		o.unsubscribe(o.fcSub)
		o.ackSub, o.reqSub, o.fcSub = nil, nil, nil
		if o.infoSub != nil {
			o.srv.sysUnsubscribe(o.infoSub)
			o.infoSub = nil
		}
		// Reset waiting if we are in pull mode.
		if o.isPullMode() {
			o.waiting = newWaitQueue(o.cfg.MaxWaiting)
			o.nextMsgReqs.drain()
		} else if o.srv.gateway.enabled {
			stopAndClearTimer(&o.gwdtmr)
		}
		o.mu.Unlock()

		// Unregister as a leader with our parent stream.
		if mset != nil {
			mset.removeConsumerAsLeader(o)
		}
	}
}

// This is coming in over the wire so do not block here.
func (o *consumer) handleClusterConsumerInfoRequest(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
	go o.infoWithSnapAndReply(false, reply)
}

// Lock should be held.
func (o *consumer) subscribeInternal(subject string, cb msgHandler) (*subscription, error) {
	c := o.client
	if c == nil {
		return nil, fmt.Errorf("invalid consumer")
	}
	if !c.srv.EventsEnabled() {
		return nil, ErrNoSysAccount
	}
	if cb == nil {
		return nil, fmt.Errorf("undefined message handler")
	}

	o.sid++

	// Now create the subscription.
	return c.processSub([]byte(subject), nil, []byte(strconv.Itoa(o.sid)), cb, false)
}

// Unsubscribe from our subscription.
// Lock should be held.
func (o *consumer) unsubscribe(sub *subscription) {
	if sub == nil || o.client == nil {
		return
	}
	o.client.processUnsub(sub.sid)
}

// We need to make sure we protect access to the outq.
// Do all advisory sends here.
func (o *consumer) sendAdvisory(subj string, msg []byte) {
	o.outq.sendMsg(subj, msg)
}

func (o *consumer) sendDeleteAdvisoryLocked() {
	e := JSConsumerActionAdvisory{
		TypedEvent: TypedEvent{
			Type: JSConsumerActionAdvisoryType,
			ID:   nuid.Next(),
			Time: time.Now().UTC(),
		},
		Stream:   o.stream,
		Consumer: o.name,
		Action:   DeleteEvent,
		Domain:   o.srv.getOpts().JetStreamDomain,
	}

	j, err := json.Marshal(e)
	if err != nil {
		return
	}

	subj := JSAdvisoryConsumerDeletedPre + "." + o.stream + "." + o.name
	o.sendAdvisory(subj, j)
}

func (o *consumer) sendCreateAdvisory() {
	o.mu.Lock()
	defer o.mu.Unlock()

	e := JSConsumerActionAdvisory{
		TypedEvent: TypedEvent{
			Type: JSConsumerActionAdvisoryType,
			ID:   nuid.Next(),
			Time: time.Now().UTC(),
		},
		Stream:   o.stream,
		Consumer: o.name,
		Action:   CreateEvent,
		Domain:   o.srv.getOpts().JetStreamDomain,
	}

	j, err := json.Marshal(e)
	if err != nil {
		return
	}

	subj := JSAdvisoryConsumerCreatedPre + "." + o.stream + "." + o.name
	o.sendAdvisory(subj, j)
}

func (o *consumer) sendPauseAdvisoryLocked(cfg *ConsumerConfig) {
	e := JSConsumerPauseAdvisory{
		TypedEvent: TypedEvent{
			Type: JSConsumerPauseAdvisoryType,
			ID:   nuid.Next(),
			Time: time.Now().UTC(),
		},
		Stream:   o.stream,
		Consumer: o.name,
		Domain:   o.srv.getOpts().JetStreamDomain,
	}

	if cfg.PauseUntil != nil {
		e.PauseUntil = *cfg.PauseUntil
		e.Paused = time.Now().Before(e.PauseUntil)
	}

	j, err := json.Marshal(e)
	if err != nil {
		return
	}

	subj := JSAdvisoryConsumerPausePre + "." + o.stream + "." + o.name
	o.sendAdvisory(subj, j)
}

// createdTime returns the created time.
func (o *consumer) createdTime() time.Time {
	o.mu.Lock()
	created := o.created
	o.mu.Unlock()
	return created
}

// Internal to allow creation time to be restored.
func (o *consumer) setCreatedTime(created time.Time) {
	o.mu.Lock()
	o.created = created
	o.mu.Unlock()
}

// This will check for extended interest in a subject. If we have local interest we just return
// that, but in the absence of local interest and presence of gateways or service imports we need
// to check those as well.
func (o *consumer) hasDeliveryInterest(localInterest bool) bool {
	o.mu.Lock()
	mset := o.mset
	if mset == nil {
		o.mu.Unlock()
		return false
	}
	acc := o.acc
	deliver := o.cfg.DeliverSubject
	o.mu.Unlock()

	if localInterest {
		return true
	}

	// If we are here check gateways.
	if s := acc.srv; s != nil && s.hasGatewayInterest(acc.Name, deliver) {
		return true
	}
	return false
}

func (s *Server) hasGatewayInterest(account, subject string) bool {
	gw := s.gateway
	if !gw.enabled {
		return false
	}
	gw.RLock()
	defer gw.RUnlock()
  1563  	for _, gwc := range gw.outo {
  1564  		psi, qr := gwc.gatewayInterest(account, subject)
  1565  		if psi || qr != nil {
  1566  			return true
  1567  		}
  1568  	}
  1569  	return false
  1570  }
  1571  
  1572  // This processes an update to the local interest for a deliver subject.
  1573  func (o *consumer) updateDeliveryInterest(localInterest bool) bool {
  1574  	interest := o.hasDeliveryInterest(localInterest)
  1575  
  1576  	o.mu.Lock()
  1577  	defer o.mu.Unlock()
  1578  
  1579  	mset := o.mset
  1580  	if mset == nil || o.isPullMode() {
  1581  		return false
  1582  	}
  1583  
  1584  	if interest && !o.active {
  1585  		o.signalNewMessages()
  1586  	}
  1587  	// Update active status, if not active clear any queue group we captured.
  1588  	if o.active = interest; !o.active {
  1589  		o.qgroup = _EMPTY_
  1590  	} else {
  1591  		o.checkQueueInterest()
  1592  	}
  1593  
   1594  	// If the delete timer has already been set, do not clear it here; just return.
  1595  	// Note that durable can now have an inactive threshold, so don't check
  1596  	// for durable status, instead check for dthresh > 0.
  1597  	if o.dtmr != nil && o.dthresh > 0 && !interest {
  1598  		return true
  1599  	}
  1600  
  1601  	// Stop and clear the delete timer always.
  1602  	stopAndClearTimer(&o.dtmr)
  1603  
  1604  	// If we do not have interest anymore and have a delete threshold set, then set
  1605  	// a timer to delete us. We wait for a bit in case of server reconnect.
  1606  	if !interest && o.dthresh > 0 {
  1607  		o.dtmr = time.AfterFunc(o.dthresh, o.deleteNotActive)
  1608  		return true
  1609  	}
  1610  	return false
  1611  }
  1612  
  1613  func (o *consumer) deleteNotActive() {
  1614  	o.mu.Lock()
  1615  	if o.mset == nil {
  1616  		o.mu.Unlock()
  1617  		return
  1618  	}
   1619  	// Push mode: just look at active.
  1620  	if o.isPushMode() {
  1621  		// If we are active simply return.
  1622  		if o.active {
  1623  			o.mu.Unlock()
  1624  			return
  1625  		}
  1626  	} else {
  1627  		// Pull mode.
  1628  		elapsed := time.Since(o.waiting.last)
  1629  		if elapsed <= o.cfg.InactiveThreshold {
   1630  				// These need to keep firing, so reset, but use the remaining delta.
  1631  			if o.dtmr != nil {
  1632  				o.dtmr.Reset(o.dthresh - elapsed)
  1633  			} else {
  1634  				o.dtmr = time.AfterFunc(o.dthresh-elapsed, o.deleteNotActive)
  1635  			}
  1636  			o.mu.Unlock()
  1637  			return
  1638  		}
  1639  		// Check if we still have valid requests waiting.
  1640  		if o.checkWaitingForInterest() {
  1641  			if o.dtmr != nil {
  1642  				o.dtmr.Reset(o.dthresh)
  1643  			} else {
  1644  				o.dtmr = time.AfterFunc(o.dthresh, o.deleteNotActive)
  1645  			}
  1646  			o.mu.Unlock()
  1647  			return
  1648  		}
  1649  	}
  1650  
  1651  	s, js := o.mset.srv, o.srv.js.Load()
  1652  	acc, stream, name, isDirect := o.acc.Name, o.stream, o.name, o.cfg.Direct
  1653  	o.mu.Unlock()
  1654  
  1655  	// If we are clustered, check if we still have this consumer assigned.
  1656  	// If we do forward a proposal to delete ourselves to the metacontroller leader.
  1657  	if !isDirect && s.JetStreamIsClustered() {
  1658  		js.mu.RLock()
  1659  		var (
  1660  			cca         consumerAssignment
  1661  			meta        RaftNode
  1662  			removeEntry []byte
  1663  		)
  1664  		ca, cc := js.consumerAssignment(acc, stream, name), js.cluster
  1665  		if ca != nil && cc != nil {
  1666  			meta = cc.meta
  1667  			cca = *ca
  1668  			cca.Reply = _EMPTY_
  1669  			removeEntry = encodeDeleteConsumerAssignment(&cca)
  1670  			meta.ForwardProposal(removeEntry)
  1671  		}
  1672  		js.mu.RUnlock()
  1673  
  1674  		if ca != nil && cc != nil {
  1675  			// Check to make sure we went away.
  1676  			// Don't think this needs to be a monitored go routine.
  1677  			go func() {
  1678  				const (
  1679  					startInterval = 30 * time.Second
  1680  					maxInterval   = 5 * time.Minute
  1681  				)
  1682  				jitter := time.Duration(rand.Int63n(int64(startInterval)))
  1683  				interval := startInterval + jitter
  1684  				ticker := time.NewTicker(interval)
  1685  				defer ticker.Stop()
  1686  				for range ticker.C {
  1687  					js.mu.RLock()
  1688  					if js.shuttingDown {
  1689  						js.mu.RUnlock()
  1690  						return
  1691  					}
  1692  					nca := js.consumerAssignment(acc, stream, name)
  1693  					js.mu.RUnlock()
  1694  					// Make sure this is not a new consumer with the same name.
  1695  					if nca != nil && nca == ca {
  1696  						s.Warnf("Consumer assignment for '%s > %s > %s' not cleaned up, retrying", acc, stream, name)
  1697  						meta.ForwardProposal(removeEntry)
  1698  						if interval < maxInterval {
  1699  							interval *= 2
  1700  							ticker.Reset(interval)
  1701  						}
  1702  						continue
  1703  					}
  1704  					// We saw that consumer has been removed, all done.
  1705  					return
  1706  				}
  1707  			}()
  1708  		}
  1709  	}
  1710  
  1711  	// We will delete here regardless.
  1712  	o.delete()
  1713  }
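// Illustrative note on the cleanup watchdog above (jitter value hypothetical):
// it uses randomized exponential backoff. With startInterval 30s and a jitter
// of, say, 12s, the checks fire at roughly 42s, 84s, 168s and then 336s
// intervals, after which the interval stops doubling because it is no longer
// below maxInterval (5m).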
  1714  
  1715  func (o *consumer) watchGWinterest() {
  1716  	pa := o.isActive()
  1717  	// If there is no local interest...
  1718  	if o.hasNoLocalInterest() {
  1719  		o.updateDeliveryInterest(false)
  1720  		if !pa && o.isActive() {
  1721  			o.signalNewMessages()
  1722  		}
  1723  	}
  1724  
  1725  	// We want this to always be running so we can also pick up on interest returning.
  1726  	o.mu.Lock()
  1727  	if o.gwdtmr != nil {
  1728  		o.gwdtmr.Reset(time.Second)
  1729  	} else {
  1730  		stopAndClearTimer(&o.gwdtmr)
  1731  		o.gwdtmr = time.AfterFunc(time.Second, func() { o.watchGWinterest() })
  1732  	}
  1733  	o.mu.Unlock()
  1734  }
  1735  
  1736  // Config returns the consumer's configuration.
  1737  func (o *consumer) config() ConsumerConfig {
  1738  	o.mu.Lock()
  1739  	defer o.mu.Unlock()
  1740  	return o.cfg
  1741  }
  1742  
  1743  // Force expiration of all pending.
  1744  // Lock should be held.
  1745  func (o *consumer) forceExpirePending() {
  1746  	var expired []uint64
  1747  	for seq := range o.pending {
  1748  		if !o.onRedeliverQueue(seq) {
  1749  			expired = append(expired, seq)
  1750  		}
  1751  	}
  1752  	if len(expired) > 0 {
  1753  		sort.Slice(expired, func(i, j int) bool { return expired[i] < expired[j] })
  1754  		o.addToRedeliverQueue(expired...)
  1755  		// Now we should update the timestamp here since we are redelivering.
  1756  		// We will use an incrementing time to preserve order for any other redelivery.
  1757  		off := time.Now().UnixNano() - o.pending[expired[0]].Timestamp
  1758  		for _, seq := range expired {
  1759  			if p, ok := o.pending[seq]; ok && p != nil {
  1760  				p.Timestamp += off
  1761  			}
  1762  		}
  1763  		o.ptmr.Reset(o.ackWait(0))
  1764  	}
  1765  	o.signalNewMessages()
  1766  }
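// Worked example for the timestamp shift above (values hypothetical): if now
// is t=1000ns and the lowest-sequence expired entry has Timestamp 400 while a
// later one has 550, then off = 1000-400 = 600 and the entries become 1000
// and 1150. The whole group is moved up to "now" while the 150ns gap between
// them, and therefore their relative redelivery order, is preserved.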
  1767  
  1768  // Acquire proper locks and update rate limit.
  1769  // Will use what is in config.
  1770  func (o *consumer) setRateLimitNeedsLocks() {
  1771  	o.mu.RLock()
  1772  	mset := o.mset
  1773  	o.mu.RUnlock()
  1774  
  1775  	if mset == nil {
  1776  		return
  1777  	}
  1778  
  1779  	mset.mu.RLock()
  1780  	o.mu.Lock()
  1781  	o.setRateLimit(o.cfg.RateLimit)
  1782  	o.mu.Unlock()
  1783  	mset.mu.RUnlock()
  1784  }
  1785  
  1786  // Set the rate limiter
  1787  // Both mset and consumer lock should be held.
  1788  func (o *consumer) setRateLimit(bps uint64) {
  1789  	if bps == 0 {
  1790  		o.rlimit = nil
  1791  		return
  1792  	}
  1793  
  1794  	// TODO(dlc) - Make sane values or error if not sane?
  1795  	// We are configured in bits per sec so adjust to bytes.
  1796  	rl := rate.Limit(bps / 8)
  1797  	mset := o.mset
  1798  
  1799  	// Burst should be set to maximum msg size for this account, etc.
  1800  	var burst int
  1801  	if mset.cfg.MaxMsgSize > 0 {
  1802  		burst = int(mset.cfg.MaxMsgSize)
  1803  	} else if mset.jsa.account.limits.mpay > 0 {
  1804  		burst = int(mset.jsa.account.limits.mpay)
  1805  	} else {
  1806  		s := mset.jsa.account.srv
  1807  		burst = int(s.getOpts().MaxPayload)
  1808  	}
  1809  
  1810  	o.rlimit = rate.NewLimiter(rl, burst)
  1811  }
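// A minimal sketch of how a limiter built above is typically consumed,
// assuming a hypothetical delivery loop, an outbound message size n, and an
// 8 Mbit/s configuration (which becomes 1,000,000 bytes/sec with a burst of
// the effective max message size):
//
//	lim := rate.NewLimiter(rate.Limit(8_000_000/8), 1024*1024)
//	// Reserve n bytes before sending and sleep for the suggested delay.
//	if r := lim.ReserveN(time.Now(), n); r.OK() {
//		time.Sleep(r.Delay())
//	}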
  1812  
   1813  // Check if the new consumer config is allowed given the old one.
  1814  func (acc *Account) checkNewConsumerConfig(cfg, ncfg *ConsumerConfig) error {
  1815  	if reflect.DeepEqual(cfg, ncfg) {
  1816  		return nil
  1817  	}
  1818  	// Something different, so check since we only allow certain things to be updated.
  1819  	if cfg.DeliverPolicy != ncfg.DeliverPolicy {
  1820  		return errors.New("deliver policy can not be updated")
  1821  	}
  1822  	if cfg.OptStartSeq != ncfg.OptStartSeq {
  1823  		return errors.New("start sequence can not be updated")
  1824  	}
  1825  	if cfg.OptStartTime != nil && ncfg.OptStartTime != nil {
  1826  		// Both have start times set, compare them directly:
  1827  		if !cfg.OptStartTime.Equal(*ncfg.OptStartTime) {
  1828  			return errors.New("start time can not be updated")
  1829  		}
  1830  	} else if cfg.OptStartTime != nil || ncfg.OptStartTime != nil {
  1831  		// At least one start time is set and the other is not
  1832  		return errors.New("start time can not be updated")
  1833  	}
  1834  	if cfg.AckPolicy != ncfg.AckPolicy {
  1835  		return errors.New("ack policy can not be updated")
  1836  	}
  1837  	if cfg.ReplayPolicy != ncfg.ReplayPolicy {
  1838  		return errors.New("replay policy can not be updated")
  1839  	}
  1840  	if cfg.Heartbeat != ncfg.Heartbeat {
  1841  		return errors.New("heart beats can not be updated")
  1842  	}
  1843  	if cfg.FlowControl != ncfg.FlowControl {
  1844  		return errors.New("flow control can not be updated")
  1845  	}
  1846  	if cfg.MaxWaiting != ncfg.MaxWaiting {
  1847  		return errors.New("max waiting can not be updated")
  1848  	}
  1849  
   1850  	// Deliver subject updates are conditional on whether it is currently bound.
  1851  	if cfg.DeliverSubject != ncfg.DeliverSubject {
  1852  		if cfg.DeliverSubject == _EMPTY_ {
  1853  			return errors.New("can not update pull consumer to push based")
  1854  		}
  1855  		if ncfg.DeliverSubject == _EMPTY_ {
  1856  			return errors.New("can not update push consumer to pull based")
  1857  		}
  1858  		rr := acc.sl.Match(cfg.DeliverSubject)
  1859  		if len(rr.psubs)+len(rr.qsubs) != 0 {
  1860  			return NewJSConsumerNameExistError()
  1861  		}
  1862  	}
  1863  
  1864  	// Check if BackOff is defined, MaxDeliver is within range.
  1865  	if lbo := len(ncfg.BackOff); lbo > 0 && ncfg.MaxDeliver <= lbo {
  1866  		return NewJSConsumerMaxDeliverBackoffError()
  1867  	}
  1868  
  1869  	return nil
  1870  }
  1871  
  1872  // Update the config based on the new config, or error if update not allowed.
  1873  func (o *consumer) updateConfig(cfg *ConsumerConfig) error {
  1874  	o.mu.Lock()
  1875  	defer o.mu.Unlock()
  1876  
  1877  	if o.closed || o.mset == nil {
  1878  		return NewJSConsumerDoesNotExistError()
  1879  	}
  1880  
  1881  	if err := o.acc.checkNewConsumerConfig(&o.cfg, cfg); err != nil {
  1882  		return err
  1883  	}
  1884  
  1885  	// Make sure we always store PauseUntil in UTC.
  1886  	if cfg.PauseUntil != nil {
  1887  		utc := (*cfg.PauseUntil).UTC()
  1888  		cfg.PauseUntil = &utc
  1889  	}
  1890  
  1891  	if o.store != nil {
  1892  		// Update local state always.
  1893  		if err := o.store.UpdateConfig(cfg); err != nil {
  1894  			return err
  1895  		}
  1896  	}
  1897  
  1898  	// DeliverSubject
  1899  	if cfg.DeliverSubject != o.cfg.DeliverSubject {
  1900  		o.updateDeliverSubjectLocked(cfg.DeliverSubject)
  1901  	}
  1902  
  1903  	// MaxAckPending
  1904  	if cfg.MaxAckPending != o.cfg.MaxAckPending {
  1905  		o.maxp = cfg.MaxAckPending
  1906  		o.signalNewMessages()
  1907  	}
  1908  	// AckWait
  1909  	if cfg.AckWait != o.cfg.AckWait {
  1910  		if o.ptmr != nil {
  1911  			o.ptmr.Reset(100 * time.Millisecond)
  1912  		}
  1913  	}
  1914  	// Rate Limit
  1915  	if cfg.RateLimit != o.cfg.RateLimit {
  1916  		// We need both locks here so do in Go routine.
  1917  		go o.setRateLimitNeedsLocks()
  1918  	}
  1919  	if cfg.SampleFrequency != o.cfg.SampleFrequency {
  1920  		s := strings.TrimSuffix(cfg.SampleFrequency, "%")
   1921  		// The string has already been verified for validity up the stack, so no
   1922  		// need to check for an error here.
  1923  		sampleFreq, _ := strconv.Atoi(s)
  1924  		o.sfreq = int32(sampleFreq)
  1925  	}
  1926  	// Set MaxDeliver if changed
  1927  	if cfg.MaxDeliver != o.cfg.MaxDeliver {
  1928  		o.maxdc = uint64(cfg.MaxDeliver)
  1929  	}
  1930  	// Set InactiveThreshold if changed.
  1931  	if val := cfg.InactiveThreshold; val != o.cfg.InactiveThreshold {
  1932  		o.updateInactiveThreshold(cfg)
  1933  		stopAndClearTimer(&o.dtmr)
  1934  		// Restart timer only if we are the leader.
  1935  		if o.isLeader() && o.dthresh > 0 {
  1936  			o.dtmr = time.AfterFunc(o.dthresh, o.deleteNotActive)
  1937  		}
  1938  	}
  1939  	// Check whether the pause has changed
  1940  	{
  1941  		var old, new time.Time
  1942  		if o.cfg.PauseUntil != nil {
  1943  			old = *o.cfg.PauseUntil
  1944  		}
  1945  		if cfg.PauseUntil != nil {
  1946  			new = *cfg.PauseUntil
  1947  		}
  1948  		if !old.Equal(new) {
  1949  			o.updatePauseState(cfg)
  1950  			if o.isLeader() {
  1951  				o.sendPauseAdvisoryLocked(cfg)
  1952  			}
  1953  		}
  1954  	}
  1955  
  1956  	// Check for Subject Filters update.
  1957  	newSubjects := gatherSubjectFilters(cfg.FilterSubject, cfg.FilterSubjects)
  1958  	if !subjectSliceEqual(newSubjects, o.subjf.subjects()) {
  1959  		newSubjf := make(subjectFilters, 0, len(newSubjects))
  1960  		for _, newFilter := range newSubjects {
  1961  			fs := &subjectFilter{
  1962  				subject:          newFilter,
  1963  				hasWildcard:      subjectHasWildcard(newFilter),
  1964  				tokenizedSubject: tokenizeSubjectIntoSlice(nil, newFilter),
  1965  			}
   1966  			// If the given subject was already present, retain its field values
   1967  			// so `getNextMsg` can take advantage of already buffered `pmsgs`.
   1968  			for _, oldFilter := range o.subjf {
   1969  				if oldFilter.subject == newFilter {
   1970  					fs.currentSeq = oldFilter.currentSeq
   1971  					fs.nextSeq = oldFilter.nextSeq
   1972  					fs.pmsg = oldFilter.pmsg
   1973  					break
   1974  				}
   1975  			}
  1976  			newSubjf = append(newSubjf, fs)
  1977  		}
  1978  		// Make sure we have correct signaling setup.
  1979  		// Consumer lock can not be held.
  1980  		mset := o.mset
  1981  		o.mu.Unlock()
  1982  		mset.swapSigSubs(o, newSubjf.subjects())
  1983  		o.mu.Lock()
  1984  
  1985  		// When we're done with signaling, we can replace the subjects.
  1986  		// If filters were removed, set `o.subjf` to nil.
  1987  		if len(newSubjf) == 0 {
  1988  			o.subjf = nil
  1989  		} else {
  1990  			o.subjf = newSubjf
  1991  		}
  1992  	}
  1993  
  1994  	// Record new config for others that do not need special handling.
  1995  	// Allowed but considered no-op, [Description, SampleFrequency, MaxWaiting, HeadersOnly]
  1996  	o.cfg = *cfg
  1997  
  1998  	// Re-calculate num pending on update.
  1999  	o.streamNumPending()
  2000  
  2001  	return nil
  2002  }
  2003  
  2004  // This is a config change for the delivery subject for a
  2005  // push based consumer.
  2006  func (o *consumer) updateDeliverSubject(newDeliver string) {
  2007  	// Update the config and the dsubj
  2008  	o.mu.Lock()
  2009  	defer o.mu.Unlock()
  2010  	o.updateDeliverSubjectLocked(newDeliver)
  2011  }
  2012  
  2013  // This is a config change for the delivery subject for a
  2014  // push based consumer.
  2015  func (o *consumer) updateDeliverSubjectLocked(newDeliver string) {
  2016  	if o.closed || o.isPullMode() || o.cfg.DeliverSubject == newDeliver {
  2017  		return
  2018  	}
  2019  
  2020  	// Force redeliver of all pending on change of delivery subject.
  2021  	if len(o.pending) > 0 {
  2022  		o.forceExpirePending()
  2023  	}
  2024  
  2025  	o.acc.sl.clearNotification(o.dsubj, o.cfg.DeliverGroup, o.inch)
  2026  	o.dsubj, o.cfg.DeliverSubject = newDeliver, newDeliver
   2027  	// When we register the new one it will deliver to the update state loop.
  2028  	o.acc.sl.registerNotification(newDeliver, o.cfg.DeliverGroup, o.inch)
  2029  }
  2030  
  2031  // Check that configs are equal but allow delivery subjects to be different.
  2032  func configsEqualSansDelivery(a, b ConsumerConfig) bool {
   2033  	// These were copied in, so we can clear the delivery subjects here.
  2034  	a.DeliverSubject, b.DeliverSubject = _EMPTY_, _EMPTY_
  2035  	return reflect.DeepEqual(a, b)
  2036  }
  2037  
  2038  // Helper to send a reply to an ack.
  2039  func (o *consumer) sendAckReply(subj string) {
  2040  	o.mu.Lock()
  2041  	defer o.mu.Unlock()
  2042  	o.sendAdvisory(subj, nil)
  2043  }
  2044  
  2045  type jsAckMsg struct {
  2046  	subject string
  2047  	reply   string
  2048  	hdr     int
  2049  	msg     []byte
  2050  }
  2051  
  2052  var jsAckMsgPool sync.Pool
  2053  
  2054  func newJSAckMsg(subj, reply string, hdr int, msg []byte) *jsAckMsg {
  2055  	var m *jsAckMsg
  2056  	am := jsAckMsgPool.Get()
  2057  	if am != nil {
  2058  		m = am.(*jsAckMsg)
  2059  	} else {
  2060  		m = &jsAckMsg{}
  2061  	}
  2062  	// When getting something from a pool it is critical that all fields are
   2063  	// initialized. Doing it this way guarantees that if someone adds a field to
  2064  	// the structure, the compiler will fail the build if this line is not updated.
  2065  	(*m) = jsAckMsg{subj, reply, hdr, msg}
  2066  	return m
  2067  }
  2068  
  2069  func (am *jsAckMsg) returnToPool() {
  2070  	if am == nil {
  2071  		return
  2072  	}
  2073  	am.subject, am.reply, am.hdr, am.msg = _EMPTY_, _EMPTY_, -1, nil
  2074  	jsAckMsgPool.Put(am)
  2075  }
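// The unkeyed struct assignment in newJSAckMsg is a deliberate pattern; a
// minimal sketch of the same idea for a hypothetical pooled type:
//
//	type item struct{ a, b int }
//	var itemPool sync.Pool
//
//	func newItem(a, b int) *item {
//		it, _ := itemPool.Get().(*item)
//		if it == nil {
//			it = &item{}
//		}
//		*it = item{a, b} // positional: adding a field breaks this line at compile time
//		return it
//	}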
  2076  
  2077  // Push the ack message to the consumer's ackMsgs queue
  2078  func (o *consumer) pushAck(_ *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) {
  2079  	atomic.AddInt64(&o.awl, 1)
  2080  	o.ackMsgs.push(newJSAckMsg(subject, reply, c.pa.hdr, copyBytes(rmsg)))
  2081  }
  2082  
  2083  // Processes a message for the ack reply subject delivered with a message.
  2084  func (o *consumer) processAck(subject, reply string, hdr int, rmsg []byte) {
  2085  	defer atomic.AddInt64(&o.awl, -1)
  2086  
  2087  	var msg []byte
  2088  	if hdr > 0 {
  2089  		msg = rmsg[hdr:]
  2090  	} else {
  2091  		msg = rmsg
  2092  	}
  2093  
  2094  	sseq, dseq, dc := ackReplyInfo(subject)
  2095  
  2096  	skipAckReply := sseq == 0
  2097  
  2098  	switch {
  2099  	case len(msg) == 0, bytes.Equal(msg, AckAck), bytes.Equal(msg, AckOK):
  2100  		o.processAckMsg(sseq, dseq, dc, true)
  2101  	case bytes.HasPrefix(msg, AckNext):
  2102  		o.processAckMsg(sseq, dseq, dc, true)
  2103  		o.processNextMsgRequest(reply, msg[len(AckNext):])
  2104  		skipAckReply = true
  2105  	case bytes.HasPrefix(msg, AckNak):
  2106  		o.processNak(sseq, dseq, dc, msg)
  2107  	case bytes.Equal(msg, AckProgress):
  2108  		o.progressUpdate(sseq)
  2109  	case bytes.HasPrefix(msg, AckTerm):
  2110  		var reason string
  2111  		if buf := msg[len(AckTerm):]; len(buf) > 0 {
  2112  			reason = string(bytes.TrimSpace(buf))
  2113  		}
  2114  		o.processTerm(sseq, dseq, dc, reason)
  2115  	}
  2116  
  2117  	// Ack the ack if requested.
  2118  	if len(reply) > 0 && !skipAckReply {
  2119  		o.sendAckReply(reply)
  2120  	}
  2121  }
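// For reference, a sketch of the ack bodies matched above as a client would
// publish them to a message's reply subject (the constants such as AckAck are
// defined elsewhere in this package; the payloads here are examples):
//
//	""                     -> positive ack (empty body)
//	"+ACK" / "+OK"         -> positive ack
//	"+NXT {\"batch\":10}"  -> ack, then request the next batch
//	"-NAK 5s"              -> negative ack with a redelivery delay
//	"+WPI"                 -> work in progress, extends the ack wait
//	"+TERM stale data"     -> terminate delivery, with an optional reason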
  2122  
  2123  // Used to process a working update to delay redelivery.
  2124  func (o *consumer) progressUpdate(seq uint64) {
  2125  	o.mu.Lock()
  2126  	defer o.mu.Unlock()
  2127  
  2128  	if p, ok := o.pending[seq]; ok {
  2129  		p.Timestamp = time.Now().UnixNano()
  2130  		// Update store system.
  2131  		o.updateDelivered(p.Sequence, seq, 1, p.Timestamp)
  2132  	}
  2133  }
  2134  
  2135  // Lock should be held.
  2136  func (o *consumer) updateSkipped(seq uint64) {
  2137  	// Clustered mode and R>1 only.
  2138  	if o.node == nil || !o.isLeader() {
  2139  		return
  2140  	}
  2141  	var b [1 + 8]byte
  2142  	b[0] = byte(updateSkipOp)
  2143  	var le = binary.LittleEndian
  2144  	le.PutUint64(b[1:], seq)
  2145  	o.propose(b[:])
  2146  }
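// Decode sketch for the fixed-size entry proposed above (illustrative only;
// the real apply path lives in the clustering code). buf is the entry data:
//
//	if len(buf) == 9 && buf[0] == byte(updateSkipOp) {
//		seq := binary.LittleEndian.Uint64(buf[1:])
//		_ = seq // the new starting stream sequence for this consumer
//	}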
  2147  
  2148  func (o *consumer) loopAndForwardProposals(qch chan struct{}) {
  2149  	o.mu.RLock()
  2150  	node, pch := o.node, o.pch
  2151  	o.mu.RUnlock()
  2152  
  2153  	if node == nil || pch == nil {
  2154  		return
  2155  	}
  2156  
  2157  	forwardProposals := func() error {
  2158  		o.mu.Lock()
  2159  		if o.node != node || node.State() != Leader {
  2160  			o.mu.Unlock()
  2161  			return errors.New("no longer leader")
  2162  		}
  2163  		proposal := o.phead
  2164  		o.phead, o.ptail = nil, nil
  2165  		o.mu.Unlock()
  2166  		// 256k max for now per batch.
  2167  		const maxBatch = 256 * 1024
  2168  		var entries []*Entry
  2169  		for sz := 0; proposal != nil; proposal = proposal.next {
  2170  			entry := entryPool.Get().(*Entry)
  2171  			entry.Type, entry.Data = EntryNormal, proposal.data
  2172  			entries = append(entries, entry)
  2173  			sz += len(proposal.data)
  2174  			if sz > maxBatch {
  2175  				node.ProposeDirect(entries)
  2176  				// We need to re-create `entries` because there is a reference
  2177  				// to it in the node's pae map.
  2178  				sz, entries = 0, nil
  2179  			}
  2180  		}
  2181  		if len(entries) > 0 {
  2182  			node.ProposeDirect(entries)
  2183  		}
  2184  		return nil
  2185  	}
  2186  
  2187  	// In case we have anything pending on entry.
  2188  	forwardProposals()
  2189  
  2190  	for {
  2191  		select {
  2192  		case <-qch:
  2193  			forwardProposals()
  2194  			return
  2195  		case <-pch:
  2196  			if err := forwardProposals(); err != nil {
  2197  				return
  2198  			}
  2199  		}
  2200  	}
  2201  }
  2202  
  2203  // Lock should be held.
  2204  func (o *consumer) propose(entry []byte) {
  2205  	var notify bool
  2206  	p := &proposal{data: entry}
  2207  	if o.phead == nil {
  2208  		o.phead = p
  2209  		notify = true
  2210  	} else {
  2211  		o.ptail.next = p
  2212  	}
  2213  	o.ptail = p
  2214  
  2215  	// Kick our looper routine if needed.
  2216  	if notify {
  2217  		select {
  2218  		case o.pch <- struct{}{}:
  2219  		default:
  2220  		}
  2221  	}
  2222  }
  2223  
  2224  // Lock should be held.
  2225  func (o *consumer) updateDelivered(dseq, sseq, dc uint64, ts int64) {
  2226  	// Clustered mode and R>1.
  2227  	if o.node != nil {
  2228  		// Inline for now, use variable compression.
  2229  		var b [4*binary.MaxVarintLen64 + 1]byte
  2230  		b[0] = byte(updateDeliveredOp)
  2231  		n := 1
  2232  		n += binary.PutUvarint(b[n:], dseq)
  2233  		n += binary.PutUvarint(b[n:], sseq)
  2234  		n += binary.PutUvarint(b[n:], dc)
  2235  		n += binary.PutVarint(b[n:], ts)
  2236  		o.propose(b[:n])
  2237  	}
  2238  	if o.store != nil {
  2239  		// Update local state always.
  2240  		o.store.UpdateDelivered(dseq, sseq, dc, ts)
  2241  	}
  2242  	// Update activity.
  2243  	o.ldt = time.Now()
  2244  }
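// Matching decode sketch (illustrative, buf being the proposed entry): since
// the entry uses varints, the apply side must walk the buffer rather than use
// fixed offsets:
//
//	n := 1 // skip the op byte
//	dseq, k := binary.Uvarint(buf[n:])
//	n += k
//	sseq, k := binary.Uvarint(buf[n:])
//	n += k
//	dc, k := binary.Uvarint(buf[n:])
//	n += k
//	ts, _ := binary.Varint(buf[n:]) // signed varint for the timestamp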
  2245  
  2246  // Lock should be held.
  2247  func (o *consumer) updateAcks(dseq, sseq uint64) {
  2248  	if o.node != nil {
  2249  		// Inline for now, use variable compression.
  2250  		var b [2*binary.MaxVarintLen64 + 1]byte
  2251  		b[0] = byte(updateAcksOp)
  2252  		n := 1
  2253  		n += binary.PutUvarint(b[n:], dseq)
  2254  		n += binary.PutUvarint(b[n:], sseq)
  2255  		o.propose(b[:n])
  2256  	} else if o.store != nil {
  2257  		o.store.UpdateAcks(dseq, sseq)
  2258  	}
  2259  	// Update activity.
  2260  	o.lat = time.Now()
  2261  }
  2262  
  2263  // Communicate to the cluster an addition of a pending request.
  2264  // Lock should be held.
  2265  func (o *consumer) addClusterPendingRequest(reply string) {
  2266  	if o.node == nil || !o.pendingRequestsOk() {
  2267  		return
  2268  	}
  2269  	b := make([]byte, len(reply)+1)
  2270  	b[0] = byte(addPendingRequest)
  2271  	copy(b[1:], reply)
  2272  	o.propose(b)
  2273  }
  2274  
  2275  // Communicate to the cluster a removal of a pending request.
  2276  // Lock should be held.
  2277  func (o *consumer) removeClusterPendingRequest(reply string) {
  2278  	if o.node == nil || !o.pendingRequestsOk() {
  2279  		return
  2280  	}
  2281  	b := make([]byte, len(reply)+1)
  2282  	b[0] = byte(removePendingRequest)
  2283  	copy(b[1:], reply)
  2284  	o.propose(b)
  2285  }
  2286  
  2287  // Set whether or not we can send pending requests to followers.
  2288  func (o *consumer) setPendingRequestsOk(ok bool) {
  2289  	o.mu.Lock()
  2290  	o.prOk = ok
  2291  	o.mu.Unlock()
  2292  }
  2293  
  2294  // Lock should be held.
  2295  func (o *consumer) pendingRequestsOk() bool {
  2296  	return o.prOk
  2297  }
  2298  
  2299  // Set whether or not we can send info about pending pull requests to our group.
   2300  // Requires that all peers are on a minimum server version.
  2301  func (o *consumer) checkAndSetPendingRequestsOk() {
  2302  	o.mu.RLock()
  2303  	s, isValid := o.srv, o.mset != nil
  2304  	o.mu.RUnlock()
  2305  	if !isValid {
  2306  		return
  2307  	}
  2308  
  2309  	if ca := o.consumerAssignment(); ca != nil && len(ca.Group.Peers) > 1 {
  2310  		for _, pn := range ca.Group.Peers {
  2311  			if si, ok := s.nodeToInfo.Load(pn); ok {
  2312  				if !versionAtLeast(si.(nodeInfo).version, 2, 7, 1) {
  2313  					// We expect all of our peers to eventually be up to date.
   2314  					// So check again in a while.
  2315  					time.AfterFunc(eventsHBInterval, func() { o.checkAndSetPendingRequestsOk() })
  2316  					o.setPendingRequestsOk(false)
  2317  					return
  2318  				}
  2319  			}
  2320  		}
  2321  	}
  2322  	o.setPendingRequestsOk(true)
  2323  }
  2324  
  2325  // On leadership change make sure we alert the pending requests that they are no longer valid.
  2326  func (o *consumer) checkPendingRequests() {
  2327  	o.mu.Lock()
  2328  	defer o.mu.Unlock()
  2329  	if o.mset == nil || o.outq == nil {
  2330  		return
  2331  	}
  2332  	hdr := []byte("NATS/1.0 409 Leadership Change\r\n\r\n")
  2333  	for reply := range o.prm {
  2334  		o.outq.send(newJSPubMsg(reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  2335  	}
  2336  	o.prm = nil
  2337  }
  2338  
  2339  // This will release any pending pull requests if applicable.
  2340  // Should be called only by the leader being deleted or stopped.
  2341  // Lock should be held.
  2342  func (o *consumer) releaseAnyPendingRequests(isConsumerAssigned bool) {
  2343  	if o.mset == nil || o.outq == nil || o.waiting.len() == 0 {
  2344  		return
  2345  	}
  2346  	var hdr []byte
  2347  	if !isConsumerAssigned {
  2348  		hdr = []byte("NATS/1.0 409 Consumer Deleted\r\n\r\n")
  2349  	}
  2350  	wq := o.waiting
  2351  	o.waiting = nil
  2352  	for i, rp := 0, wq.rp; i < wq.n; i++ {
  2353  		if wr := wq.reqs[rp]; wr != nil {
  2354  			if hdr != nil {
  2355  				o.outq.send(newJSPubMsg(wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  2356  			}
  2357  			wr.recycle()
  2358  		}
  2359  		rp = (rp + 1) % cap(wq.reqs)
  2360  	}
  2361  }
  2362  
  2363  // Process a NAK.
  2364  func (o *consumer) processNak(sseq, dseq, dc uint64, nak []byte) {
  2365  	o.mu.Lock()
  2366  	defer o.mu.Unlock()
  2367  
  2368  	// Check for out of range.
  2369  	if dseq <= o.adflr || dseq > o.dseq {
  2370  		return
  2371  	}
  2372  	// If we are explicit ack make sure this is still on our pending list.
  2373  	if _, ok := o.pending[sseq]; !ok {
  2374  		return
  2375  	}
  2376  
  2377  	// Deliver an advisory
  2378  	e := JSConsumerDeliveryNakAdvisory{
  2379  		TypedEvent: TypedEvent{
  2380  			Type: JSConsumerDeliveryNakAdvisoryType,
  2381  			ID:   nuid.Next(),
  2382  			Time: time.Now().UTC(),
  2383  		},
  2384  		Stream:      o.stream,
  2385  		Consumer:    o.name,
  2386  		ConsumerSeq: dseq,
  2387  		StreamSeq:   sseq,
  2388  		Deliveries:  dc,
  2389  		Domain:      o.srv.getOpts().JetStreamDomain,
  2390  	}
  2391  
  2392  	j, err := json.Marshal(e)
  2393  	if err != nil {
  2394  		return
  2395  	}
  2396  
  2397  	o.sendAdvisory(o.nakEventT, j)
  2398  
  2399  	// Check to see if we have delays attached.
  2400  	if len(nak) > len(AckNak) {
  2401  		arg := bytes.TrimSpace(nak[len(AckNak):])
  2402  		if len(arg) > 0 {
  2403  			var d time.Duration
  2404  			var err error
  2405  			if arg[0] == '{' {
  2406  				var nd ConsumerNakOptions
  2407  				if err = json.Unmarshal(arg, &nd); err == nil {
  2408  					d = nd.Delay
  2409  				}
  2410  			} else {
  2411  				d, err = time.ParseDuration(string(arg))
  2412  			}
  2413  			if err != nil {
  2414  				// Treat this as normal NAK.
  2415  				o.srv.Warnf("JetStream consumer '%s > %s > %s' bad NAK delay value: %q", o.acc.Name, o.stream, o.name, arg)
  2416  			} else {
  2417  				// We have a parsed duration that the user wants us to wait before retrying.
  2418  				// Make sure we are not on the rdq.
  2419  				o.removeFromRedeliverQueue(sseq)
  2420  				if p, ok := o.pending[sseq]; ok {
   2421  					// A timestamp of now - AckWait is expired right now, so offset the delay from there.
  2422  					p.Timestamp = time.Now().Add(-o.cfg.AckWait).Add(d).UnixNano()
  2423  					// Update store system which will update followers as well.
  2424  					o.updateDelivered(p.Sequence, sseq, dc, p.Timestamp)
  2425  					if o.ptmr != nil {
  2426  						// Want checkPending to run and figure out the next timer ttl.
  2427  						// TODO(dlc) - We could optimize this maybe a bit more and track when we expect the timer to fire.
  2428  						o.ptmr.Reset(10 * time.Millisecond)
  2429  					}
  2430  				}
   2431  				// Nothing else for us to do now, so return.
  2432  				return
  2433  			}
  2434  		}
  2435  	}
  2436  
  2437  	// If already queued up also ignore.
  2438  	if !o.onRedeliverQueue(sseq) {
  2439  		o.addToRedeliverQueue(sseq)
  2440  	}
  2441  
  2442  	o.signalNewMessages()
  2443  }
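// The delay argument parsed above accepts two forms (sketch, values
// hypothetical): a bare Go duration string, or a JSON ConsumerNakOptions body
// with the delay in nanoseconds.
//
//	-NAK 10s
//	-NAK {"delay": 10000000000}
//
// Both postpone redelivery of the message by 10s from now; with no argument
// the message is simply queued for immediate redelivery.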
  2444  
  2445  // Process a TERM
  2446  func (o *consumer) processTerm(sseq, dseq, dc uint64, reason string) {
  2447  	// Treat like an ack to suppress redelivery.
  2448  	o.processAckMsg(sseq, dseq, dc, false)
  2449  
  2450  	o.mu.Lock()
  2451  	defer o.mu.Unlock()
  2452  
  2453  	// Deliver an advisory
  2454  	e := JSConsumerDeliveryTerminatedAdvisory{
  2455  		TypedEvent: TypedEvent{
  2456  			Type: JSConsumerDeliveryTerminatedAdvisoryType,
  2457  			ID:   nuid.Next(),
  2458  			Time: time.Now().UTC(),
  2459  		},
  2460  		Stream:      o.stream,
  2461  		Consumer:    o.name,
  2462  		ConsumerSeq: dseq,
  2463  		StreamSeq:   sseq,
  2464  		Deliveries:  dc,
  2465  		Reason:      reason,
  2466  		Domain:      o.srv.getOpts().JetStreamDomain,
  2467  	}
  2468  
  2469  	j, err := json.Marshal(e)
  2470  	if err != nil {
  2471  		return
  2472  	}
  2473  
  2474  	subj := JSAdvisoryConsumerMsgTerminatedPre + "." + o.stream + "." + o.name
  2475  	o.sendAdvisory(subj, j)
  2476  }
  2477  
   2478  // Introduce a small delay in when the timer fires to check pending.
   2479  // This allows bursts to be treated in the same time frame.
  2480  const ackWaitDelay = time.Millisecond
  2481  
  2482  // ackWait returns how long to wait to fire the pending timer.
  2483  func (o *consumer) ackWait(next time.Duration) time.Duration {
  2484  	if next > 0 {
  2485  		return next + ackWaitDelay
  2486  	}
  2487  	return o.cfg.AckWait + ackWaitDelay
  2488  }
  2489  
   2490  // Due to a bug in the calculation of sequences on restoring redelivered, do a quick sanity check.
  2491  // Lock should be held.
  2492  func (o *consumer) checkRedelivered(slseq uint64) {
  2493  	var lseq uint64
  2494  	if mset := o.mset; mset != nil {
  2495  		lseq = slseq
  2496  	}
  2497  	var shouldUpdateState bool
  2498  	for sseq := range o.rdc {
  2499  		if sseq < o.asflr || (lseq > 0 && sseq > lseq) {
  2500  			delete(o.rdc, sseq)
  2501  			o.removeFromRedeliverQueue(sseq)
  2502  			shouldUpdateState = true
  2503  		}
  2504  	}
  2505  	if shouldUpdateState {
  2506  		if err := o.writeStoreStateUnlocked(); err != nil && o.srv != nil && o.mset != nil && !o.closed {
  2507  			s, acc, mset, name := o.srv, o.acc, o.mset, o.name
  2508  			s.Warnf("Consumer '%s > %s > %s' error on write store state from check redelivered: %v", acc, mset.cfg.Name, name, err)
  2509  		}
  2510  	}
  2511  }
  2512  
  2513  // This will restore the state from disk.
  2514  // Lock should be held.
  2515  func (o *consumer) readStoredState(slseq uint64) error {
  2516  	if o.store == nil {
  2517  		return nil
  2518  	}
  2519  	state, err := o.store.State()
  2520  	if err == nil {
  2521  		o.applyState(state)
  2522  		if len(o.rdc) > 0 {
  2523  			o.checkRedelivered(slseq)
  2524  		}
  2525  	}
  2526  	return err
  2527  }
  2528  
  2529  // Apply the consumer stored state.
  2530  // Lock should be held.
  2531  func (o *consumer) applyState(state *ConsumerState) {
  2532  	if state == nil {
  2533  		return
  2534  	}
  2535  
  2536  	// If o.sseq is greater don't update. Don't go backwards on o.sseq.
  2537  	if o.sseq <= state.Delivered.Stream {
  2538  		o.sseq = state.Delivered.Stream + 1
  2539  	}
  2540  	o.dseq = state.Delivered.Consumer + 1
  2541  
  2542  	o.adflr = state.AckFloor.Consumer
  2543  	o.asflr = state.AckFloor.Stream
  2544  	o.pending = state.Pending
  2545  	o.rdc = state.Redelivered
  2546  
  2547  	// Setup tracking timer if we have restored pending.
  2548  	if len(o.pending) > 0 {
  2549  		// This is on startup or leader change. We want to check pending
   2550  		// sooner in case there are inconsistencies etc. Pick a delay between 500ms and 1.5s.
   2551  		delay := 500*time.Millisecond + time.Duration(rand.Int63n(1000))*time.Millisecond
   2552  		// If the normal ack wait is lower than this, just use that.
  2553  		if o.cfg.AckWait < delay {
  2554  			delay = o.ackWait(0)
  2555  		}
  2556  		if o.ptmr == nil {
  2557  			o.ptmr = time.AfterFunc(delay, o.checkPending)
  2558  		} else {
  2559  			o.ptmr.Reset(delay)
  2560  		}
  2561  	}
  2562  }
  2563  
  2564  // Sets our store state from another source. Used in clustered mode on snapshot restore.
  2565  // Lock should be held.
  2566  func (o *consumer) setStoreState(state *ConsumerState) error {
  2567  	if state == nil || o.store == nil {
  2568  		return nil
  2569  	}
  2570  	err := o.store.Update(state)
  2571  	if err == nil {
  2572  		o.applyState(state)
  2573  	}
  2574  	return err
  2575  }
  2576  
  2577  // Update our state to the store.
  2578  func (o *consumer) writeStoreState() error {
  2579  	o.mu.Lock()
  2580  	defer o.mu.Unlock()
  2581  	return o.writeStoreStateUnlocked()
  2582  }
  2583  
  2584  // Update our state to the store.
  2585  // Lock should be held.
  2586  func (o *consumer) writeStoreStateUnlocked() error {
  2587  	if o.store == nil {
  2588  		return nil
  2589  	}
  2590  	state := ConsumerState{
  2591  		Delivered: SequencePair{
  2592  			Consumer: o.dseq - 1,
  2593  			Stream:   o.sseq - 1,
  2594  		},
  2595  		AckFloor: SequencePair{
  2596  			Consumer: o.adflr,
  2597  			Stream:   o.asflr,
  2598  		},
  2599  		Pending:     o.pending,
  2600  		Redelivered: o.rdc,
  2601  	}
  2602  	return o.store.Update(&state)
  2603  }
  2604  
  2605  // Returns an initial info. Only applicable for non-clustered consumers.
   2606  // We will clear it after we return it, so it is one-shot.
  2607  func (o *consumer) initialInfo() *ConsumerInfo {
  2608  	o.mu.Lock()
  2609  	ici := o.ici
  2610  	o.ici = nil // gc friendly
  2611  	o.mu.Unlock()
  2612  	if ici == nil {
  2613  		ici = o.info()
  2614  	}
  2615  	return ici
  2616  }
  2617  
  2618  // Clears our initial info.
  2619  // Used when we have a leader change in cluster mode but do not send a response.
  2620  func (o *consumer) clearInitialInfo() {
  2621  	o.mu.Lock()
  2622  	o.ici = nil // gc friendly
  2623  	o.mu.Unlock()
  2624  }
  2625  
  2626  // Info returns our current consumer state.
  2627  func (o *consumer) info() *ConsumerInfo {
  2628  	return o.infoWithSnap(false)
  2629  }
  2630  
  2631  func (o *consumer) infoWithSnap(snap bool) *ConsumerInfo {
  2632  	return o.infoWithSnapAndReply(snap, _EMPTY_)
  2633  }
  2634  
  2635  func (o *consumer) infoWithSnapAndReply(snap bool, reply string) *ConsumerInfo {
  2636  	o.mu.Lock()
  2637  	mset := o.mset
  2638  	if o.closed || mset == nil || mset.srv == nil {
  2639  		o.mu.Unlock()
  2640  		return nil
  2641  	}
  2642  	js := o.js
  2643  	if js == nil {
  2644  		o.mu.Unlock()
  2645  		return nil
  2646  	}
  2647  
  2648  	// Capture raftGroup.
  2649  	var rg *raftGroup
  2650  	if o.ca != nil {
  2651  		rg = o.ca.Group
  2652  	}
  2653  
  2654  	cfg := o.cfg
  2655  	info := &ConsumerInfo{
  2656  		Stream:  o.stream,
  2657  		Name:    o.name,
  2658  		Created: o.created,
  2659  		Config:  &cfg,
  2660  		Delivered: SequenceInfo{
  2661  			Consumer: o.dseq - 1,
  2662  			Stream:   o.sseq - 1,
  2663  		},
  2664  		AckFloor: SequenceInfo{
  2665  			Consumer: o.adflr,
  2666  			Stream:   o.asflr,
  2667  		},
  2668  		NumAckPending:  len(o.pending),
  2669  		NumRedelivered: len(o.rdc),
  2670  		NumPending:     o.checkNumPending(),
  2671  		PushBound:      o.isPushMode() && o.active,
  2672  		TimeStamp:      time.Now().UTC(),
  2673  	}
  2674  	if o.cfg.PauseUntil != nil {
  2675  		p := *o.cfg.PauseUntil
  2676  		if info.Paused = time.Now().Before(p); info.Paused {
  2677  			info.PauseRemaining = time.Until(p)
  2678  		}
  2679  	}
  2680  
  2681  	// If we are replicated and we are not the leader we need to pull certain data from our store.
  2682  	if rg != nil && rg.node != nil && !o.isLeader() && o.store != nil {
  2683  		state, err := o.store.BorrowState()
  2684  		if err != nil {
  2685  			o.mu.Unlock()
  2686  			return nil
  2687  		}
  2688  		info.Delivered.Consumer, info.Delivered.Stream = state.Delivered.Consumer, state.Delivered.Stream
  2689  		info.AckFloor.Consumer, info.AckFloor.Stream = state.AckFloor.Consumer, state.AckFloor.Stream
  2690  		info.NumAckPending = len(state.Pending)
  2691  		info.NumRedelivered = len(state.Redelivered)
  2692  	}
  2693  
   2694  	// Set the last activity times if non-zero. Also make them UTC here.
  2695  	if !o.ldt.IsZero() {
  2696  		ldt := o.ldt.UTC() // This copies as well.
  2697  		info.Delivered.Last = &ldt
  2698  	}
  2699  	if !o.lat.IsZero() {
  2700  		lat := o.lat.UTC() // This copies as well.
  2701  		info.AckFloor.Last = &lat
  2702  	}
  2703  
  2704  	// If we are a pull mode consumer, report on number of waiting requests.
  2705  	if o.isPullMode() {
  2706  		o.processWaiting(false)
  2707  		info.NumWaiting = o.waiting.len()
  2708  	}
  2709  	// If we were asked to snapshot do so here.
  2710  	if snap {
  2711  		o.ici = info
  2712  	}
  2713  	sysc := o.sysc
  2714  	o.mu.Unlock()
  2715  
  2716  	// Do cluster.
  2717  	if rg != nil {
  2718  		info.Cluster = js.clusterInfo(rg)
  2719  	}
  2720  
  2721  	// If we have a reply subject send the response here.
  2722  	if reply != _EMPTY_ && sysc != nil {
  2723  		sysc.sendInternalMsg(reply, _EMPTY_, nil, info)
  2724  	}
  2725  
  2726  	return info
  2727  }
  2728  
  2729  // Will signal us that new messages are available. Will break out of waiting.
  2730  func (o *consumer) signalNewMessages() {
  2731  	// Kick our new message channel
  2732  	select {
  2733  	case o.mch <- struct{}{}:
  2734  	default:
  2735  	}
  2736  }
  2737  
  2738  // shouldSample lets us know if we are sampling metrics on acks.
  2739  func (o *consumer) shouldSample() bool {
  2740  	switch {
  2741  	case o.sfreq <= 0:
  2742  		return false
  2743  	case o.sfreq >= 100:
  2744  		return true
  2745  	}
  2746  
  2747  	// TODO(ripienaar) this is a tad slow so we need to rethink here, however this will only
   2748  	// hit for those with sampling enabled and it's not the default.
  2749  	return rand.Int31n(100) <= o.sfreq
  2750  }
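// Note the inclusive comparison above: rand.Int31n(100) yields 0..99, so a
// frequency f with 0 < f < 100 actually samples with probability (f+1)/100;
// e.g. a SampleFrequency of "25" matches the 26 values 0..25.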
  2751  
  2752  func (o *consumer) sampleAck(sseq, dseq, dc uint64) {
  2753  	if !o.shouldSample() {
  2754  		return
  2755  	}
  2756  
  2757  	now := time.Now().UTC()
  2758  	unow := now.UnixNano()
  2759  
  2760  	e := JSConsumerAckMetric{
  2761  		TypedEvent: TypedEvent{
  2762  			Type: JSConsumerAckMetricType,
  2763  			ID:   nuid.Next(),
  2764  			Time: now,
  2765  		},
  2766  		Stream:      o.stream,
  2767  		Consumer:    o.name,
  2768  		ConsumerSeq: dseq,
  2769  		StreamSeq:   sseq,
  2770  		Delay:       unow - o.pending[sseq].Timestamp,
  2771  		Deliveries:  dc,
  2772  		Domain:      o.srv.getOpts().JetStreamDomain,
  2773  	}
  2774  
  2775  	j, err := json.Marshal(e)
  2776  	if err != nil {
  2777  		return
  2778  	}
  2779  
  2780  	o.sendAdvisory(o.ackEventT, j)
  2781  }
  2782  
  2783  func (o *consumer) processAckMsg(sseq, dseq, dc uint64, doSample bool) {
  2784  	o.mu.Lock()
  2785  	if o.closed {
  2786  		o.mu.Unlock()
  2787  		return
  2788  	}
  2789  
  2790  	var sagap uint64
  2791  	var needSignal bool
  2792  
  2793  	switch o.cfg.AckPolicy {
  2794  	case AckExplicit:
  2795  		if p, ok := o.pending[sseq]; ok {
  2796  			if doSample {
  2797  				o.sampleAck(sseq, dseq, dc)
  2798  			}
  2799  			if o.maxp > 0 && len(o.pending) >= o.maxp {
  2800  				needSignal = true
  2801  			}
  2802  			delete(o.pending, sseq)
  2803  			// Use the original deliver sequence from our pending record.
  2804  			dseq = p.Sequence
  2805  		}
  2806  		if len(o.pending) == 0 {
  2807  			o.adflr, o.asflr = o.dseq-1, o.sseq-1
  2808  		} else if dseq == o.adflr+1 {
  2809  			o.adflr, o.asflr = dseq, sseq
  2810  			for ss := sseq + 1; ss < o.sseq; ss++ {
  2811  				if p, ok := o.pending[ss]; ok {
  2812  					if p.Sequence > 0 {
  2813  						o.adflr, o.asflr = p.Sequence-1, ss-1
  2814  					}
  2815  					break
  2816  				}
  2817  			}
  2818  		}
  2819  		// We do these regardless.
  2820  		delete(o.rdc, sseq)
  2821  		o.removeFromRedeliverQueue(sseq)
  2822  	case AckAll:
  2823  		// no-op
  2824  		if dseq <= o.adflr || sseq <= o.asflr {
  2825  			o.mu.Unlock()
  2826  			return
  2827  		}
  2828  		if o.maxp > 0 && len(o.pending) >= o.maxp {
  2829  			needSignal = true
  2830  		}
  2831  		sagap = sseq - o.asflr
  2832  		o.adflr, o.asflr = dseq, sseq
  2833  		for seq := sseq; seq > sseq-sagap; seq-- {
  2834  			delete(o.pending, seq)
  2835  			delete(o.rdc, seq)
  2836  			o.removeFromRedeliverQueue(seq)
  2837  		}
  2838  	case AckNone:
  2839  		// FIXME(dlc) - This is error but do we care?
  2840  		o.mu.Unlock()
  2841  		return
  2842  	}
  2843  
  2844  	// Update underlying store.
  2845  	o.updateAcks(dseq, sseq)
  2846  
  2847  	mset := o.mset
  2848  	clustered := o.node != nil
  2849  
  2850  	// In case retention changes for a stream, this ought to have been updated
  2851  	// using the consumer lock to avoid a race.
  2852  	retention := o.retention
  2853  	o.mu.Unlock()
  2854  
  2855  	// Let the owning stream know if we are interest or workqueue retention based.
  2856  	// If this consumer is clustered this will be handled by processReplicatedAck
  2857  	// after the ack has propagated.
  2858  	if !clustered && mset != nil && retention != LimitsPolicy {
  2859  		if sagap > 1 {
  2860  			// FIXME(dlc) - This is very inefficient, will need to fix.
  2861  			for seq := sseq; seq > sseq-sagap; seq-- {
  2862  				mset.ackMsg(o, seq)
  2863  			}
  2864  		} else {
  2865  			mset.ackMsg(o, sseq)
  2866  		}
  2867  	}
  2868  
  2869  	// If we had max ack pending set and were at limit we need to unblock ourselves.
  2870  	if needSignal {
  2871  		o.signalNewMessages()
  2872  	}
  2873  }
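// Worked example for the AckExplicit floor logic above (sequences
// hypothetical): with pending stream seqs {10, 11, 12} delivered as consumer
// seqs {5, 6, 7} and floors adflr=4/asflr=9, acking 10 (dseq 5 == adflr+1)
// advances the floors, and the scan stops at pending 11, leaving them at
// p.Sequence-1 = 5 and ss-1 = 10. Acking 12 first only deletes it from
// pending; the floors stay put until the gap at 10 is filled.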
  2874  
   2875  // Determine if this is a truly filtered consumer. Modern clients will set filter subjects
   2876  // even if the stream only has a single non-wildcard subject designation.
  2877  // Read lock should be held.
  2878  func (o *consumer) isFiltered() bool {
  2879  	if o.subjf == nil {
  2880  		return false
  2881  	}
  2882  	// If we are here we want to check if the filtered subject is
  2883  	// a direct match for our only listed subject.
  2884  	mset := o.mset
  2885  	if mset == nil {
  2886  		return true
  2887  	}
  2888  
   2889  	// `isFiltered` needs to be performant, so we do
   2890  	// as few checks as possible to avoid unnecessary work.
  2891  	// Here we avoid iteration over slices if there is only one subject in stream
  2892  	// and one filter for the consumer.
  2893  	if len(mset.cfg.Subjects) == 1 && len(o.subjf) == 1 {
  2894  		return mset.cfg.Subjects[0] != o.subjf[0].subject
  2895  	}
  2896  
   2897  	// If the lists are not of equal length, we can return early, as this is filtered.
  2898  	if len(mset.cfg.Subjects) != len(o.subjf) {
  2899  		return true
  2900  	}
  2901  
   2902  	// In the rare case that the user passed all stream subjects as consumer filters,
   2903  	// we need to do a more expensive comparison.
   2904  	// reflect.DeepEqual would return false if the filters are the same but in a different order,
   2905  	// so it can't be used here.
  2906  	cfilters := make(map[string]struct{}, len(o.subjf))
  2907  	for _, val := range o.subjf {
  2908  		cfilters[val.subject] = struct{}{}
  2909  	}
  2910  	for _, val := range mset.cfg.Subjects {
  2911  		if _, ok := cfilters[val]; !ok {
  2912  			return true
  2913  		}
  2914  	}
  2915  	return false
  2916  }
  2917  
  2918  // Check if we need an ack for this store seq.
  2919  // This is called for interest based retention streams to remove messages.
  2920  func (o *consumer) needAck(sseq uint64, subj string) bool {
  2921  	var needAck bool
  2922  	var asflr, osseq uint64
  2923  	var pending map[uint64]*Pending
  2924  
  2925  	o.mu.RLock()
  2926  	defer o.mu.RUnlock()
  2927  
  2928  	isFiltered := o.isFiltered()
  2929  	if isFiltered && o.mset == nil {
  2930  		return false
  2931  	}
  2932  
  2933  	// Check if we are filtered, and if so check if this is even applicable to us.
  2934  	if isFiltered {
  2935  		if subj == _EMPTY_ {
  2936  			var svp StoreMsg
  2937  			if _, err := o.mset.store.LoadMsg(sseq, &svp); err != nil {
  2938  				return false
  2939  			}
  2940  			subj = svp.subj
  2941  		}
  2942  		if !o.isFilteredMatch(subj) {
  2943  			return false
  2944  		}
  2945  	}
  2946  	if o.isLeader() {
  2947  		asflr, osseq = o.asflr, o.sseq
  2948  		pending = o.pending
  2949  	} else {
  2950  		if o.store == nil {
  2951  			return false
  2952  		}
  2953  		state, err := o.store.BorrowState()
  2954  		if err != nil || state == nil {
  2955  			// Fall back to what we track internally for now.
  2956  			return sseq > o.asflr && !o.isFiltered()
  2957  		}
   2958  		// When loading state like this, osseq is +1 (one past the last delivered stream seq).
  2959  		asflr, osseq, pending = state.AckFloor.Stream, state.Delivered.Stream+1, state.Pending
  2960  	}
  2961  
  2962  	switch o.cfg.AckPolicy {
  2963  	case AckNone, AckAll:
  2964  		needAck = sseq > asflr
  2965  	case AckExplicit:
  2966  		if sseq > asflr {
  2967  			if sseq >= osseq {
  2968  				needAck = true
  2969  			} else {
  2970  				_, needAck = pending[sseq]
  2971  			}
  2972  		}
  2973  	}
  2974  
  2975  	return needAck
  2976  }
  2977  
  2978  // Helper for the next message requests.
  2979  func nextReqFromMsg(msg []byte) (time.Time, int, int, bool, time.Duration, time.Time, error) {
  2980  	req := bytes.TrimSpace(msg)
  2981  
  2982  	switch {
  2983  	case len(req) == 0:
  2984  		return time.Time{}, 1, 0, false, 0, time.Time{}, nil
  2985  
  2986  	case req[0] == '{':
  2987  		var cr JSApiConsumerGetNextRequest
  2988  		if err := json.Unmarshal(req, &cr); err != nil {
  2989  			return time.Time{}, -1, 0, false, 0, time.Time{}, err
  2990  		}
  2991  		var hbt time.Time
  2992  		if cr.Heartbeat > 0 {
  2993  			if cr.Heartbeat*2 > cr.Expires {
  2994  				return time.Time{}, 1, 0, false, 0, time.Time{}, errors.New("heartbeat value too large")
  2995  			}
  2996  			hbt = time.Now().Add(cr.Heartbeat)
  2997  		}
  2998  		if cr.Expires == time.Duration(0) {
  2999  			return time.Time{}, cr.Batch, cr.MaxBytes, cr.NoWait, cr.Heartbeat, hbt, nil
  3000  		}
  3001  		return time.Now().Add(cr.Expires), cr.Batch, cr.MaxBytes, cr.NoWait, cr.Heartbeat, hbt, nil
  3002  	default:
  3003  		if n, err := strconv.Atoi(string(req)); err == nil {
  3004  			return time.Time{}, n, 0, false, 0, time.Time{}, nil
  3005  		}
  3006  	}
  3007  
  3008  	return time.Time{}, 1, 0, false, 0, time.Time{}, nil
  3009  }
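// Sketch of the request bodies handled above (field names per
// JSApiConsumerGetNextRequest; values hypothetical):
//
//	""    -> empty body: batch of 1, no expiry
//	"10"  -> bare integer: batch of 10
//	`{"batch":10,"max_bytes":1048576,"expires":5000000000,"no_wait":true,"idle_heartbeat":1000000000}`
//
// Durations are in nanoseconds, and a heartbeat larger than half of expires
// is rejected as an error.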
  3010  
  3011  // Represents a request that is on the internal waiting queue
  3012  type waitingRequest struct {
  3013  	acc      *Account
  3014  	interest string
  3015  	reply    string
  3016  	n        int // For batching
  3017  	d        int
  3018  	b        int // For max bytes tracking.
  3019  	expires  time.Time
  3020  	received time.Time
  3021  	hb       time.Duration
  3022  	hbt      time.Time
  3023  	noWait   bool
  3024  }
  3025  
  3026  // sync.Pool for waiting requests.
  3027  var wrPool = sync.Pool{
  3028  	New: func() interface{} {
  3029  		return new(waitingRequest)
  3030  	},
  3031  }
  3032  
   3033  // Recycle this request if it is done. Once recycled it can not be accessed again.
  3034  func (wr *waitingRequest) recycleIfDone() bool {
  3035  	if wr != nil && wr.n <= 0 {
  3036  		wr.recycle()
  3037  		return true
  3038  	}
  3039  	return false
  3040  }
  3041  
  3042  // Force a recycle.
  3043  func (wr *waitingRequest) recycle() {
  3044  	if wr != nil {
  3045  		wr.acc, wr.interest, wr.reply = nil, _EMPTY_, _EMPTY_
  3046  		wrPool.Put(wr)
  3047  	}
  3048  }
  3049  
  3050  // waiting queue for requests that are waiting for new messages to arrive.
  3051  type waitQueue struct {
  3052  	rp, wp, n int
  3053  	last      time.Time
  3054  	reqs      []*waitingRequest
  3055  }
  3056  
  3057  // Create a new ring buffer with at most max items.
  3058  func newWaitQueue(max int) *waitQueue {
  3059  	return &waitQueue{rp: -1, reqs: make([]*waitingRequest, max)}
  3060  }
  3061  
  3062  var (
  3063  	errWaitQueueFull = errors.New("wait queue is full")
  3064  	errWaitQueueNil  = errors.New("wait queue is nil")
  3065  )
  3066  
  3067  // Adds in a new request.
  3068  func (wq *waitQueue) add(wr *waitingRequest) error {
  3069  	if wq == nil {
  3070  		return errWaitQueueNil
  3071  	}
  3072  	if wq.isFull() {
  3073  		return errWaitQueueFull
  3074  	}
  3075  	wq.reqs[wq.wp] = wr
  3076  	// TODO(dlc) - Could make pow2 and get rid of mod.
  3077  	wq.wp = (wq.wp + 1) % cap(wq.reqs)
  3078  
  3079  	// Adjust read pointer if we were empty.
  3080  	if wq.rp < 0 {
  3081  		wq.rp = 0
  3082  	}
  3083  	// Track last active via when we receive a request.
  3084  	wq.last = wr.received
  3085  	wq.n++
  3086  	return nil
  3087  }
  3088  
  3089  func (wq *waitQueue) isFull() bool {
  3090  	return wq.n == cap(wq.reqs)
  3091  }
  3092  
  3093  func (wq *waitQueue) isEmpty() bool {
  3094  	return wq.len() == 0
  3095  }
  3096  
  3097  func (wq *waitQueue) len() int {
  3098  	if wq == nil {
  3099  		return 0
  3100  	}
  3101  	return wq.n
  3102  }
  3103  
  3104  // Peek will return the next request waiting or nil if empty.
  3105  func (wq *waitQueue) peek() *waitingRequest {
  3106  	if wq == nil {
  3107  		return nil
  3108  	}
  3109  	var wr *waitingRequest
  3110  	if wq.rp >= 0 {
  3111  		wr = wq.reqs[wq.rp]
  3112  	}
  3113  	return wr
  3114  }
  3115  
  3116  // pop will return the next request and move the read cursor.
   3117  // This will place a request that still has pending items at the end of the list.
  3118  func (wq *waitQueue) pop() *waitingRequest {
  3119  	wr := wq.peek()
  3120  	if wr != nil {
  3121  		wr.d++
  3122  		wr.n--
  3123  
   3124  		// Always remove the current entry on a pop, and move it to the end if still valid.
   3125  		// If we are the only entry, removing and re-adding would be a no-op, so skip it.
  3126  		if wr.n > 0 && wq.n > 1 {
  3127  			wq.removeCurrent()
  3128  			wq.add(wr)
  3129  		} else if wr.n <= 0 {
  3130  			wq.removeCurrent()
  3131  		}
  3132  	}
  3133  	return wr
  3134  }
  3135  
  3136  // Removes the current read pointer (head FIFO) entry.
  3137  func (wq *waitQueue) removeCurrent() {
  3138  	if wq.rp < 0 {
  3139  		return
  3140  	}
  3141  	wq.reqs[wq.rp] = nil
  3142  	wq.rp = (wq.rp + 1) % cap(wq.reqs)
  3143  	wq.n--
  3144  	// Check if we are empty.
  3145  	if wq.n == 0 {
  3146  		wq.rp, wq.wp = -1, 0
  3147  	}
  3148  }
  3149  
  3150  // Will compact when we have interior deletes.
  3151  func (wq *waitQueue) compact() {
  3152  	if wq.isEmpty() {
  3153  		return
  3154  	}
  3155  	nreqs, i := make([]*waitingRequest, cap(wq.reqs)), 0
  3156  	for j, rp := 0, wq.rp; j < wq.n; j++ {
  3157  		if wr := wq.reqs[rp]; wr != nil {
  3158  			nreqs[i] = wr
  3159  			i++
  3160  		}
  3161  		rp = (rp + 1) % cap(wq.reqs)
  3162  	}
  3163  	// Reset here.
  3164  	wq.rp, wq.wp, wq.n, wq.reqs = 0, i, i, nreqs
  3165  }
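
        // compact exists because processWaiting can nil out entries anywhere in
        // the ring (interior deletes), not just at the read pointer. It rebuilds
        // the slice so the surviving requests sit in reqs[0..n-1] in FIFO order.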
  3166  
  3167  // Return the map of pending requests keyed by the reply subject.
  3168  // Returns nil for a push consumer or if there is no wait queue.
  3169  func (o *consumer) pendingRequests() map[string]*waitingRequest {
  3170  	if o.waiting == nil {
  3171  		return nil
  3172  	}
  3173  	wq, m := o.waiting, make(map[string]*waitingRequest)
  3174  	for i, rp := 0, wq.rp; i < wq.n; i++ {
  3175  		if wr := wq.reqs[rp]; wr != nil {
  3176  			m[wr.reply] = wr
  3177  		}
  3178  		rp = (rp + 1) % cap(wq.reqs)
  3179  	}
  3180  	return m
  3181  }
  3182  
  3183  // Return next waiting request. This will check for expirations and interest,
  3184  // but not noWait; that is handled by processWaiting.
  3185  // Lock should be held.
  3186  func (o *consumer) nextWaiting(sz int) *waitingRequest {
  3187  	if o.waiting == nil || o.waiting.isEmpty() {
  3188  		return nil
  3189  	}
  3190  	for wr := o.waiting.peek(); !o.waiting.isEmpty(); wr = o.waiting.peek() {
  3191  		if wr == nil {
  3192  			break
  3193  		}
  3194  		// Check if we have max bytes set.
  3195  		if wr.b > 0 {
  3196  			if sz <= wr.b {
  3197  				wr.b -= sz
  3198  				// If we are right now at zero, set batch to 1 to deliver this one but stop after.
  3199  				if wr.b == 0 {
  3200  					wr.n = 1
  3201  				}
  3202  			} else {
  3203  				// Since we can't send that message to the requestor, we need to
  3204  				// notify that we are closing the request.
  3205  				const maxBytesT = "NATS/1.0 409 Message Size Exceeds MaxBytes\r\n%s: %d\r\n%s: %d\r\n\r\n"
  3206  				hdr := fmt.Appendf(nil, maxBytesT, JSPullRequestPendingMsgs, wr.n, JSPullRequestPendingBytes, wr.b)
  3207  				o.outq.send(newJSPubMsg(wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  3208  				// Remove the current one, no longer valid due to max bytes limit.
  3209  				o.waiting.removeCurrent()
  3210  				if o.node != nil {
  3211  					o.removeClusterPendingRequest(wr.reply)
  3212  				}
  3213  				wr.recycle()
  3214  				continue
  3215  			}
  3216  		}
  3217  
  3218  		if wr.expires.IsZero() || time.Now().Before(wr.expires) {
  3219  			rr := wr.acc.sl.Match(wr.interest)
  3220  			if len(rr.psubs)+len(rr.qsubs) > 0 {
  3221  				return o.waiting.pop()
  3222  			} else if time.Since(wr.received) < defaultGatewayRecentSubExpiration && (o.srv.leafNodeEnabled || o.srv.gateway.enabled) {
  3223  				return o.waiting.pop()
  3224  			} else if o.srv.gateway.enabled && o.srv.hasGatewayInterest(wr.acc.Name, wr.interest) {
  3225  				return o.waiting.pop()
  3226  			}
  3227  		} else {
  3228  			// We do check for expiration in `processWaiting`, but it is possible to hit the expiry here, and not there.
  3229  			hdr := fmt.Appendf(nil, "NATS/1.0 408 Request Timeout\r\n%s: %d\r\n%s: %d\r\n\r\n", JSPullRequestPendingMsgs, wr.n, JSPullRequestPendingBytes, wr.b)
  3230  			o.outq.send(newJSPubMsg(wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  3231  			o.waiting.removeCurrent()
  3232  			if o.node != nil {
  3233  				o.removeClusterPendingRequest(wr.reply)
  3234  			}
  3235  			wr.recycle()
  3236  			continue
  3238  		}
  3239  		if wr.interest != wr.reply {
  3240  			const intExpT = "NATS/1.0 408 Interest Expired\r\n%s: %d\r\n%s: %d\r\n\r\n"
  3241  			hdr := fmt.Appendf(nil, intExpT, JSPullRequestPendingMsgs, wr.n, JSPullRequestPendingBytes, wr.b)
  3242  			o.outq.send(newJSPubMsg(wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  3243  		}
  3244  		// Remove the current one, no longer valid.
  3245  		o.waiting.removeCurrent()
  3246  		if o.node != nil {
  3247  			o.removeClusterPendingRequest(wr.reply)
  3248  		}
  3249  		wr.recycle()
  3250  	}
  3251  	return nil
  3252  }
  3253  
  3254  // Next message request.
  3255  type nextMsgReq struct {
  3256  	reply string
  3257  	msg   []byte
  3258  }
  3259  
  3260  var nextMsgReqPool sync.Pool
  3261  
  3262  func newNextMsgReq(reply string, msg []byte) *nextMsgReq {
  3263  	var nmr *nextMsgReq
  3264  	m := nextMsgReqPool.Get()
  3265  	if m != nil {
  3266  		nmr = m.(*nextMsgReq)
  3267  	} else {
  3268  		nmr = &nextMsgReq{}
  3269  	}
  3270  	// When getting something from a pool it is critical that all fields are
  3271  	// initialized. Doing it this way guarantees that if someone adds a field to
  3272  	// the structure, the compiler will fail the build if this line is not updated.
  3273  	(*nmr) = nextMsgReq{reply, msg}
  3274  	return nmr
  3275  }
  3276  
  3277  func (nmr *nextMsgReq) returnToPool() {
  3278  	if nmr == nil {
  3279  		return
  3280  	}
  3281  	nmr.reply, nmr.msg = _EMPTY_, nil
  3282  	nextMsgReqPool.Put(nmr)
  3283  }
  3284  
  3285  // processNextMsgReq will process a request for the next message available. A nil message payload means deliver
  3286  // a single message. If the payload is a formal request or a number parseable with Atoi(), then we will send a
  3287  // batch of messages without requiring another request to this endpoint, or an ACK.
  3288  func (o *consumer) processNextMsgReq(_ *subscription, c *client, _ *Account, _, reply string, msg []byte) {
  3289  	if reply == _EMPTY_ {
  3290  		return
  3291  	}
  3292  
  3293  	// Short circuit error here.
  3294  	if o.nextMsgReqs == nil {
  3295  		hdr := []byte("NATS/1.0 409 Consumer is push based\r\n\r\n")
  3296  		o.outq.send(newJSPubMsg(reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  3297  		return
  3298  	}
  3299  
  3300  	_, msg = c.msgParts(msg)
  3301  	o.nextMsgReqs.push(newNextMsgReq(reply, copyBytes(msg)))
  3302  }
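
        // For reference, the inbound payload is either empty (single message),
        // a bare batch size such as "10", or a JSON request roughly of this
        // shape (durations in nanoseconds; shown informally, see the JetStream
        // API request types for the authoritative field set):
        //
        //	{"batch": 10, "max_bytes": 1048576, "expires": 5000000000, "no_wait": false, "idle_heartbeat": 1000000000}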
  3303  
  3304  func (o *consumer) processNextMsgRequest(reply string, msg []byte) {
  3305  	o.mu.Lock()
  3306  	defer o.mu.Unlock()
  3307  
  3308  	mset := o.mset
  3309  	if mset == nil {
  3310  		return
  3311  	}
  3312  
  3313  	sendErr := func(status int, description string) {
  3314  		hdr := fmt.Appendf(nil, "NATS/1.0 %d %s\r\n\r\n", status, description)
  3315  		o.outq.send(newJSPubMsg(reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  3316  	}
  3317  
  3318  	if o.isPushMode() || o.waiting == nil {
  3319  		sendErr(409, "Consumer is push based")
  3320  		return
  3321  	}
  3322  
  3323  	// Check payload here to see if they sent in a batch size or a formal request.
  3324  	expires, batchSize, maxBytes, noWait, hb, hbt, err := nextReqFromMsg(msg)
  3325  	if err != nil {
  3326  		sendErr(400, fmt.Sprintf("Bad Request - %v", err))
  3327  		return
  3328  	}
  3329  
  3330  	// Check for request limits
  3331  	if o.cfg.MaxRequestBatch > 0 && batchSize > o.cfg.MaxRequestBatch {
  3332  		sendErr(409, fmt.Sprintf("Exceeded MaxRequestBatch of %d", o.cfg.MaxRequestBatch))
  3333  		return
  3334  	}
  3335  
  3336  	if !expires.IsZero() && o.cfg.MaxRequestExpires > 0 && expires.After(time.Now().Add(o.cfg.MaxRequestExpires)) {
  3337  		sendErr(409, fmt.Sprintf("Exceeded MaxRequestExpires of %v", o.cfg.MaxRequestExpires))
  3338  		return
  3339  	}
  3340  
  3341  	if maxBytes > 0 && o.cfg.MaxRequestMaxBytes > 0 && maxBytes > o.cfg.MaxRequestMaxBytes {
  3342  		sendErr(409, fmt.Sprintf("Exceeded MaxRequestMaxBytes of %v", o.cfg.MaxRequestMaxBytes))
  3343  		return
  3344  	}
  3345  
  3346  	// If we have the max number of requests already pending try to expire.
  3347  	if o.waiting.isFull() {
  3348  		// Try to expire some of the requests.
  3349  		o.processWaiting(false)
  3350  	}
  3351  
  3352  	// If the request is for noWait and we have pending requests already, check if we have room.
  3353  	if noWait {
  3354  		msgsPending := o.numPending() + uint64(len(o.rdq))
  3355  		// If no pending at all, decide what to do with request.
  3356  		// If no expires was set then fail.
  3357  		if msgsPending == 0 && expires.IsZero() {
  3358  			o.waiting.last = time.Now()
  3359  			sendErr(404, "No Messages")
  3360  			return
  3361  		}
  3362  		if msgsPending > 0 {
  3363  			_, _, batchPending, _ := o.processWaiting(false)
  3364  			if msgsPending < uint64(batchPending) {
  3365  				o.waiting.last = time.Now()
  3366  				sendErr(408, "Requests Pending")
  3367  				return
  3368  			}
  3369  		}
  3370  		// If we are here this should be considered a one-shot situation.
  3371  		// We will wait for expires but will return as soon as we have any messages.
  3372  	}
  3373  
  3374  	// If we receive this request through an account export, we need to track that interest subject and account.
  3375  	acc, interest := trackDownAccountAndInterest(o.acc, reply)
  3376  
  3377  	// Create a waiting request.
  3378  	wr := wrPool.Get().(*waitingRequest)
  3379  	wr.acc, wr.interest, wr.reply, wr.n, wr.d, wr.noWait, wr.expires, wr.hb, wr.hbt = acc, interest, reply, batchSize, 0, noWait, expires, hb, hbt
  3380  	wr.b = maxBytes
  3381  	wr.received = time.Now()
  3382  
  3383  	if err := o.waiting.add(wr); err != nil {
  3384  		sendErr(409, "Exceeded MaxWaiting")
  3385  		return
  3386  	}
  3387  	o.signalNewMessages()
  3388  	// If we are clustered update our followers about this request.
  3389  	if o.node != nil {
  3390  		o.addClusterPendingRequest(wr.reply)
  3391  	}
  3392  }
  3393  
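        // trackDownAccountAndInterest follows chained response exports (replies
        // carrying the replyPrefix) back to the account and subject where the
        // requestor actually listens, so interest checks are done in the right place.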
  3394  func trackDownAccountAndInterest(acc *Account, interest string) (*Account, string) {
  3395  	for strings.HasPrefix(interest, replyPrefix) {
  3396  		oa := acc
  3397  		oa.mu.RLock()
  3398  		if oa.exports.responses == nil {
  3399  			oa.mu.RUnlock()
  3400  			break
  3401  		}
  3402  		si := oa.exports.responses[interest]
  3403  		if si == nil {
  3404  			oa.mu.RUnlock()
  3405  			break
  3406  		}
  3407  		acc, interest = si.acc, si.to
  3408  		oa.mu.RUnlock()
  3409  	}
  3410  	return acc, interest
  3411  }
  3412  
  3413  // Increase the delivery count for this message.
  3414  // ONLY used on redelivery semantics.
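        // Note that o.rdc tracks redeliveries only, which is why the returned
        // total includes the initial delivery (stored count plus one).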
  3415  // Lock should be held.
  3416  func (o *consumer) incDeliveryCount(sseq uint64) uint64 {
  3417  	if o.rdc == nil {
  3418  		o.rdc = make(map[uint64]uint64)
  3419  	}
  3420  	o.rdc[sseq] += 1
  3421  	return o.rdc[sseq] + 1
  3422  }
  3423  
  3424  // Used if we have to adjust on failed delivery or bad lookups.
  3425  // Those failed attempts should not increase deliver count.
  3426  // Lock should be held.
  3427  func (o *consumer) decDeliveryCount(sseq uint64) {
  3428  	if o.rdc == nil {
  3429  		return
  3430  	}
  3431  	if dc, ok := o.rdc[sseq]; ok {
  3432  		if dc == 1 {
  3433  			delete(o.rdc, sseq)
  3434  		} else {
  3435  			o.rdc[sseq] -= 1
  3436  		}
  3437  	}
  3438  }
  3439  
  3440  // Send a delivery exceeded advisory.
  3441  func (o *consumer) notifyDeliveryExceeded(sseq, dc uint64) {
  3442  	e := JSConsumerDeliveryExceededAdvisory{
  3443  		TypedEvent: TypedEvent{
  3444  			Type: JSConsumerDeliveryExceededAdvisoryType,
  3445  			ID:   nuid.Next(),
  3446  			Time: time.Now().UTC(),
  3447  		},
  3448  		Stream:     o.stream,
  3449  		Consumer:   o.name,
  3450  		StreamSeq:  sseq,
  3451  		Deliveries: dc,
  3452  		Domain:     o.srv.getOpts().JetStreamDomain,
  3453  	}
  3454  
  3455  	j, err := json.Marshal(e)
  3456  	if err != nil {
  3457  		return
  3458  	}
  3459  
  3460  	o.sendAdvisory(o.deliveryExcEventT, j)
  3461  }
  3462  
  3463  // Check if the candidate subject matches a filter, if one is present.
  3464  // Lock should be held.
  3465  func (o *consumer) isFilteredMatch(subj string) bool {
  3466  	// No filter is automatic match.
  3467  	if o.subjf == nil {
  3468  		return true
  3469  	}
  3470  	for _, filter := range o.subjf {
  3471  		if !filter.hasWildcard && subj == filter.subject {
  3472  			return true
  3473  		}
  3474  	}
  3475  	// It's quicker to first check for non-wildcard filters, then
  3476  	// iterate again to check for a subset match.
  3477  	tsa := [32]string{}
  3478  	tts := tokenizeSubjectIntoSlice(tsa[:0], subj)
  3479  	for _, filter := range o.subjf {
  3480  		if isSubsetMatchTokenized(tts, filter.tokenizedSubject) {
  3481  			return true
  3482  		}
  3483  	}
  3484  	return false
  3485  }
  3486  
  3487  // Check if the candidate filter subject is equal to or a subset match
  3488  // of one of the filter subjects.
  3489  // Lock should be held.
  3490  func (o *consumer) isEqualOrSubsetMatch(subj string) bool {
  3491  	for _, filter := range o.subjf {
  3492  		if !filter.hasWildcard && subj == filter.subject {
  3493  			return true
  3494  		}
  3495  	}
  3496  	tsa := [32]string{}
  3497  	tts := tokenizeSubjectIntoSlice(tsa[:0], subj)
  3498  	for _, filter := range o.subjf {
  3499  		if isSubsetMatchTokenized(filter.tokenizedSubject, tts) {
  3500  			return true
  3501  		}
  3502  	}
  3503  	return false
  3504  }
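
        // For example, with a consumer filter of "foo.*", isFilteredMatch("foo.bar")
        // is true since the subject falls under the filter. The direction reverses
        // in isEqualOrSubsetMatch: isEqualOrSubsetMatch("foo.>") is true (the filter
        // falls under the candidate) while isEqualOrSubsetMatch("foo.bar") is false.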
  3505  
  3506  var (
  3507  	errMaxAckPending = errors.New("max ack pending reached")
  3508  	errBadConsumer   = errors.New("consumer not valid")
  3509  	errNoInterest    = errors.New("consumer requires interest for delivery subject when ephemeral")
  3510  )
  3511  
  3512  // Get next available message from underlying store.
  3513  // Is partition aware and redeliver aware.
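        // Order of checks: pending redeliveries first; then stall if MaxAckPending
        // is hit; then any skip list (deliver last per subject); then the store,
        // using a fast path when unfiltered and per-filter buffered lookups otherwise.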
  3514  // Lock should be held.
  3515  func (o *consumer) getNextMsg() (*jsPubMsg, uint64, error) {
  3516  	if o.mset == nil || o.mset.store == nil {
  3517  		return nil, 0, errBadConsumer
  3518  	}
  3519  	// Process redelivered messages before looking at a possible "skip list" (deliver last per subject).
  3520  	if o.hasRedeliveries() {
  3521  		var seq, dc uint64
  3522  		for seq = o.getNextToRedeliver(); seq > 0; seq = o.getNextToRedeliver() {
  3523  			dc = o.incDeliveryCount(seq)
  3524  			if o.maxdc > 0 && dc > o.maxdc {
  3525  				// Only send once
  3526  				if dc == o.maxdc+1 {
  3527  					o.notifyDeliveryExceeded(seq, dc-1)
  3528  				}
  3529  				// Make sure to remove from pending.
  3530  				if p, ok := o.pending[seq]; ok && p != nil {
  3531  					delete(o.pending, seq)
  3532  					o.updateDelivered(p.Sequence, seq, dc, p.Timestamp)
  3533  				}
  3534  				continue
  3535  			}
  3536  			if seq > 0 {
  3537  				pmsg := getJSPubMsgFromPool()
  3538  				sm, err := o.mset.store.LoadMsg(seq, &pmsg.StoreMsg)
  3539  				if sm == nil || err != nil {
  3540  					pmsg.returnToPool()
  3541  					pmsg, dc = nil, 0
  3542  					// Adjust back deliver count.
  3543  					o.decDeliveryCount(seq)
  3544  				}
  3545  				return pmsg, dc, err
  3546  			}
  3547  		}
  3548  	}
  3549  
  3550  	// Check if we have max pending.
  3551  	if o.maxp > 0 && len(o.pending) >= o.maxp {
  3552  		// maxp only set when ack policy != AckNone and user set MaxAckPending
  3553  		// Stall if we have hit max pending.
  3554  		return nil, 0, errMaxAckPending
  3555  	}
  3556  
  3557  	if o.hasSkipListPending() {
  3558  		seq := o.lss.seqs[0]
  3559  		if len(o.lss.seqs) == 1 {
  3560  			o.sseq = o.lss.resume
  3561  			o.lss = nil
  3562  			o.updateSkipped(o.sseq)
  3563  		} else {
  3564  			o.lss.seqs = o.lss.seqs[1:]
  3565  		}
  3566  		pmsg := getJSPubMsgFromPool()
  3567  		sm, err := o.mset.store.LoadMsg(seq, &pmsg.StoreMsg)
  3568  		if sm == nil || err != nil {
  3569  			pmsg.returnToPool()
  3570  		}
  3571  		o.sseq++
  3572  		return pmsg, 1, err
  3573  	}
  3574  
  3575  	// Hold onto this since we release the lock.
  3576  	store := o.mset.store
  3577  
  3578  	// If no filters are specified, optimize to fetch just non-filtered messages.
  3579  	if len(o.subjf) == 0 {
  3580  		// Grab next message applicable to us.
  3581  		// We will unlock here in case lots of contention, e.g. WQ.
  3582  		o.mu.Unlock()
  3583  		pmsg := getJSPubMsgFromPool()
  3584  		sm, sseq, err := store.LoadNextMsg(_EMPTY_, false, o.sseq, &pmsg.StoreMsg)
  3585  		if sm == nil {
  3586  			pmsg.returnToPool()
  3587  			pmsg = nil
  3588  		}
  3589  		o.mu.Lock()
  3590  		if sseq >= o.sseq {
  3591  			o.sseq = sseq + 1
  3592  			if err == ErrStoreEOF {
  3593  				o.updateSkipped(o.sseq)
  3594  			}
  3595  		}
  3596  		return pmsg, 1, err
  3597  	}
  3598  
  3599  	// If we have filters, iterate over them and optimize by buffering found messages.
  3600  	for _, filter := range o.subjf {
  3601  		if filter.nextSeq < o.sseq {
  3602  			// o.subjf should always point to the right starting point for reading messages;
  3603  			// if anything modified it, make sure our sequences do not start earlier.
  3604  			filter.nextSeq = o.sseq
  3605  		}
  3606  		// If this subject hasn't fetched any message yet, do it now.
  3607  		if filter.pmsg == nil {
  3608  			// We will unlock here in case lots of contention, e.g. WQ.
  3609  			filterSubject, filterWC, nextSeq := filter.subject, filter.hasWildcard, filter.nextSeq
  3610  			o.mu.Unlock()
  3611  			pmsg := getJSPubMsgFromPool()
  3612  			sm, sseq, err := store.LoadNextMsg(filterSubject, filterWC, nextSeq, &pmsg.StoreMsg)
  3613  			o.mu.Lock()
  3614  
  3615  			filter.err = err
  3616  
  3617  			if sm != nil {
  3618  				filter.pmsg = pmsg
  3619  			} else {
  3620  				pmsg.returnToPool()
  3621  				pmsg = nil
  3622  			}
  3623  			if sseq >= filter.nextSeq {
  3624  				filter.nextSeq = sseq + 1
  3625  			}
  3626  
  3627  			// If we're sure that this filter has a continuous sequence of messages, skip looking up other filters.
  3628  			if nextSeq == sseq && err != ErrStoreEOF {
  3629  				break
  3630  			}
  3631  		}
  3633  	}
  3634  
  3635  	// Don't sort o.subjf if it has only one entry.
  3636  	// Sort uses `reflect` and can noticeably slow down fetching,
  3637  	// even if len == 0 or 1.
  3638  	// TODO(tp): we should have sort based off generics for server
  3639  	// to avoid reflection.
  3640  	if len(o.subjf) > 1 {
  3641  		sort.Slice(o.subjf, func(i, j int) bool {
  3642  			if o.subjf[j].pmsg != nil && o.subjf[i].pmsg == nil {
  3643  				return false
  3644  			}
  3645  			if o.subjf[i].pmsg != nil && o.subjf[j].pmsg == nil {
  3646  				return true
  3647  			}
  3648  			return o.subjf[j].nextSeq > o.subjf[i].nextSeq
  3649  		})
  3650  	}
  3651  	// Grab next message applicable to us.
  3652  	// The sort above ensures the first filter holds the earliest available message.
  3653  	filter := o.subjf[0]
  3654  	err := filter.err
  3655  	// This means we have a fetched message for this subject.
  3656  	if filter.pmsg != nil {
  3657  		filter.currentSeq = filter.nextSeq
  3658  		o.sseq = filter.currentSeq
  3659  		returned := filter.pmsg
  3660  		filter.pmsg = nil
  3661  		return returned, 1, err
  3662  	}
  3663  	if err == ErrStoreEOF {
  3664  		o.updateSkipped(filter.nextSeq)
  3665  	}
  3666  
  3667  	// Set o.sseq to the first subject sequence.
  3668  	if filter.nextSeq > o.sseq {
  3669  		o.sseq = filter.nextSeq
  3670  	}
  3671  	return nil, 0, err
  3672  }
  3673  
  3674  // Will check for expiration and lack of interest on waiting requests.
  3675  // Will also do any heartbeats and return the next expiration or HB interval.
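        // Returns the number of expired requests, the number still waiting, the
        // sum of batch sizes still pending for requests with interest, and the
        // earliest expiry or heartbeat deadline.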
  3676  func (o *consumer) processWaiting(eos bool) (int, int, int, time.Time) {
  3677  	var fexp time.Time
  3678  	if o.srv == nil || o.waiting.isEmpty() {
  3679  		return 0, 0, 0, fexp
  3680  	}
  3681  
  3682  	var expired, brp int
  3683  	s, now := o.srv, time.Now()
  3684  
  3685  	// Signals interior deletes, which we will compact if needed.
  3686  	var hid bool
  3687  	remove := func(wr *waitingRequest, i int) {
  3688  		if i == o.waiting.rp {
  3689  			o.waiting.removeCurrent()
  3690  		} else {
  3691  			o.waiting.reqs[i] = nil
  3692  			hid = true
  3693  		}
  3694  		if o.node != nil {
  3695  			o.removeClusterPendingRequest(wr.reply)
  3696  		}
  3697  		expired++
  3698  		wr.recycle()
  3699  	}
  3700  
  3701  	wq := o.waiting
  3702  	for i, rp, n := 0, wq.rp, wq.n; i < n; rp = (rp + 1) % cap(wq.reqs) {
  3703  		wr := wq.reqs[rp]
  3704  		// Check expiration.
  3705  		if (eos && wr.noWait && wr.d > 0) || (!wr.expires.IsZero() && now.After(wr.expires)) {
  3706  			hdr := fmt.Appendf(nil, "NATS/1.0 408 Request Timeout\r\n%s: %d\r\n%s: %d\r\n\r\n", JSPullRequestPendingMsgs, wr.n, JSPullRequestPendingBytes, wr.b)
  3707  			o.outq.send(newJSPubMsg(wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  3708  			remove(wr, rp)
  3709  			i++
  3710  			continue
  3711  		}
  3712  		// Now check interest.
  3713  		rr := wr.acc.sl.Match(wr.interest)
  3714  		interest := len(rr.psubs)+len(rr.qsubs) > 0
  3715  		if !interest && (s.leafNodeEnabled || s.gateway.enabled) {
  3716  			// If we are here, check on gateways and leaf nodes (as they can mask gateways on the other end).
  3717  			// If we have interest or the request is too young, do not expire it.
  3718  			if time.Since(wr.received) < defaultGatewayRecentSubExpiration {
  3719  				interest = true
  3720  			} else if s.gateway.enabled && s.hasGatewayInterest(wr.acc.Name, wr.interest) {
  3721  				interest = true
  3722  			}
  3723  		}
  3724  
  3725  		// If interest, update batch pending requests counter and update fexp timer.
  3726  		if interest {
  3727  			brp += wr.n
  3728  			if !wr.hbt.IsZero() {
  3729  				if now.After(wr.hbt) {
  3730  					// Fire off a heartbeat here.
  3731  					o.sendIdleHeartbeat(wr.reply)
  3732  					// Update next HB.
  3733  					wr.hbt = now.Add(wr.hb)
  3734  				}
  3735  				if fexp.IsZero() || wr.hbt.Before(fexp) {
  3736  					fexp = wr.hbt
  3737  				}
  3738  			}
  3739  			if !wr.expires.IsZero() && (fexp.IsZero() || wr.expires.Before(fexp)) {
  3740  				fexp = wr.expires
  3741  			}
  3742  			i++
  3743  			continue
  3744  		}
  3745  		// No more interest here so go ahead and remove this one from our list.
  3746  		remove(wr, rp)
  3747  		i++
  3748  	}
  3749  
  3750  	// If we have interior deletes from out-of-order invalidation, compact the waiting queue.
  3751  	if hid {
  3752  		o.waiting.compact()
  3753  	}
  3754  
  3755  	return expired, wq.len(), brp, fexp
  3756  }
  3757  
  3758  // Will check to make sure those waiting still have registered interest.
  3759  func (o *consumer) checkWaitingForInterest() bool {
  3760  	o.processWaiting(true)
  3761  	return o.waiting.len() > 0
  3762  }
  3763  
  3764  // Lock should be held.
  3765  func (o *consumer) hbTimer() (time.Duration, *time.Timer) {
  3766  	if o.cfg.Heartbeat == 0 {
  3767  		return 0, nil
  3768  	}
  3769  	return o.cfg.Heartbeat, time.NewTimer(o.cfg.Heartbeat)
  3770  }
  3771  
  3772  // Check here for conditions when our ack floor may have drifted below the stream's first sequence.
  3773  // In general this is accounted for in normal operations, but if the consumer misses the signal from
  3774  // the stream it will not clear the message and move the ack state.
  3775  // Should only be called from consumer leader.
  3776  func (o *consumer) checkAckFloor() {
  3777  	o.mu.RLock()
  3778  	mset, closed, asflr, numPending := o.mset, o.closed, o.asflr, len(o.pending)
  3779  	o.mu.RUnlock()
  3780  
  3781  	if asflr == 0 || closed || mset == nil {
  3782  		return
  3783  	}
  3784  
  3785  	var ss StreamState
  3786  	mset.store.FastState(&ss)
  3787  
  3788  	// If our floor is equal or greater, that is normal and there is nothing for us to do.
  3789  	if ss.FirstSeq == 0 || asflr >= ss.FirstSeq-1 {
  3790  		return
  3791  	}
  3792  
  3793  	// Check which linear space is smaller to walk.
  3794  	if ss.FirstSeq-asflr-1 < uint64(numPending) {
  3795  		// Process all messages that no longer exist.
  3796  		for seq := asflr + 1; seq < ss.FirstSeq; seq++ {
  3797  			// Check if this message was pending.
  3798  			o.mu.RLock()
  3799  			p, isPending := o.pending[seq]
  3800  			var rdc uint64 = 1
  3801  			if o.rdc != nil {
  3802  				rdc = o.rdc[seq]
  3803  			}
  3804  			o.mu.RUnlock()
  3805  			// If it was pending for us, get rid of it.
  3806  			if isPending {
  3807  				o.processTerm(seq, p.Sequence, rdc, ackTermLimitsReason)
  3808  			}
  3809  		}
  3810  	} else if numPending > 0 {
  3811  		// Here it is shorter to walk pending.
  3812  		// toTerm holds seq, dseq, rdc for each entry.
  3813  		toTerm := make([]uint64, 0, numPending*3)
  3814  		o.mu.RLock()
  3815  		for seq, p := range o.pending {
  3816  			if seq < ss.FirstSeq {
  3817  				var dseq uint64 = 1
  3818  				if p != nil {
  3819  					dseq = p.Sequence
  3820  				}
  3821  				var rdc uint64 = 1
  3822  				if o.rdc != nil {
  3823  					rdc = o.rdc[seq]
  3824  				}
  3825  				toTerm = append(toTerm, seq, dseq, rdc)
  3826  			}
  3827  		}
  3828  		o.mu.RUnlock()
  3829  
  3830  		for i := 0; i < len(toTerm); i += 3 {
  3831  			seq, dseq, rdc := toTerm[i], toTerm[i+1], toTerm[i+2]
  3832  			o.processTerm(seq, dseq, rdc, ackTermLimitsReason)
  3833  		}
  3834  	}
  3835  
  3836  	// Do one final check here.
  3837  	o.mu.Lock()
  3838  	defer o.mu.Unlock()
  3839  
  3840  	// If we are here, and this should be rare, we are still off with our ack floor.
  3841  	// We will set it explicitly to 1 behind our current lowest in pending, or if
  3842  	// pending is empty, to our current delivered -1.
  3843  	if o.asflr < ss.FirstSeq-1 {
  3844  		var psseq, pdseq uint64
  3845  		for seq, p := range o.pending {
  3846  			if psseq == 0 || seq < psseq {
  3847  				psseq, pdseq = seq, p.Sequence
  3848  			}
  3849  		}
  3850  		// If we still have none, set to current delivered -1.
  3851  		if psseq == 0 {
  3852  			psseq, pdseq = o.sseq-1, o.dseq-1
  3853  			// If still not adjusted.
  3854  			if psseq < ss.FirstSeq-1 {
  3855  				psseq, pdseq = ss.FirstSeq-1, ss.FirstSeq-1
  3856  			}
  3857  		}
  3858  		o.asflr, o.adflr = psseq, pdseq
  3859  	}
  3860  }
  3861  
  3862  func (o *consumer) processInboundAcks(qch chan struct{}) {
  3863  	// Grab the server lock to watch for server quit.
  3864  	o.mu.RLock()
  3865  	s, mset := o.srv, o.mset
  3866  	hasInactiveThresh := o.cfg.InactiveThreshold > 0
  3867  	o.mu.RUnlock()
  3868  
  3869  	if s == nil || mset == nil {
  3870  		return
  3871  	}
  3872  
  3873  	// We will check this on entry and periodically.
  3874  	o.checkAckFloor()
  3875  
  3876  	// How often we will check for ack floor drift.
  3877  	// Spread these out for large numbers on a server restart.
  3878  	delta := time.Duration(rand.Int63n(int64(time.Minute)))
  3879  	ticker := time.NewTicker(time.Minute + delta)
  3880  	defer ticker.Stop()
  3881  
  3882  	for {
  3883  		select {
  3884  		case <-o.ackMsgs.ch:
  3885  			acks := o.ackMsgs.pop()
  3886  			for _, ack := range acks {
  3887  				o.processAck(ack.subject, ack.reply, ack.hdr, ack.msg)
  3888  				ack.returnToPool()
  3889  			}
  3890  			o.ackMsgs.recycle(&acks)
  3891  			// If we have an inactiveThreshold set, mark our activity.
  3892  			if hasInactiveThresh {
  3893  				o.suppressDeletion()
  3894  			}
  3895  		case <-ticker.C:
  3896  			o.checkAckFloor()
  3897  		case <-qch:
  3898  			return
  3899  		case <-s.quitCh:
  3900  			return
  3901  		}
  3902  	}
  3903  }
  3904  
  3905  // Process inbound next message requests.
  3906  func (o *consumer) processInboundNextMsgReqs(qch chan struct{}) {
  3907  	// Grab the server lock to watch for server quit.
  3908  	o.mu.RLock()
  3909  	s := o.srv
  3910  	o.mu.RUnlock()
  3911  
  3912  	for {
  3913  		select {
  3914  		case <-o.nextMsgReqs.ch:
  3915  			reqs := o.nextMsgReqs.pop()
  3916  			for _, req := range reqs {
  3917  				o.processNextMsgRequest(req.reply, req.msg)
  3918  				req.returnToPool()
  3919  			}
  3920  			o.nextMsgReqs.recycle(&reqs)
  3921  		case <-qch:
  3922  			return
  3923  		case <-s.quitCh:
  3924  			return
  3925  		}
  3926  	}
  3927  }
  3928  
  3929  // Suppress auto cleanup on ack activity of any kind.
  3930  func (o *consumer) suppressDeletion() {
  3931  	o.mu.Lock()
  3932  	defer o.mu.Unlock()
  3933  
  3934  	if o.closed {
  3935  		return
  3936  	}
  3937  
  3938  	if o.isPushMode() && o.dtmr != nil {
  3939  		// if dtmr is not nil we have started the countdown, simply reset to threshold.
  3940  		o.dtmr.Reset(o.dthresh)
  3941  	} else if o.isPullMode() && o.waiting != nil {
  3942  		// Pull mode always has timer running, just update last on waiting queue.
  3943  		o.waiting.last = time.Now()
  3944  	}
  3945  }
  3946  
  3947  // loopAndGatherMsgs waits for messages for the consumer. qch is the quit channel.
  3948  // While paused (PauseUntil in the future) the loop simply goes back to waiting.
  3949  func (o *consumer) loopAndGatherMsgs(qch chan struct{}) {
  3950  	// On startup check to see if we are in a replay situation where replay policy is not instant.
  3951  	var (
  3952  		lts  int64 // last time stamp seen, used for replay.
  3953  		lseq uint64
  3954  	)
  3955  
  3956  	o.mu.RLock()
  3957  	mset := o.mset
  3958  	getLSeq := o.replay
  3959  	o.mu.RUnlock()
  3960  	// consumer is closed when mset is set to nil.
  3961  	if mset == nil {
  3962  		return
  3963  	}
  3964  	if getLSeq {
  3965  		lseq = mset.state().LastSeq
  3966  	}
  3967  
  3968  	o.mu.Lock()
  3969  	s := o.srv
  3970  	// need to check again if consumer is closed
  3971  	if o.mset == nil {
  3972  		o.mu.Unlock()
  3973  		return
  3974  	}
  3975  	// For idle heartbeat support.
  3976  	var hbc <-chan time.Time
  3977  	hbd, hb := o.hbTimer()
  3978  	if hb != nil {
  3979  		hbc = hb.C
  3980  	}
  3981  	// Interest changes.
  3982  	inch := o.inch
  3983  	o.mu.Unlock()
  3984  
  3985  	// Grab the stream's retention policy
  3986  	mset.mu.RLock()
  3987  	rp := mset.cfg.Retention
  3988  	mset.mu.RUnlock()
  3989  
  3990  	var err error
  3991  
  3992  	// Deliver all the msgs we have now, once done or on a condition, we wait for new ones.
  3993  	for {
  3994  		var (
  3995  			pmsg     *jsPubMsg
  3996  			dc       uint64
  3997  			dsubj    string
  3998  			ackReply string
  3999  			delay    time.Duration
  4000  			sz       int
  4001  			wrn, wrb int
  4002  		)
  4003  
  4004  		o.mu.Lock()
  4005  
  4006  		// consumer is closed when mset is set to nil.
  4007  		if o.mset == nil {
  4008  			o.mu.Unlock()
  4009  			return
  4010  		}
  4011  
  4012  		// Clear last error.
  4013  		err = nil
  4014  
  4015  		// If the consumer is paused then stop sending.
  4016  		if o.cfg.PauseUntil != nil && !o.cfg.PauseUntil.IsZero() && time.Now().Before(*o.cfg.PauseUntil) {
  4017  			// If the consumer is paused and we haven't reached the deadline yet then
  4018  			// go back to waiting.
  4019  			goto waitForMsgs
  4020  		}
  4021  
  4022  		// If we are in push mode and not active, or under flow control, let's stop sending.
  4023  		if o.isPushMode() {
  4024  			if !o.active || (o.maxpb > 0 && o.pbytes > o.maxpb) {
  4025  				goto waitForMsgs
  4026  			}
  4027  		} else if o.waiting.isEmpty() {
  4028  			// If we are in pull mode and no one is waiting, break and wait.
  4029  			goto waitForMsgs
  4030  		}
  4031  
  4032  		// Grab our next msg.
  4033  		pmsg, dc, err = o.getNextMsg()
  4034  
  4035  		// getNextMsg can release the lock, so we need to check this condition again here.
  4036  		// consumer is closed when mset is set to nil.
  4037  		if o.mset == nil {
  4038  			o.mu.Unlock()
  4039  			return
  4040  		}
  4041  
  4042  		// On error either wait or return.
  4043  		if err != nil || pmsg == nil {
  4044  			// On EOF we can optionally fast sync num pending state.
  4045  			if err == ErrStoreEOF {
  4046  				o.checkNumPendingOnEOF()
  4047  			}
  4048  			if err == ErrStoreMsgNotFound || err == errDeletedMsg || err == ErrStoreEOF || err == errMaxAckPending {
  4049  				goto waitForMsgs
  4050  			} else if err == errPartialCache {
  4051  				s.Warnf("Unexpected partial cache error looking up message for consumer '%s > %s > %s'",
  4052  					o.mset.acc, o.mset.cfg.Name, o.cfg.Name)
  4053  				goto waitForMsgs
  4054  
  4055  			} else {
  4056  				s.Errorf("Received an error looking up message for consumer '%s > %s > %s': %v",
  4057  					o.mset.acc, o.mset.cfg.Name, o.cfg.Name, err)
  4058  				goto waitForMsgs
  4059  			}
  4060  		}
  4061  
  4062  		// Update our cached num pending here first.
  4063  		if dc == 1 {
  4064  			o.npc--
  4065  		}
  4066  		// Pre-calculate ackReply
  4067  		ackReply = o.ackReply(pmsg.seq, o.dseq, dc, pmsg.ts, o.numPending())
  4068  
  4069  		// If headers only do not send msg payload.
  4070  		// Add in msg size itself as header.
  4071  		if o.cfg.HeadersOnly {
  4072  			convertToHeadersOnly(pmsg)
  4073  		}
  4074  		// Calculate payload size. This can be calculated on client side.
  4075  		// We do not include transport subject here since not generally known on client.
  4076  		sz = len(pmsg.subj) + len(ackReply) + len(pmsg.hdr) + len(pmsg.msg)
  4077  
  4078  		if o.isPushMode() {
  4079  			dsubj = o.dsubj
  4080  		} else if wr := o.nextWaiting(sz); wr != nil {
  4081  			wrn, wrb = wr.n, wr.b
  4082  			dsubj = wr.reply
  4083  			if done := wr.recycleIfDone(); done && o.node != nil {
  4084  				o.removeClusterPendingRequest(dsubj)
  4085  			} else if !done && wr.hb > 0 {
  4086  				wr.hbt = time.Now().Add(wr.hb)
  4087  			}
  4088  		} else {
  4089  			if o.subjf != nil {
  4090  				tsa := [32]string{}
  4091  				tts := tokenizeSubjectIntoSlice(tsa[:0], pmsg.subj)
  4092  				for i, filter := range o.subjf {
  4093  					if isSubsetMatchTokenized(tts, filter.tokenizedSubject) {
  4094  						o.subjf[i].currentSeq--
  4095  						o.subjf[i].nextSeq--
  4096  						break
  4097  					}
  4098  				}
  4099  			}
  4100  			// We will redo this one.
  4101  			o.sseq--
  4102  			if dc == 1 {
  4103  				o.npc++
  4104  			}
  4105  			pmsg.returnToPool()
  4106  			goto waitForMsgs
  4107  		}
  4108  
  4109  		// If we are in a replay scenario and have not caught up check if we need to delay here.
  4110  		// If we are in a replay scenario and have not caught up, check if we need to delay here.
  4111  			if delay = time.Duration(pmsg.ts - lts); delay > time.Millisecond {
  4112  				o.mu.Unlock()
  4113  				select {
  4114  				case <-qch:
  4115  					pmsg.returnToPool()
  4116  					return
  4117  				case <-time.After(delay):
  4118  				}
  4119  				o.mu.Lock()
  4120  			}
  4121  		}
  4122  
  4123  		// Track this regardless.
  4124  		lts = pmsg.ts
  4125  
  4126  		// If we have a rate limit set make sure we check that here.
  4127  		if o.rlimit != nil {
  4128  			now := time.Now()
  4129  			r := o.rlimit.ReserveN(now, sz)
  4130  			delay := r.DelayFrom(now)
  4131  			if delay > 0 {
  4132  				o.mu.Unlock()
  4133  				select {
  4134  				case <-qch:
  4135  					pmsg.returnToPool()
  4136  					return
  4137  				case <-time.After(delay):
  4138  				}
  4139  				o.mu.Lock()
  4140  			}
  4141  		}
  4142  
  4143  		// Do actual delivery.
  4144  		o.deliverMsg(dsubj, ackReply, pmsg, dc, rp)
  4145  
  4146  		// If the given request fulfilled its batch size but still has pending bytes, send information about it.
  4147  		if wrn <= 0 && wrb > 0 {
  4148  			o.outq.send(newJSPubMsg(dsubj, _EMPTY_, _EMPTY_, fmt.Appendf(nil, JsPullRequestRemainingBytesT, JSPullRequestPendingMsgs, wrn, JSPullRequestPendingBytes, wrb), nil, nil, 0))
  4149  		}
  4150  		// Reset our idle heartbeat timer if set.
  4151  		if hb != nil {
  4152  			hb.Reset(hbd)
  4153  		}
  4154  
  4155  		o.mu.Unlock()
  4156  		continue
  4157  
  4158  	waitForMsgs:
  4159  		// If we were in a replay state check to see if we are caught up. If so clear.
  4160  		if o.replay && o.sseq > lseq {
  4161  			o.replay = false
  4162  		}
  4163  
  4164  		// Make sure to process any expired requests that are pending.
  4165  		var wrExp <-chan time.Time
  4166  		if o.isPullMode() {
  4167  			// Don't expire one-shots if we are here because of the max ack pending limit.
  4168  			_, _, _, fexp := o.processWaiting(err != errMaxAckPending)
  4169  			if !fexp.IsZero() {
  4170  				expires := time.Until(fexp)
  4171  				if expires <= 0 {
  4172  					expires = time.Millisecond
  4173  				}
  4174  				wrExp = time.NewTimer(expires).C
  4175  			}
  4176  		}
  4177  
  4178  		// We will wait here for new messages to arrive.
  4179  		mch, odsubj := o.mch, o.cfg.DeliverSubject
  4180  		o.mu.Unlock()
  4181  
  4182  		select {
  4183  		case <-mch:
  4184  			// Messages are waiting.
  4185  		case interest := <-inch:
  4186  			// inch can be nil on pull-based, but then this will
  4187  			// just block and not fire.
  4188  			o.updateDeliveryInterest(interest)
  4189  		case <-qch:
  4190  			return
  4191  		case <-wrExp:
  4192  			o.mu.Lock()
  4193  			o.processWaiting(true)
  4194  			o.mu.Unlock()
  4195  		case <-hbc:
  4196  			if o.isActive() {
  4197  				o.mu.RLock()
  4198  				o.sendIdleHeartbeat(odsubj)
  4199  				o.mu.RUnlock()
  4200  			}
  4201  			// Reset our idle heartbeat timer.
  4202  			hb.Reset(hbd)
  4203  		}
  4204  	}
  4205  }
  4206  
  4207  // Lock should be held.
  4208  func (o *consumer) sendIdleHeartbeat(subj string) {
  4209  	const t = "NATS/1.0 100 Idle Heartbeat\r\n%s: %d\r\n%s: %d\r\n\r\n"
  4210  	sseq, dseq := o.sseq-1, o.dseq-1
  4211  	hdr := fmt.Appendf(nil, t, JSLastConsumerSeq, dseq, JSLastStreamSeq, sseq)
  4212  	if fcp := o.fcid; fcp != _EMPTY_ {
  4213  		// Add in that we are stalled on flow control here.
  4214  		addOn := fmt.Appendf(nil, "%s: %s\r\n\r\n", JSConsumerStalled, fcp)
  4215  		hdr = append(hdr[:len(hdr)-LEN_CR_LF], []byte(addOn)...)
  4216  	}
  4217  	o.outq.send(newJSPubMsg(subj, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  4218  }
  4219  
  4220  func (o *consumer) ackReply(sseq, dseq, dc uint64, ts int64, pending uint64) string {
  4221  	return fmt.Sprintf(o.ackReplyT, dc, sseq, dseq, ts, pending)
  4222  }
  4223  
  4224  // Used mostly for testing. Sets max pending bytes for flow control setups.
  4225  func (o *consumer) setMaxPendingBytes(limit int) {
  4226  	o.pblimit = limit
  4227  	o.maxpb = limit / 16
  4228  	if o.maxpb == 0 {
  4229  		o.maxpb = 1
  4230  	}
  4231  }
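
        // Note the slow-start flavor: delivery begins with a window of 1/16th of
        // the limit, and processFlowControl doubles maxpb toward pblimit each
        // time a flow control reply comes back.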
  4232  
  4233  // Does some quick sanity checks to see if we should re-calculate num pending.
  4234  // There is a race when decrementing under contention at the beginning of the
  4235  // stream: getNextMsg skips a deleted msg, and then the decStreamPending call fires.
  4237  // Lock should be held.
  4238  func (o *consumer) checkNumPending() uint64 {
  4239  	if o.mset != nil {
  4240  		var state StreamState
  4241  		o.mset.store.FastState(&state)
  4242  		if o.sseq > state.LastSeq && o.npc != 0 || o.npc > int64(state.Msgs) {
  4243  			// Re-calculate.
  4244  			o.streamNumPending()
  4245  		}
  4246  	}
  4247  	return o.numPending()
  4248  }
  4249  
  4250  // Lock should be held.
  4251  func (o *consumer) numPending() uint64 {
  4252  	if o.npc < 0 {
  4253  		return 0
  4254  	}
  4255  	return uint64(o.npc)
  4256  }
  4257  
  4258  // This will do a quick sanity check on num pending when we encounter
  4259  // an EOF in the loop and gather.
  4260  // Lock should be held.
  4261  func (o *consumer) checkNumPendingOnEOF() {
  4262  	if o.mset == nil {
  4263  		return
  4264  	}
  4265  	var state StreamState
  4266  	o.mset.store.FastState(&state)
  4267  	if o.sseq > state.LastSeq && o.npc != 0 {
  4268  		// We know here we can reset our running state for num pending.
  4269  		o.npc, o.npf = 0, state.LastSeq
  4270  	}
  4271  }
  4272  
  4273  // Call into streamNumPending after acquiring the consumer lock.
  4274  func (o *consumer) streamNumPendingLocked() uint64 {
  4275  	o.mu.Lock()
  4276  	defer o.mu.Unlock()
  4277  	return o.streamNumPending()
  4278  }
  4279  
  4280  // Will force a set from the stream store of num pending.
  4281  // Depends on delivery policy, for last per subject we calculate differently.
  4282  // Lock should be held.
  4283  func (o *consumer) streamNumPending() uint64 {
  4284  	if o.mset == nil || o.mset.store == nil {
  4285  		o.npc, o.npf = 0, 0
  4286  		return 0
  4287  	}
  4288  
  4289  	isLastPerSubject := o.cfg.DeliverPolicy == DeliverLastPerSubject
  4290  
  4291  	// Deliver Last Per Subject calculates num pending differently.
  4292  	if isLastPerSubject {
  4293  		o.npc, o.npf = 0, 0
  4294  		// Consumer without filters.
  4295  		if o.subjf == nil {
  4296  			npc, npf := o.mset.store.NumPending(o.sseq, _EMPTY_, isLastPerSubject)
  4297  			o.npc, o.npf = int64(npc), npf
  4298  			return o.numPending()
  4299  		}
  4300  		// Consumer with filters.
  4301  		for _, filter := range o.subjf {
  4302  			npc, npf := o.mset.store.NumPending(o.sseq, filter.subject, isLastPerSubject)
  4303  			o.npc += int64(npc)
  4304  			if npf > o.npf {
  4305  				o.npf = npf // Always last
  4306  			}
  4307  		}
  4308  		return o.numPending()
  4309  	}
  4310  	// Every other Delivery Policy is handled here.
  4311  	// Consumer without filters.
  4312  	if o.subjf == nil {
  4313  		npc, npf := o.mset.store.NumPending(o.sseq, o.cfg.FilterSubject, isLastPerSubject)
  4314  		o.npc, o.npf = int64(npc), npf
  4315  		return o.numPending()
  4316  	}
  4317  	// Consumer with filters.
  4318  	o.npc, o.npf = 0, 0
  4319  	for _, filter := range o.subjf {
  4320  		// We might lose state of o.subjf, so if we did, recover from o.sseq.
  4321  		if filter.currentSeq < o.sseq {
  4322  			filter.currentSeq = o.sseq
  4323  		}
  4324  		npc, npf := o.mset.store.NumPending(filter.currentSeq, filter.subject, isLastPerSubject)
  4325  		o.npc += int64(npc)
  4326  		if npf > o.npf {
  4327  			o.npf = npf // Always last
  4328  		}
  4329  	}
  4330  
  4331  	return o.numPending()
  4332  }
  4333  
  4334  func convertToHeadersOnly(pmsg *jsPubMsg) {
  4335  	// If headers only do not send msg payload.
  4336  	// Add in msg size itself as header.
  4337  	hdr, msg := pmsg.hdr, pmsg.msg
  4338  	var bb bytes.Buffer
  4339  	if len(hdr) == 0 {
  4340  		bb.WriteString(hdrLine)
  4341  	} else {
  4342  		bb.Write(hdr)
  4343  		bb.Truncate(len(hdr) - LEN_CR_LF)
  4344  	}
  4345  	bb.WriteString(JSMsgSize)
  4346  	bb.WriteString(": ")
  4347  	bb.WriteString(strconv.FormatInt(int64(len(msg)), 10))
  4348  	bb.WriteString(CR_LF)
  4349  	bb.WriteString(CR_LF)
  4350  	// Replace underlying buf which we can use directly when we send.
  4351  	// TODO(dlc) - Probably just use directly when forming bytes.Buffer?
  4352  	pmsg.buf = pmsg.buf[:0]
  4353  	pmsg.buf = append(pmsg.buf, bb.Bytes()...)
  4354  	// Replace with new header.
  4355  	pmsg.hdr = pmsg.buf
  4356  	// Cancel msg payload
  4357  	pmsg.msg = nil
  4358  }
  4359  
  4360  // Deliver a msg to the consumer.
  4361  // Lock should be held and o.mset validated to be non-nil.
  4362  func (o *consumer) deliverMsg(dsubj, ackReply string, pmsg *jsPubMsg, dc uint64, rp RetentionPolicy) {
  4363  	if o.mset == nil {
  4364  		pmsg.returnToPool()
  4365  		return
  4366  	}
  4367  
  4368  	dseq := o.dseq
  4369  	o.dseq++
  4370  
  4371  	pmsg.dsubj, pmsg.reply, pmsg.o = dsubj, ackReply, o
  4372  	psz := pmsg.size()
  4373  
  4374  	if o.maxpb > 0 {
  4375  		o.pbytes += psz
  4376  	}
  4377  
  4378  	mset := o.mset
  4379  	ap := o.cfg.AckPolicy
  4380  
  4381  	// Can't touch pmsg after sending, so capture what we need.
  4382  	seq, ts := pmsg.seq, pmsg.ts
  4383  	// Send message.
  4384  	o.outq.send(pmsg)
  4385  
  4386  	if ap == AckExplicit || ap == AckAll {
  4387  		o.trackPending(seq, dseq)
  4388  	} else if ap == AckNone {
  4389  		o.adflr = dseq
  4390  		o.asflr = seq
  4391  	}
  4392  
  4393  	// Flow control.
  4394  	if o.maxpb > 0 && o.needFlowControl(psz) {
  4395  		o.sendFlowControl()
  4396  	}
  4397  
  4398  	// If pull mode and we have inactivity threshold, signaled by dthresh, update last activity.
  4399  	if o.isPullMode() && o.dthresh > 0 {
  4400  		o.waiting.last = time.Now()
  4401  	}
  4402  
  4403  	// FIXME(dlc) - Capture errors?
  4404  	o.updateDelivered(dseq, seq, dc, ts)
  4405  
  4406  	// If we are ack none and mset is not limits-based retention, make sure the stream removes interest.
  4407  	if ap == AckNone && rp != LimitsPolicy {
  4408  		if o.node == nil || o.cfg.Direct {
  4409  			mset.ackq.push(seq)
  4410  		} else {
  4411  			o.updateAcks(dseq, seq)
  4412  		}
  4413  	}
  4414  }
  4415  
  4416  func (o *consumer) needFlowControl(sz int) bool {
  4417  	if o.maxpb == 0 {
  4418  		return false
  4419  	}
  4420  	// Decide whether to send a flow control message, which the client will need to respond to.
  4421  	// We send when we are over 50% of our current window limit.
  4422  	if o.fcid == _EMPTY_ && o.pbytes > o.maxpb/2 {
  4423  		return true
  4424  	}
  4425  	// If we have an existing outstanding FC, check to see if we need to expand the o.fcsz
  4426  	if o.fcid != _EMPTY_ && (o.pbytes-o.fcsz) >= o.maxpb {
  4427  		o.fcsz += sz
  4428  	}
  4429  	return false
  4430  }
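
        // In other words, a new FC request is only issued when none is in flight
        // and more than half the window is outstanding; while one is in flight we
        // only grow its accounted size once pending bytes beyond it refill the window.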
  4431  
  4432  func (o *consumer) processFlowControl(_ *subscription, c *client, _ *Account, subj, _ string, _ []byte) {
  4433  	o.mu.Lock()
  4434  	defer o.mu.Unlock()
  4435  
  4436  	// Ignore if not the latest we have sent out.
  4437  	if subj != o.fcid {
  4438  		return
  4439  	}
  4440  
  4441  	// For slow starts and ramping up.
  4442  	if o.maxpb < o.pblimit {
  4443  		o.maxpb *= 2
  4444  		if o.maxpb > o.pblimit {
  4445  			o.maxpb = o.pblimit
  4446  		}
  4447  	}
  4448  
  4449  	// Update accounting.
  4450  	o.pbytes -= o.fcsz
  4451  	if o.pbytes < 0 {
  4452  		o.pbytes = 0
  4453  	}
  4454  	o.fcid, o.fcsz = _EMPTY_, 0
  4455  
  4456  	o.signalNewMessages()
  4457  }
  4458  
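        // fcReply builds the flow control reply subject as
        // <jsFlowControlPre><stream>.<consumer>.<4 pseudo-random characters>.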
  4459  // Lock should be held.
  4460  func (o *consumer) fcReply() string {
  4461  	var sb strings.Builder
  4462  	sb.WriteString(jsFlowControlPre)
  4463  	sb.WriteString(o.stream)
  4464  	sb.WriteByte(btsep)
  4465  	sb.WriteString(o.name)
  4466  	sb.WriteByte(btsep)
  4467  	var b [4]byte
  4468  	rn := rand.Int63()
  4469  	for i, l := 0, rn; i < len(b); i++ {
  4470  		b[i] = digits[l%base]
  4471  		l /= base
  4472  	}
  4473  	sb.Write(b[:])
  4474  	return sb.String()
  4475  }
  4476  
  4477  // sendFlowControl will send a flow control packet to the consumer.
  4478  // Lock should be held.
  4479  func (o *consumer) sendFlowControl() {
  4480  	if !o.isPushMode() {
  4481  		return
  4482  	}
  4483  	subj, rply := o.cfg.DeliverSubject, o.fcReply()
  4484  	o.fcsz, o.fcid = o.pbytes, rply
  4485  	hdr := []byte("NATS/1.0 100 FlowControl Request\r\n\r\n")
  4486  	o.outq.send(newJSPubMsg(subj, _EMPTY_, rply, hdr, nil, nil, 0))
  4487  }
  4488  
  4489  // Tracks our outstanding pending acks. Only applicable when acks are required (AckExplicit or AckAll).
  4490  // Lock should be held.
  4491  func (o *consumer) trackPending(sseq, dseq uint64) {
  4492  	if o.pending == nil {
  4493  		o.pending = make(map[uint64]*Pending)
  4494  	}
  4495  	if o.ptmr == nil {
  4496  		o.ptmr = time.AfterFunc(o.ackWait(0), o.checkPending)
  4497  	}
  4498  	if p, ok := o.pending[sseq]; ok {
  4499  		// Update timestamp but keep original consumer delivery sequence.
  4500  		// So do not update p.Sequence.
  4501  		p.Timestamp = time.Now().UnixNano()
  4502  	} else {
  4503  		o.pending[sseq] = &Pending{dseq, time.Now().UnixNano()}
  4504  	}
  4505  }
  4506  
  4507  // Credit back a failed delivery.
  4508  // Lock should be held.
  4509  func (o *consumer) creditWaitingRequest(reply string) {
  4510  	for i, rp := 0, o.waiting.rp; i < o.waiting.n; i++ {
  4511  		if wr := o.waiting.reqs[rp]; wr != nil {
  4512  			if wr.reply == reply {
  4513  				wr.n++
  4514  				wr.d--
  4515  				return
  4516  			}
  4517  		}
  4518  		rp = (rp + 1) % cap(o.waiting.reqs)
  4519  	}
  4520  }
  4521  
  4522  // didNotDeliver is called when a delivery for a consumer message failed.
  4523  // Depending on our state, we will process the failure.
  4524  func (o *consumer) didNotDeliver(seq uint64, subj string) {
  4525  	o.mu.Lock()
  4526  	mset := o.mset
  4527  	if mset == nil {
  4528  		o.mu.Unlock()
  4529  		return
  4530  	}
  4531  	// Adjust back deliver count.
  4532  	o.decDeliveryCount(seq)
  4533  
  4534  	var checkDeliveryInterest bool
  4535  	if o.isPushMode() {
  4536  		o.active = false
  4537  		checkDeliveryInterest = true
  4538  	} else if o.pending != nil {
  4539  		o.creditWaitingRequest(subj)
  4540  		// pull mode and we have pending.
  4541  		if _, ok := o.pending[seq]; ok {
  4542  			// We found this message on pending, we need
  4543  			// to queue it up for immediate redelivery since
  4544  			// we know it was not delivered.
  4545  			if !o.onRedeliverQueue(seq) {
  4546  				o.addToRedeliverQueue(seq)
  4547  				o.signalNewMessages()
  4548  			}
  4549  		}
  4550  	}
  4551  	o.mu.Unlock()
  4552  
  4553  	// If we do not have interest update that here.
  4554  	if checkDeliveryInterest && o.hasNoLocalInterest() {
  4555  		o.updateDeliveryInterest(false)
  4556  	}
  4557  }
  4558  
  4559  // Lock should be held.
  4560  func (o *consumer) addToRedeliverQueue(seqs ...uint64) {
  4561  	o.rdq = append(o.rdq, seqs...)
  4562  	for _, seq := range seqs {
  4563  		o.rdqi.Insert(seq)
  4564  	}
  4565  }
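
        // o.rdq keeps redelivery sequences in delivery order, while o.rdqi mirrors
        // them in an indexed set so membership checks avoid scanning the slice.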
  4566  
  4567  // Lock should be held.
  4568  func (o *consumer) hasRedeliveries() bool {
  4569  	return len(o.rdq) > 0
  4570  }
  4571  
  4572  func (o *consumer) getNextToRedeliver() uint64 {
  4573  	if len(o.rdq) == 0 {
  4574  		return 0
  4575  	}
  4576  	seq := o.rdq[0]
  4577  	if len(o.rdq) == 1 {
  4578  		o.rdq = nil
  4579  		o.rdqi.Empty()
  4580  	} else {
  4581  		o.rdq = append(o.rdq[:0], o.rdq[1:]...)
  4582  		o.rdqi.Delete(seq)
  4583  	}
  4584  	return seq
  4585  }
  4586  
  4587  // This checks if we already have this sequence queued for redelivery.
  4588  // Membership is tracked in the indexed o.rdqi set, so this is not an O(n) scan.
  4589  // Lock should be held.
  4590  func (o *consumer) onRedeliverQueue(seq uint64) bool {
  4591  	return o.rdqi.Exists(seq)
  4592  }
  4593  
  4594  // Remove a sequence from the redelivery queue.
  4595  // Lock should be held.
  4596  func (o *consumer) removeFromRedeliverQueue(seq uint64) bool {
  4597  	if !o.onRedeliverQueue(seq) {
  4598  		return false
  4599  	}
  4600  	for i, rseq := range o.rdq {
  4601  		if rseq == seq {
  4602  			if len(o.rdq) == 1 {
  4603  				o.rdq = nil
  4604  				o.rdqi.Empty()
  4605  			} else {
  4606  				o.rdq = append(o.rdq[:i], o.rdq[i+1:]...)
  4607  				o.rdqi.Delete(seq)
  4608  			}
  4609  			return true
  4610  		}
  4611  	}
  4612  	return false
  4613  }
  4614  
  4615  // Checks the pending messages.
  4616  func (o *consumer) checkPending() {
  4617  	o.mu.RLock()
  4618  	mset := o.mset
  4619  	// On stop, mset and timer will be nil.
  4620  	if o.closed || mset == nil || o.ptmr == nil {
  4621  		stopAndClearTimer(&o.ptmr)
  4622  		o.mu.RUnlock()
  4623  		return
  4624  	}
  4625  	o.mu.RUnlock()
  4626  
  4627  	var shouldUpdateState bool
  4628  	var state StreamState
  4629  	mset.store.FastState(&state)
  4630  	fseq := state.FirstSeq
  4631  
  4632  	o.mu.Lock()
  4633  	defer o.mu.Unlock()
  4634  
  4635  	now := time.Now().UnixNano()
  4636  	ttl := int64(o.cfg.AckWait)
  4637  	next := int64(o.ackWait(0))
  4638  	// However, if there is backoff, initialize with the largest backoff.
  4639  	// It will be adjusted as needed.
  4640  	if l := len(o.cfg.BackOff); l > 0 {
  4641  		next = int64(o.cfg.BackOff[l-1])
  4642  	}
  4643  
  4644  	// Since we can update timestamps, we have to review all pending.
  4645  	// We will now bail if we see an ack pending inbound to us via o.awl.
  4646  	var expired []uint64
  4647  	check := len(o.pending) > 1024
  4648  	for seq, p := range o.pending {
  4649  		if check && atomic.LoadInt64(&o.awl) > 0 {
  4650  			o.ptmr.Reset(100 * time.Millisecond)
  4651  			return
  4652  		}
  4653  		// Check if these are no longer valid.
  4654  		if seq < fseq || seq <= o.asflr {
  4655  			delete(o.pending, seq)
  4656  			delete(o.rdc, seq)
  4657  			o.removeFromRedeliverQueue(seq)
  4658  			shouldUpdateState = true
  4659  			// Check if we need to move ack floors.
  4660  			if seq > o.asflr {
  4661  				o.asflr = seq
  4662  			}
  4663  			if p.Sequence > o.adflr {
  4664  				o.adflr = p.Sequence
  4665  			}
  4666  			continue
  4667  		}
  4668  		elapsed, deadline := now-p.Timestamp, ttl
  4669  		if len(o.cfg.BackOff) > 0 {
  4670  			// This is ok even if o.rdc is nil, we would get dc == 0, which is what we want.
  4671  			dc := int(o.rdc[seq])
  4672  			// This will be the index for the next backoff, set to the last element if needed.
  4673  			nbi := dc + 1
  4674  			if dc+1 >= len(o.cfg.BackOff) {
  4675  				dc = len(o.cfg.BackOff) - 1
  4676  				nbi = dc
  4677  			}
  4678  			deadline = int64(o.cfg.BackOff[dc])
  4679  			// Set `next` to the next backoff (if smaller than current `next` value).
  4680  			if nextBackoff := int64(o.cfg.BackOff[nbi]); nextBackoff < next {
  4681  				next = nextBackoff
  4682  			}
  4683  		}
  4684  		if elapsed >= deadline {
  4685  			if !o.onRedeliverQueue(seq) {
  4686  				expired = append(expired, seq)
  4687  			}
  4688  		} else if deadline-elapsed < next {
  4689  			// Update when we should fire next.
  4690  			next = deadline - elapsed
  4691  		}
  4692  	}
  4693  
  4694  	if len(expired) > 0 {
  4695  		// We need to sort.
  4696  		sort.Slice(expired, func(i, j int) bool { return expired[i] < expired[j] })
  4697  		o.addToRedeliverQueue(expired...)
  4698  		// Now we should update the timestamp here since we are redelivering.
  4699  		// We will use an incrementing time to preserve order for any other redelivery.
  4700  		off := now - o.pending[expired[0]].Timestamp
  4701  		for _, seq := range expired {
  4702  			if p, ok := o.pending[seq]; ok {
  4703  				p.Timestamp += off
  4704  			}
  4705  		}
  4706  		o.signalNewMessages()
  4707  	}
  4708  
  4709  	if len(o.pending) > 0 {
  4710  		delay := time.Duration(next)
  4711  		if o.ptmr == nil {
  4712  			o.ptmr = time.AfterFunc(delay, o.checkPending)
  4713  		} else {
  4714  			o.ptmr.Reset(o.ackWait(delay))
  4715  		}
  4716  	} else {
  4717  		// Make sure to stop the timer and clear out any redelivery queues.
  4718  		stopAndClearTimer(&o.ptmr)
  4719  		o.rdq = nil
  4720  		o.rdqi.Empty()
  4721  		o.pending = nil
  4722  		// Mimic behavior in processAckMsg when pending is empty.
  4723  		o.adflr, o.asflr = o.dseq-1, o.sseq-1
  4724  	}
  4725  
  4726  	// Update our state if needed.
  4727  	if shouldUpdateState {
  4728  		if err := o.writeStoreStateUnlocked(); err != nil && o.srv != nil && o.mset != nil && !o.closed {
  4729  			s, acc, mset, name := o.srv, o.acc, o.mset, o.name
  4730  			s.Warnf("Consumer '%s > %s > %s' error on write store state from check pending: %v", acc, mset.cfg.Name, name, err)
  4731  		}
  4732  	}
  4733  }
  4734  
  4735  // seqFromReply will extract the consumer delivery sequence number from a reply subject.
  4736  func (o *consumer) seqFromReply(reply string) uint64 {
  4737  	_, dseq, _ := ackReplyInfo(reply)
  4738  	return dseq
  4739  }
  4740  
  4741  // streamSeqFromReply will extract the stream sequence from the reply subject.
  4742  func (o *consumer) streamSeqFromReply(reply string) uint64 {
  4743  	sseq, _, _ := ackReplyInfo(reply)
  4744  	return sseq
  4745  }
  4746  
  4747  // Quick parser for positive numbers in ack reply encoding.
  4748  func parseAckReplyNum(d string) (n int64) {
  4749  	if len(d) == 0 {
  4750  		return -1
  4751  	}
  4752  	for _, dec := range d {
  4753  		if dec < asciiZero || dec > asciiNine {
  4754  			return -1
  4755  		}
  4756  		n = n*10 + (int64(dec) - asciiZero)
  4757  	}
  4758  	return n
  4759  }
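
        // For example, parseAckReplyNum("42") returns 42, while parseAckReplyNum("")
        // and parseAckReplyNum("4x") both return -1.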
  4760  
  4761  const expectedNumReplyTokens = 9
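
        // The nine tokens of an ack reply subject are laid out as:
        //
        //	$JS.ACK.<stream>.<consumer>.<delivery count>.<stream seq>.<consumer seq>.<timestamp>.<pending>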
  4762  
  4763  // Grab encoded information in the reply subject for a delivered message.
  4764  func replyInfo(subject string) (sseq, dseq, dc uint64, ts int64, pending uint64) {
  4765  	tsa := [expectedNumReplyTokens]string{}
  4766  	start, tokens := 0, tsa[:0]
  4767  	for i := 0; i < len(subject); i++ {
  4768  		if subject[i] == btsep {
  4769  			tokens = append(tokens, subject[start:i])
  4770  			start = i + 1
  4771  		}
  4772  	}
  4773  	tokens = append(tokens, subject[start:])
  4774  	if len(tokens) != expectedNumReplyTokens || tokens[0] != "$JS" || tokens[1] != "ACK" {
  4775  		return 0, 0, 0, 0, 0
  4776  	}
  4777  	// TODO(dlc) - Should we error if we do not match consumer name?
  4778  	// stream is tokens[2], consumer is 3.
  4779  	dc = uint64(parseAckReplyNum(tokens[4]))
  4780  	sseq, dseq = uint64(parseAckReplyNum(tokens[5])), uint64(parseAckReplyNum(tokens[6]))
  4781  	ts = parseAckReplyNum(tokens[7])
  4782  	pending = uint64(parseAckReplyNum(tokens[8]))
  4783  
  4784  	return sseq, dseq, dc, ts, pending
  4785  }
  4786  
  4787  func ackReplyInfo(subject string) (sseq, dseq, dc uint64) {
  4788  	tsa := [expectedNumReplyTokens]string{}
  4789  	start, tokens := 0, tsa[:0]
  4790  	for i := 0; i < len(subject); i++ {
  4791  		if subject[i] == btsep {
  4792  			tokens = append(tokens, subject[start:i])
  4793  			start = i + 1
  4794  		}
  4795  	}
  4796  	tokens = append(tokens, subject[start:])
  4797  	if len(tokens) != expectedNumReplyTokens || tokens[0] != "$JS" || tokens[1] != "ACK" {
  4798  		return 0, 0, 0
  4799  	}
  4800  	dc = uint64(parseAckReplyNum(tokens[4]))
  4801  	sseq, dseq = uint64(parseAckReplyNum(tokens[5])), uint64(parseAckReplyNum(tokens[6]))
  4802  
  4803  	return sseq, dseq, dc
  4804  }
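
// Illustrative sketch, not part of the original source: the 9-token ack reply
// layout parsed by replyInfo and ackReplyInfo above, with hypothetical values:
// $JS.ACK.<stream>.<consumer>.<delivery count>.<stream seq>.<consumer seq>.<timestamp>.<pending>
func exampleAckReply() {
	reply := "$JS.ACK.ORDERS.pull-1.1.100.7.1700000000000000000.25"
	sseq, dseq, dc := ackReplyInfo(reply)
	fmt.Println(sseq, dseq, dc) // 100 7 1
}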
  4805  
  4806  // NextSeq returns the next delivered sequence number for this consumer.
  4807  func (o *consumer) nextSeq() uint64 {
  4808  	o.mu.RLock()
  4809  	dseq := o.dseq
  4810  	o.mu.RUnlock()
  4811  	return dseq
  4812  }
  4813  
  4814  // Used to hold skip list when deliver policy is last per subject.
  4815  type lastSeqSkipList struct {
  4816  	resume uint64
  4817  	seqs   []uint64
  4818  }
  4819  
  4820  // Lets us know we have a skip list, which is used for deliver last per subject when we are just starting.
  4821  // Lock should be held.
  4822  func (o *consumer) hasSkipListPending() bool {
  4823  	return o.lss != nil && len(o.lss.seqs) > 0
  4824  }
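
// Illustrative sketch, not part of the original source: with deliver last per
// subject, the skip list holds the last sequence for each matching subject,
// sorted ascending. Delivery starts at the smallest entry, skips through the
// rest, and resumes normal delivery after resume. Values are hypothetical.
func exampleSkipList() *lastSeqSkipList {
	return &lastSeqSkipList{resume: 900, seqs: []uint64{101, 250, 730}}
}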
  4825  
  4826  // Will select the starting sequence.
  4827  func (o *consumer) selectStartingSeqNo() {
  4828  	if o.mset == nil || o.mset.store == nil {
  4829  		o.sseq = 1
  4830  	} else {
  4831  		var state StreamState
  4832  		o.mset.store.FastState(&state)
  4833  		if o.cfg.OptStartSeq == 0 {
  4834  			if o.cfg.DeliverPolicy == DeliverAll {
  4835  				o.sseq = state.FirstSeq
  4836  			} else if o.cfg.DeliverPolicy == DeliverLast {
  4837  				if o.subjf == nil {
  4838  					o.sseq = state.LastSeq
  4839  					return
  4840  				}
  4841  				// If we are partitioned here, this will be properly set when we become leader.
  4842  				for _, filter := range o.subjf {
  4843  					ss := o.mset.store.FilteredState(1, filter.subject)
  4844  					filter.nextSeq = ss.Last
  4845  					if ss.Last > o.sseq {
  4846  						o.sseq = ss.Last
  4847  					}
  4848  				}
  4849  			} else if o.cfg.DeliverPolicy == DeliverLastPerSubject {
  4850  				// If our parent stream is set to max msgs per subject of 1 this is just
  4851  				// a normal consumer at this point. We can avoid any heavy lifting.
  4852  				if o.mset.cfg.MaxMsgsPer == 1 {
  4853  					o.sseq = state.FirstSeq
  4854  				} else {
  4855  					// A threshold for when we switch from get last msg to subjects state.
  4856  					const numSubjectsThresh = 256
  4857  					lss := &lastSeqSkipList{resume: state.LastSeq}
  4858  					var filters []string
  4859  					if o.subjf == nil {
  4860  						filters = append(filters, o.cfg.FilterSubject)
  4861  					} else {
  4862  						for _, filter := range o.subjf {
  4863  							filters = append(filters, filter.subject)
  4864  						}
  4865  					}
  4866  					for _, filter := range filters {
  4867  						if st := o.mset.store.SubjectsTotals(filter); len(st) < numSubjectsThresh {
  4868  							var smv StoreMsg
  4869  							for subj := range st {
  4870  								if sm, err := o.mset.store.LoadLastMsg(subj, &smv); err == nil {
  4871  									lss.seqs = append(lss.seqs, sm.seq)
  4872  								}
  4873  							}
  4874  						} else if mss := o.mset.store.SubjectsState(filter); len(mss) > 0 {
  4875  							for _, ss := range mss {
  4876  								lss.seqs = append(lss.seqs, ss.Last)
  4877  							}
  4878  						}
  4879  					}
  4880  					// Sort the skip list if needed.
  4881  					if len(lss.seqs) > 1 {
  4882  						sort.Slice(lss.seqs, func(i, j int) bool {
  4883  							return lss.seqs[j] > lss.seqs[i]
  4884  						})
  4885  					}
  4886  					if len(lss.seqs) == 0 {
  4887  						o.sseq = state.LastSeq
  4888  					} else {
  4889  						o.sseq = lss.seqs[0]
  4890  					}
  4891  					// Assign skip list.
  4892  					o.lss = lss
  4893  				}
  4894  			} else if o.cfg.OptStartTime != nil {
  4895  				// If we are here we are time based.
  4896  				// TODO(dlc) - Once clustered can't rely on this.
  4897  				o.sseq = o.mset.store.GetSeqFromTime(*o.cfg.OptStartTime)
  4898  				// Here we want to see if we are filtered, and if so possibly close the gap
  4899  				// to the nearest first given our starting sequence from time. This is so we do
  4900  				// not force the system to do a linear walk between o.sseq and the real first.
  4901  				if len(o.subjf) > 0 {
  4902  					nseq := state.LastSeq
  4903  					for _, filter := range o.subjf {
  4904  						// Use first sequence since this is more optimized atm.
  4905  						ss := o.mset.store.FilteredState(state.FirstSeq, filter.subject)
  4906  						if ss.First > o.sseq && ss.First < nseq {
  4907  							nseq = ss.First
  4908  						}
  4909  					}
  4910  					// Skip ahead if possible.
  4911  					if nseq > o.sseq && nseq < state.LastSeq {
  4912  						o.sseq = nseq
  4913  					}
  4914  				}
  4915  			} else {
  4916  				// DeliverNew
  4917  				o.sseq = state.LastSeq + 1
  4918  			}
  4919  		} else {
  4920  			o.sseq = o.cfg.OptStartSeq
  4921  		}
  4922  
  4923  		if state.FirstSeq == 0 {
  4924  			o.sseq = 1
  4925  			for _, filter := range o.subjf {
  4926  				filter.nextSeq = 1
  4927  			}
  4928  		} else if o.sseq < state.FirstSeq {
  4929  			o.sseq = state.FirstSeq
  4930  		} else if o.sseq > state.LastSeq {
  4931  			o.sseq = state.LastSeq + 1
  4932  		}
  4933  		for _, filter := range o.subjf {
  4934  			if state.FirstSeq == 0 {
  4935  				filter.nextSeq = 1
  4936  			}
  4937  			if filter.nextSeq < state.FirstSeq {
  4938  				filter.nextSeq = state.FirstSeq
  4939  			}
  4940  			if filter.nextSeq > state.LastSeq {
  4941  				filter.nextSeq = state.LastSeq + 1
  4942  			}
  4943  		}
  4944  	}
  4945  	if o.subjf != nil {
  4946  		sort.Slice(o.subjf, func(i, j int) bool {
  4947  			return o.subjf[j].nextSeq > o.subjf[i].nextSeq
  4948  		})
  4949  	}
  4950  
  4951  	// Always set delivery sequence to 1.
  4952  	o.dseq = 1
  4953  	// Set ack delivery floor to delivery-1
  4954  	o.adflr = o.dseq - 1
  4955  	// Set ack store floor to store-1
  4956  	o.asflr = o.sseq - 1
  4957  	// Set our starting sequence state.
  4958  	if o.store != nil && o.sseq > 0 {
  4959  		o.store.SetStarting(o.sseq - 1)
  4960  	}
  4961  }
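
// Illustrative sketch, not part of the original source: the essential mapping
// from deliver policy to starting stream sequence, given the stream's first
// and last sequences. The clamping and per-subject handling above are omitted.
func exampleStartSeq(policy DeliverPolicy, first, last, optStart uint64) uint64 {
	switch policy {
	case DeliverAll:
		return first
	case DeliverLast:
		return last
	case DeliverNew:
		return last + 1
	case DeliverByStartSequence:
		return optStart
	default: // by start time and last per subject need store lookups
		return first
	}
}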
  4962  
  4963  // Test whether a config represents a durable subscriber.
  4964  func isDurableConsumer(config *ConsumerConfig) bool {
  4965  	return config != nil && config.Durable != _EMPTY_
  4966  }
  4967  
  4968  func (o *consumer) isDurable() bool {
  4969  	return o.cfg.Durable != _EMPTY_
  4970  }
  4971  
  4972  // Are we in push mode, i.e. do we have a delivery subject?
  4973  func (o *consumer) isPushMode() bool {
  4974  	return o.cfg.DeliverSubject != _EMPTY_
  4975  }
  4976  
  4977  func (o *consumer) isPullMode() bool {
  4978  	return o.cfg.DeliverSubject == _EMPTY_
  4979  }
  4980  
  4981  // Name returns the name of this consumer.
  4982  func (o *consumer) String() string {
  4983  	o.mu.RLock()
  4984  	n := o.name
  4985  	o.mu.RUnlock()
  4986  	return n
  4987  }
  4988  
  4989  func createConsumerName() string {
  4990  	return getHash(nuid.Next())
  4991  }
  4992  
  4993  // deleteConsumer will delete the consumer from this stream.
  4994  func (mset *stream) deleteConsumer(o *consumer) error {
  4995  	return o.delete()
  4996  }
  4997  
  4998  func (o *consumer) getStream() *stream {
  4999  	o.mu.RLock()
  5000  	mset := o.mset
  5001  	o.mu.RUnlock()
  5002  	return mset
  5003  }
  5004  
  5005  func (o *consumer) streamName() string {
  5006  	o.mu.RLock()
  5007  	mset := o.mset
  5008  	o.mu.RUnlock()
  5009  	if mset != nil {
  5010  		return mset.name()
  5011  	}
  5012  	return _EMPTY_
  5013  }
  5014  
  5015  // Active indicates if this consumer is still active.
  5016  func (o *consumer) isActive() bool {
  5017  	o.mu.RLock()
  5018  	active := o.active && o.mset != nil
  5019  	o.mu.RUnlock()
  5020  	return active
  5021  }
  5022  
  5023  // hasNoLocalInterest returns true if we have no local interest.
  5024  func (o *consumer) hasNoLocalInterest() bool {
  5025  	o.mu.RLock()
  5026  	rr := o.acc.sl.Match(o.cfg.DeliverSubject)
  5027  	o.mu.RUnlock()
  5028  	return len(rr.psubs)+len(rr.qsubs) == 0
  5029  }
  5030  
  5031  // This is when the underlying stream has been purged.
  5032  // sseq is the new first seq for the stream after purge.
  5033  // Lock should NOT be held.
  5034  func (o *consumer) purge(sseq uint64, slseq uint64, isWider bool) {
  5035  	// Do not update our state unless we know we are the leader.
  5036  	if !o.isLeader() {
  5037  		return
  5038  	}
  5039  	// Signals all have been purged for this consumer.
  5040  	if sseq == 0 && !isWider {
  5041  		sseq = slseq + 1
  5042  	}
  5043  
  5044  	var store StreamStore
  5045  	if isWider {
  5046  		o.mu.RLock()
  5047  		if o.mset != nil {
  5048  			store = o.mset.store
  5049  		}
  5050  		o.mu.RUnlock()
  5051  	}
  5052  
  5053  	o.mu.Lock()
  5054  	// Do not go backwards
  5055  	if o.sseq < sseq {
  5056  		o.sseq = sseq
  5057  	}
  5058  
  5059  	if o.asflr < sseq {
  5060  		o.asflr = sseq - 1
  5061  		// We need to remove those no longer relevant from pending.
  5062  		for seq, p := range o.pending {
  5063  			if seq <= o.asflr {
  5064  				if p.Sequence > o.adflr {
  5065  					o.adflr = p.Sequence
  5066  					if o.adflr > o.dseq {
  5067  						o.dseq = o.adflr
  5068  					}
  5069  				}
  5070  				delete(o.pending, seq)
  5071  				delete(o.rdc, seq)
  5072  				// rdq handled below.
  5073  			}
  5074  			if isWider && store != nil {
  5075  				// Our filtered subject, which could be all, is wider than the underlying purge.
  5076  				// We need to check if the pending items left are still valid.
  5077  				var smv StoreMsg
  5078  				if _, err := store.LoadMsg(seq, &smv); err == errDeletedMsg || err == ErrStoreMsgNotFound {
  5079  					if p.Sequence > o.adflr {
  5080  						o.adflr = p.Sequence
  5081  						if o.adflr > o.dseq {
  5082  							o.dseq = o.adflr
  5083  						}
  5084  					}
  5085  					delete(o.pending, seq)
  5086  					delete(o.rdc, seq)
  5087  				}
  5088  			}
  5089  		}
  5090  	}
  5091  
  5092  	// This means we can reset everything at this point.
  5093  	if len(o.pending) == 0 {
  5094  		o.pending, o.rdc = nil, nil
  5095  		o.adflr, o.asflr = o.dseq-1, o.sseq-1
  5096  	}
  5097  
  5098  	// We need to remove all those being queued for redelivery under o.rdq
  5099  	if len(o.rdq) > 0 {
  5100  		rdq := o.rdq
  5101  		o.rdq = nil
  5102  		o.rdqi.Empty()
  5103  		for _, sseq := range rdq {
  5104  			if sseq >= o.sseq {
  5105  				o.addToRedeliverQueue(sseq)
  5106  			}
  5107  		}
  5108  	}
  5109  	// Grab some info in case of error below.
  5110  	s, acc, mset, name := o.srv, o.acc, o.mset, o.name
  5111  	o.mu.Unlock()
  5112  
  5113  	if err := o.writeStoreState(); err != nil && s != nil && mset != nil {
  5114  		s.Warnf("Consumer '%s > %s > %s' error on write store state from purge: %v", acc, mset.name(), name, err)
  5115  	}
  5116  }
  5117  
  5118  func stopAndClearTimer(tp **time.Timer) {
  5119  	if *tp == nil {
  5120  		return
  5121  	}
  5122  	// The timer will get drained in the normal course of things; do not
  5123  	// try to drain it here.
  5124  	(*tp).Stop()
  5125  	*tp = nil
  5126  }
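
// Illustrative sketch, not part of the original source: typical usage of
// stopAndClearTimer, so a cleared timer can never fire again or be Reset by mistake.
func exampleStopTimer() {
	t := time.AfterFunc(time.Second, func() {})
	stopAndClearTimer(&t)
	fmt.Println(t == nil) // true
}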
  5127  
  5128  // Stop will shut down the consumer for the associated stream.
  5129  func (o *consumer) stop() error {
  5130  	return o.stopWithFlags(false, false, true, false)
  5131  }
  5132  
  5133  func (o *consumer) deleteWithoutAdvisory() error {
  5134  	return o.stopWithFlags(true, false, true, false)
  5135  }
  5136  
  5137  // Delete will delete the consumer for the associated stream and send advisories.
  5138  func (o *consumer) delete() error {
  5139  	return o.stopWithFlags(true, false, true, true)
  5140  }
  5141  
  5142  // To test for closed state.
  5143  func (o *consumer) isClosed() bool {
  5144  	o.mu.RLock()
  5145  	defer o.mu.RUnlock()
  5146  	return o.closed
  5147  }
  5148  
  5149  func (o *consumer) stopWithFlags(dflag, sdflag, doSignal, advisory bool) error {
  5150  	// If dflag is true, determine whether we are still assigned.
  5151  	var isConsumerAssigned bool
  5152  	if dflag {
  5153  		o.mu.RLock()
  5154  		acc, stream, consumer := o.acc, o.stream, o.name
  5155  		o.mu.RUnlock()
  5156  		// Grab jsa to check assignment.
  5157  		var jsa *jsAccount
  5158  		if acc != nil {
  5159  			// Need lock here to avoid data race.
  5160  			acc.mu.RLock()
  5161  			jsa = acc.js
  5162  			acc.mu.RUnlock()
  5163  		}
  5164  		if jsa != nil {
  5165  			isConsumerAssigned = jsa.consumerAssigned(stream, consumer)
  5166  		}
  5167  	}
  5168  
  5169  	o.mu.Lock()
  5170  	if o.closed {
  5171  		o.mu.Unlock()
  5172  		return nil
  5173  	}
  5174  	o.closed = true
  5175  
  5176  	// Check if we are the leader and are being deleted (as a node).
  5177  	if dflag && o.isLeader() {
  5178  		// If we are clustered and the node leader (probable from the check above), step down.
  5179  		if node := o.node; node != nil && node.Leader() {
  5180  			node.StepDown()
  5181  		}
  5182  
  5183  		// dflag does not necessarily mean that the consumer is being deleted,
  5184  		// just that the consumer node is being removed from this peer, so we
  5185  		// send delete advisories only if we are no longer assigned at the meta layer.
  5186  		if !isConsumerAssigned && advisory {
  5187  			o.sendDeleteAdvisoryLocked()
  5188  		}
  5189  		if o.isPullMode() {
  5190  			// Release any pending.
  5191  			o.releaseAnyPendingRequests(isConsumerAssigned)
  5192  		}
  5193  	}
  5194  
  5195  	if o.qch != nil {
  5196  		close(o.qch)
  5197  		o.qch = nil
  5198  	}
  5199  
  5200  	a := o.acc
  5201  	store := o.store
  5202  	mset := o.mset
  5203  	o.mset = nil
  5204  	o.active = false
  5205  	o.unsubscribe(o.ackSub)
  5206  	o.unsubscribe(o.reqSub)
  5207  	o.unsubscribe(o.fcSub)
  5208  	o.ackSub = nil
  5209  	o.reqSub = nil
  5210  	o.fcSub = nil
  5211  	if o.infoSub != nil {
  5212  		o.srv.sysUnsubscribe(o.infoSub)
  5213  		o.infoSub = nil
  5214  	}
  5215  	c := o.client
  5216  	o.client = nil
  5217  	sysc := o.sysc
  5218  	o.sysc = nil
  5219  	stopAndClearTimer(&o.ptmr)
  5220  	stopAndClearTimer(&o.dtmr)
  5221  	stopAndClearTimer(&o.gwdtmr)
  5222  	delivery := o.cfg.DeliverSubject
  5223  	o.waiting = nil
  5224  	// Break us out of the readLoop.
  5225  	if doSignal {
  5226  		o.signalNewMessages()
  5227  	}
  5228  	n := o.node
  5229  	qgroup := o.cfg.DeliverGroup
  5230  	o.ackMsgs.unregister()
  5231  	if o.nextMsgReqs != nil {
  5232  		o.nextMsgReqs.unregister()
  5233  	}
  5234  
  5235  	// For cleaning up the node assignment.
  5236  	var ca *consumerAssignment
  5237  	if dflag {
  5238  		ca = o.ca
  5239  	}
  5240  	sigSubs := o.sigSubs
  5241  	js := o.js
  5242  	o.mu.Unlock()
  5243  
  5244  	if c != nil {
  5245  		c.closeConnection(ClientClosed)
  5246  	}
  5247  	if sysc != nil {
  5248  		sysc.closeConnection(ClientClosed)
  5249  	}
  5250  
  5251  	if delivery != _EMPTY_ {
  5252  		a.sl.clearNotification(delivery, qgroup, o.inch)
  5253  	}
  5254  
  5255  	var rp RetentionPolicy
  5256  	if mset != nil {
  5257  		if len(sigSubs) > 0 {
  5258  			mset.removeConsumerAsLeader(o)
  5259  		}
  5260  		mset.mu.Lock()
  5261  		mset.removeConsumer(o)
  5262  		rp = mset.cfg.Retention
  5263  		mset.mu.Unlock()
  5264  	}
  5265  
  5266  	// Since we are interest-based retention, we may need to remove messages once
  5267  	// interest is gone. We will do this consistently on all replicas. Note that in
  5268  	// clustered mode the non-leader consumers will need to restore state first.
  5269  	if dflag && rp == InterestPolicy {
  5270  		state := mset.state()
  5271  		stop := state.LastSeq
  5272  		o.mu.Lock()
  5273  		if !o.isLeader() {
  5274  			o.readStoredState(stop)
  5275  		}
  5276  		start := o.asflr
  5277  		o.mu.Unlock()
  5278  		// Make sure we start at worst with first sequence in the stream.
  5279  		if start < state.FirstSeq {
  5280  			start = state.FirstSeq
  5281  		}
  5282  
  5283  		var rmseqs []uint64
  5284  		mset.mu.Lock()
  5285  		for seq := start; seq <= stop; seq++ {
  5286  			if mset.noInterest(seq, o) {
  5287  				rmseqs = append(rmseqs, seq)
  5288  			}
  5289  		}
  5290  		mset.mu.Unlock()
  5291  
  5292  		// These can be removed.
  5293  		for _, seq := range rmseqs {
  5294  			mset.store.RemoveMsg(seq)
  5295  		}
  5296  	}
  5297  
  5298  	// Cluster cleanup.
  5299  	if n != nil {
  5300  		if dflag {
  5301  			n.Delete()
  5302  		} else {
  5303  			// Try to install snapshot on clean exit
  5304  			if o.store != nil && (o.retention != LimitsPolicy || n.NeedSnapshot()) {
  5305  				if snap, err := o.store.EncodedState(); err == nil {
  5306  					n.InstallSnapshot(snap)
  5307  				}
  5308  			}
  5309  			n.Stop()
  5310  		}
  5311  	}
  5312  
  5313  	if ca != nil {
  5314  		js.mu.Lock()
  5315  		if ca.Group != nil {
  5316  			ca.Group.node = nil
  5317  		}
  5318  		js.mu.Unlock()
  5319  	}
  5320  
  5321  	// Clean up our store.
  5322  	var err error
  5323  	if store != nil {
  5324  		if dflag {
  5325  			if sdflag {
  5326  				err = store.StreamDelete()
  5327  			} else {
  5328  				err = store.Delete()
  5329  			}
  5330  		} else {
  5331  			err = store.Stop()
  5332  		}
  5333  	}
  5334  
  5335  	return err
  5336  }
  5337  
  5338  // Check that we do not form a cycle by delivering to a delivery subject
  5339  // that is part of the interest group.
  5340  func deliveryFormsCycle(cfg *StreamConfig, deliverySubject string) bool {
  5341  	for _, subject := range cfg.Subjects {
  5342  		if subjectIsSubsetMatch(deliverySubject, subject) {
  5343  			return true
  5344  		}
  5345  	}
  5346  	return false
  5347  }
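
// Illustrative sketch, not part of the original source: a delivery subject
// matched by one of the stream's own subjects would loop deliveries back into
// the stream. Configuration values are hypothetical.
func exampleDeliveryCycle() {
	cfg := &StreamConfig{Subjects: []string{"orders.>"}}
	fmt.Println(deliveryFormsCycle(cfg, "orders.deliver")) // true
	fmt.Println(deliveryFormsCycle(cfg, "deliver.orders")) // false
}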
  5348  
  5349  // switchToEphemeral is called on startup when recovering ephemerals.
  5350  func (o *consumer) switchToEphemeral() {
  5351  	o.mu.Lock()
  5352  	o.cfg.Durable = _EMPTY_
  5353  	store, ok := o.store.(*consumerFileStore)
  5354  	rr := o.acc.sl.Match(o.cfg.DeliverSubject)
  5355  	// Setup dthresh.
  5356  	o.updateInactiveThreshold(&o.cfg)
  5357  	o.updatePauseState(&o.cfg)
  5358  	o.mu.Unlock()
  5359  
  5360  	// Update interest
  5361  	o.updateDeliveryInterest(len(rr.psubs)+len(rr.qsubs) > 0)
  5362  	// Write out new config
  5363  	if ok {
  5364  		store.updateConfig(o.cfg)
  5365  	}
  5366  }
  5367  
  5368  // RequestNextMsgSubject returns the subject to request the next message when in pull or worker mode.
  5369  // Returns empty otherwise.
  5370  func (o *consumer) requestNextMsgSubject() string {
  5371  	return o.nextMsgSubj
  5372  }
  5373  
  5374  func (o *consumer) decStreamPending(sseq uint64, subj string) {
  5375  	o.mu.Lock()
  5376  	// Update our cached num pending only if we think deliverMsg has not done so.
  5377  	if sseq >= o.sseq && o.isFilteredMatch(subj) {
  5378  		o.npc--
  5379  	}
  5380  
  5381  	// Check if this message was pending.
  5382  	p, wasPending := o.pending[sseq]
  5383  	var rdc uint64 = 1
  5384  	if o.rdc != nil {
  5385  		rdc = o.rdc[sseq]
  5386  	}
  5387  	o.mu.Unlock()
  5388  
  5389  	// If it was pending process it like an ack.
  5390  	if wasPending {
  5391  		// We could be holding the stream lock, so do this in a goroutine.
  5392  		// TODO(dlc) - We should do this with ipq vs naked go routines.
  5393  		go o.processTerm(sseq, p.Sequence, rdc, ackTermUnackedLimitsReason)
  5394  	}
  5395  }
  5396  
  5397  func (o *consumer) account() *Account {
  5398  	o.mu.RLock()
  5399  	a := o.acc
  5400  	o.mu.RUnlock()
  5401  	return a
  5402  }
  5403  
  5404  // Creates the signaling subscriptions for the consumer.
  5405  // All subjects share the same callback.
  5406  func (o *consumer) signalSubs() []*subscription {
  5407  	o.mu.Lock()
  5408  	defer o.mu.Unlock()
  5409  
  5410  	if o.sigSubs != nil {
  5411  		return o.sigSubs
  5412  	}
  5413  
  5414  	subs := []*subscription{}
  5415  	if o.subjf == nil {
  5416  		subs = append(subs, &subscription{subject: []byte(fwcs), icb: o.processStreamSignal})
  5417  		o.sigSubs = subs
  5418  		return subs
  5419  	}
  5420  
  5421  	for _, filter := range o.subjf {
  5422  		subs = append(subs, &subscription{subject: []byte(filter.subject), icb: o.processStreamSignal})
  5423  	}
  5424  	o.sigSubs = subs
  5425  	return subs
  5426  }
  5427  
  5428  // This is what will be called when our parent stream wants to kick us regarding a new message.
  5429  // We know that we are the leader and that this subject matches us by how the parent handles registering
  5430  // us with the signaling sublist.
  5431  // We do need the sequence of the message, however, and the message payload carries the encoded sequence.
  5432  func (o *consumer) processStreamSignal(_ *subscription, _ *client, _ *Account, subject, _ string, seqb []byte) {
  5433  	var le = binary.LittleEndian
  5434  	seq := le.Uint64(seqb)
  5435  
  5436  	o.mu.Lock()
  5437  	defer o.mu.Unlock()
  5438  	if o.mset == nil {
  5439  		return
  5440  	}
  5441  	if seq > o.npf {
  5442  		o.npc++
  5443  	}
  5444  	if seq < o.sseq {
  5445  		return
  5446  	}
  5447  	if o.isPushMode() && o.active || o.isPullMode() && !o.waiting.isEmpty() {
  5448  		o.signalNewMessages()
  5449  	}
  5450  }
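
// Illustrative sketch, not part of the original source: the signal payload is
// just the message's stream sequence encoded as 8 little-endian bytes, which
// processStreamSignal decodes above.
func exampleSignalPayload(seq uint64) []byte {
	var b [8]byte
	binary.LittleEndian.PutUint64(b[:], seq)
	return b[:]
}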
  5451  
  5452  // Used to compare whether two filtered subject lists are equal.
  5453  func subjectSliceEqual(slice1 []string, slice2 []string) bool {
  5454  	if len(slice1) != len(slice2) {
  5455  		return false
  5456  	}
  5457  	set2 := make(map[string]struct{}, len(slice2))
  5458  	for _, val := range slice2 {
  5459  		set2[val] = struct{}{}
  5460  	}
  5461  	for _, val := range slice1 {
  5462  		if _, ok := set2[val]; !ok {
  5463  			return false
  5464  		}
  5465  	}
  5466  	return true
  5467  }
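
// Illustrative sketch, not part of the original source: the comparison is
// order-insensitive, and with equal lengths duplicates can mask a difference.
func exampleSubjectSliceEqual() {
	fmt.Println(subjectSliceEqual([]string{"a", "b"}, []string{"b", "a"})) // true
	fmt.Println(subjectSliceEqual([]string{"a"}, []string{"a", "b"}))      // false: lengths differ
	fmt.Println(subjectSliceEqual([]string{"a", "a"}, []string{"a", "b"})) // true: duplicates are not counted
}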
  5468  
  5469  // Utility for simpler if conditions in Consumer config checks.
  5470  // In a future iteration, we can immediately create `o.subjf` and
  5471  // use it to validate things.
  5472  func gatherSubjectFilters(filter string, filters []string) []string {
  5473  	if filter != _EMPTY_ {
  5474  		filters = append(filters, filter)
  5475  	}
  5476  	// The list of filters should never contain an empty filter.
  5477  	return filters
  5478  }
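
// Illustrative sketch, not part of the original source: merging the legacy
// single FilterSubject with the newer FilterSubjects list.
func exampleGatherFilters() {
	fmt.Println(gatherSubjectFilters("orders.new", nil))          // [orders.new]
	fmt.Println(gatherSubjectFilters("", []string{"a.*", "b.>"})) // [a.* b.>]
}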
  5479  
  5480  // shouldStartMonitor will return true if we should start a monitor
  5481  // goroutine or will return false if one is already running.
  5482  func (o *consumer) shouldStartMonitor() bool {
  5483  	o.mu.Lock()
  5484  	defer o.mu.Unlock()
  5485  
  5486  	if o.inMonitor {
  5487  		return false
  5488  	}
  5489  	o.monitorWg.Add(1)
  5490  	o.inMonitor = true
  5491  	return true
  5492  }
  5493  
  5494  // Clear the monitor running state. The monitor goroutine should
  5495  // call this in a defer to clean up on exit.
  5496  func (o *consumer) clearMonitorRunning() {
  5497  	o.mu.Lock()
  5498  	defer o.mu.Unlock()
  5499  
  5500  	if o.inMonitor {
  5501  		o.monitorWg.Done()
  5502  		o.inMonitor = false
  5503  	}
  5504  }
  5505  
  5506  // Test whether we are in the monitor routine.
  5507  func (o *consumer) isMonitorRunning() bool {
  5508  	o.mu.RLock()
  5509  	defer o.mu.RUnlock()
  5510  	return o.inMonitor
  5511  }
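
// Illustrative sketch, not part of the original source: the intended pairing
// of shouldStartMonitor with clearMonitorRunning around a monitor goroutine.
func (o *consumer) exampleRunMonitor(monitor func()) {
	if !o.shouldStartMonitor() {
		return // one is already running
	}
	go func() {
		defer o.clearMonitorRunning()
		monitor()
	}()
}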
  5512  
  5513  // If we detect that our ackfloor is higher than the stream's last sequence, return this error.
  5514  var errAckFloorHigherThanLastSeq = errors.New("consumer ack floor is higher than streams last sequence")
  5515  
  5516  // If we are a consumer of an interest or workqueue policy stream, process that state and make sure it is consistent.
  5517  func (o *consumer) checkStateForInterestStream() error {
  5518  	o.mu.RLock()
  5519  	// See if we need to process this update if our parent stream is not a limits policy stream.
  5520  	mset := o.mset
  5521  	shouldProcessState := mset != nil && o.retention != LimitsPolicy
  5522  	if o.closed || !shouldProcessState {
  5523  		o.mu.RUnlock()
  5524  		return nil
  5525  	}
  5526  	state, err := o.store.State()
  5527  	o.mu.RUnlock()
  5528  
  5529  	if err != nil {
  5530  		return err
  5531  	}
  5532  
  5533  	asflr := state.AckFloor.Stream
  5534  	// Protect ourselves against rolling backwards.
  5535  	if asflr&(1<<63) != 0 {
  5536  		return nil
  5537  	}
  5538  
  5539  	// We should make sure to update the acks.
  5540  	var ss StreamState
  5541  	mset.store.FastState(&ss)
  5542  
  5543  	// Check if the underlying stream's last sequence is less than our floor.
  5544  	// This can happen if the stream has been reset and has not caught up yet.
  5545  	if asflr > ss.LastSeq {
  5546  		return errAckFloorHigherThanLastSeq
  5547  	}
  5548  
  5549  	for seq := ss.FirstSeq; seq <= asflr; seq++ {
  5550  		mset.ackMsg(o, seq)
  5551  	}
  5552  
  5553  	o.mu.RLock()
  5554  	// Grab our updated state after processing the acks above.
  5555  	state, _ = o.store.State()
  5556  	o.mu.RUnlock()
  5557  
  5558  	// If we have pending, we will need to walk through to delivered in case we missed any of those acks as well.
  5559  	if state != nil && len(state.Pending) > 0 {
  5560  		for seq := state.AckFloor.Stream + 1; seq <= state.Delivered.Stream; seq++ {
  5561  			if _, ok := state.Pending[seq]; !ok {
  5562  				mset.ackMsg(o, seq)
  5563  			}
  5564  		}
  5565  	}
  5566  	return nil
  5567  }
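
// Illustrative sketch, not part of the original source: the two passes of
// checkStateForInterestStream in miniature. First ack everything at or below
// the ack floor, then ack any delivered-but-not-pending gaps above the floor.
func exampleAckFloorWalk(first, asflr, delivered uint64, pending map[uint64]struct{}, ack func(uint64)) {
	for seq := first; seq <= asflr; seq++ {
		ack(seq)
	}
	for seq := asflr + 1; seq <= delivered; seq++ {
		if _, ok := pending[seq]; !ok {
			ack(seq)
		}
	}
}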