github.com/nats-io/nats-server/v2@v2.11.0-preview.2/server/consumer.go

     1  // Copyright 2019-2024 The NATS Authors
     2  // Licensed under the Apache License, Version 2.0 (the "License");
     3  // you may not use this file except in compliance with the License.
     4  // You may obtain a copy of the License at
     5  //
     6  // http://www.apache.org/licenses/LICENSE-2.0
     7  //
     8  // Unless required by applicable law or agreed to in writing, software
     9  // distributed under the License is distributed on an "AS IS" BASIS,
    10  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package server
    15  
    16  import (
    17  	"bytes"
    18  	"encoding/binary"
    19  	"encoding/json"
    20  	"errors"
    21  	"fmt"
    22  	"math/rand"
    23  	"reflect"
    24  	"sort"
    25  	"strconv"
    26  	"strings"
    27  	"sync"
    28  	"sync/atomic"
    29  	"time"
    30  
    31  	"github.com/nats-io/nats-server/v2/server/avl"
    32  	"github.com/nats-io/nuid"
    33  	"golang.org/x/time/rate"
    34  )
    35  
    36  // Headers sent with Request Timeout
    37  const (
    38  	JSPullRequestPendingMsgs  = "Nats-Pending-Messages"
    39  	JSPullRequestPendingBytes = "Nats-Pending-Bytes"
    40  )
    41  
     42  // Header template sent when the batch size was completed, but there were remaining bytes.
    43  const JsPullRequestRemainingBytesT = "NATS/1.0 409 Batch Completed\r\n%s: %d\r\n%s: %d\r\n\r\n"
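         // Illustrative sketch, not part of the original source: formatting the template
         // above with the pending header names yields the 409 status block sent back to a
         // pull requester, e.g. for 5 remaining messages and 1024 remaining bytes:
         //
         //	hdr := fmt.Sprintf(JsPullRequestRemainingBytesT,
         //		JSPullRequestPendingMsgs, 5, JSPullRequestPendingBytes, 1024)
         //	// "NATS/1.0 409 Batch Completed\r\nNats-Pending-Messages: 5\r\nNats-Pending-Bytes: 1024\r\n\r\n"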
    44  
    45  type ConsumerInfo struct {
    46  	Stream         string          `json:"stream_name"`
    47  	Name           string          `json:"name"`
    48  	Created        time.Time       `json:"created"`
    49  	Config         *ConsumerConfig `json:"config,omitempty"`
    50  	Delivered      SequenceInfo    `json:"delivered"`
    51  	AckFloor       SequenceInfo    `json:"ack_floor"`
    52  	NumAckPending  int             `json:"num_ack_pending"`
    53  	NumRedelivered int             `json:"num_redelivered"`
    54  	NumWaiting     int             `json:"num_waiting"`
    55  	NumPending     uint64          `json:"num_pending"`
    56  	Cluster        *ClusterInfo    `json:"cluster,omitempty"`
    57  	PushBound      bool            `json:"push_bound,omitempty"`
    58  	Paused         bool            `json:"paused,omitempty"`
    59  	PauseRemaining time.Duration   `json:"pause_remaining,omitempty"`
    60  	// TimeStamp indicates when the info was gathered
    61  	TimeStamp time.Time `json:"ts"`
    62  }
    63  
    64  type ConsumerConfig struct {
    65  	// Durable is deprecated. All consumers should have names, picked by clients.
    66  	Durable         string          `json:"durable_name,omitempty"`
    67  	Name            string          `json:"name,omitempty"`
    68  	Description     string          `json:"description,omitempty"`
    69  	DeliverPolicy   DeliverPolicy   `json:"deliver_policy"`
    70  	OptStartSeq     uint64          `json:"opt_start_seq,omitempty"`
    71  	OptStartTime    *time.Time      `json:"opt_start_time,omitempty"`
    72  	AckPolicy       AckPolicy       `json:"ack_policy"`
    73  	AckWait         time.Duration   `json:"ack_wait,omitempty"`
    74  	MaxDeliver      int             `json:"max_deliver,omitempty"`
    75  	BackOff         []time.Duration `json:"backoff,omitempty"`
    76  	FilterSubject   string          `json:"filter_subject,omitempty"`
    77  	FilterSubjects  []string        `json:"filter_subjects,omitempty"`
    78  	ReplayPolicy    ReplayPolicy    `json:"replay_policy"`
    79  	RateLimit       uint64          `json:"rate_limit_bps,omitempty"` // Bits per sec
    80  	SampleFrequency string          `json:"sample_freq,omitempty"`
    81  	MaxWaiting      int             `json:"max_waiting,omitempty"`
    82  	MaxAckPending   int             `json:"max_ack_pending,omitempty"`
    83  	Heartbeat       time.Duration   `json:"idle_heartbeat,omitempty"`
    84  	FlowControl     bool            `json:"flow_control,omitempty"`
    85  	HeadersOnly     bool            `json:"headers_only,omitempty"`
    86  
    87  	// Pull based options.
    88  	MaxRequestBatch    int           `json:"max_batch,omitempty"`
    89  	MaxRequestExpires  time.Duration `json:"max_expires,omitempty"`
    90  	MaxRequestMaxBytes int           `json:"max_bytes,omitempty"`
    91  
    92  	// Push based consumers.
    93  	DeliverSubject string `json:"deliver_subject,omitempty"`
    94  	DeliverGroup   string `json:"deliver_group,omitempty"`
    95  
    96  	// Ephemeral inactivity threshold.
    97  	InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`
    98  
     99  	// Generally inherited from the parent stream, but can now be configured directly.
   100  	Replicas int `json:"num_replicas"`
   101  	// Force memory storage.
   102  	MemoryStorage bool `json:"mem_storage,omitempty"`
   103  
   104  	// Don't add to general clients.
   105  	Direct bool `json:"direct,omitempty"`
   106  
   107  	// Metadata is additional metadata for the Consumer.
   108  	Metadata map[string]string `json:"metadata,omitempty"`
   109  
   110  	// PauseUntil is for suspending the consumer until the deadline.
   111  	PauseUntil *time.Time `json:"pause_until,omitempty"`
   112  }
   113  
   114  // SequenceInfo has both the consumer and the stream sequence and last activity.
   115  type SequenceInfo struct {
   116  	Consumer uint64     `json:"consumer_seq"`
   117  	Stream   uint64     `json:"stream_seq"`
   118  	Last     *time.Time `json:"last_active,omitempty"`
   119  }
   120  
   121  type CreateConsumerRequest struct {
   122  	Stream string         `json:"stream_name"`
   123  	Config ConsumerConfig `json:"config"`
   124  	Action ConsumerAction `json:"action"`
   125  }
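         // Illustrative sketch, not part of the original source: a minimal pull consumer
         // create request as these types would marshal it. The stream "ORDERS" and consumer
         // "C1" names are made up, and it is assumed the policy enums marshal to the
         // lowercase strings their String() methods below suggest:
         //
         //	{
         //	  "stream_name": "ORDERS",
         //	  "config": {
         //	    "name": "C1",
         //	    "deliver_policy": "all",
         //	    "ack_policy": "explicit",
         //	    "replay_policy": "instant",
         //	    "num_replicas": 0
         //	  },
         //	  "action": "create"
         //	}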
   126  
   127  type ConsumerAction int
   128  
   129  const (
   130  	ActionCreateOrUpdate ConsumerAction = iota
   131  	ActionUpdate
   132  	ActionCreate
   133  )
   134  
   135  const (
   136  	actionUpdateJSONString         = `"update"`
   137  	actionCreateJSONString         = `"create"`
   138  	actionCreateOrUpdateJSONString = `""`
   139  )
   140  
   141  var (
   142  	actionUpdateJSONBytes         = []byte(actionUpdateJSONString)
   143  	actionCreateJSONBytes         = []byte(actionCreateJSONString)
   144  	actionCreateOrUpdateJSONBytes = []byte(actionCreateOrUpdateJSONString)
   145  )
   146  
   147  func (a ConsumerAction) String() string {
   148  	switch a {
   149  	case ActionCreateOrUpdate:
   150  		return actionCreateOrUpdateJSONString
   151  	case ActionCreate:
   152  		return actionCreateJSONString
   153  	case ActionUpdate:
   154  		return actionUpdateJSONString
   155  	}
   156  	return actionCreateOrUpdateJSONString
   157  }
   158  
   159  func (a ConsumerAction) MarshalJSON() ([]byte, error) {
   160  	switch a {
   161  	case ActionCreate:
   162  		return actionCreateJSONBytes, nil
   163  	case ActionUpdate:
   164  		return actionUpdateJSONBytes, nil
   165  	case ActionCreateOrUpdate:
   166  		return actionCreateOrUpdateJSONBytes, nil
   167  	default:
   168  		return nil, fmt.Errorf("can not marshal %v", a)
   169  	}
   170  }
   171  
   172  func (a *ConsumerAction) UnmarshalJSON(data []byte) error {
   173  	switch string(data) {
   174  	case actionCreateJSONString:
   175  		*a = ActionCreate
   176  	case actionUpdateJSONString:
   177  		*a = ActionUpdate
   178  	case actionCreateOrUpdateJSONString:
   179  		*a = ActionCreateOrUpdate
   180  	default:
   181  		return fmt.Errorf("unknown consumer action: %v", string(data))
   182  	}
   183  	return nil
   184  }
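         // Illustrative sketch, not part of the original source: ConsumerAction round-trips
         // through the JSON strings above, with the empty string mapping to the default
         // ActionCreateOrUpdate:
         //
         //	b, _ := json.Marshal(ActionCreate) // b == []byte(`"create"`)
         //	var a ConsumerAction
         //	_ = a.UnmarshalJSON([]byte(`""`))  // a == ActionCreateOrUpdate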
   185  
   186  // ConsumerNakOptions is for optional NAK values, e.g. delay.
   187  type ConsumerNakOptions struct {
   188  	Delay time.Duration `json:"delay"`
   189  }
   190  
   191  // DeliverPolicy determines how the consumer should select the first message to deliver.
   192  type DeliverPolicy int
   193  
   194  const (
   195  	// DeliverAll will be the default so can be omitted from the request.
   196  	DeliverAll DeliverPolicy = iota
   197  	// DeliverLast will start the consumer with the last sequence received.
   198  	DeliverLast
   199  	// DeliverNew will only deliver new messages that are sent after the consumer is created.
   200  	DeliverNew
   201  	// DeliverByStartSequence will look for a defined starting sequence to start.
   202  	DeliverByStartSequence
   203  	// DeliverByStartTime will select the first messsage with a timestamp >= to StartTime.
   204  	DeliverByStartTime
   205  	// DeliverLastPerSubject will start the consumer with the last message for all subjects received.
   206  	DeliverLastPerSubject
   207  )
   208  
   209  func (dp DeliverPolicy) String() string {
   210  	switch dp {
   211  	case DeliverAll:
   212  		return "all"
   213  	case DeliverLast:
   214  		return "last"
   215  	case DeliverNew:
   216  		return "new"
   217  	case DeliverByStartSequence:
   218  		return "by_start_sequence"
   219  	case DeliverByStartTime:
   220  		return "by_start_time"
   221  	case DeliverLastPerSubject:
   222  		return "last_per_subject"
   223  	default:
   224  		return "undefined"
   225  	}
   226  }
   227  
   228  // AckPolicy determines how the consumer should acknowledge delivered messages.
   229  type AckPolicy int
   230  
   231  const (
   232  	// AckNone requires no acks for delivered messages.
   233  	AckNone AckPolicy = iota
    234  	// AckAll, when acking a sequence number, implicitly acks all sequences below it as well.
   235  	AckAll
   236  	// AckExplicit requires ack or nack for all messages.
   237  	AckExplicit
   238  )
   239  
   240  func (a AckPolicy) String() string {
   241  	switch a {
   242  	case AckNone:
   243  		return "none"
   244  	case AckAll:
   245  		return "all"
   246  	default:
   247  		return "explicit"
   248  	}
   249  }
   250  
   251  // ReplayPolicy determines how the consumer should replay messages it already has queued in the stream.
   252  type ReplayPolicy int
   253  
   254  const (
   255  	// ReplayInstant will replay messages as fast as possible.
   256  	ReplayInstant ReplayPolicy = iota
   257  	// ReplayOriginal will maintain the same timing as the messages were received.
   258  	ReplayOriginal
   259  )
   260  
   261  func (r ReplayPolicy) String() string {
   262  	switch r {
   263  	case ReplayInstant:
   264  		return replayInstantPolicyJSONString
   265  	default:
   266  		return replayOriginalPolicyJSONString
   267  	}
   268  }
   269  
   270  // OK
   271  const OK = "+OK"
   272  
    273  // Ack responses. Note that a nil or no payload is the same as AckAck
   274  var (
   275  	// Ack
   276  	AckAck = []byte("+ACK") // nil or no payload to ack subject also means ACK
   277  	AckOK  = []byte(OK)     // deprecated but +OK meant ack as well.
   278  
   279  	// Nack
   280  	AckNak = []byte("-NAK")
   281  	// Progress indicator
   282  	AckProgress = []byte("+WPI")
   283  	// Ack + Deliver the next message(s).
   284  	AckNext = []byte("+NXT")
   285  	// Terminate delivery of the message.
   286  	AckTerm = []byte("+TERM")
   287  
   288  	// reasons to supply when terminating messages using limits
   289  	ackTermLimitsReason        = "Message deleted by stream limits"
   290  	ackTermUnackedLimitsReason = "Unacknowledged message was deleted"
   291  )
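         // Illustrative sketch, not part of the original source: clients publish these
         // payloads to a delivered message's reply (ack) subject. The nc/msg handles below
         // are assumed nats.go client values, and the NAK delay payload is assumed to be
         // ConsumerNakOptions encoded as JSON after the -NAK token (5s in nanoseconds):
         //
         //	nc.Publish(msg.Reply, AckAck)   // nil or empty payload also acks
         //	nc.Publish(msg.Reply, AckTerm)  // terminate delivery, no more redeliveries
         //	nc.Publish(msg.Reply, []byte(`-NAK {"delay": 5000000000}`)) // redeliver after ~5s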
   292  
   293  // Calculate accurate replicas for the consumer config with the parent stream config.
   294  func (consCfg ConsumerConfig) replicas(strCfg *StreamConfig) int {
   295  	if consCfg.Replicas == 0 || consCfg.Replicas > strCfg.Replicas {
   296  		if !isDurableConsumer(&consCfg) && strCfg.Retention == LimitsPolicy && consCfg.Replicas == 0 {
   297  			// Matches old-school ephemerals only, where the replica count is 0.
   298  			return 1
   299  		}
   300  		return strCfg.Replicas
   301  	}
   302  	return consCfg.Replicas
   303  }
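         // Illustrative sketch, not part of the original source: how replicas() resolves
         // against a parent stream configured with Replicas: 3.
         //
         //	stream := &StreamConfig{Replicas: 3, Retention: LimitsPolicy}
         //	ConsumerConfig{Durable: "D"}.replicas(stream) // 3: durable inherits the stream
         //	ConsumerConfig{Replicas: 5}.replicas(stream)  // 3: capped at the stream replicas
         //	ConsumerConfig{Replicas: 1}.replicas(stream)  // 1: explicit lower value honored
         //	ConsumerConfig{}.replicas(stream)             // 1: old-school ephemeral (R0)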
   304  
   305  // Consumer is a jetstream consumer.
   306  type consumer struct {
   307  	// Atomic used to notify that we want to process an ack.
   308  	// This will be checked in checkPending to abort processing
   309  	// and let ack be processed in priority.
   310  	awl               int64
   311  	leader            atomic.Bool
   312  	mu                sync.RWMutex
   313  	js                *jetStream
   314  	mset              *stream
   315  	acc               *Account
   316  	srv               *Server
   317  	client            *client
   318  	sysc              *client
   319  	sid               int
   320  	name              string
   321  	stream            string
   322  	sseq              uint64         // next stream sequence
   323  	subjf             subjectFilters // subject filters and their sequences
   324  	filters           *Sublist       // When we have multiple filters we will use LoadNextMsgMulti and pass this in.
   325  	dseq              uint64         // delivered consumer sequence
   326  	adflr             uint64         // ack delivery floor
   327  	asflr             uint64         // ack store floor
   328  	npc               int64          // Num Pending Count
   329  	npf               uint64         // Num Pending Floor Sequence
   330  	dsubj             string
   331  	qgroup            string
   332  	lss               *lastSeqSkipList
   333  	rlimit            *rate.Limiter
   334  	reqSub            *subscription
   335  	ackSub            *subscription
   336  	ackReplyT         string
   337  	ackSubj           string
   338  	nextMsgSubj       string
   339  	nextMsgReqs       *ipQueue[*nextMsgReq]
   340  	maxp              int
   341  	pblimit           int
   342  	maxpb             int
   343  	pbytes            int
   344  	fcsz              int
   345  	fcid              string
   346  	fcSub             *subscription
   347  	outq              *jsOutQ
   348  	pending           map[uint64]*Pending
   349  	ptmr              *time.Timer
   350  	rdq               []uint64
   351  	rdqi              avl.SequenceSet
   352  	rdc               map[uint64]uint64
   353  	maxdc             uint64
   354  	waiting           *waitQueue
   355  	cfg               ConsumerConfig
   356  	ici               *ConsumerInfo
   357  	store             ConsumerStore
   358  	active            bool
   359  	replay            bool
   360  	dtmr              *time.Timer
   361  	uptmr             *time.Timer // Unpause timer
   362  	gwdtmr            *time.Timer
   363  	dthresh           time.Duration
   364  	mch               chan struct{} // Message channel
   365  	qch               chan struct{} // Quit channel
   366  	inch              chan bool     // Interest change channel
   367  	sfreq             int32
   368  	ackEventT         string
   369  	nakEventT         string
   370  	deliveryExcEventT string
   371  	created           time.Time
   372  	ldt               time.Time
   373  	lat               time.Time
   374  	lwqic             time.Time
   375  	closed            bool
   376  
   377  	// Clustered.
   378  	ca        *consumerAssignment
   379  	node      RaftNode
   380  	infoSub   *subscription
   381  	lqsent    time.Time
   382  	prm       map[string]struct{}
   383  	prOk      bool
   384  	uch       chan struct{}
   385  	retention RetentionPolicy
   386  
   387  	monitorWg sync.WaitGroup
   388  	inMonitor bool
   389  
   390  	// R>1 proposals
   391  	pch   chan struct{}
   392  	phead *proposal
   393  	ptail *proposal
   394  
   395  	// Ack queue
   396  	ackMsgs *ipQueue[*jsAckMsg]
   397  
   398  	// for stream signaling when multiple filters are set.
   399  	sigSubs []*subscription
   400  }
   401  
   402  // A single subject filter.
   403  type subjectFilter struct {
   404  	subject          string
   405  	tokenizedSubject []string
   406  	hasWildcard      bool
   407  }
   408  
   409  type subjectFilters []*subjectFilter
   410  
   411  // subjects is a helper function used for updating consumers.
    412  // It is not used in the hot path and should not be.
   413  func (s subjectFilters) subjects() []string {
   414  	subjects := make([]string, 0, len(s))
   415  	for _, filter := range s {
   416  		subjects = append(subjects, filter.subject)
   417  	}
   418  	return subjects
   419  }
   420  
   421  type proposal struct {
   422  	data []byte
   423  	next *proposal
   424  }
   425  
   426  const (
   427  	// JsAckWaitDefault is the default AckWait, only applicable on explicit ack policy consumers.
   428  	JsAckWaitDefault = 30 * time.Second
   429  	// JsDeleteWaitTimeDefault is the default amount of time we will wait for non-durable
   430  	// consumers to be in an inactive state before deleting them.
   431  	JsDeleteWaitTimeDefault = 5 * time.Second
   432  	// JsFlowControlMaxPending specifies default pending bytes during flow control that can be
   433  	// outstanding.
   434  	JsFlowControlMaxPending = 32 * 1024 * 1024
   435  	// JsDefaultMaxAckPending is set for consumers with explicit ack that do not set the max ack pending.
   436  	JsDefaultMaxAckPending = 1000
   437  )
   438  
   439  // Helper function to set consumer config defaults from above.
   440  func setConsumerConfigDefaults(config *ConsumerConfig, streamCfg *StreamConfig, lim *JSLimitOpts, accLim *JetStreamAccountLimits) {
   441  	// Set to default if not specified.
   442  	if config.DeliverSubject == _EMPTY_ && config.MaxWaiting == 0 {
   443  		config.MaxWaiting = JSWaitQueueDefaultMax
   444  	}
   445  	// Setup proper default for ack wait if we are in explicit ack mode.
   446  	if config.AckWait == 0 && (config.AckPolicy == AckExplicit || config.AckPolicy == AckAll) {
   447  		config.AckWait = JsAckWaitDefault
   448  	}
   449  	// Setup default of -1, meaning no limit for MaxDeliver.
   450  	if config.MaxDeliver == 0 {
   451  		config.MaxDeliver = -1
   452  	}
   453  	// If BackOff was specified that will override the AckWait and the MaxDeliver.
   454  	if len(config.BackOff) > 0 {
   455  		config.AckWait = config.BackOff[0]
   456  	}
   457  	if config.MaxAckPending == 0 {
   458  		config.MaxAckPending = streamCfg.ConsumerLimits.MaxAckPending
   459  	}
   460  	if config.InactiveThreshold == 0 {
   461  		config.InactiveThreshold = streamCfg.ConsumerLimits.InactiveThreshold
   462  	}
   463  	// Set proper default for max ack pending if we are ack explicit and none has been set.
   464  	if (config.AckPolicy == AckExplicit || config.AckPolicy == AckAll) && config.MaxAckPending == 0 {
   465  		accPending := JsDefaultMaxAckPending
   466  		if lim.MaxAckPending > 0 && lim.MaxAckPending < accPending {
   467  			accPending = lim.MaxAckPending
   468  		}
   469  		if accLim.MaxAckPending > 0 && accLim.MaxAckPending < accPending {
   470  			accPending = accLim.MaxAckPending
   471  		}
   472  		config.MaxAckPending = accPending
   473  	}
   474  	// if applicable set max request batch size
   475  	if config.DeliverSubject == _EMPTY_ && config.MaxRequestBatch == 0 && lim.MaxRequestBatch > 0 {
   476  		config.MaxRequestBatch = lim.MaxRequestBatch
   477  	}
   478  }
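         // Illustrative sketch, not part of the original source: an explicit-ack pull
         // consumer with an otherwise zero config picks up the defaults above, assuming no
         // stream, server, or account limits are configured.
         //
         //	cfg := &ConsumerConfig{AckPolicy: AckExplicit}
         //	setConsumerConfigDefaults(cfg, &StreamConfig{}, &JSLimitOpts{}, &JetStreamAccountLimits{})
         //	// cfg.MaxWaiting    == JSWaitQueueDefaultMax
         //	// cfg.AckWait       == JsAckWaitDefault (30s)
         //	// cfg.MaxDeliver    == -1 (unlimited)
         //	// cfg.MaxAckPending == JsDefaultMaxAckPending (1000)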
   479  
    480  // Check the consumer config. If we are recovering, relax checks that would prevent an existing consumer from coming back up.
   481  func checkConsumerCfg(
   482  	config *ConsumerConfig,
   483  	srvLim *JSLimitOpts,
   484  	cfg *StreamConfig,
   485  	_ *Account,
   486  	accLim *JetStreamAccountLimits,
   487  	isRecovering bool,
   488  ) *ApiError {
   489  
   490  	// Check if replicas is defined but exceeds parent stream.
   491  	if config.Replicas > 0 && config.Replicas > cfg.Replicas {
   492  		return NewJSConsumerReplicasExceedsStreamError()
   493  	}
   494  	// Check that it is not negative
   495  	if config.Replicas < 0 {
   496  		return NewJSReplicasCountCannotBeNegativeError()
   497  	}
   498  	// If the stream is interest or workqueue retention make sure the replicas
   499  	// match that of the stream. This is REQUIRED for now.
   500  	if cfg.Retention == InterestPolicy || cfg.Retention == WorkQueuePolicy {
   501  		// Only error here if not recovering.
    502  		// We handle recovering in a different spot to allow the consumer to come up
    503  		// if a previous version allowed it to be created; we do not want to block it from coming up.
   504  		if !isRecovering && config.Replicas != 0 && config.Replicas != cfg.Replicas {
   505  			return NewJSConsumerReplicasShouldMatchStreamError()
   506  		}
   507  	}
   508  
    509  	// If we have a BackOff defined, check that MaxDeliver is within range.
   510  	if lbo := len(config.BackOff); lbo > 0 && config.MaxDeliver != -1 && config.MaxDeliver <= lbo {
   511  		return NewJSConsumerMaxDeliverBackoffError()
   512  	}
   513  
   514  	if len(config.Description) > JSMaxDescriptionLen {
   515  		return NewJSConsumerDescriptionTooLongError(JSMaxDescriptionLen)
   516  	}
   517  
    518  	// For now expect a literal subject if it's not empty. Empty means work queue mode (pull mode).
   519  	if config.DeliverSubject != _EMPTY_ {
   520  		if !subjectIsLiteral(config.DeliverSubject) {
   521  			return NewJSConsumerDeliverToWildcardsError()
   522  		}
   523  		if !IsValidSubject(config.DeliverSubject) {
   524  			return NewJSConsumerInvalidDeliverSubjectError()
   525  		}
   526  		if deliveryFormsCycle(cfg, config.DeliverSubject) {
   527  			return NewJSConsumerDeliverCycleError()
   528  		}
   529  		if config.MaxWaiting != 0 {
   530  			return NewJSConsumerPushMaxWaitingError()
   531  		}
   532  		if config.MaxAckPending > 0 && config.AckPolicy == AckNone {
   533  			return NewJSConsumerMaxPendingAckPolicyRequiredError()
   534  		}
   535  		if config.Heartbeat > 0 && config.Heartbeat < 100*time.Millisecond {
   536  			return NewJSConsumerSmallHeartbeatError()
   537  		}
   538  	} else {
   539  		// Pull mode with work queue retention from the stream requires an explicit ack.
   540  		if config.AckPolicy == AckNone && cfg.Retention == WorkQueuePolicy {
   541  			return NewJSConsumerPullRequiresAckError()
   542  		}
   543  		if config.RateLimit > 0 {
   544  			return NewJSConsumerPullWithRateLimitError()
   545  		}
   546  		if config.MaxWaiting < 0 {
   547  			return NewJSConsumerMaxWaitingNegativeError()
   548  		}
   549  		if config.Heartbeat > 0 {
   550  			return NewJSConsumerHBRequiresPushError()
   551  		}
   552  		if config.FlowControl {
   553  			return NewJSConsumerFCRequiresPushError()
   554  		}
   555  		if config.MaxRequestBatch < 0 {
   556  			return NewJSConsumerMaxRequestBatchNegativeError()
   557  		}
   558  		if config.MaxRequestExpires != 0 && config.MaxRequestExpires < time.Millisecond {
   559  			return NewJSConsumerMaxRequestExpiresToSmallError()
   560  		}
   561  		if srvLim.MaxRequestBatch > 0 && config.MaxRequestBatch > srvLim.MaxRequestBatch {
   562  			return NewJSConsumerMaxRequestBatchExceededError(srvLim.MaxRequestBatch)
   563  		}
   564  	}
   565  	if srvLim.MaxAckPending > 0 && config.MaxAckPending > srvLim.MaxAckPending {
   566  		return NewJSConsumerMaxPendingAckExcessError(srvLim.MaxAckPending)
   567  	}
   568  	if accLim.MaxAckPending > 0 && config.MaxAckPending > accLim.MaxAckPending {
   569  		return NewJSConsumerMaxPendingAckExcessError(accLim.MaxAckPending)
   570  	}
   571  	if cfg.ConsumerLimits.MaxAckPending > 0 && config.MaxAckPending > cfg.ConsumerLimits.MaxAckPending {
   572  		return NewJSConsumerMaxPendingAckExcessError(cfg.ConsumerLimits.MaxAckPending)
   573  	}
   574  	if cfg.ConsumerLimits.InactiveThreshold > 0 && config.InactiveThreshold > cfg.ConsumerLimits.InactiveThreshold {
   575  		return NewJSConsumerInactiveThresholdExcessError(cfg.ConsumerLimits.InactiveThreshold)
   576  	}
   577  
    578  	// Direct consumers need to be non-mapped ephemerals.
   579  	if config.Direct {
   580  		if config.DeliverSubject == _EMPTY_ {
   581  			return NewJSConsumerDirectRequiresPushError()
   582  		}
   583  		if isDurableConsumer(config) {
   584  			return NewJSConsumerDirectRequiresEphemeralError()
   585  		}
   586  	}
   587  
   588  	// Do not allow specifying both FilterSubject and FilterSubjects,
   589  	// as that's probably unintentional without any difference from passing
   590  	// all filters in FilterSubjects.
   591  	if config.FilterSubject != _EMPTY_ && len(config.FilterSubjects) > 0 {
   592  		return NewJSConsumerDuplicateFilterSubjectsError()
   593  	}
   594  
   595  	if config.FilterSubject != _EMPTY_ && !IsValidSubject(config.FilterSubject) {
   596  		return NewJSStreamInvalidConfigError(ErrBadSubject)
   597  	}
   598  
   599  	// We treat FilterSubjects: []string{""} as a misconfig, so we validate against it.
   600  	for _, filter := range config.FilterSubjects {
   601  		if filter == _EMPTY_ {
   602  			return NewJSConsumerEmptyFilterError()
   603  		}
   604  	}
   605  	subjectFilters := gatherSubjectFilters(config.FilterSubject, config.FilterSubjects)
   606  
   607  	// Check subject filters do not overlap.
   608  	for outer, subject := range subjectFilters {
   609  		if !IsValidSubject(subject) {
   610  			return NewJSStreamInvalidConfigError(ErrBadSubject)
   611  		}
   612  		for inner, ssubject := range subjectFilters {
   613  			if inner != outer && SubjectsCollide(subject, ssubject) {
   614  				return NewJSConsumerOverlappingSubjectFiltersError()
   615  			}
   616  		}
   617  	}
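         	// Illustrative sketch, not part of the original source: the overlap check above
         	// rejects any filter set where two subjects could match the same message, e.g.
         	//
         	//	SubjectsCollide("foo.>", "foo.*.bar")  // true  -> rejected as overlapping
         	//	SubjectsCollide("foo.bar", "foo.baz")  // false -> accepted as disjoint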
   618  
   619  	// Helper function to formulate similar errors.
   620  	badStart := func(dp, start string) error {
   621  		return fmt.Errorf("consumer delivery policy is deliver %s, but optional start %s is also set", dp, start)
   622  	}
   623  	notSet := func(dp, notSet string) error {
   624  		return fmt.Errorf("consumer delivery policy is deliver %s, but optional %s is not set", dp, notSet)
   625  	}
   626  
   627  	// Check on start position conflicts.
   628  	switch config.DeliverPolicy {
   629  	case DeliverAll:
   630  		if config.OptStartSeq > 0 {
   631  			return NewJSConsumerInvalidPolicyError(badStart("all", "sequence"))
   632  		}
   633  		if config.OptStartTime != nil {
   634  			return NewJSConsumerInvalidPolicyError(badStart("all", "time"))
   635  		}
   636  	case DeliverLast:
   637  		if config.OptStartSeq > 0 {
   638  			return NewJSConsumerInvalidPolicyError(badStart("last", "sequence"))
   639  		}
   640  		if config.OptStartTime != nil {
   641  			return NewJSConsumerInvalidPolicyError(badStart("last", "time"))
   642  		}
   643  	case DeliverLastPerSubject:
   644  		if config.OptStartSeq > 0 {
   645  			return NewJSConsumerInvalidPolicyError(badStart("last per subject", "sequence"))
   646  		}
   647  		if config.OptStartTime != nil {
   648  			return NewJSConsumerInvalidPolicyError(badStart("last per subject", "time"))
   649  		}
   650  		if config.FilterSubject == _EMPTY_ && len(config.FilterSubjects) == 0 {
   651  			return NewJSConsumerInvalidPolicyError(notSet("last per subject", "filter subject"))
   652  		}
   653  	case DeliverNew:
   654  		if config.OptStartSeq > 0 {
   655  			return NewJSConsumerInvalidPolicyError(badStart("new", "sequence"))
   656  		}
   657  		if config.OptStartTime != nil {
   658  			return NewJSConsumerInvalidPolicyError(badStart("new", "time"))
   659  		}
   660  	case DeliverByStartSequence:
   661  		if config.OptStartSeq == 0 {
   662  			return NewJSConsumerInvalidPolicyError(notSet("by start sequence", "start sequence"))
   663  		}
   664  		if config.OptStartTime != nil {
   665  			return NewJSConsumerInvalidPolicyError(badStart("by start sequence", "time"))
   666  		}
   667  	case DeliverByStartTime:
   668  		if config.OptStartTime == nil {
   669  			return NewJSConsumerInvalidPolicyError(notSet("by start time", "start time"))
   670  		}
   671  		if config.OptStartSeq != 0 {
   672  			return NewJSConsumerInvalidPolicyError(badStart("by start time", "start sequence"))
   673  		}
   674  	}
   675  
   676  	if config.SampleFrequency != _EMPTY_ {
   677  		s := strings.TrimSuffix(config.SampleFrequency, "%")
   678  		if sampleFreq, err := strconv.Atoi(s); err != nil || sampleFreq < 0 {
   679  			return NewJSConsumerInvalidSamplingError(err)
   680  		}
   681  	}
   682  
   683  	// We reject if flow control is set without heartbeats.
   684  	if config.FlowControl && config.Heartbeat == 0 {
   685  		return NewJSConsumerWithFlowControlNeedsHeartbeatsError()
   686  	}
   687  
   688  	if config.Durable != _EMPTY_ && config.Name != _EMPTY_ {
   689  		if config.Name != config.Durable {
   690  			return NewJSConsumerCreateDurableAndNameMismatchError()
   691  		}
   692  	}
   693  
   694  	var metadataLen int
   695  	for k, v := range config.Metadata {
   696  		metadataLen += len(k) + len(v)
   697  	}
   698  	if metadataLen > JSMaxMetadataLen {
   699  		return NewJSConsumerMetadataLengthError(fmt.Sprintf("%dKB", JSMaxMetadataLen/1024))
   700  	}
   701  
   702  	return nil
   703  }
   704  
   705  func (mset *stream) addConsumerWithAction(config *ConsumerConfig, action ConsumerAction) (*consumer, error) {
   706  	return mset.addConsumerWithAssignment(config, _EMPTY_, nil, false, action)
   707  }
   708  
   709  func (mset *stream) addConsumer(config *ConsumerConfig) (*consumer, error) {
   710  	return mset.addConsumerWithAction(config, ActionCreateOrUpdate)
   711  }
   712  
   713  func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname string, ca *consumerAssignment, isRecovering bool, action ConsumerAction) (*consumer, error) {
   714  	// Check if this stream has closed.
   715  	if mset.closed.Load() {
   716  		return nil, NewJSStreamInvalidError()
   717  	}
   718  
   719  	mset.mu.RLock()
   720  	s, jsa, tierName, cfg, acc := mset.srv, mset.jsa, mset.tier, mset.cfg, mset.acc
   721  	retention := cfg.Retention
   722  	mset.mu.RUnlock()
   723  
    724  	// If we do not have the consumer currently assigned to us in cluster mode we will proceed but log at debug level.
   725  	// This can happen on startup with restored state where on meta replay we still do not have
   726  	// the assignment. Running in single server mode this always returns true.
   727  	if oname != _EMPTY_ && !jsa.consumerAssigned(mset.name(), oname) {
   728  		s.Debugf("Consumer %q > %q does not seem to be assigned to this server", mset.name(), oname)
   729  	}
   730  
   731  	if config == nil {
   732  		return nil, NewJSConsumerConfigRequiredError()
   733  	}
   734  
   735  	jsa.usageMu.RLock()
   736  	selectedLimits, limitsFound := jsa.limits[tierName]
   737  	jsa.usageMu.RUnlock()
   738  	if !limitsFound {
   739  		return nil, NewJSNoLimitsError()
   740  	}
   741  
   742  	srvLim := &s.getOpts().JetStreamLimits
   743  	// Make sure we have sane defaults. Do so with the JS lock, otherwise a
   744  	// badly timed meta snapshot can result in a race condition.
   745  	mset.js.mu.Lock()
   746  	setConsumerConfigDefaults(config, &mset.cfg, srvLim, &selectedLimits)
   747  	mset.js.mu.Unlock()
   748  
   749  	if err := checkConsumerCfg(config, srvLim, &cfg, acc, &selectedLimits, isRecovering); err != nil {
   750  		return nil, err
   751  	}
   752  	sampleFreq := 0
   753  	if config.SampleFrequency != _EMPTY_ {
   754  		// Can't fail as checkConsumerCfg checks correct format
   755  		sampleFreq, _ = strconv.Atoi(strings.TrimSuffix(config.SampleFrequency, "%"))
   756  	}
   757  
   758  	// Grab the client, account and server reference.
   759  	c := mset.client
   760  	if c == nil {
   761  		return nil, NewJSStreamInvalidError()
   762  	}
   763  	var accName string
   764  	c.mu.Lock()
   765  	s, a := c.srv, c.acc
   766  	if a != nil {
   767  		accName = a.Name
   768  	}
   769  	c.mu.Unlock()
   770  
   771  	// Hold mset lock here.
   772  	mset.mu.Lock()
   773  	if mset.client == nil || mset.store == nil || mset.consumers == nil {
   774  		mset.mu.Unlock()
   775  		return nil, NewJSStreamInvalidError()
   776  	}
   777  
    778  	// If this one is durable and already exists, we allow it as long as it is only updating what is allowed to change.
   779  	var cName string
   780  	if isDurableConsumer(config) {
   781  		cName = config.Durable
   782  	} else if config.Name != _EMPTY_ {
   783  		cName = config.Name
   784  	}
   785  	if cName != _EMPTY_ {
   786  		if eo, ok := mset.consumers[cName]; ok {
   787  			mset.mu.Unlock()
   788  			if action == ActionCreate && !reflect.DeepEqual(*config, eo.config()) {
   789  				return nil, NewJSConsumerAlreadyExistsError()
   790  			}
   791  			// Check for overlapping subjects if we are a workqueue
   792  			if mset.cfg.Retention == WorkQueuePolicy {
   793  				subjects := gatherSubjectFilters(config.FilterSubject, config.FilterSubjects)
   794  				if !mset.partitionUnique(cName, subjects) {
   795  					return nil, NewJSConsumerWQConsumerNotUniqueError()
   796  				}
   797  			}
   798  			err := eo.updateConfig(config)
   799  			if err == nil {
   800  				return eo, nil
   801  			}
   802  			return nil, NewJSConsumerCreateError(err, Unless(err))
   803  		}
   804  	}
   805  	if action == ActionUpdate {
   806  		mset.mu.Unlock()
   807  		return nil, NewJSConsumerDoesNotExistError()
   808  	}
   809  
    810  	// Check for any consumer limits. If the stream config sets a limit we check against that,
    811  	// but if not we use the value from the account limits. If the account limits are more
    812  	// restrictive than the stream config we prefer the account limits, to handle cases where
    813  	// account limits are updated during the lifecycle of the stream.
   814  	maxc := mset.cfg.MaxConsumers
   815  	if maxc <= 0 || (selectedLimits.MaxConsumers > 0 && selectedLimits.MaxConsumers < maxc) {
   816  		maxc = selectedLimits.MaxConsumers
   817  	}
   818  	if maxc > 0 && mset.numPublicConsumers() >= maxc {
   819  		mset.mu.Unlock()
   820  		return nil, NewJSMaximumConsumersLimitError()
   821  	}
   822  
   823  	// Check on stream type conflicts with WorkQueues.
   824  	if mset.cfg.Retention == WorkQueuePolicy && !config.Direct {
   825  		// Force explicit acks here.
   826  		if config.AckPolicy != AckExplicit {
   827  			mset.mu.Unlock()
   828  			return nil, NewJSConsumerWQRequiresExplicitAckError()
   829  		}
   830  
   831  		if len(mset.consumers) > 0 {
   832  			subjects := gatherSubjectFilters(config.FilterSubject, config.FilterSubjects)
   833  			if len(subjects) == 0 {
   834  				mset.mu.Unlock()
   835  				return nil, NewJSConsumerWQMultipleUnfilteredError()
   836  			} else if !mset.partitionUnique(cName, subjects) {
   837  				// Prior to v2.9.7, on a stream with WorkQueue policy, the servers
   838  				// were not catching the error of having multiple consumers with
   839  				// overlapping filter subjects depending on the scope, for instance
   840  				// creating "foo.*.bar" and then "foo.>" was not detected, while
   841  				// "foo.>" and then "foo.*.bar" would have been. Failing here
   842  				// in recovery mode would leave the rejected consumer in a bad state,
   843  				// so we will simply warn here, asking the user to remove this
   844  				// consumer administratively. Otherwise, if this is the creation
   845  				// of a new consumer, we will return the error.
   846  				if isRecovering {
   847  					s.Warnf("Consumer %q > %q has a filter subject that overlaps "+
   848  						"with other consumers, which is not allowed for a stream "+
   849  						"with WorkQueue policy, it should be administratively deleted",
   850  						cfg.Name, cName)
   851  				} else {
   852  					// We have a partition but it is not unique amongst the others.
   853  					mset.mu.Unlock()
   854  					return nil, NewJSConsumerWQConsumerNotUniqueError()
   855  				}
   856  			}
   857  		}
   858  		if config.DeliverPolicy != DeliverAll {
   859  			mset.mu.Unlock()
   860  			return nil, NewJSConsumerWQConsumerNotDeliverAllError()
   861  		}
   862  	}
   863  
   864  	// Set name, which will be durable name if set, otherwise we create one at random.
   865  	o := &consumer{
   866  		mset:      mset,
   867  		js:        s.getJetStream(),
   868  		acc:       a,
   869  		srv:       s,
   870  		client:    s.createInternalJetStreamClient(),
   871  		sysc:      s.createInternalJetStreamClient(),
   872  		cfg:       *config,
   873  		dsubj:     config.DeliverSubject,
   874  		outq:      mset.outq,
   875  		active:    true,
   876  		qch:       make(chan struct{}),
   877  		uch:       make(chan struct{}, 1),
   878  		mch:       make(chan struct{}, 1),
   879  		sfreq:     int32(sampleFreq),
   880  		maxdc:     uint64(config.MaxDeliver),
   881  		maxp:      config.MaxAckPending,
   882  		retention: retention,
   883  		created:   time.Now().UTC(),
   884  	}
   885  
   886  	// Bind internal client to the user account.
   887  	o.client.registerWithAccount(a)
   888  	// Bind to the system account.
   889  	o.sysc.registerWithAccount(s.SystemAccount())
   890  
   891  	if isDurableConsumer(config) {
   892  		if len(config.Durable) > JSMaxNameLen {
   893  			mset.mu.Unlock()
   894  			o.deleteWithoutAdvisory()
   895  			return nil, NewJSConsumerNameTooLongError(JSMaxNameLen)
   896  		}
   897  		o.name = config.Durable
   898  	} else if oname != _EMPTY_ {
   899  		o.name = oname
   900  	} else {
   901  		if config.Name != _EMPTY_ {
   902  			o.name = config.Name
   903  		} else {
   904  			// Legacy ephemeral auto-generated.
   905  			for {
   906  				o.name = createConsumerName()
   907  				if _, ok := mset.consumers[o.name]; !ok {
   908  					break
   909  				}
   910  			}
   911  			config.Name = o.name
   912  		}
   913  	}
   914  	// Create ackMsgs queue now that we have a consumer name
   915  	o.ackMsgs = newIPQueue[*jsAckMsg](s, fmt.Sprintf("[ACC:%s] consumer '%s' on stream '%s' ackMsgs", accName, o.name, mset.cfg.Name))
   916  
   917  	// Create our request waiting queue.
   918  	if o.isPullMode() {
   919  		o.waiting = newWaitQueue(config.MaxWaiting)
   920  		// Create our internal queue for next msg requests.
   921  		o.nextMsgReqs = newIPQueue[*nextMsgReq](s, fmt.Sprintf("[ACC:%s] consumer '%s' on stream '%s' pull requests", accName, o.name, mset.cfg.Name))
   922  	}
   923  
   924  	// already under lock, mset.Name() would deadlock
   925  	o.stream = mset.cfg.Name
   926  	o.ackEventT = JSMetricConsumerAckPre + "." + o.stream + "." + o.name
   927  	o.nakEventT = JSAdvisoryConsumerMsgNakPre + "." + o.stream + "." + o.name
   928  	o.deliveryExcEventT = JSAdvisoryConsumerMaxDeliveryExceedPre + "." + o.stream + "." + o.name
   929  
   930  	if !isValidName(o.name) {
   931  		mset.mu.Unlock()
   932  		o.deleteWithoutAdvisory()
   933  		return nil, NewJSConsumerBadDurableNameError()
   934  	}
   935  
   936  	// Setup our storage if not a direct consumer.
   937  	if !config.Direct {
   938  		store, err := mset.store.ConsumerStore(o.name, config)
   939  		if err != nil {
   940  			mset.mu.Unlock()
   941  			o.deleteWithoutAdvisory()
   942  			return nil, NewJSConsumerStoreFailedError(err)
   943  		}
   944  		o.store = store
   945  	}
   946  
   947  	for _, filter := range gatherSubjectFilters(o.cfg.FilterSubject, o.cfg.FilterSubjects) {
   948  		sub := &subjectFilter{
   949  			subject:          filter,
   950  			hasWildcard:      subjectHasWildcard(filter),
   951  			tokenizedSubject: tokenizeSubjectIntoSlice(nil, filter),
   952  		}
   953  		o.subjf = append(o.subjf, sub)
   954  	}
   955  
   956  	// If we have multiple filter subjects, create a sublist which we will use
   957  	// in calling store.LoadNextMsgMulti.
   958  	if len(o.cfg.FilterSubjects) > 0 {
   959  		o.filters = NewSublistWithCache()
   960  		for _, filter := range o.cfg.FilterSubjects {
   961  			o.filters.Insert(&subscription{subject: []byte(filter)})
   962  		}
   963  	} else {
   964  		// Make sure this is nil otherwise.
   965  		o.filters = nil
   966  	}
   967  
   968  	if o.store != nil && o.store.HasState() {
   969  		// Restore our saved state.
   970  		o.mu.Lock()
   971  		o.readStoredState(0)
   972  		o.mu.Unlock()
   973  	} else {
   974  		// Select starting sequence number
   975  		o.selectStartingSeqNo()
   976  	}
   977  
   978  	// Now register with mset and create the ack subscription.
   979  	// Check if we already have this one registered.
   980  	if eo, ok := mset.consumers[o.name]; ok {
   981  		mset.mu.Unlock()
   982  		if !o.isDurable() || !o.isPushMode() {
   983  			o.name = _EMPTY_ // Prevent removal since same name.
   984  			o.deleteWithoutAdvisory()
   985  			return nil, NewJSConsumerNameExistError()
   986  		}
   987  		// If we are here we have already registered this durable. If it is still active that is an error.
   988  		if eo.isActive() {
   989  			o.name = _EMPTY_ // Prevent removal since same name.
   990  			o.deleteWithoutAdvisory()
   991  			return nil, NewJSConsumerExistingActiveError()
   992  		}
   993  		// Since we are here this means we have a potentially new durable so we should update here.
   994  		// Check that configs are the same.
   995  		if !configsEqualSansDelivery(o.cfg, eo.cfg) {
   996  			o.name = _EMPTY_ // Prevent removal since same name.
   997  			o.deleteWithoutAdvisory()
   998  			return nil, NewJSConsumerReplacementWithDifferentNameError()
   999  		}
  1000  		// Once we are here we have a replacement push-based durable.
  1001  		eo.updateDeliverSubject(o.cfg.DeliverSubject)
  1002  		return eo, nil
  1003  	}
  1004  
  1005  	// Set up the ack subscription for this consumer. Will use wildcard for all acks.
  1006  	// We will remember the template to generate replies with sequence numbers and use
  1007  	// that to scanf them back in.
  1008  	// Escape '%' in consumer and stream names, as `pre` is used as a template later
  1009  	// in consumer.ackReply(), resulting in erroneous formatting of the ack subject.
  1010  	mn := strings.ReplaceAll(mset.cfg.Name, "%", "%%")
  1011  	pre := fmt.Sprintf(jsAckT, mn, strings.ReplaceAll(o.name, "%", "%%"))
  1012  	o.ackReplyT = fmt.Sprintf("%s.%%d.%%d.%%d.%%d.%%d", pre)
  1013  	o.ackSubj = fmt.Sprintf("%s.*.*.*.*.*", pre)
  1014  	o.nextMsgSubj = fmt.Sprintf(JSApiRequestNextT, mn, o.name)
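         	// Illustrative sketch, not part of the original source: for a stream "ORDERS"
         	// (made-up name) and consumer "C1", the templates above yield ack reply subjects
         	// of the form
         	//
         	//	$JS.ACK.ORDERS.C1.<delivered>.<stream seq>.<consumer seq>.<timestamp>.<pending>
         	//
         	// o.ackSubj ("$JS.ACK.ORDERS.C1.*.*.*.*.*") subscribes to all of them, and
         	// o.nextMsgSubj is the pull request subject "$JS.API.CONSUMER.MSG.NEXT.ORDERS.C1".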
  1015  
  1016  	// Check/update the inactive threshold
  1017  	o.updateInactiveThreshold(&o.cfg)
  1018  
  1019  	if o.isPushMode() {
  1020  		// Check if we are running only 1 replica and that the delivery subject has interest.
  1021  		// Check in place here for interest. Will setup properly in setLeader.
  1022  		if config.replicas(&mset.cfg) == 1 {
  1023  			r := o.acc.sl.Match(o.cfg.DeliverSubject)
  1024  			if !o.hasDeliveryInterest(len(r.psubs)+len(r.qsubs) > 0) {
  1025  				// Let the interest come to us eventually, but setup delete timer.
  1026  				o.updateDeliveryInterest(false)
  1027  			}
  1028  		}
  1029  	}
  1030  
  1031  	// Set our ca.
  1032  	if ca != nil {
  1033  		o.setConsumerAssignment(ca)
  1034  	}
  1035  
  1036  	// Check if we have a rate limit set.
  1037  	if config.RateLimit != 0 {
  1038  		o.setRateLimit(config.RateLimit)
  1039  	}
  1040  
  1041  	mset.setConsumer(o)
  1042  	mset.mu.Unlock()
  1043  
  1044  	if config.Direct || (!s.JetStreamIsClustered() && s.standAloneMode()) {
  1045  		o.setLeader(true)
  1046  	}
  1047  
  1048  	// This is always true in single server mode.
  1049  	if o.IsLeader() {
  1050  		// Send advisory.
  1051  		var suppress bool
  1052  		if !s.standAloneMode() && ca == nil {
  1053  			suppress = true
  1054  		} else if ca != nil {
  1055  			suppress = ca.responded
  1056  		}
  1057  		if !suppress {
  1058  			o.sendCreateAdvisory()
  1059  		}
  1060  	}
  1061  
  1062  	return o, nil
  1063  }
  1064  
   1065  // Updates the consumer `dthresh` delete timer duration and sets
   1066  // cfg.InactiveThreshold to JsDeleteWaitTimeDefault for ephemerals
   1067  // if not already explicitly specified by the user.
  1068  // Lock should be held.
  1069  func (o *consumer) updateInactiveThreshold(cfg *ConsumerConfig) {
  1070  	// Ephemerals will always have inactive thresholds.
  1071  	if !o.isDurable() && cfg.InactiveThreshold <= 0 {
   1072  		// Add in up to 1 sec of jitter above and beyond the default of 5s.
  1073  		o.dthresh = JsDeleteWaitTimeDefault + 100*time.Millisecond + time.Duration(rand.Int63n(900))*time.Millisecond
  1074  		// Only stamp config with default sans jitter.
  1075  		cfg.InactiveThreshold = JsDeleteWaitTimeDefault
  1076  	} else if cfg.InactiveThreshold > 0 {
  1077  		// Add in up to 1 sec of jitter if pull mode.
  1078  		if o.isPullMode() {
  1079  			o.dthresh = cfg.InactiveThreshold + 100*time.Millisecond + time.Duration(rand.Int63n(900))*time.Millisecond
  1080  		} else {
  1081  			o.dthresh = cfg.InactiveThreshold
  1082  		}
  1083  	} else if cfg.InactiveThreshold <= 0 {
  1084  		// We accept InactiveThreshold be set to 0 (for durables)
  1085  		o.dthresh = 0
  1086  	}
  1087  }
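         // Illustrative sketch, not part of the original source: the jitter above keeps a
         // fleet of ephemerals from all expiring at the same instant.
         //
         //	o := &consumer{}                  // ephemeral, no threshold configured
         //	o.updateInactiveThreshold(&o.cfg)
         //	// o.dthresh is in [5.1s, 6.0s); o.cfg.InactiveThreshold == JsDeleteWaitTimeDefault (5s)
         //	// A pull consumer with InactiveThreshold of 30s would get a dthresh in [30.1s, 31.0s).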
  1088  
  1089  // Updates the paused state. If we are the leader and the pause deadline
  1090  // hasn't passed yet then we will start a timer to kick the consumer once
  1091  // that deadline is reached. Lock should be held.
  1092  func (o *consumer) updatePauseState(cfg *ConsumerConfig) {
  1093  	if o.uptmr != nil {
  1094  		stopAndClearTimer(&o.uptmr)
  1095  	}
  1096  	if !o.isLeader() {
  1097  		// Only the leader will run the timer as only the leader will run
  1098  		// loopAndGatherMsgs.
  1099  		return
  1100  	}
  1101  	if cfg.PauseUntil == nil || cfg.PauseUntil.IsZero() || cfg.PauseUntil.Before(time.Now()) {
  1102  		// Either the PauseUntil is unset (is effectively zero) or the
  1103  		// deadline has already passed, in which case there is nothing
  1104  		// to do.
  1105  		return
  1106  	}
  1107  	o.uptmr = time.AfterFunc(time.Until(*cfg.PauseUntil), func() {
  1108  		o.mu.Lock()
  1109  		defer o.mu.Unlock()
  1110  
  1111  		stopAndClearTimer(&o.uptmr)
  1112  		o.sendPauseAdvisoryLocked(&o.cfg)
  1113  		o.signalNewMessages()
  1114  	})
  1115  }
  1116  
  1117  func (o *consumer) consumerAssignment() *consumerAssignment {
  1118  	o.mu.RLock()
  1119  	defer o.mu.RUnlock()
  1120  	return o.ca
  1121  }
  1122  
  1123  func (o *consumer) setConsumerAssignment(ca *consumerAssignment) {
  1124  	o.mu.Lock()
  1125  	defer o.mu.Unlock()
  1126  
  1127  	o.ca = ca
  1128  	if ca == nil {
  1129  		return
  1130  	}
  1131  	// Set our node.
  1132  	o.node = ca.Group.node
  1133  
  1134  	// Trigger update chan.
  1135  	select {
  1136  	case o.uch <- struct{}{}:
  1137  	default:
  1138  	}
  1139  }
  1140  
  1141  func (o *consumer) updateC() <-chan struct{} {
  1142  	o.mu.RLock()
  1143  	defer o.mu.RUnlock()
  1144  	return o.uch
  1145  }
  1146  
  1147  // checkQueueInterest will check on our interest's queue group status.
  1148  // Lock should be held.
  1149  func (o *consumer) checkQueueInterest() {
  1150  	if !o.active || o.cfg.DeliverSubject == _EMPTY_ {
  1151  		return
  1152  	}
  1153  	subj := o.dsubj
  1154  	if subj == _EMPTY_ {
  1155  		subj = o.cfg.DeliverSubject
  1156  	}
  1157  
  1158  	if rr := o.acc.sl.Match(subj); len(rr.qsubs) > 0 {
  1159  		// Just grab first
  1160  		if qsubs := rr.qsubs[0]; len(qsubs) > 0 {
  1161  			if sub := rr.qsubs[0][0]; len(sub.queue) > 0 {
  1162  				o.qgroup = string(sub.queue)
  1163  			}
  1164  		}
  1165  	}
  1166  }
  1167  
   1168  // Clears our node if we have one, e.g. when we scale down to R1.
  1169  func (o *consumer) clearNode() {
  1170  	o.mu.Lock()
  1171  	defer o.mu.Unlock()
  1172  	if o.node != nil {
  1173  		o.node.Delete()
  1174  		o.node = nil
  1175  	}
  1176  }
  1177  
   1178  // IsLeader returns whether we are the current leader.
  1179  func (o *consumer) IsLeader() bool {
  1180  	o.mu.RLock()
  1181  	defer o.mu.RUnlock()
  1182  	return o.isLeader()
  1183  }
  1184  
  1185  // Lock should be held.
  1186  func (o *consumer) isLeader() bool {
  1187  	if o.node != nil {
  1188  		return o.node.Leader()
  1189  	}
  1190  	return true
  1191  }
  1192  
  1193  func (o *consumer) setLeader(isLeader bool) {
  1194  	o.mu.RLock()
  1195  	mset, closed := o.mset, o.closed
  1196  	movingToClustered := o.node != nil && o.pch == nil
  1197  	wasLeader := o.leader.Swap(isLeader)
  1198  	o.mu.RUnlock()
  1199  
  1200  	// If we are here we have a change in leader status.
  1201  	if isLeader {
  1202  		if closed || mset == nil {
  1203  			return
  1204  		}
  1205  
  1206  		if wasLeader {
  1207  			// If we detect we are scaling up, make sure to create clustered routines and channels.
  1208  			if movingToClustered {
  1209  				o.mu.Lock()
  1210  				// We are moving from R1 to clustered.
  1211  				o.pch = make(chan struct{}, 1)
  1212  				go o.loopAndForwardProposals(o.qch)
  1213  				if o.phead != nil {
  1214  					select {
  1215  					case o.pch <- struct{}{}:
  1216  					default:
  1217  					}
  1218  				}
  1219  				o.mu.Unlock()
  1220  			}
  1221  			return
  1222  		}
  1223  
  1224  		mset.mu.RLock()
  1225  		s, jsa, stream, lseq := mset.srv, mset.jsa, mset.cfg.Name, mset.lseq
  1226  		mset.mu.RUnlock()
  1227  
  1228  		// Register as a leader with our parent stream.
  1229  		mset.setConsumerAsLeader(o)
  1230  
  1231  		o.mu.Lock()
  1232  		o.rdq = nil
  1233  		o.rdqi.Empty()
  1234  
  1235  		// Restore our saved state. During non-leader status we just update our underlying store.
  1236  		o.readStoredState(lseq)
  1237  
  1238  		// Setup initial num pending.
  1239  		o.streamNumPending()
  1240  
  1241  		// Cleanup lss when we take over in clustered mode.
  1242  		if o.hasSkipListPending() && o.sseq >= o.lss.resume {
  1243  			o.lss = nil
  1244  		}
  1245  
   1246  		// Update the group on our starting sequence if we are starting but skipped some messages in the stream.
  1247  		if o.dseq == 1 && o.sseq > 1 {
  1248  			o.updateSkipped(o.sseq)
  1249  		}
  1250  
  1251  		// Do info sub.
  1252  		if o.infoSub == nil && jsa != nil {
  1253  			isubj := fmt.Sprintf(clusterConsumerInfoT, jsa.acc(), stream, o.name)
  1254  			// Note below the way we subscribe here is so that we can send requests to ourselves.
  1255  			o.infoSub, _ = s.systemSubscribe(isubj, _EMPTY_, false, o.sysc, o.handleClusterConsumerInfoRequest)
  1256  		}
  1257  
  1258  		var err error
  1259  		if o.cfg.AckPolicy != AckNone {
  1260  			if o.ackSub, err = o.subscribeInternal(o.ackSubj, o.pushAck); err != nil {
  1261  				o.mu.Unlock()
  1262  				o.deleteWithoutAdvisory()
  1263  				return
  1264  			}
  1265  		}
  1266  
  1267  		// Setup the internal sub for next message requests regardless.
  1268  		// Will error if wrong mode to provide feedback to users.
  1269  		if o.reqSub, err = o.subscribeInternal(o.nextMsgSubj, o.processNextMsgReq); err != nil {
  1270  			o.mu.Unlock()
  1271  			o.deleteWithoutAdvisory()
  1272  			return
  1273  		}
  1274  
  1275  		// Check on flow control settings.
  1276  		if o.cfg.FlowControl {
  1277  			o.setMaxPendingBytes(JsFlowControlMaxPending)
  1278  			fcsubj := fmt.Sprintf(jsFlowControl, stream, o.name)
  1279  			if o.fcSub, err = o.subscribeInternal(fcsubj, o.processFlowControl); err != nil {
  1280  				o.mu.Unlock()
  1281  				o.deleteWithoutAdvisory()
  1282  				return
  1283  			}
  1284  		}
  1285  
  1286  		// If push mode, register for notifications on interest.
  1287  		if o.isPushMode() {
  1288  			o.inch = make(chan bool, 8)
  1289  			o.acc.sl.registerNotification(o.cfg.DeliverSubject, o.cfg.DeliverGroup, o.inch)
  1290  			if o.active = <-o.inch; o.active {
  1291  				o.checkQueueInterest()
  1292  			}
  1293  
  1294  			// Check gateways in case they are enabled.
  1295  			if s.gateway.enabled {
  1296  				if !o.active {
  1297  					o.active = s.hasGatewayInterest(o.acc.Name, o.cfg.DeliverSubject)
  1298  				}
  1299  				stopAndClearTimer(&o.gwdtmr)
  1300  				o.gwdtmr = time.AfterFunc(time.Second, func() { o.watchGWinterest() })
  1301  			}
  1302  		}
  1303  
  1304  		if o.dthresh > 0 && (o.isPullMode() || !o.active) {
  1305  			// Pull consumer. We run the dtmr all the time for this one.
  1306  			stopAndClearTimer(&o.dtmr)
  1307  			o.dtmr = time.AfterFunc(o.dthresh, o.deleteNotActive)
  1308  		}
  1309  
  1310  		// Update the consumer pause tracking.
  1311  		o.updatePauseState(&o.cfg)
  1312  
  1313  		// If we are not in ReplayInstant mode mark us as in replay state until resolved.
  1314  		if o.cfg.ReplayPolicy != ReplayInstant {
  1315  			o.replay = true
  1316  		}
  1317  
  1318  		// Recreate quit channel.
  1319  		o.qch = make(chan struct{})
  1320  		qch := o.qch
  1321  		node := o.node
  1322  		if node != nil && o.pch == nil {
  1323  			o.pch = make(chan struct{}, 1)
  1324  		}
  1325  		pullMode := o.isPullMode()
  1326  		o.mu.Unlock()
  1327  
  1328  		// Snapshot initial info.
  1329  		o.infoWithSnap(true)
  1330  
  1331  		// These are the labels we will use to annotate our goroutines.
  1332  		labels := pprofLabels{
  1333  			"type":     "consumer",
  1334  			"account":  mset.accName(),
  1335  			"stream":   mset.name(),
  1336  			"consumer": o.name,
  1337  		}
  1338  
  1339  		// Now start up Go routine to deliver msgs.
  1340  		go func() {
  1341  			setGoRoutineLabels(labels)
  1342  			o.loopAndGatherMsgs(qch)
  1343  		}()
  1344  
  1345  		// Now start up Go routine to process acks.
  1346  		go func() {
  1347  			setGoRoutineLabels(labels)
  1348  			o.processInboundAcks(qch)
  1349  		}()
  1350  
  1351  		if pullMode {
  1352  			// Now start up Go routine to process inbound next message requests.
  1353  			go func() {
  1354  				setGoRoutineLabels(labels)
  1355  				o.processInboundNextMsgReqs(qch)
  1356  			}()
  1357  		}
  1358  
  1359  		// If we are R>1 spin up our proposal loop.
  1360  		if node != nil {
  1361  			// Determine if we can send pending requests info to the group.
  1362  			// They must be on server versions >= 2.7.1
  1363  			o.checkAndSetPendingRequestsOk()
  1364  			o.checkPendingRequests()
  1365  			go func() {
  1366  				setGoRoutineLabels(labels)
  1367  				o.loopAndForwardProposals(qch)
  1368  			}()
  1369  		}
  1370  
  1371  	} else {
  1372  		// Shutdown the go routines and the subscriptions.
  1373  		o.mu.Lock()
  1374  		if o.qch != nil {
  1375  			close(o.qch)
  1376  			o.qch = nil
  1377  		}
  1378  		// Stop any inactivity timers. Should only be running on leaders.
  1379  		stopAndClearTimer(&o.dtmr)
  1380  		// Stop any unpause timers. Should only be running on leaders.
  1381  		stopAndClearTimer(&o.uptmr)
  1382  		// Make sure to clear out any re-deliver queues
  1383  		stopAndClearTimer(&o.ptmr)
  1384  		o.rdq = nil
  1385  		o.rdqi.Empty()
  1386  		o.pending = nil
  1387  		// ok if they are nil, we protect inside unsubscribe()
  1388  		o.unsubscribe(o.ackSub)
  1389  		o.unsubscribe(o.reqSub)
  1390  		o.unsubscribe(o.fcSub)
  1391  		o.ackSub, o.reqSub, o.fcSub = nil, nil, nil
  1392  		if o.infoSub != nil {
  1393  			o.srv.sysUnsubscribe(o.infoSub)
  1394  			o.infoSub = nil
  1395  		}
  1396  		// Reset waiting if we are in pull mode.
  1397  		if o.isPullMode() {
  1398  			o.waiting = newWaitQueue(o.cfg.MaxWaiting)
  1399  			o.nextMsgReqs.drain()
  1400  		} else if o.srv.gateway.enabled {
  1401  			stopAndClearTimer(&o.gwdtmr)
  1402  		}
  1403  		// If we were the leader make sure to drain queued up acks.
  1404  		if wasLeader {
  1405  			o.ackMsgs.drain()
  1406  		}
  1407  		o.mu.Unlock()
  1408  
  1409  		// Unregister as a leader with our parent stream.
  1410  		if mset != nil {
  1411  			mset.removeConsumerAsLeader(o)
  1412  		}
  1413  	}
  1414  }
  1415  
  1416  // This is coming on the wire so do not block here.
  1417  func (o *consumer) handleClusterConsumerInfoRequest(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) {
  1418  	go o.infoWithSnapAndReply(false, reply)
  1419  }
  1420  
  1421  // Lock should be held.
  1422  func (o *consumer) subscribeInternal(subject string, cb msgHandler) (*subscription, error) {
  1423  	c := o.client
  1424  	if c == nil {
  1425  		return nil, fmt.Errorf("invalid consumer")
  1426  	}
  1427  	if !c.srv.EventsEnabled() {
  1428  		return nil, ErrNoSysAccount
  1429  	}
  1430  	if cb == nil {
  1431  		return nil, fmt.Errorf("undefined message handler")
  1432  	}
  1433  
  1434  	o.sid++
  1435  
  1436  	// Now create the subscription
  1437  	return c.processSub([]byte(subject), nil, []byte(strconv.Itoa(o.sid)), cb, false)
  1438  }
  1439  
  1440  // Unsubscribe from our subscription.
  1441  // Lock should be held.
  1442  func (o *consumer) unsubscribe(sub *subscription) {
  1443  	if sub == nil || o.client == nil {
  1444  		return
  1445  	}
  1446  	o.client.processUnsub(sub.sid)
  1447  }
  1448  
  1449  // We need to make sure we protect access to the outq.
  1450  // Do all advisory sends here.
  1451  func (o *consumer) sendAdvisory(subj string, msg []byte) {
  1452  	o.outq.sendMsg(subj, msg)
  1453  }
  1454  
  1455  func (o *consumer) sendDeleteAdvisoryLocked() {
  1456  	e := JSConsumerActionAdvisory{
  1457  		TypedEvent: TypedEvent{
  1458  			Type: JSConsumerActionAdvisoryType,
  1459  			ID:   nuid.Next(),
  1460  			Time: time.Now().UTC(),
  1461  		},
  1462  		Stream:   o.stream,
  1463  		Consumer: o.name,
  1464  		Action:   DeleteEvent,
  1465  		Domain:   o.srv.getOpts().JetStreamDomain,
  1466  	}
  1467  
  1468  	j, err := json.Marshal(e)
  1469  	if err != nil {
  1470  		return
  1471  	}
  1472  
  1473  	subj := JSAdvisoryConsumerDeletedPre + "." + o.stream + "." + o.name
  1474  	o.sendAdvisory(subj, j)
  1475  }
  1476  
  1477  func (o *consumer) sendCreateAdvisory() {
  1478  	o.mu.Lock()
  1479  	defer o.mu.Unlock()
  1480  
  1481  	e := JSConsumerActionAdvisory{
  1482  		TypedEvent: TypedEvent{
  1483  			Type: JSConsumerActionAdvisoryType,
  1484  			ID:   nuid.Next(),
  1485  			Time: time.Now().UTC(),
  1486  		},
  1487  		Stream:   o.stream,
  1488  		Consumer: o.name,
  1489  		Action:   CreateEvent,
  1490  		Domain:   o.srv.getOpts().JetStreamDomain,
  1491  	}
  1492  
  1493  	j, err := json.Marshal(e)
  1494  	if err != nil {
  1495  		return
  1496  	}
  1497  
  1498  	subj := JSAdvisoryConsumerCreatedPre + "." + o.stream + "." + o.name
  1499  	o.sendAdvisory(subj, j)
  1500  }
  1501  
  1502  func (o *consumer) sendPauseAdvisoryLocked(cfg *ConsumerConfig) {
  1503  	e := JSConsumerPauseAdvisory{
  1504  		TypedEvent: TypedEvent{
  1505  			Type: JSConsumerPauseAdvisoryType,
  1506  			ID:   nuid.Next(),
  1507  			Time: time.Now().UTC(),
  1508  		},
  1509  		Stream:   o.stream,
  1510  		Consumer: o.name,
  1511  		Domain:   o.srv.getOpts().JetStreamDomain,
  1512  	}
  1513  
  1514  	if cfg.PauseUntil != nil {
  1515  		e.PauseUntil = *cfg.PauseUntil
  1516  		e.Paused = time.Now().Before(e.PauseUntil)
  1517  	}
  1518  
  1519  	j, err := json.Marshal(e)
  1520  	if err != nil {
  1521  		return
  1522  	}
  1523  
  1524  	subj := JSAdvisoryConsumerPausePre + "." + o.stream + "." + o.name
  1525  	o.sendAdvisory(subj, j)
  1526  }
  1527  
  1528  // Created returns created time.
  1529  func (o *consumer) createdTime() time.Time {
  1530  	o.mu.Lock()
  1531  	created := o.created
  1532  	o.mu.Unlock()
  1533  	return created
  1534  }
  1535  
  1536  // Internal to allow creation time to be restored.
  1537  func (o *consumer) setCreatedTime(created time.Time) {
  1538  	o.mu.Lock()
  1539  	o.created = created
  1540  	o.mu.Unlock()
  1541  }
  1542  
  1543  // This will check for extended interest in a subject. If we have local interest we just return
  1544  // that, but in the absence of local interest and presence of gateways or service imports we need
  1545  // to check those as well.
  1546  func (o *consumer) hasDeliveryInterest(localInterest bool) bool {
  1547  	o.mu.Lock()
  1548  	mset := o.mset
  1549  	if mset == nil {
  1550  		o.mu.Unlock()
  1551  		return false
  1552  	}
  1553  	acc := o.acc
  1554  	deliver := o.cfg.DeliverSubject
  1555  	o.mu.Unlock()
  1556  
  1557  	if localInterest {
  1558  		return true
  1559  	}
  1560  
  1561  	// If we are here check gateways.
  1562  	if s := acc.srv; s != nil && s.hasGatewayInterest(acc.Name, deliver) {
  1563  		return true
  1564  	}
  1565  	return false
  1566  }
  1567  
  1568  func (s *Server) hasGatewayInterest(account, subject string) bool {
  1569  	gw := s.gateway
  1570  	if !gw.enabled {
  1571  		return false
  1572  	}
  1573  	gw.RLock()
  1574  	defer gw.RUnlock()
  1575  	for _, gwc := range gw.outo {
  1576  		psi, qr := gwc.gatewayInterest(account, subject)
  1577  		if psi || qr != nil {
  1578  			return true
  1579  		}
  1580  	}
  1581  	return false
  1582  }
  1583  
  1584  // This processes an update to the local interest for a deliver subject.
  1585  func (o *consumer) updateDeliveryInterest(localInterest bool) bool {
  1586  	interest := o.hasDeliveryInterest(localInterest)
  1587  
  1588  	o.mu.Lock()
  1589  	defer o.mu.Unlock()
  1590  
  1591  	mset := o.mset
  1592  	if mset == nil || o.isPullMode() {
  1593  		return false
  1594  	}
  1595  
  1596  	if interest && !o.active {
  1597  		o.signalNewMessages()
  1598  	}
  1599  	// Update active status; if not active, clear any queue group we captured.
  1600  	if o.active = interest; !o.active {
  1601  		o.qgroup = _EMPTY_
  1602  	} else {
  1603  		o.checkQueueInterest()
  1604  	}
  1605  
  1606  	// If the delete timer has already been set, do not clear it here; just return.
  1607  	// Note that durable can now have an inactive threshold, so don't check
  1608  	// for durable status, instead check for dthresh > 0.
  1609  	if o.dtmr != nil && o.dthresh > 0 && !interest {
  1610  		return true
  1611  	}
  1612  
  1613  	// Stop and clear the delete timer always.
  1614  	stopAndClearTimer(&o.dtmr)
  1615  
  1616  	// If we do not have interest anymore and have a delete threshold set, then set
  1617  	// a timer to delete us. We wait for a bit in case of server reconnect.
  1618  	if !interest && o.dthresh > 0 {
  1619  		o.dtmr = time.AfterFunc(o.dthresh, o.deleteNotActive)
  1620  		return true
  1621  	}
  1622  	return false
  1623  }
  1624  
  1625  func (o *consumer) deleteNotActive() {
  1626  	o.mu.Lock()
  1627  	if o.mset == nil {
  1628  		o.mu.Unlock()
  1629  		return
  1630  	}
  1631  	// Push mode: just look at active.
  1632  	if o.isPushMode() {
  1633  		// If we are active simply return.
  1634  		if o.active {
  1635  			o.mu.Unlock()
  1636  			return
  1637  		}
  1638  	} else {
  1639  		// Pull mode.
  1640  		elapsed := time.Since(o.waiting.last)
  1641  		if elapsed <= o.cfg.InactiveThreshold {
  1642  			// These need to keep firing so reset but use delta.
  1643  			if o.dtmr != nil {
  1644  				o.dtmr.Reset(o.dthresh - elapsed)
  1645  			} else {
  1646  				o.dtmr = time.AfterFunc(o.dthresh-elapsed, o.deleteNotActive)
  1647  			}
  1648  			o.mu.Unlock()
  1649  			return
  1650  		}
  1651  		// Check if we still have valid requests waiting.
  1652  		if o.checkWaitingForInterest() {
  1653  			if o.dtmr != nil {
  1654  				o.dtmr.Reset(o.dthresh)
  1655  			} else {
  1656  				o.dtmr = time.AfterFunc(o.dthresh, o.deleteNotActive)
  1657  			}
  1658  			o.mu.Unlock()
  1659  			return
  1660  		}
  1661  	}
  1662  
  1663  	s, js := o.mset.srv, o.srv.js.Load()
  1664  	acc, stream, name, isDirect := o.acc.Name, o.stream, o.name, o.cfg.Direct
  1665  	o.mu.Unlock()
  1666  
  1667  	// If we are clustered, check if we still have this consumer assigned.
  1668  	// If we do forward a proposal to delete ourselves to the metacontroller leader.
  1669  	if !isDirect && s.JetStreamIsClustered() {
  1670  		js.mu.RLock()
  1671  		var (
  1672  			cca         consumerAssignment
  1673  			meta        RaftNode
  1674  			removeEntry []byte
  1675  		)
  1676  		ca, cc := js.consumerAssignment(acc, stream, name), js.cluster
  1677  		if ca != nil && cc != nil {
  1678  			meta = cc.meta
  1679  			cca = *ca
  1680  			cca.Reply = _EMPTY_
  1681  			removeEntry = encodeDeleteConsumerAssignment(&cca)
  1682  			meta.ForwardProposal(removeEntry)
  1683  		}
  1684  		js.mu.RUnlock()
  1685  
  1686  		if ca != nil && cc != nil {
  1687  			// Check to make sure we went away.
  1688  			// Don't think this needs to be a monitored go routine.
  1689  			go func() {
  1690  				const (
  1691  					startInterval = 30 * time.Second
  1692  					maxInterval   = 5 * time.Minute
  1693  				)
  1694  				jitter := time.Duration(rand.Int63n(int64(startInterval)))
  1695  				interval := startInterval + jitter
  1696  				ticker := time.NewTicker(interval)
  1697  				defer ticker.Stop()
  1698  				for range ticker.C {
  1699  					js.mu.RLock()
  1700  					if js.shuttingDown {
  1701  						js.mu.RUnlock()
  1702  						return
  1703  					}
  1704  					nca := js.consumerAssignment(acc, stream, name)
  1705  					js.mu.RUnlock()
  1706  					// Make sure this is not a new consumer with the same name.
  1707  					if nca != nil && nca == ca {
  1708  						s.Warnf("Consumer assignment for '%s > %s > %s' not cleaned up, retrying", acc, stream, name)
  1709  						meta.ForwardProposal(removeEntry)
  1710  						if interval < maxInterval {
  1711  							interval *= 2
  1712  							ticker.Reset(interval)
  1713  						}
  1714  						continue
  1715  					}
  1716  					// We saw that consumer has been removed, all done.
  1717  					return
  1718  				}
  1719  			}()
  1720  		}
  1721  	}
  1722  
  1723  	// We will delete here regardless.
  1724  	o.delete()
  1725  }
  1726  
  1727  func (o *consumer) watchGWinterest() {
  1728  	pa := o.isActive()
  1729  	// If there is no local interest...
  1730  	if o.hasNoLocalInterest() {
  1731  		o.updateDeliveryInterest(false)
  1732  		if !pa && o.isActive() {
  1733  			o.signalNewMessages()
  1734  		}
  1735  	}
  1736  
  1737  	// We want this to always be running so we can also pick up on interest returning.
  1738  	o.mu.Lock()
  1739  	if o.gwdtmr != nil {
  1740  		o.gwdtmr.Reset(time.Second)
  1741  	} else {
  1742  		stopAndClearTimer(&o.gwdtmr)
  1743  		o.gwdtmr = time.AfterFunc(time.Second, func() { o.watchGWinterest() })
  1744  	}
  1745  	o.mu.Unlock()
  1746  }
  1747  
  1748  // Config returns the consumer's configuration.
  1749  func (o *consumer) config() ConsumerConfig {
  1750  	o.mu.Lock()
  1751  	defer o.mu.Unlock()
  1752  	return o.cfg
  1753  }
  1754  
  1755  // Force expiration of all pending.
  1756  // Lock should be held.
  1757  func (o *consumer) forceExpirePending() {
  1758  	var expired []uint64
  1759  	for seq := range o.pending {
  1760  		if !o.onRedeliverQueue(seq) {
  1761  			expired = append(expired, seq)
  1762  		}
  1763  	}
  1764  	if len(expired) > 0 {
  1765  		sort.Slice(expired, func(i, j int) bool { return expired[i] < expired[j] })
  1766  		o.addToRedeliverQueue(expired...)
  1767  		// Now we should update the timestamp here since we are redelivering.
  1768  		// We will use an incrementing time to preserve order for any other redelivery.
  1769  		off := time.Now().UnixNano() - o.pending[expired[0]].Timestamp
  1770  		for _, seq := range expired {
  1771  			if p, ok := o.pending[seq]; ok && p != nil {
  1772  				p.Timestamp += off
  1773  			}
  1774  		}
  1775  		o.ptmr.Reset(o.ackWait(0))
  1776  	}
  1777  	o.signalNewMessages()
  1778  }
  1779  
  1780  // Acquire proper locks and update rate limit.
  1781  // Will use what is in config.
  1782  func (o *consumer) setRateLimitNeedsLocks() {
  1783  	o.mu.RLock()
  1784  	mset := o.mset
  1785  	o.mu.RUnlock()
  1786  
  1787  	if mset == nil {
  1788  		return
  1789  	}
  1790  
  1791  	mset.mu.RLock()
  1792  	o.mu.Lock()
  1793  	o.setRateLimit(o.cfg.RateLimit)
  1794  	o.mu.Unlock()
  1795  	mset.mu.RUnlock()
  1796  }
  1797  
  1798  // Set the rate limiter
  1799  // Both mset and consumer lock should be held.
  1800  func (o *consumer) setRateLimit(bps uint64) {
  1801  	if bps == 0 {
  1802  		o.rlimit = nil
  1803  		return
  1804  	}
  1805  
  1806  	// TODO(dlc) - Make sane values or error if not sane?
  1807  	// We are configured in bits per sec so adjust to bytes.
  1808  	rl := rate.Limit(bps / 8)
  1809  	mset := o.mset
  1810  
  1811  	// Burst should be set to maximum msg size for this account, etc.
  1812  	var burst int
  1813  	if mset.cfg.MaxMsgSize > 0 {
  1814  		burst = int(mset.cfg.MaxMsgSize)
  1815  	} else if mset.jsa.account.limits.mpay > 0 {
  1816  		burst = int(mset.jsa.account.limits.mpay)
  1817  	} else {
  1818  		s := mset.jsa.account.srv
  1819  		burst = int(s.getOpts().MaxPayload)
  1820  	}
  1821  
  1822  	o.rlimit = rate.NewLimiter(rl, burst)
  1823  }
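
        // An illustrative sketch (not part of the server) of the math above: a
        // RateLimit of 8_000_000 (8 Mbit/s) yields a limiter of roughly 1,000,000
        // bytes per second, with the burst sized so one maximum-size message can
        // always pass. The 1 MiB MaxMsgSize below is an assumed value.
        //
        //	rl := rate.Limit(8_000_000 / 8)       // 1,000,000 bytes/sec
        //	burst := 1 << 20                      // assumed 1 MiB MaxMsgSize
        //	limiter := rate.NewLimiter(rl, burst)
        //	_ = limiter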
  1824  
  1825  // Check if new consumer config allowed vs old.
  1826  func (acc *Account) checkNewConsumerConfig(cfg, ncfg *ConsumerConfig) error {
  1827  	if reflect.DeepEqual(cfg, ncfg) {
  1828  		return nil
  1829  	}
  1830  	// Something different, so check since we only allow certain things to be updated.
  1831  	if cfg.DeliverPolicy != ncfg.DeliverPolicy {
  1832  		return errors.New("deliver policy can not be updated")
  1833  	}
  1834  	if cfg.OptStartSeq != ncfg.OptStartSeq {
  1835  		return errors.New("start sequence can not be updated")
  1836  	}
  1837  	if cfg.OptStartTime != nil && ncfg.OptStartTime != nil {
  1838  		// Both have start times set, compare them directly:
  1839  		if !cfg.OptStartTime.Equal(*ncfg.OptStartTime) {
  1840  			return errors.New("start time can not be updated")
  1841  		}
  1842  	} else if cfg.OptStartTime != nil || ncfg.OptStartTime != nil {
  1843  		// At least one start time is set and the other is not
  1844  		return errors.New("start time can not be updated")
  1845  	}
  1846  	if cfg.AckPolicy != ncfg.AckPolicy {
  1847  		return errors.New("ack policy can not be updated")
  1848  	}
  1849  	if cfg.ReplayPolicy != ncfg.ReplayPolicy {
  1850  		return errors.New("replay policy can not be updated")
  1851  	}
  1852  	if cfg.Heartbeat != ncfg.Heartbeat {
  1853  		return errors.New("heart beats can not be updated")
  1854  	}
  1855  	if cfg.FlowControl != ncfg.FlowControl {
  1856  		return errors.New("flow control can not be updated")
  1857  	}
  1858  	if cfg.MaxWaiting != ncfg.MaxWaiting {
  1859  		return errors.New("max waiting can not be updated")
  1860  	}
  1861  
  1862  	// Deliver Subject is conditional on whether it's bound.
  1863  	if cfg.DeliverSubject != ncfg.DeliverSubject {
  1864  		if cfg.DeliverSubject == _EMPTY_ {
  1865  			return errors.New("can not update pull consumer to push based")
  1866  		}
  1867  		if ncfg.DeliverSubject == _EMPTY_ {
  1868  			return errors.New("can not update push consumer to pull based")
  1869  		}
  1870  		rr := acc.sl.Match(cfg.DeliverSubject)
  1871  		if len(rr.psubs)+len(rr.qsubs) != 0 {
  1872  			return NewJSConsumerNameExistError()
  1873  		}
  1874  	}
  1875  
  1876  	// If BackOff is defined, check that MaxDeliver is within range.
  1877  	if lbo := len(ncfg.BackOff); lbo > 0 && ncfg.MaxDeliver != -1 && ncfg.MaxDeliver <= lbo {
  1878  		return NewJSConsumerMaxDeliverBackoffError()
  1879  	}
  1880  
  1881  	return nil
  1882  }
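
        // For example, an update that only changes fields not checked above (such as
        // AckWait or MaxDeliver, assuming no BackOff is configured) is accepted,
        // while changing DeliverPolicy, AckPolicy, ReplayPolicy or MaxWaiting is
        // rejected. A rough sketch, with `cur` standing in for the stored config:
        //
        //	ncfg := cur
        //	ncfg.AckWait = 30 * time.Second
        //	ncfg.MaxDeliver = 10
        //	err := acc.checkNewConsumerConfig(&cur, &ncfg) // err == nil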
  1883  
  1884  // Update the config based on the new config, or error if update not allowed.
  1885  func (o *consumer) updateConfig(cfg *ConsumerConfig) error {
  1886  	o.mu.Lock()
  1887  	defer o.mu.Unlock()
  1888  
  1889  	if o.closed || o.mset == nil {
  1890  		return NewJSConsumerDoesNotExistError()
  1891  	}
  1892  
  1893  	if err := o.acc.checkNewConsumerConfig(&o.cfg, cfg); err != nil {
  1894  		return err
  1895  	}
  1896  
  1897  	// Make sure we always store PauseUntil in UTC.
  1898  	if cfg.PauseUntil != nil {
  1899  		utc := (*cfg.PauseUntil).UTC()
  1900  		cfg.PauseUntil = &utc
  1901  	}
  1902  
  1903  	if o.store != nil {
  1904  		// Update local state always.
  1905  		if err := o.store.UpdateConfig(cfg); err != nil {
  1906  			return err
  1907  		}
  1908  	}
  1909  
  1910  	// DeliverSubject
  1911  	if cfg.DeliverSubject != o.cfg.DeliverSubject {
  1912  		o.updateDeliverSubjectLocked(cfg.DeliverSubject)
  1913  	}
  1914  
  1915  	// MaxAckPending
  1916  	if cfg.MaxAckPending != o.cfg.MaxAckPending {
  1917  		o.maxp = cfg.MaxAckPending
  1918  		o.signalNewMessages()
  1919  	}
  1920  	// AckWait
  1921  	if cfg.AckWait != o.cfg.AckWait {
  1922  		if o.ptmr != nil {
  1923  			o.ptmr.Reset(100 * time.Millisecond)
  1924  		}
  1925  	}
  1926  	// Rate Limit
  1927  	if cfg.RateLimit != o.cfg.RateLimit {
  1928  		// We need both locks here, so do this in a Go routine.
  1929  		go o.setRateLimitNeedsLocks()
  1930  	}
  1931  	if cfg.SampleFrequency != o.cfg.SampleFrequency {
  1932  		s := strings.TrimSuffix(cfg.SampleFrequency, "%")
  1933  		// The string has already been verified for validity up in the stack, so no
  1934  		// need to check for error here.
  1935  		sampleFreq, _ := strconv.Atoi(s)
  1936  		o.sfreq = int32(sampleFreq)
  1937  	}
  1938  	// Set MaxDeliver if changed
  1939  	if cfg.MaxDeliver != o.cfg.MaxDeliver {
  1940  		o.maxdc = uint64(cfg.MaxDeliver)
  1941  	}
  1942  	// Set InactiveThreshold if changed.
  1943  	if val := cfg.InactiveThreshold; val != o.cfg.InactiveThreshold {
  1944  		o.updateInactiveThreshold(cfg)
  1945  		stopAndClearTimer(&o.dtmr)
  1946  		// Restart timer only if we are the leader.
  1947  		if o.isLeader() && o.dthresh > 0 {
  1948  			o.dtmr = time.AfterFunc(o.dthresh, o.deleteNotActive)
  1949  		}
  1950  	}
  1951  	// Check whether the pause has changed
  1952  	{
  1953  		var old, new time.Time
  1954  		if o.cfg.PauseUntil != nil {
  1955  			old = *o.cfg.PauseUntil
  1956  		}
  1957  		if cfg.PauseUntil != nil {
  1958  			new = *cfg.PauseUntil
  1959  		}
  1960  		if !old.Equal(new) {
  1961  			o.updatePauseState(cfg)
  1962  			if o.isLeader() {
  1963  				o.sendPauseAdvisoryLocked(cfg)
  1964  			}
  1965  		}
  1966  	}
  1967  
  1968  	// Check for Subject Filters update.
  1969  	newSubjects := gatherSubjectFilters(cfg.FilterSubject, cfg.FilterSubjects)
  1970  	if !subjectSliceEqual(newSubjects, o.subjf.subjects()) {
  1971  		newSubjf := make(subjectFilters, 0, len(newSubjects))
  1972  		for _, newFilter := range newSubjects {
  1973  			fs := &subjectFilter{
  1974  				subject:          newFilter,
  1975  				hasWildcard:      subjectHasWildcard(newFilter),
  1976  				tokenizedSubject: tokenizeSubjectIntoSlice(nil, newFilter),
  1977  			}
  1978  			newSubjf = append(newSubjf, fs)
  1979  		}
  1980  		// Make sure we have correct signaling setup.
  1981  		// Consumer lock can not be held.
  1982  		mset := o.mset
  1983  		o.mu.Unlock()
  1984  		mset.swapSigSubs(o, newSubjf.subjects())
  1985  		o.mu.Lock()
  1986  
  1987  		// When we're done with signaling, we can replace the subjects.
  1988  		// If filters were removed, set `o.subjf` to nil.
  1989  		if len(newSubjf) == 0 {
  1990  			o.subjf = nil
  1991  			o.filters = nil
  1992  		} else {
  1993  			o.subjf = newSubjf
  1994  			if len(o.subjf) == 1 {
  1995  				o.filters = nil
  1996  			} else {
  1997  				o.filters = NewSublistWithCache()
  1998  				for _, filter := range o.subjf {
  1999  					o.filters.Insert(&subscription{subject: []byte(filter.subject)})
  2000  				}
  2001  			}
  2002  		}
  2003  	}
  2004  
  2005  	// Record new config for others that do not need special handling.
  2006  	// Allowed but considered a no-op: [Description, SampleFrequency, MaxWaiting, HeadersOnly]
  2007  	o.cfg = *cfg
  2008  
  2009  	// Re-calculate num pending on update.
  2010  	o.streamNumPending()
  2011  
  2012  	return nil
  2013  }
  2014  
  2015  // This is a config change for the delivery subject for a
  2016  // push based consumer.
  2017  func (o *consumer) updateDeliverSubject(newDeliver string) {
  2018  	// Update the config and the dsubj
  2019  	o.mu.Lock()
  2020  	defer o.mu.Unlock()
  2021  	o.updateDeliverSubjectLocked(newDeliver)
  2022  }
  2023  
  2024  // This is a config change for the delivery subject for a
  2025  // push based consumer.
  2026  func (o *consumer) updateDeliverSubjectLocked(newDeliver string) {
  2027  	if o.closed || o.isPullMode() || o.cfg.DeliverSubject == newDeliver {
  2028  		return
  2029  	}
  2030  
  2031  	// Force redeliver of all pending on change of delivery subject.
  2032  	if len(o.pending) > 0 {
  2033  		o.forceExpirePending()
  2034  	}
  2035  
  2036  	o.acc.sl.clearNotification(o.dsubj, o.cfg.DeliverGroup, o.inch)
  2037  	o.dsubj, o.cfg.DeliverSubject = newDeliver, newDeliver
  2038  	// When we register new one it will deliver to update state loop.
  2039  	o.acc.sl.registerNotification(newDeliver, o.cfg.DeliverGroup, o.inch)
  2040  }
  2041  
  2042  // Check that configs are equal but allow delivery subjects to be different.
  2043  func configsEqualSansDelivery(a, b ConsumerConfig) bool {
  2044  	// These were copied in so can set Delivery here.
  2045  	a.DeliverSubject, b.DeliverSubject = _EMPTY_, _EMPTY_
  2046  	return reflect.DeepEqual(a, b)
  2047  }
  2048  
  2049  // Helper to send a reply to an ack.
  2050  func (o *consumer) sendAckReply(subj string) {
  2051  	o.mu.Lock()
  2052  	defer o.mu.Unlock()
  2053  	o.sendAdvisory(subj, nil)
  2054  }
  2055  
  2056  type jsAckMsg struct {
  2057  	subject string
  2058  	reply   string
  2059  	hdr     int
  2060  	msg     []byte
  2061  }
  2062  
  2063  var jsAckMsgPool sync.Pool
  2064  
  2065  func newJSAckMsg(subj, reply string, hdr int, msg []byte) *jsAckMsg {
  2066  	var m *jsAckMsg
  2067  	am := jsAckMsgPool.Get()
  2068  	if am != nil {
  2069  		m = am.(*jsAckMsg)
  2070  	} else {
  2071  		m = &jsAckMsg{}
  2072  	}
  2073  	// When getting something from a pool it is critical that all fields are
  2074  	// initialized. Doing it this way guarantees that if someone adds a field to
  2075  	// the structure, the compiler will fail the build if this line is not updated.
  2076  	(*m) = jsAckMsg{subj, reply, hdr, msg}
  2077  	return m
  2078  }
  2079  
  2080  func (am *jsAckMsg) returnToPool() {
  2081  	if am == nil {
  2082  		return
  2083  	}
  2084  	am.subject, am.reply, am.hdr, am.msg = _EMPTY_, _EMPTY_, -1, nil
  2085  	jsAckMsgPool.Put(am)
  2086  }
  2087  
  2088  // Push the ack message to the consumer's ackMsgs queue
  2089  func (o *consumer) pushAck(_ *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) {
  2090  	atomic.AddInt64(&o.awl, 1)
  2091  	o.ackMsgs.push(newJSAckMsg(subject, reply, c.pa.hdr, copyBytes(rmsg)))
  2092  }
  2093  
  2094  // Processes a message for the ack reply subject delivered with a message.
  2095  func (o *consumer) processAck(subject, reply string, hdr int, rmsg []byte) {
  2096  	defer atomic.AddInt64(&o.awl, -1)
  2097  
  2098  	var msg []byte
  2099  	if hdr > 0 {
  2100  		msg = rmsg[hdr:]
  2101  	} else {
  2102  		msg = rmsg
  2103  	}
  2104  
  2105  	sseq, dseq, dc := ackReplyInfo(subject)
  2106  
  2107  	skipAckReply := sseq == 0
  2108  
  2109  	switch {
  2110  	case len(msg) == 0, bytes.Equal(msg, AckAck), bytes.Equal(msg, AckOK):
  2111  		o.processAckMsg(sseq, dseq, dc, true)
  2112  	case bytes.HasPrefix(msg, AckNext):
  2113  		o.processAckMsg(sseq, dseq, dc, true)
  2114  		o.processNextMsgRequest(reply, msg[len(AckNext):])
  2115  		skipAckReply = true
  2116  	case bytes.HasPrefix(msg, AckNak):
  2117  		o.processNak(sseq, dseq, dc, msg)
  2118  	case bytes.Equal(msg, AckProgress):
  2119  		o.progressUpdate(sseq)
  2120  	case bytes.HasPrefix(msg, AckTerm):
  2121  		var reason string
  2122  		if buf := msg[len(AckTerm):]; len(buf) > 0 {
  2123  			reason = string(bytes.TrimSpace(buf))
  2124  		}
  2125  		o.processTerm(sseq, dseq, dc, reason)
  2126  	}
  2127  
  2128  	// Ack the ack if requested.
  2129  	if len(reply) > 0 && !skipAckReply {
  2130  		o.sendAckReply(reply)
  2131  	}
  2132  }
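
        // For reference, the payloads matched above are the standard JetStream ack
        // tokens sent by clients: an empty body or "+ACK" acks, "+NXT" acks and
        // requests the next message(s), "-NAK" (optionally with a delay) negatively
        // acks, "+WPI" marks the message as still in progress, and "+TERM"
        // (optionally with a reason) terminates delivery.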
  2133  
  2134  // Used to process a working update to delay redelivery.
  2135  func (o *consumer) progressUpdate(seq uint64) {
  2136  	o.mu.Lock()
  2137  	defer o.mu.Unlock()
  2138  
  2139  	if p, ok := o.pending[seq]; ok {
  2140  		p.Timestamp = time.Now().UnixNano()
  2141  		// Update store system.
  2142  		o.updateDelivered(p.Sequence, seq, 1, p.Timestamp)
  2143  	}
  2144  }
  2145  
  2146  // Lock should be held.
  2147  func (o *consumer) updateSkipped(seq uint64) {
  2148  	// Clustered mode and R>1 only.
  2149  	if o.node == nil || !o.isLeader() {
  2150  		return
  2151  	}
  2152  	var b [1 + 8]byte
  2153  	b[0] = byte(updateSkipOp)
  2154  	var le = binary.LittleEndian
  2155  	le.PutUint64(b[1:], seq)
  2156  	o.propose(b[:])
  2157  }
  2158  
  2159  func (o *consumer) loopAndForwardProposals(qch chan struct{}) {
  2160  	o.mu.RLock()
  2161  	node, pch := o.node, o.pch
  2162  	o.mu.RUnlock()
  2163  
  2164  	if node == nil || pch == nil {
  2165  		return
  2166  	}
  2167  
  2168  	forwardProposals := func() error {
  2169  		o.mu.Lock()
  2170  		if o.node != node || node.State() != Leader {
  2171  			o.mu.Unlock()
  2172  			return errors.New("no longer leader")
  2173  		}
  2174  		proposal := o.phead
  2175  		o.phead, o.ptail = nil, nil
  2176  		o.mu.Unlock()
  2177  		// 256k max for now per batch.
  2178  		const maxBatch = 256 * 1024
  2179  		var entries []*Entry
  2180  		for sz := 0; proposal != nil; proposal = proposal.next {
  2181  			entry := entryPool.Get().(*Entry)
  2182  			entry.Type, entry.Data = EntryNormal, proposal.data
  2183  			entries = append(entries, entry)
  2184  			sz += len(proposal.data)
  2185  			if sz > maxBatch {
  2186  				node.ProposeDirect(entries)
  2187  				// We need to re-create `entries` because there is a reference
  2188  				// to it in the node's pae map.
  2189  				sz, entries = 0, nil
  2190  			}
  2191  		}
  2192  		if len(entries) > 0 {
  2193  			node.ProposeDirect(entries)
  2194  		}
  2195  		return nil
  2196  	}
  2197  
  2198  	// In case we have anything pending on entry.
  2199  	forwardProposals()
  2200  
  2201  	for {
  2202  		select {
  2203  		case <-qch:
  2204  			forwardProposals()
  2205  			return
  2206  		case <-pch:
  2207  			if err := forwardProposals(); err != nil {
  2208  				return
  2209  			}
  2210  		}
  2211  	}
  2212  }
  2213  
  2214  // Lock should be held.
  2215  func (o *consumer) propose(entry []byte) {
  2216  	var notify bool
  2217  	p := &proposal{data: entry}
  2218  	if o.phead == nil {
  2219  		o.phead = p
  2220  		notify = true
  2221  	} else {
  2222  		o.ptail.next = p
  2223  	}
  2224  	o.ptail = p
  2225  
  2226  	// Kick our looper routine if needed.
  2227  	if notify {
  2228  		select {
  2229  		case o.pch <- struct{}{}:
  2230  		default:
  2231  		}
  2232  	}
  2233  }
  2234  
  2235  // Lock should be held.
  2236  func (o *consumer) updateDelivered(dseq, sseq, dc uint64, ts int64) {
  2237  	// Clustered mode and R>1.
  2238  	if o.node != nil {
  2239  		// Inline for now, use variable compression.
  2240  		var b [4*binary.MaxVarintLen64 + 1]byte
  2241  		b[0] = byte(updateDeliveredOp)
  2242  		n := 1
  2243  		n += binary.PutUvarint(b[n:], dseq)
  2244  		n += binary.PutUvarint(b[n:], sseq)
  2245  		n += binary.PutUvarint(b[n:], dc)
  2246  		n += binary.PutVarint(b[n:], ts)
  2247  		o.propose(b[:n])
  2248  	}
  2249  	if o.store != nil {
  2250  		// Update local state always.
  2251  		o.store.UpdateDelivered(dseq, sseq, dc, ts)
  2252  	}
  2253  	// Update activity.
  2254  	o.ldt = time.Now()
  2255  }
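
        // A rough decoding sketch for the entry proposed above (illustrative only;
        // the real replay lives in the clustering code, and `buf` here is assumed to
        // hold the proposed entry). The op byte is followed by three unsigned
        // varints and one signed varint:
        //
        //	dseq, n1 := binary.Uvarint(buf[1:])
        //	sseq, n2 := binary.Uvarint(buf[1+n1:])
        //	dc, n3 := binary.Uvarint(buf[1+n1+n2:])
        //	ts, _ := binary.Varint(buf[1+n1+n2+n3:])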
  2256  
  2257  // Lock should be held.
  2258  func (o *consumer) updateAcks(dseq, sseq uint64) {
  2259  	if o.node != nil {
  2260  		// Inline for now, use variable compression.
  2261  		var b [2*binary.MaxVarintLen64 + 1]byte
  2262  		b[0] = byte(updateAcksOp)
  2263  		n := 1
  2264  		n += binary.PutUvarint(b[n:], dseq)
  2265  		n += binary.PutUvarint(b[n:], sseq)
  2266  		o.propose(b[:n])
  2267  	} else if o.store != nil {
  2268  		o.store.UpdateAcks(dseq, sseq)
  2269  	}
  2270  	// Update activity.
  2271  	o.lat = time.Now()
  2272  }
  2273  
  2274  // Communicate to the cluster an addition of a pending request.
  2275  // Lock should be held.
  2276  func (o *consumer) addClusterPendingRequest(reply string) {
  2277  	if o.node == nil || !o.pendingRequestsOk() {
  2278  		return
  2279  	}
  2280  	b := make([]byte, len(reply)+1)
  2281  	b[0] = byte(addPendingRequest)
  2282  	copy(b[1:], reply)
  2283  	o.propose(b)
  2284  }
  2285  
  2286  // Communicate to the cluster a removal of a pending request.
  2287  // Lock should be held.
  2288  func (o *consumer) removeClusterPendingRequest(reply string) {
  2289  	if o.node == nil || !o.pendingRequestsOk() {
  2290  		return
  2291  	}
  2292  	b := make([]byte, len(reply)+1)
  2293  	b[0] = byte(removePendingRequest)
  2294  	copy(b[1:], reply)
  2295  	o.propose(b)
  2296  }
  2297  
  2298  // Set whether or not we can send pending requests to followers.
  2299  func (o *consumer) setPendingRequestsOk(ok bool) {
  2300  	o.mu.Lock()
  2301  	o.prOk = ok
  2302  	o.mu.Unlock()
  2303  }
  2304  
  2305  // Lock should be held.
  2306  func (o *consumer) pendingRequestsOk() bool {
  2307  	return o.prOk
  2308  }
  2309  
  2310  // Set whether or not we can send info about pending pull requests to our group.
  2311  // Will require all peers to be on a minimum version.
  2312  func (o *consumer) checkAndSetPendingRequestsOk() {
  2313  	o.mu.RLock()
  2314  	s, isValid := o.srv, o.mset != nil
  2315  	o.mu.RUnlock()
  2316  	if !isValid {
  2317  		return
  2318  	}
  2319  
  2320  	if ca := o.consumerAssignment(); ca != nil && len(ca.Group.Peers) > 1 {
  2321  		for _, pn := range ca.Group.Peers {
  2322  			if si, ok := s.nodeToInfo.Load(pn); ok {
  2323  				if !versionAtLeast(si.(nodeInfo).version, 2, 7, 1) {
  2324  					// We expect all of our peers to eventually be up to date.
  2325  					// So check again in a while.
  2326  					time.AfterFunc(eventsHBInterval, func() { o.checkAndSetPendingRequestsOk() })
  2327  					o.setPendingRequestsOk(false)
  2328  					return
  2329  				}
  2330  			}
  2331  		}
  2332  	}
  2333  	o.setPendingRequestsOk(true)
  2334  }
  2335  
  2336  // On leadership change make sure we alert the pending requests that they are no longer valid.
  2337  func (o *consumer) checkPendingRequests() {
  2338  	o.mu.Lock()
  2339  	defer o.mu.Unlock()
  2340  	if o.mset == nil || o.outq == nil {
  2341  		return
  2342  	}
  2343  	hdr := []byte("NATS/1.0 409 Leadership Change\r\n\r\n")
  2344  	for reply := range o.prm {
  2345  		o.outq.send(newJSPubMsg(reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  2346  	}
  2347  	o.prm = nil
  2348  }
  2349  
  2350  // This will release any pending pull requests if applicable.
  2351  // Should be called only by the leader being deleted or stopped.
  2352  // Lock should be held.
  2353  func (o *consumer) releaseAnyPendingRequests(isAssigned bool) {
  2354  	if o.mset == nil || o.outq == nil || o.waiting.len() == 0 {
  2355  		return
  2356  	}
  2357  	var hdr []byte
  2358  	if !isAssigned {
  2359  		hdr = []byte("NATS/1.0 409 Consumer Deleted\r\n\r\n")
  2360  	}
  2361  
  2362  	wq := o.waiting
  2363  	for wr := wq.head; wr != nil; {
  2364  		if hdr != nil {
  2365  			o.outq.send(newJSPubMsg(wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  2366  		}
  2367  		next := wr.next
  2368  		wr.recycle()
  2369  		wr = next
  2370  	}
  2371  	// Nil out old queue.
  2372  	o.waiting = nil
  2373  }
  2374  
  2375  // Process a NAK.
  2376  func (o *consumer) processNak(sseq, dseq, dc uint64, nak []byte) {
  2377  	o.mu.Lock()
  2378  	defer o.mu.Unlock()
  2379  
  2380  	// Check for out of range.
  2381  	if dseq <= o.adflr || dseq > o.dseq {
  2382  		return
  2383  	}
  2384  	// If we are explicit ack make sure this is still on our pending list.
  2385  	if _, ok := o.pending[sseq]; !ok {
  2386  		return
  2387  	}
  2388  
  2389  	// Deliver an advisory
  2390  	e := JSConsumerDeliveryNakAdvisory{
  2391  		TypedEvent: TypedEvent{
  2392  			Type: JSConsumerDeliveryNakAdvisoryType,
  2393  			ID:   nuid.Next(),
  2394  			Time: time.Now().UTC(),
  2395  		},
  2396  		Stream:      o.stream,
  2397  		Consumer:    o.name,
  2398  		ConsumerSeq: dseq,
  2399  		StreamSeq:   sseq,
  2400  		Deliveries:  dc,
  2401  		Domain:      o.srv.getOpts().JetStreamDomain,
  2402  	}
  2403  
  2404  	j, err := json.Marshal(e)
  2405  	if err != nil {
  2406  		return
  2407  	}
  2408  
  2409  	o.sendAdvisory(o.nakEventT, j)
  2410  
  2411  	// Check to see if we have delays attached.
  2412  	if len(nak) > len(AckNak) {
  2413  		arg := bytes.TrimSpace(nak[len(AckNak):])
  2414  		if len(arg) > 0 {
  2415  			var d time.Duration
  2416  			var err error
  2417  			if arg[0] == '{' {
  2418  				var nd ConsumerNakOptions
  2419  				if err = json.Unmarshal(arg, &nd); err == nil {
  2420  					d = nd.Delay
  2421  				}
  2422  			} else {
  2423  				d, err = time.ParseDuration(string(arg))
  2424  			}
  2425  			if err != nil {
  2426  				// Treat this as normal NAK.
  2427  				o.srv.Warnf("JetStream consumer '%s > %s > %s' bad NAK delay value: %q", o.acc.Name, o.stream, o.name, arg)
  2428  			} else {
  2429  				// We have a parsed duration that the user wants us to wait before retrying.
  2430  				// Make sure we are not on the rdq.
  2431  				o.removeFromRedeliverQueue(sseq)
  2432  				if p, ok := o.pending[sseq]; ok {
  2433  					// A timestamp of now-AckWait is expired right now, so offset the delay from there.
  2434  					p.Timestamp = time.Now().Add(-o.cfg.AckWait).Add(d).UnixNano()
  2435  					// Update store system which will update followers as well.
  2436  					o.updateDelivered(p.Sequence, sseq, dc, p.Timestamp)
  2437  					if o.ptmr != nil {
  2438  						// Want checkPending to run and figure out the next timer ttl.
  2439  						// TODO(dlc) - We could optimize this maybe a bit more and track when we expect the timer to fire.
  2440  						o.ptmr.Reset(10 * time.Millisecond)
  2441  					}
  2442  				}
  2443  				// Nothing else for us to do now, so return.
  2444  				return
  2445  			}
  2446  		}
  2447  	}
  2448  
  2449  	// If already queued up also ignore.
  2450  	if !o.onRedeliverQueue(sseq) {
  2451  		o.addToRedeliverQueue(sseq)
  2452  	}
  2453  
  2454  	o.signalNewMessages()
  2455  }
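
        // For illustration, a delayed NAK as handled above can be sent in either
        // form (the JSON variant assumes the `delay` field of ConsumerNakOptions,
        // expressed in nanoseconds):
        //
        //	-NAK 10s
        //	-NAK {"delay": 10000000000}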
  2456  
  2457  // Process a TERM
  2458  func (o *consumer) processTerm(sseq, dseq, dc uint64, reason string) {
  2459  	// Treat like an ack to suppress redelivery.
  2460  	o.processAckMsg(sseq, dseq, dc, false)
  2461  
  2462  	o.mu.Lock()
  2463  	defer o.mu.Unlock()
  2464  
  2465  	// Deliver an advisory
  2466  	e := JSConsumerDeliveryTerminatedAdvisory{
  2467  		TypedEvent: TypedEvent{
  2468  			Type: JSConsumerDeliveryTerminatedAdvisoryType,
  2469  			ID:   nuid.Next(),
  2470  			Time: time.Now().UTC(),
  2471  		},
  2472  		Stream:      o.stream,
  2473  		Consumer:    o.name,
  2474  		ConsumerSeq: dseq,
  2475  		StreamSeq:   sseq,
  2476  		Deliveries:  dc,
  2477  		Reason:      reason,
  2478  		Domain:      o.srv.getOpts().JetStreamDomain,
  2479  	}
  2480  
  2481  	j, err := json.Marshal(e)
  2482  	if err != nil {
  2483  		return
  2484  	}
  2485  
  2486  	subj := JSAdvisoryConsumerMsgTerminatedPre + "." + o.stream + "." + o.name
  2487  	o.sendAdvisory(subj, j)
  2488  }
  2489  
  2490  // Introduce a small delay in when timer fires to check pending.
  2491  // Allows bursts to be treated in same time frame.
  2492  const ackWaitDelay = time.Millisecond
  2493  
  2494  // ackWait returns how long to wait to fire the pending timer.
  2495  func (o *consumer) ackWait(next time.Duration) time.Duration {
  2496  	if next > 0 {
  2497  		return next + ackWaitDelay
  2498  	}
  2499  	return o.cfg.AckWait + ackWaitDelay
  2500  }
  2501  
  2502  // Due to a bug in the calculation of sequences on restoring redelivered messages, do a quick sanity check.
  2503  // Lock should be held.
  2504  func (o *consumer) checkRedelivered(slseq uint64) {
  2505  	var lseq uint64
  2506  	if mset := o.mset; mset != nil {
  2507  		lseq = slseq
  2508  	}
  2509  	var shouldUpdateState bool
  2510  	for sseq := range o.rdc {
  2511  		if sseq <= o.asflr || (lseq > 0 && sseq > lseq) {
  2512  			delete(o.rdc, sseq)
  2513  			o.removeFromRedeliverQueue(sseq)
  2514  			shouldUpdateState = true
  2515  		}
  2516  	}
  2517  	if shouldUpdateState {
  2518  		if err := o.writeStoreStateUnlocked(); err != nil && o.srv != nil && o.mset != nil && !o.closed {
  2519  			s, acc, mset, name := o.srv, o.acc, o.mset, o.name
  2520  			s.Warnf("Consumer '%s > %s > %s' error on write store state from check redelivered: %v", acc, mset.cfg.Name, name, err)
  2521  		}
  2522  	}
  2523  }
  2524  
  2525  // This will restore the state from disk.
  2526  // Lock should be held.
  2527  func (o *consumer) readStoredState(slseq uint64) error {
  2528  	if o.store == nil {
  2529  		return nil
  2530  	}
  2531  	state, err := o.store.State()
  2532  	if err == nil {
  2533  		o.applyState(state)
  2534  		if len(o.rdc) > 0 {
  2535  			o.checkRedelivered(slseq)
  2536  		}
  2537  	}
  2538  	return err
  2539  }
  2540  
  2541  // Apply the consumer stored state.
  2542  // Lock should be held.
  2543  func (o *consumer) applyState(state *ConsumerState) {
  2544  	if state == nil {
  2545  		return
  2546  	}
  2547  
  2548  	// If o.sseq is greater don't update. Don't go backwards on o.sseq if leader.
  2549  	if !o.isLeader() || o.sseq <= state.Delivered.Stream {
  2550  		o.sseq = state.Delivered.Stream + 1
  2551  	}
  2552  	o.dseq = state.Delivered.Consumer + 1
  2553  	o.adflr = state.AckFloor.Consumer
  2554  	o.asflr = state.AckFloor.Stream
  2555  	o.pending = state.Pending
  2556  	o.rdc = state.Redelivered
  2557  
  2558  	// Setup tracking timer if we have restored pending.
  2559  	if o.isLeader() && len(o.pending) > 0 {
  2560  		// This is on startup or leader change. We want to check pending
  2561  		// sooner in case there are inconsistencies etc. Pick between 500ms and 1.5s.
  2562  		delay := 500*time.Millisecond + time.Duration(rand.Int63n(1000))*time.Millisecond
  2563  		// If normal is lower than this just use that.
  2564  		if o.cfg.AckWait < delay {
  2565  			delay = o.ackWait(0)
  2566  		}
  2567  		if o.ptmr == nil {
  2568  			o.ptmr = time.AfterFunc(delay, o.checkPending)
  2569  		} else {
  2570  			o.ptmr.Reset(delay)
  2571  		}
  2572  	}
  2573  }
  2574  
  2575  // Sets our store state from another source. Used in clustered mode on snapshot restore.
  2576  // Lock should be held.
  2577  func (o *consumer) setStoreState(state *ConsumerState) error {
  2578  	if state == nil || o.store == nil {
  2579  		return nil
  2580  	}
  2581  	err := o.store.Update(state)
  2582  	if err == nil {
  2583  		o.applyState(state)
  2584  	}
  2585  	return err
  2586  }
  2587  
  2588  // Update our state to the store.
  2589  func (o *consumer) writeStoreState() error {
  2590  	o.mu.Lock()
  2591  	defer o.mu.Unlock()
  2592  	return o.writeStoreStateUnlocked()
  2593  }
  2594  
  2595  // Update our state to the store.
  2596  // Lock should be held.
  2597  func (o *consumer) writeStoreStateUnlocked() error {
  2598  	if o.store == nil {
  2599  		return nil
  2600  	}
  2601  	state := ConsumerState{
  2602  		Delivered: SequencePair{
  2603  			Consumer: o.dseq - 1,
  2604  			Stream:   o.sseq - 1,
  2605  		},
  2606  		AckFloor: SequencePair{
  2607  			Consumer: o.adflr,
  2608  			Stream:   o.asflr,
  2609  		},
  2610  		Pending:     o.pending,
  2611  		Redelivered: o.rdc,
  2612  	}
  2613  	return o.store.Update(&state)
  2614  }
  2615  
  2616  // Returns an initial info. Only applicable for non-clustered consumers.
  2617  // We will clear after we return it, so one shot.
  2618  func (o *consumer) initialInfo() *ConsumerInfo {
  2619  	o.mu.Lock()
  2620  	ici := o.ici
  2621  	o.ici = nil // gc friendly
  2622  	o.mu.Unlock()
  2623  	if ici == nil {
  2624  		ici = o.info()
  2625  	}
  2626  	return ici
  2627  }
  2628  
  2629  // Clears our initial info.
  2630  // Used when we have a leader change in cluster mode but do not send a response.
  2631  func (o *consumer) clearInitialInfo() {
  2632  	o.mu.Lock()
  2633  	o.ici = nil // gc friendly
  2634  	o.mu.Unlock()
  2635  }
  2636  
  2637  // Info returns our current consumer state.
  2638  func (o *consumer) info() *ConsumerInfo {
  2639  	return o.infoWithSnap(false)
  2640  }
  2641  
  2642  func (o *consumer) infoWithSnap(snap bool) *ConsumerInfo {
  2643  	return o.infoWithSnapAndReply(snap, _EMPTY_)
  2644  }
  2645  
  2646  func (o *consumer) infoWithSnapAndReply(snap bool, reply string) *ConsumerInfo {
  2647  	o.mu.Lock()
  2648  	mset := o.mset
  2649  	if o.closed || mset == nil || mset.srv == nil {
  2650  		o.mu.Unlock()
  2651  		return nil
  2652  	}
  2653  	js := o.js
  2654  	if js == nil {
  2655  		o.mu.Unlock()
  2656  		return nil
  2657  	}
  2658  
  2659  	// Capture raftGroup.
  2660  	var rg *raftGroup
  2661  	if o.ca != nil {
  2662  		rg = o.ca.Group
  2663  	}
  2664  
  2665  	cfg := o.cfg
  2666  	info := &ConsumerInfo{
  2667  		Stream:  o.stream,
  2668  		Name:    o.name,
  2669  		Created: o.created,
  2670  		Config:  &cfg,
  2671  		Delivered: SequenceInfo{
  2672  			Consumer: o.dseq - 1,
  2673  			Stream:   o.sseq - 1,
  2674  		},
  2675  		AckFloor: SequenceInfo{
  2676  			Consumer: o.adflr,
  2677  			Stream:   o.asflr,
  2678  		},
  2679  		NumAckPending:  len(o.pending),
  2680  		NumRedelivered: len(o.rdc),
  2681  		NumPending:     o.checkNumPending(),
  2682  		PushBound:      o.isPushMode() && o.active,
  2683  		TimeStamp:      time.Now().UTC(),
  2684  	}
  2685  	if o.cfg.PauseUntil != nil {
  2686  		p := *o.cfg.PauseUntil
  2687  		if info.Paused = time.Now().Before(p); info.Paused {
  2688  			info.PauseRemaining = time.Until(p)
  2689  		}
  2690  	}
  2691  
  2692  	// If we are replicated and we are not the leader we need to pull certain data from our store.
  2693  	if rg != nil && rg.node != nil && !o.isLeader() && o.store != nil {
  2694  		state, err := o.store.BorrowState()
  2695  		if err != nil {
  2696  			o.mu.Unlock()
  2697  			return nil
  2698  		}
  2699  		info.Delivered.Consumer, info.Delivered.Stream = state.Delivered.Consumer, state.Delivered.Stream
  2700  		info.AckFloor.Consumer, info.AckFloor.Stream = state.AckFloor.Consumer, state.AckFloor.Stream
  2701  		info.NumAckPending = len(state.Pending)
  2702  		info.NumRedelivered = len(state.Redelivered)
  2703  	}
  2704  
  2705  	// Adjust active based on non-zero etc. Also make UTC here.
  2706  	if !o.ldt.IsZero() {
  2707  		ldt := o.ldt.UTC() // This copies as well.
  2708  		info.Delivered.Last = &ldt
  2709  	}
  2710  	if !o.lat.IsZero() {
  2711  		lat := o.lat.UTC() // This copies as well.
  2712  		info.AckFloor.Last = &lat
  2713  	}
  2714  
  2715  	// If we are a pull mode consumer, report on number of waiting requests.
  2716  	if o.isPullMode() {
  2717  		o.processWaiting(false)
  2718  		info.NumWaiting = o.waiting.len()
  2719  	}
  2720  	// If we were asked to snapshot do so here.
  2721  	if snap {
  2722  		o.ici = info
  2723  	}
  2724  	sysc := o.sysc
  2725  	o.mu.Unlock()
  2726  
  2727  	// Do cluster.
  2728  	if rg != nil {
  2729  		info.Cluster = js.clusterInfo(rg)
  2730  	}
  2731  
  2732  	// If we have a reply subject send the response here.
  2733  	if reply != _EMPTY_ && sysc != nil {
  2734  		sysc.sendInternalMsg(reply, _EMPTY_, nil, info)
  2735  	}
  2736  
  2737  	return info
  2738  }
  2739  
  2740  // Will signal us that new messages are available. Will break out of waiting.
  2741  func (o *consumer) signalNewMessages() {
  2742  	// Kick our new message channel
  2743  	select {
  2744  	case o.mch <- struct{}{}:
  2745  	default:
  2746  	}
  2747  }
  2748  
  2749  // shouldSample lets us know if we are sampling metrics on acks.
  2750  func (o *consumer) shouldSample() bool {
  2751  	switch {
  2752  	case o.sfreq <= 0:
  2753  		return false
  2754  	case o.sfreq >= 100:
  2755  		return true
  2756  	}
  2757  
  2758  	// TODO(ripienaar) this is a tad slow so we need to rethink here, however this will only
  2759  	// hit for those with sampling enabled, and it's not the default.
  2760  	return rand.Int31n(100) <= o.sfreq
  2761  }
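
        // For example, with SampleFrequency "30" (or "30%") o.sfreq is 30, so on
        // average roughly 30 of every 100 acks emit an ack metric advisory; 0 (or
        // unset) disables sampling and 100 samples every ack.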
  2762  
  2763  func (o *consumer) sampleAck(sseq, dseq, dc uint64) {
  2764  	if !o.shouldSample() {
  2765  		return
  2766  	}
  2767  
  2768  	now := time.Now().UTC()
  2769  	unow := now.UnixNano()
  2770  
  2771  	e := JSConsumerAckMetric{
  2772  		TypedEvent: TypedEvent{
  2773  			Type: JSConsumerAckMetricType,
  2774  			ID:   nuid.Next(),
  2775  			Time: now,
  2776  		},
  2777  		Stream:      o.stream,
  2778  		Consumer:    o.name,
  2779  		ConsumerSeq: dseq,
  2780  		StreamSeq:   sseq,
  2781  		Delay:       unow - o.pending[sseq].Timestamp,
  2782  		Deliveries:  dc,
  2783  		Domain:      o.srv.getOpts().JetStreamDomain,
  2784  	}
  2785  
  2786  	j, err := json.Marshal(e)
  2787  	if err != nil {
  2788  		return
  2789  	}
  2790  
  2791  	o.sendAdvisory(o.ackEventT, j)
  2792  }
  2793  
  2794  func (o *consumer) processAckMsg(sseq, dseq, dc uint64, doSample bool) {
  2795  	o.mu.Lock()
  2796  	if o.closed {
  2797  		o.mu.Unlock()
  2798  		return
  2799  	}
  2800  
  2801  	mset := o.mset
  2802  	if mset == nil || mset.closed.Load() {
  2803  		o.mu.Unlock()
  2804  		return
  2805  	}
  2806  
  2807  	var sagap uint64
  2808  	var needSignal bool
  2809  
  2810  	switch o.cfg.AckPolicy {
  2811  	case AckExplicit:
  2812  		if p, ok := o.pending[sseq]; ok {
  2813  			if doSample {
  2814  				o.sampleAck(sseq, dseq, dc)
  2815  			}
  2816  			if o.maxp > 0 && len(o.pending) >= o.maxp {
  2817  				needSignal = true
  2818  			}
  2819  			delete(o.pending, sseq)
  2820  			// Use the original deliver sequence from our pending record.
  2821  			dseq = p.Sequence
  2822  			// Only move floors if we matched an existing pending.
  2823  			if dseq == o.adflr+1 {
  2824  				o.adflr, o.asflr = dseq, sseq
  2825  				for ss := sseq + 1; ss < o.sseq; ss++ {
  2826  					if p, ok := o.pending[ss]; ok {
  2827  						if p.Sequence > 0 {
  2828  							o.adflr, o.asflr = p.Sequence-1, ss-1
  2829  						}
  2830  						break
  2831  					}
  2832  				}
  2833  			}
  2834  			// If nothing left set to current delivered.
  2835  			if len(o.pending) == 0 {
  2836  				o.adflr, o.asflr = o.dseq-1, o.sseq-1
  2837  			}
  2838  		}
  2839  		// We do these regardless.
  2840  		delete(o.rdc, sseq)
  2841  		o.removeFromRedeliverQueue(sseq)
  2842  	case AckAll:
  2843  		// no-op
  2844  		if dseq <= o.adflr || sseq <= o.asflr {
  2845  			o.mu.Unlock()
  2846  			return
  2847  		}
  2848  		if o.maxp > 0 && len(o.pending) >= o.maxp {
  2849  			needSignal = true
  2850  		}
  2851  		sagap = sseq - o.asflr
  2852  		o.adflr, o.asflr = dseq, sseq
  2853  		for seq := sseq; seq > sseq-sagap; seq-- {
  2854  			delete(o.pending, seq)
  2855  			delete(o.rdc, seq)
  2856  			o.removeFromRedeliverQueue(seq)
  2857  		}
  2858  	case AckNone:
  2859  		// FIXME(dlc) - This is error but do we care?
  2860  		o.mu.Unlock()
  2861  		return
  2862  	}
  2863  
  2864  	// Update underlying store.
  2865  	o.updateAcks(dseq, sseq)
  2866  
  2867  	clustered := o.node != nil
  2868  
  2869  	// In case retention changes for a stream, this ought to have been updated
  2870  	// using the consumer lock to avoid a race.
  2871  	retention := o.retention
  2872  	o.mu.Unlock()
  2873  
  2874  	// Let the owning stream know if we are interest or workqueue retention based.
  2875  	// If this consumer is clustered this will be handled by processReplicatedAck
  2876  	// after the ack has propagated.
  2877  	if !clustered && mset != nil && retention != LimitsPolicy {
  2878  		if sagap > 1 {
  2879  			// FIXME(dlc) - This is very inefficient, will need to fix.
  2880  			for seq := sseq; seq > sseq-sagap; seq-- {
  2881  				mset.ackMsg(o, seq)
  2882  			}
  2883  		} else {
  2884  			mset.ackMsg(o, sseq)
  2885  		}
  2886  	}
  2887  
  2888  	// If we had max ack pending set and were at limit we need to unblock ourselves.
  2889  	if needSignal {
  2890  		o.signalNewMessages()
  2891  	}
  2892  }
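
        // For example, with AckExplicit and deliver sequences 1..5 outstanding,
        // acking 3 leaves the ack floor at 0 since 1 and 2 are still pending.
        // Acking 1 then advances the floor to 1, and acking 2 jumps it to 3
        // because 3 was already acknowledged.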
  2893  
  2894  // Determine if this is a truly filtered consumer. Modern clients will place filtered subjects
  2895  // even if the stream only has a single non-wildcard subject designation.
  2896  // Read lock should be held.
  2897  func (o *consumer) isFiltered() bool {
  2898  	if o.subjf == nil {
  2899  		return false
  2900  	}
  2901  	// If we are here we want to check if the filtered subject is
  2902  	// a direct match for our only listed subject.
  2903  	mset := o.mset
  2904  	if mset == nil {
  2905  		return true
  2906  	}
  2907  
  2908  	// `isFiltered` needs to be performant, so we do
  2909  	// as few checks as possible to avoid unnecessary work.
  2910  	// Here we avoid iteration over slices if there is only one subject in stream
  2911  	// and one filter for the consumer.
  2912  	if len(mset.cfg.Subjects) == 1 && len(o.subjf) == 1 {
  2913  		return mset.cfg.Subjects[0] != o.subjf[0].subject
  2914  	}
  2915  
  2916  	// If the lists are not of equal length, we can return early, as this is filtered.
  2917  	if len(mset.cfg.Subjects) != len(o.subjf) {
  2918  		return true
  2919  	}
  2920  
  2921  	// In the rare case that the user passed all stream subjects as consumer filters,
  2922  	// we need to do a more expensive comparison.
  2923  	// reflect.DeepEqual would return false if the filters are the same but in a different order,
  2924  	// so it can't be used here.
  2925  	cfilters := make(map[string]struct{}, len(o.subjf))
  2926  	for _, val := range o.subjf {
  2927  		cfilters[val.subject] = struct{}{}
  2928  	}
  2929  	for _, val := range mset.cfg.Subjects {
  2930  		if _, ok := cfilters[val]; !ok {
  2931  			return true
  2932  		}
  2933  	}
  2934  	return false
  2935  }
  2936  
  2937  // Check if we need an ack for this store seq.
  2938  // This is called for interest based retention streams to remove messages.
  2939  func (o *consumer) needAck(sseq uint64, subj string) bool {
  2940  	var needAck bool
  2941  	var asflr, osseq uint64
  2942  	var pending map[uint64]*Pending
  2943  
  2944  	o.mu.RLock()
  2945  	defer o.mu.RUnlock()
  2946  
  2947  	isFiltered := o.isFiltered()
  2948  	if isFiltered && o.mset == nil {
  2949  		return false
  2950  	}
  2951  
  2952  	// Check if we are filtered, and if so check if this is even applicable to us.
  2953  	if isFiltered {
  2954  		if subj == _EMPTY_ {
  2955  			var svp StoreMsg
  2956  			if _, err := o.mset.store.LoadMsg(sseq, &svp); err != nil {
  2957  				return false
  2958  			}
  2959  			subj = svp.subj
  2960  		}
  2961  		if !o.isFilteredMatch(subj) {
  2962  			return false
  2963  		}
  2964  	}
  2965  	if o.isLeader() {
  2966  		asflr, osseq = o.asflr, o.sseq
  2967  		pending = o.pending
  2968  	} else {
  2969  		if o.store == nil {
  2970  			return false
  2971  		}
  2972  		state, err := o.store.BorrowState()
  2973  		if err != nil || state == nil {
  2974  			// Fall back to what we track internally for now.
  2975  			return sseq > o.asflr && !o.isFiltered()
  2976  		}
  2977  		// When loading state as we do here, osseq is the stored delivered stream sequence +1.
  2978  		asflr, osseq, pending = state.AckFloor.Stream, state.Delivered.Stream+1, state.Pending
  2979  	}
  2980  
  2981  	switch o.cfg.AckPolicy {
  2982  	case AckNone, AckAll:
  2983  		needAck = sseq > asflr
  2984  	case AckExplicit:
  2985  		if sseq > asflr {
  2986  			if sseq >= osseq {
  2987  				needAck = true
  2988  			} else {
  2989  				_, needAck = pending[sseq]
  2990  			}
  2991  		}
  2992  	}
  2993  
  2994  	return needAck
  2995  }
  2996  
  2997  // Helper for the next message requests.
  2998  func nextReqFromMsg(msg []byte) (time.Time, int, int, bool, time.Duration, time.Time, error) {
  2999  	req := bytes.TrimSpace(msg)
  3000  
  3001  	switch {
  3002  	case len(req) == 0:
  3003  		return time.Time{}, 1, 0, false, 0, time.Time{}, nil
  3004  
  3005  	case req[0] == '{':
  3006  		var cr JSApiConsumerGetNextRequest
  3007  		if err := json.Unmarshal(req, &cr); err != nil {
  3008  			return time.Time{}, -1, 0, false, 0, time.Time{}, err
  3009  		}
  3010  		var hbt time.Time
  3011  		if cr.Heartbeat > 0 {
  3012  			if cr.Heartbeat*2 > cr.Expires {
  3013  				return time.Time{}, 1, 0, false, 0, time.Time{}, errors.New("heartbeat value too large")
  3014  			}
  3015  			hbt = time.Now().Add(cr.Heartbeat)
  3016  		}
  3017  		if cr.Expires == time.Duration(0) {
  3018  			return time.Time{}, cr.Batch, cr.MaxBytes, cr.NoWait, cr.Heartbeat, hbt, nil
  3019  		}
  3020  		return time.Now().Add(cr.Expires), cr.Batch, cr.MaxBytes, cr.NoWait, cr.Heartbeat, hbt, nil
  3021  	default:
  3022  		if n, err := strconv.Atoi(string(req)); err == nil {
  3023  			return time.Time{}, n, 0, false, 0, time.Time{}, nil
  3024  		}
  3025  	}
  3026  
  3027  	return time.Time{}, 1, 0, false, 0, time.Time{}, nil
  3028  }
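
        // Illustrative request bodies parsed above: an empty body means a batch of
        // one, a bare number like "10" is a batch size, and a JSON body carries the
        // full options. Field names below are assumed from the public pull consumer
        // API; durations are in nanoseconds.
        //
        //	{"batch":10,"max_bytes":1048576,"expires":5000000000,"idle_heartbeat":1000000000,"no_wait":false}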
  3029  
  3030  // Represents a request that is on the internal waiting queue
  3031  type waitingRequest struct {
  3032  	next     *waitingRequest
  3033  	acc      *Account
  3034  	interest string
  3035  	reply    string
  3036  	n        int // For batching
  3037  	d        int // num delivered
  3038  	b        int // For max bytes tracking
  3039  	expires  time.Time
  3040  	received time.Time
  3041  	hb       time.Duration
  3042  	hbt      time.Time
  3043  	noWait   bool
  3044  }
  3045  
  3046  // sync.Pool for waiting requests.
  3047  var wrPool = sync.Pool{
  3048  	New: func() any {
  3049  		return new(waitingRequest)
  3050  	},
  3051  }
  3052  
  3053  // Recycle this request if it has no more pending items. If recycled, it can not be accessed after this call.
  3054  func (wr *waitingRequest) recycleIfDone() bool {
  3055  	if wr != nil && wr.n <= 0 {
  3056  		wr.recycle()
  3057  		return true
  3058  	}
  3059  	return false
  3060  }
  3061  
  3062  // Force a recycle.
  3063  func (wr *waitingRequest) recycle() {
  3064  	if wr != nil {
  3065  		wr.next, wr.acc, wr.interest, wr.reply = nil, nil, _EMPTY_, _EMPTY_
  3066  		wrPool.Put(wr)
  3067  	}
  3068  }
  3069  
  3070  // waiting queue for requests that are waiting for new messages to arrive.
  3071  type waitQueue struct {
  3072  	n, max int
  3073  	last   time.Time
  3074  	head   *waitingRequest
  3075  	tail   *waitingRequest
  3076  }
  3077  
  3078  // Create a new wait queue with at most max items.
  3079  func newWaitQueue(max int) *waitQueue {
  3080  	return &waitQueue{max: max}
  3081  }
  3082  
  3083  var (
  3084  	errWaitQueueFull = errors.New("wait queue is full")
  3085  	errWaitQueueNil  = errors.New("wait queue is nil")
  3086  )
  3087  
  3088  // Adds in a new request.
  3089  func (wq *waitQueue) add(wr *waitingRequest) error {
  3090  	if wq == nil {
  3091  		return errWaitQueueNil
  3092  	}
  3093  	if wq.isFull() {
  3094  		return errWaitQueueFull
  3095  	}
  3096  	if wq.head == nil {
  3097  		wq.head = wr
  3098  	} else {
  3099  		wq.tail.next = wr
  3100  	}
  3101  	// Always set tail.
  3102  	wq.tail = wr
  3103  	// Make sure next is nil.
  3104  	wr.next = nil
  3105  
  3106  	// Track last active via when we receive a request.
  3107  	wq.last = wr.received
  3108  	wq.n++
  3109  	return nil
  3110  }
  3111  
  3112  func (wq *waitQueue) isFull() bool {
  3113  	if wq == nil {
  3114  		return false
  3115  	}
  3116  	return wq.n == wq.max
  3117  }
  3118  
  3119  func (wq *waitQueue) isEmpty() bool {
  3120  	if wq == nil {
  3121  		return true
  3122  	}
  3123  	return wq.n == 0
  3124  }
  3125  
  3126  func (wq *waitQueue) len() int {
  3127  	if wq == nil {
  3128  		return 0
  3129  	}
  3130  	return wq.n
  3131  }
  3132  
  3133  // Peek will return the next request waiting or nil if empty.
  3134  func (wq *waitQueue) peek() *waitingRequest {
  3135  	if wq == nil {
  3136  		return nil
  3137  	}
  3138  	return wq.head
  3139  }
  3140  
  3141  // pop will return the next request and move the read cursor.
  3142  // This will now place a request that still has pending items at the end of the list.
  3143  func (wq *waitQueue) pop() *waitingRequest {
  3144  	wr := wq.peek()
  3145  	if wr != nil {
  3146  		wr.d++
  3147  		wr.n--
  3148  		// Always remove the current request on a pop, and move it to the end if still valid.
  3149  		// If it is the only one in the queue we don't need to move it, since that would be a no-op.
  3150  		if wr.n > 0 && wq.n > 1 {
  3151  			wq.removeCurrent()
  3152  			wq.add(wr)
  3153  		} else if wr.n <= 0 {
  3154  			wq.removeCurrent()
  3155  		}
  3156  	}
  3157  	return wr
  3158  }
  3159  
  3160  // Removes the current read pointer (head FIFO) entry.
  3161  func (wq *waitQueue) removeCurrent() {
  3162  	wq.remove(nil, wq.head)
  3163  }
  3164  
  3165  // Remove the wr element from the wait queue.
  3166  func (wq *waitQueue) remove(pre, wr *waitingRequest) {
  3167  	if wr == nil {
  3168  		return
  3169  	}
  3170  	if pre != nil {
  3171  		pre.next = wr.next
  3172  	} else if wr == wq.head {
  3173  		// We are removing head here.
  3174  		wq.head = wr.next
  3175  	}
  3176  	// Check if wr was our tail.
  3177  	if wr == wq.tail {
  3178  		// Check if we need to assign to pre.
  3179  		if wr.next == nil {
  3180  			wq.tail = pre
  3181  		} else {
  3182  			wq.tail = wr.next
  3183  		}
  3184  	}
  3185  	wq.n--
  3186  }
  3187  
  3188  // Return the map of pending requests keyed by the reply subject.
  3189  // Returns nil for push consumers or when the wait queue is not initialized.
  3190  func (o *consumer) pendingRequests() map[string]*waitingRequest {
  3191  	if o.waiting == nil {
  3192  		return nil
  3193  	}
  3194  	wq, m := o.waiting, make(map[string]*waitingRequest)
  3195  	for wr := wq.head; wr != nil; wr = wr.next {
  3196  		m[wr.reply] = wr
  3197  	}
  3198  
  3199  	return m
  3200  }
  3201  
  3202  // Return the next waiting request. This checks for expiration, max bytes and interest;
  3203  // noWait handling is left to processWaiting.
  3204  // Lock should be held.
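        // Requests that can not be satisfied are answered before being removed: a 409 status when the
        // message exceeds the request's remaining max bytes, and a 408 status when the request has
        // expired or its exported interest has gone away.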
  3205  func (o *consumer) nextWaiting(sz int) *waitingRequest {
  3206  	if o.waiting == nil || o.waiting.isEmpty() {
  3207  		return nil
  3208  	}
  3209  	for wr := o.waiting.peek(); !o.waiting.isEmpty(); wr = o.waiting.peek() {
  3210  		if wr == nil {
  3211  			break
  3212  		}
  3213  		// Check if we have max bytes set.
  3214  		if wr.b > 0 {
  3215  			if sz <= wr.b {
  3216  				wr.b -= sz
  3217  				// If we are right now at zero, set batch to 1 to deliver this one but stop after.
  3218  				if wr.b == 0 {
  3219  					wr.n = 1
  3220  				}
  3221  			} else {
  3222  				// Since we can't send that message to the requestor, we need to
  3223  				// notify that we are closing the request.
  3224  				const maxBytesT = "NATS/1.0 409 Message Size Exceeds MaxBytes\r\n%s: %d\r\n%s: %d\r\n\r\n"
  3225  				hdr := fmt.Appendf(nil, maxBytesT, JSPullRequestPendingMsgs, wr.n, JSPullRequestPendingBytes, wr.b)
  3226  				o.outq.send(newJSPubMsg(wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  3227  				// Remove the current one, no longer valid due to max bytes limit.
  3228  				o.waiting.removeCurrent()
  3229  				if o.node != nil {
  3230  					o.removeClusterPendingRequest(wr.reply)
  3231  				}
  3232  				wr.recycle()
  3233  				continue
  3234  			}
  3235  		}
  3236  
  3237  		if wr.expires.IsZero() || time.Now().Before(wr.expires) {
  3238  			rr := wr.acc.sl.Match(wr.interest)
  3239  			if len(rr.psubs)+len(rr.qsubs) > 0 {
  3240  				return o.waiting.pop()
  3241  			} else if time.Since(wr.received) < defaultGatewayRecentSubExpiration && (o.srv.leafNodeEnabled || o.srv.gateway.enabled) {
  3242  				return o.waiting.pop()
  3243  			} else if o.srv.gateway.enabled && o.srv.hasGatewayInterest(wr.acc.Name, wr.interest) {
  3244  				return o.waiting.pop()
  3245  			}
  3246  		} else {
  3247  			// We do check for expiration in `processWaiting`, but it is possible to hit the expiry here, and not there.
  3248  			hdr := fmt.Appendf(nil, "NATS/1.0 408 Request Timeout\r\n%s: %d\r\n%s: %d\r\n\r\n", JSPullRequestPendingMsgs, wr.n, JSPullRequestPendingBytes, wr.b)
  3249  			o.outq.send(newJSPubMsg(wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  3250  			o.waiting.removeCurrent()
  3251  			if o.node != nil {
  3252  				o.removeClusterPendingRequest(wr.reply)
  3253  			}
  3254  			wr.recycle()
  3255  			continue
  3256  
  3257  		}
  3258  		if wr.interest != wr.reply {
  3259  			const intExpT = "NATS/1.0 408 Interest Expired\r\n%s: %d\r\n%s: %d\r\n\r\n"
  3260  			hdr := fmt.Appendf(nil, intExpT, JSPullRequestPendingMsgs, wr.n, JSPullRequestPendingBytes, wr.b)
  3261  			o.outq.send(newJSPubMsg(wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  3262  		}
  3263  		// Remove the current one, no longer valid.
  3264  		o.waiting.removeCurrent()
  3265  		if o.node != nil {
  3266  			o.removeClusterPendingRequest(wr.reply)
  3267  		}
  3268  		wr.recycle()
  3269  	}
  3270  	return nil
  3271  }
  3272  
  3273  // Next message request.
  3274  type nextMsgReq struct {
  3275  	reply string
  3276  	msg   []byte
  3277  }
  3278  
  3279  var nextMsgReqPool sync.Pool
  3280  
  3281  func newNextMsgReq(reply string, msg []byte) *nextMsgReq {
  3282  	var nmr *nextMsgReq
  3283  	m := nextMsgReqPool.Get()
  3284  	if m != nil {
  3285  		nmr = m.(*nextMsgReq)
  3286  	} else {
  3287  		nmr = &nextMsgReq{}
  3288  	}
  3289  	// When getting something from a pool it is critical that all fields are
  3290  	// initialized. Doing it this way guarantees that if someone adds a field to
  3291  	// the structure, the compiler will fail the build if this line is not updated.
  3292  	(*nmr) = nextMsgReq{reply, msg}
  3293  	return nmr
  3294  }
  3295  
  3296  func (nmr *nextMsgReq) returnToPool() {
  3297  	if nmr == nil {
  3298  		return
  3299  	}
  3300  	nmr.reply, nmr.msg = _EMPTY_, nil
  3301  	nextMsgReqPool.Put(nmr)
  3302  }
  3303  
  3304  // processNextMsgReq will process a request for the next message available. A nil message payload means deliver
  3305  // a single message. If the payload is a formal request or a number parseable with Atoi(), then we will send a
  3306  // batch of messages without requiring another request to this endpoint, or an ACK.
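        // A sketch of a typical formal request payload (field names assumed from the client-facing
        // JetStream API, shown only for illustration; durations are in nanoseconds):
        //
        //	{"batch":10,"max_bytes":1048576,"expires":5000000000,"no_wait":false,"idle_heartbeat":1000000000}
        //
        // A bare number such as "10" is also accepted and treated as a batch size.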
  3307  func (o *consumer) processNextMsgReq(_ *subscription, c *client, _ *Account, _, reply string, msg []byte) {
  3308  	if reply == _EMPTY_ {
  3309  		return
  3310  	}
  3311  
  3312  	// Short circuit error here.
  3313  	if o.nextMsgReqs == nil {
  3314  		hdr := []byte("NATS/1.0 409 Consumer is push based\r\n\r\n")
  3315  		o.outq.send(newJSPubMsg(reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  3316  		return
  3317  	}
  3318  
  3319  	_, msg = c.msgParts(msg)
  3320  	o.nextMsgReqs.push(newNextMsgReq(reply, copyBytes(msg)))
  3321  }
  3322  
  3323  func (o *consumer) processNextMsgRequest(reply string, msg []byte) {
  3324  	o.mu.Lock()
  3325  	defer o.mu.Unlock()
  3326  
  3327  	mset := o.mset
  3328  	if mset == nil {
  3329  		return
  3330  	}
  3331  
  3332  	sendErr := func(status int, description string) {
  3333  		hdr := fmt.Appendf(nil, "NATS/1.0 %d %s\r\n\r\n", status, description)
  3334  		o.outq.send(newJSPubMsg(reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  3335  	}
  3336  
  3337  	if o.isPushMode() || o.waiting == nil {
  3338  		sendErr(409, "Consumer is push based")
  3339  		return
  3340  	}
  3341  
  3342  	// Check payload here to see if they sent in batch size or a formal request.
  3343  	expires, batchSize, maxBytes, noWait, hb, hbt, err := nextReqFromMsg(msg)
  3344  	if err != nil {
  3345  		sendErr(400, fmt.Sprintf("Bad Request - %v", err))
  3346  		return
  3347  	}
  3348  
  3349  	// Check for request limits
  3350  	if o.cfg.MaxRequestBatch > 0 && batchSize > o.cfg.MaxRequestBatch {
  3351  		sendErr(409, fmt.Sprintf("Exceeded MaxRequestBatch of %d", o.cfg.MaxRequestBatch))
  3352  		return
  3353  	}
  3354  
  3355  	if !expires.IsZero() && o.cfg.MaxRequestExpires > 0 && expires.After(time.Now().Add(o.cfg.MaxRequestExpires)) {
  3356  		sendErr(409, fmt.Sprintf("Exceeded MaxRequestExpires of %v", o.cfg.MaxRequestExpires))
  3357  		return
  3358  	}
  3359  
  3360  	if maxBytes > 0 && o.cfg.MaxRequestMaxBytes > 0 && maxBytes > o.cfg.MaxRequestMaxBytes {
  3361  		sendErr(409, fmt.Sprintf("Exceeded MaxRequestMaxBytes of %v", o.cfg.MaxRequestMaxBytes))
  3362  		return
  3363  	}
  3364  
  3365  	// If we have the max number of requests already pending try to expire.
  3366  	if o.waiting.isFull() {
  3367  		// Try to expire some of the requests.
  3368  		// We do not want to push too hard here so at maximum process once per sec.
  3369  		if time.Since(o.lwqic) > time.Second {
  3370  			o.processWaiting(false)
  3371  		}
  3372  	}
  3373  
  3374  	// If the request is noWait, check pending messages and requests to decide how to respond right away.
  3375  	if noWait {
  3376  		msgsPending := o.numPending() + uint64(len(o.rdq))
  3377  		// If no pending at all, decide what to do with request.
  3378  		// If no expires was set then fail.
  3379  		if msgsPending == 0 && expires.IsZero() {
  3380  			o.waiting.last = time.Now()
  3381  			sendErr(404, "No Messages")
  3382  			return
  3383  		}
  3384  		if msgsPending > 0 {
  3385  			_, _, batchPending, _ := o.processWaiting(false)
  3386  			if msgsPending < uint64(batchPending) {
  3387  				o.waiting.last = time.Now()
  3388  				sendErr(408, "Requests Pending")
  3389  				return
  3390  			}
  3391  		}
  3392  		// If we are here this should be considered a one-shot situation.
  3393  		// We will wait for expires but will return as soon as we have any messages.
  3394  	}
  3395  
  3396  	// If we receive this request through an account export, we need to track that interest subject and account.
  3397  	acc, interest := trackDownAccountAndInterest(o.acc, reply)
  3398  
  3399  	// Create a waiting request.
  3400  	wr := wrPool.Get().(*waitingRequest)
  3401  	wr.acc, wr.interest, wr.reply, wr.n, wr.d, wr.noWait, wr.expires, wr.hb, wr.hbt = acc, interest, reply, batchSize, 0, noWait, expires, hb, hbt
  3402  	wr.b = maxBytes
  3403  	wr.received = time.Now()
  3404  
  3405  	if err := o.waiting.add(wr); err != nil {
  3406  		sendErr(409, "Exceeded MaxWaiting")
  3407  		return
  3408  	}
  3409  	o.signalNewMessages()
  3410  	// If we are clustered update our followers about this request.
  3411  	if o.node != nil {
  3412  		o.addClusterPendingRequest(wr.reply)
  3413  	}
  3414  }
  3415  
  3416  func trackDownAccountAndInterest(acc *Account, interest string) (*Account, string) {
  3417  	for strings.HasPrefix(interest, replyPrefix) {
  3418  		oa := acc
  3419  		oa.mu.RLock()
  3420  		if oa.exports.responses == nil {
  3421  			oa.mu.RUnlock()
  3422  			break
  3423  		}
  3424  		si := oa.exports.responses[interest]
  3425  		if si == nil {
  3426  			oa.mu.RUnlock()
  3427  			break
  3428  		}
  3429  		acc, interest = si.acc, si.to
  3430  		oa.mu.RUnlock()
  3431  	}
  3432  	return acc, interest
  3433  }
  3434  
  3435  // Increase the delivery count for this message.
  3436  // ONLY used on redelivery semantics.
  3437  // Lock should be held.
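        // Note that o.rdc only tracks redeliveries, so the total delivery count reported is rdc[sseq]+1.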
  3438  func (o *consumer) incDeliveryCount(sseq uint64) uint64 {
  3439  	if o.rdc == nil {
  3440  		o.rdc = make(map[uint64]uint64)
  3441  	}
  3442  	o.rdc[sseq] += 1
  3443  	return o.rdc[sseq] + 1
  3444  }
  3445  
  3446  // Used if we have to adjust on failed delivery or bad lookups.
  3447  // Those failed attempts should not increase deliver count.
  3448  // Lock should be held.
  3449  func (o *consumer) decDeliveryCount(sseq uint64) {
  3450  	if o.rdc == nil {
  3451  		return
  3452  	}
  3453  	if dc, ok := o.rdc[sseq]; ok {
  3454  		if dc == 1 {
  3455  			delete(o.rdc, sseq)
  3456  		} else {
  3457  			o.rdc[sseq] -= 1
  3458  		}
  3459  	}
  3460  }
  3461  
  3462  // Send a delivery exceeded advisory.
  3463  func (o *consumer) notifyDeliveryExceeded(sseq, dc uint64) {
  3464  	e := JSConsumerDeliveryExceededAdvisory{
  3465  		TypedEvent: TypedEvent{
  3466  			Type: JSConsumerDeliveryExceededAdvisoryType,
  3467  			ID:   nuid.Next(),
  3468  			Time: time.Now().UTC(),
  3469  		},
  3470  		Stream:     o.stream,
  3471  		Consumer:   o.name,
  3472  		StreamSeq:  sseq,
  3473  		Deliveries: dc,
  3474  		Domain:     o.srv.getOpts().JetStreamDomain,
  3475  	}
  3476  
  3477  	j, err := json.Marshal(e)
  3478  	if err != nil {
  3479  		return
  3480  	}
  3481  
  3482  	o.sendAdvisory(o.deliveryExcEventT, j)
  3483  }
  3484  
  3485  // Check if the candidate subject matches a filter, if one is present.
  3486  // Lock should be held.
  3487  func (o *consumer) isFilteredMatch(subj string) bool {
  3488  	// No filter is automatic match.
  3489  	if o.subjf == nil {
  3490  		return true
  3491  	}
  3492  	for _, filter := range o.subjf {
  3493  		if !filter.hasWildcard && subj == filter.subject {
  3494  			return true
  3495  		}
  3496  	}
  3497  	// It's quicker to first check for non-wildcard filters, then
  3498  	// iterate again to check for subset match.
  3499  	tsa := [32]string{}
  3500  	tts := tokenizeSubjectIntoSlice(tsa[:0], subj)
  3501  	for _, filter := range o.subjf {
  3502  		if isSubsetMatchTokenized(tts, filter.tokenizedSubject) {
  3503  			return true
  3504  		}
  3505  	}
  3506  	return false
  3507  }
  3508  
  3509  // Check if the candidate filter subject is equal to or a subset match
  3510  // of one of the filter subjects.
  3511  // Lock should be held.
  3512  func (o *consumer) isEqualOrSubsetMatch(subj string) bool {
  3513  	for _, filter := range o.subjf {
  3514  		if !filter.hasWildcard && subj == filter.subject {
  3515  			return true
  3516  		}
  3517  	}
  3518  	tsa := [32]string{}
  3519  	tts := tokenizeSubjectIntoSlice(tsa[:0], subj)
  3520  	for _, filter := range o.subjf {
  3521  		if isSubsetMatchTokenized(filter.tokenizedSubject, tts) {
  3522  			return true
  3523  		}
  3524  	}
  3525  	return false
  3526  }
  3527  
  3528  var (
  3529  	errMaxAckPending = errors.New("max ack pending reached")
  3530  	errBadConsumer   = errors.New("consumer not valid")
  3531  	errNoInterest    = errors.New("consumer requires interest for delivery subject when ephemeral")
  3532  )
  3533  
  3534  // Get next available message from underlying store.
  3535  // Is partition aware and redeliver aware.
  3536  // Lock should be held.
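        // Returns the message to deliver, its delivery count, and any error from the store lookup.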
  3537  func (o *consumer) getNextMsg() (*jsPubMsg, uint64, error) {
  3538  	if o.mset == nil || o.mset.store == nil {
  3539  		return nil, 0, errBadConsumer
  3540  	}
  3541  	// Process redelivered messages before looking at a possible "skip list" (deliver last per subject).
  3542  	if o.hasRedeliveries() {
  3543  		var seq, dc uint64
  3544  		for seq = o.getNextToRedeliver(); seq > 0; seq = o.getNextToRedeliver() {
  3545  			dc = o.incDeliveryCount(seq)
  3546  			if o.maxdc > 0 && dc > o.maxdc {
  3547  				// Only send once
  3548  				if dc == o.maxdc+1 {
  3549  					o.notifyDeliveryExceeded(seq, dc-1)
  3550  				}
  3551  				// Make sure to remove from pending.
  3552  				if p, ok := o.pending[seq]; ok && p != nil {
  3553  					delete(o.pending, seq)
  3554  					o.updateDelivered(p.Sequence, seq, dc, p.Timestamp)
  3555  				}
  3556  				continue
  3557  			}
  3558  			if seq > 0 {
  3559  				pmsg := getJSPubMsgFromPool()
  3560  				sm, err := o.mset.store.LoadMsg(seq, &pmsg.StoreMsg)
  3561  				if sm == nil || err != nil {
  3562  					pmsg.returnToPool()
  3563  					pmsg, dc = nil, 0
  3564  					// Adjust back deliver count.
  3565  					o.decDeliveryCount(seq)
  3566  				}
  3567  				return pmsg, dc, err
  3568  			}
  3569  		}
  3570  	}
  3571  
  3572  	// Check if we have max pending.
  3573  	if o.maxp > 0 && len(o.pending) >= o.maxp {
  3574  		// maxp only set when ack policy != AckNone and user set MaxAckPending
  3575  		// Stall if we have hit max pending.
  3576  		return nil, 0, errMaxAckPending
  3577  	}
  3578  
  3579  	if o.hasSkipListPending() {
  3580  		seq := o.lss.seqs[0]
  3581  		if len(o.lss.seqs) == 1 {
  3582  			o.sseq = o.lss.resume
  3583  			o.lss = nil
  3584  			o.updateSkipped(o.sseq)
  3585  		} else {
  3586  			o.lss.seqs = o.lss.seqs[1:]
  3587  		}
  3588  		pmsg := getJSPubMsgFromPool()
  3589  		sm, err := o.mset.store.LoadMsg(seq, &pmsg.StoreMsg)
  3590  		if sm == nil || err != nil {
  3591  			pmsg.returnToPool()
  3592  		}
  3593  		o.sseq++
  3594  		return pmsg, 1, err
  3595  	}
  3596  
  3597  	// Hold onto this since we release the lock.
  3598  	store := o.mset.store
  3599  
  3600  	var sseq uint64
  3601  	var err error
  3602  	var sm *StoreMsg
  3603  	var pmsg = getJSPubMsgFromPool()
  3604  
  3605  	// Grab next message applicable to us.
  3606  	// We will unlock here in case of lots of contention, e.g. a work queue (WQ) stream.
  3607  	o.mu.Unlock()
  3608  	// Check if we are multi-filtered or not.
  3609  	if o.filters != nil {
  3610  		sm, sseq, err = store.LoadNextMsgMulti(o.filters, o.sseq, &pmsg.StoreMsg)
  3611  	} else if o.subjf != nil { // Means single filtered subject since o.filters means > 1.
  3612  		filter, wc := o.subjf[0].subject, o.subjf[0].hasWildcard
  3613  		sm, sseq, err = store.LoadNextMsg(filter, wc, o.sseq, &pmsg.StoreMsg)
  3614  	} else {
  3615  		// No filter here.
  3616  		sm, sseq, err = store.LoadNextMsg(_EMPTY_, false, o.sseq, &pmsg.StoreMsg)
  3617  	}
  3618  	if sm == nil {
  3619  		pmsg.returnToPool()
  3620  		pmsg = nil
  3621  	}
  3622  	o.mu.Lock()
  3623  	// Check if we should move our o.sseq.
  3624  	if sseq >= o.sseq {
  3625  		// If we are moving step by step then sseq == o.sseq.
  3626  		// If we have jumped we should update skipped for other replicas.
  3627  		if sseq != o.sseq && err == ErrStoreEOF {
  3628  			o.updateSkipped(sseq + 1)
  3629  		}
  3630  		o.sseq = sseq + 1
  3631  	}
  3632  	return pmsg, 1, err
  3633  }
  3634  
  3635  // Will check for expiration and lack of interest on waiting requests.
  3636  // Will also do any heartbeats and return the next expiration or HB interval.
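        // Returns the number of requests expired and removed, the number still waiting, the sum of
        // their remaining batch sizes, and the earliest expiration or heartbeat deadline (zero if none).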
  3637  func (o *consumer) processWaiting(eos bool) (int, int, int, time.Time) {
  3638  	var fexp time.Time
  3639  	if o.srv == nil || o.waiting.isEmpty() {
  3640  		return 0, 0, 0, fexp
  3641  	}
  3642  	// Mark our last check time.
  3643  	o.lwqic = time.Now()
  3644  
  3645  	var expired, brp int
  3646  	s, now := o.srv, time.Now()
  3647  
  3648  	wq := o.waiting
  3649  	remove := func(pre, wr *waitingRequest) *waitingRequest {
  3650  		expired++
  3651  		if o.node != nil {
  3652  			o.removeClusterPendingRequest(wr.reply)
  3653  		}
  3654  		next := wr.next
  3655  		wq.remove(pre, wr)
  3656  		wr.recycle()
  3657  		return next
  3658  	}
  3659  
  3660  	var pre *waitingRequest
  3661  	for wr := wq.head; wr != nil; {
  3662  		// Check expiration.
  3663  		if (eos && wr.noWait && wr.d > 0) || (!wr.expires.IsZero() && now.After(wr.expires)) {
  3664  			hdr := fmt.Appendf(nil, "NATS/1.0 408 Request Timeout\r\n%s: %d\r\n%s: %d\r\n\r\n", JSPullRequestPendingMsgs, wr.n, JSPullRequestPendingBytes, wr.b)
  3665  			o.outq.send(newJSPubMsg(wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  3666  			wr = remove(pre, wr)
  3667  			continue
  3668  		}
  3669  		// Now check interest.
  3670  		rr := wr.acc.sl.Match(wr.interest)
  3671  		interest := len(rr.psubs)+len(rr.qsubs) > 0
  3672  		if !interest && (s.leafNodeEnabled || s.gateway.enabled) {
  3673  			// If we are here check on gateways and leaf nodes (as they can mask gateways on the other end).
  3674  			// If we have interest or the request is too young, do not expire it.
  3675  			if time.Since(wr.received) < defaultGatewayRecentSubExpiration {
  3676  				interest = true
  3677  			} else if s.gateway.enabled && s.hasGatewayInterest(wr.acc.Name, wr.interest) {
  3678  				interest = true
  3679  			}
  3680  		}
  3681  		// Check if we have interest.
  3682  		if !interest {
  3683  			// No more interest here so go ahead and remove this one from our list.
  3684  			wr = remove(pre, wr)
  3685  			continue
  3686  		}
  3687  
  3688  		// If interest, update batch pending requests counter and update fexp timer.
  3689  		brp += wr.n
  3690  		if !wr.hbt.IsZero() {
  3691  			if now.After(wr.hbt) {
  3692  				// Fire off a heartbeat here.
  3693  				o.sendIdleHeartbeat(wr.reply)
  3694  				// Update next HB.
  3695  				wr.hbt = now.Add(wr.hb)
  3696  			}
  3697  			if fexp.IsZero() || wr.hbt.Before(fexp) {
  3698  				fexp = wr.hbt
  3699  			}
  3700  		}
  3701  		if !wr.expires.IsZero() && (fexp.IsZero() || wr.expires.Before(fexp)) {
  3702  			fexp = wr.expires
  3703  		}
  3704  		// Update pre and wr here.
  3705  		pre = wr
  3706  		wr = wr.next
  3707  	}
  3708  
  3709  	return expired, wq.len(), brp, fexp
  3710  }
  3711  
  3712  // Will check to make sure those waiting still have registered interest.
  3713  func (o *consumer) checkWaitingForInterest() bool {
  3714  	o.processWaiting(true)
  3715  	return o.waiting.len() > 0
  3716  }
  3717  
  3718  // Lock should be held.
  3719  func (o *consumer) hbTimer() (time.Duration, *time.Timer) {
  3720  	if o.cfg.Heartbeat == 0 {
  3721  		return 0, nil
  3722  	}
  3723  	return o.cfg.Heartbeat, time.NewTimer(o.cfg.Heartbeat)
  3724  }
  3725  
  3726  // Check here for conditions when our ack floor may have drifted below the streams first sequence.
  3727  // In general this is accounted for in normal operations, but if the consumer misses the signal from
  3728  // the stream it will not clear the message and move the ack state.
  3729  // Should only be called from consumer leader.
  3730  func (o *consumer) checkAckFloor() {
  3731  	o.mu.RLock()
  3732  	mset, closed, asflr, numPending := o.mset, o.closed, o.asflr, len(o.pending)
  3733  	o.mu.RUnlock()
  3734  
  3735  	if asflr == 0 || closed || mset == nil {
  3736  		return
  3737  	}
  3738  
  3739  	var ss StreamState
  3740  	mset.store.FastState(&ss)
  3741  
  3742  	// If our floor is equal or greater, that is normal and there is nothing for us to do.
  3743  	if ss.FirstSeq == 0 || asflr >= ss.FirstSeq-1 {
  3744  		return
  3745  	}
  3746  
  3747  	// Check which linear space is shorter to walk.
  3748  	if ss.FirstSeq-asflr-1 < uint64(numPending) {
  3749  		// Process all messages that no longer exist.
  3750  		for seq := asflr + 1; seq < ss.FirstSeq; seq++ {
  3751  			// Check if this message was pending.
  3752  			o.mu.RLock()
  3753  			p, isPending := o.pending[seq]
  3754  			var rdc uint64 = 1
  3755  			if o.rdc != nil {
  3756  				rdc = o.rdc[seq]
  3757  			}
  3758  			o.mu.RUnlock()
  3759  			// If it was pending for us, get rid of it.
  3760  			if isPending {
  3761  				o.processTerm(seq, p.Sequence, rdc, ackTermLimitsReason)
  3762  			}
  3763  		}
  3764  	} else if numPending > 0 {
  3765  		// Here it is shorter to walk pending.
  3766  		// toTerm holds seq, dseq, rdc for each entry.
  3767  		toTerm := make([]uint64, 0, numPending*3)
  3768  		o.mu.RLock()
  3769  		for seq, p := range o.pending {
  3770  			if seq < ss.FirstSeq {
  3771  				var dseq uint64 = 1
  3772  				if p != nil {
  3773  					dseq = p.Sequence
  3774  				}
  3775  				var rdc uint64 = 1
  3776  				if o.rdc != nil {
  3777  					rdc = o.rdc[seq]
  3778  				}
  3779  				toTerm = append(toTerm, seq, dseq, rdc)
  3780  			}
  3781  		}
  3782  		o.mu.RUnlock()
  3783  
  3784  		for i := 0; i < len(toTerm); i += 3 {
  3785  			seq, dseq, rdc := toTerm[i], toTerm[i+1], toTerm[i+2]
  3786  			o.processTerm(seq, dseq, rdc, ackTermLimitsReason)
  3787  		}
  3788  	}
  3789  
  3790  	// Do one final check here.
  3791  	o.mu.Lock()
  3792  	defer o.mu.Unlock()
  3793  
  3794  	// If we are closed do not change anything and simply return.
  3795  	if o.closed {
  3796  		return
  3797  	}
  3798  
  3799  	// If we are here, and this should be rare, we still are off with our ack floor.
  3800  	// We will set it explicitly to 1 behind our current lowest in pending, or if
  3801  	// pending is empty, to our current delivered -1.
  3802  	if o.asflr < ss.FirstSeq-1 {
  3803  		var psseq, pdseq uint64
  3804  		for seq, p := range o.pending {
  3805  			if psseq == 0 || seq < psseq {
  3806  				psseq, pdseq = seq, p.Sequence
  3807  			}
  3808  		}
  3809  		// If we still have none, set to current delivered -1.
  3810  		if psseq == 0 {
  3811  			psseq, pdseq = o.sseq-1, o.dseq-1
  3812  			// If still not adjusted.
  3813  			if psseq < ss.FirstSeq-1 {
  3814  				psseq, pdseq = ss.FirstSeq-1, ss.FirstSeq-1
  3815  			}
  3816  		} else {
  3817  			// Since this was set via the pending, we should not include
  3818  			// it directly but set floors to -1.
  3819  			psseq, pdseq = psseq-1, pdseq-1
  3820  		}
  3821  		o.asflr, o.adflr = psseq, pdseq
  3822  	}
  3823  }
  3824  
  3825  func (o *consumer) processInboundAcks(qch chan struct{}) {
  3826  	// Grab the server lock to watch for server quit.
  3827  	o.mu.RLock()
  3828  	s, mset := o.srv, o.mset
  3829  	hasInactiveThresh := o.cfg.InactiveThreshold > 0
  3830  	o.mu.RUnlock()
  3831  
  3832  	if s == nil || mset == nil {
  3833  		return
  3834  	}
  3835  
  3836  	// We will check this on entry and periodically.
  3837  	o.checkAckFloor()
  3838  
  3839  	// How often we will check for ack floor drift.
  3840  	// Spread these checks out when large numbers of consumers start on a server restart.
  3841  	delta := time.Duration(rand.Int63n(int64(time.Minute)))
  3842  	ticker := time.NewTicker(time.Minute + delta)
  3843  	defer ticker.Stop()
  3844  
  3845  	for {
  3846  		select {
  3847  		case <-o.ackMsgs.ch:
  3848  			acks := o.ackMsgs.pop()
  3849  			for _, ack := range acks {
  3850  				o.processAck(ack.subject, ack.reply, ack.hdr, ack.msg)
  3851  				ack.returnToPool()
  3852  			}
  3853  			o.ackMsgs.recycle(&acks)
  3854  			// If we have an inactiveThreshold set, mark our activity.
  3855  			if hasInactiveThresh {
  3856  				o.suppressDeletion()
  3857  			}
  3858  		case <-ticker.C:
  3859  			o.checkAckFloor()
  3860  		case <-qch:
  3861  			return
  3862  		case <-s.quitCh:
  3863  			return
  3864  		}
  3865  	}
  3866  }
  3867  
  3868  // Process inbound next message requests.
  3869  func (o *consumer) processInboundNextMsgReqs(qch chan struct{}) {
  3870  	// Grab the server lock to watch for server quit.
  3871  	o.mu.RLock()
  3872  	s := o.srv
  3873  	o.mu.RUnlock()
  3874  
  3875  	for {
  3876  		select {
  3877  		case <-o.nextMsgReqs.ch:
  3878  			reqs := o.nextMsgReqs.pop()
  3879  			for _, req := range reqs {
  3880  				o.processNextMsgRequest(req.reply, req.msg)
  3881  				req.returnToPool()
  3882  			}
  3883  			o.nextMsgReqs.recycle(&reqs)
  3884  		case <-qch:
  3885  			return
  3886  		case <-s.quitCh:
  3887  			return
  3888  		}
  3889  	}
  3890  }
  3891  
  3892  // Suppress auto cleanup on ack activity of any kind.
  3893  func (o *consumer) suppressDeletion() {
  3894  	o.mu.Lock()
  3895  	defer o.mu.Unlock()
  3896  
  3897  	if o.closed {
  3898  		return
  3899  	}
  3900  
  3901  	if o.isPushMode() && o.dtmr != nil {
  3902  		// if dtmr is not nil we have started the countdown, simply reset to threshold.
  3903  		o.dtmr.Reset(o.dthresh)
  3904  	} else if o.isPullMode() && o.waiting != nil {
  3905  		// Pull mode always has timer running, just update last on waiting queue.
  3906  		o.waiting.last = time.Now()
  3907  	}
  3908  }
  3909  
  3910  // loopAndGatherMsgs waits for messages for the consumer.
  3911  // qch is the quit channel used to signal that the loop should exit.
  3912  func (o *consumer) loopAndGatherMsgs(qch chan struct{}) {
  3913  	// On startup check to see if we are in a replay situation where the replay policy is not instant.
  3914  	var (
  3915  		lts  int64 // last time stamp seen, used for replay.
  3916  		lseq uint64
  3917  	)
  3918  
  3919  	o.mu.RLock()
  3920  	mset := o.mset
  3921  	getLSeq := o.replay
  3922  	o.mu.RUnlock()
  3923  	// consumer is closed when mset is set to nil.
  3924  	if mset == nil {
  3925  		return
  3926  	}
  3927  	if getLSeq {
  3928  		lseq = mset.state().LastSeq
  3929  	}
  3930  
  3931  	o.mu.Lock()
  3932  	s := o.srv
  3933  	// need to check again if consumer is closed
  3934  	if o.mset == nil {
  3935  		o.mu.Unlock()
  3936  		return
  3937  	}
  3938  	// For idle heartbeat support.
  3939  	var hbc <-chan time.Time
  3940  	hbd, hb := o.hbTimer()
  3941  	if hb != nil {
  3942  		hbc = hb.C
  3943  	}
  3944  	// Interest changes.
  3945  	inch := o.inch
  3946  	o.mu.Unlock()
  3947  
  3948  	// Grab the stream's retention policy
  3949  	mset.mu.RLock()
  3950  	rp := mset.cfg.Retention
  3951  	mset.mu.RUnlock()
  3952  
  3953  	var err error
  3954  
  3955  	// Deliver all the msgs we have now, once done or on a condition, we wait for new ones.
  3956  	for {
  3957  		var (
  3958  			pmsg     *jsPubMsg
  3959  			dc       uint64
  3960  			dsubj    string
  3961  			ackReply string
  3962  			delay    time.Duration
  3963  			sz       int
  3964  			wrn, wrb int
  3965  		)
  3966  
  3967  		o.mu.Lock()
  3968  
  3969  		// consumer is closed when mset is set to nil.
  3970  		if o.mset == nil {
  3971  			o.mu.Unlock()
  3972  			return
  3973  		}
  3974  
  3975  		// Clear last error.
  3976  		err = nil
  3977  
  3978  		// If the consumer is paused then stop sending.
  3979  		if o.cfg.PauseUntil != nil && !o.cfg.PauseUntil.IsZero() && time.Now().Before(*o.cfg.PauseUntil) {
  3980  			// If the consumer is paused and we haven't reached the deadline yet then
  3981  			// go back to waiting.
  3982  			goto waitForMsgs
  3983  		}
  3984  
  3985  		// If we are in push mode and not active or under flowcontrol let's stop sending.
  3986  		if o.isPushMode() {
  3987  			if !o.active || (o.maxpb > 0 && o.pbytes > o.maxpb) {
  3988  				goto waitForMsgs
  3989  			}
  3990  		} else if o.waiting.isEmpty() {
  3991  			// If we are in pull mode and no one is waiting already break and wait.
  3992  			goto waitForMsgs
  3993  		}
  3994  
  3995  		// Grab our next msg.
  3996  		pmsg, dc, err = o.getNextMsg()
  3997  
  3998  		// getNextMsg can release the lock, so we need to check this condition again here.
  3999  		if o.closed || o.mset == nil {
  4000  			o.mu.Unlock()
  4001  			return
  4002  		}
  4003  
  4004  		// On error either wait or return.
  4005  		if err != nil || pmsg == nil {
  4006  			// On EOF we can optionally fast sync num pending state.
  4007  			if err == ErrStoreEOF {
  4008  				o.checkNumPendingOnEOF()
  4009  			}
  4010  			if err == ErrStoreMsgNotFound || err == errDeletedMsg || err == ErrStoreEOF || err == errMaxAckPending {
  4011  				goto waitForMsgs
  4012  			} else if err == errPartialCache {
  4013  				s.Warnf("Unexpected partial cache error looking up message for consumer '%s > %s > %s'",
  4014  					o.mset.acc, o.mset.cfg.Name, o.cfg.Name)
  4015  				goto waitForMsgs
  4016  
  4017  			} else {
  4018  				s.Errorf("Received an error looking up message for consumer '%s > %s > %s': %v",
  4019  					o.mset.acc, o.mset.cfg.Name, o.cfg.Name, err)
  4020  				goto waitForMsgs
  4021  			}
  4022  		}
  4023  
  4024  		// Update our cached num pending here first.
  4025  		if dc == 1 {
  4026  			o.npc--
  4027  		}
  4028  		// Pre-calculate ackReply
  4029  		ackReply = o.ackReply(pmsg.seq, o.dseq, dc, pmsg.ts, o.numPending())
  4030  
  4031  		// If headers only do not send msg payload.
  4032  		// Add in msg size itself as header.
  4033  		if o.cfg.HeadersOnly {
  4034  			convertToHeadersOnly(pmsg)
  4035  		}
  4036  		// Calculate payload size. This can be calculated on client side.
  4037  		// We do not include transport subject here since not generally known on client.
  4038  		sz = len(pmsg.subj) + len(ackReply) + len(pmsg.hdr) + len(pmsg.msg)
  4039  
  4040  		if o.isPushMode() {
  4041  			dsubj = o.dsubj
  4042  		} else if wr := o.nextWaiting(sz); wr != nil {
  4043  			wrn, wrb = wr.n, wr.b
  4044  			dsubj = wr.reply
  4045  			if done := wr.recycleIfDone(); done && o.node != nil {
  4046  				o.removeClusterPendingRequest(dsubj)
  4047  			} else if !done && wr.hb > 0 {
  4048  				wr.hbt = time.Now().Add(wr.hb)
  4049  			}
  4050  		} else {
  4051  			// We will redo this one as long as this is not a redelivery.
  4052  			if dc == 1 {
  4053  				o.sseq--
  4054  				o.npc++
  4055  			}
  4056  			pmsg.returnToPool()
  4057  			goto waitForMsgs
  4058  		}
  4059  
  4060  		// If we are in a replay scenario and have not caught up check if we need to delay here.
  4061  		if o.replay && lts > 0 {
  4062  			if delay = time.Duration(pmsg.ts - lts); delay > time.Millisecond {
  4063  				o.mu.Unlock()
  4064  				select {
  4065  				case <-qch:
  4066  					pmsg.returnToPool()
  4067  					return
  4068  				case <-time.After(delay):
  4069  				}
  4070  				o.mu.Lock()
  4071  			}
  4072  		}
  4073  
  4074  		// Track this regardless.
  4075  		lts = pmsg.ts
  4076  
  4077  		// If we have a rate limit set make sure we check that here.
  4078  		if o.rlimit != nil {
  4079  			now := time.Now()
  4080  			r := o.rlimit.ReserveN(now, sz)
  4081  			delay := r.DelayFrom(now)
  4082  			if delay > 0 {
  4083  				o.mu.Unlock()
  4084  				select {
  4085  				case <-qch:
  4086  					pmsg.returnToPool()
  4087  					return
  4088  				case <-time.After(delay):
  4089  				}
  4090  				o.mu.Lock()
  4091  			}
  4092  		}
  4093  
  4094  		// Do actual delivery.
  4095  		o.deliverMsg(dsubj, ackReply, pmsg, dc, rp)
  4096  
  4097  		// If the request fulfilled its batch size but still has pending bytes, send information about it.
  4098  		if wrn <= 0 && wrb > 0 {
  4099  			o.outq.send(newJSPubMsg(dsubj, _EMPTY_, _EMPTY_, fmt.Appendf(nil, JsPullRequestRemainingBytesT, JSPullRequestPendingMsgs, wrn, JSPullRequestPendingBytes, wrb), nil, nil, 0))
  4100  		}
  4101  		// Reset our idle heartbeat timer if set.
  4102  		if hb != nil {
  4103  			hb.Reset(hbd)
  4104  		}
  4105  
  4106  		o.mu.Unlock()
  4107  		continue
  4108  
  4109  	waitForMsgs:
  4110  		// If we were in a replay state check to see if we are caught up. If so clear.
  4111  		if o.replay && o.sseq > lseq {
  4112  			o.replay = false
  4113  		}
  4114  
  4115  		// Make sure to process any expired requests that are pending.
  4116  		var wrExp <-chan time.Time
  4117  		if o.isPullMode() {
  4118  		// Don't expire one-shots if we are here because of the max ack pending limit.
  4119  			_, _, _, fexp := o.processWaiting(err != errMaxAckPending)
  4120  			if !fexp.IsZero() {
  4121  				expires := time.Until(fexp)
  4122  				if expires <= 0 {
  4123  					expires = time.Millisecond
  4124  				}
  4125  				wrExp = time.NewTimer(expires).C
  4126  			}
  4127  		}
  4128  
  4129  		// We will wait here for new messages to arrive.
  4130  		mch, odsubj := o.mch, o.cfg.DeliverSubject
  4131  		o.mu.Unlock()
  4132  
  4133  		select {
  4134  		case <-mch:
  4135  			// Messages are waiting.
  4136  		case interest := <-inch:
  4137  			// inch can be nil on pull-based, but then this will
  4138  			// just block and not fire.
  4139  			o.updateDeliveryInterest(interest)
  4140  		case <-qch:
  4141  			return
  4142  		case <-wrExp:
  4143  			o.mu.Lock()
  4144  			o.processWaiting(true)
  4145  			o.mu.Unlock()
  4146  		case <-hbc:
  4147  			if o.isActive() {
  4148  				o.mu.RLock()
  4149  				o.sendIdleHeartbeat(odsubj)
  4150  				o.mu.RUnlock()
  4151  			}
  4152  			// Reset our idle heartbeat timer.
  4153  			hb.Reset(hbd)
  4154  		}
  4155  	}
  4156  }
  4157  
  4158  // Lock should be held.
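        // A sketch of the resulting status message, assuming the usual values of the header
        // constants used below:
        //
        //	NATS/1.0 100 Idle Heartbeat
        //	Nats-Last-Consumer: 21
        //	Nats-Last-Stream: 42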
  4159  func (o *consumer) sendIdleHeartbeat(subj string) {
  4160  	const t = "NATS/1.0 100 Idle Heartbeat\r\n%s: %d\r\n%s: %d\r\n\r\n"
  4161  	sseq, dseq := o.sseq-1, o.dseq-1
  4162  	hdr := fmt.Appendf(nil, t, JSLastConsumerSeq, dseq, JSLastStreamSeq, sseq)
  4163  	if fcp := o.fcid; fcp != _EMPTY_ {
  4164  		// Add in that we are stalled on flow control here.
  4165  		addOn := fmt.Appendf(nil, "%s: %s\r\n\r\n", JSConsumerStalled, fcp)
  4166  		hdr = append(hdr[:len(hdr)-LEN_CR_LF], []byte(addOn)...)
  4167  	}
  4168  	o.outq.send(newJSPubMsg(subj, _EMPTY_, _EMPTY_, hdr, nil, nil, 0))
  4169  }
  4170  
  4171  func (o *consumer) ackReply(sseq, dseq, dc uint64, ts int64, pending uint64) string {
  4172  	return fmt.Sprintf(o.ackReplyT, dc, sseq, dseq, ts, pending)
  4173  }
  4174  
  4175  // Used mostly for testing. Sets max pending bytes for flow control setups.
  4176  func (o *consumer) setMaxPendingBytes(limit int) {
  4177  	o.pblimit = limit
  4178  	o.maxpb = limit / 16
  4179  	if o.maxpb == 0 {
  4180  		o.maxpb = 1
  4181  	}
  4182  }
  4183  
  4184  // Does some quick sanity checks to see if we should re-calculate num pending.
  4185  // There is a race when decrementing under contention at the beginning of the stream:
  4186  // getNextMsg can skip a deleted msg, and then the decStreamPending call fires.
  4187  // If the cached value looks inconsistent we force a re-calculation.
  4188  // Lock should be held.
  4189  func (o *consumer) checkNumPending() uint64 {
  4190  	if o.mset != nil {
  4191  		var state StreamState
  4192  		o.mset.store.FastState(&state)
  4193  		if o.sseq > state.LastSeq && o.npc != 0 || o.npc > int64(state.Msgs) {
  4194  			// Re-calculate.
  4195  			o.streamNumPending()
  4196  		}
  4197  	}
  4198  	return o.numPending()
  4199  }
  4200  
  4201  // Lock should be held.
  4202  func (o *consumer) numPending() uint64 {
  4203  	if o.npc < 0 {
  4204  		return 0
  4205  	}
  4206  	return uint64(o.npc)
  4207  }
  4208  
  4209  // This will do a quick sanity check on num pending when we encounter
  4210  // an EOF in loopAndGatherMsgs.
  4211  // Lock should be held.
  4212  func (o *consumer) checkNumPendingOnEOF() {
  4213  	if o.mset == nil {
  4214  		return
  4215  	}
  4216  	var state StreamState
  4217  	o.mset.store.FastState(&state)
  4218  	if o.sseq > state.LastSeq && o.npc != 0 {
  4219  		// We know here we can reset our running state for num pending.
  4220  		o.npc, o.npf = 0, state.LastSeq
  4221  	}
  4222  }
  4223  
  4224  // Call into streamNumPending after acquiring the consumer lock.
  4225  func (o *consumer) streamNumPendingLocked() uint64 {
  4226  	o.mu.Lock()
  4227  	defer o.mu.Unlock()
  4228  	return o.streamNumPending()
  4229  }
  4230  
  4231  // Will force a set from the stream store of num pending.
  4232  // Depends on delivery policy, for last per subject we calculate differently.
  4233  // Lock should be held.
  4234  func (o *consumer) streamNumPending() uint64 {
  4235  	if o.mset == nil || o.mset.store == nil {
  4236  		o.npc, o.npf = 0, 0
  4237  		return 0
  4238  	}
  4239  	npc, npf := o.calculateNumPending()
  4240  	o.npc, o.npf = int64(npc), npf
  4241  	return o.numPending()
  4242  }
  4243  
  4244  // Will calculate num pending but only requires a read lock.
  4245  // Depends on delivery policy, for last per subject we calculate differently.
  4246  // At least RLock should be held.
  4247  func (o *consumer) calculateNumPending() (npc, npf uint64) {
  4248  	if o.mset == nil || o.mset.store == nil {
  4249  		return 0, 0
  4250  	}
  4251  
  4252  	isLastPerSubject := o.cfg.DeliverPolicy == DeliverLastPerSubject
  4253  
  4254  	// Deliver Last Per Subject calculates num pending differently.
  4255  	if isLastPerSubject {
  4256  		// Consumer without filters.
  4257  		if o.subjf == nil {
  4258  			return o.mset.store.NumPending(o.sseq, _EMPTY_, isLastPerSubject)
  4259  		}
  4260  		// Consumer with filters.
  4261  		for _, filter := range o.subjf {
  4262  			lnpc, lnpf := o.mset.store.NumPending(o.sseq, filter.subject, isLastPerSubject)
  4263  			npc += lnpc
  4264  			if lnpf > npf {
  4265  				npf = lnpf // Always last
  4266  			}
  4267  		}
  4268  		return npc, npf
  4269  	}
  4270  	// Every other Delivery Policy is handled here.
  4271  	// Consumer without filters.
  4272  	if o.subjf == nil {
  4273  		return o.mset.store.NumPending(o.sseq, _EMPTY_, false)
  4274  	}
  4275  	// Consumer with filters.
  4276  	for _, filter := range o.subjf {
  4277  		lnpc, lnpf := o.mset.store.NumPending(o.sseq, filter.subject, false)
  4278  		npc += lnpc
  4279  		if lnpf > npf {
  4280  			npf = lnpf // Always last
  4281  		}
  4282  	}
  4283  	return npc, npf
  4284  }
  4285  
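        // convertToHeadersOnly drops the message payload and records its original size in a JSMsgSize
        // header. A sketch of the resulting header block for a 128 byte payload, assuming the usual
        // values of the header constants:
        //
        //	NATS/1.0
        //	Nats-Msg-Size: 128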
  4286  func convertToHeadersOnly(pmsg *jsPubMsg) {
  4287  	// If headers only do not send msg payload.
  4288  	// Add in msg size itself as header.
  4289  	hdr, msg := pmsg.hdr, pmsg.msg
  4290  	var bb bytes.Buffer
  4291  	if len(hdr) == 0 {
  4292  		bb.WriteString(hdrLine)
  4293  	} else {
  4294  		bb.Write(hdr)
  4295  		bb.Truncate(len(hdr) - LEN_CR_LF)
  4296  	}
  4297  	bb.WriteString(JSMsgSize)
  4298  	bb.WriteString(": ")
  4299  	bb.WriteString(strconv.FormatInt(int64(len(msg)), 10))
  4300  	bb.WriteString(CR_LF)
  4301  	bb.WriteString(CR_LF)
  4302  	// Replace underlying buf which we can use directly when we send.
  4303  	// TODO(dlc) - Probably just use directly when forming bytes.Buffer?
  4304  	pmsg.buf = pmsg.buf[:0]
  4305  	pmsg.buf = append(pmsg.buf, bb.Bytes()...)
  4306  	// Replace with new header.
  4307  	pmsg.hdr = pmsg.buf
  4308  	// Cancel msg payload
  4309  	pmsg.msg = nil
  4310  }
  4311  
  4312  // Deliver a msg to the consumer.
  4313  // Lock should be held and o.mset validated to be non-nil.
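        // Delivery updates the delivered state first, then queues the message on the outbound queue;
        // for the AckExplicit and AckAll policies the sequence is tracked as pending until acknowledged.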
  4314  func (o *consumer) deliverMsg(dsubj, ackReply string, pmsg *jsPubMsg, dc uint64, rp RetentionPolicy) {
  4315  	if o.mset == nil {
  4316  		pmsg.returnToPool()
  4317  		return
  4318  	}
  4319  
  4320  	dseq := o.dseq
  4321  	o.dseq++
  4322  
  4323  	pmsg.dsubj, pmsg.reply, pmsg.o = dsubj, ackReply, o
  4324  	psz := pmsg.size()
  4325  
  4326  	if o.maxpb > 0 {
  4327  		o.pbytes += psz
  4328  	}
  4329  
  4330  	mset := o.mset
  4331  	ap := o.cfg.AckPolicy
  4332  
  4333  	// Can't touch pmsg after sending it, so capture what we need.
  4334  	seq, ts := pmsg.seq, pmsg.ts
  4335  
  4336  	// Update delivered first.
  4337  	o.updateDelivered(dseq, seq, dc, ts)
  4338  
  4339  	// Send message.
  4340  	o.outq.send(pmsg)
  4341  
  4342  	if ap == AckExplicit || ap == AckAll {
  4343  		o.trackPending(seq, dseq)
  4344  	} else if ap == AckNone {
  4345  		o.adflr = dseq
  4346  		o.asflr = seq
  4347  	}
  4348  
  4349  	// Flow control.
  4350  	if o.maxpb > 0 && o.needFlowControl(psz) {
  4351  		o.sendFlowControl()
  4352  	}
  4353  
  4354  	// If pull mode and we have inactivity threshold, signaled by dthresh, update last activity.
  4355  	if o.isPullMode() && o.dthresh > 0 {
  4356  		o.waiting.last = time.Now()
  4357  	}
  4358  
  4359  	// If we are ack none and mset is interest only we should make sure stream removes interest.
  4360  	if ap == AckNone && rp != LimitsPolicy {
  4361  		if o.node == nil || o.cfg.Direct {
  4362  			mset.ackq.push(seq)
  4363  		} else {
  4364  			o.updateAcks(dseq, seq)
  4365  		}
  4366  	}
  4367  }
  4368  
  4369  func (o *consumer) needFlowControl(sz int) bool {
  4370  	if o.maxpb == 0 {
  4371  		return false
  4372  	}
  4373  	// Decide whether to send a flow control message which the consumer will need to respond to.
  4374  	// We send when we are over 50% of our current window limit.
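        	// For example, with a maxpb of 8MB a flow control request goes out once pbytes exceeds 4MB.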
  4375  	if o.fcid == _EMPTY_ && o.pbytes > o.maxpb/2 {
  4376  		return true
  4377  	}
  4378  	// If we have an existing outstanding FC, check to see if we need to expand the o.fcsz
  4379  	if o.fcid != _EMPTY_ && (o.pbytes-o.fcsz) >= o.maxpb {
  4380  		o.fcsz += sz
  4381  	}
  4382  	return false
  4383  }
  4384  
  4385  func (o *consumer) processFlowControl(_ *subscription, c *client, _ *Account, subj, _ string, _ []byte) {
  4386  	o.mu.Lock()
  4387  	defer o.mu.Unlock()
  4388  
  4389  	// Ignore if not the latest we have sent out.
  4390  	if subj != o.fcid {
  4391  		return
  4392  	}
  4393  
  4394  	// For slow starts and ramping up.
  4395  	if o.maxpb < o.pblimit {
  4396  		o.maxpb *= 2
  4397  		if o.maxpb > o.pblimit {
  4398  			o.maxpb = o.pblimit
  4399  		}
  4400  	}
  4401  
  4402  	// Update accounting.
  4403  	o.pbytes -= o.fcsz
  4404  	if o.pbytes < 0 {
  4405  		o.pbytes = 0
  4406  	}
  4407  	o.fcid, o.fcsz = _EMPTY_, 0
  4408  
  4409  	o.signalNewMessages()
  4410  }
  4411  
  4412  // Lock should be held.
  4413  func (o *consumer) fcReply() string {
  4414  	var sb strings.Builder
  4415  	sb.WriteString(jsFlowControlPre)
  4416  	sb.WriteString(o.stream)
  4417  	sb.WriteByte(btsep)
  4418  	sb.WriteString(o.name)
  4419  	sb.WriteByte(btsep)
  4420  	var b [4]byte
  4421  	rn := rand.Int63()
  4422  	for i, l := 0, rn; i < len(b); i++ {
  4423  		b[i] = digits[l%base]
  4424  		l /= base
  4425  	}
  4426  	sb.Write(b[:])
  4427  	return sb.String()
  4428  }
  4429  
  4430  // sendFlowControl will send a flow control packet to the consumer.
  4431  // Lock should be held.
  4432  func (o *consumer) sendFlowControl() {
  4433  	if !o.isPushMode() {
  4434  		return
  4435  	}
  4436  	subj, rply := o.cfg.DeliverSubject, o.fcReply()
  4437  	o.fcsz, o.fcid = o.pbytes, rply
  4438  	hdr := []byte("NATS/1.0 100 FlowControl Request\r\n\r\n")
  4439  	o.outq.send(newJSPubMsg(subj, _EMPTY_, rply, hdr, nil, nil, 0))
  4440  }
  4441  
  4442  // Tracks our outstanding pending acks. Only applicable to the AckExplicit and AckAll policies.
  4443  // Lock should be held.
  4444  func (o *consumer) trackPending(sseq, dseq uint64) {
  4445  	if o.pending == nil {
  4446  		o.pending = make(map[uint64]*Pending)
  4447  	}
  4448  	if o.ptmr == nil {
  4449  		o.ptmr = time.AfterFunc(o.ackWait(0), o.checkPending)
  4450  	}
  4451  	if p, ok := o.pending[sseq]; ok {
  4452  		// Update timestamp but keep original consumer delivery sequence.
  4453  		// So do not update p.Sequence.
  4454  		p.Timestamp = time.Now().UnixNano()
  4455  	} else {
  4456  		o.pending[sseq] = &Pending{dseq, time.Now().UnixNano()}
  4457  	}
  4458  }
  4459  
  4460  // Credit back a failed delivery.
  4461  // Lock should be held.
  4462  func (o *consumer) creditWaitingRequest(reply string) {
  4463  	wq := o.waiting
  4464  	for wr := wq.head; wr != nil; wr = wr.next {
  4465  		if wr.reply == reply {
  4466  			wr.n++
  4467  			wr.d--
  4468  			return
  4469  		}
  4470  	}
  4471  }
  4472  
  4473  // didNotDeliver is called when a delivery for a consumer message failed.
  4474  // Depending on our state, we will process the failure.
  4475  func (o *consumer) didNotDeliver(seq uint64, subj string) {
  4476  	o.mu.Lock()
  4477  	mset := o.mset
  4478  	if mset == nil {
  4479  		o.mu.Unlock()
  4480  		return
  4481  	}
  4482  	// Adjust back deliver count.
  4483  	o.decDeliveryCount(seq)
  4484  
  4485  	var checkDeliveryInterest bool
  4486  	if o.isPushMode() {
  4487  		o.active = false
  4488  		checkDeliveryInterest = true
  4489  	} else if o.pending != nil {
  4490  		o.creditWaitingRequest(subj)
  4491  		// pull mode and we have pending.
  4492  		if _, ok := o.pending[seq]; ok {
  4493  			// We found this message on pending, so we need
  4494  			// to queue it up for immediate redelivery since
  4495  			// we know it was not delivered.
  4496  			if !o.onRedeliverQueue(seq) {
  4497  				o.addToRedeliverQueue(seq)
  4498  				o.signalNewMessages()
  4499  			}
  4500  		}
  4501  	}
  4502  	o.mu.Unlock()
  4503  
  4504  	// If we do not have interest update that here.
  4505  	if checkDeliveryInterest && o.hasNoLocalInterest() {
  4506  		o.updateDeliveryInterest(false)
  4507  	}
  4508  }
  4509  
  4510  // Lock should be held.
  4511  func (o *consumer) addToRedeliverQueue(seqs ...uint64) {
  4512  	o.rdq = append(o.rdq, seqs...)
  4513  	for _, seq := range seqs {
  4514  		o.rdqi.Insert(seq)
  4515  	}
  4516  }
  4517  
  4518  // Lock should be held.
  4519  func (o *consumer) hasRedeliveries() bool {
  4520  	return len(o.rdq) > 0
  4521  }
  4522  
  4523  func (o *consumer) getNextToRedeliver() uint64 {
  4524  	if len(o.rdq) == 0 {
  4525  		return 0
  4526  	}
  4527  	seq := o.rdq[0]
  4528  	if len(o.rdq) == 1 {
  4529  		o.rdq = nil
  4530  		o.rdqi.Empty()
  4531  	} else {
  4532  		o.rdq = append(o.rdq[:0], o.rdq[1:]...)
  4533  		o.rdqi.Delete(seq)
  4534  	}
  4535  	return seq
  4536  }
  4537  
  4538  // This checks if we already have this sequence queued for redelivery.
  4539  // FIXME(dlc) - This is O(n) but should be fast with small redeliver size.
  4540  // Lock should be held.
  4541  func (o *consumer) onRedeliverQueue(seq uint64) bool {
  4542  	return o.rdqi.Exists(seq)
  4543  }
  4544  
  4545  // Remove a sequence from the redelivery queue.
  4546  // Lock should be held.
  4547  func (o *consumer) removeFromRedeliverQueue(seq uint64) bool {
  4548  	if !o.onRedeliverQueue(seq) {
  4549  		return false
  4550  	}
  4551  	for i, rseq := range o.rdq {
  4552  		if rseq == seq {
  4553  			if len(o.rdq) == 1 {
  4554  				o.rdq = nil
  4555  				o.rdqi.Empty()
  4556  			} else {
  4557  				o.rdq = append(o.rdq[:i], o.rdq[i+1:]...)
  4558  				o.rdqi.Delete(seq)
  4559  			}
  4560  			return true
  4561  		}
  4562  	}
  4563  	return false
  4564  }
  4565  
  4566  // Checks the pending messages.
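        // When BackOff is configured, the redelivery deadline for a pending message is picked from
        // that list using its redelivery count, clamped to the last entry. For example (an
        // illustration of the logic below), with BackOff = [1s, 5s, 30s] a message not yet redelivered
        // waits 1s, one redelivery waits 5s, and two or more reuse the final 30s entry.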
  4567  func (o *consumer) checkPending() {
  4568  	o.mu.RLock()
  4569  	mset := o.mset
  4570  	// On stop, mset and timer will be nil.
  4571  	if o.closed || mset == nil || o.ptmr == nil {
  4572  		stopAndClearTimer(&o.ptmr)
  4573  		o.mu.RUnlock()
  4574  		return
  4575  	}
  4576  	o.mu.RUnlock()
  4577  
  4578  	var shouldUpdateState bool
  4579  	var state StreamState
  4580  	mset.store.FastState(&state)
  4581  	fseq := state.FirstSeq
  4582  
  4583  	o.mu.Lock()
  4584  	defer o.mu.Unlock()
  4585  
  4586  	now := time.Now().UnixNano()
  4587  	ttl := int64(o.cfg.AckWait)
  4588  	next := int64(o.ackWait(0))
  4589  	// However, if there is backoff, initialize with the largest backoff.
  4590  	// It will be adjusted as needed.
  4591  	if l := len(o.cfg.BackOff); l > 0 {
  4592  		next = int64(o.cfg.BackOff[l-1])
  4593  	}
  4594  
  4595  	// Since we can update timestamps, we have to review all pending.
  4596  	// We will now bail if we see an ack pending inbound to us via o.awl.
  4597  	var expired []uint64
  4598  	check := len(o.pending) > 1024
  4599  	for seq, p := range o.pending {
  4600  		if check && atomic.LoadInt64(&o.awl) > 0 {
  4601  			if o.ptmr == nil {
  4602  				o.ptmr = time.AfterFunc(100*time.Millisecond, o.checkPending)
  4603  			} else {
  4604  				o.ptmr.Reset(100 * time.Millisecond)
  4605  			}
  4606  			return
  4607  		}
  4608  		// Check if these are no longer valid.
  4609  		if seq < fseq || seq <= o.asflr {
  4610  			delete(o.pending, seq)
  4611  			delete(o.rdc, seq)
  4612  			o.removeFromRedeliverQueue(seq)
  4613  			shouldUpdateState = true
  4614  			// Check if we need to move ack floors.
  4615  			if seq > o.asflr {
  4616  				o.asflr = seq
  4617  			}
  4618  			if p.Sequence > o.adflr {
  4619  				o.adflr = p.Sequence
  4620  			}
  4621  			continue
  4622  		}
  4623  		elapsed, deadline := now-p.Timestamp, ttl
  4624  		if len(o.cfg.BackOff) > 0 {
  4625  			// This is ok even if o.rdc is nil, we would get dc == 0, which is what we want.
  4626  			dc := int(o.rdc[seq])
  4627  			// This will be the index for the next backoff, will set to last element if needed.
  4628  			nbi := dc + 1
  4629  			if dc+1 >= len(o.cfg.BackOff) {
  4630  				dc = len(o.cfg.BackOff) - 1
  4631  				nbi = dc
  4632  			}
  4633  			deadline = int64(o.cfg.BackOff[dc])
  4634  			// Set `next` to the next backoff (if smaller than current `next` value).
  4635  			if nextBackoff := int64(o.cfg.BackOff[nbi]); nextBackoff < next {
  4636  				next = nextBackoff
  4637  			}
  4638  		}
  4639  		if elapsed >= deadline {
  4640  			if !o.onRedeliverQueue(seq) {
  4641  				expired = append(expired, seq)
  4642  			}
  4643  		} else if deadline-elapsed < next {
  4644  			// Update when we should fire next.
  4645  			next = deadline - elapsed
  4646  		}
  4647  	}
  4648  
  4649  	if len(expired) > 0 {
  4650  		// We need to sort.
  4651  		sort.Slice(expired, func(i, j int) bool { return expired[i] < expired[j] })
  4652  		o.addToRedeliverQueue(expired...)
  4653  		// Now we should update the timestamp here since we are redelivering.
  4654  		// We will use an incrementing time to preserve order for any other redelivery.
  4655  		off := now - o.pending[expired[0]].Timestamp
  4656  		for _, seq := range expired {
  4657  			if p, ok := o.pending[seq]; ok {
  4658  				p.Timestamp += off
  4659  			}
  4660  		}
  4661  		o.signalNewMessages()
  4662  	}
  4663  
  4664  	if len(o.pending) > 0 {
  4665  		delay := time.Duration(next)
  4666  		if o.ptmr == nil {
  4667  			o.ptmr = time.AfterFunc(delay, o.checkPending)
  4668  		} else {
  4669  			o.ptmr.Reset(o.ackWait(delay))
  4670  		}
  4671  	} else {
  4672  		// Make sure to stop the timer and clear out any redelivery queues.
  4673  		stopAndClearTimer(&o.ptmr)
  4674  		o.rdq = nil
  4675  		o.rdqi.Empty()
  4676  		o.pending = nil
  4677  		// Mimic behavior in processAckMsg when pending is empty.
  4678  		o.adflr, o.asflr = o.dseq-1, o.sseq-1
  4679  	}
  4680  
  4681  	// Update our state if needed.
  4682  	if shouldUpdateState {
  4683  		if err := o.writeStoreStateUnlocked(); err != nil && o.srv != nil && o.mset != nil && !o.closed {
  4684  			s, acc, mset, name := o.srv, o.acc, o.mset, o.name
  4685  			s.Warnf("Consumer '%s > %s > %s' error on write store state from check pending: %v", acc, mset.cfg.Name, name, err)
  4686  		}
  4687  	}
  4688  }
  4689  
  4690  // seqFromReply will extract the consumer delivery sequence number from a reply subject.
  4691  func (o *consumer) seqFromReply(reply string) uint64 {
  4692  	_, dseq, _ := ackReplyInfo(reply)
  4693  	return dseq
  4694  }
  4695  
  4696  // streamSeqFromReply will extract the stream sequence from the reply subject.
  4697  func (o *consumer) streamSeqFromReply(reply string) uint64 {
  4698  	sseq, _, _ := ackReplyInfo(reply)
  4699  	return sseq
  4700  }
  4701  
  4702  // Quick parser for positive numbers in ack reply encoding.
  4703  func parseAckReplyNum(d string) (n int64) {
  4704  	if len(d) == 0 {
  4705  		return -1
  4706  	}
  4707  	for _, dec := range d {
  4708  		if dec < asciiZero || dec > asciiNine {
  4709  			return -1
  4710  		}
  4711  		n = n*10 + (int64(dec) - asciiZero)
  4712  	}
  4713  	return n
  4714  }
  4715  
  4716  const expectedNumReplyTokens = 9
  4717  
  4718  // Grab encoded information in the reply subject for a delivered message.
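        // As parsed below, the reply subject is expected to carry nine tokens:
        //
        //	$JS.ACK.<stream>.<consumer>.<delivered count>.<stream seq>.<consumer seq>.<timestamp>.<pending>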
  4719  func replyInfo(subject string) (sseq, dseq, dc uint64, ts int64, pending uint64) {
  4720  	tsa := [expectedNumReplyTokens]string{}
  4721  	start, tokens := 0, tsa[:0]
  4722  	for i := 0; i < len(subject); i++ {
  4723  		if subject[i] == btsep {
  4724  			tokens = append(tokens, subject[start:i])
  4725  			start = i + 1
  4726  		}
  4727  	}
  4728  	tokens = append(tokens, subject[start:])
  4729  	if len(tokens) != expectedNumReplyTokens || tokens[0] != "$JS" || tokens[1] != "ACK" {
  4730  		return 0, 0, 0, 0, 0
  4731  	}
  4732  	// TODO(dlc) - Should we error if we do not match consumer name?
  4733  	// stream is tokens[2], consumer is 3.
  4734  	dc = uint64(parseAckReplyNum(tokens[4]))
  4735  	sseq, dseq = uint64(parseAckReplyNum(tokens[5])), uint64(parseAckReplyNum(tokens[6]))
  4736  	ts = parseAckReplyNum(tokens[7])
  4737  	pending = uint64(parseAckReplyNum(tokens[8]))
  4738  
  4739  	return sseq, dseq, dc, ts, pending
  4740  }
  4741  
  4742  func ackReplyInfo(subject string) (sseq, dseq, dc uint64) {
  4743  	tsa := [expectedNumReplyTokens]string{}
  4744  	start, tokens := 0, tsa[:0]
  4745  	for i := 0; i < len(subject); i++ {
  4746  		if subject[i] == btsep {
  4747  			tokens = append(tokens, subject[start:i])
  4748  			start = i + 1
  4749  		}
  4750  	}
  4751  	tokens = append(tokens, subject[start:])
  4752  	if len(tokens) != expectedNumReplyTokens || tokens[0] != "$JS" || tokens[1] != "ACK" {
  4753  		return 0, 0, 0
  4754  	}
  4755  	dc = uint64(parseAckReplyNum(tokens[4]))
  4756  	sseq, dseq = uint64(parseAckReplyNum(tokens[5])), uint64(parseAckReplyNum(tokens[6]))
  4757  
  4758  	return sseq, dseq, dc
  4759  }
  4760  
  4761  // nextSeq returns the next delivered sequence number for this consumer.
  4762  func (o *consumer) nextSeq() uint64 {
  4763  	o.mu.RLock()
  4764  	dseq := o.dseq
  4765  	o.mu.RUnlock()
  4766  	return dseq
  4767  }
  4768  
  4769  // Used to hold skip list when deliver policy is last per subject.
  4770  type lastSeqSkipList struct {
  4771  	resume uint64
  4772  	seqs   []uint64
  4773  }
  4774  
  4775  // Lets us know we have a skip list, which is used for deliver last per subject when we are just starting.
  4776  // Lock should be held.
  4777  func (o *consumer) hasSkipListPending() bool {
  4778  	return o.lss != nil && len(o.lss.seqs) > 0
  4779  }
  4780  
  4781  // Will select the starting sequence.
  4782  func (o *consumer) selectStartingSeqNo() {
  4783  	if o.mset == nil || o.mset.store == nil {
  4784  		o.sseq = 1
  4785  	} else {
  4786  		var state StreamState
  4787  		o.mset.store.FastState(&state)
  4788  		if o.cfg.OptStartSeq == 0 {
  4789  			if o.cfg.DeliverPolicy == DeliverAll {
  4790  				o.sseq = state.FirstSeq
  4791  			} else if o.cfg.DeliverPolicy == DeliverLast {
  4792  				if o.subjf == nil {
  4793  					o.sseq = state.LastSeq
  4794  					return
  4795  				}
  4796  				// If we are partitioned here this will be properly set when we become leader.
  4797  				for _, filter := range o.subjf {
  4798  					ss := o.mset.store.FilteredState(1, filter.subject)
  4799  					if ss.Last > o.sseq {
  4800  						o.sseq = ss.Last
  4801  					}
  4802  				}
  4803  			} else if o.cfg.DeliverPolicy == DeliverLastPerSubject {
  4804  				// If our parent stream is set to max msgs per subject of 1 this is just
  4805  				// a normal consumer at this point. We can avoid any heavy lifting.
  4806  				if o.mset.cfg.MaxMsgsPer == 1 {
  4807  					o.sseq = state.FirstSeq
  4808  				} else {
  4809  					// A threshold for when we switch from get last msg to subjects state.
  4810  					const numSubjectsThresh = 256
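        					// Build a skip list of the last sequence for each matching subject;
        					// we will start from the lowest of these.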
  4811  					lss := &lastSeqSkipList{resume: state.LastSeq}
  4812  					var filters []string
  4813  					if o.subjf == nil {
  4814  						filters = append(filters, o.cfg.FilterSubject)
  4815  					} else {
  4816  						for _, filter := range o.subjf {
  4817  							filters = append(filters, filter.subject)
  4818  						}
  4819  					}
  4820  					for _, filter := range filters {
  4821  						if st := o.mset.store.SubjectsTotals(filter); len(st) < numSubjectsThresh {
  4822  							var smv StoreMsg
  4823  							for subj := range st {
  4824  								if sm, err := o.mset.store.LoadLastMsg(subj, &smv); err == nil {
  4825  									lss.seqs = append(lss.seqs, sm.seq)
  4826  								}
  4827  							}
  4828  						} else if mss := o.mset.store.SubjectsState(filter); len(mss) > 0 {
  4829  							for _, ss := range mss {
  4830  								lss.seqs = append(lss.seqs, ss.Last)
  4831  							}
  4832  						}
  4833  					}
  4834  					// Sort the skip list (ascending) if needed so seqs[0] is the lowest sequence.
  4835  					if len(lss.seqs) > 1 {
  4836  						sort.Slice(lss.seqs, func(i, j int) bool {
  4837  							return lss.seqs[j] > lss.seqs[i]
  4838  						})
  4839  					}
  4840  					if len(lss.seqs) == 0 {
  4841  						o.sseq = state.LastSeq
  4842  					} else {
  4843  						o.sseq = lss.seqs[0]
  4844  					}
  4845  					// Assign skip list.
  4846  					o.lss = lss
  4847  				}
  4848  			} else if o.cfg.OptStartTime != nil {
  4849  				// If we are here we are time based.
  4850  				// TODO(dlc) - Once clustered can't rely on this.
  4851  				o.sseq = o.mset.store.GetSeqFromTime(*o.cfg.OptStartTime)
  4852  				// Here we want to see if we are filtered, and if so possibly close the gap
  4853  				// to the nearest first given our starting sequence from time. This is so we do
  4854  				// not force the system to do a linear walk between o.sseq and the real first.
  4855  				if len(o.subjf) > 0 {
  4856  					nseq := state.LastSeq
  4857  					for _, filter := range o.subjf {
  4858  						// Use first sequence since this is more optimized atm.
  4859  						ss := o.mset.store.FilteredState(state.FirstSeq, filter.subject)
  4860  						if ss.First > o.sseq && ss.First < nseq {
  4861  							nseq = ss.First
  4862  						}
  4863  					}
  4864  					// Skip ahead if possible.
  4865  					if nseq > o.sseq && nseq < state.LastSeq {
  4866  						o.sseq = nseq
  4867  					}
  4868  				}
  4869  			} else {
  4870  				// DeliverNew
  4871  				o.sseq = state.LastSeq + 1
  4872  			}
  4873  		} else {
  4874  			o.sseq = o.cfg.OptStartSeq
  4875  		}
  4876  
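        		// Clamp the starting sequence to the stream's current bounds.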
  4877  		if state.FirstSeq == 0 {
  4878  			o.sseq = 1
  4879  		} else if o.sseq < state.FirstSeq {
  4880  			o.sseq = state.FirstSeq
  4881  		} else if o.sseq > state.LastSeq {
  4882  			o.sseq = state.LastSeq + 1
  4883  		}
  4884  	}
  4885  
  4886  	// Always set delivery sequence to 1.
  4887  	o.dseq = 1
  4888  	// Set ack delivery floor to delivery-1
  4889  	o.adflr = o.dseq - 1
  4890  	// Set ack store floor to store-1
  4891  	o.asflr = o.sseq - 1
  4892  	// Set our starting sequence state.
  4893  	if o.store != nil && o.sseq > 0 {
  4894  		o.store.SetStarting(o.sseq - 1)
  4895  	}
  4896  }
  4897  
  4898  // Test whether a config represents a durable subscriber.
  4899  func isDurableConsumer(config *ConsumerConfig) bool {
  4900  	return config != nil && config.Durable != _EMPTY_
  4901  }
  4902  
  4903  func (o *consumer) isDurable() bool {
  4904  	return o.cfg.Durable != _EMPTY_
  4905  }
  4906  
  4907  // Are we in push mode, i.e. do we have a delivery subject?
  4908  func (o *consumer) isPushMode() bool {
  4909  	return o.cfg.DeliverSubject != _EMPTY_
  4910  }
  4911  
  4912  func (o *consumer) isPullMode() bool {
  4913  	return o.cfg.DeliverSubject == _EMPTY_
  4914  }
  4915  
  4916  // String returns the name of this consumer.
  4917  func (o *consumer) String() string {
  4918  	o.mu.RLock()
  4919  	n := o.name
  4920  	o.mu.RUnlock()
  4921  	return n
  4922  }
  4923  
  4924  func createConsumerName() string {
  4925  	return getHash(nuid.Next())
  4926  }
  4927  
  4928  // deleteConsumer will delete the consumer from this stream.
  4929  func (mset *stream) deleteConsumer(o *consumer) error {
  4930  	return o.delete()
  4931  }
  4932  
  4933  func (o *consumer) getStream() *stream {
  4934  	o.mu.RLock()
  4935  	mset := o.mset
  4936  	o.mu.RUnlock()
  4937  	return mset
  4938  }
  4939  
  4940  func (o *consumer) streamName() string {
  4941  	o.mu.RLock()
  4942  	mset := o.mset
  4943  	o.mu.RUnlock()
  4944  	if mset != nil {
  4945  		return mset.name()
  4946  	}
  4947  	return _EMPTY_
  4948  }
  4949  
  4950  // Active indicates if this consumer is still active.
  4951  func (o *consumer) isActive() bool {
  4952  	o.mu.RLock()
  4953  	active := o.active && o.mset != nil
  4954  	o.mu.RUnlock()
  4955  	return active
  4956  }
  4957  
  4958  // hasNoLocalInterest returns true if we have no local interest.
  4959  func (o *consumer) hasNoLocalInterest() bool {
  4960  	o.mu.RLock()
  4961  	rr := o.acc.sl.Match(o.cfg.DeliverSubject)
  4962  	o.mu.RUnlock()
  4963  	return len(rr.psubs)+len(rr.qsubs) == 0
  4964  }
  4965  
  4966  // This is when the underlying stream has been purged.
  4967  // sseq is the new first seq for the stream after purge.
  4968  // Lock should NOT be held.
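        // slseq is the stream's last sequence at purge time. isWider indicates our filter
        // is wider than the purge, so remaining pending messages must be checked individually.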
  4969  func (o *consumer) purge(sseq uint64, slseq uint64, isWider bool) {
  4970  	// Do not update our state unless we know we are the leader.
  4971  	if !o.isLeader() {
  4972  		return
  4973  	}
  4974  	// Signals all have been purged for this consumer.
  4975  	if sseq == 0 && !isWider {
  4976  		sseq = slseq + 1
  4977  	}
  4978  
  4979  	var store StreamStore
  4980  	if isWider {
  4981  		o.mu.RLock()
  4982  		if o.mset != nil {
  4983  			store = o.mset.store
  4984  		}
  4985  		o.mu.RUnlock()
  4986  	}
  4987  
  4988  	o.mu.Lock()
  4989  	// Do not go backwards
  4990  	if o.sseq < sseq {
  4991  		o.sseq = sseq
  4992  	}
  4993  
  4994  	if o.asflr < sseq {
  4995  		o.asflr = sseq - 1
  4996  		// We need to remove those no longer relevant from pending.
  4997  		for seq, p := range o.pending {
  4998  			if seq <= o.asflr {
  4999  				if p.Sequence > o.adflr {
  5000  					o.adflr = p.Sequence
  5001  					if o.adflr > o.dseq {
  5002  						o.dseq = o.adflr
  5003  					}
  5004  				}
  5005  				delete(o.pending, seq)
  5006  				delete(o.rdc, seq)
  5007  				// rdq handled below.
  5008  			}
  5009  			if isWider && store != nil {
  5010  				// Our filtered subject, which could be all, is wider than the underlying purge.
  5011  				// We need to check if the pending items left are still valid.
  5012  				var smv StoreMsg
  5013  				if _, err := store.LoadMsg(seq, &smv); err == errDeletedMsg || err == ErrStoreMsgNotFound {
  5014  					if p.Sequence > o.adflr {
  5015  						o.adflr = p.Sequence
  5016  						if o.adflr > o.dseq {
  5017  							o.dseq = o.adflr
  5018  						}
  5019  					}
  5020  					delete(o.pending, seq)
  5021  					delete(o.rdc, seq)
  5022  				}
  5023  			}
  5024  		}
  5025  	}
  5026  
  5027  	// This means we can reset everything at this point.
  5028  	if len(o.pending) == 0 {
  5029  		o.pending, o.rdc = nil, nil
  5030  		o.adflr, o.asflr = o.dseq-1, o.sseq-1
  5031  	}
  5032  
  5033  	// We need to drop from the redelivery queue (o.rdq) any sequences below our new starting sequence.
  5034  	if len(o.rdq) > 0 {
  5035  		rdq := o.rdq
  5036  		o.rdq = nil
  5037  		o.rdqi.Empty()
  5038  		for _, sseq := range rdq {
  5039  			if sseq >= o.sseq {
  5040  				o.addToRedeliverQueue(sseq)
  5041  			}
  5042  		}
  5043  	}
  5044  	// Grab some info in case of error below.
  5045  	s, acc, mset, name := o.srv, o.acc, o.mset, o.name
  5046  	o.mu.Unlock()
  5047  
  5048  	if err := o.writeStoreState(); err != nil && s != nil && mset != nil {
  5049  		s.Warnf("Consumer '%s > %s > %s' error on write store state from purge: %v", acc, mset.name(), name, err)
  5050  	}
  5051  }
  5052  
  5053  func stopAndClearTimer(tp **time.Timer) {
  5054  	if *tp == nil {
  5055  		return
  5056  	}
  5057  	// Will get drained in the normal course of events; do not try to
  5058  	// drain here.
  5059  	(*tp).Stop()
  5060  	*tp = nil
  5061  }
  5062  
  5063  // Stop will shut down the consumer for the associated stream.
  5064  func (o *consumer) stop() error {
  5065  	return o.stopWithFlags(false, false, true, false)
  5066  }
  5067  
  5068  func (o *consumer) deleteWithoutAdvisory() error {
  5069  	return o.stopWithFlags(true, false, true, false)
  5070  }
  5071  
  5072  // Delete will delete the consumer for the associated stream and send advisories.
  5073  func (o *consumer) delete() error {
  5074  	return o.stopWithFlags(true, false, true, true)
  5075  }
  5076  
  5077  // To test for closed state.
  5078  func (o *consumer) isClosed() bool {
  5079  	o.mu.RLock()
  5080  	defer o.mu.RUnlock()
  5081  	return o.closed
  5082  }
  5083  
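        // stopWithFlags stops the consumer. dflag indicates the consumer itself is being
        // deleted (its store and raft node are removed), sdflag indicates the parent stream
        // is being deleted as well, doSignal breaks us out of the readLoop, and advisory
        // controls whether a delete advisory may be sent.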
  5084  func (o *consumer) stopWithFlags(dflag, sdflag, doSignal, advisory bool) error {
  5085  	// If dflag is true determine if we are still assigned.
  5086  	var isAssigned bool
  5087  	if dflag {
  5088  		o.mu.RLock()
  5089  		acc, stream, consumer := o.acc, o.stream, o.name
  5090  		isClustered := o.js != nil && o.js.isClustered()
  5091  		o.mu.RUnlock()
  5092  		if isClustered {
  5093  			// Grab jsa to check assignment.
  5094  			var jsa *jsAccount
  5095  			if acc != nil {
  5096  				// Need lock here to avoid data race.
  5097  				acc.mu.RLock()
  5098  				jsa = acc.js
  5099  				acc.mu.RUnlock()
  5100  			}
  5101  			if jsa != nil {
  5102  				isAssigned = jsa.consumerAssigned(stream, consumer)
  5103  			}
  5104  		}
  5105  	}
  5106  
  5107  	o.mu.Lock()
  5108  	if o.closed {
  5109  		o.mu.Unlock()
  5110  		return nil
  5111  	}
  5112  	o.closed = true
  5113  
  5114  	// Check if we are the leader and are being deleted (as a node).
  5115  	if dflag && o.isLeader() {
  5116  		// If we are clustered and the node leader (probable given the check above), step down.
  5117  		if node := o.node; node != nil && node.Leader() {
  5118  			node.StepDown()
  5119  		}
  5120  
  5121  		// dflag does not necessarily mean that the consumer is being deleted,
  5122  		// just that the consumer node is being removed from this peer, so we
  5123  		// send delete advisories only if we are no longer assigned at the meta layer,
  5124  		// or we are not clustered.
  5125  		if !isAssigned && advisory {
  5126  			o.sendDeleteAdvisoryLocked()
  5127  		}
  5128  		if o.isPullMode() {
  5129  			// Release any pending.
  5130  			o.releaseAnyPendingRequests(isAssigned)
  5131  		}
  5132  	}
  5133  
  5134  	if o.qch != nil {
  5135  		close(o.qch)
  5136  		o.qch = nil
  5137  	}
  5138  
  5139  	a := o.acc
  5140  	store := o.store
  5141  	mset := o.mset
  5142  	o.mset = nil
  5143  	o.active = false
  5144  	o.unsubscribe(o.ackSub)
  5145  	o.unsubscribe(o.reqSub)
  5146  	o.unsubscribe(o.fcSub)
  5147  	o.ackSub = nil
  5148  	o.reqSub = nil
  5149  	o.fcSub = nil
  5150  	if o.infoSub != nil {
  5151  		o.srv.sysUnsubscribe(o.infoSub)
  5152  		o.infoSub = nil
  5153  	}
  5154  	c := o.client
  5155  	o.client = nil
  5156  	sysc := o.sysc
  5157  	o.sysc = nil
  5158  	stopAndClearTimer(&o.ptmr)
  5159  	stopAndClearTimer(&o.dtmr)
  5160  	stopAndClearTimer(&o.gwdtmr)
  5161  	delivery := o.cfg.DeliverSubject
  5162  	o.waiting = nil
  5163  	// Break us out of the readLoop.
  5164  	if doSignal {
  5165  		o.signalNewMessages()
  5166  	}
  5167  	n := o.node
  5168  	qgroup := o.cfg.DeliverGroup
  5169  	o.ackMsgs.unregister()
  5170  	if o.nextMsgReqs != nil {
  5171  		o.nextMsgReqs.unregister()
  5172  	}
  5173  
  5174  	// For cleaning up the node assignment.
  5175  	var ca *consumerAssignment
  5176  	if dflag {
  5177  		ca = o.ca
  5178  	}
  5179  	sigSubs := o.sigSubs
  5180  	js := o.js
  5181  	o.mu.Unlock()
  5182  
  5183  	if c != nil {
  5184  		c.closeConnection(ClientClosed)
  5185  	}
  5186  	if sysc != nil {
  5187  		sysc.closeConnection(ClientClosed)
  5188  	}
  5189  
  5190  	if delivery != _EMPTY_ {
  5191  		a.sl.clearNotification(delivery, qgroup, o.inch)
  5192  	}
  5193  
  5194  	var rp RetentionPolicy
  5195  	if mset != nil {
  5196  		if len(sigSubs) > 0 {
  5197  			mset.removeConsumerAsLeader(o)
  5198  		}
  5199  		mset.mu.Lock()
  5200  		mset.removeConsumer(o)
  5201  		rp = mset.cfg.Retention
  5202  		mset.mu.Unlock()
  5203  	}
  5204  
  5205  	// We may need to remove messages here since we are interest-based retention.
  5206  	// We will do this consistently on all replicas. Note that in clustered mode
  5207  	// the non-leader consumers will need to restore state first.
  5208  	if dflag && rp == InterestPolicy {
  5209  		state := mset.state()
  5210  		stop := state.LastSeq
  5211  		o.mu.Lock()
  5212  		if !o.isLeader() {
  5213  			o.readStoredState(stop)
  5214  		}
  5215  		start := o.asflr
  5216  		o.mu.Unlock()
  5217  		// Make sure we start at worst with first sequence in the stream.
  5218  		if start < state.FirstSeq {
  5219  			start = state.FirstSeq
  5220  		}
  5221  
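        		// Collect sequences with no remaining interest while holding the stream lock,
        		// then remove them after releasing it.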
  5222  		var rmseqs []uint64
  5223  		mset.mu.Lock()
  5224  		for seq := start; seq <= stop; seq++ {
  5225  			if mset.noInterest(seq, o) {
  5226  				rmseqs = append(rmseqs, seq)
  5227  			}
  5228  		}
  5229  		mset.mu.Unlock()
  5230  
  5231  		// These can be removed.
  5232  		for _, seq := range rmseqs {
  5233  			mset.store.RemoveMsg(seq)
  5234  		}
  5235  	}
  5236  
  5237  	// Cluster cleanup.
  5238  	if n != nil {
  5239  		if dflag {
  5240  			n.Delete()
  5241  		} else {
  5242  			// Try to install snapshot on clean exit
  5243  			if o.store != nil && (o.retention != LimitsPolicy || n.NeedSnapshot()) {
  5244  				if snap, err := o.store.EncodedState(); err == nil {
  5245  					n.InstallSnapshot(snap)
  5246  				}
  5247  			}
  5248  			n.Stop()
  5249  		}
  5250  	}
  5251  
  5252  	if ca != nil {
  5253  		js.mu.Lock()
  5254  		if ca.Group != nil {
  5255  			ca.Group.node = nil
  5256  		}
  5257  		js.mu.Unlock()
  5258  	}
  5259  
  5260  	// Clean up our store.
  5261  	var err error
  5262  	if store != nil {
  5263  		if dflag {
  5264  			if sdflag {
  5265  				err = store.StreamDelete()
  5266  			} else {
  5267  				err = store.Delete()
  5268  			}
  5269  		} else {
  5270  			err = store.Stop()
  5271  		}
  5272  	}
  5273  
  5274  	return err
  5275  }
  5276  
  5277  // Check that we do not form a cycle by delivering to a delivery subject
  5278  // that is captured by one of the stream's own subjects.
  5279  func deliveryFormsCycle(cfg *StreamConfig, deliverySubject string) bool {
  5280  	for _, subject := range cfg.Subjects {
  5281  		if subjectIsSubsetMatch(deliverySubject, subject) {
  5282  			return true
  5283  		}
  5284  	}
  5285  	return false
  5286  }
  5287  
  5288  // switchToEphemeral is called on startup when recovering ephemerals.
  5289  func (o *consumer) switchToEphemeral() {
  5290  	o.mu.Lock()
  5291  	o.cfg.Durable = _EMPTY_
  5292  	store, ok := o.store.(*consumerFileStore)
  5293  	rr := o.acc.sl.Match(o.cfg.DeliverSubject)
  5294  	// Setup dthresh.
  5295  	o.updateInactiveThreshold(&o.cfg)
  5296  	o.updatePauseState(&o.cfg)
  5297  	o.mu.Unlock()
  5298  
  5299  	// Update interest
  5300  	o.updateDeliveryInterest(len(rr.psubs)+len(rr.qsubs) > 0)
  5301  	// Write out new config
  5302  	if ok {
  5303  		store.updateConfig(o.cfg)
  5304  	}
  5305  }
  5306  
  5307  // RequestNextMsgSubject returns the subject to request the next message when in pull or worker mode.
  5308  // Returns empty otherwise.
  5309  func (o *consumer) requestNextMsgSubject() string {
  5310  	return o.nextMsgSubj
  5311  }
  5312  
  5313  func (o *consumer) decStreamPending(sseq uint64, subj string) {
  5314  	o.mu.Lock()
  5315  	// Update our cached num pending only if we think deliverMsg has not done so.
  5316  	if sseq >= o.sseq && o.isFilteredMatch(subj) {
  5317  		o.npc--
  5318  	}
  5319  
  5320  	// Check if this message was pending.
  5321  	p, wasPending := o.pending[sseq]
  5322  	var rdc uint64 = 1
  5323  	if o.rdc != nil {
  5324  		rdc = o.rdc[sseq]
  5325  	}
  5326  	o.mu.Unlock()
  5327  
  5328  	// If it was pending process it like an ack.
  5329  	if wasPending {
  5330  		// We could have the lock for the stream so do this in a goroutine.
  5331  		// TODO(dlc) - We should do this with ipq vs naked goroutines.
  5332  		go o.processTerm(sseq, p.Sequence, rdc, ackTermUnackedLimitsReason)
  5333  	}
  5334  }
  5335  
  5336  func (o *consumer) account() *Account {
  5337  	o.mu.RLock()
  5338  	a := o.acc
  5339  	o.mu.RUnlock()
  5340  	return a
  5341  }
  5342  
  5343  // Creates a sublist for consumer.
  5344  // All subjects share the same callback.
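        // With no configured filters we register a single full-wildcard subscription.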
  5345  func (o *consumer) signalSubs() []*subscription {
  5346  	o.mu.Lock()
  5347  	defer o.mu.Unlock()
  5348  
  5349  	if o.sigSubs != nil {
  5350  		return o.sigSubs
  5351  	}
  5352  
  5353  	subs := []*subscription{}
  5354  	if o.subjf == nil {
  5355  		subs = append(subs, &subscription{subject: []byte(fwcs), icb: o.processStreamSignal})
  5356  		o.sigSubs = subs
  5357  		return subs
  5358  	}
  5359  
  5360  	for _, filter := range o.subjf {
  5361  		subs = append(subs, &subscription{subject: []byte(filter.subject), icb: o.processStreamSignal})
  5362  	}
  5363  	o.sigSubs = subs
  5364  	return subs
  5365  }
  5366  
  5367  // This is what will be called when our parent stream wants to kick us regarding a new message.
  5368  // We know that we are the leader and that this subject matches us, given how the parent registers
  5369  // us with the signaling sublist.
  5370  // We do need the sequence of the message, however, so the message payload carries the encoded sequence.
  5371  func (o *consumer) processStreamSignal(_ *subscription, _ *client, _ *Account, subject, _ string, seqb []byte) {
  5372  	var le = binary.LittleEndian
  5373  	seq := le.Uint64(seqb)
  5374  
  5375  	o.mu.Lock()
  5376  	defer o.mu.Unlock()
  5377  	if o.mset == nil {
  5378  		return
  5379  	}
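        	// Bump our cached num pending (npc) if this sequence is past the floor (npf)
        	// the cached count was computed against.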
  5380  	if seq > o.npf {
  5381  		o.npc++
  5382  	}
  5383  	if seq < o.sseq {
  5384  		return
  5385  	}
  5386  	if o.isPushMode() && o.active || o.isPullMode() && !o.waiting.isEmpty() {
  5387  		o.signalNewMessages()
  5388  	}
  5389  }
  5390  
  5391  // Used to compare whether two filtered subject lists are equal, ignoring order.
  5392  func subjectSliceEqual(slice1 []string, slice2 []string) bool {
  5393  	if len(slice1) != len(slice2) {
  5394  		return false
  5395  	}
  5396  	set2 := make(map[string]struct{}, len(slice2))
  5397  	for _, val := range slice2 {
  5398  		set2[val] = struct{}{}
  5399  	}
  5400  	for _, val := range slice1 {
  5401  		if _, ok := set2[val]; !ok {
  5402  			return false
  5403  		}
  5404  	}
  5405  	return true
  5406  }
  5407  
  5408  // Utility for simpler if conditions in Consumer config checks.
  5409  // In a future iteration, we can immediately create `o.subjf` and
  5410  // use it to validate things.
  5411  func gatherSubjectFilters(filter string, filters []string) []string {
  5412  	if filter != _EMPTY_ {
  5413  		filters = append(filters, filter)
  5414  	}
  5415  	// The resulting list of filters should never contain an empty filter.
  5416  	return filters
  5417  }
  5418  
  5419  // shouldStartMonitor will return true if we should start a monitor
  5420  // goroutine or will return false if one is already running.
  5421  func (o *consumer) shouldStartMonitor() bool {
  5422  	o.mu.Lock()
  5423  	defer o.mu.Unlock()
  5424  
  5425  	if o.inMonitor {
  5426  		return false
  5427  	}
  5428  	o.monitorWg.Add(1)
  5429  	o.inMonitor = true
  5430  	return true
  5431  }
  5432  
  5433  // Clear the monitor running state. The monitor goroutine should
  5434  // call this in a defer to clean up on exit.
  5435  func (o *consumer) clearMonitorRunning() {
  5436  	o.mu.Lock()
  5437  	defer o.mu.Unlock()
  5438  
  5439  	if o.inMonitor {
  5440  		o.monitorWg.Done()
  5441  		o.inMonitor = false
  5442  	}
  5443  }
  5444  
  5445  // Test whether we are in the monitor routine.
  5446  func (o *consumer) isMonitorRunning() bool {
  5447  	o.mu.RLock()
  5448  	defer o.mu.RUnlock()
  5449  	return o.inMonitor
  5450  }
  5451  
  5452  // If we detect that our ack floor is higher than the stream's last sequence, return this error.
  5453  var errAckFloorHigherThanLastSeq = errors.New("consumer ack floor is higher than streams last sequence")
  5454  
  5455  // If we are a consumer of an interest or workqueue policy stream, process that state and make sure consistent.
  5456  func (o *consumer) checkStateForInterestStream() error {
  5457  	o.mu.RLock()
  5458  	// See if we need to process this update if our parent stream is not a limits policy stream.
  5459  	mset := o.mset
  5460  	shouldProcessState := mset != nil && o.retention != LimitsPolicy
  5461  	if o.closed || !shouldProcessState || o.store == nil {
  5462  		o.mu.RUnlock()
  5463  		return nil
  5464  	}
  5465  	state, err := o.store.State()
  5466  	o.mu.RUnlock()
  5467  
  5468  	if err != nil {
  5469  		return err
  5470  	}
  5471  
  5472  	asflr := state.AckFloor.Stream
  5473  	// Protect ourselves against rolling backwards: a set high bit means the ack floor has underflowed.
  5474  	if asflr&(1<<63) != 0 {
  5475  		return nil
  5476  	}
  5477  
  5478  	// We should make sure to update the acks.
  5479  	var ss StreamState
  5480  	mset.store.FastState(&ss)
  5481  
  5482  	// Check if the underlying stream's last sequence is less than our floor.
  5483  	// This can happen if the stream has been reset and has not caught up yet.
  5484  	if asflr > ss.LastSeq {
  5485  		return errAckFloorHigherThanLastSeq
  5486  	}
  5487  
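        	// Ack everything at or below our ack floor so the stream can clean up messages
        	// that this consumer no longer has interest in.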
  5488  	for seq := ss.FirstSeq; asflr > 0 && seq <= asflr; seq++ {
  5489  		mset.ackMsg(o, seq)
  5490  	}
  5491  
  5492  	o.mu.RLock()
  5493  	// Re-read our state now that we may have processed acks above.
  5494  	state, _ = o.store.State()
  5495  	o.mu.RUnlock()
  5496  
  5497  	// If we have pending, we will need to walk through to delivered in case we missed any of those acks as well.
  5498  	if state != nil && len(state.Pending) > 0 {
  5499  		for seq := state.AckFloor.Stream + 1; seq <= state.Delivered.Stream; seq++ {
  5500  			if _, ok := state.Pending[seq]; !ok {
  5501  				mset.ackMsg(o, seq)
  5502  			}
  5503  		}
  5504  	}
  5505  	return nil
  5506  }