get.pme.sh/pnats@v0.0.0-20240304004023-26bb5a137ed0/server/store.go

     1  // Copyright 2019-2024 The NATS Authors
     2  // Licensed under the Apache License, Version 2.0 (the "License");
     3  // you may not use this file except in compliance with the License.
     4  // You may obtain a copy of the License at
     5  //
     6  // http://www.apache.org/licenses/LICENSE-2.0
     7  //
     8  // Unless required by applicable law or agreed to in writing, software
     9  // distributed under the License is distributed on an "AS IS" BASIS,
    10  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package server
    15  
    16  import (
    17  	"encoding/binary"
    18  	"errors"
    19  	"fmt"
    20  	"io"
    21  	"strings"
    22  	"time"
    23  	"unsafe"
    24  
    25  	"get.pme.sh/pnats/server/avl"
    26  )
    27  
    28  // StorageType determines how messages are stored for retention.
    29  type StorageType int
    30  
    31  const (
    32  	// FileStorage specifies on disk storage, designated by the JetStream config StoreDir.
    33  	FileStorage = StorageType(22)
    34  	// MemoryStorage specifies in memory only.
    35  	MemoryStorage = StorageType(33)
    36  	// AnyStorage is for internal use only.
    37  	AnyStorage = StorageType(44)
    38  )
    39  
    40  var (
    41  	// ErrStoreClosed is returned when the store has been closed
    42  	ErrStoreClosed = errors.New("store is closed")
    43  	// ErrStoreMsgNotFound when message was not found but was expected to be.
    44  	ErrStoreMsgNotFound = errors.New("no message found")
    45  	// ErrStoreEOF is returned when message seq is greater than the last sequence.
    46  	ErrStoreEOF = errors.New("stream store EOF")
    47  	// ErrMaxMsgs is returned when we have discard new as a policy and we reached the message limit.
    48  	ErrMaxMsgs = errors.New("maximum messages exceeded")
    49  	// ErrMaxBytes is returned when we have discard new as a policy and we reached the bytes limit.
    50  	ErrMaxBytes = errors.New("maximum bytes exceeded")
    51  	// ErrMaxMsgsPerSubject is returned when we have discard new as a policy and we reached the message limit per subject.
    52  	ErrMaxMsgsPerSubject = errors.New("maximum messages per subject exceeded")
    53  	// ErrStoreSnapshotInProgress is returned when RemoveMsg or EraseMsg is called
    54  	// while a snapshot is in progress.
    55  	ErrStoreSnapshotInProgress = errors.New("snapshot in progress")
    56  	// ErrMsgTooLarge is returned when a message is considered too large.
    57  	ErrMsgTooLarge = errors.New("message too large")
    58  	// ErrStoreWrongType is for when you access the wrong storage type.
    59  	ErrStoreWrongType = errors.New("wrong storage type")
    60  	// ErrNoAckPolicy is returned when trying to update a consumer's acks with no ack policy.
    61  	ErrNoAckPolicy = errors.New("ack policy is none")
    62  	// ErrInvalidSequence is returned when the sequence is not present in the stream store.
    63  	ErrInvalidSequence = errors.New("invalid sequence")
    64  	// ErrSequenceMismatch is returned when storing a raw message and the expected sequence is wrong.
    65  	ErrSequenceMismatch = errors.New("expected sequence does not match store")
    66  	// ErrCorruptStreamState is returned when a stream state snapshot cannot be decoded.
    67  	ErrCorruptStreamState = errors.New("stream state snapshot is corrupt")
    68  	// ErrTooManyResults is returned when a request matches more results than allowed.
    69  	ErrTooManyResults = errors.New("too many matching results for request")
    70  )
    71  
    72  // StoreMsg is the stored message format for messages that are retained by the Store layer.
    73  type StoreMsg struct {
    74  	subj string
    75  	hdr  []byte
    76  	msg  []byte
    77  	buf  []byte
    78  	seq  uint64
    79  	ts   int64
    80  }
    81  
    82  // Used to call back into the upper layers to report on changes in storage resources.
    83  // For the cases where it's a single message we will also supply the sequence number and subject.
    84  type StorageUpdateHandler func(msgs, bytes int64, seq uint64, subj string)
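
        // Illustrative sketch: building a StorageUpdateHandler that accumulates the
        // reported changes. The helper name and the totals pointers are assumptions
        // for the example; msgs and bytes are reported as deltas, so removals show up
        // as negative values.
        func exampleUsageHandler(totalMsgs, totalBytes *int64) StorageUpdateHandler {
        	return func(msgs, bytes int64, seq uint64, subj string) {
        		// seq and subj are only meaningful when the update is for a single message.
        		*totalMsgs += msgs
        		*totalBytes += bytes
        	}
        }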
    85  
    86  type StreamStore interface {
    87  	StoreMsg(subject string, hdr, msg []byte) (uint64, int64, error)
    88  	StoreRawMsg(subject string, hdr, msg []byte, seq uint64, ts int64) error
    89  	SkipMsg() uint64
    90  	SkipMsgs(seq uint64, num uint64) error
    91  	LoadMsg(seq uint64, sm *StoreMsg) (*StoreMsg, error)
    92  	LoadNextMsg(filter string, wc bool, start uint64, smp *StoreMsg) (sm *StoreMsg, skip uint64, err error)
    93  	LoadLastMsg(subject string, sm *StoreMsg) (*StoreMsg, error)
    94  	RemoveMsg(seq uint64) (bool, error)
    95  	EraseMsg(seq uint64) (bool, error)
    96  	Purge() (uint64, error)
    97  	PurgeEx(subject string, seq, keep uint64) (uint64, error)
    98  	Compact(seq uint64) (uint64, error)
    99  	Truncate(seq uint64) error
   100  	GetSeqFromTime(t time.Time) uint64
   101  	FilteredState(seq uint64, subject string) SimpleState
   102  	SubjectsState(filterSubject string) map[string]SimpleState
   103  	SubjectsTotals(filterSubject string) map[string]uint64
   104  	MultiLastSeqs(filters []string, maxSeq uint64, maxAllowed int) ([]uint64, error)
   105  	NumPending(sseq uint64, filter string, lastPerSubject bool) (total, validThrough uint64)
   106  	State() StreamState
   107  	FastState(*StreamState)
   108  	EncodedStreamState(failed uint64) (enc []byte, err error)
   109  	SyncDeleted(dbs DeleteBlocks)
   110  	Type() StorageType
   111  	RegisterStorageUpdates(StorageUpdateHandler)
   112  	UpdateConfig(cfg *StreamConfig) error
   113  	Delete() error
   114  	Stop() error
   115  	ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerStore, error)
   116  	AddConsumer(o ConsumerStore) error
   117  	RemoveConsumer(o ConsumerStore) error
   118  	Snapshot(deadline time.Duration, includeConsumers, checkMsgs bool) (*SnapshotResult, error)
   119  	Utilization() (total, reported uint64, err error)
   120  }
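
        // Illustrative sketch of a typical StreamStore round trip; the function name,
        // subject and payload are made up for the example and work against any
        // StreamStore implementation.
        func exampleStreamStoreRoundTrip(ms StreamStore) error {
        	seq, _, err := ms.StoreMsg("orders.created", nil, []byte("hello"))
        	if err != nil {
        		return err
        	}
        	// Reuse a StoreMsg value so the load can avoid an extra allocation.
        	var smv StoreMsg
        	sm, err := ms.LoadMsg(seq, &smv)
        	if err != nil {
        		return err
        	}
        	// FastState is the cheaper way to grab the top-level counters.
        	var state StreamState
        	ms.FastState(&state)
        	fmt.Printf("stored %q at seq %d, stream now has %d msgs\n", sm.subj, sm.seq, state.Msgs)
        	return nil
        }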
   121  
   122  // RetentionPolicy determines how messages in a set are retained.
   123  type RetentionPolicy int
   124  
   125  const (
   126  	// LimitsPolicy (default) means that messages are retained until any given limit is reached.
   127  	// This could be one of MaxMsgs, MaxBytes, or MaxAge.
   128  	LimitsPolicy RetentionPolicy = iota
   129  	// InterestPolicy specifies that when all known consumers have acknowledged a message it can be removed.
   130  	InterestPolicy
   131  	// WorkQueuePolicy specifies that when the first worker or subscriber acknowledges the message it can be removed.
   132  	WorkQueuePolicy
   133  )
   134  
   135  // DiscardPolicy determines how we proceed when limits of messages or bytes are hit. The default, DiscardOld,
   136  // will remove older messages. DiscardNew will fail to store the new message.
   137  type DiscardPolicy int
   138  
   139  const (
   140  	// DiscardOld will remove older messages to return to the limits.
   141  	DiscardOld = iota
   142  	// DiscardNew will error on a StoreMsg call
   143  	DiscardNew
   144  )
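
        // Illustrative sketch: with DiscardNew the store rejects writes once a limit is
        // hit, surfacing one of the limit errors declared above for the caller to handle.
        // The wrapper name is an assumption for the example.
        func exampleStoreWithDiscardNew(ms StreamStore, subj string, msg []byte) (uint64, error) {
        	seq, _, err := ms.StoreMsg(subj, nil, msg)
        	if errors.Is(err, ErrMaxMsgs) || errors.Is(err, ErrMaxBytes) || errors.Is(err, ErrMaxMsgsPerSubject) {
        		// The stream is at a limit and configured to reject new messages
        		// rather than discard old ones.
        		return 0, fmt.Errorf("stream is full: %w", err)
        	}
        	return seq, err
        }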
   145  
   146  // StreamState is information about the given stream.
   147  type StreamState struct {
   148  	Msgs        uint64            `json:"messages"`
   149  	Bytes       uint64            `json:"bytes"`
   150  	FirstSeq    uint64            `json:"first_seq"`
   151  	FirstTime   time.Time         `json:"first_ts"`
   152  	LastSeq     uint64            `json:"last_seq"`
   153  	LastTime    time.Time         `json:"last_ts"`
   154  	NumSubjects int               `json:"num_subjects,omitempty"`
   155  	Subjects    map[string]uint64 `json:"subjects,omitempty"`
   156  	NumDeleted  int               `json:"num_deleted,omitempty"`
   157  	Deleted     []uint64          `json:"deleted,omitempty"`
   158  	Lost        *LostStreamData   `json:"lost,omitempty"`
   159  	Consumers   int               `json:"consumer_count"`
   160  }
   161  
   162  // SimpleState for filtered subject specific state.
   163  type SimpleState struct {
   164  	Msgs  uint64 `json:"messages"`
   165  	First uint64 `json:"first_seq"`
   166  	Last  uint64 `json:"last_seq"`
   167  
   168  	// Internal usage for when the first needs to be updated before use.
   169  	firstNeedsUpdate bool
   170  }
   171  
   172  // LostStreamData indicates msgs that have been lost.
   173  type LostStreamData struct {
   174  	Msgs  []uint64 `json:"msgs"`
   175  	Bytes uint64   `json:"bytes"`
   176  }
   177  
   178  // SnapshotResult contains information about the snapshot.
   179  type SnapshotResult struct {
   180  	Reader io.ReadCloser
   181  	State  StreamState
   182  }
   183  
   184  const (
   185  	// Magic is used to identify stream state encodings.
   186  	streamStateMagic = uint8(42)
   187  	// Version of the stream state encoding.
   188  	streamStateVersion = uint8(1)
   189  	// Magic / Identifier for run length encodings.
   190  	runLengthMagic = uint8(33)
   191  	// Magic / Identifier for AVL seqsets.
   192  	seqSetMagic = uint8(22)
   193  )
   194  
   195  // Interface for DeleteBlock.
   196  // These will be of three types:
   197  // 1. AVL seqsets.
   198  // 2. Run length encoding of a deleted range.
   199  // 3. Legacy []uint64
   200  type DeleteBlock interface {
   201  	State() (first, last, num uint64)
   202  	Range(f func(uint64) bool)
   203  }
   204  
   205  type DeleteBlocks []DeleteBlock
   206  
   207  // StreamReplicatedState represents what is encoded in a binary stream snapshot used
   208  // for stream replication in an NRG.
   209  type StreamReplicatedState struct {
   210  	Msgs     uint64
   211  	Bytes    uint64
   212  	FirstSeq uint64
   213  	LastSeq  uint64
   214  	Failed   uint64
   215  	Deleted  DeleteBlocks
   216  }
   217  
   218  // Determine if this is an encoded stream state.
   219  func IsEncodedStreamState(buf []byte) bool {
   220  	return len(buf) >= hdrLen && buf[0] == streamStateMagic && buf[1] == streamStateVersion
   221  }
   222  
   223  var ErrBadStreamStateEncoding = errors.New("bad stream state encoding")
   224  
   225  func DecodeStreamState(buf []byte) (*StreamReplicatedState, error) {
   226  	ss := &StreamReplicatedState{}
   227  	if len(buf) < hdrLen || buf[0] != streamStateMagic || buf[1] != streamStateVersion {
   228  		return nil, ErrBadStreamStateEncoding
   229  	}
   230  	var bi = hdrLen
   231  
   232  	readU64 := func() uint64 {
   233  		if bi < 0 || bi >= len(buf) {
   234  			bi = -1
   235  			return 0
   236  		}
   237  		num, n := binary.Uvarint(buf[bi:])
   238  		if n <= 0 {
   239  			bi = -1
   240  			return 0
   241  		}
   242  		bi += n
   243  		return num
   244  	}
   245  
   246  	parserFailed := func() bool {
   247  		return bi < 0
   248  	}
   249  
   250  	ss.Msgs = readU64()
   251  	ss.Bytes = readU64()
   252  	ss.FirstSeq = readU64()
   253  	ss.LastSeq = readU64()
   254  	ss.Failed = readU64()
   255  
   256  	if parserFailed() {
   257  		return nil, ErrCorruptStreamState
   258  	}
   259  
   260  	if numDeleted := readU64(); numDeleted > 0 {
   261  		// If we have some deleted blocks.
   262  		for l := len(buf); l > bi; {
   263  			switch buf[bi] {
   264  			case seqSetMagic:
   265  				dmap, n, err := avl.Decode(buf[bi:])
   266  				if err != nil {
   267  					return nil, ErrCorruptStreamState
   268  				}
   269  				bi += n
   270  				ss.Deleted = append(ss.Deleted, dmap)
   271  			case runLengthMagic:
   272  				bi++
   273  				var rl DeleteRange
   274  				rl.First = readU64()
   275  				rl.Num = readU64()
   276  				if parserFailed() {
   277  					return nil, ErrCorruptStreamState
   278  				}
   279  				ss.Deleted = append(ss.Deleted, &rl)
   280  			default:
   281  				return nil, ErrCorruptStreamState
   282  			}
   283  		}
   284  	}
   285  
   286  	return ss, nil
   287  }
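
        // Illustrative sketch: validating and decoding a replicated stream state
        // snapshot, e.g. one produced by a store's EncodedStreamState. The function
        // name is an assumption for the example.
        func exampleDecodeStreamState(buf []byte) (*StreamReplicatedState, error) {
        	if !IsEncodedStreamState(buf) {
        		return nil, ErrBadStreamStateEncoding
        	}
        	ss, err := DecodeStreamState(buf)
        	if err != nil {
        		return nil, err
        	}
        	fmt.Printf("msgs=%d bytes=%d first=%d last=%d deleted=%d\n",
        		ss.Msgs, ss.Bytes, ss.FirstSeq, ss.LastSeq, ss.Deleted.NumDeleted())
        	return ss, nil
        }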
   288  
   289  // DeleteRange is a run length encoded delete range.
   290  type DeleteRange struct {
   291  	First uint64
   292  	Num   uint64
   293  }
   294  
   295  func (dr *DeleteRange) State() (first, last, num uint64) {
   296  	return dr.First, dr.First + dr.Num - 1, dr.Num // Num counts the deleted sequences, so the last one is First+Num-1.
   297  }
   298  
   299  // Range will range over all the deleted sequences represented by this block.
   300  func (dr *DeleteRange) Range(f func(uint64) bool) {
   301  	for seq := dr.First; seq < dr.First+dr.Num; seq++ {
   302  		if !f(seq) {
   303  			return
   304  		}
   305  	}
   306  }
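
        // Illustrative sketch of the run length semantics: this range represents the
        // three deleted sequences 5, 6 and 7.
        func exampleDeleteRange() {
        	dr := &DeleteRange{First: 5, Num: 3}
        	first, last, num := dr.State()
        	fmt.Println(first, last, num) // 5 7 3
        	dr.Range(func(seq uint64) bool {
        		fmt.Println(seq) // 5, then 6, then 7
        		return true
        	})
        }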
   307  
   308  // Legacy []uint64
   309  type DeleteSlice []uint64
   310  
   311  func (ds DeleteSlice) State() (first, last, num uint64) {
   312  	if len(ds) == 0 {
   313  		return 0, 0, 0
   314  	}
   315  	return ds[0], ds[len(ds)-1], uint64(len(ds))
   316  }
   317  
   318  // Range will range over all the deleted sequences represented by this []uint64.
   319  func (ds DeleteSlice) Range(f func(uint64) bool) {
   320  	for _, seq := range ds {
   321  		if !f(seq) {
   322  			return
   323  		}
   324  	}
   325  }
   326  
   327  func (dbs DeleteBlocks) NumDeleted() (total uint64) {
   328  	for _, db := range dbs {
   329  		_, _, num := db.State()
   330  		total += num
   331  	}
   332  	return total
   333  }
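
        // Illustrative sketch: expanding DeleteBlocks into the individual deleted
        // sequences, regardless of which of the three encodings backs each block.
        // The helper name is an assumption for the example.
        func exampleCollectDeleted(dbs DeleteBlocks) []uint64 {
        	var seqs []uint64
        	for _, db := range dbs {
        		db.Range(func(seq uint64) bool {
        			seqs = append(seqs, seq)
        			return true // Keep ranging.
        		})
        	}
        	return seqs
        }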
   334  
   335  // ConsumerStore stores state on consumers for streams.
   336  type ConsumerStore interface {
   337  	SetStarting(sseq uint64) error
   338  	HasState() bool
   339  	UpdateDelivered(dseq, sseq, dc uint64, ts int64) error
   340  	UpdateAcks(dseq, sseq uint64) error
   341  	UpdateConfig(cfg *ConsumerConfig) error
   342  	Update(*ConsumerState) error
   343  	State() (*ConsumerState, error)
   344  	BorrowState() (*ConsumerState, error)
   345  	EncodedState() ([]byte, error)
   346  	Type() StorageType
   347  	Stop() error
   348  	Delete() error
   349  	StreamDelete() error
   350  }
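
        // Illustrative sketch: recording a delivery and its ack against a ConsumerStore.
        // The sequence numbers are made up for the example; dc is the delivery count.
        func exampleTrackDeliveryAndAck(cs ConsumerStore) error {
        	// Consumer sequence 1 delivered stream sequence 10 for the first time.
        	if err := cs.UpdateDelivered(1, 10, 1, time.Now().UnixNano()); err != nil {
        		return err
        	}
        	// The client acknowledged that delivery.
        	return cs.UpdateAcks(1, 10)
        }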
   351  
   352  // SequencePair has both the consumer and the stream sequence. They point to the same message.
   353  type SequencePair struct {
   354  	Consumer uint64 `json:"consumer_seq"`
   355  	Stream   uint64 `json:"stream_seq"`
   356  }
   357  
   358  // ConsumerState represents a stored state for a consumer.
   359  type ConsumerState struct {
   360  	// Delivered keeps track of last delivered sequence numbers for both the stream and the consumer.
   361  	Delivered SequencePair `json:"delivered"`
   362  	// AckFloor keeps track of the ack floors for both the stream and the consumer.
   363  	AckFloor SequencePair `json:"ack_floor"`
   364  	// These are both in stream sequence context.
   365  	// Pending is for all messages pending and the timestamp for the delivered time.
   366  	// This will only be present when the AckPolicy is ExplicitAck.
   367  	Pending map[uint64]*Pending `json:"pending,omitempty"`
   368  	// This is for messages that have been redelivered, so count > 1.
   369  	Redelivered map[uint64]uint64 `json:"redelivered,omitempty"`
   370  }
   371  
   372  // Encode consumer state.
   373  func encodeConsumerState(state *ConsumerState) []byte {
   374  	var hdr [seqsHdrSize]byte
   375  	var buf []byte
   376  
   377  	maxSize := seqsHdrSize
   378  	if lp := len(state.Pending); lp > 0 {
   379  		maxSize += lp*(3*binary.MaxVarintLen64) + binary.MaxVarintLen64
   380  	}
   381  	if lr := len(state.Redelivered); lr > 0 {
   382  		maxSize += lr*(2*binary.MaxVarintLen64) + binary.MaxVarintLen64
   383  	}
   384  	if maxSize == seqsHdrSize {
   385  		buf = hdr[:seqsHdrSize]
   386  	} else {
   387  		buf = make([]byte, maxSize)
   388  	}
   389  
   390  	// Write header
   391  	buf[0] = magic
   392  	buf[1] = 2
   393  
   394  	n := hdrLen
   395  	n += binary.PutUvarint(buf[n:], state.AckFloor.Consumer)
   396  	n += binary.PutUvarint(buf[n:], state.AckFloor.Stream)
   397  	n += binary.PutUvarint(buf[n:], state.Delivered.Consumer)
   398  	n += binary.PutUvarint(buf[n:], state.Delivered.Stream)
   399  	n += binary.PutUvarint(buf[n:], uint64(len(state.Pending)))
   400  
   401  	asflr := state.AckFloor.Stream
   402  	adflr := state.AckFloor.Consumer
   403  
   404  	// These are optional, but we always write the lengths. This avoids having to truncate inline.
   405  	if len(state.Pending) > 0 {
   406  		// To save space we will use now rounded to seconds to be our base timestamp.
   407  		mints := time.Now().Round(time.Second).Unix()
   408  		// Write out the base timestamp.
   409  		n += binary.PutVarint(buf[n:], mints)
   410  
   411  		for k, v := range state.Pending {
   412  			n += binary.PutUvarint(buf[n:], k-asflr)
   413  			n += binary.PutUvarint(buf[n:], v.Sequence-adflr)
   414  			// Downsample to seconds to save on space.
   415  			// Subsecond resolution not needed for recovery etc.
   416  			ts := v.Timestamp / int64(time.Second)
   417  			n += binary.PutVarint(buf[n:], mints-ts)
   418  		}
   419  	}
   420  
   421  	// We always write the redelivered len.
   422  	n += binary.PutUvarint(buf[n:], uint64(len(state.Redelivered)))
   423  
   424  	// We expect these to be small.
   425  	if len(state.Redelivered) > 0 {
   426  		for k, v := range state.Redelivered {
   427  			n += binary.PutUvarint(buf[n:], k-asflr)
   428  			n += binary.PutUvarint(buf[n:], v)
   429  		}
   430  	}
   431  
   432  	return buf[:n]
   433  }
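
        // Illustrative sketch: encoding a small ConsumerState. The sequence numbers and
        // the single pending entry are made up for the example.
        func exampleEncodeConsumerState() []byte {
        	state := &ConsumerState{
        		Delivered:   SequencePair{Consumer: 22, Stream: 22},
        		AckFloor:    SequencePair{Consumer: 11, Stream: 11},
        		Pending:     map[uint64]*Pending{22: {Sequence: 22, Timestamp: time.Now().UnixNano()}},
        		Redelivered: map[uint64]uint64{22: 2},
        	}
        	return encodeConsumerState(state)
        }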
   434  
   435  // Represents a pending message for explicit ack or ack all.
   436  // Sequence is the original consumer sequence.
   437  type Pending struct {
   438  	Sequence  uint64
   439  	Timestamp int64
   440  }
   441  
   442  // TemplateStore stores templates.
   443  type TemplateStore interface {
   444  	Store(*streamTemplate) error
   445  	Delete(*streamTemplate) error
   446  }
   447  
   448  const (
   449  	limitsPolicyJSONString    = `"limits"`
   450  	interestPolicyJSONString  = `"interest"`
   451  	workQueuePolicyJSONString = `"workqueue"`
   452  )
   453  
   454  var (
   455  	limitsPolicyJSONBytes    = []byte(limitsPolicyJSONString)
   456  	interestPolicyJSONBytes  = []byte(interestPolicyJSONString)
   457  	workQueuePolicyJSONBytes = []byte(workQueuePolicyJSONString)
   458  )
   459  
   460  func (rp RetentionPolicy) String() string {
   461  	switch rp {
   462  	case LimitsPolicy:
   463  		return "Limits"
   464  	case InterestPolicy:
   465  		return "Interest"
   466  	case WorkQueuePolicy:
   467  		return "WorkQueue"
   468  	default:
   469  		return "Unknown Retention Policy"
   470  	}
   471  }
   472  
   473  func (rp RetentionPolicy) MarshalJSON() ([]byte, error) {
   474  	switch rp {
   475  	case LimitsPolicy:
   476  		return limitsPolicyJSONBytes, nil
   477  	case InterestPolicy:
   478  		return interestPolicyJSONBytes, nil
   479  	case WorkQueuePolicy:
   480  		return workQueuePolicyJSONBytes, nil
   481  	default:
   482  		return nil, fmt.Errorf("can not marshal %v", rp)
   483  	}
   484  }
   485  
   486  func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error {
   487  	switch string(data) {
   488  	case limitsPolicyJSONString:
   489  		*rp = LimitsPolicy
   490  	case interestPolicyJSONString:
   491  		*rp = InterestPolicy
   492  	case workQueuePolicyJSONString:
   493  		*rp = WorkQueuePolicy
   494  	default:
   495  		return fmt.Errorf("can not unmarshal %q", data)
   496  	}
   497  	return nil
   498  }
   499  
   500  func (dp DiscardPolicy) String() string {
   501  	switch dp {
   502  	case DiscardOld:
   503  		return "DiscardOld"
   504  	case DiscardNew:
   505  		return "DiscardNew"
   506  	default:
   507  		return "Unknown Discard Policy"
   508  	}
   509  }
   510  
   511  func (dp DiscardPolicy) MarshalJSON() ([]byte, error) {
   512  	switch dp {
   513  	case DiscardOld:
   514  		return []byte(`"old"`), nil
   515  	case DiscardNew:
   516  		return []byte(`"new"`), nil
   517  	default:
   518  		return nil, fmt.Errorf("can not marshal %v", dp)
   519  	}
   520  }
   521  
   522  func (dp *DiscardPolicy) UnmarshalJSON(data []byte) error {
   523  	switch strings.ToLower(string(data)) {
   524  	case `"old"`:
   525  		*dp = DiscardOld
   526  	case `"new"`:
   527  		*dp = DiscardNew
   528  	default:
   529  		return fmt.Errorf("can not unmarshal %q", data)
   530  	}
   531  	return nil
   532  }
   533  
   534  const (
   535  	memoryStorageJSONString = `"memory"`
   536  	fileStorageJSONString   = `"file"`
   537  	anyStorageJSONString    = `"any"`
   538  )
   539  
   540  var (
   541  	memoryStorageJSONBytes = []byte(memoryStorageJSONString)
   542  	fileStorageJSONBytes   = []byte(fileStorageJSONString)
   543  	anyStorageJSONBytes    = []byte(anyStorageJSONString)
   544  )
   545  
   546  func (st StorageType) String() string {
   547  	switch st {
   548  	case MemoryStorage:
   549  		return "Memory"
   550  	case FileStorage:
   551  		return "File"
   552  	case AnyStorage:
   553  		return "Any"
   554  	default:
   555  		return "Unknown Storage Type"
   556  	}
   557  }
   558  
   559  func (st StorageType) MarshalJSON() ([]byte, error) {
   560  	switch st {
   561  	case MemoryStorage:
   562  		return memoryStorageJSONBytes, nil
   563  	case FileStorage:
   564  		return fileStorageJSONBytes, nil
   565  	case AnyStorage:
   566  		return anyStorageJSONBytes, nil
   567  	default:
   568  		return nil, fmt.Errorf("can not marshal %v", st)
   569  	}
   570  }
   571  
   572  func (st *StorageType) UnmarshalJSON(data []byte) error {
   573  	switch string(data) {
   574  	case memoryStorageJSONString:
   575  		*st = MemoryStorage
   576  	case fileStorageJSONString:
   577  		*st = FileStorage
   578  	case anyStorageJSONString:
   579  		*st = AnyStorage
   580  	default:
   581  		return fmt.Errorf("can not unmarshal %q", data)
   582  	}
   583  	return nil
   584  }
   585  
   586  const (
   587  	ackNonePolicyJSONString     = `"none"`
   588  	ackAllPolicyJSONString      = `"all"`
   589  	ackExplicitPolicyJSONString = `"explicit"`
   590  )
   591  
   592  var (
   593  	ackNonePolicyJSONBytes     = []byte(ackNonePolicyJSONString)
   594  	ackAllPolicyJSONBytes      = []byte(ackAllPolicyJSONString)
   595  	ackExplicitPolicyJSONBytes = []byte(ackExplicitPolicyJSONString)
   596  )
   597  
   598  func (ap AckPolicy) MarshalJSON() ([]byte, error) {
   599  	switch ap {
   600  	case AckNone:
   601  		return ackNonePolicyJSONBytes, nil
   602  	case AckAll:
   603  		return ackAllPolicyJSONBytes, nil
   604  	case AckExplicit:
   605  		return ackExplicitPolicyJSONBytes, nil
   606  	default:
   607  		return nil, fmt.Errorf("can not marshal %v", ap)
   608  	}
   609  }
   610  
   611  func (ap *AckPolicy) UnmarshalJSON(data []byte) error {
   612  	switch string(data) {
   613  	case ackNonePolicyJSONString:
   614  		*ap = AckNone
   615  	case ackAllPolicyJSONString:
   616  		*ap = AckAll
   617  	case ackExplicitPolicyJSONString:
   618  		*ap = AckExplicit
   619  	default:
   620  		return fmt.Errorf("can not unmarshal %q", data)
   621  	}
   622  	return nil
   623  }
   624  
   625  const (
   626  	replayInstantPolicyJSONString  = `"instant"`
   627  	replayOriginalPolicyJSONString = `"original"`
   628  )
   629  
   630  var (
   631  	replayInstantPolicyJSONBytes  = []byte(replayInstantPolicyJSONString)
   632  	replayOriginalPolicyJSONBytes = []byte(replayOriginalPolicyJSONString)
   633  )
   634  
   635  func (rp ReplayPolicy) MarshalJSON() ([]byte, error) {
   636  	switch rp {
   637  	case ReplayInstant:
   638  		return replayInstantPolicyJSONBytes, nil
   639  	case ReplayOriginal:
   640  		return replayOriginalPolicyJSONBytes, nil
   641  	default:
   642  		return nil, fmt.Errorf("can not marshal %v", rp)
   643  	}
   644  }
   645  
   646  func (rp *ReplayPolicy) UnmarshalJSON(data []byte) error {
   647  	switch string(data) {
   648  	case replayInstantPolicyJSONString:
   649  		*rp = ReplayInstant
   650  	case replayOriginalPolicyJSONString:
   651  		*rp = ReplayOriginal
   652  	default:
   653  		return fmt.Errorf("can not unmarshal %q", data)
   654  	}
   655  	return nil
   656  }
   657  
   658  const (
   659  	deliverAllPolicyJSONString       = `"all"`
   660  	deliverLastPolicyJSONString      = `"last"`
   661  	deliverNewPolicyJSONString       = `"new"`
   662  	deliverByStartSequenceJSONString = `"by_start_sequence"`
   663  	deliverByStartTimeJSONString     = `"by_start_time"`
   664  	deliverLastPerPolicyJSONString   = `"last_per_subject"`
   665  	deliverUndefinedJSONString       = `"undefined"`
   666  )
   667  
   668  var (
   669  	deliverAllPolicyJSONBytes       = []byte(deliverAllPolicyJSONString)
   670  	deliverLastPolicyJSONBytes      = []byte(deliverLastPolicyJSONString)
   671  	deliverNewPolicyJSONBytes       = []byte(deliverNewPolicyJSONString)
   672  	deliverByStartSequenceJSONBytes = []byte(deliverByStartSequenceJSONString)
   673  	deliverByStartTimeJSONBytes     = []byte(deliverByStartTimeJSONString)
   674  	deliverLastPerPolicyJSONBytes   = []byte(deliverLastPerPolicyJSONString)
   675  	deliverUndefinedJSONBytes       = []byte(deliverUndefinedJSONString)
   676  )
   677  
   678  func (p *DeliverPolicy) UnmarshalJSON(data []byte) error {
   679  	switch string(data) {
   680  	case deliverAllPolicyJSONString, deliverUndefinedJSONString:
   681  		*p = DeliverAll
   682  	case deliverLastPolicyJSONString:
   683  		*p = DeliverLast
   684  	case deliverLastPerPolicyJSONString:
   685  		*p = DeliverLastPerSubject
   686  	case deliverNewPolicyJSONString:
   687  		*p = DeliverNew
   688  	case deliverByStartSequenceJSONString:
   689  		*p = DeliverByStartSequence
   690  	case deliverByStartTimeJSONString:
   691  		*p = DeliverByStartTime
   692  	default:
   693  		return fmt.Errorf("can not unmarshal %q", data)
   694  	}
   695  
   696  	return nil
   697  }
   698  
   699  func (p DeliverPolicy) MarshalJSON() ([]byte, error) {
   700  	switch p {
   701  	case DeliverAll:
   702  		return deliverAllPolicyJSONBytes, nil
   703  	case DeliverLast:
   704  		return deliverLastPolicyJSONBytes, nil
   705  	case DeliverLastPerSubject:
   706  		return deliverLastPerPolicyJSONBytes, nil
   707  	case DeliverNew:
   708  		return deliverNewPolicyJSONBytes, nil
   709  	case DeliverByStartSequence:
   710  		return deliverByStartSequenceJSONBytes, nil
   711  	case DeliverByStartTime:
   712  		return deliverByStartTimeJSONBytes, nil
   713  	default:
   714  		return deliverUndefinedJSONBytes, nil
   715  	}
   716  }
   717  
   718  func isOutOfSpaceErr(err error) bool {
   719  	return err != nil && (strings.Contains(err.Error(), "no space left"))
   720  }
   721  
   722  // For when our upper layer catchup detects it is missing messages from the beginning of the stream.
   723  var errFirstSequenceMismatch = errors.New("first sequence mismatch")
   724  
   725  func isClusterResetErr(err error) bool {
   726  	return err == errLastSeqMismatch || err == ErrStoreEOF || err == errFirstSequenceMismatch
   727  }
   728  
   729  // Copy all fields from smo into sm, reusing sm's buffer when possible.
   730  func (smo *StoreMsg) copy(sm *StoreMsg) {
   731  	if sm.buf != nil {
   732  		sm.buf = sm.buf[:0]
   733  	}
   734  	sm.buf = append(sm.buf, smo.buf...)
   735  	// We set cap on header in case someone wants to expand it.
   736  	sm.hdr, sm.msg = sm.buf[:len(smo.hdr):len(smo.hdr)], sm.buf[len(smo.hdr):]
   737  	sm.subj, sm.seq, sm.ts = smo.subj, smo.seq, smo.ts
   738  }
   739  
   740  // Clear all fields except the underlying buffer, which is reset to [:0] if present.
   741  func (sm *StoreMsg) clear() {
   742  	if sm == nil {
   743  		return
   744  	}
   745  	*sm = StoreMsg{_EMPTY_, nil, nil, sm.buf, 0, 0}
   746  	if len(sm.buf) > 0 {
   747  		sm.buf = sm.buf[:0]
   748  	}
   749  }
   750  
   751  // Note this will avoid a copy of the data used for the string, but it will also reference the existing slice's data pointer.
   752  // So this should be used sparingly, and only when we know the encompassing byte slice's lifetime matches the string's.
   753  func bytesToString(b []byte) string {
   754  	if len(b) == 0 {
   755  		return _EMPTY_
   756  	}
   757  	p := unsafe.SliceData(b)
   758  	return unsafe.String(p, len(b))
   759  }
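
        // Illustrative sketch of the aliasing caveat: the returned string shares the
        // slice's backing array, so later writes to the slice are visible through it.
        func exampleBytesToStringAliasing() {
        	b := []byte("foo.bar")
        	s := bytesToString(b)
        	b[0] = 'F'
        	fmt.Println(s) // Prints "Foo.bar": the string observed the mutation.
        }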
   760  
   761  // Same in reverse. Used less often.
   762  func stringToBytes(s string) []byte {
   763  	if len(s) == 0 {
   764  		return nil
   765  	}
   766  	p := unsafe.StringData(s)
   767  	b := unsafe.Slice(p, len(s))
   768  	return b
   769  }