github.com/nats-io/nats-server/v2@v2.11.0-preview.2/server/store.go

// Copyright 2019-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package server

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"strings"
	"time"
	"unsafe"

	"github.com/nats-io/nats-server/v2/server/avl"
)

// StorageType determines how messages are stored for retention.
type StorageType int

const (
	// FileStorage specifies on-disk storage, designated by the JetStream config StoreDir.
	FileStorage = StorageType(22)
	// MemoryStorage specifies in-memory only storage.
	MemoryStorage = StorageType(33)
	// AnyStorage is for internal use only.
	AnyStorage = StorageType(44)
)

var (
	// ErrStoreClosed is returned when the store has been closed.
	ErrStoreClosed = errors.New("store is closed")
	// ErrStoreMsgNotFound is returned when a message was expected but not found.
	ErrStoreMsgNotFound = errors.New("no message found")
	// ErrStoreEOF is returned when the message seq is greater than the last sequence.
	ErrStoreEOF = errors.New("stream store EOF")
	// ErrMaxMsgs is returned when the discard policy is DiscardNew and we have reached the message limit.
	ErrMaxMsgs = errors.New("maximum messages exceeded")
	// ErrMaxBytes is returned when the discard policy is DiscardNew and we have reached the bytes limit.
	ErrMaxBytes = errors.New("maximum bytes exceeded")
	// ErrMaxMsgsPerSubject is returned when the discard policy is DiscardNew and we have reached the per-subject message limit.
	ErrMaxMsgsPerSubject = errors.New("maximum messages per subject exceeded")
	// ErrStoreSnapshotInProgress is returned when RemoveMsg or EraseMsg is called
	// while a snapshot is in progress.
	ErrStoreSnapshotInProgress = errors.New("snapshot in progress")
	// ErrMsgTooLarge is returned when a message is considered too large.
	ErrMsgTooLarge = errors.New("message too large")
	// ErrStoreWrongType is returned when the wrong storage type is accessed.
	ErrStoreWrongType = errors.New("wrong storage type")
	// ErrNoAckPolicy is returned when trying to update a consumer's acks and the ack policy is none.
	ErrNoAckPolicy = errors.New("ack policy is none")
	// ErrInvalidSequence is returned when the sequence is not present in the stream store.
	ErrInvalidSequence = errors.New("invalid sequence")
	// ErrSequenceMismatch is returned when storing a raw message and the expected sequence is wrong.
	ErrSequenceMismatch = errors.New("expected sequence does not match store")
	// ErrCorruptStreamState is returned when a stream state snapshot can not be decoded.
	ErrCorruptStreamState = errors.New("stream state snapshot is corrupt")
	// ErrTooManyResults is returned when a request matches too many results.
	ErrTooManyResults = errors.New("too many matching results for request")
)

// StoreMsg is the stored message format for messages that are retained by the Store layer.
type StoreMsg struct {
	subj string
	hdr  []byte
	msg  []byte
	buf  []byte
	seq  uint64
	ts   int64
}

// StorageUpdateHandler is used to call back into the upper layers to report on changes in storage resources.
// For the cases where it's a single message we will also supply the sequence number and subject.
type StorageUpdateHandler func(msgs, bytes int64, seq uint64, subj string)

type StreamStore interface {
	StoreMsg(subject string, hdr, msg []byte) (uint64, int64, error)
	StoreRawMsg(subject string, hdr, msg []byte, seq uint64, ts int64) error
	SkipMsg() uint64
	SkipMsgs(seq uint64, num uint64) error
	LoadMsg(seq uint64, sm *StoreMsg) (*StoreMsg, error)
	LoadNextMsg(filter string, wc bool, start uint64, smp *StoreMsg) (sm *StoreMsg, skip uint64, err error)
	LoadNextMsgMulti(sl *Sublist, start uint64, smp *StoreMsg) (sm *StoreMsg, skip uint64, err error)
	LoadLastMsg(subject string, sm *StoreMsg) (*StoreMsg, error)
	RemoveMsg(seq uint64) (bool, error)
	EraseMsg(seq uint64) (bool, error)
	Purge() (uint64, error)
	PurgeEx(subject string, seq, keep uint64) (uint64, error)
	Compact(seq uint64) (uint64, error)
	Truncate(seq uint64) error
	GetSeqFromTime(t time.Time) uint64
	FilteredState(seq uint64, subject string) SimpleState
	SubjectsState(filterSubject string) map[string]SimpleState
	SubjectsTotals(filterSubject string) map[string]uint64
	MultiLastSeqs(filters []string, maxSeq uint64, maxAllowed int) ([]uint64, error)
	NumPending(sseq uint64, filter string, lastPerSubject bool) (total, validThrough uint64)
	State() StreamState
	FastState(*StreamState)
	EncodedStreamState(failed uint64) (enc []byte, err error)
	SyncDeleted(dbs DeleteBlocks)
	Type() StorageType
	RegisterStorageUpdates(StorageUpdateHandler)
	UpdateConfig(cfg *StreamConfig) error
	Delete() error
	Stop() error
	ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerStore, error)
	AddConsumer(o ConsumerStore) error
	RemoveConsumer(o ConsumerStore) error
	Snapshot(deadline time.Duration, includeConsumers, checkMsgs bool) (*SnapshotResult, error)
	Utilization() (total, reported uint64, err error)
}

// RetentionPolicy determines how messages in a set are retained.
type RetentionPolicy int

const (
	// LimitsPolicy (default) means that messages are retained until any given limit is reached.
	// This could be one of MaxMsgs, MaxBytes, or MaxAge.
	LimitsPolicy RetentionPolicy = iota
	// InterestPolicy specifies that when all known consumers have acknowledged a message it can be removed.
	InterestPolicy
	// WorkQueuePolicy specifies that when the first worker or subscriber acknowledges the message it can be removed.
	WorkQueuePolicy
)

// DiscardPolicy determines how we proceed when limits of messages or bytes are hit. The default, DiscardOld, will
// remove older messages. DiscardNew will fail to store the new message.
type DiscardPolicy int

const (
	// DiscardOld will remove older messages to return to the limits.
	DiscardOld = iota
	// DiscardNew will error on a StoreMsg call.
	DiscardNew
)
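// exampleStoreAndLoad is a purely illustrative sketch (not used anywhere in the
// server) of the StreamStore API above: it registers a StorageUpdateHandler,
// stores a message, and loads it back by sequence. The function name, the
// subject "foo.bar" and the payload are hypothetical; fs is assumed to be an
// already-created file- or memory-backed store.
func exampleStoreAndLoad(fs StreamStore) error {
	// Report resource deltas; seq and subj are supplied for single-message updates.
	fs.RegisterStorageUpdates(func(msgs, bytes int64, seq uint64, subj string) {})

	// Store a message and remember its stream sequence.
	seq, _, err := fs.StoreMsg("foo.bar", nil, []byte("hello"))
	if err != nil {
		return err
	}

	// Load it back, reusing a caller-owned StoreMsg to avoid allocations.
	var sm StoreMsg
	if _, err := fs.LoadMsg(seq, &sm); err != nil {
		return err
	}
	return nil
}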
// StreamState is information about the given stream.
type StreamState struct {
	Msgs        uint64            `json:"messages"`
	Bytes       uint64            `json:"bytes"`
	FirstSeq    uint64            `json:"first_seq"`
	FirstTime   time.Time         `json:"first_ts"`
	LastSeq     uint64            `json:"last_seq"`
	LastTime    time.Time         `json:"last_ts"`
	NumSubjects int               `json:"num_subjects,omitempty"`
	Subjects    map[string]uint64 `json:"subjects,omitempty"`
	NumDeleted  int               `json:"num_deleted,omitempty"`
	Deleted     []uint64          `json:"deleted,omitempty"`
	Lost        *LostStreamData   `json:"lost,omitempty"`
	Consumers   int               `json:"consumer_count"`
}

// SimpleState for filtered subject specific state.
type SimpleState struct {
	Msgs  uint64 `json:"messages"`
	First uint64 `json:"first_seq"`
	Last  uint64 `json:"last_seq"`

	// Internal usage for when the first needs to be updated before use.
	firstNeedsUpdate bool
}

// LostStreamData indicates msgs that have been lost.
type LostStreamData struct {
	Msgs  []uint64 `json:"msgs"`
	Bytes uint64   `json:"bytes"`
}

// SnapshotResult contains information about the snapshot.
type SnapshotResult struct {
	Reader io.ReadCloser
	State  StreamState
}

const (
	// Magic is used to identify stream state encodings.
	streamStateMagic = uint8(42)
	// Version of the stream state encoding.
	streamStateVersion = uint8(1)
	// Magic / Identifier for run length encodings.
	runLengthMagic = uint8(33)
	// Magic / Identifier for AVL seqsets.
	seqSetMagic = uint8(22)
)

// Interface for DeleteBlock.
// These will be of three types:
// 1. AVL seqsets.
// 2. Run length encoding of a deleted range.
// 3. Legacy []uint64
type DeleteBlock interface {
	State() (first, last, num uint64)
	Range(f func(uint64) bool)
}

type DeleteBlocks []DeleteBlock

// StreamReplicatedState represents what is encoded in a binary stream snapshot used
// for stream replication in an NRG.
type StreamReplicatedState struct {
	Msgs     uint64
	Bytes    uint64
	FirstSeq uint64
	LastSeq  uint64
	Failed   uint64
	Deleted  DeleteBlocks
}

// IsEncodedStreamState determines if this is an encoded stream state.
func IsEncodedStreamState(buf []byte) bool {
	return len(buf) >= hdrLen && buf[0] == streamStateMagic && buf[1] == streamStateVersion
}

var ErrBadStreamStateEncoding = errors.New("bad stream state encoding")

func DecodeStreamState(buf []byte) (*StreamReplicatedState, error) {
	ss := &StreamReplicatedState{}
	if len(buf) < hdrLen || buf[0] != streamStateMagic || buf[1] != streamStateVersion {
		return nil, ErrBadStreamStateEncoding
	}
	var bi = hdrLen

	readU64 := func() uint64 {
		if bi < 0 || bi >= len(buf) {
			bi = -1
			return 0
		}
		num, n := binary.Uvarint(buf[bi:])
		if n <= 0 {
			bi = -1
			return 0
		}
		bi += n
		return num
	}

	parserFailed := func() bool {
		return bi < 0
	}

	ss.Msgs = readU64()
	ss.Bytes = readU64()
	ss.FirstSeq = readU64()
	ss.LastSeq = readU64()
	ss.Failed = readU64()

	if parserFailed() {
		return nil, ErrCorruptStreamState
	}

	if numDeleted := readU64(); numDeleted > 0 {
		// If we have some deleted blocks.
		for l := len(buf); l > bi; {
			switch buf[bi] {
			case seqSetMagic:
				dmap, n, err := avl.Decode(buf[bi:])
				if err != nil {
					return nil, ErrCorruptStreamState
				}
				bi += n
				ss.Deleted = append(ss.Deleted, dmap)
			case runLengthMagic:
				bi++
				var rl DeleteRange
				rl.First = readU64()
				rl.Num = readU64()
				if parserFailed() {
					return nil, ErrCorruptStreamState
				}
				ss.Deleted = append(ss.Deleted, &rl)
			default:
				return nil, ErrCorruptStreamState
			}
		}
	}

	return ss, nil
}

// DeleteRange is a run length encoded delete range.
type DeleteRange struct {
	First uint64
	Num   uint64
}

func (dr *DeleteRange) State() (first, last, num uint64) {
	return dr.First, dr.First + dr.Num, dr.Num
}

// Range will range over all the deleted sequences represented by this block.
func (dr *DeleteRange) Range(f func(uint64) bool) {
	for seq := dr.First; seq <= dr.First+dr.Num; seq++ {
		if !f(seq) {
			return
		}
	}
}

// Legacy []uint64
type DeleteSlice []uint64

func (ds DeleteSlice) State() (first, last, num uint64) {
	if len(ds) == 0 {
		return 0, 0, 0
	}
	return ds[0], ds[len(ds)-1], uint64(len(ds))
}

// Range will range over all the deleted sequences represented by this []uint64.
func (ds DeleteSlice) Range(f func(uint64) bool) {
	for _, seq := range ds {
		if !f(seq) {
			return
		}
	}
}

func (dbs DeleteBlocks) NumDeleted() (total uint64) {
	for _, db := range dbs {
		_, _, num := db.State()
		total += num
	}
	return total
}

// ConsumerStore stores state on consumers for streams.
type ConsumerStore interface {
	SetStarting(sseq uint64) error
	HasState() bool
	UpdateDelivered(dseq, sseq, dc uint64, ts int64) error
	UpdateAcks(dseq, sseq uint64) error
	UpdateConfig(cfg *ConsumerConfig) error
	Update(*ConsumerState) error
	State() (*ConsumerState, error)
	BorrowState() (*ConsumerState, error)
	EncodedState() ([]byte, error)
	Type() StorageType
	Stop() error
	Delete() error
	StreamDelete() error
}

// SequencePair has both the consumer and the stream sequence. They point to the same message.
type SequencePair struct {
	Consumer uint64 `json:"consumer_seq"`
	Stream   uint64 `json:"stream_seq"`
}

// ConsumerState represents a stored state for a consumer.
type ConsumerState struct {
	// Delivered keeps track of last delivered sequence numbers for both the stream and the consumer.
	Delivered SequencePair `json:"delivered"`
	// AckFloor keeps track of the ack floors for both the stream and the consumer.
	AckFloor SequencePair `json:"ack_floor"`
	// These are both in stream sequence context.
	// Pending is for all messages pending and the timestamp for the delivered time.
	// This will only be present when the AckPolicy is ExplicitAck.
	Pending map[uint64]*Pending `json:"pending,omitempty"`
	// This is for messages that have been redelivered, so count > 1.
	Redelivered map[uint64]uint64 `json:"redelivered,omitempty"`
}
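// exampleDecodeReplicatedState is an illustrative sketch (not used by the server)
// showing how an encoded stream state, as produced by EncodedStreamState and
// decoded by DecodeStreamState above, might be inspected. The function name and
// the buf argument are hypothetical; buf is assumed to hold a snapshot payload.
func exampleDecodeReplicatedState(buf []byte) (uint64, error) {
	if !IsEncodedStreamState(buf) {
		return 0, ErrBadStreamStateEncoding
	}
	ss, err := DecodeStreamState(buf)
	if err != nil {
		return 0, err
	}
	// Walk every deleted sequence represented by the delete blocks, regardless of
	// whether they are AVL seqsets, run length ranges, or legacy slices.
	var deleted uint64
	for _, db := range ss.Deleted {
		db.Range(func(seq uint64) bool {
			deleted++
			return true
		})
	}
	return deleted, nil
}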
// Encode consumer state.
func encodeConsumerState(state *ConsumerState) []byte {
	var hdr [seqsHdrSize]byte
	var buf []byte

	maxSize := seqsHdrSize
	if lp := len(state.Pending); lp > 0 {
		maxSize += lp*(3*binary.MaxVarintLen64) + binary.MaxVarintLen64
	}
	if lr := len(state.Redelivered); lr > 0 {
		maxSize += lr*(2*binary.MaxVarintLen64) + binary.MaxVarintLen64
	}
	if maxSize == seqsHdrSize {
		buf = hdr[:seqsHdrSize]
	} else {
		buf = make([]byte, maxSize)
	}

	// Write header
	buf[0] = magic
	buf[1] = 2

	n := hdrLen
	n += binary.PutUvarint(buf[n:], state.AckFloor.Consumer)
	n += binary.PutUvarint(buf[n:], state.AckFloor.Stream)
	n += binary.PutUvarint(buf[n:], state.Delivered.Consumer)
	n += binary.PutUvarint(buf[n:], state.Delivered.Stream)
	n += binary.PutUvarint(buf[n:], uint64(len(state.Pending)))

	asflr := state.AckFloor.Stream
	adflr := state.AckFloor.Consumer

	// These are optional, but always write len. This is to avoid a truncate inline.
	if len(state.Pending) > 0 {
		// To save space we will use now rounded to seconds to be our base timestamp.
		mints := time.Now().Round(time.Second).Unix()
		// Write minimum timestamp we found from above.
		n += binary.PutVarint(buf[n:], mints)

		for k, v := range state.Pending {
			n += binary.PutUvarint(buf[n:], k-asflr)
			n += binary.PutUvarint(buf[n:], v.Sequence-adflr)
			// Downsample to seconds to save on space.
			// Subsecond resolution not needed for recovery etc.
			ts := v.Timestamp / int64(time.Second)
			n += binary.PutVarint(buf[n:], mints-ts)
		}
	}

	// We always write the redelivered len.
	n += binary.PutUvarint(buf[n:], uint64(len(state.Redelivered)))

	// We expect these to be small.
	if len(state.Redelivered) > 0 {
		for k, v := range state.Redelivered {
			n += binary.PutUvarint(buf[n:], k-asflr)
			n += binary.PutUvarint(buf[n:], v)
		}
	}

	return buf[:n]
}

// Represents a pending message for explicit ack or ack all.
// Sequence is the original consumer sequence.
type Pending struct {
	Sequence  uint64
	Timestamp int64
}
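// exampleEncodeConsumerState is a minimal illustrative sketch (not used by the
// server) of building a ConsumerState and producing its varint-based binary
// encoding with encodeConsumerState above. The sequence numbers, timestamps and
// delivery counts below are arbitrary example values.
func exampleEncodeConsumerState() []byte {
	state := &ConsumerState{
		Delivered: SequencePair{Consumer: 10, Stream: 22},
		AckFloor:  SequencePair{Consumer: 8, Stream: 20},
		// Pending is keyed by stream sequence and only present with explicit acks.
		Pending: map[uint64]*Pending{
			21: {Sequence: 9, Timestamp: time.Now().UnixNano()},
			22: {Sequence: 10, Timestamp: time.Now().UnixNano()},
		},
		// Redelivered is keyed by stream sequence; values are delivery counts > 1.
		Redelivered: map[uint64]uint64{21: 2},
	}
	// Pending and redelivered entries are stored as deltas from the ack floor,
	// and timestamps are downsampled to seconds, as the encoder above documents.
	return encodeConsumerState(state)
}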
// TemplateStore stores templates.
type TemplateStore interface {
	Store(*streamTemplate) error
	Delete(*streamTemplate) error
}

const (
	limitsPolicyJSONString    = `"limits"`
	interestPolicyJSONString  = `"interest"`
	workQueuePolicyJSONString = `"workqueue"`
)

var (
	limitsPolicyJSONBytes    = []byte(limitsPolicyJSONString)
	interestPolicyJSONBytes  = []byte(interestPolicyJSONString)
	workQueuePolicyJSONBytes = []byte(workQueuePolicyJSONString)
)

func (rp RetentionPolicy) String() string {
	switch rp {
	case LimitsPolicy:
		return "Limits"
	case InterestPolicy:
		return "Interest"
	case WorkQueuePolicy:
		return "WorkQueue"
	default:
		return "Unknown Retention Policy"
	}
}

func (rp RetentionPolicy) MarshalJSON() ([]byte, error) {
	switch rp {
	case LimitsPolicy:
		return limitsPolicyJSONBytes, nil
	case InterestPolicy:
		return interestPolicyJSONBytes, nil
	case WorkQueuePolicy:
		return workQueuePolicyJSONBytes, nil
	default:
		return nil, fmt.Errorf("can not marshal %v", rp)
	}
}

func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error {
	switch string(data) {
	case limitsPolicyJSONString:
		*rp = LimitsPolicy
	case interestPolicyJSONString:
		*rp = InterestPolicy
	case workQueuePolicyJSONString:
		*rp = WorkQueuePolicy
	default:
		return fmt.Errorf("can not unmarshal %q", data)
	}
	return nil
}

func (dp DiscardPolicy) String() string {
	switch dp {
	case DiscardOld:
		return "DiscardOld"
	case DiscardNew:
		return "DiscardNew"
	default:
		return "Unknown Discard Policy"
	}
}

func (dp DiscardPolicy) MarshalJSON() ([]byte, error) {
	switch dp {
	case DiscardOld:
		return []byte(`"old"`), nil
	case DiscardNew:
		return []byte(`"new"`), nil
	default:
		return nil, fmt.Errorf("can not marshal %v", dp)
	}
}

func (dp *DiscardPolicy) UnmarshalJSON(data []byte) error {
	switch strings.ToLower(string(data)) {
	case `"old"`:
		*dp = DiscardOld
	case `"new"`:
		*dp = DiscardNew
	default:
		return fmt.Errorf("can not unmarshal %q", data)
	}
	return nil
}

const (
	memoryStorageJSONString = `"memory"`
	fileStorageJSONString   = `"file"`
	anyStorageJSONString    = `"any"`
)

var (
	memoryStorageJSONBytes = []byte(memoryStorageJSONString)
	fileStorageJSONBytes   = []byte(fileStorageJSONString)
	anyStorageJSONBytes    = []byte(anyStorageJSONString)
)

func (st StorageType) String() string {
	switch st {
	case MemoryStorage:
		return "Memory"
	case FileStorage:
		return "File"
	case AnyStorage:
		return "Any"
	default:
		return "Unknown Storage Type"
	}
}

func (st StorageType) MarshalJSON() ([]byte, error) {
	switch st {
	case MemoryStorage:
		return memoryStorageJSONBytes, nil
	case FileStorage:
		return fileStorageJSONBytes, nil
	case AnyStorage:
		return anyStorageJSONBytes, nil
	default:
		return nil, fmt.Errorf("can not marshal %v", st)
	}
}

func (st *StorageType) UnmarshalJSON(data []byte) error {
	switch string(data) {
	case memoryStorageJSONString:
		*st = MemoryStorage
	case fileStorageJSONString:
		*st = FileStorage
	case anyStorageJSONString:
		*st = AnyStorage
	default:
		return fmt.Errorf("can not unmarshal %q", data)
	}
	return nil
}
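// exampleStorageTypeJSON is an illustrative sketch (not used by the server) of
// the JSON forms handled by the marshalers above: storage types and policies
// round-trip as lower-case quoted strings such as "file", "memory", "limits" or
// "old". The function name is hypothetical.
func exampleStorageTypeJSON() error {
	var st StorageType
	// Decode the lower-case JSON form into the typed constant.
	if err := st.UnmarshalJSON([]byte(`"file"`)); err != nil {
		return err
	}
	// st now equals FileStorage; marshaling produces the same `"file"` form again.
	if _, err := st.MarshalJSON(); err != nil {
		return err
	}
	return nil
}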
const (
	ackNonePolicyJSONString     = `"none"`
	ackAllPolicyJSONString      = `"all"`
	ackExplicitPolicyJSONString = `"explicit"`
)

var (
	ackNonePolicyJSONBytes     = []byte(ackNonePolicyJSONString)
	ackAllPolicyJSONBytes      = []byte(ackAllPolicyJSONString)
	ackExplicitPolicyJSONBytes = []byte(ackExplicitPolicyJSONString)
)

func (ap AckPolicy) MarshalJSON() ([]byte, error) {
	switch ap {
	case AckNone:
		return ackNonePolicyJSONBytes, nil
	case AckAll:
		return ackAllPolicyJSONBytes, nil
	case AckExplicit:
		return ackExplicitPolicyJSONBytes, nil
	default:
		return nil, fmt.Errorf("can not marshal %v", ap)
	}
}

func (ap *AckPolicy) UnmarshalJSON(data []byte) error {
	switch string(data) {
	case ackNonePolicyJSONString:
		*ap = AckNone
	case ackAllPolicyJSONString:
		*ap = AckAll
	case ackExplicitPolicyJSONString:
		*ap = AckExplicit
	default:
		return fmt.Errorf("can not unmarshal %q", data)
	}
	return nil
}

const (
	replayInstantPolicyJSONString  = `"instant"`
	replayOriginalPolicyJSONString = `"original"`
)

var (
	replayInstantPolicyJSONBytes  = []byte(replayInstantPolicyJSONString)
	replayOriginalPolicyJSONBytes = []byte(replayOriginalPolicyJSONString)
)

func (rp ReplayPolicy) MarshalJSON() ([]byte, error) {
	switch rp {
	case ReplayInstant:
		return replayInstantPolicyJSONBytes, nil
	case ReplayOriginal:
		return replayOriginalPolicyJSONBytes, nil
	default:
		return nil, fmt.Errorf("can not marshal %v", rp)
	}
}

func (rp *ReplayPolicy) UnmarshalJSON(data []byte) error {
	switch string(data) {
	case replayInstantPolicyJSONString:
		*rp = ReplayInstant
	case replayOriginalPolicyJSONString:
		*rp = ReplayOriginal
	default:
		return fmt.Errorf("can not unmarshal %q", data)
	}
	return nil
}

const (
	deliverAllPolicyJSONString       = `"all"`
	deliverLastPolicyJSONString      = `"last"`
	deliverNewPolicyJSONString       = `"new"`
	deliverByStartSequenceJSONString = `"by_start_sequence"`
	deliverByStartTimeJSONString     = `"by_start_time"`
	deliverLastPerPolicyJSONString   = `"last_per_subject"`
	deliverUndefinedJSONString       = `"undefined"`
)

var (
	deliverAllPolicyJSONBytes       = []byte(deliverAllPolicyJSONString)
	deliverLastPolicyJSONBytes      = []byte(deliverLastPolicyJSONString)
	deliverNewPolicyJSONBytes       = []byte(deliverNewPolicyJSONString)
	deliverByStartSequenceJSONBytes = []byte(deliverByStartSequenceJSONString)
	deliverByStartTimeJSONBytes     = []byte(deliverByStartTimeJSONString)
	deliverLastPerPolicyJSONBytes   = []byte(deliverLastPerPolicyJSONString)
	deliverUndefinedJSONBytes       = []byte(deliverUndefinedJSONString)
)

func (p *DeliverPolicy) UnmarshalJSON(data []byte) error {
	switch string(data) {
	case deliverAllPolicyJSONString, deliverUndefinedJSONString:
		*p = DeliverAll
	case deliverLastPolicyJSONString:
		*p = DeliverLast
	case deliverLastPerPolicyJSONString:
		*p = DeliverLastPerSubject
	case deliverNewPolicyJSONString:
		*p = DeliverNew
	case deliverByStartSequenceJSONString:
		*p = DeliverByStartSequence
	case deliverByStartTimeJSONString:
		*p = DeliverByStartTime
	default:
		return fmt.Errorf("can not unmarshal %q", data)
	}

	return nil
}
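// exampleDeliverPolicyDefault is an illustrative sketch (not used by the server)
// of a detail of the UnmarshalJSON above: both "all" and "undefined" decode to
// DeliverAll. The function name is hypothetical.
func exampleDeliverPolicyDefault() DeliverPolicy {
	var dp DeliverPolicy
	// "undefined" is accepted and maps to DeliverAll; error ignored for brevity.
	_ = dp.UnmarshalJSON([]byte(`"undefined"`))
	return dp
}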
func (p DeliverPolicy) MarshalJSON() ([]byte, error) {
	switch p {
	case DeliverAll:
		return deliverAllPolicyJSONBytes, nil
	case DeliverLast:
		return deliverLastPolicyJSONBytes, nil
	case DeliverLastPerSubject:
		return deliverLastPerPolicyJSONBytes, nil
	case DeliverNew:
		return deliverNewPolicyJSONBytes, nil
	case DeliverByStartSequence:
		return deliverByStartSequenceJSONBytes, nil
	case DeliverByStartTime:
		return deliverByStartTimeJSONBytes, nil
	default:
		return deliverUndefinedJSONBytes, nil
	}
}

func isOutOfSpaceErr(err error) bool {
	return err != nil && (strings.Contains(err.Error(), "no space left"))
}

// For when our upper layer catchup detects it's missing messages from the beginning of the stream.
var errFirstSequenceMismatch = errors.New("first sequence mismatch")

func isClusterResetErr(err error) bool {
	return err == errLastSeqMismatch || err == ErrStoreEOF || err == errFirstSequenceMismatch
}

// Copy all fields from smo into sm.
func (smo *StoreMsg) copy(sm *StoreMsg) {
	if sm.buf != nil {
		sm.buf = sm.buf[:0]
	}
	sm.buf = append(sm.buf, smo.buf...)
	// We set cap on header in case someone wants to expand it.
	sm.hdr, sm.msg = sm.buf[:len(smo.hdr):len(smo.hdr)], sm.buf[len(smo.hdr):]
	sm.subj, sm.seq, sm.ts = smo.subj, smo.seq, smo.ts
}

// Clear all fields except the underlying buffer, but reset that, if present, to [:0].
func (sm *StoreMsg) clear() {
	if sm == nil {
		return
	}
	*sm = StoreMsg{_EMPTY_, nil, nil, sm.buf, 0, 0}
	if len(sm.buf) > 0 {
		sm.buf = sm.buf[:0]
	}
}

// Note this will avoid a copy of the data used for the string, but it will also reference the existing slice's data pointer.
// So this should be used sparingly when we know the encompassing byte slice's lifetime is the same.
func bytesToString(b []byte) string {
	if len(b) == 0 {
		return _EMPTY_
	}
	p := unsafe.SliceData(b)
	return unsafe.String(p, len(b))
}

// Same in reverse. Used less often.
func stringToBytes(s string) []byte {
	if len(s) == 0 {
		return nil
	}
	p := unsafe.StringData(s)
	b := unsafe.Slice(p, len(s))
	return b
}
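// exampleLoadRange is an illustrative sketch (not used by the server) of the
// StoreMsg reuse pattern that clear() above supports: a single caller-owned
// StoreMsg is reused across loads so its internal buffer can be recycled. The
// function name and the handling of missing sequences are hypothetical; fs is
// assumed to be an existing store and first/last a valid sequence range.
func exampleLoadRange(fs StreamStore, first, last uint64) error {
	var sm StoreMsg
	for seq := first; seq <= last; seq++ {
		// Reset fields but keep the underlying buffer for reuse.
		sm.clear()
		if _, err := fs.LoadMsg(seq, &sm); err != nil {
			if err == ErrStoreMsgNotFound {
				// No stored message at this sequence; keep going.
				continue
			}
			return err
		}
		// sm.subj, sm.hdr and sm.msg are only valid until the next load into sm.
	}
	return nil
}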