github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/kv/kvserver/replica_proposal_buf.go

// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package kvserver

import (
	"context"
	"sync"
	"sync/atomic"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
	"github.com/cockroachdb/errors"
	"go.etcd.io/etcd/raft"
	"go.etcd.io/etcd/raft/raftpb"
)

// propBufCnt is a counter maintained by the proposal buffer that tracks an
// index into the buffer's array and an offset from the buffer's base lease
// index. The counter is accessed atomically.
//
// Bit layout (LSB to MSB):
//   bits 0 - 31: index into array
//   bits 32 - 63: lease index offset
type propBufCnt uint64

// propBufCntReq is a request to atomically update the proposal buffer's
// counter. The bit layout of the request is similar to that of propBufCnt,
// except that the two 32-bit segments represent deltas instead of absolute
// values.
//
// In practice, there are only two variants of requests. The first variant
// consists of requests that want to increment only the counter's array index
// by one. These are represented like:
//
//   0 0 0 ..[63 times].. 1
//
// The second variant consists of requests that want to increment the counter's
// array index by one and want to increment the counter's lease index offset by
// one. These are represented like:
//
//   0 0 0 ..[31 times].. 1 0 0 0 ..[31 times].. 1
//
// Representing requests like this allows them to be atomically added directly
// to the proposal buffer counter to reserve an array index and optionally
// reserve a lease index.
type propBufCntReq uint64

// propBufCntRes is a response from updating or reading the proposal buffer's
// counter. It can be understood as a snapshot of the counter.
type propBufCntRes uint64

// makePropBufCntReq creates a new proposal buffer request. The incLeaseIndex
// arg indicates whether the request would like a new maximum lease index or
// whether it would like the same maximum lease index as the previous request.
func makePropBufCntReq(incLeaseIndex bool) propBufCntReq {
	r := propBufCntReq(1)
	if incLeaseIndex {
		r |= (1 << 32)
	}
	return r
}

// arrayLen returns the number of elements in the proposal buffer's array.
func (r propBufCntRes) arrayLen() int {
	return int(r & (1<<32 - 1))
}

// arrayIndex returns the index into the proposal buffer that was reserved for
// the request. The returned index will be -1 if no index was reserved (e.g.
// by propBufCnt.read) and the buffer is empty.
func (r propBufCntRes) arrayIndex() int {
	// NB: -1 because the array is 0-indexed.
	return r.arrayLen() - 1
}

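// For illustration only (an editor's sketch, not part of the original file):
// a minimal round trip through the bit layout documented above. Starting from
// a zero counter, a request built with incLeaseIndex=true reserves the first
// array slot and consumes one lease index offset in a single atomic add.
//
//	var cnt propBufCnt
//	res := cnt.update(makePropBufCntReq(true /* incLeaseIndex */))
//	// res.arrayLen() == 1, res.arrayIndex() == 0, res.leaseIndexOffset() == 1
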
// leaseIndexOffset returns the offset from the proposal buffer's current lease
// index base that was reserved for the request's maximum lease index.
func (r propBufCntRes) leaseIndexOffset() uint64 {
	return uint64(r >> 32)
}

// update accepts a proposal buffer request and applies it to the proposal
// buffer counter, returning the response.
func (c *propBufCnt) update(r propBufCntReq) propBufCntRes {
	return propBufCntRes(atomic.AddUint64((*uint64)(c), uint64(r)))
}

// clear resets a proposal buffer counter to its zero value and returns the
// response returned to the last accepted request.
func (c *propBufCnt) clear() propBufCntRes {
	return propBufCntRes(atomic.SwapUint64((*uint64)(c), 0))
}

// read reads from the proposal buffer counter.
func (c *propBufCnt) read() propBufCntRes {
	return propBufCntRes(atomic.LoadUint64((*uint64)(c)))
}

// propBuf is a multi-producer, single-consumer buffer for Raft proposals. The
// buffer supports concurrent insertion of proposals.
//
// The proposal buffer also handles the assignment of maximum lease indexes for
// commands. Picking the maximum lease index for commands is done atomically
// with determining the order in which they are inserted into the buffer to
// ensure that lease indexes are not assigned in a different order from that in
// which commands are proposed (and thus likely applied). If this order were to
// get out of sync then some commands would necessarily be rejected beneath Raft
// during application (see checkForcedErrLocked).
//
// Proposals enter the buffer via Insert() or ReinsertLocked(). They are moved
// into Raft via FlushLockedWithRaftGroup() when the buffer fills up, or during
// the next handleRaftReady iteration, whichever happens earlier. This
// introduces no additional latency into the replication pipeline compared to
// moving them into Raft directly because Raft would not begin replicating the
// proposals until the next handleRaftReady iteration anyway.
//
// propBuf inherits the locking of the proposer that it is bound to during
// initialization. Methods called "...Locked" and "...RLocked" expect the
// corresponding locker() and rlocker() to be held.
type propBuf struct {
	p    proposer
	full sync.Cond

	liBase uint64
	cnt    propBufCnt
	arr    propBufArray

	testing struct {
		// leaseIndexFilter can be used by tests to override the max lease index
		// assigned to a proposal by returning a non-zero lease index.
		leaseIndexFilter func(*ProposalData) (indexOverride uint64, err error)
		// submitProposalFilter can be used by tests to observe and optionally
		// drop Raft proposals before they are handed to etcd/raft to begin the
		// process of replication. Dropped proposals are still eligible to be
		// reproposed due to ticks.
		submitProposalFilter func(*ProposalData) (drop bool, err error)
	}
}

// A proposer is an object that uses a propBuf to coordinate Raft proposals.
type proposer interface {
	locker() sync.Locker
	rlocker() sync.Locker
	// The following require the proposer to hold (at least) a shared lock.
	replicaID() roachpb.ReplicaID
	destroyed() destroyStatus
	leaseAppliedIndex() uint64
	enqueueUpdateCheck()
	// The following require the proposer to hold an exclusive lock.
	withGroupLocked(func(*raft.RawNode) error) error
	registerProposalLocked(*ProposalData)
}

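// For illustration only (a hedged sketch of the intended lifecycle, not part
// of the original file; "pd", "data", and "rn" are hypothetical stand-ins for
// a ProposalData, its partial encoding, and the replica's raft.RawNode):
//
//	var buf propBuf
//	buf.Init(p)                                 // bind to a proposer
//	maxLI, err := buf.Insert(pd, data)          // producers, under the shared lock
//	...
//	n, err := buf.FlushLockedWithRaftGroup(rn)  // consumer, under the exclusive lock
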
// Init initializes the proposal buffer and binds it to the provided proposer.
func (b *propBuf) Init(p proposer) {
	b.p = p
	b.full.L = p.rlocker()
}

// Len returns the number of proposals currently in the buffer.
func (b *propBuf) Len() int {
	return b.cnt.read().arrayLen()
}

// LastAssignedLeaseIndexRLocked returns the last assigned lease index.
func (b *propBuf) LastAssignedLeaseIndexRLocked() uint64 {
	return b.liBase + b.cnt.read().leaseIndexOffset()
}

// Insert inserts a new command into the proposal buffer to be proposed to the
// proposer's Raft group. The method accepts the Raft command as part of the
// ProposalData struct, along with a partial encoding of the command in the
// provided byte slice. It is expected that the byte slice contains marshaled
// information for all of the command's fields except for its max lease index,
// which is assigned by the method when the command is sequenced in the buffer.
// It is also expected that the byte slice has sufficient capacity to marshal
// the maximum lease index field into it. After adding the proposal to the
// buffer, the assigned max lease index is returned.
func (b *propBuf) Insert(p *ProposalData, data []byte) (uint64, error) {
	// Request a new max lease applied index for any request that isn't itself
	// a lease request. Lease requests don't need unique max lease index values
	// because their max lease indexes are ignored. See checkForcedErrLocked.
	isLease := p.Request.IsLeaseRequest()
	req := makePropBufCntReq(!isLease)

	// Hold the read lock while inserting into the proposal buffer. Other
	// insertion attempts will also grab the read lock, so they can insert
	// concurrently. Consumers of the proposal buffer will grab the write lock,
	// so they must wait for concurrent insertion attempts to finish.
	b.p.rlocker().Lock()
	defer b.p.rlocker().Unlock()

	// Update the proposal buffer counter and determine which index we should
	// insert at.
	res, err := b.handleCounterRequestRLocked(req, false /* wLocked */)
	if err != nil {
		return 0, err
	}

	// Assign the command's maximum lease index.
	p.command.MaxLeaseIndex = b.liBase + res.leaseIndexOffset()
	if filter := b.testing.leaseIndexFilter; filter != nil {
		if override, err := filter(p); err != nil {
			return 0, err
		} else if override != 0 {
			p.command.MaxLeaseIndex = override
		}
	}
	if log.V(4) {
		log.Infof(p.ctx, "submitting proposal %x: maxLeaseIndex=%d", p.idKey, p.command.MaxLeaseIndex)
	}

	// Marshal the command's footer with the newly assigned maximum lease index
	// into the command's pre-allocated buffer. It should already have enough
	// room to accommodate the command footer without needing an allocation.
	f := &p.tmpFooter
	f.MaxLeaseIndex = p.command.MaxLeaseIndex
	footerLen := f.Size()

	preLen := len(data)
	p.encodedCommand = data[:preLen+footerLen]
	if _, err := protoutil.MarshalTo(f, p.encodedCommand[preLen:]); err != nil {
		return 0, err
	}

	// Insert the proposal into the buffer's array.
	b.insertIntoArray(p, res.arrayIndex())

	// Return the maximum lease index that the proposal's command was given.
	if isLease {
		// For lease requests, we return zero because no real MaxLeaseIndex is
		// assigned. We could also return command.MaxLeaseIndex but this invites
		// confusion.
		return 0, nil
	}
	return p.command.MaxLeaseIndex, nil
}

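// For illustration only (hypothetical caller, not part of the original file):
// Insert relies on the caller having marshaled the command into a slice whose
// capacity already reserves room for the MaxLeaseIndex footer, roughly:
//
//	// data holds the marshaled command, with spare capacity reserved for
//	// the footer that Insert marshals in place (no reallocation needed).
//	maxLeaseIndex, err := b.Insert(p, data)
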
// ReinsertLocked inserts a command that has already passed through the proposal
// buffer back into the buffer to be reproposed at a new Raft log index. Unlike
// Insert, it does not modify the command or assign a new maximum lease index.
func (b *propBuf) ReinsertLocked(p *ProposalData) error {
	// When re-inserting a command into the proposal buffer, the command never
	// wants a new lease index. Simply add it back to the buffer and let it be
	// reproposed.
	req := makePropBufCntReq(false /* incLeaseIndex */)

	// Update the proposal buffer counter and determine which index we should
	// insert at.
	res, err := b.handleCounterRequestRLocked(req, true /* wLocked */)
	if err != nil {
		return err
	}

	// Insert the proposal into the buffer's array.
	b.insertIntoArray(p, res.arrayIndex())
	return nil
}

// handleCounterRequestRLocked accepts a proposal buffer counter request and
// uses it to update the proposal buffer counter. The method will repeat the
// atomic update operation until it is able to successfully reserve an index
// in the array. If an attempt finds that the array is full then it may flush
// the array before trying again.
//
// The method expects that either the proposer's read lock or write lock is
// held. It does not mandate which, but expects the caller to specify using
// the wLocked argument.
func (b *propBuf) handleCounterRequestRLocked(
	req propBufCntReq, wLocked bool,
) (propBufCntRes, error) {
	// Repeatedly attempt to find an open index in the buffer's array.
	for {
		// NB: We need to check whether the proposer is destroyed before each
		// iteration in case the proposer has been destroyed between the initial
		// check and the current acquisition of the read lock. Failure to do so
		// will leave pending proposals that never get cleared.
		if status := b.p.destroyed(); !status.IsAlive() {
			return 0, status.err
		}

		res := b.cnt.update(req)
		idx := res.arrayIndex()
		if idx < b.arr.len() {
			// The buffer is not full. Our slot in the array is reserved.
			return res, nil
		} else if wLocked {
			// The buffer is full and we're holding the exclusive lock. Flush
			// the buffer before trying again.
			if err := b.flushLocked(); err != nil {
				return 0, err
			}
		} else if idx == b.arr.len() {
			// The buffer is full and we were the first request to notice out of
			// potentially many requests holding the shared lock and trying to
			// insert concurrently. Eagerly attempt to flush the buffer before
			// trying again.
			if err := b.flushRLocked(); err != nil {
				return 0, err
			}
		} else {
			// The buffer is full and we were not the first request to notice
			// out of potentially many requests holding the shared lock and
			// trying to insert concurrently. Wait for the buffer to be flushed
			// by someone else before trying again.
			b.full.Wait()
		}
	}
}

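// For illustration only (a worked example, not part of the original file):
// with a 4-slot array and six concurrent Insert calls, the counter hands out
// indexes 0 through 5. Indexes 0-3 reserve slots directly, index 4 (equal to
// arr.len()) is the first to overflow and eagerly flushes the buffer, and
// index 5 waits on b.full until the flush's Broadcast wakes it to retry.
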
// insertIntoArray inserts the proposal into the proposal buffer's array at the
// specified index. It also schedules a Raft update check if necessary.
func (b *propBuf) insertIntoArray(p *ProposalData, idx int) {
	b.arr.asSlice()[idx] = p
	if idx == 0 {
		// If this is the first proposal in the buffer, schedule a Raft update
		// check to inform Raft processing about the new proposal. Everyone else
		// can rely on the request that added the first proposal to the buffer
		// having already scheduled a Raft update check.
		b.p.enqueueUpdateCheck()
	}
}

func (b *propBuf) flushRLocked() error {
	// Upgrade the shared lock to an exclusive lock. After doing so, check again
	// whether the proposer has been destroyed. If so, wake up other goroutines
	// waiting for the flush.
	b.p.rlocker().Unlock()
	defer b.p.rlocker().Lock()
	b.p.locker().Lock()
	defer b.p.locker().Unlock()
	if status := b.p.destroyed(); !status.IsAlive() {
		b.full.Broadcast()
		return status.err
	}
	return b.flushLocked()
}

func (b *propBuf) flushLocked() error {
	return b.p.withGroupLocked(func(raftGroup *raft.RawNode) error {
		_, err := b.FlushLockedWithRaftGroup(raftGroup)
		return err
	})
}

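// For illustration only (hypothetical call site, not part of the original
// file): the common consumer is the handleRaftReady loop mentioned above,
// which flushes the buffer while holding the exclusive lock, roughly:
//
//	r.mu.Lock()
//	n, err := r.mu.proposalBuf.FlushLockedWithRaftGroup(raftGroup)
//	r.mu.Unlock()
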
// FlushLockedWithRaftGroup flushes the commands from the proposal buffer and
// resets the buffer back to an empty state. Each command is handed off to the
// Raft proposals map, at which point they are owned by the Raft processor.
//
// If raftGroup is non-nil (the common case) then the commands will also be
// proposed to the RawNode. This initiates Raft replication of the commands.
//
// Returns the number of proposals handed to the RawNode.
func (b *propBuf) FlushLockedWithRaftGroup(raftGroup *raft.RawNode) (int, error) {
	// Before returning, make sure to forward the lease index base to at least
	// the proposer's currently applied lease index. This ensures that if the
	// lease applied index advances outside of this proposer's control (i.e.
	// other leaseholders commit some stuff and then we get the lease back),
	// future proposals will be given sufficiently high max lease indexes.
	defer b.forwardLeaseIndexBase(b.p.leaseAppliedIndex())

	// We hold the write lock while reading from and flushing the proposal
	// buffer. This ensures that we synchronize with all producers and other
	// consumers.
	res := b.cnt.clear()
	used := res.arrayLen()
	// Before returning, consider resizing the proposal buffer's array,
	// depending on how much of it was used before the current flush.
	defer b.arr.adjustSize(used)
	if used == 0 {
		// The buffer is empty. Nothing to do.
		return 0, nil
	} else if used > b.arr.len() {
		// The buffer is full and at least one writer has tried to allocate
		// on top of the full buffer, so notify them to try again.
		used = b.arr.len()
		defer b.full.Broadcast()
	}

	// Update the maximum lease index base value, based on the maximum lease
	// index assigned since the last flush.
	b.forwardLeaseIndexBase(b.liBase + res.leaseIndexOffset())

	// Iterate through the proposals in the buffer and propose them to Raft.
	// While doing so, build up batches of entries and submit them to Raft all
	// at once. Building up batches of entries and proposing them with a single
	// Step can dramatically reduce the number of messages required to commit
	// and apply them.
	buf := b.arr.asSlice()[:used]
	ents := make([]raftpb.Entry, 0, used)
	// Remember the first error that we see when proposing the batch. We don't
	// immediately return this error because we want to finish clearing out the
	// buffer and registering each of the proposals with the proposer, but we
	// stop trying to propose commands to raftGroup.
	var firstErr error
	for i, p := range buf {
		if p == nil {
			// If we run into an error during proposal insertion, we may have
			// reserved an array index without actually inserting a proposal.
			continue
		}
		buf[i] = nil // clear buffer

		// Raft processing bookkeeping.
		b.p.registerProposalLocked(p)

		// Potentially drop the proposal before passing it to etcd/raft, but
		// only after performing necessary bookkeeping.
		if filter := b.testing.submitProposalFilter; filter != nil {
			if drop, err := filter(p); drop || err != nil {
				if firstErr == nil {
					firstErr = err
				}
				continue
			}
		}

		// If we don't have a raft group or if the raft group has rejected one
		// of the proposals, we don't try to propose any more proposals. The
		// rest of the proposals will still be registered with the proposer, so
		// they will eventually be reproposed.
		if raftGroup == nil || firstErr != nil {
			continue
		}

		// Coordinate proposing the command to etcd/raft.
		if crt := p.command.ReplicatedEvalResult.ChangeReplicas; crt != nil {
			// Flush any previously batched (non-conf change) proposals to
			// preserve the correct ordering of proposals. Later proposals
			// will start a new batch.
			if err := proposeBatch(raftGroup, b.p.replicaID(), ents); err != nil {
				firstErr = err
				continue
			}
			ents = ents[len(ents):]

			confChangeCtx := ConfChangeContext{
				CommandID: string(p.idKey),
				Payload:   p.encodedCommand,
			}
			encodedCtx, err := protoutil.Marshal(&confChangeCtx)
			if err != nil {
				firstErr = err
				continue
			}

			cc, err := crt.ConfChange(encodedCtx)
			if err != nil {
				firstErr = err
				continue
			}

			if err := raftGroup.ProposeConfChange(
				cc,
			); err != nil && !errors.Is(err, raft.ErrProposalDropped) {
				// Silently ignore dropped proposals (they were always silently
				// ignored prior to the introduction of ErrProposalDropped).
				// TODO(bdarnell): Handle ErrProposalDropped better.
				// https://github.com/cockroachdb/cockroach/issues/21849
				firstErr = err
				continue
			}
		} else {
			// Add to the batch of entries that will soon be proposed. It is
			// possible that this batching can cause the batched MsgProp to grow
			// past the size limit where etcd/raft will drop the entire thing
			// (see raft.Config.MaxUncommittedEntriesSize), but that's not a
			// concern. This setting is configured to twice the maximum quota in
			// the proposal quota pool, so for batching to cause a message to be
			// dropped the uncommitted portion of the Raft log would already
			// need to be at least as large as the proposal quota size, assuming
			// that all in-flight proposals are reproposed in a single batch.
			ents = append(ents, raftpb.Entry{
				Data: p.encodedCommand,
			})
		}
	}
	if firstErr != nil {
		return 0, firstErr
	}
	return used, proposeBatch(raftGroup, b.p.replicaID(), ents)
}

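// For illustration only (a worked example, not part of the original file):
// flushing the sequence [put, put, ChangeReplicas, put] issues one batched
// MsgProp carrying the two leading puts, then a ProposeConfChange for the
// ChangeReplicas command, and finally a second MsgProp for the trailing put,
// preserving the original proposal order.
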
func (b *propBuf) forwardLeaseIndexBase(v uint64) {
	if b.liBase < v {
		b.liBase = v
	}
}

func proposeBatch(raftGroup *raft.RawNode, replID roachpb.ReplicaID, ents []raftpb.Entry) error {
	if len(ents) == 0 {
		return nil
	}
	if err := raftGroup.Step(raftpb.Message{
		Type:    raftpb.MsgProp,
		From:    uint64(replID),
		Entries: ents,
	}); errors.Is(err, raft.ErrProposalDropped) {
		// Silently ignore dropped proposals (they were always silently
		// ignored prior to the introduction of ErrProposalDropped).
		// TODO(bdarnell): Handle ErrProposalDropped better.
		// https://github.com/cockroachdb/cockroach/issues/21849
		return nil
	} else if err != nil {
		return err
	}
	return nil
}

// FlushLockedWithoutProposing is like FlushLockedWithRaftGroup but it does not
// attempt to propose any of the commands that it is flushing. Instead, it is
// used exclusively to flush all entries in the buffer into the proposals map.
//
// The intended usage of this method is to flush all proposals in the buffer
// into the proposals map so that they can all be manipulated in a single place.
// The representative example of this is a caller that wants to flush the buffer
// into the proposals map before canceling all proposals.
func (b *propBuf) FlushLockedWithoutProposing() {
	if _, err := b.FlushLockedWithRaftGroup(nil /* raftGroup */); err != nil {
		log.Fatalf(context.Background(), "unexpected error: %+v", err)
	}
}

const propBufArrayMinSize = 4
const propBufArrayMaxSize = 256
const propBufArrayShrinkDelay = 16

// propBufArray is a dynamically-sized array of ProposalData pointers. The
// array grows when it repeatedly fills up between flushes and shrinks when
// it repeatedly stays below a certain level of utilization. Sufficiently
// small arrays avoid indirection and are stored inline.
type propBufArray struct {
	small  [propBufArrayMinSize]*ProposalData
	large  []*ProposalData
	shrink int
}

func (a *propBufArray) asSlice() []*ProposalData {
	if a.large != nil {
		return a.large
	}
	return a.small[:]
}

func (a *propBufArray) len() int {
	return len(a.asSlice())
}

// adjustSize adjusts the proposal buffer array's size based on how much of the
// array was used before the last flush and whether the size was observed to be
// too small, too large, or just right. The size grows quickly and shrinks
// slowly to prevent thrashing and oscillation.
func (a *propBufArray) adjustSize(used int) {
	cur := a.len()
	switch {
	case used <= cur/4:
		// The array is too large. Shrink it if possible.
		if cur <= propBufArrayMinSize {
			return
		}
		a.shrink++
		// Require propBufArrayShrinkDelay straight periods of underutilization
		// before shrinking. An array that is too big is better than an array
		// that is too small, and we don't want oscillation.
		if a.shrink == propBufArrayShrinkDelay {
			a.shrink = 0
			next := cur / 2
			if next <= propBufArrayMinSize {
				a.large = nil
			} else {
				a.large = make([]*ProposalData, next)
			}
		}
	case used >= cur:
		// The array is too small. Grow it if possible.
		a.shrink = 0
		next := 2 * cur
		if next <= propBufArrayMaxSize {
			a.large = make([]*ProposalData, next)
		}
	default:
		// The array is a good size. Do nothing.
		a.shrink = 0
	}
}

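// For illustration only (a worked example, not part of the original file):
// starting from the 4-slot inline array, a flush that uses all 4 slots grows
// the array to 8. Shrinking is deliberately slower: only after 16 consecutive
// flushes that each use at most a quarter of the array is it halved, and once
// the target size falls back to propBufArrayMinSize the inline array is
// reused and the heap allocation is dropped.
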
// replicaProposer implements the proposer interface.
type replicaProposer Replica

func (rp *replicaProposer) locker() sync.Locker {
	return &rp.mu.RWMutex
}

func (rp *replicaProposer) rlocker() sync.Locker {
	return rp.mu.RWMutex.RLocker()
}

func (rp *replicaProposer) replicaID() roachpb.ReplicaID {
	return rp.mu.replicaID
}

func (rp *replicaProposer) destroyed() destroyStatus {
	return rp.mu.destroyStatus
}

func (rp *replicaProposer) leaseAppliedIndex() uint64 {
	return rp.mu.state.LeaseAppliedIndex
}

func (rp *replicaProposer) enqueueUpdateCheck() {
	rp.store.enqueueRaftUpdateCheck(rp.RangeID)
}

func (rp *replicaProposer) withGroupLocked(fn func(*raft.RawNode) error) error {
	// Pass true for mayCampaignOnWake because we're about to propose a command.
	return (*Replica)(rp).withRaftGroupLocked(true, func(raftGroup *raft.RawNode) (bool, error) {
		// We're proposing a command here so there is no need to wake the leader
		// if we were quiesced. However, we should make sure we are unquiesced.
		(*Replica)(rp).unquiesceLocked()
		return false /* unquiesceLocked */, fn(raftGroup)
	})
}

func (rp *replicaProposer) registerProposalLocked(p *ProposalData) {
	// Record when the proposal was submitted to Raft so that we can later
	// decide if/when to re-propose it.
	p.proposedAtTicks = rp.mu.ticks
	rp.mu.proposals[p.idKey] = p
}

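// For illustration only (hypothetical wiring, not part of the original file):
// because replicaProposer is a defined type whose underlying type is Replica,
// binding a Replica to its proposal buffer is a plain pointer conversion,
// along the lines of:
//
//	r.mu.proposalBuf.Init((*replicaProposer)(r))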