github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/kv/kvserver/rangefeed/processor_test.go

// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package rangefeed

import (
	"bytes"
	"context"
	"fmt"
	"runtime"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/storage"
	"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
	"github.com/cockroachdb/cockroach/pkg/testutils"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/stop"
	"github.com/cockroachdb/cockroach/pkg/util/tracing"
	"github.com/cockroachdb/cockroach/pkg/util/uuid"
	"github.com/stretchr/testify/require"
)

func makeLogicalOp(val interface{}) enginepb.MVCCLogicalOp {
	var op enginepb.MVCCLogicalOp
	op.MustSetValue(val)
	return op
}

func writeValueOpWithKV(key roachpb.Key, ts hlc.Timestamp, val []byte) enginepb.MVCCLogicalOp {
	return makeLogicalOp(&enginepb.MVCCWriteValueOp{
		Key:       key,
		Timestamp: ts,
		Value:     val,
	})
}

func writeValueOp(ts hlc.Timestamp) enginepb.MVCCLogicalOp {
	return writeValueOpWithKV(roachpb.Key("a"), ts, []byte("val"))
}

func writeIntentOpWithDetails(
	txnID uuid.UUID, key []byte, minTS, ts hlc.Timestamp,
) enginepb.MVCCLogicalOp {
	return makeLogicalOp(&enginepb.MVCCWriteIntentOp{
		TxnID:           txnID,
		TxnKey:          key,
		TxnMinTimestamp: minTS,
		Timestamp:       ts,
	})
}

func writeIntentOpWithKey(txnID uuid.UUID, key []byte, ts hlc.Timestamp) enginepb.MVCCLogicalOp {
	return writeIntentOpWithDetails(txnID, key, ts /* minTS */, ts)
}

func writeIntentOp(txnID uuid.UUID, ts hlc.Timestamp) enginepb.MVCCLogicalOp {
	return writeIntentOpWithKey(txnID, nil /* key */, ts)
}

func updateIntentOp(txnID uuid.UUID, ts hlc.Timestamp) enginepb.MVCCLogicalOp {
	return makeLogicalOp(&enginepb.MVCCUpdateIntentOp{
		TxnID:     txnID,
		Timestamp: ts,
	})
}

func commitIntentOpWithKV(
	txnID uuid.UUID, key roachpb.Key, ts hlc.Timestamp, val []byte,
) enginepb.MVCCLogicalOp {
	return makeLogicalOp(&enginepb.MVCCCommitIntentOp{
		TxnID:     txnID,
		Key:       key,
		Timestamp: ts,
		Value:     val,
	})
}

func commitIntentOp(txnID uuid.UUID, ts hlc.Timestamp) enginepb.MVCCLogicalOp {
	return commitIntentOpWithKV(txnID, roachpb.Key("a"), ts, nil /* val */)
}

func abortIntentOp(txnID uuid.UUID) enginepb.MVCCLogicalOp {
	return makeLogicalOp(&enginepb.MVCCAbortIntentOp{
		TxnID: txnID,
	})
}

func abortTxnOp(txnID uuid.UUID) enginepb.MVCCLogicalOp {
	return makeLogicalOp(&enginepb.MVCCAbortTxnOp{
		TxnID: txnID,
	})
}

func makeRangeFeedEvent(val interface{}) *roachpb.RangeFeedEvent {
	var event roachpb.RangeFeedEvent
	event.MustSetValue(val)
	return &event
}

func rangeFeedValueWithPrev(key roachpb.Key, val, prev roachpb.Value) *roachpb.RangeFeedEvent {
	return makeRangeFeedEvent(&roachpb.RangeFeedValue{
		Key:   key,
		Value: val,
		PrevValue: prev,
	})
}

func rangeFeedValue(key roachpb.Key, val roachpb.Value) *roachpb.RangeFeedEvent {
	return rangeFeedValueWithPrev(key, val, roachpb.Value{})
}

func rangeFeedCheckpoint(span roachpb.Span, ts hlc.Timestamp) *roachpb.RangeFeedEvent {
	return makeRangeFeedEvent(&roachpb.RangeFeedCheckpoint{
		Span:       span,
		ResolvedTS: ts,
	})
}

const testProcessorEventCCap = 16

func newTestProcessorWithTxnPusher(
	rtsIter storage.SimpleIterator, txnPusher TxnPusher,
) (*Processor, *stop.Stopper) {
	stopper := stop.NewStopper()

	var pushTxnInterval, pushTxnAge time.Duration = 0, 0 // disable
	if txnPusher != nil {
		pushTxnInterval = 10 * time.Millisecond
		pushTxnAge = 50 * time.Millisecond
	}

	p := NewProcessor(Config{
		AmbientContext:       log.AmbientContext{Tracer: tracing.NewTracer()},
		Clock:                hlc.NewClock(hlc.UnixNano, time.Nanosecond),
		Span:                 roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("z")},
		TxnPusher:            txnPusher,
		PushTxnsInterval:     pushTxnInterval,
		PushTxnsAge:          pushTxnAge,
		EventChanCap:         testProcessorEventCCap,
		CheckStreamsInterval: 10 * time.Millisecond,
	})
	p.Start(stopper, rtsIter)
	return p, stopper
}

func newTestProcessor(rtsIter storage.SimpleIterator) (*Processor, *stop.Stopper) {
	return newTestProcessorWithTxnPusher(rtsIter, nil /* pusher */)
}

func TestProcessorBasic(t *testing.T) {
	defer leaktest.AfterTest(t)()
	p, stopper := newTestProcessor(nil /* rtsIter */)
	defer stopper.Stop(context.Background())

	// Test processor without registrations.
	require.Equal(t, 0, p.Len())
	require.NotPanics(t, func() { p.ConsumeLogicalOps() })
	require.NotPanics(t, func() { p.ConsumeLogicalOps([]enginepb.MVCCLogicalOp{}...) })
	require.NotPanics(t, func() {
		txn1, txn2 := uuid.MakeV4(), uuid.MakeV4()
		p.ConsumeLogicalOps(
			writeValueOp(hlc.Timestamp{WallTime: 1}),
			writeIntentOp(txn1, hlc.Timestamp{WallTime: 2}),
			updateIntentOp(txn1, hlc.Timestamp{WallTime: 3}),
			commitIntentOp(txn1, hlc.Timestamp{WallTime: 4}),
			writeIntentOp(txn2, hlc.Timestamp{WallTime: 5}),
			abortIntentOp(txn2),
		)
		p.syncEventC()
		require.Equal(t, 0, p.rts.intentQ.Len())
	})
	require.NotPanics(t, func() { p.ForwardClosedTS(hlc.Timestamp{}) })
	require.NotPanics(t, func() { p.ForwardClosedTS(hlc.Timestamp{WallTime: 1}) })

	// Add a registration.
	r1Stream := newTestStream()
	r1ErrC := make(chan *roachpb.Error, 1)
	r1OK, r1Filter := p.Register(
		roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("m")},
		hlc.Timestamp{WallTime: 1},
		nil,   /* catchUpIter */
		false, /* withDiff */
		r1Stream,
		r1ErrC,
	)
	require.True(t, r1OK)
	p.syncEventAndRegistrations()
	require.Equal(t, 1, p.Len())
	require.Equal(t,
		[]*roachpb.RangeFeedEvent{rangeFeedCheckpoint(
			roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("m")},
			hlc.Timestamp{WallTime: 1},
		)},
		r1Stream.Events(),
	)

	// Test the processor's operation filter.
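	// r1 was registered over [a,m) with withDiff = false, so the filter should
	// request values for any span overlapping [a,m) and should never request
	// previous values.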
	require.True(t, r1Filter.NeedVal(roachpb.Span{Key: roachpb.Key("a")}))
	require.True(t, r1Filter.NeedVal(roachpb.Span{Key: roachpb.Key("d"), EndKey: roachpb.Key("r")}))
	require.False(t, r1Filter.NeedVal(roachpb.Span{Key: roachpb.Key("z")}))
	require.False(t, r1Filter.NeedPrevVal(roachpb.Span{Key: roachpb.Key("a")}))
	require.False(t, r1Filter.NeedPrevVal(roachpb.Span{Key: roachpb.Key("d"), EndKey: roachpb.Key("r")}))
	require.False(t, r1Filter.NeedPrevVal(roachpb.Span{Key: roachpb.Key("z")}))

	// Test checkpoint with one registration.
	p.ForwardClosedTS(hlc.Timestamp{WallTime: 5})
	p.syncEventAndRegistrations()
	require.Equal(t,
		[]*roachpb.RangeFeedEvent{rangeFeedCheckpoint(
			roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("m")},
			hlc.Timestamp{WallTime: 5},
		)},
		r1Stream.Events(),
	)

	// Test value with one registration.
	p.ConsumeLogicalOps(
		writeValueOpWithKV(roachpb.Key("c"), hlc.Timestamp{WallTime: 6}, []byte("val")),
	)
	p.syncEventAndRegistrations()
	require.Equal(t,
		[]*roachpb.RangeFeedEvent{rangeFeedValue(
			roachpb.Key("c"),
			roachpb.Value{
				RawBytes:  []byte("val"),
				Timestamp: hlc.Timestamp{WallTime: 6},
			},
		)},
		r1Stream.Events(),
	)

	// Test value to non-overlapping key with one registration.
	p.ConsumeLogicalOps(
		writeValueOpWithKV(roachpb.Key("s"), hlc.Timestamp{WallTime: 6}, []byte("val")),
	)
	p.syncEventAndRegistrations()
	require.Equal(t, []*roachpb.RangeFeedEvent(nil), r1Stream.Events())

	// Test intent that is aborted with one registration.
	txn1 := uuid.MakeV4()
	// Write intent.
	p.ConsumeLogicalOps(writeIntentOp(txn1, hlc.Timestamp{WallTime: 6}))
	p.syncEventAndRegistrations()
	require.Equal(t, []*roachpb.RangeFeedEvent(nil), r1Stream.Events())
	// Abort.
	p.ConsumeLogicalOps(abortIntentOp(txn1))
	p.syncEventC()
	require.Equal(t, []*roachpb.RangeFeedEvent(nil), r1Stream.Events())
	require.Equal(t, 0, p.rts.intentQ.Len())

	// Test intent that is committed with one registration.
	txn2 := uuid.MakeV4()
	// Write intent.
	p.ConsumeLogicalOps(writeIntentOp(txn2, hlc.Timestamp{WallTime: 10}))
	p.syncEventAndRegistrations()
	require.Equal(t, []*roachpb.RangeFeedEvent(nil), r1Stream.Events())
	// Forward closed timestamp. Should now be stuck on intent.
	p.ForwardClosedTS(hlc.Timestamp{WallTime: 15})
	p.syncEventAndRegistrations()
	require.Equal(t,
		[]*roachpb.RangeFeedEvent{rangeFeedCheckpoint(
			roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("m")},
			hlc.Timestamp{WallTime: 9},
		)},
		r1Stream.Events(),
	)
	// Update the intent. Should forward resolved timestamp.
	p.ConsumeLogicalOps(updateIntentOp(txn2, hlc.Timestamp{WallTime: 12}))
	p.syncEventAndRegistrations()
	require.Equal(t,
		[]*roachpb.RangeFeedEvent{rangeFeedCheckpoint(
			roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("m")},
			hlc.Timestamp{WallTime: 11},
		)},
		r1Stream.Events(),
	)
	// Commit intent. Should forward resolved timestamp to closed timestamp.
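	// Committing the intent also publishes its value, so the registration should
	// see a value event for key "e" followed by a checkpoint at the closed
	// timestamp.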
	p.ConsumeLogicalOps(
		commitIntentOpWithKV(txn2, roachpb.Key("e"), hlc.Timestamp{WallTime: 13}, []byte("ival")),
	)
	p.syncEventAndRegistrations()
	require.Equal(t,
		[]*roachpb.RangeFeedEvent{
			rangeFeedValue(
				roachpb.Key("e"),
				roachpb.Value{
					RawBytes:  []byte("ival"),
					Timestamp: hlc.Timestamp{WallTime: 13},
				},
			),
			rangeFeedCheckpoint(
				roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("m")},
				hlc.Timestamp{WallTime: 15},
			),
		},
		r1Stream.Events(),
	)

	// Add another registration with withDiff = true.
	r2Stream := newTestStream()
	r2ErrC := make(chan *roachpb.Error, 1)
	r2OK, r1And2Filter := p.Register(
		roachpb.RSpan{Key: roachpb.RKey("c"), EndKey: roachpb.RKey("z")},
		hlc.Timestamp{WallTime: 1},
		nil,  /* catchUpIter */
		true, /* withDiff */
		r2Stream,
		r2ErrC,
	)
	require.True(t, r2OK)
	p.syncEventAndRegistrations()
	require.Equal(t, 2, p.Len())
	require.Equal(t,
		[]*roachpb.RangeFeedEvent{rangeFeedCheckpoint(
			roachpb.Span{Key: roachpb.Key("c"), EndKey: roachpb.Key("z")},
			hlc.Timestamp{WallTime: 15},
		)},
		r2Stream.Events(),
	)

	// Test the processor's new operation filter.
	require.True(t, r1And2Filter.NeedVal(roachpb.Span{Key: roachpb.Key("a")}))
	require.True(t, r1And2Filter.NeedVal(roachpb.Span{Key: roachpb.Key("y")}))
	require.True(t, r1And2Filter.NeedVal(roachpb.Span{Key: roachpb.Key("y"), EndKey: roachpb.Key("zzz")}))
	require.False(t, r1And2Filter.NeedVal(roachpb.Span{Key: roachpb.Key("zzz")}))
	require.False(t, r1And2Filter.NeedPrevVal(roachpb.Span{Key: roachpb.Key("a")}))
	require.True(t, r1And2Filter.NeedPrevVal(roachpb.Span{Key: roachpb.Key("y")}))
	require.True(t, r1And2Filter.NeedPrevVal(roachpb.Span{Key: roachpb.Key("y"), EndKey: roachpb.Key("zzz")}))
	require.False(t, r1And2Filter.NeedPrevVal(roachpb.Span{Key: roachpb.Key("zzz")}))

	// Both registrations should see checkpoint.
	p.ForwardClosedTS(hlc.Timestamp{WallTime: 20})
	p.syncEventAndRegistrations()
	chEventAM := []*roachpb.RangeFeedEvent{rangeFeedCheckpoint(
		roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("m")},
		hlc.Timestamp{WallTime: 20},
	)}
	require.Equal(t, chEventAM, r1Stream.Events())
	chEventCZ := []*roachpb.RangeFeedEvent{rangeFeedCheckpoint(
		roachpb.Span{Key: roachpb.Key("c"), EndKey: roachpb.Key("z")},
		hlc.Timestamp{WallTime: 20},
	)}
	require.Equal(t, chEventCZ, r2Stream.Events())

	// Test a value that overlaps both registrations.
	p.ConsumeLogicalOps(
		writeValueOpWithKV(roachpb.Key("k"), hlc.Timestamp{WallTime: 22}, []byte("val2")),
	)
	p.syncEventAndRegistrations()
	valEvent := []*roachpb.RangeFeedEvent{rangeFeedValue(
		roachpb.Key("k"),
		roachpb.Value{
			RawBytes:  []byte("val2"),
			Timestamp: hlc.Timestamp{WallTime: 22},
		},
	)}
	require.Equal(t, valEvent, r1Stream.Events())
	require.Equal(t, valEvent, r2Stream.Events())

	// Test value that only overlaps the second registration.
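	// Key "v" lies outside r1's span [a,m) but inside r2's span [c,z), so only
	// the second registration should observe the event.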
	p.ConsumeLogicalOps(
		writeValueOpWithKV(roachpb.Key("v"), hlc.Timestamp{WallTime: 23}, []byte("val3")),
	)
	p.syncEventAndRegistrations()
	valEvent2 := []*roachpb.RangeFeedEvent{rangeFeedValue(
		roachpb.Key("v"),
		roachpb.Value{
			RawBytes:  []byte("val3"),
			Timestamp: hlc.Timestamp{WallTime: 23},
		},
	)}
	require.Equal(t, []*roachpb.RangeFeedEvent(nil), r1Stream.Events())
	require.Equal(t, valEvent2, r2Stream.Events())

	// Cancel the first registration.
	r1Stream.Cancel()
	require.NotNil(t, <-r1ErrC)

	// Stop the processor with an error.
	pErr := roachpb.NewErrorf("stop err")
	p.StopWithErr(pErr)
	require.NotNil(t, <-r2ErrC)

	// Adding another registration should fail.
	r3Stream := newTestStream()
	r3ErrC := make(chan *roachpb.Error, 1)
	r3OK, _ := p.Register(
		roachpb.RSpan{Key: roachpb.RKey("c"), EndKey: roachpb.RKey("z")},
		hlc.Timestamp{WallTime: 1},
		nil,   /* catchUpIter */
		false, /* withDiff */
		r3Stream,
		r3ErrC,
	)
	require.False(t, r3OK)
}

func TestNilProcessor(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var p *Processor

	// All of the following should be no-ops.
	require.Equal(t, 0, p.Len())
	require.NotPanics(t, func() { p.Stop() })
	require.NotPanics(t, func() { p.StopWithErr(nil) })
	require.NotPanics(t, func() { p.ConsumeLogicalOps() })
	require.NotPanics(t, func() { p.ConsumeLogicalOps(make([]enginepb.MVCCLogicalOp, 5)...) })
	require.NotPanics(t, func() { p.ForwardClosedTS(hlc.Timestamp{}) })
	require.NotPanics(t, func() { p.ForwardClosedTS(hlc.Timestamp{WallTime: 1}) })

	// The following should panic because they are not safe
	// to call on a nil Processor.
	require.Panics(t, func() { p.Start(stop.NewStopper(), nil) })
	require.Panics(t, func() { p.Register(roachpb.RSpan{}, hlc.Timestamp{}, nil, false, nil, nil) })
}

func TestProcessorSlowConsumer(t *testing.T) {
	defer leaktest.AfterTest(t)()
	p, stopper := newTestProcessor(nil /* rtsIter */)
	defer stopper.Stop(context.Background())

	// Add a registration.
	r1Stream := newTestStream()
	r1ErrC := make(chan *roachpb.Error, 1)
	p.Register(
		roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("m")},
		hlc.Timestamp{WallTime: 1},
		nil,   /* catchUpIter */
		false, /* withDiff */
		r1Stream,
		r1ErrC,
	)
	r2Stream := newTestStream()
	r2ErrC := make(chan *roachpb.Error, 1)
	p.Register(
		roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("z")},
		hlc.Timestamp{WallTime: 1},
		nil,   /* catchUpIter */
		false, /* withDiff */
		r2Stream,
		r2ErrC,
	)
	p.syncEventAndRegistrations()
	require.Equal(t, 2, p.Len())
	require.Equal(t,
		[]*roachpb.RangeFeedEvent{rangeFeedCheckpoint(
			roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("m")},
			hlc.Timestamp{WallTime: 0},
		)},
		r1Stream.Events(),
	)
	require.Equal(t,
		[]*roachpb.RangeFeedEvent{rangeFeedCheckpoint(
			roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("z")},
			hlc.Timestamp{WallTime: 0},
		)},
		r2Stream.Events(),
	)

	// Block r1's Send method and fill up the registration's input channel.
	unblock := r1Stream.BlockSend()
	defer func() {
		if unblock != nil {
			unblock()
		}
	}()
	// Need one more message to fill the channel because the first one will be
	// sent to the stream and block the registration outputLoop goroutine.
	toFill := testProcessorEventCCap + 1
	for i := 0; i < toFill; i++ {
		ts := hlc.Timestamp{WallTime: int64(i + 2)}
		p.ConsumeLogicalOps(
			writeValueOpWithKV(roachpb.Key("k"), ts, []byte("val")),
		)

		// Wait for just the unblocked registration to catch up. This prevents
		// the race condition where this registration overflows anyway due to
		// the rapid event consumption and small buffer size.
		p.syncEventAndRegistrationSpan(spXY)
	}

	// Consume one more event. Should not block, but should cause r1 to overflow
	// its registration buffer and drop the event.
	p.ConsumeLogicalOps(
		writeValueOpWithKV(roachpb.Key("k"), hlc.Timestamp{WallTime: 18}, []byte("val")),
	)

	// Wait for just the unblocked registration to catch up.
	p.syncEventAndRegistrationSpan(spXY)
	require.Equal(t, toFill+1, len(r2Stream.Events()))
	require.Equal(t, 2, p.reg.Len())

	// Unblock the send channel. The events should quickly be consumed.
	unblock()
	unblock = nil
	p.syncEventAndRegistrations()
	// At least one event should have been dropped due to overflow. We expect
	// exactly one event to be dropped, but it is possible that multiple events
	// were dropped due to rapid event consumption before r1's outputLoop
	// began consuming from its event buffer.
	require.LessOrEqual(t, len(r1Stream.Events()), toFill)
	require.Equal(t, newErrBufferCapacityExceeded().GoError(), (<-r1ErrC).GoError())
	testutils.SucceedsSoon(t, func() error {
		if act, exp := p.Len(), 1; exp != act {
			return fmt.Errorf("processor had %d regs, wanted %d", act, exp)
		}
		return nil
	})
}

// TestProcessorInitializeResolvedTimestamp tests that when a Processor is given
// a resolved timestamp iterator, it doesn't initialize its resolved timestamp
// until it has consumed all intents in the iterator.
func TestProcessorInitializeResolvedTimestamp(t *testing.T) {
	defer leaktest.AfterTest(t)()

	txn1, txn2 := uuid.MakeV4(), uuid.MakeV4()
	rtsIter := newTestIterator([]storage.MVCCKeyValue{
		makeKV("a", "val1", 10),
		makeInline("b", "val2"),
		makeIntent("c", txn1, "txnKey1", 15),
		makeProvisionalKV("c", "txnKey1", 15),
		makeKV("c", "val3", 11),
		makeKV("c", "val4", 9),
		makeIntent("d", txn2, "txnKey2", 21),
		makeProvisionalKV("d", "txnKey2", 21),
		makeKV("d", "val5", 20),
		makeKV("d", "val6", 19),
		makeInline("g", "val7"),
		makeKV("m", "val8", 1),
		makeIntent("n", txn1, "txnKey1", 12),
		makeProvisionalKV("n", "txnKey1", 12),
		makeIntent("r", txn1, "txnKey1", 19),
		makeProvisionalKV("r", "txnKey1", 19),
		makeKV("r", "val9", 4),
		makeIntent("w", txn1, "txnKey1", 3),
		makeProvisionalKV("w", "txnKey1", 3),
		makeInline("x", "val10"),
		makeIntent("z", txn2, "txnKey2", 21),
		makeProvisionalKV("z", "txnKey2", 21),
		makeKV("z", "val11", 4),
	})
	rtsIter.block = make(chan struct{})

	p, stopper := newTestProcessor(rtsIter)
	defer stopper.Stop(context.Background())

	// The resolved timestamp should not be initialized.
	require.False(t, p.rts.IsInit())
	require.Equal(t, hlc.Timestamp{}, p.rts.Get())

	// Add a registration.
	r1Stream := newTestStream()
	p.Register(
		roachpb.RSpan{Key: roachpb.RKey("a"), EndKey: roachpb.RKey("m")},
		hlc.Timestamp{WallTime: 1},
		nil,   /* catchUpIter */
		false, /* withDiff */
		r1Stream,
		make(chan *roachpb.Error, 1),
	)
	p.syncEventAndRegistrations()
	require.Equal(t, 1, p.Len())

	// The registration should be provided a checkpoint immediately with an
	// empty resolved timestamp because it did not perform a catch-up scan.
	chEvent := []*roachpb.RangeFeedEvent{rangeFeedCheckpoint(
		roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("m")},
		hlc.Timestamp{},
	)}
	require.Equal(t, chEvent, r1Stream.Events())

	// The resolved timestamp should still not be initialized.
	require.False(t, p.rts.IsInit())
	require.Equal(t, hlc.Timestamp{}, p.rts.Get())

	// Forward the closed timestamp. The resolved timestamp should still
	// not be initialized.
	p.ForwardClosedTS(hlc.Timestamp{WallTime: 20})
	require.False(t, p.rts.IsInit())
	require.Equal(t, hlc.Timestamp{}, p.rts.Get())

	// Let the scan proceed.
	close(rtsIter.block)
	<-rtsIter.done
	require.True(t, rtsIter.closed)

	// Synchronize the event channel then verify that the resolved timestamp is
	// initialized and that it's blocked on the oldest unresolved intent's txn
	// timestamp. Txn1 has intents at many times but the unresolvedIntentQueue
	// tracks its latest, which is 19, so the resolved timestamp is
	// 19.FloorPrev() = 18.
	p.syncEventAndRegistrations()
	require.True(t, p.rts.IsInit())
	require.Equal(t, hlc.Timestamp{WallTime: 18}, p.rts.Get())

	// The registration should have been informed of the new resolved timestamp.
	chEvent = []*roachpb.RangeFeedEvent{rangeFeedCheckpoint(
		roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("m")},
		hlc.Timestamp{WallTime: 18},
	)}
	require.Equal(t, chEvent, r1Stream.Events())
}

func TestProcessorTxnPushAttempt(t *testing.T) {
	defer leaktest.AfterTest(t)()

	ts10 := hlc.Timestamp{WallTime: 10}
	ts20 := hlc.Timestamp{WallTime: 20}
	ts25 := hlc.Timestamp{WallTime: 25}
	ts30 := hlc.Timestamp{WallTime: 30}
	ts50 := hlc.Timestamp{WallTime: 50}
	ts60 := hlc.Timestamp{WallTime: 60}
	ts70 := hlc.Timestamp{WallTime: 70}
	ts90 := hlc.Timestamp{WallTime: 90}

	// Create a set of transactions.
	txn1, txn2, txn3 := uuid.MakeV4(), uuid.MakeV4(), uuid.MakeV4()
	txn1Meta := enginepb.TxnMeta{ID: txn1, Key: keyA, WriteTimestamp: ts10, MinTimestamp: ts10}
	txn2Meta := enginepb.TxnMeta{ID: txn2, Key: keyB, WriteTimestamp: ts20, MinTimestamp: ts20}
	txn3Meta := enginepb.TxnMeta{ID: txn3, Key: keyC, WriteTimestamp: ts30, MinTimestamp: ts30}
	txn1Proto := &roachpb.Transaction{TxnMeta: txn1Meta, Status: roachpb.PENDING}
	txn2Proto := &roachpb.Transaction{TxnMeta: txn2Meta, Status: roachpb.PENDING}
	txn3Proto := &roachpb.Transaction{TxnMeta: txn3Meta, Status: roachpb.PENDING}

	// Modifications for test 2.
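	// After the second push, txn1 should be committed at ts50 while txn2 and
	// txn3 remain pending, pushed to ts60 and ts70 respectively.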
	txn1MetaT2Pre := enginepb.TxnMeta{ID: txn1, Key: keyA, WriteTimestamp: ts25, MinTimestamp: ts10}
	txn1MetaT2Post := enginepb.TxnMeta{ID: txn1, Key: keyA, WriteTimestamp: ts50, MinTimestamp: ts10}
	txn2MetaT2Post := enginepb.TxnMeta{ID: txn2, Key: keyB, WriteTimestamp: ts60, MinTimestamp: ts20}
	txn3MetaT2Post := enginepb.TxnMeta{ID: txn3, Key: keyC, WriteTimestamp: ts70, MinTimestamp: ts30}
	txn1ProtoT2 := &roachpb.Transaction{TxnMeta: txn1MetaT2Post, Status: roachpb.COMMITTED}
	txn2ProtoT2 := &roachpb.Transaction{TxnMeta: txn2MetaT2Post, Status: roachpb.PENDING}
	txn3ProtoT2 := &roachpb.Transaction{TxnMeta: txn3MetaT2Post, Status: roachpb.PENDING}

	// Modifications for test 3.
	txn2MetaT3Post := enginepb.TxnMeta{ID: txn2, Key: keyB, WriteTimestamp: ts60, MinTimestamp: ts20}
	txn3MetaT3Post := enginepb.TxnMeta{ID: txn3, Key: keyC, WriteTimestamp: ts90, MinTimestamp: ts30}
	txn2ProtoT3 := &roachpb.Transaction{TxnMeta: txn2MetaT3Post, Status: roachpb.ABORTED}
	txn3ProtoT3 := &roachpb.Transaction{TxnMeta: txn3MetaT3Post, Status: roachpb.PENDING}

	testNum := 0
	pausePushAttemptsC := make(chan struct{})
	resumePushAttemptsC := make(chan struct{})
	defer close(pausePushAttemptsC)
	defer close(resumePushAttemptsC)

	// Create a TxnPusher that performs assertions during the first 3 uses.
	var tp testTxnPusher
	tp.mockPushTxns(func(txns []enginepb.TxnMeta, ts hlc.Timestamp) ([]*roachpb.Transaction, error) {
		// The txns are not in a sorted order. Enforce one.
		sort.Slice(txns, func(i, j int) bool {
			return bytes.Compare(txns[i].Key, txns[j].Key) < 0
		})

		testNum++
		switch testNum {
		case 1:
			require.Equal(t, 3, len(txns))
			require.Equal(t, txn1Meta, txns[0])
			require.Equal(t, txn2Meta, txns[1])
			require.Equal(t, txn3Meta, txns[2])

			// Push does not succeed. Protos not at larger ts.
			return []*roachpb.Transaction{txn1Proto, txn2Proto, txn3Proto}, nil
		case 2:
			require.Equal(t, 3, len(txns))
			require.Equal(t, txn1MetaT2Pre, txns[0])
			require.Equal(t, txn2Meta, txns[1])
			require.Equal(t, txn3Meta, txns[2])

			// Push succeeds. Return new protos.
			return []*roachpb.Transaction{txn1ProtoT2, txn2ProtoT2, txn3ProtoT2}, nil
		case 3:
			require.Equal(t, 2, len(txns))
			require.Equal(t, txn2MetaT2Post, txns[0])
			require.Equal(t, txn3MetaT2Post, txns[1])

			// Push succeeds. Return new protos.
			return []*roachpb.Transaction{txn2ProtoT3, txn3ProtoT3}, nil
		default:
			return nil, nil
		}
	})
	tp.mockCleanupTxnIntentsAsync(func(txns []*roachpb.Transaction) error {
		switch testNum {
		case 1:
			require.Equal(t, 0, len(txns))
		case 2:
			require.Equal(t, 1, len(txns))
			require.Equal(t, txn1ProtoT2, txns[0])
		case 3:
			require.Equal(t, 1, len(txns))
			require.Equal(t, txn2ProtoT3, txns[0])
		default:
			return nil
		}

		<-pausePushAttemptsC
		<-resumePushAttemptsC
		return nil
	})

	p, stopper := newTestProcessorWithTxnPusher(nil /* rtsIter */, &tp)
	defer stopper.Stop(context.Background())

	// Add a few intents and move the closed timestamp forward.
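	// writeIntentOpFromMeta converts a TxnMeta into the corresponding write
	// intent op. With txn1's earliest intent at ts10, the resolved timestamp
	// should trail it at wall time 9.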
	writeIntentOpFromMeta := func(txn enginepb.TxnMeta) enginepb.MVCCLogicalOp {
		return writeIntentOpWithDetails(txn.ID, txn.Key, txn.MinTimestamp, txn.WriteTimestamp)
	}
	p.ConsumeLogicalOps(
		writeIntentOpFromMeta(txn1Meta),
		writeIntentOpFromMeta(txn2Meta),
		writeIntentOpFromMeta(txn2Meta),
		writeIntentOpFromMeta(txn3Meta),
	)
	p.ForwardClosedTS(hlc.Timestamp{WallTime: 40})
	p.syncEventC()
	require.Equal(t, hlc.Timestamp{WallTime: 9}, p.rts.Get())

	// Wait for the first txn push attempt to complete.
	pausePushAttemptsC <- struct{}{}

	// The resolved timestamp hasn't moved.
	p.syncEventC()
	require.Equal(t, hlc.Timestamp{WallTime: 9}, p.rts.Get())

	// Write another intent for one of the txns. This moves the resolved
	// timestamp forward.
	p.ConsumeLogicalOps(writeIntentOpFromMeta(txn1MetaT2Pre))
	p.syncEventC()
	require.Equal(t, hlc.Timestamp{WallTime: 19}, p.rts.Get())

	// Unblock the second txn push attempt and wait for it to complete.
	resumePushAttemptsC <- struct{}{}
	pausePushAttemptsC <- struct{}{}

	// The resolved timestamp should have moved forwards to the closed
	// timestamp.
	p.syncEventC()
	require.Equal(t, hlc.Timestamp{WallTime: 40}, p.rts.Get())

	// Forward the closed timestamp.
	p.ForwardClosedTS(hlc.Timestamp{WallTime: 80})
	p.syncEventC()
	require.Equal(t, hlc.Timestamp{WallTime: 49}, p.rts.Get())

	// Txn1's first intent is committed. Resolved timestamp doesn't change.
	p.ConsumeLogicalOps(
		commitIntentOp(txn1MetaT2Post.ID, txn1MetaT2Post.WriteTimestamp),
	)
	p.syncEventC()
	require.Equal(t, hlc.Timestamp{WallTime: 49}, p.rts.Get())

	// Txn1's second intent is committed. Resolved timestamp moves forward.
	p.ConsumeLogicalOps(
		commitIntentOp(txn1MetaT2Post.ID, txn1MetaT2Post.WriteTimestamp),
	)
	p.syncEventC()
	require.Equal(t, hlc.Timestamp{WallTime: 59}, p.rts.Get())

	// Unblock the third txn push attempt and wait for it to complete.
	resumePushAttemptsC <- struct{}{}
	pausePushAttemptsC <- struct{}{}

	// The resolved timestamp should have moved forwards to the closed
	// timestamp.
	p.syncEventC()
	require.Equal(t, hlc.Timestamp{WallTime: 80}, p.rts.Get())

	// Forward the closed timestamp.
	p.ForwardClosedTS(hlc.Timestamp{WallTime: 100})
	p.syncEventC()
	require.Equal(t, hlc.Timestamp{WallTime: 89}, p.rts.Get())

	// Commit txn3's only intent. Resolved timestamp moves forward.
	p.ConsumeLogicalOps(
		commitIntentOp(txn3MetaT3Post.ID, txn3MetaT3Post.WriteTimestamp),
	)
	p.syncEventC()
	require.Equal(t, hlc.Timestamp{WallTime: 100}, p.rts.Get())

	// Release push attempt to avoid deadlock.
	resumePushAttemptsC <- struct{}{}
}

// TestProcessorConcurrentStop tests that all methods in Processor's API
// correctly handle the processor concurrently shutting down. If they did
// not then it would be possible for them to deadlock.
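// Each trial races a registration, op consumption, a closed timestamp update,
// Processor.Stop, and stopper.Stop against one another.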
func TestProcessorConcurrentStop(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const trials = 10
	for i := 0; i < trials; i++ {
		p, stopper := newTestProcessor(nil /* rtsIter */)

		var wg sync.WaitGroup
		wg.Add(6)
		go func() {
			defer wg.Done()
			runtime.Gosched()
			s := newTestStream()
			errC := make(chan<- *roachpb.Error, 1)
			p.Register(p.Span, hlc.Timestamp{}, nil, false, s, errC)
		}()
		go func() {
			defer wg.Done()
			runtime.Gosched()
			p.Len()
		}()
		go func() {
			defer wg.Done()
			runtime.Gosched()
			p.ConsumeLogicalOps(
				writeValueOpWithKV(roachpb.Key("s"), hlc.Timestamp{WallTime: 6}, []byte("val")),
			)
		}()
		go func() {
			defer wg.Done()
			runtime.Gosched()
			p.ForwardClosedTS(hlc.Timestamp{WallTime: 2})
		}()
		go func() {
			defer wg.Done()
			runtime.Gosched()
			p.Stop()
		}()
		go func() {
			defer wg.Done()
			runtime.Gosched()
			stopper.Stop(context.Background())
		}()
		wg.Wait()
	}
}

// TestProcessorRegistrationObservesOnlyNewEvents tests that a registration
// observes only operations that are consumed after it has registered.
func TestProcessorRegistrationObservesOnlyNewEvents(t *testing.T) {
	defer leaktest.AfterTest(t)()
	p, stopper := newTestProcessor(nil /* rtsIter */)
	defer stopper.Stop(context.Background())

	firstC := make(chan int64)
	regDone := make(chan struct{})
	regs := make(map[*testStream]int64)

	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		for i := int64(1); i < 250; i++ {
			// Add a new registration every 10 ops.
			if i%10 == 0 {
				firstC <- i
				<-regDone
			}

			// Consume the logical op. Encode the index in the timestamp.
			p.ConsumeLogicalOps(writeValueOp(hlc.Timestamp{WallTime: i}))
		}
		p.syncEventC()
		close(firstC)
	}()
	go func() {
		defer wg.Done()
		for firstIdx := range firstC {
			// For each index, create a new registration. The first
			// operation it should see is firstIdx.
			s := newTestStream()
			regs[s] = firstIdx
			errC := make(chan *roachpb.Error, 1)
			p.Register(p.Span, hlc.Timestamp{}, nil, false, s, errC)
			regDone <- struct{}{}
		}
	}()
	wg.Wait()
	p.syncEventAndRegistrations()

	// Verify that no registrations were given operations
	// from before they registered.
	for s, expFirstIdx := range regs {
		events := s.Events()
		require.IsType(t, &roachpb.RangeFeedCheckpoint{}, events[0].GetValue())
		require.IsType(t, &roachpb.RangeFeedValue{}, events[1].GetValue())

		firstVal := events[1].GetValue().(*roachpb.RangeFeedValue)
		firstIdx := firstVal.Value.Timestamp.WallTime
		require.Equal(t, expFirstIdx, firstIdx)
	}
}

// syncEventAndRegistrations waits for all previously sent events to be
// processed *and* for all registration output loops to fully process their own
// internal buffers.
func (p *Processor) syncEventAndRegistrations() {
	p.syncEventAndRegistrationSpan(all)
}

// syncEventAndRegistrationSpan waits for all previously sent events to be
// processed *and* for all registration output loops for registrations
// overlapping the given span to fully process their own internal buffers.
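// It does so by injecting a sync event tagged with testRegCatchupSpan into the
// event channel and waiting for it to be acknowledged, unless the processor
// has already stopped.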
func (p *Processor) syncEventAndRegistrationSpan(span roachpb.Span) {
	syncC := make(chan struct{})
	select {
	case p.eventC <- event{syncC: syncC, testRegCatchupSpan: span}:
		select {
		case <-syncC:
			// Synchronized.
		case <-p.stoppedC:
			// Already stopped. Do nothing.
		}
	case <-p.stoppedC:
		// Already stopped. Do nothing.
	}
}