github.com/muyo/sno@v1.2.1/generator_test.go

// +build test

package sno

import (
	"fmt"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	_ "unsafe"

	"github.com/muyo/sno/internal"
)

// snotime is the actual time source used by Generators during tests.
//
// We split on build tags ("test") to swap out the snotime() implementations provided by platform-specific
// code, so that tests can use mocked time sources without impacting a Generator's runtime performance
// in production builds in any way.
//
// Note: Attempting to run the test suite without the "test" build tag will fail, resulting in several
// compilation errors.
var snotime = internal.Snotime

// monotime provides real monotonic clock readings to several tests.
//go:linkname monotime runtime.nanotime
func monotime() int64

// staticTime provides tests with a fake time source which returns a fixed time on each call.
// The time returned can be changed by directly (atomically) mutating the underlying variable.
func staticTime() uint64 {
	return atomic.LoadUint64(staticWallNow)
}

// staticIncTime provides tests with a fake time source which returns a time based on a fixed time,
// monotonically increasing by 1 TimeUnit on each call.
func staticIncTime() uint64 {
	wall := atomic.LoadUint64(staticWallNow) + atomic.LoadUint64(staticInc)*TimeUnit

	atomic.AddUint64(staticInc, 1)

	return wall
}

var (
	staticInc     = new(uint64)
	staticWallNow = func() *uint64 {
		wall := snotime()
		return &wall
	}()
)

func TestGenerator_NewNoOverflow(t *testing.T) {
	var (
		part    = Partition{255, 255}
		seqPool = uint16(MaxSequence / 2)
		seqMin  = seqPool
		seqMax  = 2*seqPool - 1

		// Scaled to not exceed bounds - otherwise we run into the seqOverflow race and the ordering
		// (which we test for here) becomes non-deterministic.
		sampleSize = int(seqPool)
		g, err     = NewGenerator(&GeneratorSnapshot{
			Partition:   part,
			SequenceMin: seqMin,
			SequenceMax: seqMax,
		}, nil)
	)

	if err != nil {
		t.Fatal(err)
	}

	ids := make([]ID, sampleSize)
	for i := 0; i < sampleSize; i++ {
		ids[i] = g.New(byte(i))
	}

	for i := 1; i < sampleSize; i++ {
		curID, prevID := ids[i], ids[i-1]

		seq := ids[i].Sequence()
		if seq > seqMax {
			t.Errorf("%d: sequence overflowing max boundary; max [%d], got [%d]", i, seqMax, seq)
		}

		if seq < seqMin {
			t.Errorf("%d: sequence underflowing min boundary; min [%d], got [%d]", i, seqMin, seq)
		}

		// We expect the time to increment, and never by more than one time unit, since
		// we generated the IDs in sequence.
		timeDiff := curID.Timestamp() - prevID.Timestamp()

		// Check whether drift got applied in this edge case.
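		// Throughout this file the tick-tock bit is read as the lowest bit of byte 4 of an ID
		// (curID[4]&1). A timestamp that moved backwards between consecutive IDs is only acceptable
		// when that bit is set, i.e. when the generator compensated for a clock regression by
		// tick-tocking - which is what the check below asserts.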
		if timeDiff < 0 && curID[4]&1 == 0 {
			t.Error("timestamp of next ID lower than previous and no tick-tock applied")
		}

		if timeDiff > TimeUnit {
			t.Error("timestamp diff between IDs is higher than one time unit")
		}

		if prevID.Partition() != part {
			t.Errorf("%d: partition differs from generator's partition; expected [%d], got [%d]", i, part, prevID.Partition())
		}
	}
}

func TestGenerator_NewOverflows(t *testing.T) {
	var (
		part         = Partition{255, 255}
		seqPool      = 512
		seqOverflows = 16
		seqMin       = uint16(seqPool)
		seqMax       = uint16(2*seqPool - 1)
		sampleSize   = seqPool * seqOverflows

		c       = make(chan *SequenceOverflowNotification)
		cc      = make(chan struct{})
		notesHi = new(int64)

		g, err = NewGenerator(&GeneratorSnapshot{
			Partition:   part,
			SequenceMin: seqMin,
			SequenceMax: seqMax,
		}, c)
	)

	if err != nil {
		t.Fatal(err)
	}

	go func() {
		for {
			select {
			case note := <-c:
				if note.Count > 0 {
					atomic.AddInt64(notesHi, 1)
				}
			case <-cc:
				return
			}
		}
	}()

	ids := make([]ID, sampleSize)
	for i := 0; i < sampleSize; i++ {
		ids[i] = g.New(byte(i))
	}

	close(cc)

	// TODO(alcore) The non-blocking writes are far from reliable. The notifications need a rework with
	// deep profiling.
	if atomic.LoadInt64(notesHi) < int64(seqOverflows)/4 {
		t.Errorf("expected at least [%d] overflow notifications, got [%d]", seqOverflows/4, atomic.LoadInt64(notesHi))
	}

	timeDist := make(map[int64]int)

	for i := 0; i < sampleSize; i++ {
		id := ids[i]
		timeDist[id.Timestamp()]++

		seq := id.Sequence()
		if seq > seqMax {
			t.Errorf("%d: sequence overflowing max boundary; max [%d], got [%d]", i, seqMax, seq)
		}

		if seq < seqMin {
			t.Errorf("%d: sequence underflowing min boundary; min [%d], got [%d]", i, seqMin, seq)
		}

		if id.Partition() != part {
			t.Errorf("%d: partition differs from generator's partition; expected [%d], got [%d]", i, part, id.Partition())
		}
	}

	for tf, c := range timeDist {
		if c > seqPool {
			t.Errorf("count of IDs in the given timeframe exceeds pool; timestamp [%d], pool [%d], count [%d]", tf, seqPool, c)
		}
	}
}

func TestGenerator_NewTickTocks(t *testing.T) {
	g, ids := testGeneratorNewTickTocksSetup(t)
	t.Run("Tick", testGeneratorNewTickTocksTick(g, ids))
	t.Run("SafetySlumber", testGeneratorNewTickTocksSafetySlumber(g, ids))
	t.Run("Tock", testGeneratorNewTickTocksTock(g, ids))
	t.Run("Race", testGeneratorNewTickTocksRace(g, ids))
}

func testGeneratorNewTickTocksSetup(t *testing.T) (*Generator, []ID) {
	var (
		seqPool = 8096
		g, err  = NewGenerator(&GeneratorSnapshot{
			Partition:   Partition{255, 255},
			SequenceMin: uint16(seqPool),
			SequenceMax: uint16(2*seqPool - 1),
		}, nil)
	)
	if err != nil {
		t.Fatal(err)
	}

	return g, make([]ID, g.Cap())
}

func testGeneratorNewTickTocksTick(g *Generator, ids []ID) func(*testing.T) {
	return func(t *testing.T) {
		// First batch follows normal time progression.
		for i := 0; i < 512; i++ {
			ids[i] = g.New(255)
		}

		wall := snotime()
		atomic.StoreUint64(staticWallNow, wall-TimeUnit)

		// Swap out the time source. The next batch is supposed to record a drift, have its tick-tock bit
		// set to 1, and wallSafe on the generator must be updated accordingly.
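		// Reassigning snotime like this only works under the "test" build tag, where snotime is a
		// package-level variable (see the note at the top of this file) instead of the
		// platform-specific function used in production builds.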
		snotime = staticTime

		if atomic.LoadUint32(&g.drifts) != 0 {
			t.Errorf("expected [0] drifts recorded, got [%d]", atomic.LoadUint32(&g.drifts))
		}

		if atomic.LoadUint64(&g.wallSafe) != 0 {
			t.Errorf("expected wallSafe to be [0], is [%d]", atomic.LoadUint64(&g.wallSafe))
		}

		for j := 512; j < 1024; j++ {
			ids[j] = g.New(255)
		}

		if atomic.LoadUint32(&g.drifts) != 1 {
			t.Errorf("expected [1] drift recorded, got [%d]", atomic.LoadUint32(&g.drifts))
		}

		if atomic.LoadUint64(&g.wallSafe) == atomic.LoadUint64(staticWallNow) {
			t.Errorf("expected wallSafe to differ from the regressed time [%d], got [%d]", atomic.LoadUint64(staticWallNow), atomic.LoadUint64(&g.wallSafe))
		}

		for i := 0; i < 512; i++ {
			if ids[i][4]&1 != 0 {
				t.Errorf("%d: expected tick-tock bit to not be set, was set", i)
			}
		}

		for j := 512; j < 1024; j++ {
			if ids[j][4]&1 != 1 {
				t.Errorf("%d: expected tick-tock bit to be set, was not", j)
			}
		}

		snotime = internal.Snotime
	}
}

func testGeneratorNewTickTocksSafetySlumber(g *Generator, ids []ID) func(*testing.T) {
	return func(t *testing.T) {
		// Multi-regression, checking on a single goroutine.
		atomic.AddUint64(staticWallNow, ^uint64(TimeUnit-1))

		// Use a clock whose first call returns the static clock's time but whose subsequent calls
		// return higher times. Since we haven't adjusted the mono clock at all so far, it is currently
		// 1 TimeUnit (first drift) behind wallSafe, which got set during the initial drift. This is
		// how long the next generation call(s) are supposed to sleep, as we are simulating a
		// multi-regression (into an unsafe past where we can't tick-tock again until reaching wallSafe).
		snotime = staticIncTime

		mono1 := monotime()
		id := g.New(255)
		if id[4]&1 != 1 {
			t.Errorf("expected tick-tock bit to be set, was not")
		}
		mono2 := monotime()

		monoDiff := mono2 - mono1

		// We had 2 regressions by 1 TimeUnit each, so the sleep duration should've been roughly
		// the same, since time was static (it got incremented only after the sleep).
		if monoDiff < 2*TimeUnit {
			t.Errorf("expected to sleep for at least [%f]ns, took [%d] instead", 2*TimeUnit, monoDiff)
		} else if monoDiff > 5*TimeUnit {
			t.Errorf("expected to sleep for no more than [%f]ns, took [%d] instead", 5*TimeUnit, monoDiff)
		}

		if atomic.LoadUint32(&g.drifts) != 1 {
			t.Errorf("expected [1] drift recorded, got [%d]", atomic.LoadUint32(&g.drifts))
		}

		snotime = internal.Snotime
	}
}

func testGeneratorNewTickTocksTock(g *Generator, ids []ID) func(*testing.T) {
	return func(t *testing.T) {
		// At this point we are going to simulate another drift, somewhere in the 'far' future,
		// with parallel load.
		snotime = staticTime
		atomic.AddUint64(staticWallNow, 100*TimeUnit)

		g.New(255) // Updates wallHi.

		// Regress again. Not adjusting the mono clock - the calls below are supposed to simply drift.
		// The drift count is supposed to end at 2 (since we're still using the same generator) and the
		// tick-tock bit is supposed to be unset.
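		// sync/atomic has no subtraction counterpart to AddUint64; adding the two's complement
		// ^uint64(n-1) is equivalent to subtracting n, so the call below moves staticWallNow back
		// by 2*TimeUnit. The same idiom is used wherever this file regresses the static clock.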
		atomic.AddUint64(staticWallNow, ^uint64(2*TimeUnit-1))

		var (
			batchCount = 4
			batchSize  = g.Cap() / batchCount
			wg         sync.WaitGroup
		)

		wg.Add(batchCount)

		for i := 0; i < batchCount; i++ {
			go func(mul int) {
				for i := mul * batchSize; i < mul*batchSize+batchSize; i++ {
					ids[i] = g.New(255)
				}
				wg.Done()
			}(i)
		}

		wg.Wait()

		if atomic.LoadUint32(&g.drifts) != 2 {
			t.Errorf("expected [2] drifts recorded, got [%d]", atomic.LoadUint32(&g.drifts))
		}

		for i := 0; i < g.Cap(); i++ {
			if ids[i][4]&1 != 0 {
				t.Errorf("%d: expected tick-tock bit to not be set, was set", i)
			}
		}

		snotime = internal.Snotime
	}
}

func testGeneratorNewTickTocksRace(g *Generator, ids []ID) func(*testing.T) {
	return func(*testing.T) {
		snotime = staticTime

		atomic.AddUint64(staticWallNow, 100*TimeUnit)
		g.New(255)
		atomic.AddUint64(staticWallNow, ^uint64(TimeUnit-1))

		var (
			wgOuter sync.WaitGroup
			wgInner sync.WaitGroup
		)
		wgOuter.Add(1000)

		wgInner.Add(1000)
		for i := 0; i < 1000; i++ {
			go func() {
				wgInner.Done()
				wgInner.Wait()
				for i := 0; i < 2; i++ {
					_ = g.New(byte(i))
				}
				wgOuter.Done()
			}()
		}
		wgOuter.Wait()

		snotime = internal.Snotime
	}
}

func TestGenerator_NewGeneratorRestoreRegressions(t *testing.T) {
	// First we simply check that the times get applied at all. We get rid of the time
	// added while simulating the last drift.
	g, err := NewGenerator(nil, nil)
	if err != nil {
		t.Fatal(err)
	}

	// Reset the static clock.
	wall := snotime()
	snotime = staticTime
	atomic.StoreUint64(staticWallNow, wall)

	// Simulate a regression.
	g.New(255)
	atomic.AddUint64(staticWallNow, ^uint64(TimeUnit-1))
	g.New(255)

	snapshot := g.Snapshot()

	g, err = NewGenerator(&snapshot, nil)
	if err != nil {
		t.Fatal(err)
	}

	if uint64(snapshot.WallSafe) != atomic.LoadUint64(&g.wallSafe) {
		t.Errorf("expected [%d], got [%d]", snapshot.WallSafe, atomic.LoadUint64(&g.wallSafe))
	}

	if uint64(snapshot.WallHi) != atomic.LoadUint64(&g.wallHi) {
		t.Errorf("expected [%d], got [%d]", snapshot.WallHi, atomic.LoadUint64(&g.wallHi))
	}

	// Second test, with a snapshot taken "in the future" (relative to the current wall clock time).
	wall = internal.Snotime()
	atomic.StoreUint64(staticWallNow, wall+100*TimeUnit)

	// Simulate another regression. It takes place in the future - we are going to take a snapshot
	// and create a generator from that snapshot, where the generator will use snotime (the current
	// time) for comparison and is supposed to handle being in the past relative to the snapshot.
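	// As before, the first call establishes wallHi at the (future) static time, the regression
	// updates wallSafe, and both end up captured by the snapshot taken below.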
	g.New(255)
	atomic.AddUint64(staticWallNow, ^uint64(TimeUnit-1))
	g.New(255)

	snotime = internal.Snotime

	snapshot = g.Snapshot()

	g, err = NewGenerator(&snapshot, nil)
	if err != nil {
		t.Fatal(err)
	}

	if uint64(snapshot.WallSafe) != atomic.LoadUint64(&g.wallSafe) {
		t.Errorf("expected [%d], got [%d]", snapshot.WallSafe, atomic.LoadUint64(&g.wallSafe))
	}

	if wall > atomic.LoadUint64(&g.wallHi) {
		t.Errorf("expected smaller than [%d], got [%d]", wall, atomic.LoadUint64(&g.wallHi))
	}
}

func TestGenerator_NewWithTimeOverflows(t *testing.T) {
	var (
		part         = Partition{255, 255}
		seqPool      = 12
		seqOverflows = 4
		seqMin       = uint16(seqPool)
		seqMax       = uint16(2*seqPool - 1)
		sampleSize   = seqPool * seqOverflows

		g, err = NewGenerator(&GeneratorSnapshot{
			Partition:   part,
			SequenceMin: seqMin,
			SequenceMax: seqMax,
		}, nil)
	)

	if err != nil {
		t.Fatal(err)
	}

	tn := time.Now()
	pool := g.Cap()

	ids := make([]ID, sampleSize)
	for i := 0; i < sampleSize; i++ {
		ids[i] = g.NewWithTime(byte(i), tn)
	}

	timeDist := make(map[int64]int)

	for i, s := 0, 0; i < sampleSize; i, s = i+1, s+1 {
		id := ids[i]
		timeDist[id.Timestamp()]++

		seq := id.Sequence()
		if seq > seqMax {
			t.Errorf("%d: sequence overflowing max boundary; max [%d], got [%d]", i, seqMax, seq)
		}

		if seq < seqMin {
			t.Errorf("%d: sequence underflowing min boundary; min [%d], got [%d]", i, seqMin, seq)
		}

		// When we overflow with NewWithTime, the static sequence is supposed to roll over silently.
		if s == pool {
			s = 0
		} else if i > 0 && seq-ids[i-1].Sequence() != 1 {
			t.Errorf("%d: expected sequence to increment by 1, got [%d]", i, seq-ids[i-1].Sequence())
		}

		expectedSeq := uint16(s) + seqMin
		if seq != expectedSeq {
			t.Errorf("%d: expected sequence [%d], got [%d]", i, expectedSeq, seq)
		}

		if id.Partition() != part {
			t.Errorf("%d: partition differs from generator's partition; expected [%d], got [%d]", i, part, id.Partition())
		}
	}

	if len(timeDist) > 1 {
		t.Error("IDs generated with the same time ended up with different timestamps")
	}

	// Race test.
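	// The block below only exercises NewWithTime concurrently so the race detector can catch
	// unsynchronized access; it makes no assertions of its own.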
	var wg sync.WaitGroup
	wg.Add(1000)
	for i := 0; i < 1000; i++ {
		go func() {
			for i := 0; i < sampleSize; i++ {
				_ = g.NewWithTime(byte(i), tn)
			}
			wg.Done()
		}()
	}
	wg.Wait()
}

func TestGenerator_Uniqueness(t *testing.T) {
	var (
		collisions int
		setSize    = 4 * MaxSequence
	)

	ids := make(map[ID]struct{}, setSize)

	for i := 1; i < setSize; i++ {
		id := generator.New(255)
		if _, found := ids[id]; found {
			collisions++
		} else {
			ids[id] = struct{}{}
		}
	}

	if collisions > 0 {
		t.Errorf("generated %d colliding IDs in a set of %d", collisions, setSize)
	}
}

func TestGenerator_Partition(t *testing.T) {
	expected := Partition{'A', 255}
	g, err := NewGenerator(&GeneratorSnapshot{
		Partition: expected,
	}, nil)
	if err != nil {
		t.Fatal(err)
	}

	actual := g.Partition()
	if actual != expected {
		t.Errorf("expected [%s], got [%s]", expected, actual)
	}
}

func TestGenerator_SequenceBounds(t *testing.T) {
	min := uint16(1024)
	max := uint16(2047)
	g, err := NewGenerator(&GeneratorSnapshot{
		SequenceMin: min,
		SequenceMax: max,
	}, nil)
	if err != nil {
		t.Fatal(err)
	}

	if actual, expected := g.SequenceMin(), min; actual != expected {
		t.Errorf("expected [%d], got [%d]", expected, actual)
	}

	if actual, expected := g.SequenceMax(), max; actual != expected {
		t.Errorf("expected [%d], got [%d]", expected, actual)
	}

	if actual, expected := g.Cap(), int(max-min)+1; actual != expected {
		t.Errorf("expected [%d], got [%d]", expected, actual)
	}

	if actual, expected := g.Len(), 0; actual != expected {
		t.Errorf("expected [%d], got [%d]", expected, actual)
	}

	for i := 0; i < 5; i++ {
		g.New(255)
	}

	if actual, expected := g.Len(), 5; actual != expected {
		t.Errorf("expected [%d], got [%d]", expected, actual)
	}

	g, err = NewGenerator(&GeneratorSnapshot{
		SequenceMin: 8,
		SequenceMax: 16,
	}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// Simulate an overflow. All IDs over Cap() must be generated in a subsequent timeframe,
	// meaning Len will reflect the count in the last frame.
	// TODO(alcore) This *can* occasionally fail as we are not using a deterministic time source,
	// meaning the first batch can get split up if the time changes during the test and end up
	// spilling into the Len() we test for.
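	// With SequenceMin 8 and SequenceMax 16 the pool capacity is 9, so Cap()+7 calls fill one
	// timeframe and spill the remaining 7 IDs into the next - hence the Len() of 7 expected below.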
	for i := 0; i < g.Cap()+7; i++ {
		g.New(255)
	}

	if actual, expected := g.Len(), 7; actual != expected {
		t.Errorf("expected [%d], got [%d]", expected, actual)
	}

	g, err = NewGenerator(&GeneratorSnapshot{
		SequenceMin: 8,
		SequenceMax: 16,
	}, nil)
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < g.Cap(); i++ {
		g.New(255)
	}

	if actual, expected := g.Len(), g.Cap(); actual != expected {
		t.Errorf("expected [%d], got [%d]", expected, actual)
	}
}

func TestGenerator_Sequence_Single(t *testing.T) {
	g, err := NewGenerator(nil, nil)
	if err != nil {
		t.Fatal(err)
	}

	expected0 := uint32(0)
	expected1 := expected0
	expected2 := expected1 + 1
	actual0 := g.Sequence()
	_ = g.New(255)
	actual1 := g.Sequence()
	_ = g.New(255)
	actual2 := g.Sequence()

	if actual0 != expected0 {
		t.Errorf("expected [%d], got [%d]", expected0, actual0)
	}
	if actual1 != expected1 {
		t.Errorf("expected [%d], got [%d]", expected1, actual1)
	}
	if actual2 != expected2 {
		t.Errorf("expected [%d], got [%d]", expected2, actual2)
	}
}

func TestGenerator_Sequence_Batch(t *testing.T) {
	g, err := NewGenerator(nil, nil)
	if err != nil {
		t.Fatal(err)
	}

	expected := uint32(9)
	for i := 0; i <= int(expected); i++ {
		_ = g.New(255)
	}

	actual := g.Sequence()
	if actual != expected {
		t.Errorf("expected [%d], got [%d]", expected, actual)
	}
}

func TestGenerator_FromSnapshot_Sequence(t *testing.T) {
	seq := uint32(1024)
	g, err := NewGenerator(&GeneratorSnapshot{
		SequenceMin: uint16(seq),
		Sequence:    seq,
	}, nil)
	if err != nil {
		t.Fatal(err)
	}

	expected1 := seq
	expected2 := seq + 1
	_ = g.New(255)
	actual1 := g.Sequence()
	_ = g.New(255)
	actual2 := g.Sequence()

	if actual1 != expected1 {
		t.Errorf("expected [%d], got [%d]", expected1, actual1)
	}
	if actual2 != expected2 {
		t.Errorf("expected [%d], got [%d]", expected2, actual2)
	}
}

func TestGenerator_FromSnapshot_Pool_Defaults(t *testing.T) {
	t.Parallel()

	g, err := NewGenerator(&GeneratorSnapshot{
		SequenceMin: 0,
		SequenceMax: 0,
	}, nil)
	if err != nil {
		t.Fatal(err)
	}

	if g.SequenceMin() != 0 {
		t.Errorf("expected [%d], got [%d]", 0, g.SequenceMin())
	}

	if g.SequenceMax() != MaxSequence {
		t.Errorf("expected [%d], got [%d]", MaxSequence, g.SequenceMax())
	}

	// Max as default when min is given.
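	// A zero SequenceMax in a snapshot falls back to the default (MaxSequence), with or without
	// an explicit SequenceMin - both halves of this test assert that.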
	g, err = NewGenerator(&GeneratorSnapshot{
		SequenceMin: 2048,
	}, nil)
	if err != nil {
		t.Fatal(err)
	}

	if g.SequenceMin() != 2048 {
		t.Errorf("expected [%d], got [%d]", 2048, g.SequenceMin())
	}

	if g.SequenceMax() != MaxSequence {
		t.Errorf("expected [%d], got [%d]", MaxSequence, g.SequenceMax())
	}
}

func TestGenerator_FromSnapshot_Pool_BoundsOrder(t *testing.T) {
	t.Parallel()

	g, err := NewGenerator(&GeneratorSnapshot{
		SequenceMin: 2048,
		SequenceMax: 1024,
	}, nil)
	if err != nil {
		t.Fatal(err)
	}

	if g.SequenceMin() != 1024 {
		t.Errorf("expected [%d], got [%d]", 1024, g.SequenceMin())
	}

	if g.SequenceMax() != 2048 {
		t.Errorf("expected [%d], got [%d]", 2048, g.SequenceMax())
	}
}

func TestGenerator_FromSnapshot_Pool_None(t *testing.T) {
	t.Parallel()

	bound := uint16(2048)
	_, err := NewGenerator(&GeneratorSnapshot{
		SequenceMin: bound,
		SequenceMax: bound,
	}, nil)
	if err == nil {
		t.Errorf("expected error, got none")
		return
	}

	verr, ok := err.(*InvalidSequenceBoundsError)
	if !ok {
		t.Errorf("expected error type [%T], got [%T]", &InvalidSequenceBoundsError{}, err)
		return
	}

	if verr.Msg != errSequenceBoundsIdenticalMsg {
		t.Errorf("expected error msg [%s], got [%s]", errSequenceBoundsIdenticalMsg, verr.Msg)
	}

	if verr.Min != bound {
		t.Errorf("expected [%d], got [%d]", bound, verr.Min)
	}

	if verr.Max != bound {
		t.Errorf("expected [%d], got [%d]", bound, verr.Max)
	}

	expectedMsg := fmt.Sprintf(errInvalidSequenceBoundsFmt, errSequenceBoundsIdenticalMsg, bound, 0, bound, 1)
	if verr.Error() != expectedMsg {
		t.Errorf("expected error msg [%s], got [%s]", expectedMsg, verr.Error())
	}
}

func TestGenerator_FromSnapshot_Pool_Size(t *testing.T) {
	t.Parallel()

	seqMin := uint16(0)
	seqMax := seqMin + minSequencePoolSize - 1
	_, err := NewGenerator(&GeneratorSnapshot{
		SequenceMin: seqMin,
		SequenceMax: seqMax,
	}, nil)
	if err == nil {
		t.Errorf("expected error, got none")
		return
	}

	verr, ok := err.(*InvalidSequenceBoundsError)
	if !ok {
		t.Errorf("expected error type [%T], got [%T]", &InvalidSequenceBoundsError{}, err)
		return
	}

	if verr.Msg != errSequencePoolTooSmallMsg {
		t.Errorf("expected error msg [%s], got [%s]", errSequencePoolTooSmallMsg, verr.Msg)
	}

	if verr.Min != seqMin {
		t.Errorf("expected [%d], got [%d]", seqMin, verr.Min)
	}

	if verr.Max != seqMax {
		t.Errorf("expected [%d], got [%d]", seqMax, verr.Max)
	}

	expectedMsg := fmt.Sprintf(errInvalidSequenceBoundsFmt, errSequencePoolTooSmallMsg, seqMin, 0, seqMax, seqMax-seqMin+1)
	if verr.Error() != expectedMsg {
		t.Errorf("expected error msg [%s], got [%s]", expectedMsg, verr.Error())
	}
}

func TestGenerator_FromSnapshot_Underflow(t *testing.T) {
	t.Parallel()

	seqMin := uint16(2048)
	seq := uint32(seqMin - 1)
	_, err := NewGenerator(&GeneratorSnapshot{
		SequenceMin: seqMin,
		Sequence:    seq,
	}, nil)
	if err == nil {
		t.Errorf("expected error, got none")
		return
	}

	verr, ok := err.(*InvalidSequenceBoundsError)
	if !ok {
		t.Errorf("expected error type [%T], got [%T]", &InvalidSequenceBoundsError{}, err)
		return
	}

	if verr.Msg != errSequenceUnderflowsBound {
		t.Errorf("expected error msg [%s], got [%s]", errSequenceUnderflowsBound, verr.Msg)
	}

	if verr.Min != seqMin {
		t.Errorf("expected [%d], got [%d]", seqMin, verr.Min)
	}

	if verr.Cur != seq {
		t.Errorf("expected [%d], got [%d]", seq, verr.Cur)
	}

	expectedMsg := fmt.Sprintf(errInvalidSequenceBoundsFmt, errSequenceUnderflowsBound, seqMin, seq, MaxSequence, MaxSequence-seqMin+1)
	if verr.Error() != expectedMsg {
		t.Errorf("expected error msg [%s], got [%s]", expectedMsg, verr.Error())
	}
}

func TestGenerator_Snapshot(t *testing.T) {
	var (
		part   = Partition{128, 255}
		seqMin = uint16(1024)
		seqMax = uint16(2047)
		seq    = uint32(1024)
	)

	snap := &GeneratorSnapshot{
		Partition:   part,
		SequenceMin: seqMin,
		SequenceMax: seqMax,
		Sequence:    seq,
	}

	g, err := NewGenerator(snap, nil)
	if err != nil {
		t.Fatal(err)
	}

	actual := g.Snapshot()
	if actual.Sequence != seq {
		t.Errorf("expected [%d], got [%d]", seq, actual.Sequence)
	}

	atomic.AddUint32(&g.drifts, 1)
	wallNow := snotime()
	g.New(255) // The first call will catch a zero wallHi and reset the sequence, while we want to measure an increment.
	g.New(255)
	actual = g.Snapshot()

	if uint64(actual.Now) != wallNow {
		t.Errorf("expected [%d], got [%d]", wallNow, actual.Now)
	}

	if uint64(actual.WallHi) != wallNow {
		t.Errorf("expected [%d], got [%d]", wallNow, actual.WallHi)
	}

	if actual.Drifts != 1 {
		t.Errorf("expected [%d], got [%d]", 1, actual.Drifts)
	}

	if actual.Sequence != seq+1 {
		t.Errorf("expected [%d], got [%d]", seq+1, actual.Sequence)
	}

	if actual.Partition != part {
		t.Errorf("expected [%s], got [%s]", part, actual.Partition)
	}

	if actual.SequenceMin != seqMin {
		t.Errorf("expected [%d], got [%d]", seqMin, actual.SequenceMin)
	}

	if actual.SequenceMax != seqMax {
		t.Errorf("expected [%d], got [%d]", seqMax, actual.SequenceMax)
	}
}