github.com/benz9527/xboot@v0.0.0-20240504061247-c23f15593274/lib/ipc/x_disruptor_test.go (about) 1 package ipc 2 3 import ( 4 "bytes" 5 "fmt" 6 "log/slog" 7 "math" 8 "math/rand" 9 "os" 10 "path/filepath" 11 "strconv" 12 "sync" 13 "sync/atomic" 14 "testing" 15 "time" 16 17 "github.com/stretchr/testify/assert" 18 19 "github.com/benz9527/xboot/lib/bits" 20 ) 21 22 func TestCeilCapacity(t *testing.T) { 23 testcases := []struct { 24 capacity uint64 25 ceil uint64 26 }{ 27 {0, 0}, 28 {1, 1}, 29 {2, 2}, 30 {3, 4}, 31 {4, 4}, 32 {7, 8}, 33 {8, 8}, 34 {9, 16}, 35 {16, 16}, 36 {31, 32}, 37 {32, 32}, 38 {58, 64}, 39 {64, 64}, 40 } 41 for _, tc := range testcases { 42 t.Run(fmt.Sprintf("capacity: %d, ceil: %d", tc.capacity, tc.ceil), func(t *testing.T) { 43 assert.Equal(t, tc.ceil, bits.RoundupPowOf2ByCeil(tc.capacity)) 44 }) 45 } 46 } 47 48 func testXSinglePipelineDisruptorUint64( 49 t *testing.T, gTotal, tasks int, capacity uint64, 50 bs BlockStrategy, bitmapCheck bool, 51 reportFile *os.File, errorCounter *atomic.Uint64, 52 ) { 53 var ( 54 counter = &atomic.Int64{} 55 bm bits.Bitmap 56 checkBM bits.Bitmap 57 ) 58 checkBM = bits.NewX32Bitmap(uint64(gTotal * tasks)) 59 if bitmapCheck { 60 bm = bits.NewX32Bitmap(uint64(gTotal * tasks)) 61 } 62 wg := &sync.WaitGroup{} 63 wg.Add(gTotal) 64 rwg := &sync.WaitGroup{} 65 rwg.Add(gTotal * tasks) 66 disruptor := NewXDisruptor[uint64](capacity, 67 bs, 68 func(event uint64) error { 69 defer rwg.Done() 70 counter.Add(1) 71 if bitmapCheck { 72 bm.SetBit(event) 73 } 74 return nil 75 }, 76 ) 77 if err := disruptor.Start(); err != nil { 78 t.Fatalf("disruptor start failed, err: %v", err) 79 } 80 for i := 0; i < gTotal; i++ { 81 for j := 0; j < tasks; j++ { 82 checkBM.SetBit(uint64(i*tasks + j)) 83 } 84 } 85 beginTs := time.Now() 86 for i := 0; i < gTotal; i++ { 87 go func(idx int) { 88 defer wg.Done() 89 for j := 0; j < tasks; j++ { 90 if _, _, err := disruptor.Publish(uint64(idx*tasks + j)); err != nil { 91 t.Logf("publish failed, err: %v", err) 92 if errorCounter != nil { 93 errorCounter.Add(1) 94 } 95 break 96 } 97 } 98 }(i) 99 } 100 wg.Wait() 101 diff := time.Now().Sub(beginTs) 102 tps := float64(gTotal*tasks) / diff.Seconds() 103 if math.IsInf(tps, 0) { 104 tps = 0.0 105 } 106 summary := fmt.Sprintf("published total: %d, tasks: %d, cost: %v, tps: %v/s", gTotal, tasks, diff, tps) 107 t.Log(summary) 108 if reportFile != nil { 109 _, _ = reportFile.WriteString(summary + "\n") 110 } 111 rwg.Wait() 112 if reportFile == nil { 113 time.Sleep(time.Second) 114 assert.Equal(t, int64(gTotal*tasks), counter.Load()) 115 } 116 err := disruptor.Stop() 117 assert.NoError(t, err) 118 if bitmapCheck { 119 if reportFile != nil { 120 _, _ = reportFile.WriteString(fmt.Sprintf("gTotal(%d), tasks(%d):\n", gTotal, tasks)) 121 } 122 bm1bits := bm.GetBits() 123 bm2bits := checkBM.GetBits() 124 if !bm.EqualTo(checkBM) { 125 if reportFile != nil { 126 _, _ = reportFile.WriteString("bitmap check failed by not equal!\n") 127 } 128 if errorCounter != nil { 129 errorCounter.Add(1) 130 } 131 for i := 0; i < len(bm1bits); i++ { 132 if bytes.Compare(bm1bits[i:i+1], bm2bits[i:i+1]) != 0 { 133 if reportFile != nil { 134 _, _ = reportFile.WriteString(fmt.Sprintf("idx: %d, bm1: %08b, bm2: %08b\n", i, bm1bits[i:i+1], bm2bits[i:i+1])) 135 } 136 t.Logf("idx: %d, bm1: %08b, bm2: %08b\n", i, bm1bits[i:i+1], bm2bits[i:i+1]) 137 } 138 } 139 } 140 // check store if contains zero bits 141 if reportFile != nil { 142 _, _ = reportFile.WriteString("check store whether contains zero bits(exclude the last one):\n") 143 for i := 0; i < len(bm2bits)-1; i++ { 144 if bm2bits[i]&0xf != 0xf { 145 _, _ = reportFile.WriteString(fmt.Sprintf("store idx: %d, bm2: %08b\n", i, bm2bits[i:i+1])) 146 } 147 } 148 _, _ = reportFile.WriteString("====== end report ======\n") 149 } 150 } 151 if bm != nil { 152 bm.Purge() 153 } 154 if checkBM != nil { 155 checkBM.Purge() 156 } 157 } 158 159 func testXDisruptorString(t *testing.T, gTotal, tasks int, capacity uint64, bs BlockStrategy, bitmapCheck bool, reportFile *os.File, errorCounter *atomic.Uint64) { 160 var ( 161 counter = &atomic.Int64{} 162 bm bits.Bitmap 163 checkBM bits.Bitmap 164 ) 165 if bitmapCheck { 166 bm = bits.NewX32Bitmap(uint64(gTotal * tasks)) 167 checkBM = bits.NewX32Bitmap(uint64(gTotal * tasks)) 168 } 169 wg := &sync.WaitGroup{} 170 wg.Add(gTotal) 171 rwg := &sync.WaitGroup{} 172 rwg.Add(gTotal * tasks) 173 disruptor := NewXDisruptor[string](capacity, 174 bs, 175 func(event string) error { 176 defer func() { 177 if r := recover(); r != nil { 178 t.Logf("error panic: %v", r) 179 if reportFile != nil { 180 _, _ = reportFile.WriteString(fmt.Sprintf("error panic: %v\n", r)) 181 } 182 if errorCounter != nil { 183 errorCounter.Add(1) 184 } 185 } 186 rwg.Done() 187 }() 188 counter.Add(1) 189 if bitmapCheck { 190 e, err := strconv.ParseUint(event, 10, 64) 191 if err != nil { 192 t.Logf("error parse uint64 failed, err: %v", err) 193 if reportFile != nil { 194 _, _ = reportFile.WriteString(fmt.Sprintf("error parse uint64 failed, err: %v\n", err)) 195 } 196 } 197 bm.SetBit(e) 198 } 199 if event == "" { 200 t.Logf("error event is empty, counter: %d", counter.Load()) 201 } 202 return nil 203 }, 204 ) 205 if err := disruptor.Start(); err != nil { 206 t.Fatalf("disruptor start failed, err: %v", err) 207 } 208 for i := 0; i < gTotal; i++ { 209 for j := 0; j < tasks; j++ { 210 checkBM.SetBit(uint64(i*tasks + j)) 211 } 212 } 213 beginTs := time.Now() 214 for i := 0; i < gTotal; i++ { 215 go func(idx int) { 216 defer wg.Done() 217 for j := 0; j < tasks; j++ { 218 if _, _, err := disruptor.Publish(fmt.Sprintf("%d", idx*tasks+j)); err != nil { 219 t.Logf("publish failed, err: %v", err) 220 if errorCounter != nil { 221 errorCounter.Add(1) 222 } 223 break 224 } 225 } 226 }(i) 227 } 228 wg.Wait() 229 diff := time.Now().Sub(beginTs) 230 tps := float64(gTotal*tasks) / diff.Seconds() 231 if math.IsInf(tps, 1) { 232 tps = 0.0 233 } 234 summary := fmt.Sprintf("published total: %d, tasks: %d, cost: %v, tps: %v/s", gTotal, tasks, diff, tps) 235 t.Log(summary) 236 if reportFile != nil { 237 _, _ = reportFile.WriteString(summary + "\n") 238 } 239 rwg.Wait() 240 if reportFile == nil { 241 time.Sleep(time.Second) 242 assert.Equal(t, int64(gTotal*tasks), counter.Load()) 243 } 244 err := disruptor.Stop() 245 assert.NoError(t, err) 246 if bitmapCheck { 247 if reportFile != nil { 248 _, _ = reportFile.WriteString(fmt.Sprintf("gTotal(%d), tasks(%d):\n", gTotal, tasks)) 249 } 250 bm1bits := bm.GetBits() 251 bm2bits := checkBM.GetBits() 252 if !bm.EqualTo(checkBM) { 253 if reportFile != nil { 254 _, _ = reportFile.WriteString("bitmap check failed by not equal!\n") 255 } 256 if errorCounter != nil { 257 errorCounter.Add(1) 258 } 259 for i := 0; i < len(bm1bits); i++ { 260 if bytes.Compare(bm1bits[i:i+1], bm2bits[i:i+1]) != 0 { 261 if reportFile != nil { 262 _, _ = reportFile.WriteString(fmt.Sprintf("error store idx: %d, bm1: %08b, bm2: %08b\n", i, bm1bits[i:i+1], bm2bits[i:i+1])) 263 } 264 t.Logf("idx: %d, bm1: %08b, bm2: %08b\n", i, bm1bits[i:i+1], bm2bits[i:i+1]) 265 } 266 } 267 } 268 // check store if contains zero bits 269 if reportFile != nil { 270 _, _ = reportFile.WriteString("check store whether contains zero bits(exclude the last one):\n") 271 for i := 0; i < len(bm2bits)-1; i++ { 272 if bm2bits[i]&0xf != 0xf { 273 _, _ = reportFile.WriteString(fmt.Sprintf("error store idx: %d, bm2: %08b\n", i, bm2bits[i:i+1])) 274 } 275 } 276 _, _ = reportFile.WriteString("====== end report ======\n") 277 } 278 } 279 if bm != nil { 280 bm.Purge() 281 } 282 if checkBM != nil { 283 checkBM.Purge() 284 } 285 } 286 287 func TestXDisruptor_DataRace(t *testing.T) { 288 testcases := []struct { 289 name string 290 gTotal int 291 tasks int 292 bs BlockStrategy 293 }{ 294 {"gosched 10*100", 10, 100, NewXGoSchedBlockStrategy()}, 295 {"gosched 100*10000", 100, 10000, NewXGoSchedBlockStrategy()}, 296 {"gosched 500*10000", 500, 10000, NewXGoSchedBlockStrategy()}, 297 {"gosched 1000*10000", 1000, 10000, NewXGoSchedBlockStrategy()}, 298 {"gosched 5000*10000", 5000, 10000, NewXGoSchedBlockStrategy()}, 299 {"gosched 10000*10000", 10000, 10000, NewXGoSchedBlockStrategy()}, 300 {"nochan 5000*10000", 5000, 10000, NewXCacheChannelBlockStrategy()}, 301 {"cond 5000*10000", 5000, 10000, NewXCondBlockStrategy()}, 302 } 303 for _, tc := range testcases { 304 t.Run(tc.name, func(t *testing.T) { 305 testXSinglePipelineDisruptorUint64(t, tc.gTotal, tc.tasks, 1024*1024, tc.bs, false, nil, nil) 306 }) 307 } 308 } 309 310 func TestXDisruptorWithBitmapCheck_DataRace(t *testing.T) { 311 testcases := []struct { 312 name string 313 gTotal int 314 tasks int 315 bs BlockStrategy 316 }{ 317 {"gosched 1*10000", 1, 10000, NewXGoSchedBlockStrategy()}, 318 {"nocachech 1*10000", 1, 10000, NewXCacheChannelBlockStrategy()}, 319 {"cond 1*10000", 1, 10000, NewXCondBlockStrategy()}, 320 //{"gosched 10*100", 10, 100, NewXGoSchedBlockStrategy()}, 321 //{"gosched 100*10000", 100, 10000, NewXGoSchedBlockStrategy()}, 322 //{"gosched 500*10000", 500, 10000, NewXGoSchedBlockStrategy()}, 323 //{"gosched 1000*10000", 1000, 10000, NewXGoSchedBlockStrategy()}, 324 //{"gosched 5000*10000", 5000, 10000, NewXGoSchedBlockStrategy()}, 325 //{"gosched 10000*10000", 10000, 10000, NewXGoSchedBlockStrategy()}, 326 //{"nochan 5000*10000", 5000, 10000, NewXCacheChannelBlockStrategy()}, 327 //{"cond 5000*10000", 5000, 10000, NewXCondBlockStrategy()}, 328 } 329 for _, tc := range testcases { 330 t.Run(tc.name, func(t *testing.T) { 331 testXSinglePipelineDisruptorUint64(t, tc.gTotal, tc.tasks, 1024*1024, tc.bs, true, nil, nil) 332 }) 333 } 334 } 335 336 func TestXDisruptorWithBitmapCheckAndReport_DataRace(t *testing.T) { 337 errorCounter := &atomic.Uint64{} 338 reportFile, err := os.OpenFile(filepath.Join(os.TempDir(), "pubsub-report-"+time.Now().Format("2006-01-02_15_04_05")+".txt"), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o644) 339 defer func() { 340 if reportFile != nil { 341 _ = reportFile.Close() 342 } 343 }() 344 assert.NoError(t, err) 345 testcases := []struct { 346 name string 347 loop int 348 gTotal int 349 tasks int 350 capacity uint64 351 bs BlockStrategy 352 }{ 353 {"gosched 1*10000", 100, 1, 10000, 1024, NewXGoSchedBlockStrategy()}, 354 {"gosched 10*100", 100, 10, 100, 512, NewXGoSchedBlockStrategy()}, 355 {"gosched 10*100", 100, 10, 100, 1024, NewXGoSchedBlockStrategy()}, 356 {"gosched 100*10000", 50, 100, 10000, 1024 * 1024, NewXGoSchedBlockStrategy()}, 357 {"gosched 500*10000", 10, 500, 10000, 1024 * 1024, NewXGoSchedBlockStrategy()}, 358 {"gosched 1000*10000", 10, 1000, 10000, 1024 * 1024, NewXGoSchedBlockStrategy()}, 359 {"gosched 5000*10000", 10, 5000, 10000, 1024 * 1024, NewXGoSchedBlockStrategy()}, 360 {"gosched 10000*10000", 5, 10000, 10000, 1024 * 1024, NewXGoSchedBlockStrategy()}, 361 //{"chan 1*10000", 10000, 1, 10000, 1024, NewXCacheChannelBlockStrategy()}, 362 //{"chan 10*100", 1000, 10, 100, 512, NewXCacheChannelBlockStrategy()}, 363 //{"chan 10*100", 1000, 10, 100, 1024, NewXCacheChannelBlockStrategy()}, 364 //{"chan 100*10000", 200, 100, 10000, 1024 * 1024, NewXCacheChannelBlockStrategy()}, 365 //{"chan 500*10000", 10, 500, 10000, 1024 * 1024, NewXCacheChannelBlockStrategy()}, 366 //{"chan 1000*10000", 10, 1000, 10000, 1024 * 1024, NewXCacheChannelBlockStrategy()}, 367 //{"chan 5000*10000", 10, 5000, 10000, 1024 * 1024, NewXCacheChannelBlockStrategy()}, 368 //{"chan 10000*10000", 5, 10000, 10000, 1024 * 1024, NewXCacheChannelBlockStrategy()}, 369 //{"cond 1*10000", 10000, 1, 10000, 1024, NewXCondBlockStrategy()}, 370 } 371 for _, tc := range testcases { 372 t.Run(tc.name, func(t *testing.T) { 373 for i := 0; i < tc.loop; i++ { 374 t.Log(i) 375 _, _ = reportFile.WriteString(fmt.Sprintf("\n====== begin uint64 report(%s, %d) ======\n", tc.name, i)) 376 testXSinglePipelineDisruptorUint64(t, tc.gTotal, tc.tasks, tc.capacity, tc.bs, true, reportFile, errorCounter) 377 } 378 }) 379 } 380 } 381 382 func TestXDisruptorWithBitmapCheckAndReport_Str_DataRace(t *testing.T) { 383 errorCounter := &atomic.Uint64{} 384 reportFile, err := os.OpenFile(filepath.Join(os.TempDir(), "pubsub-report-str-"+time.Now().Format("2006-01-02_15_04_05")+".txt"), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o644) 385 defer func() { 386 if reportFile != nil { 387 _ = reportFile.Close() 388 } 389 }() 390 assert.NoError(t, err) 391 testcases := []struct { 392 name string 393 loop int 394 gTotal int 395 tasks int 396 capacity uint64 397 bs BlockStrategy 398 }{ 399 {"gosched 1*10000 str", 100, 1, 10000, 1024, NewXGoSchedBlockStrategy()}, 400 {"gosched 10*100 str", 100, 10, 100, 512, NewXGoSchedBlockStrategy()}, 401 {"gosched 10*100 str", 100, 10, 100, 1024, NewXGoSchedBlockStrategy()}, 402 {"gosched 100*10000 str", 50, 100, 10000, 1024 * 1024, NewXGoSchedBlockStrategy()}, 403 {"gosched 500*10000 str", 50, 500, 10000, 1024 * 1024, NewXGoSchedBlockStrategy()}, 404 {"gosched 1000*10000 str", 10, 1000, 10000, 1024 * 1024, NewXGoSchedBlockStrategy()}, 405 {"gosched 5000*10000 str", 10, 5000, 10000, 1024 * 1024, NewXGoSchedBlockStrategy()}, 406 {"gosched 10000*10000 str", 5, 10000, 10000, 1024 * 1024, NewXGoSchedBlockStrategy()}, 407 //{"chan 1*10000 str", 1000, 1, 10000, 1024, NewXCacheChannelBlockStrategy()}, 408 //{"chan 10*100 str", 1000, 10, 100, 512, NewXCacheChannelBlockStrategy()}, 409 //{"chan 10*100 str", 1000, 10, 100, 1024, NewXCacheChannelBlockStrategy()}, 410 //{"chan 100*10000 str", 1000, 100, 10000, 1024 * 1024, NewXCacheChannelBlockStrategy()}, 411 //{"chan 500*10000 str", 10, 500, 10000, 1024 * 1024, NewXCacheChannelBlockStrategy()}, 412 //{"chan 1000*10000 str", 10, 1000, 10000, 1024 * 1024, NewXCacheChannelBlockStrategy()}, 413 //{"chan 5000*10000 str", 10, 5000, 10000, 1024 * 1024, NewXCacheChannelBlockStrategy()}, 414 //{"chan 10000*10000 str", 5, 10000, 10000, 1024 * 1024, NewXCacheChannelBlockStrategy()}, 415 //{"cond 1*10000 str", 1000, 1, 10000, 1024, NewXCondBlockStrategy()}, 416 } 417 for _, tc := range testcases { 418 t.Run(tc.name, func(t *testing.T) { 419 for i := 0; i < tc.loop; i++ { 420 _, _ = reportFile.WriteString(fmt.Sprintf("\n====== begin string report(%s, %d) ======\n", tc.name, i)) 421 testXDisruptorString(t, tc.gTotal, tc.tasks, tc.capacity, tc.bs, true, reportFile, errorCounter) 422 } 423 }) 424 } 425 t.Logf("errors: %d\n", errorCounter.Load()) 426 } 427 428 func testNoCacheChannel(t *testing.T, chSize, gTotal, tasks int) { 429 _, debugLogDisabled := os.LookupEnv("DISABLE_TEST_DEBUG_LOG") 430 counter := &atomic.Int64{} 431 wg := &sync.WaitGroup{} 432 wg.Add(gTotal) 433 var ch chan int 434 if chSize > 0 { 435 ch = make(chan int, chSize) 436 } else { 437 ch = make(chan int) 438 } 439 go func() { 440 for range ch { 441 counter.Add(1) 442 } 443 }() 444 beginTs := time.Now() 445 for i := 0; i < gTotal; i++ { 446 go func() { 447 defer wg.Done() 448 for j := 0; j < tasks; j++ { 449 ch <- j 450 } 451 }() 452 } 453 wg.Wait() 454 diff := time.Now().Sub(beginTs) 455 if !debugLogDisabled { 456 t.Logf("total: %d, tasks: %d, cost: %v, tps: %v/s", gTotal, tasks, diff, float64(gTotal*tasks)/diff.Seconds()) 457 } 458 time.Sleep(time.Second) 459 assert.Equal(t, int64(gTotal*tasks), counter.Load()) 460 } 461 462 func TestNoCacheChannel_DataRace(t *testing.T) { 463 testcases := []struct { 464 name string 465 gTotal int 466 tasks int 467 }{ 468 {"nochan 10*100", 10, 100}, 469 {"nochan 100*10000", 100, 10000}, 470 {"nochan 500*10000", 500, 10000}, 471 {"nochan 1000*10000", 1000, 10000}, 472 {"nochan 5000*10000", 5000, 10000}, 473 {"nochan 10000*10000", 10000, 10000}, 474 } 475 for _, tc := range testcases { 476 t.Run(tc.name, func(t *testing.T) { 477 testNoCacheChannel(t, 0, tc.gTotal, tc.tasks) 478 }) 479 } 480 } 481 482 func TestCacheChannel_DataRace(t *testing.T) { 483 testcases := []struct { 484 name string 485 gTotal int 486 tasks int 487 }{ 488 {"cachechan 10*100", 10, 100}, 489 {"cachechan 100*10000", 100, 10000}, 490 {"cachechan 500*10000", 500, 10000}, 491 {"cachechan 1000*10000", 1000, 10000}, 492 {"cachechan 5000*10000", 5000, 10000}, 493 {"cachechan 10000*10000", 10000, 10000}, 494 } 495 for _, tc := range testcases { 496 t.Run(tc.name, func(t *testing.T) { 497 testNoCacheChannel(t, 1024*1024, tc.gTotal, tc.tasks) 498 }) 499 } 500 } 501 502 func testXDisruptorWithRandomSleep(t *testing.T, num, capacity int) { 503 wg := &sync.WaitGroup{} 504 wg.Add(num) 505 results := map[string]struct{}{} 506 disruptor := NewXDisruptor[string](uint64(capacity), 507 NewXCacheChannelBlockStrategy(), 508 func(event string) error { 509 nextInt := rand.Intn(100) 510 time.Sleep(time.Duration(nextInt) * time.Millisecond) 511 results[event] = struct{}{} 512 wg.Done() 513 return nil 514 }, 515 ) 516 if err := disruptor.Start(); err != nil { 517 t.Fatalf("disruptor start failed, err: %v", err) 518 } 519 for i := 0; i < num; i++ { 520 if _, _, err := disruptor.Publish(fmt.Sprintf("event-%d", i)); err != nil { 521 t.Logf("publish failed, err: %v", err) 522 } 523 } 524 wg.Wait() 525 err := disruptor.Stop() 526 assert.NoError(t, err) 527 assert.Equal(t, num, len(results)) 528 for i := 0; i < num; i++ { 529 assert.Contains(t, results, fmt.Sprintf("event-%d", i)) 530 } 531 } 532 533 func TestXDisruptorWithRandomSleepEvent(t *testing.T) { 534 testcases := []struct { 535 num int 536 capacity int 537 }{ 538 {10, 2}, 539 {100, 4}, 540 {200, 10}, 541 {500, 20}, 542 } 543 loops := 2 544 for i := 0; i < loops; i++ { 545 for _, tc := range testcases { 546 t.Run(fmt.Sprintf("num: %d, capacity: %d", tc.num, tc.capacity), func(t *testing.T) { 547 testXDisruptorWithRandomSleep(t, tc.num, tc.capacity) 548 }) 549 } 550 } 551 } 552 553 func TestXDisruptor_PublishTimeout(t *testing.T) { 554 num := 10 555 disruptor := NewXDisruptor[string](2, 556 NewXGoSchedBlockStrategy(), 557 func(event string) error { 558 nextInt := rand.Intn(10) 559 if nextInt == 0 { 560 nextInt = 2 561 } 562 time.Sleep(time.Duration(nextInt) * time.Millisecond) 563 slog.Info("handle event details", "name", event) 564 return nil 565 }, 566 ) 567 if err := disruptor.Start(); err != nil { 568 t.Fatalf("disruptor start failed, err: %v", err) 569 } 570 for i := 0; i < num; i++ { 571 event := fmt.Sprintf("event-%d", i) 572 disruptor.PublishTimeout(event, 5*time.Millisecond) 573 } 574 time.Sleep(500 * time.Millisecond) 575 err := disruptor.Stop() 576 assert.NoError(t, err) 577 }