github.com/mailru/activerecord@v1.12.2/pkg/iproto/util/pool/pool_test.go

     1  //go:build fixme
     2  
     3  // Excluded from builds: linking fails with "undefined reference to 'sync.(*WaitGroup).state'" with gcc on Go 1.20 because of the go:linkname at the bottom of this file.
     4  
     5  package pool
     6  
     7  import (
     8  	"fmt"
     9  	"runtime"
    10  	"sync"
    11  	"sync/atomic"
    12  	"testing"
    13  	"time"
    14  	_ "unsafe" // for go:linkname
    15  
    16  	"golang.org/x/net/context"
    17  )
    18  
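        // TestPoolScheduleAfterClose verifies that Schedule on an already closed
        // pool fails with ErrPoolClosed.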
    19  func TestPoolScheduleAfterClose(t *testing.T) {
    20  	for _, test := range []struct {
    21  		name   string
    22  		config *Config
    23  	}{
    24  		{
    25  			config: &Config{
    26  				UnstoppableWorkers: 5,
    27  				MaxWorkers:         10,
    28  				WorkQueueSize:      5,
    29  			},
    30  		},
    31  		{
    32  			config: &Config{
    33  				UnstoppableWorkers: 0,
    34  				MaxWorkers:         0,
    35  			},
    36  		},
    37  		{
    38  			config: &Config{
    39  				UnstoppableWorkers: 1,
    40  				MaxWorkers:         0,
    41  			},
    42  		},
    43  	} {
    44  		t.Run(test.name, func(t *testing.T) {
    45  			p := Must(New(test.config))
    46  			p.Close()
    47  			if err := p.Schedule(TaskFunc(nil)); err != ErrPoolClosed {
    48  				t.Errorf("error is %v; want %v", err, ErrPoolClosed)
    49  			}
    50  		})
    51  	}
    52  }
    53  
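        // TestPoolWait schedules a number of blocking tasks, releases them after a
        // delay and checks that wait() does not return before every queued task
        // has completed.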
    54  func TestPoolWait(t *testing.T) {
    55  	for _, test := range []struct {
    56  		name     string
    57  		config   *Config
    58  		schedule int
    59  		delay    time.Duration
    60  		err      error
    61  		timeout  time.Duration
    62  	}{
    63  		{
    64  			schedule: 15,
    65  			config: &Config{
    66  				UnstoppableWorkers: 5,
    67  				MaxWorkers:         10,
    68  				ExtraWorkerTTL:     time.Millisecond,
    69  				WorkQueueSize:      5,
    70  			},
    71  		},
    72  		{
    73  			schedule: 3,
    74  			config: &Config{
    75  				UnstoppableWorkers: 5,
    76  				MaxWorkers:         10,
    77  				ExtraWorkerTTL:     time.Millisecond,
    78  				WorkQueueSize:      5,
    79  			},
    80  		},
    81  		{
    82  			schedule: 5,
    83  			config: &Config{
    84  				UnstoppableWorkers: 1,
    85  				MaxWorkers:         1,
    86  				ExtraWorkerTTL:     time.Millisecond,
    87  				WorkQueueSize:      5,
    88  			},
    89  		},
    90  	} {
    91  		t.Run(test.name, func(t *testing.T) {
    92  			p := Must(New(test.config))
    93  
    94  			n := test.schedule
    95  			m := new(int32)
    96  
    97  			release := make(chan struct{})
    98  			for i := 0; i < n; i++ {
    99  				err := p.ScheduleTimeout(time.Second, TaskFunc(func() {
   100  					<-release
   101  					atomic.AddInt32(m, 1)
   102  				}))
   103  				if err != nil {
   104  					t.Fatal(err)
   105  				}
   106  			}
   107  			time.AfterFunc(test.delay, func() {
   108  				close(release)
   109  			})
   110  
   111  			var timeout <-chan time.Time
   112  			if tm := test.timeout; tm != 0 {
   113  				timeout = time.After(tm)
   114  			}
   115  			err := p.wait(timeout, nil)
   116  			if err != test.err {
   117  				t.Errorf("unexpected error: %v; want %v", err, test.err)
   118  			}
   119  			if m := int(atomic.LoadInt32(m)); m != n {
    120  				t.Errorf("wait returned before all queued tasks completed: %d of %d done", m, n)
   121  			}
   122  		})
   123  	}
   124  }
   125  
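        // TestPoolScheduleOnClosedPool races concurrent Schedule calls against
        // Close and checks that every call either runs its task or returns an
        // error instead of hanging.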
   126  func TestPoolScheduleOnClosedPool(t *testing.T) {
   127  	for _, test := range []struct {
   128  		name   string
   129  		config *Config
   130  	}{
   131  		{
   132  			config: &Config{
   133  				MaxWorkers:         0,
   134  				UnstoppableWorkers: 1,
   135  				WorkQueueSize:      0,
   136  			},
   137  		},
   138  		{
   139  			config: &Config{
   140  				MaxWorkers:         1,
   141  				UnstoppableWorkers: 1,
   142  				WorkQueueSize:      1,
   143  			},
   144  		},
   145  		{
   146  			config: &Config{
   147  				MaxWorkers:         50,
   148  				UnstoppableWorkers: 10,
   149  				WorkQueueSize:      10,
   150  			},
   151  		},
   152  	} {
   153  		t.Run(test.name, func(t *testing.T) {
   154  			release := make(chan struct{})
   155  			test.config.OnTaskIn = func() {
   156  				<-release
   157  			}
   158  
   159  			p := Must(New(test.config))
   160  			time.AfterFunc(time.Millisecond, func() {
   161  				go p.Close()
   162  				close(release)
   163  			})
   164  
   165  			scheduled := make(chan error, (test.config.WorkQueueSize+1)*5)
   166  			for i := 0; i < cap(scheduled); i++ {
   167  				go func() {
   168  					err := p.Schedule(TaskFunc(func() {
   169  						scheduled <- nil
   170  					}))
   171  					if err != nil {
   172  						scheduled <- err
   173  					}
   174  				}()
   175  			}
   176  			timeout := time.After(time.Second)
   177  			for i := 0; i < cap(scheduled); i++ {
   178  				select {
    179  				case <-scheduled:
    180  					// Either the task ran (nil was sent) or Schedule returned an error; both are fine here.
    181  				case <-timeout:
    182  					t.Errorf("task was not scheduled within 1s")
   183  				}
   184  			}
   185  
   186  			<-p.Done()
   187  		})
   188  	}
   189  }
   190  
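        // TestPoolBarrierOnClosedPool checks that on a pool being closed Barrier()
        // returns at roughly the same time as Done() is fulfilled.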
   191  func TestPoolBarrierOnClosedPool(t *testing.T) {
   192  	p := Must(New(&Config{
   193  		MaxWorkers:         1,
   194  		UnstoppableWorkers: 1,
   195  	}))
   196  
   197  	release := make(chan struct{})
   198  	_ = p.Schedule(TaskFunc(func() {
   199  		<-release
   200  	}))
    201  	// After the release channel is closed, we expect Done() to be fulfilled
    202  	// and Barrier() to return at roughly the same time.
   203  	time.AfterFunc(10*time.Millisecond, func() {
   204  		close(release)
   205  	})
   206  
    207  	// Do not wait for Close() to return.
   208  	go p.Close()
   209  
   210  	timeline := make(chan time.Time, 2)
   211  	go func() {
   212  		p.Barrier()
   213  		timeline <- time.Now()
   214  	}()
   215  	go func() {
   216  		<-p.Done()
   217  		timeline <- time.Now()
   218  	}()
   219  
   220  	a := <-timeline
   221  	b := <-timeline
   222  
   223  	diff := a.Sub(b).Nanoseconds()
   224  	if act, max := time.Duration(int64abs(diff)), 500*time.Microsecond; act > max {
   225  		t.Errorf(
   226  			"difference between Done() and Barrier() is %v; want at most %v",
   227  			act, max,
   228  		)
   229  	}
   230  }
   231  
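        // TestPoolWaitOnClosedPool checks that on a pool being closed Wait()
        // returns at roughly the same time as Done() is fulfilled.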
   232  func TestPoolWaitOnClosedPool(t *testing.T) {
   233  	t.Skip("Unstable test")
   234  	p := Must(New(&Config{
   235  		MaxWorkers:         1,
   236  		UnstoppableWorkers: 1,
   237  	}))
   238  
   239  	release := make(chan struct{})
   240  	_ = p.Schedule(TaskFunc(func() {
   241  		<-release
   242  	}))
    243  	// After the release channel is closed, we expect Done() to be fulfilled
    244  	// and Wait() to return at roughly the same time.
   245  	time.AfterFunc(10*time.Millisecond, func() {
   246  		close(release)
   247  	})
   248  
    249  	// Do not wait for Close() to return.
   250  	go p.Close()
   251  
   252  	timeline := make(chan time.Time, 2)
   253  	go func() {
   254  		p.Wait()
   255  		timeline <- time.Now()
   256  	}()
   257  	go func() {
   258  		<-p.Done()
   259  		timeline <- time.Now()
   260  	}()
   261  
   262  	a := <-timeline
   263  	b := <-timeline
   264  
   265  	diff := a.Sub(b).Nanoseconds()
   266  	if act, max := time.Duration(int64abs(diff)), 500*time.Microsecond; act > max {
   267  		t.Errorf(
   268  			"difference between Done() and Wait() is %v; want at most %v",
   269  			act, max,
   270  		)
   271  	}
   272  }
   273  
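        // TestPoolTwiceClose checks that calling Close() from two goroutines is
        // safe and that both calls return at about the same time.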
   274  func TestPoolTwiceClose(t *testing.T) {
   275  	p := Must(New(&Config{
   276  		MaxWorkers:         1,
   277  		UnstoppableWorkers: 1,
   278  	}))
   279  
   280  	release := make(chan struct{})
   281  
   282  	_ = p.Schedule(TaskFunc(func() {
   283  		<-release
   284  	}))
   285  
   286  	time.AfterFunc(10*time.Millisecond, func() {
   287  		close(release)
   288  	})
   289  
   290  	timeline := make(chan time.Time, 2)
   291  	go func() {
   292  		p.Close()
   293  		timeline <- time.Now()
   294  	}()
   295  	go func() {
   296  		p.Close()
   297  		timeline <- time.Now()
   298  	}()
   299  
   300  	a := <-timeline
   301  	b := <-timeline
   302  
   303  	diff := a.Sub(b).Nanoseconds()
   304  	if act, max := time.Duration(int64abs(diff)), 100*time.Microsecond; act > max {
   305  		t.Errorf(
    306  			"difference between the two Close() returns is %v; want at most %v",
   307  			act, max,
   308  		)
   309  	}
   310  }
   311  
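        // int64abs returns the absolute value of v using branch-free bit tricks.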
   312  func int64abs(v int64) int64 {
    313  	m := v >> 63 // m is all ones for negative v, all zeros otherwise.
    314  	v ^= m       // Yields ^v for negative v, leaves v unchanged otherwise.
    315  	v -= m       // Subtracts -1 (adds one) for negative v, subtracts 0 otherwise.
   316  	return v
   317  }
   318  
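        // TestPoolMulticast checks that multicast delivers a task to every worker:
        // the idle worker runs it immediately, and the busy one runs it right after
        // finishing its current task.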
   319  func TestPoolMulticast(t *testing.T) {
   320  	p := Must(New(&Config{
   321  		MaxWorkers:         2,
   322  		UnstoppableWorkers: 2,
   323  		WorkQueueSize:      0,
   324  	}))
   325  
   326  	var (
   327  		seq     = make(chan string)
   328  		release = make(chan struct{})
   329  	)
   330  
   331  	// Lock first worker.
   332  	_ = p.Schedule(TaskFunc(func() {
   333  		<-release
   334  		seq <- "unicast"
   335  	}))
   336  
    337  	// Send the multicast task to every worker. We expect that the second
    338  	// worker runs it immediately.
   339  	n, err := p.multicast(TaskFunc(func() {
   340  		seq <- "multicast"
   341  	}), nil, nil)
   342  
   343  	if err != nil {
   344  		t.Fatal(err)
   345  	}
   346  
   347  	if n != 2 {
   348  		t.Fatalf("multicast task sent to %d workers; want %d", n, 2)
   349  	}
   350  
    351  	// Prepare to store the order of execution.
   352  	var tasks [3]string
   353  
    354  	// Note that because the worker's direct channel is buffered, the scheduler
    355  	// will usually not run the worker goroutine immediately. That's why we need
    356  	// to force scheduling by blocking on the seq channel.
   357  	select {
   358  	case tasks[0] = <-seq:
   359  	case <-time.After(time.Second):
    360  		t.Fatalf("no action within a second")
   361  	}
   362  
    363  	// Release the first worker. We expect that after its current task is done
    364  	// it will run the multicast task.
   365  	close(release)
   366  	tasks[1] = <-seq
   367  	tasks[2] = <-seq
   368  
   369  	if tasks != [3]string{"multicast", "unicast", "multicast"} {
   370  		t.Fatalf("unexpected order of task execution: %v", tasks)
   371  	}
   372  }
   373  
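        // TestPoolBarrier checks that Barrier() flushes in-flight tasks: after the
        // counter is swapped and Barrier() returns, the old counter must not be
        // incremented anymore.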
   374  func TestPoolBarrier(t *testing.T) {
   375  	p := Must(New(&Config{
   376  		MaxWorkers:         32,
   377  		UnstoppableWorkers: 32,
   378  		WorkQueueSize:      32,
   379  	}))
   380  
   381  	// Prepare two counters, which will be used before and after Barrier().
   382  	a := new(int32)
   383  	b := new(int32)
   384  
   385  	var counter atomic.Value
   386  	counter.Store(a)
   387  
   388  	for i := 0; i < runtime.NumCPU(); i++ {
   389  		go func() {
   390  			runtime.LockOSThread()
   391  			var err error
   392  			for err == nil {
   393  				err = p.ScheduleCustom(nil, TaskFunc(func() {
   394  					n := counter.Load().(*int32)
   395  					// Add some delay to imitate some real work.
   396  					time.Sleep(time.Microsecond)
   397  					atomic.AddInt32(n, 1)
   398  				}))
   399  			}
   400  		}()
   401  	}
   402  
    403  	// Let the workers increment the A counter.
   404  	time.Sleep(time.Millisecond * 50)
   405  
   406  	// Swap counter. Note that after this, workers could increment both
   407  	// counters.
   408  	counter.Store(b)
   409  
    410  	// Barrier the workers. After that, all workers MUST increment only the B counter.
   411  	p.Barrier()
   412  
   413  	// Load last value of A counter.
   414  	x := atomic.LoadInt32(a)
   415  
    416  	// Let the workers increment the B counter.
   417  	time.Sleep(time.Millisecond * 50)
   418  
   419  	// Stop the pool.
   420  	p.Close()
   421  
   422  	if n := atomic.LoadInt32(a); n != x {
   423  		t.Fatalf("counter has been changed after Barrier(); %d != %d", n, x)
   424  	}
   425  }
   426  
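        // TestPoolNew checks config validation in New and the number of workers
        // spawned right after construction.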
   427  func TestPoolNew(t *testing.T) {
   428  	for i, test := range []struct {
   429  		config *Config
   430  		spawn  int
   431  		err    bool
   432  	}{
   433  		{
   434  			config: &Config{MaxWorkers: 1, UnstoppableWorkers: 2},
   435  			err:    true,
   436  		},
   437  		{
   438  			config: &Config{MaxWorkers: 0},
   439  			err:    false,
   440  		},
   441  		{
   442  			config: &Config{MaxWorkers: 1, UnstoppableWorkers: 2},
   443  			err:    true,
   444  		},
   445  		{
   446  			config: &Config{MaxWorkers: 1, UnstoppableWorkers: 0, WorkQueueSize: 1},
   447  			err:    true,
   448  		},
   449  		{
   450  			config: &Config{MaxWorkers: 1, UnstoppableWorkers: 0},
   451  			spawn:  0,
   452  		},
   453  		{
   454  			config: &Config{MaxWorkers: 1, UnstoppableWorkers: 1},
   455  			spawn:  1,
   456  		},
   457  		{
   458  			config: &Config{MaxWorkers: 8, UnstoppableWorkers: 4},
   459  			spawn:  4,
   460  		},
   461  	} {
   462  		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
   463  			p, err := New(test.config)
   464  			if test.err && err == nil {
   465  				t.Errorf("expected error, got nil")
   466  			}
   467  			if !test.err && err != nil {
   468  				t.Errorf("unexpected error: %s", err)
   469  			}
   470  			if err != nil {
   471  				return
   472  			}
   473  			if n := len(p.sem); n != test.spawn {
   474  				t.Errorf("spawned %d goroutines; want %d", n, test.spawn)
   475  			}
   476  		})
   477  	}
   478  }
   479  
    480  // TODO: fix linker error: /usr/bin/ld: go.go:(.text+0x11f9f3): undefined reference to 'sync.(*WaitGroup).state'
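        // _TestPoolSchedule (disabled, see the TODO above) checks how many workers
        // are spawned before and after scheduling tasks and how many extra workers
        // are killed once ExtraWorkerTTL expires.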
   481  func _TestPoolSchedule(t *testing.T) {
   482  	for i, test := range []struct {
   483  		config      *Config
   484  		tasks       int
   485  		spawnBefore int
   486  		spawnAfter  int
   487  		kill        int
   488  		cbDelay     time.Duration
   489  		sleep       time.Duration
   490  	}{
   491  		{
   492  			config: &Config{
   493  				MaxWorkers:     4,
   494  				WorkQueueSize:  0,
   495  				ExtraWorkerTTL: time.Millisecond * 100,
   496  			},
   497  			tasks:       4,
   498  			spawnBefore: 0,
   499  			spawnAfter:  4,
   500  			kill:        4,
   501  
   502  			cbDelay: time.Millisecond * 2,
   503  			sleep:   time.Millisecond * 500,
   504  		},
   505  		{
   506  			config: &Config{
   507  				UnstoppableWorkers: 2,
   508  				MaxWorkers:         4,
   509  				WorkQueueSize:      4,
   510  				ExtraWorkerTTL:     time.Millisecond,
   511  			},
   512  			tasks:       4,
   513  			spawnBefore: 2,
   514  			spawnAfter:  0,
   515  			kill:        0,
   516  
   517  			cbDelay: time.Millisecond * 2,
   518  			sleep:   time.Millisecond * 4,
   519  		},
   520  		{
   521  			config: &Config{
   522  				UnstoppableWorkers: 0,
   523  				MaxWorkers:         4,
   524  				WorkQueueSize:      0,
   525  				ExtraWorkerTTL:     time.Millisecond,
   526  			},
   527  			tasks:       16,
   528  			spawnBefore: 0,
   529  			spawnAfter:  4,
   530  			kill:        4,
   531  
   532  			cbDelay: time.Millisecond * 2,
   533  			sleep:   time.Millisecond * 16,
   534  		},
   535  		{
   536  			config: &Config{
   537  				UnstoppableWorkers: 2,
   538  				MaxWorkers:         0, // no limit
   539  				WorkQueueSize:      0,
   540  				ExtraWorkerTTL:     time.Millisecond,
   541  			},
   542  			tasks:       8,
   543  			spawnBefore: 2,
   544  			spawnAfter:  6,
   545  			kill:        6,
   546  
   547  			cbDelay: time.Millisecond * 2,
   548  			sleep:   time.Millisecond * 16,
   549  		},
   550  	} {
   551  		t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
   552  			pool, err := New(test.config)
   553  			if err != nil {
   554  				t.Fatal(err)
   555  			}
   556  
   557  			if test.config.UnstoppableWorkers > 0 {
    558  				// Let the workers be spawned.
   559  				time.Sleep(time.Millisecond * 100)
   560  			}
   561  
   562  			n1 := wgCount(&pool.wg)
   563  			if n1 != test.spawnBefore {
   564  				t.Errorf("spawned %d goroutines before tasks; want %d", n1, test.spawnBefore)
   565  			}
   566  
   567  			var callbacks sync.WaitGroup
   568  			callbacks.Add(test.tasks)
   569  			for i := 0; i < test.tasks; i++ {
   570  				err := pool.Schedule(TaskFunc(func() {
   571  					time.Sleep(test.cbDelay)
   572  					callbacks.Done()
   573  				}))
   574  				if err != nil {
   575  					t.Fatal(err)
   576  				}
   577  			}
   578  
   579  			runtime.Gosched()
   580  
   581  			n2 := wgCount(&pool.wg)
   582  			if n := n2 - n1; n != test.spawnAfter {
   583  				t.Errorf("spawned %d goroutines after tasks; want %d", n, test.spawnAfter)
   584  			}
   585  
   586  			callbacks.Wait()
   587  			time.Sleep(test.sleep)
   588  
   589  			n3 := wgCount(&pool.wg)
   590  			if n := n2 - n3; n != test.kill {
   591  				t.Errorf("killed %d goroutines after sleep; want %d", n, test.kill)
   592  			}
   593  		})
   594  	}
   595  }
   596  
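        // TestPoolScheduleImmediate checks that ScheduleImmediate succeeds only
        // when a worker is ready to pick the task up right away.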
   597  func TestPoolScheduleImmediate(t *testing.T) {
    598  	// We set GOMAXPROCS to 1 here to avoid a race between the first (locking)
    599  	// task completing and the worker blocking on the task queue again. With
    600  	// GOMAXPROCS > 1 this test could flake: the locker task has finished, but
    601  	// the worker has not yet blocked on the task queue, while our goroutine
    602  	// already calls ScheduleImmediate and fails with ErrUnavailable.
   603  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
   604  
   605  	pool := Must(New(&Config{
   606  		MaxWorkers:    1,
   607  		WorkQueueSize: 0,
   608  	}))
   609  
   610  	var (
   611  		err error
   612  
   613  		lock = make(chan struct{})
   614  		done = make(chan struct{})
   615  		noop = TaskFunc(func() {})
   616  	)
   617  	locker := TaskFunc(func() {
   618  		<-lock
   619  		close(done)
   620  	})
   621  
   622  	if err = pool.ScheduleImmediate(locker); err != nil {
   623  		t.Fatalf("unexpected error: %v", err)
   624  	}
   625  	if err = pool.ScheduleImmediate(noop); err == nil {
   626  		t.Fatalf("expected error got nil")
   627  	}
   628  
   629  	// Complete the first task.
   630  	close(lock)
   631  	<-done
   632  
    633  	// Let the worker block on reading from the pool.work queue.
   634  	runtime.Gosched()
   635  
   636  	if err = pool.ScheduleImmediate(noop); err != nil {
   637  		t.Fatalf("unexpected error: %v", err)
   638  	}
   639  }
   640  
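        // TestPoolScheduleTimeout checks that ScheduleTimeout fails with
        // ErrUnavailable while the single worker is busy and succeeds again once
        // the worker is released.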
   641  func TestPoolScheduleTimeout(t *testing.T) {
   642  	pool, err := New(&Config{
   643  		MaxWorkers:    1,
   644  		WorkQueueSize: 0,
   645  	})
   646  	if err != nil {
   647  		t.Fatal(err)
   648  	}
   649  
    650  	// First, create a task that blocks execution of other tasks until we
    651  	// close the done channel.
   652  	done := make(chan struct{})
   653  	err = pool.ScheduleTimeout(10*time.Millisecond, TaskFunc(func() {
   654  		<-done
   655  	}))
   656  	if err != nil {
   657  		t.Fatalf("unexpected error: %s", err)
   658  	}
   659  
    660  	// Next, try to schedule a task and expect ErrUnavailable.
   661  	err = pool.ScheduleTimeout(10*time.Millisecond, TaskFunc(func() {
   662  		t.Errorf("unexpected task execution")
   663  	}))
   664  	if err != ErrUnavailable {
   665  		t.Errorf("unexpected error: %s; want %s", err, ErrUnavailable)
   666  	}
   667  
    668  	// Finally, release the pool and try to schedule another task,
    669  	// expecting that it will succeed.
   670  	close(done)
   671  	ok := make(chan struct{})
   672  	err = pool.ScheduleTimeout(10*time.Millisecond, TaskFunc(func() {
   673  		close(ok)
   674  	}))
   675  	if err != nil {
    676  		t.Errorf("unexpected error: %s", err)
   677  	}
   678  
   679  	<-ok
   680  }
   681  
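        // TestScheduleContext checks that ScheduleContext fails with
        // context.Canceled when the context is canceled while the single worker is
        // busy, and succeeds once the worker is released.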
   682  func TestScheduleContext(t *testing.T) {
   683  	pool, err := New(&Config{
   684  		MaxWorkers:    1,
   685  		WorkQueueSize: 0,
   686  	})
   687  	if err != nil {
   688  		t.Fatal(err)
   689  	}
   690  
    691  	// First, create a task that blocks execution of other tasks until we
    692  	// close the done channel.
   693  	done := make(chan struct{})
   694  	err = pool.ScheduleTimeout(time.Millisecond, TaskFunc(func() {
   695  		<-done
   696  	}))
   697  	if err != nil {
    698  		t.Errorf("unexpected error: %s", err)
   699  	}
   700  
   701  	ctx, cancel := context.WithCancel(context.Background())
   702  	time.AfterFunc(time.Millisecond, cancel)
   703  
    704  	// Next, try to schedule a task and expect the context.Canceled error.
   705  	err = pool.ScheduleContext(ctx, TaskFunc(func() {
   706  		t.Errorf("unexpected task execution")
   707  	}))
   708  	if err != context.Canceled {
   709  		t.Errorf("unexpected error: %s; want %s", err, context.Canceled)
   710  	}
   711  
    712  	// Finally, release the pool and try to schedule another task,
    713  	// expecting that it will succeed.
   714  	close(done)
   715  	ok := make(chan struct{})
   716  	err = pool.ScheduleContext(context.Background(), TaskFunc(func() {
   717  		close(ok)
   718  	}))
   719  	if err != nil {
    720  		t.Errorf("unexpected error: %s", err)
   721  	}
   722  
   723  	<-ok
   724  }
   725  
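        // TestPoolClose checks that scheduling works before Close() and fails with
        // ErrPoolClosed after it.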
   726  func TestPoolClose(t *testing.T) {
   727  	pool, err := New(&Config{
   728  		UnstoppableWorkers: 1,
   729  		MaxWorkers:         1,
   730  		WorkQueueSize:      1,
   731  	})
   732  	if err != nil {
   733  		t.Fatal(err)
   734  	}
   735  
   736  	task := TaskFunc(func() {})
   737  	err = pool.ScheduleImmediate(task)
   738  	if err != nil {
   739  		t.Fatal(err)
   740  	}
   741  	pool.Close()
   742  
   743  	if err = pool.ScheduleImmediate(task); err != ErrPoolClosed {
   744  		t.Fatal(err)
   745  	}
   746  }
   747  
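        // TestPoolScheduleStat checks that the OnTaskIn/OnTaskOut callbacks balance
        // out to zero even when some Schedule* calls fail.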
   748  func TestPoolScheduleStat(t *testing.T) {
   749  	var tasks int32
   750  
   751  	p := Must(New(&Config{
   752  		UnstoppableWorkers: 1,
   753  		MaxWorkers:         1,
   754  		WorkQueueSize:      0,
   755  
   756  		OnTaskIn: func() {
   757  			atomic.AddInt32(&tasks, 1)
   758  		},
   759  		OnTaskOut: func() {
   760  			atomic.AddInt32(&tasks, -1)
   761  		},
   762  	}))
   763  
   764  	// First lock the pool.
   765  	done := make(chan struct{})
   766  	_ = p.Schedule(TaskFunc(func() {
   767  		<-done
   768  	}))
   769  
   770  	// Prepare canceled context.
   771  	ctx, cancel := context.WithCancel(context.Background())
   772  	cancel()
   773  
   774  	// Make multiple Schedule* calls, all of which will fail.
   775  	_ = p.ScheduleImmediate(nil)
   776  	_ = p.ScheduleTimeout(time.Nanosecond, nil)
   777  	_ = p.ScheduleContext(ctx, nil)
   778  	_ = p.ScheduleCustom(ctx.Done(), nil)
   779  
   780  	close(done)
    781  	// Let the pool become unlocked.
   782  	runtime.Gosched()
   783  
   784  	if n := atomic.LoadInt32(&tasks); n != 0 {
   785  		t.Fatalf("eventually got %d enqueued tasks; want 0", n)
   786  	}
   787  }
   788  
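        // BenchmarkSchedule measures the internal schedule path for several pool
        // configurations.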
   789  func BenchmarkSchedule(b *testing.B) {
   790  	for _, test := range []struct {
   791  		config *Config
   792  	}{
   793  		{&Config{UnstoppableWorkers: 0, MaxWorkers: 1, WorkQueueSize: 0}},
   794  		{&Config{UnstoppableWorkers: 8, MaxWorkers: 8, WorkQueueSize: 0}},
   795  		{&Config{UnstoppableWorkers: 8, MaxWorkers: 8, WorkQueueSize: 8}},
   796  		{&Config{UnstoppableWorkers: 1, MaxWorkers: 8, WorkQueueSize: 8}},
   797  		{&Config{UnstoppableWorkers: 1, MaxWorkers: 0, WorkQueueSize: 8}},
   798  		{&Config{UnstoppableWorkers: 1, MaxWorkers: 0, WorkQueueSize: 0}},
   799  	} {
   800  
   801  		b.Run(test.config.String(), func(b *testing.B) {
   802  			var wg sync.WaitGroup
   803  			wg.Add(b.N)
   804  			task := TaskFunc(wg.Done)
   805  
   806  			pool := Must(New(test.config))
   807  
   808  			b.ResetTimer()
   809  			for i := 0; i < b.N; i++ {
   810  				_ = pool.schedule(task, nil, nil)
   811  			}
   812  
   813  			wg.Wait()
   814  		})
   815  	}
   816  }
   817  
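        // String renders the config as a short label used for sub-benchmark names.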
   818  func (c *Config) String() string {
   819  	return fmt.Sprintf(
   820  		"unstpb:%d max:%d queue:%d",
   821  		c.UnstoppableWorkers,
   822  		c.MaxWorkers,
   823  		c.WorkQueueSize,
   824  	)
   825  }
   826  
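        // wgCount returns the current counter of wg (the number of goroutines it
        // is waiting for), read from the WaitGroup's internal state via the
        // go:linkname declared below.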
   827  func wgCount(wg *sync.WaitGroup) int {
   828  	statep := wgState(wg)
   829  	v := atomic.LoadUint64(statep)
   830  	return int(v >> 32)
   831  }
   832  
   833  //go:linkname wgState sync.(*WaitGroup).state
   834  func wgState(*sync.WaitGroup) *uint64