github.com/decred/dcrlnd@v0.7.6/pool/worker_test.go

package pool_test

import (
	"bytes"
	crand "crypto/rand"
	"fmt"
	"io"
	"math/rand"
	"testing"
	"time"

	"github.com/decred/dcrlnd/buffer"
	"github.com/decred/dcrlnd/pool"
)

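// workerPoolTest describes a generic test case that can be run against any
// concrete worker pool implementation.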
type workerPoolTest struct {
	name       string
	newPool    func() interface{}
	numWorkers int
}

// TestConcreteWorkerPools asserts the behavior of any concrete implementations
// of worker pools provided by the pool package. Currently this tests the
// pool.Read and pool.Write instances.
func TestConcreteWorkerPools(t *testing.T) {
	const (
		gcInterval     = time.Second
		expiryInterval = 250 * time.Millisecond
		numWorkers     = 5
		workerTimeout  = 500 * time.Millisecond
	)

	tests := []workerPoolTest{
		{
			name: "write pool",
			newPool: func() interface{} {
				bp := pool.NewWriteBuffer(
					gcInterval, expiryInterval,
				)

				return pool.NewWrite(
					bp, numWorkers, workerTimeout,
				)
			},
			numWorkers: numWorkers,
		},
		{
			name: "read pool",
			newPool: func() interface{} {
				bp := pool.NewReadBuffer(
					gcInterval, expiryInterval,
				)

				return pool.NewRead(
					bp, numWorkers, workerTimeout,
				)
			},
			numWorkers: numWorkers,
		},
	}

	for _, test := range tests {
		testWorkerPool(t, test)
	}
}

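// testWorkerPool runs the non-blocking, blocking, and partial-blocking
// subtests against the worker pool constructed by the given test case.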
func testWorkerPool(t *testing.T, test workerPoolTest) {
	t.Run(test.name+" non blocking", func(t *testing.T) {
		t.Parallel()

		p := test.newPool()
		startGeneric(t, p)
		defer stopGeneric(t, p)

		submitNonblockingGeneric(t, p, test.numWorkers)
	})

	t.Run(test.name+" blocking", func(t *testing.T) {
		t.Parallel()

		p := test.newPool()
		startGeneric(t, p)
		defer stopGeneric(t, p)

		submitBlockingGeneric(t, p, test.numWorkers)
	})

	t.Run(test.name+" partial blocking", func(t *testing.T) {
		t.Parallel()

		p := test.newPool()
		startGeneric(t, p)
		defer stopGeneric(t, p)

		submitPartialBlockingGeneric(t, p, test.numWorkers)
	})
}

// submitNonblockingGeneric asserts that queueing tasks to the worker pool and
// allowing them all to unblock simultaneously results in all of the tasks
// being completed in a timely manner.
func submitNonblockingGeneric(t *testing.T, p interface{}, nWorkers int) {
	// We'll submit 2*nWorkers tasks that will all be unblocked
	// simultaneously.
	nUnblocked := 2 * nWorkers

	// First we'll queue all of the tasks for the pool.
	errChan := make(chan error)
	semChan := make(chan struct{})
	for i := 0; i < nUnblocked; i++ {
		go func() { errChan <- submitGeneric(p, semChan) }()
	}

	// Since we haven't signaled the semaphore, none of them should
	// complete.
	pullNothing(t, errChan)

	// Now, unblock them all simultaneously. All of the tasks should then be
	// processed in parallel. Afterward, no more errors should come through.
	close(semChan)
	pullParallel(t, nUnblocked, errChan)
	pullNothing(t, errChan)
}

// submitBlockingGeneric asserts that submitting blocking tasks to the pool and
// unblocking each sequentially results in a single task being processed at a
// time.
func submitBlockingGeneric(t *testing.T, p interface{}, nWorkers int) {
	// We'll submit 2*nWorkers tasks that will be unblocked sequentially.
	nBlocked := 2 * nWorkers

	// First, queue all of the blocking tasks for the pool.
	errChan := make(chan error)
	semChan := make(chan struct{})
	for i := 0; i < nBlocked; i++ {
		go func() { errChan <- submitGeneric(p, semChan) }()
	}

	// Since we haven't signaled the semaphore, none of them should
	// complete.
	pullNothing(t, errChan)

	// Now, pull each blocking task sequentially from the pool. Afterwards,
	// no more errors should come through.
	pullSequential(t, nBlocked, errChan, semChan)
	pullNothing(t, errChan)
}

// submitPartialBlockingGeneric tests that, so long as one worker is not
// blocked, any other non-blocking submitted tasks can still be processed.
func submitPartialBlockingGeneric(t *testing.T, p interface{}, nWorkers int) {
	// We'll submit nWorkers-1 tasks that will be initially blocked; the
	// remainder will all be unblocked simultaneously. After the unblocked
	// tasks have finished, we will sequentially unblock the nWorkers-1
	// tasks that were first submitted.
	nBlocked := nWorkers - 1
	nUnblocked := 2*nWorkers - nBlocked

	// First, submit all of the blocking tasks to the pool.
	errChan := make(chan error)
	semChan := make(chan struct{})
	for i := 0; i < nBlocked; i++ {
		go func() { errChan <- submitGeneric(p, semChan) }()
	}

	// Since these are all blocked, no errors should be returned yet.
	pullNothing(t, errChan)

	// Now, add all of the non-blocking tasks to the pool.
	semChanNB := make(chan struct{})
	for i := 0; i < nUnblocked; i++ {
		go func() { errChan <- submitGeneric(p, semChanNB) }()
	}

	// Since we haven't unblocked the second batch, we again expect no tasks
	// to finish.
	pullNothing(t, errChan)

	// Now, unblock the non-blocking tasks and pull all of them. After they
	// have been pulled, we should see no more tasks.
	close(semChanNB)
	pullParallel(t, nUnblocked, errChan)
	pullNothing(t, errChan)

	// Finally, unblock each of the blocked tasks we added initially, and
	// assert that no further errors come through.
	pullSequential(t, nBlocked, errChan, semChan)
	pullNothing(t, errChan)
}

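// pullNothing asserts that no errors arrive on errChan within one second.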
func pullNothing(t *testing.T, errChan chan error) {
	t.Helper()

	select {
	case err := <-errChan:
		t.Fatalf("received unexpected error before semaphore "+
			"release: %v", err)

	case <-time.After(time.Second):
	}
}

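// pullParallel pulls n results from errChan, failing if any task returns a
// non-nil error or is not processed within one second.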
func pullParallel(t *testing.T, n int, errChan chan error) {
	t.Helper()

	for i := 0; i < n; i++ {
		select {
		case err := <-errChan:
			if err != nil {
				t.Fatal(err)
			}

		case <-time.After(time.Second):
			t.Fatalf("task %d was not processed in time", i)
		}
	}
}

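// pullSequential unblocks n tasks one at a time via semChan, asserting that
// each completes without error before the next is released.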
func pullSequential(t *testing.T, n int, errChan chan error, semChan chan struct{}) {
	t.Helper()

	for i := 0; i < n; i++ {
		// Signal for another task to unblock.
		select {
		case semChan <- struct{}{}:
		case <-time.After(time.Second):
			t.Fatalf("task %d was not unblocked", i)
		}

		// Wait for the error to arrive; we expect it to be nil.
		select {
		case err := <-errChan:
			if err != nil {
				t.Fatal(err)
			}

		case <-time.After(time.Second):
			t.Fatalf("task %d was not processed in time", i)
		}
	}
}

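// startGeneric starts the given worker pool, failing the test if the pool's
// concrete type is unknown or if Start returns an error.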
func startGeneric(t *testing.T, p interface{}) {
	t.Helper()

	var err error
	switch pp := p.(type) {
	case *pool.Write:
		err = pp.Start()

	case *pool.Read:
		err = pp.Start()

	default:
		t.Fatalf("unknown worker pool type: %T", p)
	}

	if err != nil {
		t.Fatalf("unable to start worker pool: %v", err)
	}
}

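// stopGeneric stops the given worker pool, failing the test if the pool's
// concrete type is unknown or if Stop returns an error.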
func stopGeneric(t *testing.T, p interface{}) {
	t.Helper()

	var err error
	switch pp := p.(type) {
	case *pool.Write:
		err = pp.Stop()

	case *pool.Read:
		err = pp.Stop()

	default:
		t.Fatalf("unknown worker pool type: %T", p)
	}

	if err != nil {
		t.Fatalf("unable to stop worker pool: %v", err)
	}
}

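// submitGeneric submits a task to the given worker pool. The task validates
// that the buffer it receives has been reset, dirties it with random bytes,
// and blocks on sem before returning.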
func submitGeneric(p interface{}, sem <-chan struct{}) error {
	var err error
	switch pp := p.(type) {
	case *pool.Write:
		err = pp.Submit(func(buf *bytes.Buffer) error {
			// Verify that the provided buffer has been reset to
			// zero length.
			if buf.Len() != 0 {
				return fmt.Errorf("buf should be length zero, "+
					"instead has length %d", buf.Len())
			}

			// Verify that the buffer's capacity matches the
			// expected buffer.WriteSize.
			if buf.Cap() != buffer.WriteSize {
				return fmt.Errorf("buf should have capacity "+
					"%d, instead has capacity %d",
					buffer.WriteSize, buf.Cap())
			}

			// Sample some random bytes that we'll use to dirty the
			// buffer.
			b := make([]byte, rand.Intn(buf.Cap()))
			_, err := io.ReadFull(crand.Reader, b)
			if err != nil {
				return err
			}

			// Write the random bytes to the buffer.
			_, err = buf.Write(b)

			// Wait until this task is signaled to exit.
			<-sem

			return err
		})

	case *pool.Read:
		err = pp.Submit(func(buf *buffer.Read) error {
			// Assert that all of the bytes in the provided array
			// are zero, indicating that the buffer was reset
			// between uses.
			for i := range buf[:] {
				if buf[i] != 0x00 {
					return fmt.Errorf("byte %d of "+
						"buffer.Read should be "+
						"0, instead is %d", i, buf[i])
				}
			}

			// Sample some random bytes to read into the buffer.
			_, err := io.ReadFull(crand.Reader, buf[:])

			// Wait until this task is signaled to exit.
			<-sem

			return err
		})

	default:
		return fmt.Errorf("unknown worker pool type: %T", p)
	}

	if err != nil {
		return fmt.Errorf("unable to submit task: %v", err)
	}

	return nil
}