code.gitea.io/gitea@v1.22.3/modules/queue/workerqueue_test.go

// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package queue

import (
	"context"
	"slices"
	"strconv"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/test"

	"github.com/stretchr/testify/assert"
)

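// runWorkerPoolQueue starts the queue's Run loop in a goroutine and returns a
// stop function that shuts the queue down with a one-second timeout.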
    21  func runWorkerPoolQueue[T any](q *WorkerPoolQueue[T]) func() {
    22  	go q.Run()
    23  	return func() {
    24  		q.ShutdownWait(1 * time.Second)
    25  	}
    26  }
    27  
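// TestWorkerPoolQueueUnhandled verifies that items reported as unhandled by the
// handler are requeued and handled again.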
func TestWorkerPoolQueueUnhandled(t *testing.T) {
	oldUnhandledItemRequeueDuration := unhandledItemRequeueDuration.Load()
	unhandledItemRequeueDuration.Store(0)
	defer unhandledItemRequeueDuration.Store(oldUnhandledItemRequeueDuration)

	mu := sync.Mutex{}

	test := func(t *testing.T, queueSetting setting.QueueSettings) {
		queueSetting.Length = 100
		queueSetting.Type = "channel"
		queueSetting.Datadir = t.TempDir() + "/test-queue"
		m := map[int]int{}

		// odd items are handled once; even items are returned as unhandled on the
		// first pass, so they are requeued and handled a second time
		handler := func(items ...int) (unhandled []int) {
			testRecorder.Record("handle:%v", items)
			for _, item := range items {
				mu.Lock()
				if item%2 == 0 && m[item] == 0 {
					unhandled = append(unhandled, item)
				}
				m[item]++
				mu.Unlock()
			}
			return unhandled
		}

		q, _ := newWorkerPoolQueueForTest("test-workpoolqueue", queueSetting, handler, false)
		stop := runWorkerPoolQueue(q)
		for i := 0; i < queueSetting.Length; i++ {
			testRecorder.Record("push:%v", i)
			assert.NoError(t, q.Push(i))
		}
		assert.NoError(t, q.FlushWithContext(context.Background(), 0))
		stop()

		ok := true
		for i := 0; i < queueSetting.Length; i++ {
			if i%2 == 0 {
				ok = ok && assert.EqualValues(t, 2, m[i], "test %s: item %d", t.Name(), i)
			} else {
				ok = ok && assert.EqualValues(t, 1, m[i], "test %s: item %d", t.Name(), i)
			}
		}
		if !ok {
			t.Logf("m: %v", m)
			t.Logf("records: %v", testRecorder.Records())
		}
		testRecorder.Reset()
	}

	runCount := 2 // these tests can be run even hundreds of times to check their stability
    80  	t.Run("1/1", func(t *testing.T) {
    81  		for i := 0; i < runCount; i++ {
    82  			test(t, setting.QueueSettings{BatchLength: 1, MaxWorkers: 1})
    83  		}
    84  	})
    85  	t.Run("3/1", func(t *testing.T) {
    86  		for i := 0; i < runCount; i++ {
    87  			test(t, setting.QueueSettings{BatchLength: 3, MaxWorkers: 1})
    88  		}
    89  	})
    90  	t.Run("4/5", func(t *testing.T) {
    91  		for i := 0; i < runCount; i++ {
    92  			test(t, setting.QueueSettings{BatchLength: 4, MaxWorkers: 5})
    93  		}
    94  	})
    95  }
    96  
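// TestWorkerPoolQueuePersistence verifies that a persistent ("level") queue keeps its
// unhandled items across a shutdown and hands them to a new queue instance that uses
// the same data directory.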
func TestWorkerPoolQueuePersistence(t *testing.T) {
	runCount := 2 // these tests can be run even hundreds of times to check their stability
	t.Run("1/1", func(t *testing.T) {
		for i := 0; i < runCount; i++ {
			testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 1, MaxWorkers: 1, Length: 100})
		}
	})
	t.Run("3/1", func(t *testing.T) {
		for i := 0; i < runCount; i++ {
			testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 3, MaxWorkers: 1, Length: 100})
		}
	})
	t.Run("4/5", func(t *testing.T) {
		for i := 0; i < runCount; i++ {
			testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 4, MaxWorkers: 5, Length: 100})
		}
	})
}

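// testWorkerPoolQueuePersistence runs two queue instances against the same "level" queue
// data directory: q1 handles part of the items and shuts down, q2 drains the rest.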
func testWorkerPoolQueuePersistence(t *testing.T, queueSetting setting.QueueSettings) {
	testCount := queueSetting.Length
	queueSetting.Type = "level"
	queueSetting.Datadir = t.TempDir() + "/test-queue"

	mu := sync.Mutex{}

	var tasksQ1, tasksQ2 []string
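	// q1 pushes all tasks, handles some of them, and shuts down once "task-20" has been seen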
	q1 := func() {
		startWhenAllReady := make(chan struct{}) // only start consuming data after all "testCount" tasks have been pushed into the queue
		stopAt20Shutdown := make(chan struct{})  // stop and shut down once "task-20" has been handled

		testHandler := func(data ...string) []string {
			<-startWhenAllReady
			time.Sleep(10 * time.Millisecond)
			for _, s := range data {
				mu.Lock()
				tasksQ1 = append(tasksQ1, s)
				mu.Unlock()

				if s == "task-20" {
					close(stopAt20Shutdown)
				}
			}
			return nil
		}

		q, _ := newWorkerPoolQueueForTest("pr_patch_checker_test", queueSetting, testHandler, true)
		stop := runWorkerPoolQueue(q)
		for i := 0; i < testCount; i++ {
			_ = q.Push("task-" + strconv.Itoa(i))
		}
		close(startWhenAllReady)
		<-stopAt20Shutdown // more than 20 tasks may have been executed by this point
		stop()
	}

	q1() // run some tasks and shut down at an intermediate point

	time.Sleep(100 * time.Millisecond) // the handler in q1 has a slight delay, so wait for it to finish before starting q2

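	// q2 reopens the same level queue and flushes whatever q1 left unhandled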
	q2 := func() {
		testHandler := func(data ...string) []string {
			for _, s := range data {
				mu.Lock()
				tasksQ2 = append(tasksQ2, s)
				mu.Unlock()
			}
			return nil
		}

		q, _ := newWorkerPoolQueueForTest("pr_patch_checker_test", queueSetting, testHandler, true)
		stop := runWorkerPoolQueue(q)
		assert.NoError(t, q.FlushWithContext(context.Background(), 0))
		stop()
	}

	q2() // restart the queue and continue executing the remaining tasks in it

	assert.NotZero(t, len(tasksQ1))
	assert.NotZero(t, len(tasksQ2))
	assert.EqualValues(t, testCount, len(tasksQ1)+len(tasksQ2))
}

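// TestWorkerPoolQueueActiveWorkers verifies that the worker number scales up to MaxWorkers
// while items are pending and that idle workers are removed again after workerIdleDuration,
// keeping at least one worker alive.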
func TestWorkerPoolQueueActiveWorkers(t *testing.T) {
	defer test.MockVariableValue(&workerIdleDuration, 300*time.Millisecond)()

	handler := func(items ...int) (unhandled []int) {
		time.Sleep(100 * time.Millisecond)
		return nil
	}

	q, _ := newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 1, Length: 100}, handler, false)
	stop := runWorkerPoolQueue(q)
	for i := 0; i < 5; i++ {
		assert.NoError(t, q.Push(i))
	}

	time.Sleep(50 * time.Millisecond)
	assert.EqualValues(t, 1, q.GetWorkerNumber())
	assert.EqualValues(t, 1, q.GetWorkerActiveNumber())
	time.Sleep(500 * time.Millisecond)
	assert.EqualValues(t, 1, q.GetWorkerNumber())
	assert.EqualValues(t, 0, q.GetWorkerActiveNumber())
	time.Sleep(workerIdleDuration)
	assert.EqualValues(t, 1, q.GetWorkerNumber()) // at least one worker is kept alive after the queue has started working
	stop()

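	// repeat with MaxWorkers = 3: all three workers become active, then the pool shrinks back to one idle worker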
	q, _ = newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 3, Length: 100}, handler, false)
	stop = runWorkerPoolQueue(q)
	for i := 0; i < 15; i++ {
		assert.NoError(t, q.Push(i))
	}

	time.Sleep(50 * time.Millisecond)
	assert.EqualValues(t, 3, q.GetWorkerNumber())
	assert.EqualValues(t, 3, q.GetWorkerActiveNumber())
	time.Sleep(500 * time.Millisecond)
	assert.EqualValues(t, 3, q.GetWorkerNumber())
	assert.EqualValues(t, 0, q.GetWorkerActiveNumber())
	time.Sleep(workerIdleDuration)
	assert.EqualValues(t, 1, q.GetWorkerNumber()) // at least one worker is kept alive after the queue has started working
	stop()
}

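// TestWorkerPoolQueueShutdown verifies that shutting down a level queue while all items are
// still unhandled leaves every item in the persisted queue for the next instance.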
func TestWorkerPoolQueueShutdown(t *testing.T) {
	oldUnhandledItemRequeueDuration := unhandledItemRequeueDuration.Load()
	unhandledItemRequeueDuration.Store(int64(100 * time.Millisecond))
	defer unhandledItemRequeueDuration.Store(oldUnhandledItemRequeueDuration)

	// simulate a slow handler that doesn't handle any item (all items are pushed back into the queue)
	handlerCalled := make(chan struct{})
	handler := func(items ...int) (unhandled []int) {
		if items[0] == 0 {
			close(handlerCalled)
		}
		time.Sleep(400 * time.Millisecond)
		return items
	}

	qs := setting.QueueSettings{Type: "level", Datadir: t.TempDir() + "/queue", BatchLength: 3, MaxWorkers: 4, Length: 20}
	q, _ := newWorkerPoolQueueForTest("test-workpoolqueue", qs, handler, false)
	stop := runWorkerPoolQueue(q)
	for i := 0; i < qs.Length; i++ {
		assert.NoError(t, q.Push(i))
	}
	<-handlerCalled
	time.Sleep(200 * time.Millisecond) // wait a while to make sure all workers are active
	assert.EqualValues(t, 4, q.GetWorkerActiveNumber())
	stop() // stop triggers shutdown
	assert.EqualValues(t, 0, q.GetWorkerActiveNumber())

	// no item was ever handled, so all of them are still in the persisted queue
	q, _ = newWorkerPoolQueueForTest("test-workpoolqueue", qs, handler, false)
	assert.EqualValues(t, 20, q.GetQueueItemNumber())
}

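// TestWorkerPoolQueueWorkerIdleReset verifies that, as long as the queue stays non-empty,
// a busy worker pool is not shrunk down to a single worker even though workerIdleDuration
// is very short.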
func TestWorkerPoolQueueWorkerIdleReset(t *testing.T) {
	defer test.MockVariableValue(&workerIdleDuration, 10*time.Millisecond)()
	defer mockBackoffDuration(5 * time.Millisecond)()

	var q *WorkerPoolQueue[int]
	var handledCount atomic.Int32
	var hasOnlyOneWorkerRunning atomic.Bool
	handler := func(items ...int) (unhandled []int) {
		handledCount.Add(int32(len(items)))
		// give each batch a different duration, and check the active worker number periodically
		var activeNums []int
		for i := 0; i < 5-items[0]%2; i++ {
			time.Sleep(workerIdleDuration * 2)
			activeNums = append(activeNums, q.GetWorkerActiveNumber())
		}
		// When the queue never becomes empty, the existing workers should keep working.
		// This is not 100% guaranteed at the moment because the data race in workergroup.go is not resolved yet, see the TODO there.
		// If the "active worker numbers" look like [2 2 ... 1 1], an existing worker exited and no new worker was started.
		if slices.Equal([]int{1, 1}, activeNums[len(activeNums)-2:]) {
			hasOnlyOneWorkerRunning.Store(true)
		}
		return nil
	}
	q, _ = newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 2, Length: 100}, handler, false)
	stop := runWorkerPoolQueue(q)
	for i := 0; i < 100; i++ {
		assert.NoError(t, q.Push(i))
	}
	time.Sleep(500 * time.Millisecond)
	assert.Greater(t, int(handledCount.Load()), 4) // make sure enough items were handled during the test
	assert.False(t, hasOnlyOneWorkerRunning.Load(), "a slow handler should not block other workers from starting")
	stop()
}