github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/module/jobqueue/consumer_test.go

package jobqueue

import (
	"fmt"
	"strconv"
	"sync"
	"testing"
	"time"

	badgerdb "github.com/dgraph-io/badger/v2"
	"github.com/rs/zerolog"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/badger"
	"github.com/onflow/flow-go/utils/unittest"
)

func TestProcessableJobs(t *testing.T) {
	t.Parallel()

	processedIndex := uint64(2)
	maxProcessing := uint64(3)
	maxSearchAhead := uint64(5)

	populate := func(start, end uint64, incomplete []uint64) map[uint64]*jobStatus {
		processings := map[uint64]*jobStatus{}
		for i := start; i <= end; i++ {
			processings[i] = &jobStatus{jobID: JobIDAtIndex(i), done: true}
		}
		for _, i := range incomplete {
			processings[i].done = false
		}

		return processings
	}

	t.Run("no job, nothing to process", func(t *testing.T) {
		jobs := NewMockJobs() // no job in the queue
		processings := map[uint64]*jobStatus{}
		processedIndex := uint64(0)

		jobsToRun, processedTo, err := processableJobs(jobs, processings, maxProcessing, 0, processedIndex)

		require.NoError(t, err)
		require.Equal(t, uint64(0), processedTo)
		assertJobs(t, []uint64{}, jobsToRun)
	})

	t.Run("max processing was not reached", func(t *testing.T) {
		jobs := NewMockJobs()
		require.NoError(t, jobs.PushN(20)) // enough jobs in the queue

		// jobs 3 and 5 are not done, 2 processing in total
		// 4, 6, 7, 8, 9, 10, 11 are finished, 7 finished in total
		processings := populate(3, 11, []uint64{3, 5})

		jobsToRun, processedTo, err := processableJobs(jobs, processings, maxProcessing, 0, processedIndex)

		require.NoError(t, err)
		require.Equal(t, uint64(2), processedTo)
		// it will process one more job and reach the max processing limit.
		assertJobs(t, []uint64{
			12,
		}, jobsToRun)
	})

	t.Run("reached max processing", func(t *testing.T) {
		jobs := NewMockJobs()
		require.NoError(t, jobs.PushN(20)) // enough jobs in the queue

		// jobs 3, 5, 6 are not done, which has reached the max processing limit (3)
		// 4, 7, 8, 9, 10, 11, 12 are finished, 7 finished in total
		processings := populate(3, 12, []uint64{3, 5, 6})

		jobsToRun, processedTo, err := processableJobs(jobs, processings, maxProcessing, 0, processedIndex)

		require.NoError(t, err)
		require.Equal(t, uint64(2), processedTo)
		// it will not process any job, because the max processing limit is reached.
		assertJobs(t, []uint64{}, jobsToRun)
	})

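	// The expectations in the next subtest exercise the search-ahead window:
	// with a processed index of 2 and maxSearchAhead of 5, nothing beyond
	// index 7 is dispatched; once the lowest unfinished jobs complete and the
	// processed index moves up, the window slides forward and more jobs
	// become processable.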
	t.Run("processing pauses and resumes", func(t *testing.T) {
		jobs := NewMockJobs()
		require.NoError(t, jobs.PushN(20)) // enough jobs in the queue

		maxProcessing := uint64(4)

		// jobs 3 and 5 are not done
		// 4, 6, 7 are finished, 3 finished in total
		processings := populate(3, processedIndex+maxSearchAhead, []uint64{3, 5})

		// it will not process any job, because the consumer is paused
		jobsToRun, processedTo, err := processableJobs(jobs, processings, maxProcessing, maxSearchAhead, processedIndex)

		require.NoError(t, err)
		require.Equal(t, processedIndex, processedTo)
		assertJobs(t, []uint64{}, jobsToRun)

		// lowest job is processed, which should cause the consumer to resume
		processings[uint64(3)].done = true

		// job 3 is done, so it should return 2 more jobs (8-9) and pause again with one available worker
		jobsToRun, processedTo, err = processableJobs(jobs, processings, maxProcessing, maxSearchAhead, processedIndex)

		require.NoError(t, err)
		require.Equal(t, uint64(4), processedTo)
		assertJobs(t, []uint64{8, 9}, jobsToRun)

		// lowest job is processed, which should cause the consumer to resume
		processings[uint64(5)].done = true

		// job 5 is processed, so it should return jobs 8-11 (one job for each worker)
		jobsToRun, processedTo, err = processableJobs(jobs, processings, maxProcessing, maxSearchAhead, processedIndex)

		require.NoError(t, err)
		require.Equal(t, uint64(7), processedTo)
		assertJobs(t, []uint64{8, 9, 10, 11}, jobsToRun)
	})

	t.Run("no more job", func(t *testing.T) {
		jobs := NewMockJobs()
		require.NoError(t, jobs.PushN(11)) // 11 jobs, no more jobs to process

		// jobs 3 and 11 are not done, which has not reached the max processing limit (3)
		// 4, 5, 6, 7, 8, 9, 10 are finished, 7 finished in total
		processings := populate(3, 11, []uint64{3, 11})

		jobsToRun, processedTo, err := processableJobs(jobs, processings, maxProcessing, 0, processedIndex)

		require.NoError(t, err)
		require.Equal(t, uint64(2), processedTo)
		assertJobs(t, []uint64{}, jobsToRun)
	})

	t.Run("next jobs were done", func(t *testing.T) {
		jobs := NewMockJobs()
		require.NoError(t, jobs.PushN(20)) // enough jobs in the queue

		// jobs 3 and 5 are done
		// jobs 4 and 6 are not done, which has not reached the max processing limit
		processings := populate(3, 6, []uint64{4, 6})

		jobsToRun, processedTo, err := processableJobs(jobs, processings, maxProcessing, 0, processedIndex)

		require.NoError(t, err)
		require.Equal(t, uint64(3), processedTo)
		assertJobs(t, []uint64{
			7,
		}, jobsToRun)
	})

}

// Test that after jobs have been processed, the job statuses are removed to prevent a memory leak
func TestProcessedIndexDeletion(t *testing.T) {
	setup := func(t *testing.T, f func(c *Consumer, jobs *MockJobs)) {
		unittest.RunWithBadgerDB(t, func(db *badgerdb.DB) {
			log := unittest.Logger().With().Str("module", "consumer").Logger()
			jobs := NewMockJobs()
			progress := badger.NewConsumerProgress(db, "consumer")
			worker := newMockWorker()
			maxProcessing := uint64(3)
			c, err := NewConsumer(log, jobs, progress, worker, maxProcessing, 0, 0)
			require.NoError(t, err)
			worker.WithConsumer(c)

			f(c, jobs)
		})
	}

	setup(t, func(c *Consumer, jobs *MockJobs) {
		require.NoError(t, jobs.PushN(10))
		require.NoError(t, c.Start())

		require.Eventually(t, func() bool {
			c.mu.Lock()
			defer c.mu.Unlock()
			return c.processedIndex == uint64(10)
		}, 2*time.Second, 10*time.Millisecond)

		// should have no processings left after all jobs are processed
		c.mu.Lock()
		defer c.mu.Unlock()
		require.Len(t, c.processings, 0)
		require.Len(t, c.processingsIndex, 0)
	})
}

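// TestCheckBeforeStartIsNoop verifies that calling Check before Start does not
// clobber the processed index persisted in storage: before Start the consumer
// has not yet loaded its state, so after Start it must still report the stored
// index (100) rather than the uninitialized zero value.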
func TestCheckBeforeStartIsNoop(t *testing.T) {
	t.Parallel()

	unittest.RunWithBadgerDB(t, func(db *badgerdb.DB) {
		storedProcessedIndex := uint64(100)

		worker := newMockWorker()
		progress := badger.NewConsumerProgress(db, "consumer")
		err := progress.InitProcessedIndex(storedProcessedIndex)
		require.NoError(t, err)

		c, err := NewConsumer(
			unittest.Logger(),
			NewMockJobs(),
			progress,
			worker,
			uint64(3),
			0,
			10,
		)
		require.NoError(t, err)
		worker.WithConsumer(c)

		// Check will store the processedIndex. Before Start, it will be uninitialized (0)
		c.Check()

		// Start will load the processedIndex from storage
		err = c.Start()
		require.NoError(t, err)

		// make sure that the processedIndex at the end is the one from storage
		assert.Equal(t, storedProcessedIndex, c.LastProcessedIndex())
	})
}

func assertJobs(t *testing.T, expectedIndex []uint64, jobsToRun []*jobAtIndex) {
	actualIndex := make([]uint64, 0, len(jobsToRun))
	for _, jobAtIndex := range jobsToRun {
		require.NotNil(t, jobAtIndex.job)
		actualIndex = append(actualIndex, jobAtIndex.index)
	}
	require.Equal(t, expectedIndex, actualIndex)
}

// MockJobs implements the Jobs interface, and is used as the dependency for
// the Consumer for testing purposes
type MockJobs struct {
	sync.Mutex
	log      zerolog.Logger
	last     int
	jobs     map[int]module.Job
	index    map[module.JobID]int
	JobMaker *JobMaker
}

func NewMockJobs() *MockJobs {
	return &MockJobs{
		log:      unittest.Logger().With().Str("module", "jobs").Logger(),
		last:     0, // indices must start from 1
		jobs:     make(map[int]module.Job),
		index:    make(map[module.JobID]int),
		JobMaker: NewJobMaker(),
	}
}

func (j *MockJobs) AtIndex(index uint64) (module.Job, error) {
	j.Lock()
	defer j.Unlock()

	job, ok := j.jobs[int(index)]

	j.log.Debug().Int("index", int(index)).Bool("exists", ok).Msg("reading job at index")

	if !ok {
		return nil, storage.ErrNotFound
	}

	return job, nil
}

func (j *MockJobs) Head() (uint64, error) {
	return uint64(j.last), nil
}

func (j *MockJobs) Add(job module.Job) error {
	j.Lock()
	defer j.Unlock()

	j.log.Debug().Str("job_id", string(job.ID())).Msg("adding job")

	id := job.ID()
	_, ok := j.index[id]
	if ok {
		return storage.ErrAlreadyExists
	}

	index := j.last + 1
	j.index[id] = int(index)
	j.jobs[index] = job
	j.last++

	j.log.
		Debug().Str("job_id", string(job.ID())).
		Int("index", index).
		Msg("job added at index")

	return nil
}

func (j *MockJobs) PushOne() error {
	job := j.JobMaker.Next()
	return j.Add(job)
}

func (j *MockJobs) PushN(n int64) error {
	for i := 0; i < int(n); i++ {
		err := j.PushOne()
		if err != nil {
			return err
		}
	}
	return nil
}

// JobIDAtIndex deterministically computes the JobID from an index.
func JobIDAtIndex(index uint64) module.JobID {
	return module.JobID(fmt.Sprintf("%v", index))
}

// JobIDToIndex parses a JobID back into its index.
func JobIDToIndex(id module.JobID) (uint64, error) {
	return strconv.ParseUint(string(id), 10, 64)
}

// JobMaker is a test helper.
// It creates new jobs with unique job IDs.
type JobMaker struct {
	sync.Mutex
	index uint64
}

func NewJobMaker() *JobMaker {
	return &JobMaker{
		index: 1,
	}
}

type TestJob struct {
	index uint64
}

func (tj TestJob) ID() module.JobID {
	return JobIDAtIndex(tj.index)
}

// Next returns the next job with a unique index.
func (j *JobMaker) Next() module.Job {
	j.Lock()
	defer j.Unlock()

	job := &TestJob{
		index: j.index,
	}
	j.index++
	return job
}

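// mockWorker immediately reports every job it receives as done, so the
// consumer's processed index advances as quickly as jobs are dispatched
// (which is why TestProcessedIndexDeletion can simply wait for the index to
// reach the head).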
type mockWorker struct {
	consumer *Consumer
}

func newMockWorker() *mockWorker {
	return &mockWorker{}
}

func (w *mockWorker) WithConsumer(c *Consumer) {
	w.consumer = c
}

func (w *mockWorker) Run(job module.Job) error {
	w.consumer.NotifyJobIsDone(job.ID())
	return nil
}
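
// TestMockJobsSketch is an illustrative sketch: the test name and scenario are
// assumptions added for documentation purposes. It spells out the contract the
// MockJobs helper provides to the tests above: Head tracks the highest pushed
// index, job IDs round-trip through JobIDAtIndex/JobIDToIndex, and AtIndex
// returns storage.ErrNotFound past the head.
func TestMockJobsSketch(t *testing.T) {
	t.Parallel()

	jobs := NewMockJobs()
	require.NoError(t, jobs.PushN(3))

	// Head reflects the number of pushed jobs (indices start from 1).
	head, err := jobs.Head()
	require.NoError(t, err)
	require.Equal(t, uint64(3), head)

	// Each stored job's ID round-trips through JobIDAtIndex / JobIDToIndex.
	for i := uint64(1); i <= head; i++ {
		job, err := jobs.AtIndex(i)
		require.NoError(t, err)
		require.Equal(t, JobIDAtIndex(i), job.ID())

		index, err := JobIDToIndex(job.ID())
		require.NoError(t, err)
		require.Equal(t, i, index)
	}

	// An index beyond the head is not found.
	_, err = jobs.AtIndex(head + 1)
	require.ErrorIs(t, err, storage.ErrNotFound)
}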