github.com/gitbundle/modules@v0.0.0-20231025071548-85b91c5c3b01/queue/queue_bytefifo.go

// Copyright 2023 The GitBundle Inc. All rights reserved.
// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package queue

import (
	"context"
	"fmt"
	"runtime/pprof"
	"sync"
	"sync/atomic"
	"time"

	"github.com/gitbundle/modules/json"
	"github.com/gitbundle/modules/log"
	"github.com/gitbundle/modules/util"
)

// ByteFIFOQueueConfiguration is the configuration for a ByteFIFOQueue
type ByteFIFOQueueConfiguration struct {
	WorkerPoolConfiguration
	Workers     int
	WaitOnEmpty bool
}

var _ Queue = &ByteFIFOQueue{}

// ByteFIFOQueue is a Queue formed from a ByteFIFO and WorkerPool
type ByteFIFOQueue struct {
	*WorkerPool
	byteFIFO           ByteFIFO
	typ                Type
	shutdownCtx        context.Context
	shutdownCtxCancel  context.CancelFunc
	terminateCtx       context.Context
	terminateCtxCancel context.CancelFunc
	exemplar           interface{}
	workers            int
	name               string
	lock               sync.Mutex
	waitOnEmpty        bool
	pushed             chan struct{}
}
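
// Lifecycle note: shutdownCtx is cancelled by Shutdown and stops the consumer
// loop from popping further work, while terminateCtx is only cancelled by
// Terminate, just before the underlying ByteFIFO is closed - which is why the
// Push* methods use terminateCtx and keep working during a graceful shutdown.
// The pushed channel has capacity 1 and acts as a non-blocking wake-up signal:
// pushes and Flush attempt a non-blocking send, and the consumer loop drains
// it to cut its backoff wait short.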

// NewByteFIFOQueue creates a new ByteFIFOQueue
func NewByteFIFOQueue(typ Type, byteFIFO ByteFIFO, handle HandlerFunc, cfg, exemplar interface{}) (*ByteFIFOQueue, error) {
	configInterface, err := toConfig(ByteFIFOQueueConfiguration{}, cfg)
	if err != nil {
		return nil, err
	}
	config := configInterface.(ByteFIFOQueueConfiguration)

	terminateCtx, terminateCtxCancel := context.WithCancel(context.Background())
	shutdownCtx, shutdownCtxCancel := context.WithCancel(terminateCtx)

	q := &ByteFIFOQueue{
		byteFIFO:           byteFIFO,
		typ:                typ,
		shutdownCtx:        shutdownCtx,
		shutdownCtxCancel:  shutdownCtxCancel,
		terminateCtx:       terminateCtx,
		terminateCtxCancel: terminateCtxCancel,
		exemplar:           exemplar,
		workers:            config.Workers,
		name:               config.Name,
		waitOnEmpty:        config.WaitOnEmpty,
		pushed:             make(chan struct{}, 1),
	}
	q.WorkerPool = NewWorkerPool(func(data ...Data) (failed []Data) {
		for _, unhandled := range handle(data...) {
			if fail := q.PushBack(unhandled); fail != nil {
				failed = append(failed, fail)
			}
		}
		return
	}, config.WorkerPoolConfiguration)

	return q, nil
}

// Name returns the name of this queue
func (q *ByteFIFOQueue) Name() string {
	return q.name
}

// Push pushes data to the fifo
func (q *ByteFIFOQueue) Push(data Data) error {
	return q.PushFunc(data, nil)
}

// PushBack pushes data to the fifo
func (q *ByteFIFOQueue) PushBack(data Data) error {
	if !assignableTo(data, q.exemplar) {
		return fmt.Errorf("unable to assign data: %v to same type as exemplar: %v in %s", data, q.exemplar, q.name)
	}
	bs, err := json.Marshal(data)
	if err != nil {
		return err
	}
	defer func() {
		select {
		case q.pushed <- struct{}{}:
		default:
		}
	}()
	return q.byteFIFO.PushBack(q.terminateCtx, bs)
}

// PushFunc pushes data to the fifo
func (q *ByteFIFOQueue) PushFunc(data Data, fn func() error) error {
	if !assignableTo(data, q.exemplar) {
		return fmt.Errorf("unable to assign data: %v to same type as exemplar: %v in %s", data, q.exemplar, q.name)
	}
	bs, err := json.Marshal(data)
	if err != nil {
		return err
	}
	defer func() {
		select {
		case q.pushed <- struct{}{}:
		default:
		}
	}()
	return q.byteFIFO.PushFunc(q.terminateCtx, bs, fn)
}

// IsEmpty checks if the queue is empty
func (q *ByteFIFOQueue) IsEmpty() bool {
	q.lock.Lock()
	defer q.lock.Unlock()
	if !q.WorkerPool.IsEmpty() {
		return false
	}
	return q.byteFIFO.Len(q.terminateCtx) == 0
}

// NumberInQueue returns the number in the queue
func (q *ByteFIFOQueue) NumberInQueue() int64 {
	q.lock.Lock()
	defer q.lock.Unlock()
	return q.byteFIFO.Len(q.terminateCtx) + q.WorkerPool.NumberInQueue()
}

// Flush flushes the ByteFIFOQueue
func (q *ByteFIFOQueue) Flush(timeout time.Duration) error {
	select {
	case q.pushed <- struct{}{}:
	default:
	}
	return q.WorkerPool.Flush(timeout)
}

// Run runs the bytefifo queue
func (q *ByteFIFOQueue) Run(atShutdown, atTerminate func(func())) {
	pprof.SetGoroutineLabels(q.baseCtx)
	atShutdown(q.Shutdown)
	atTerminate(q.Terminate)
	log.Debug("%s: %s Starting", q.typ, q.name)

	_ = q.AddWorkers(q.workers, 0)

	log.Trace("%s: %s Now running", q.typ, q.name)
	q.readToChan()

	<-q.shutdownCtx.Done()
	log.Trace("%s: %s Waiting til done", q.typ, q.name)
	q.Wait()

	log.Trace("%s: %s Waiting til cleaned", q.typ, q.name)
	q.CleanUp(q.terminateCtx)
	q.terminateCtxCancel()
}
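
// The consumer side: readToChan repeatedly pops serialized tasks from the
// ByteFIFO and hands them to the WorkerPool. When a pop fails it backs off,
// starting at 100ms and growing by half each time up to maxBackOffTime; with
// WaitOnEmpty set, an empty fifo instead waits until something is pushed.
// Either way, a push (or Flush) wakes the loop immediately. While the queue is
// paused the loop drains the worker pool's dataChan back into the fifo so that
// nothing is lost.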

const maxBackOffTime = time.Second * 3

func (q *ByteFIFOQueue) readToChan() {
	// handle quick cancels
	select {
	case <-q.shutdownCtx.Done():
		// tell the pool to shutdown.
		q.baseCtxCancel()
		return
	default:
	}

	// Default backoff values
	backOffTime := time.Millisecond * 100
	backOffTimer := time.NewTimer(0)
	util.StopTimer(backOffTimer)

	paused, _ := q.IsPausedIsResumed()

loop:
	for {
		select {
		case <-paused:
			log.Trace("Queue %s pausing", q.name)
			_, resumed := q.IsPausedIsResumed()

			select {
			case <-resumed:
				paused, _ = q.IsPausedIsResumed()
				log.Trace("Queue %s resuming", q.name)
				if q.HasNoWorkerScaling() {
					log.Warn(
						"Queue: %s is configured to be non-scaling and has no workers - this configuration is likely incorrect.\n"+
							"The queue will be paused to prevent data-loss with the assumption that you will add workers and unpause as required.", q.name)
					q.Pause()
					continue loop
				}
			case <-q.shutdownCtx.Done():
				// tell the pool to shutdown.
				q.baseCtxCancel()
				return
			case data, ok := <-q.dataChan:
				if !ok {
					return
				}
				if err := q.PushBack(data); err != nil {
					log.Error("Unable to push back data into queue %s", q.name)
				}
				atomic.AddInt64(&q.numInQueue, -1)
			}
		default:
		}

		// empty the pushed channel
		select {
		case <-q.pushed:
		default:
		}

		err := q.doPop()

		util.StopTimer(backOffTimer)

		if err != nil {
			if err == errQueueEmpty && q.waitOnEmpty {
				log.Trace("%s: %s Waiting on Empty", q.typ, q.name)

				// reset the backoff time but don't set the timer
				backOffTime = 100 * time.Millisecond
			} else if err == errUnmarshal {
				// reset the timer and backoff
				backOffTime = 100 * time.Millisecond
				backOffTimer.Reset(backOffTime)
			} else {
				// backoff
				backOffTimer.Reset(backOffTime)
			}

			// Need to Backoff
			select {
			case <-q.shutdownCtx.Done():
				// Oops we've been shutdown whilst backing off
				// Make sure the worker pool is shutdown too
				q.baseCtxCancel()
				return
			case <-q.pushed:
				// Data has been pushed to the fifo (or flush has been called)
				// reset the backoff time
				backOffTime = 100 * time.Millisecond
				continue loop
			case <-backOffTimer.C:
				// Calculate the next backoff time
				backOffTime += backOffTime / 2
				if backOffTime > maxBackOffTime {
					backOffTime = maxBackOffTime
				}
				continue loop
			}
		}

		// Reset the backoff time
		backOffTime = 100 * time.Millisecond

		select {
		case <-q.shutdownCtx.Done():
			// Oops we've been shutdown
			// Make sure the worker pool is shutdown too
			q.baseCtxCancel()
			return
		default:
			continue loop
		}
	}
}

var (
	errQueueEmpty = fmt.Errorf("empty queue")
	errEmptyBytes = fmt.Errorf("empty bytes")
	errUnmarshal  = fmt.Errorf("failed to unmarshal")
)

func (q *ByteFIFOQueue) doPop() error {
	q.lock.Lock()
	defer q.lock.Unlock()
	bs, err := q.byteFIFO.Pop(q.shutdownCtx)
	if err != nil {
		if err == context.Canceled {
			q.baseCtxCancel()
			return err
		}
		log.Error("%s: %s Error on Pop: %v", q.typ, q.name, err)
		return err
	}
	if len(bs) == 0 {
		if q.waitOnEmpty && q.byteFIFO.Len(q.shutdownCtx) == 0 {
			return errQueueEmpty
		}
		return errEmptyBytes
	}

	data, err := unmarshalAs(bs, q.exemplar)
	if err != nil {
		log.Error("%s: %s Failed to unmarshal with error: %v", q.typ, q.name, err)
		return errUnmarshal
	}

	log.Trace("%s %s: Task found: %#v", q.typ, q.name, data)
	q.WorkerPool.Push(data)
	return nil
}
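
// exampleMemoryByteFIFO is an illustrative, non-persistent sketch of a fifo
// backend for the queue above. It is not part of the package's real wiring:
// the ByteFIFO interface is defined elsewhere, and the method set below is
// only inferred from how q.byteFIFO is used in this file, so treat it as an
// assumption rather than the canonical contract.
type exampleMemoryByteFIFO struct {
	mu    sync.Mutex
	items [][]byte
}

// Len returns the number of buffered items.
func (f *exampleMemoryByteFIFO) Len(ctx context.Context) int64 {
	f.mu.Lock()
	defer f.mu.Unlock()
	return int64(len(f.items))
}

// PushFunc calls fn (if any) and, when it succeeds, appends data to the fifo.
func (f *exampleMemoryByteFIFO) PushFunc(ctx context.Context, data []byte, fn func() error) error {
	if fn != nil {
		if err := fn(); err != nil {
			return err
		}
	}
	f.mu.Lock()
	defer f.mu.Unlock()
	f.items = append(f.items, data)
	return nil
}

// PushBack returns data to the front of the fifo (assumed semantics, since it
// is used above to re-queue unhandled work).
func (f *exampleMemoryByteFIFO) PushBack(ctx context.Context, data []byte) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.items = append([][]byte{data}, f.items...)
	return nil
}

// Pop removes and returns the oldest item, or (nil, nil) when empty, matching
// the empty-queue handling in doPop above.
func (f *exampleMemoryByteFIFO) Pop(ctx context.Context) ([]byte, error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	if len(f.items) == 0 {
		return nil, nil
	}
	bs := f.items[0]
	f.items = f.items[1:]
	return bs, nil
}

// Close is a no-op for the in-memory sketch.
func (f *exampleMemoryByteFIFO) Close() error {
	return nil
}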

// Shutdown processing from this queue
func (q *ByteFIFOQueue) Shutdown() {
	log.Trace("%s: %s Shutting down", q.typ, q.name)
	select {
	case <-q.shutdownCtx.Done():
		return
	default:
	}
	q.shutdownCtxCancel()
	log.Debug("%s: %s Shutdown", q.typ, q.name)
}

// IsShutdown returns a channel which is closed when this Queue is shutdown
func (q *ByteFIFOQueue) IsShutdown() <-chan struct{} {
	return q.shutdownCtx.Done()
}

// Terminate this queue and close the queue
func (q *ByteFIFOQueue) Terminate() {
	log.Trace("%s: %s Terminating", q.typ, q.name)
	q.Shutdown()
	select {
	case <-q.terminateCtx.Done():
		return
	default:
	}
	if log.IsDebug() {
		log.Debug("%s: %s Closing with %d tasks left in queue", q.typ, q.name, q.byteFIFO.Len(q.terminateCtx))
	}
	q.terminateCtxCancel()
	if err := q.byteFIFO.Close(); err != nil {
		log.Error("Error whilst closing internal byte fifo in %s: %s: %v", q.typ, q.name, err)
	}
	q.baseCtxFinished()
	log.Debug("%s: %s Terminated", q.typ, q.name)
}

// IsTerminated returns a channel which is closed when this Queue is terminated
func (q *ByteFIFOQueue) IsTerminated() <-chan struct{} {
	return q.terminateCtx.Done()
}

var _ UniqueQueue = &ByteFIFOUniqueQueue{}

// ByteFIFOUniqueQueue represents a UniqueQueue formed from a UniqueByteFifo
type ByteFIFOUniqueQueue struct {
	ByteFIFOQueue
}

// NewByteFIFOUniqueQueue creates a new ByteFIFOUniqueQueue
func NewByteFIFOUniqueQueue(typ Type, byteFIFO UniqueByteFIFO, handle HandlerFunc, cfg, exemplar interface{}) (*ByteFIFOUniqueQueue, error) {
	configInterface, err := toConfig(ByteFIFOQueueConfiguration{}, cfg)
	if err != nil {
		return nil, err
	}
	config := configInterface.(ByteFIFOQueueConfiguration)
	terminateCtx, terminateCtxCancel := context.WithCancel(context.Background())
	shutdownCtx, shutdownCtxCancel := context.WithCancel(terminateCtx)

	q := &ByteFIFOUniqueQueue{
		ByteFIFOQueue: ByteFIFOQueue{
			byteFIFO:           byteFIFO,
			typ:                typ,
			shutdownCtx:        shutdownCtx,
			shutdownCtxCancel:  shutdownCtxCancel,
			terminateCtx:       terminateCtx,
			terminateCtxCancel: terminateCtxCancel,
			exemplar:           exemplar,
			workers:            config.Workers,
			name:               config.Name,
		},
	}
	q.WorkerPool = NewWorkerPool(func(data ...Data) (failed []Data) {
		for _, unhandled := range handle(data...) {
			if fail := q.PushBack(unhandled); fail != nil {
				failed = append(failed, fail)
			}
		}
		return
	}, config.WorkerPoolConfiguration)

	return q, nil
}

// Has checks if the provided data is in the queue
func (q *ByteFIFOUniqueQueue) Has(data Data) (bool, error) {
	if !assignableTo(data, q.exemplar) {
		return false, fmt.Errorf("unable to assign data: %v to same type as exemplar: %v in %s", data, q.exemplar, q.name)
	}
	bs, err := json.Marshal(data)
	if err != nil {
		return false, err
	}
	return q.byteFIFO.(UniqueByteFIFO).Has(q.terminateCtx, bs)
}
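
// exampleRunByteFIFOQueue is a minimal, illustrative sketch of how a caller
// wires the pieces in this file together. The handler, configuration values
// and pushed payload are hypothetical; real callers supply a persistent
// ByteFIFO, a fully populated WorkerPoolConfiguration (defined elsewhere in
// this package) and lifecycle callbacks from their process manager.
func exampleRunByteFIFOQueue(typ Type, fifo ByteFIFO) {
	// A handler receives batches of deserialized Data and returns anything it
	// could not process so that it is pushed back into the fifo.
	handler := func(data ...Data) (unhandled []Data) {
		for _, datum := range data {
			log.Debug("example handler received: %v", datum)
		}
		return nil
	}

	cfg := ByteFIFOQueueConfiguration{
		// The embedded WorkerPoolConfiguration (queue name, batch size, worker
		// limits, ...) is left at its zero value here purely for brevity.
		Workers:     1,
		WaitOnEmpty: true,
	}

	// The exemplar ("" here) fixes the concrete type that pushes will accept
	// and that popped bytes are unmarshalled into.
	q, err := NewByteFIFOQueue(typ, fifo, handler, cfg, "")
	if err != nil {
		log.Error("unable to create example queue: %v", err)
		return
	}

	// Run blocks until shutdown, so it normally lives in its own goroutine; the
	// two callbacks are expected to register q.Shutdown and q.Terminate with
	// whatever manages the process lifecycle (they are no-ops in this sketch).
	go q.Run(func(func()) {}, func(func()) {})

	_ = q.Push("hello") // must match the exemplar's type (a string here)
}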