github.heygears.com/openimsdk/tools@v0.0.49/mq/memamq/queue.go

package memamq

import (
	"context"
	"errors"
	"sync"
	"sync/atomic"
	"time"
)

var (
	ErrStop = errors.New("push failed: queue is stopped")
	ErrFull = errors.New("push failed: queue is full")
)

// AsyncQueue is the interface responsible for asynchronous processing of functions.
//type AsyncQueue interface {
//	Initialize(processFunc func(), workerCount int, bufferSize int)
//	Push(task func()) error
//}

// MemoryQueue is an implementation of the AsyncQueue interface using a channel to process functions.
type MemoryQueue struct {
	taskChan  chan func()
	wg        sync.WaitGroup
	isStopped atomic.Bool
	count     atomic.Int64
	//stopMutex sync.Mutex // Mutex to protect access to isStopped
}

func NewMemoryQueue(workerCount int, bufferSize int) *MemoryQueue {
	if workerCount < 1 || bufferSize < 1 {
		panic("workerCount and bufferSize must be greater than 0")
	}
	mq := &MemoryQueue{}                   // Create a new instance of MemoryQueue
	mq.initialize(workerCount, bufferSize) // Initialize it with the specified parameters
	return mq
}

// initialize sets up the worker goroutines and the buffer size of the channel,
// starting internal goroutines to handle tasks from the channel.
func (mq *MemoryQueue) initialize(workerCount int, bufferSize int) {
	mq.taskChan = make(chan func(), bufferSize) // Initialize the channel with the provided buffer size.
	// Start multiple goroutines based on the specified workerCount.
	for i := 0; i < workerCount; i++ {
		mq.wg.Add(1)
		go func() {
			defer mq.wg.Done()
			for task := range mq.taskChan {
				task() // Execute the function
			}
		}()
	}
}

// Push submits a function to the queue.
// It returns an error if the queue is stopped or if the queue stays full for 100ms.
func (mq *MemoryQueue) Push(task func()) error {
	mq.count.Add(1)
	defer mq.count.Add(-1)
	if mq.isStopped.Load() {
		return ErrStop
	}
	timer := time.NewTimer(time.Millisecond * 100)
	defer timer.Stop()
	select {
	case mq.taskChan <- task:
		return nil
	case <-timer.C: // Timeout to avoid blocking indefinitely
		return ErrFull
	}
}

// PushCtx submits a function to the queue, giving up when ctx is canceled.
func (mq *MemoryQueue) PushCtx(ctx context.Context, task func()) error {
	mq.count.Add(1)
	defer mq.count.Add(-1)
	if mq.isStopped.Load() {
		return ErrStop
	}
	select {
	case mq.taskChan <- task:
		return nil
	case <-ctx.Done():
		return context.Cause(ctx)
	}
}

// BatchPushCtx submits the tasks in order and reports how many were accepted
// before ctx was canceled.
func (mq *MemoryQueue) BatchPushCtx(ctx context.Context, tasks ...func()) (int, error) {
	mq.count.Add(1)
	defer mq.count.Add(-1)
	if mq.isStopped.Load() {
		return 0, ErrStop
	}
	for i := range tasks {
		select {
		case <-ctx.Done():
			return i, context.Cause(ctx)
		case mq.taskChan <- tasks[i]:
		}
	}
	return len(tasks), nil
}

// NotWaitPush submits a function without blocking, returning ErrFull
// immediately if the channel buffer is full.
func (mq *MemoryQueue) NotWaitPush(task func()) error {
	mq.count.Add(1)
	defer mq.count.Add(-1)
	if mq.isStopped.Load() {
		return ErrStop
	}
	select {
	case mq.taskChan <- task:
		return nil
	default:
		return ErrFull
	}
}

// Stop terminates the internal goroutines and closes the channel.
func (mq *MemoryQueue) Stop() {
	if !mq.isStopped.CompareAndSwap(false, true) {
		return
	}
	mq.waitSafeClose()
	close(mq.taskChan)
	mq.wg.Wait()
}

// waitSafeClose blocks until no Push call is still in flight, so that the
// task channel can be closed without risking a send on a closed channel.
func (mq *MemoryQueue) waitSafeClose() {
	if mq.count.Load() == 0 {
		return
	}
	ticker := time.NewTicker(time.Second / 10)
	defer ticker.Stop()
	for range ticker.C {
		if mq.count.Load() == 0 {
			return
		}
	}
}
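
// Usage sketch (not part of the original queue.go): a minimal, hypothetical
// example of driving this queue from another program. It uses only the
// exported API above (NewMemoryQueue, Push, PushCtx, NotWaitPush, Stop); the
// queued work and the surrounding main package are illustrative assumptions.
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"time"
//
//		"github.heygears.com/openimsdk/tools/mq/memamq"
//	)
//
//	func main() {
//		// 4 workers draining a channel with room for 16 buffered tasks.
//		q := memamq.NewMemoryQueue(4, 16)
//
//		// Blocking push: waits up to 100ms for buffer space before ErrFull.
//		if err := q.Push(func() { fmt.Println("hello from a worker") }); err != nil {
//			fmt.Println("push failed:", err)
//		}
//
//		// Context-bound push: gives up when the context is canceled.
//		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
//		defer cancel()
//		if err := q.PushCtx(ctx, func() { fmt.Println("ctx task") }); err != nil {
//			fmt.Println("push failed:", err)
//		}
//
//		// Non-blocking push: returns ErrFull immediately if the buffer is full.
//		_ = q.NotWaitPush(func() { fmt.Println("best-effort task") })
//
//		// Stop waits for in-flight pushes, closes the channel, and waits for the workers to drain it.
//		q.Stop()
//	}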