code.gitea.io/gitea@v1.19.3/modules/queue/unique_queue_channel.go

// Copyright 2020 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package queue

import (
	"context"
	"fmt"
	"runtime/pprof"
	"sync"
	"time"

	"code.gitea.io/gitea/modules/container"
	"code.gitea.io/gitea/modules/json"
	"code.gitea.io/gitea/modules/log"
)

// ChannelUniqueQueueType is the type for the channel unique queue
const ChannelUniqueQueueType Type = "unique-channel"

// ChannelUniqueQueueConfiguration is the configuration for a ChannelUniqueQueue
type ChannelUniqueQueueConfiguration ChannelQueueConfiguration

// ChannelUniqueQueue implements UniqueQueue
//
// It is basically a thin wrapper around a WorkerPool, but it additionally keeps a
// table of what has been pushed so that duplicate pushes can be rejected.
//
// Please note that this Queue does not guarantee that a particular
// task cannot be processed twice or more at the same time. Uniqueness is
// only guaranteed whilst the task is waiting in the queue.
type ChannelUniqueQueue struct {
	*WorkerPool
	lock               sync.Mutex
	table              container.Set[string]
	shutdownCtx        context.Context
	shutdownCtxCancel  context.CancelFunc
	terminateCtx       context.Context
	terminateCtxCancel context.CancelFunc
	exemplar           interface{}
	workers            int
	name               string
}

// NewChannelUniqueQueue creates a memory channel unique queue
func NewChannelUniqueQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
	configInterface, err := toConfig(ChannelUniqueQueueConfiguration{}, cfg)
	if err != nil {
		return nil, err
	}
	config := configInterface.(ChannelUniqueQueueConfiguration)
	if config.BatchLength == 0 {
		config.BatchLength = 1
	}

	terminateCtx, terminateCtxCancel := context.WithCancel(context.Background())
	shutdownCtx, shutdownCtxCancel := context.WithCancel(terminateCtx)

	queue := &ChannelUniqueQueue{
		table:              make(container.Set[string]),
		shutdownCtx:        shutdownCtx,
		shutdownCtxCancel:  shutdownCtxCancel,
		terminateCtx:       terminateCtx,
		terminateCtxCancel: terminateCtxCancel,
		exemplar:           exemplar,
		workers:            config.Workers,
		name:               config.Name,
	}
	queue.WorkerPool = NewWorkerPool(func(data ...Data) (unhandled []Data) {
		for _, datum := range data {
			// No error is possible here because PushFunc ensures that this can be marshalled
			bs, _ := json.Marshal(datum)

			// Remove the datum from the uniqueness table before handling it, so that it can
			// be pushed again while it is being processed
			queue.lock.Lock()
			queue.table.Remove(string(bs))
			queue.lock.Unlock()

			if u := handle(datum); u != nil {
				if queue.IsPaused() {
					// We can only pushback to the channel if we're paused.
					go func() {
						if err := queue.Push(u[0]); err != nil {
							log.Error("Unable to push back to queue %d. Error: %v", queue.qid, err)
						}
					}()
				} else {
					unhandled = append(unhandled, u...)
				}
			}
		}
		return unhandled
	}, config.WorkerPoolConfiguration)

	queue.qid = GetManager().Add(queue, ChannelUniqueQueueType, config, exemplar)
	return queue, nil
}
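
// Illustrative sketch of constructing a ChannelUniqueQueue directly. The handler
// body and the configuration values below are assumptions for demonstration only;
// only the fields this file actually reads (Name, Workers, BatchLength) are set,
// and a string exemplar is used so plain strings can be pushed. In Gitea itself
// queues are normally created through the settings-driven helpers rather than by
// calling this constructor directly.
//
//	handler := func(data ...Data) (unhandled []Data) {
//		for _, datum := range data {
//			log.Info("processing %v", datum)
//		}
//		return nil
//	}
//
//	var cfg ChannelUniqueQueueConfiguration
//	cfg.Name = "example-unique-queue"
//	cfg.Workers = 1
//	cfg.BatchLength = 1
//
//	q, err := NewChannelUniqueQueue(handler, cfg, "")
//	if err != nil {
//		log.Error("unable to create queue: %v", err)
//	}
//	uq := q.(*ChannelUniqueQueue)
//	uq.Run(func(shutdown func()) {}, func(terminate func()) {}) // no-op shutdown/terminate registration, for illustration only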

// Run starts processing the queue
func (q *ChannelUniqueQueue) Run(atShutdown, atTerminate func(func())) {
	pprof.SetGoroutineLabels(q.baseCtx)
	atShutdown(q.Shutdown)
	atTerminate(q.Terminate)
	log.Debug("ChannelUniqueQueue: %s Starting", q.name)
	_ = q.AddWorkers(q.workers, 0)
}

// Push will push data into the queue if the data is not already in the queue
func (q *ChannelUniqueQueue) Push(data Data) error {
	return q.PushFunc(data, nil)
}

// PushFunc will push data into the queue, calling fn after the data has been added
// to the uniqueness table but before it is handed to the worker pool; if fn returns
// an error the push is aborted
func (q *ChannelUniqueQueue) PushFunc(data Data, fn func() error) error {
	if !assignableTo(data, q.exemplar) {
		return fmt.Errorf("unable to assign data: %v to same type as exemplar: %v in queue: %s", data, q.exemplar, q.name)
	}

	bs, err := json.Marshal(data)
	if err != nil {
		return err
	}
	q.lock.Lock()
	locked := true
	defer func() {
		if locked {
			q.lock.Unlock()
		}
	}()
	if !q.table.Add(string(bs)) {
		return ErrAlreadyInQueue
	}
	// FIXME: We probably need to implement some sort of limit here
	// If the downstream queue blocks this table will grow without limit
	if fn != nil {
		err := fn()
		if err != nil {
			q.table.Remove(string(bs))
			return err
		}
	}
	locked = false
	q.lock.Unlock()
	q.WorkerPool.Push(data)
	return nil
}
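
// Illustrative sketch of the uniqueness guarantee: assuming uq is the
// *ChannelUniqueQueue built in the sketch above, the second Push fails with
// ErrAlreadyInQueue because the marshalled payload is still in the uniqueness
// table; persistTask is a hypothetical callback shown only to demonstrate when
// fn runs.
//
//	if err := uq.PushFunc("task-1", func() error {
//		// runs after the table entry has been added, before the worker pool sees the data
//		return persistTask("task-1")
//	}); err != nil {
//		log.Error("unable to queue task-1: %v", err)
//	}
//
//	if err := uq.Push("task-1"); err == ErrAlreadyInQueue {
//		log.Debug("duplicate push rejected while task-1 is still waiting")
//	}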

// Has checks if the data is in the queue
func (q *ChannelUniqueQueue) Has(data Data) (bool, error) {
	bs, err := json.Marshal(data)
	if err != nil {
		return false, err
	}

	q.lock.Lock()
	defer q.lock.Unlock()
	return q.table.Contains(string(bs)), nil
}
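
// Illustrative sketch: Has only reflects whether the marshalled payload is still
// in the uniqueness table, so it reports true between a successful Push and the
// moment the worker wrapper above removes the entry when handling starts.
//
//	if queued, err := uq.Has("task-1"); err == nil && queued {
//		log.Debug("task-1 is still waiting in the queue")
//	}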

// Flush flushes the channel with a timeout - the Flush worker will be registered as a flush worker with the manager
func (q *ChannelUniqueQueue) Flush(timeout time.Duration) error {
	if q.IsPaused() {
		return nil
	}
	ctx, cancel := q.commonRegisterWorkers(1, timeout, true)
	defer cancel()
	return q.FlushWithContext(ctx)
}
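
// Illustrative sketch: draining pending items with a bounded wait, for example
// before pausing or reconfiguring the queue. The timeout value is an assumption.
//
//	if err := uq.Flush(30 * time.Second); err != nil {
//		log.Error("flush of %s did not complete: %v", uq.Name(), err)
//	}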

// Shutdown processing from this queue
func (q *ChannelUniqueQueue) Shutdown() {
	log.Trace("ChannelUniqueQueue: %s Shutting down", q.name)
	select {
	case <-q.shutdownCtx.Done():
		return
	default:
	}
	// Flush the remaining items in the background; the flush is bounded by the terminate context
	go func() {
		log.Trace("ChannelUniqueQueue: %s Flushing", q.name)
		if err := q.FlushWithContext(q.terminateCtx); err != nil {
			if !q.IsEmpty() {
				log.Warn("ChannelUniqueQueue: %s Terminated before completed flushing", q.name)
			}
			return
		}
		log.Debug("ChannelUniqueQueue: %s Flushed", q.name)
	}()
	q.shutdownCtxCancel()
	log.Debug("ChannelUniqueQueue: %s Shutdown", q.name)
}

// Terminate this queue and close the queue
func (q *ChannelUniqueQueue) Terminate() {
	log.Trace("ChannelUniqueQueue: %s Terminating", q.name)
	q.Shutdown()
	select {
	case <-q.terminateCtx.Done():
		return
	default:
	}
	q.terminateCtxCancel()
	q.baseCtxFinished()
	log.Debug("ChannelUniqueQueue: %s Terminated", q.name)
}

// Name returns the name of this queue
func (q *ChannelUniqueQueue) Name() string {
	return q.name
}

func init() {
	// Register the constructor so that queues of this type can be created via the ChannelUniqueQueueType constant
	queuesMap[ChannelUniqueQueueType] = NewChannelUniqueQueue
}