code.gitea.io/gitea@v1.19.3/modules/queue/manager.go

// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package queue

import (
	"context"
	"fmt"
	"reflect"
	"sort"
	"strings"
	"sync"
	"time"

	"code.gitea.io/gitea/modules/json"
	"code.gitea.io/gitea/modules/log"
)

var manager *Manager

// Manager is a queue manager
type Manager struct {
	mutex sync.Mutex

	counter int64
	Queues  map[int64]*ManagedQueue
}

// ManagedQueue represents a working queue with a Pool of workers.
//
// Although a ManagedQueue should really represent a Queue, this does not
// necessarily have to be the case. This could be used to describe any queue.WorkerPool.
type ManagedQueue struct {
	mutex         sync.Mutex
	QID           int64
	Type          Type
	Name          string
	Configuration interface{}
	ExemplarType  string
	Managed       interface{}
	counter       int64
	PoolWorkers   map[int64]*PoolWorkers
}

// Flushable represents a pool or queue that is flushable
type Flushable interface {
	// Flush will add a flush worker to the pool - the worker should be autoregistered with the manager
	Flush(time.Duration) error
	// FlushWithContext is very similar to Flush
	// NB: The worker will not be registered with the manager.
	FlushWithContext(ctx context.Context) error
	// IsEmpty returns whether the managed pool is empty and has no work
	IsEmpty() bool
}

// Pausable represents a pool or queue that is Pausable
type Pausable interface {
	// IsPaused returns whether the pool or queue is paused
	IsPaused() bool
	// Pause will pause the pool or queue
	Pause()
	// Resume will resume the pool or queue
	Resume()
	// IsPausedIsResumed returns two channels: paused is closed if the pool or queue is paused,
	// and resumed is closed when it is resumed
	IsPausedIsResumed() (paused, resumed <-chan struct{})
}
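
// waitIfPausedExample is an illustrative sketch added for this listing (it is not part
// of the upstream file). It shows one way a consumer might use IsPausedIsResumed,
// assuming the semantics described in the comment above: the paused channel is closed
// while paused and the resumed channel is closed once resumed, so selecting on both
// lets a worker block while paused and continue as soon as the queue is resumed.
func waitIfPausedExample(q Pausable) {
	paused, resumed := q.IsPausedIsResumed()
	select {
	case <-paused:
		// Paused: wait until Resume() closes the resumed channel.
		<-resumed
	case <-resumed:
		// Not paused: the resumed channel is already closed, continue immediately.
	}
}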

// ManagedPool is a simple interface to get certain details from a worker pool
type ManagedPool interface {
	// AddWorkers adds a number of workers as a group to the pool with the provided timeout. A CancelFunc is provided to cancel the group
	AddWorkers(number int, timeout time.Duration) context.CancelFunc
	// NumberOfWorkers returns the total number of workers in the pool
	NumberOfWorkers() int
	// MaxNumberOfWorkers returns the maximum number of workers the pool can dynamically grow to
	MaxNumberOfWorkers() int
	// SetMaxNumberOfWorkers sets the maximum number of workers the pool can dynamically grow to
	SetMaxNumberOfWorkers(int)
	// BoostTimeout returns the current timeout for worker groups created during a boost
	BoostTimeout() time.Duration
	// BlockTimeout returns the timeout the internal channel can block for before a boost would occur
	BlockTimeout() time.Duration
	// BoostWorkers returns the number of workers to be created during a boost
	BoostWorkers() int
	// SetPoolSettings sets the user updatable settings for the pool
	SetPoolSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration)
	// NumberInQueue returns the total number of items in the pool
	NumberInQueue() int64
	// Done returns a channel that will be closed when the Pool's baseCtx is closed
	Done() <-chan struct{}
}

// ManagedQueueList implements the sort.Interface
type ManagedQueueList []*ManagedQueue

// PoolWorkers represents a group of workers working on a queue
type PoolWorkers struct {
	PID        int64
	Workers    int
	Start      time.Time
	Timeout    time.Time
	HasTimeout bool
	Cancel     context.CancelFunc
	IsFlusher  bool
}

// PoolWorkersList implements the sort.Interface for PoolWorkers
type PoolWorkersList []*PoolWorkers

func init() {
	_ = GetManager()
}

// GetManager returns a Manager and initializes one as singleton if there's none yet
func GetManager() *Manager {
	if manager == nil {
		manager = &Manager{
			Queues: make(map[int64]*ManagedQueue),
		}
	}
	return manager
}

// Add adds a queue to this manager
func (m *Manager) Add(managed interface{},
	t Type,
	configuration,
	exemplar interface{},
) int64 {
	cfg, _ := json.Marshal(configuration)
	mq := &ManagedQueue{
		Type:          t,
		Configuration: string(cfg),
		ExemplarType:  reflect.TypeOf(exemplar).String(),
		PoolWorkers:   make(map[int64]*PoolWorkers),
		Managed:       managed,
	}
	m.mutex.Lock()
	m.counter++
	mq.QID = m.counter
	mq.Name = fmt.Sprintf("queue-%d", mq.QID)
	if named, ok := managed.(Named); ok {
		name := named.Name()
		if len(name) > 0 {
			mq.Name = name
		}
	}
	m.Queues[mq.QID] = mq
	m.mutex.Unlock()
	log.Trace("Queue Manager registered: %s (QID: %d)", mq.Name, mq.QID)
	return mq.QID
}
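
// addQueueExample is an illustrative sketch added for this listing (not upstream code).
// It shows the typical register/unregister flow around Add and Remove. The "dummy"
// Type value and the empty-string exemplar are placeholders assumed for the example,
// not a statement about which queue types exist.
func addQueueExample(managed interface{}, configuration interface{}) {
	// Register the managed object; Add assigns a QID and derives a name from the
	// Named interface if the object implements it.
	qid := GetManager().Add(managed, Type("dummy"), configuration, "")
	defer GetManager().Remove(qid)

	// The managed queue can now be looked up and inspected by its QID.
	if mq := GetManager().GetManagedQueue(qid); mq != nil {
		log.Trace("registered %s with %d workers", mq.Name, mq.NumberOfWorkers())
	}
}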

// Remove a queue from the Manager
func (m *Manager) Remove(qid int64) {
	m.mutex.Lock()
	delete(m.Queues, qid)
	m.mutex.Unlock()
	log.Trace("Queue Manager removed: QID: %d", qid)
}

// GetManagedQueue returns the managed queue with the given qid
func (m *Manager) GetManagedQueue(qid int64) *ManagedQueue {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	return m.Queues[qid]
}

// FlushAll flushes all the flushable queues attached to this manager
func (m *Manager) FlushAll(baseCtx context.Context, timeout time.Duration) error {
	var ctx context.Context
	var cancel context.CancelFunc
	start := time.Now()
	end := start
	hasTimeout := false
	if timeout > 0 {
		ctx, cancel = context.WithTimeout(baseCtx, timeout)
		end = start.Add(timeout)
		hasTimeout = true
	} else {
		ctx, cancel = context.WithCancel(baseCtx)
	}
	defer cancel()

	for {
		select {
		case <-ctx.Done():
			mqs := m.ManagedQueues()
			nonEmptyQueues := []string{}
			for _, mq := range mqs {
				if !mq.IsEmpty() {
					nonEmptyQueues = append(nonEmptyQueues, mq.Name)
				}
			}
			if len(nonEmptyQueues) > 0 {
				return fmt.Errorf("flush timeout with non-empty queues: %s", strings.Join(nonEmptyQueues, ", "))
			}
			return nil
		default:
		}
		mqs := m.ManagedQueues()
		log.Debug("Found %d Managed Queues", len(mqs))
		wg := sync.WaitGroup{}
		wg.Add(len(mqs))
		allEmpty := true
		for _, mq := range mqs {
			if mq.IsEmpty() {
				wg.Done()
				continue
			}
			if pausable, ok := mq.Managed.(Pausable); ok {
				// No point flushing paused queues
				if pausable.IsPaused() {
					wg.Done()
					continue
				}
			}
			if pool, ok := mq.Managed.(ManagedPool); ok {
				// No point in flushing pools whose base ctx is already done.
				select {
				case <-pool.Done():
					wg.Done()
					continue
				default:
				}
			}

			allEmpty = false
			if flushable, ok := mq.Managed.(Flushable); ok {
				log.Debug("Flushing (flushable) queue: %s", mq.Name)
				go func(q *ManagedQueue) {
					localCtx, localCtxCancel := context.WithCancel(ctx)
					pid := q.RegisterWorkers(1, start, hasTimeout, end, localCtxCancel, true)
					err := flushable.FlushWithContext(localCtx)
					if err != nil && err != ctx.Err() {
						cancel()
					}
					q.CancelWorkers(pid)
					localCtxCancel()
					wg.Done()
				}(mq)
			} else {
				log.Debug("Queue: %s is non-empty but is not flushable", mq.Name)
				wg.Done()
			}
		}
		if allEmpty {
			log.Debug("All queues are empty")
			break
		}
		// Ensure at least 100ms between loop iterations (the flushing above may well take
		// longer), but do not delay cancellation here.
		select {
		case <-ctx.Done():
		case <-time.After(100 * time.Millisecond):
		}
		wg.Wait()
	}
	return nil
}
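
// flushAllExample is an illustrative sketch added for this listing (not upstream code).
// It shows how a caller might drain every registered queue before shutdown: FlushAll
// keeps looping until all flushable queues report empty, or until the 30-second
// timeout (or the passed-in context) expires.
func flushAllExample(ctx context.Context) {
	if err := GetManager().FlushAll(ctx, 30*time.Second); err != nil {
		// A non-nil error names the queues that were still non-empty at the deadline.
		log.Error("flush did not complete: %v", err)
	}
}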

// ManagedQueues returns the managed queues
func (m *Manager) ManagedQueues() []*ManagedQueue {
	m.mutex.Lock()
	mqs := make([]*ManagedQueue, 0, len(m.Queues))
	for _, mq := range m.Queues {
		mqs = append(mqs, mq)
	}
	m.mutex.Unlock()
	sort.Sort(ManagedQueueList(mqs))
	return mqs
}

// Workers returns the pool workers registered on this queue
func (q *ManagedQueue) Workers() []*PoolWorkers {
	q.mutex.Lock()
	workers := make([]*PoolWorkers, 0, len(q.PoolWorkers))
	for _, worker := range q.PoolWorkers {
		workers = append(workers, worker)
	}
	q.mutex.Unlock()
	sort.Sort(PoolWorkersList(workers))
	return workers
}

// RegisterWorkers registers workers to this queue
func (q *ManagedQueue) RegisterWorkers(number int, start time.Time, hasTimeout bool, timeout time.Time, cancel context.CancelFunc, isFlusher bool) int64 {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	q.counter++
	q.PoolWorkers[q.counter] = &PoolWorkers{
		PID:        q.counter,
		Workers:    number,
		Start:      start,
		Timeout:    timeout,
		HasTimeout: hasTimeout,
		Cancel:     cancel,
		IsFlusher:  isFlusher,
	}
	return q.counter
}

// CancelWorkers cancels pooled workers with pid
func (q *ManagedQueue) CancelWorkers(pid int64) {
	q.mutex.Lock()
	pw, ok := q.PoolWorkers[pid]
	q.mutex.Unlock()
	if !ok {
		return
	}
	pw.Cancel()
}

// RemoveWorkers deletes pooled workers with pid
func (q *ManagedQueue) RemoveWorkers(pid int64) {
	q.mutex.Lock()
	pw, ok := q.PoolWorkers[pid]
	delete(q.PoolWorkers, pid)
	q.mutex.Unlock()
	if ok && pw.Cancel != nil {
		pw.Cancel()
	}
}

// AddWorkers adds workers to the queue if the managed pool supports it
func (q *ManagedQueue) AddWorkers(number int, timeout time.Duration) context.CancelFunc {
	if pool, ok := q.Managed.(ManagedPool); ok {
		// the cancel will be added to the pool workers description above
		return pool.AddWorkers(number, timeout)
	}
	return nil
}
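
// boostWorkersExample is an illustrative sketch added for this listing (not upstream
// code). It temporarily adds two workers to a managed queue for one minute; the
// returned CancelFunc (nil when the managed object is not a ManagedPool) can be used
// to retire the group early.
func boostWorkersExample(q *ManagedQueue) {
	cancel := q.AddWorkers(2, time.Minute)
	if cancel != nil {
		// Cancel immediately only to keep the example self-contained; a real caller
		// would usually let the workers run until their timeout.
		cancel()
	}
}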

// Flushable returns true if the queue is flushable
func (q *ManagedQueue) Flushable() bool {
	_, ok := q.Managed.(Flushable)
	return ok
}

// Flush flushes the queue with a timeout
func (q *ManagedQueue) Flush(timeout time.Duration) error {
	if flushable, ok := q.Managed.(Flushable); ok {
		// the cancel will be added to the pool workers description above
		return flushable.Flush(timeout)
	}
	return nil
}

// IsEmpty returns whether the queue is empty
func (q *ManagedQueue) IsEmpty() bool {
	if flushable, ok := q.Managed.(Flushable); ok {
		return flushable.IsEmpty()
	}
	return true
}

// Pausable returns whether the queue is Pausable
func (q *ManagedQueue) Pausable() bool {
	_, ok := q.Managed.(Pausable)
	return ok
}

// Pause pauses the queue
func (q *ManagedQueue) Pause() {
	if pausable, ok := q.Managed.(Pausable); ok {
		pausable.Pause()
	}
}

// IsPaused returns whether the queue is paused
func (q *ManagedQueue) IsPaused() bool {
	if pausable, ok := q.Managed.(Pausable); ok {
		return pausable.IsPaused()
	}
	return false
}

// Resume resumes the queue
func (q *ManagedQueue) Resume() {
	if pausable, ok := q.Managed.(Pausable); ok {
		pausable.Resume()
	}
}

// NumberOfWorkers returns the number of workers in the queue
func (q *ManagedQueue) NumberOfWorkers() int {
	if pool, ok := q.Managed.(ManagedPool); ok {
		return pool.NumberOfWorkers()
	}
	return -1
}

// MaxNumberOfWorkers returns the maximum number of workers for the pool
func (q *ManagedQueue) MaxNumberOfWorkers() int {
	if pool, ok := q.Managed.(ManagedPool); ok {
		return pool.MaxNumberOfWorkers()
	}
	return 0
}

// BoostWorkers returns the number of workers for a boost
func (q *ManagedQueue) BoostWorkers() int {
	if pool, ok := q.Managed.(ManagedPool); ok {
		return pool.BoostWorkers()
	}
	return -1
}

// BoostTimeout returns the timeout of the next boost
func (q *ManagedQueue) BoostTimeout() time.Duration {
	if pool, ok := q.Managed.(ManagedPool); ok {
		return pool.BoostTimeout()
	}
	return 0
}

// BlockTimeout returns the timeout until the next boost
func (q *ManagedQueue) BlockTimeout() time.Duration {
	if pool, ok := q.Managed.(ManagedPool); ok {
		return pool.BlockTimeout()
	}
	return 0
}

// SetPoolSettings sets the settable boost values
func (q *ManagedQueue) SetPoolSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration) {
	if pool, ok := q.Managed.(ManagedPool); ok {
		pool.SetPoolSettings(maxNumberOfWorkers, boostWorkers, timeout)
	}
}

// NumberInQueue returns the number of items in the queue
func (q *ManagedQueue) NumberInQueue() int64 {
	if pool, ok := q.Managed.(ManagedPool); ok {
		return pool.NumberInQueue()
	}
	return -1
}
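
// logQueueStatsExample is an illustrative sketch added for this listing (not upstream
// code). It walks every registered queue and logs a few of the getters above; -1
// values simply mean the managed object is not a ManagedPool.
func logQueueStatsExample() {
	for _, mq := range GetManager().ManagedQueues() {
		log.Debug("queue %s: workers=%d in-queue=%d paused=%v",
			mq.Name, mq.NumberOfWorkers(), mq.NumberInQueue(), mq.IsPaused())
	}
}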

func (l ManagedQueueList) Len() int {
	return len(l)
}

func (l ManagedQueueList) Less(i, j int) bool {
	return l[i].Name < l[j].Name
}

func (l ManagedQueueList) Swap(i, j int) {
	l[i], l[j] = l[j], l[i]
}

func (l PoolWorkersList) Len() int {
	return len(l)
}

func (l PoolWorkersList) Less(i, j int) bool {
	return l[i].Start.Before(l[j].Start)
}

func (l PoolWorkersList) Swap(i, j int) {
	l[i], l[j] = l[j], l[i]
}