github.com/jxskiss/gopkg/v2@v2.14.9-0.20240514120614-899f3e7952b4/perf/gopool/pool.go

// Copyright 2021 ByteDance Inc.
// Copyright 2023 Shawn Wang <jxskiss@126.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gopool

import (
	"context"
	"math"
	"sync"
	"sync/atomic"
)

// Pool manages a goroutine pool and tasks for better performance;
// it reuses goroutines and limits the number of concurrently running goroutines.
type Pool = TypedPool[func()]

// NewPool creates a new pool with the config.
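//
// Illustrative usage (a minimal sketch; the field values are arbitrary, and
// any unset Config fields are filled in by checkAndSetDefaults):
//
//	p := NewPool(&Config{
//		Name:             "example",
//		AdhocWorkerLimit: 8,
//	})
//	p.Go(func() {
//		// do some work
//	})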
func NewPool(config *Config) *Pool {
	runner := funcTaskRunner
	p := &TypedPool[func()]{}
	p.init(config, runner)
	return p
}

// TypedPool is a task-specific pool.
// A TypedPool is like Pool, but it executes a handler to process values
// of a specific type.
// Compared to Pool, it helps to reduce unnecessary memory allocation of
// closures when submitting tasks.
type TypedPool[T any] struct {
	internalPool
}

// NewTypedPool creates a new task-specific pool with the given handler and config.
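//
// Illustrative usage (a minimal sketch; the handler body and Config values
// are arbitrary examples):
//
//	p := NewTypedPool(&Config{AdhocWorkerLimit: 8},
//		func(ctx context.Context, x int) {
//			// process x
//		})
//	p.Go(42)
//	p.CtxGo(context.Background(), 43)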
func NewTypedPool[T any](config *Config, handler func(context.Context, T)) *TypedPool[T] {
	runner := newTypedTaskRunner(handler)
	p := &TypedPool[T]{}
	p.init(config, runner)
	return p
}

// Go submits a task to the pool.
func (p *TypedPool[T]) Go(arg T) {
	p.submit(context.Background(), arg)
}

// CtxGo submits a task to the pool; it is preferred over Go.
func (p *TypedPool[T]) CtxGo(ctx context.Context, arg T) {
	p.submit(ctx, arg)
}

type internalPool struct {
	config *Config
	runner taskRunner

	// taskCh sends tasks to permanent workers.
	taskCh chan *task

	// mu protects adhocState and taskList.
	// adhocState:
	// - the higher 32 bits hold adhocLimit, the maximum number of adhoc workers that may run simultaneously
	// - the lower 32 bits hold adhocCount, the number of currently running adhoc workers
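	// For example, with a limit of 8 and 3 running adhoc workers,
	// adhocState == 8<<32 | 3.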
	mu         sync.Mutex
	adhocState int64
	taskList   taskList
}

func (p *internalPool) init(config *Config, runner taskRunner) {
	config.checkAndSetDefaults()
	p.config = config
	p.runner = runner
	p.SetAdhocWorkerLimit(config.AdhocWorkerLimit)
	p.startPermanentWorkers()
}

// Name returns the name of the pool.
func (p *internalPool) Name() string {
	return p.config.Name
}

// SetAdhocWorkerLimit changes the limit of adhoc workers.
// A zero or negative value means no limit.
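//
// For example, to cap adhoc workers at twice the number of CPUs
// (an illustrative policy, not a recommendation):
//
//	p.SetAdhocWorkerLimit(2 * runtime.GOMAXPROCS(0))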
func (p *internalPool) SetAdhocWorkerLimit(limit int) {
	if limit <= 0 || limit > math.MaxInt32 {
		limit = math.MaxInt32
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	oldLimit, _ := p.getAdhocState()
	diff := int32(limit) - oldLimit
	if diff != 0 {
		atomic.AddInt64(&p.adhocState, int64(diff)<<32)
	}
}

func (p *internalPool) submit(ctx context.Context, arg any) {
	t := newTask()
	t.ctx = ctx
	t.arg = arg

	// Try a permanent worker first.
	select {
	case p.taskCh <- t:
		return
	default:
	}

	// No permanent worker is free; either start a new adhoc worker
	// or push the task onto the task list.
	//
	// Start a new adhoc worker if there are currently no adhoc workers,
	// or both of the following conditions are met:
	//   1. The number of pending tasks, including this one, reaches ScaleThreshold.
	//   2. The current number of adhoc workers is below the limit.
	p.mu.Lock()
	tCnt := p.taskList.count + 1
	wLimit, wCnt := p.getAdhocState()
	if wCnt == 0 || (tCnt >= p.config.ScaleThreshold && wCnt < wLimit) {
		p.incAdhocWorkerCount()
		p.mu.Unlock()
		go p.adhocWorker(t)
	} else {
		p.taskList.add(t)
		p.mu.Unlock()
	}
}

// AdhocWorkerLimit returns the current limit of adhoc workers.
func (p *internalPool) AdhocWorkerLimit() int32 {
	limit, _ := p.getAdhocState()
	return limit
}

// AdhocWorkerCount returns the number of running adhoc workers.
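//
// Together with AdhocWorkerLimit, it can be polled to observe pool load,
// e.g. (sketch):
//
//	log.Printf("adhoc workers: %d / %d", p.AdhocWorkerCount(), p.AdhocWorkerLimit())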
func (p *internalPool) AdhocWorkerCount() int32 {
	_, count := p.getAdhocState()
	return count
}

func (p *internalPool) getAdhocState() (limit, count int32) {
	x := atomic.LoadInt64(&p.adhocState)
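	// The high 32 bits hold the limit; the low 32 bits hold the count.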
	return int32(x >> 32), int32((x << 32) >> 32)
}

func (p *internalPool) incAdhocWorkerCount() {
	atomic.AddInt64(&p.adhocState, 1)
}

func (p *internalPool) decAdhocWorkerCount() {
	atomic.AddInt64(&p.adhocState, -1)
}

// PermanentWorkerCount returns the number of permanent workers.
func (p *internalPool) PermanentWorkerCount() int32 {
	return int32(p.config.PermanentWorkerNum)
}

func (p *internalPool) startPermanentWorkers() {
	if p.config.PermanentWorkerNum <= 0 {
		return
	}
	p.taskCh = make(chan *task)
	for i := 0; i < p.config.PermanentWorkerNum; i++ {
		go p.permanentWorker()
	}
}

func (p *internalPool) permanentWorker() {
	for t := range p.taskCh {
		p.runner(p, t)

		// Drain pending tasks.
		for {
			p.mu.Lock()
			t = p.taskList.pop()
			p.mu.Unlock()
			if t == nil {
				break
			}
			p.runner(p, t)
		}
	}
}

func (p *internalPool) adhocWorker(t *task) {
	p.runner(p, t)
	for {
		p.mu.Lock()
		t = p.taskList.pop()
		if t == nil {
			p.decAdhocWorkerCount()
			p.mu.Unlock()
			return
		}
		p.mu.Unlock()
		p.runner(p, t)
	}
}