github.com/treeverse/lakefs@v1.24.1-0.20240520134607-95648127bfb0/pkg/testutil/stress/pool.go

package stress

import (
	"fmt"
	"os"
	"os/signal"
	"sync"
	"time"
)

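// Result records the outcome of a single unit of work: the error it returned
// (if any) and how long it took.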
type Result struct {
	Error error
	Took  time.Duration
}

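// WorkFn is the function each worker goroutine runs. It is expected to consume
// work items from input until the channel is closed, sending a Result for each
// item on output.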
type WorkFn func(input chan string, output chan Result)

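// GeneratorAddFn enqueues a single work item. GenerateFn produces the workload
// by calling the supplied add function once per item.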
type (
	GeneratorAddFn func(string)
	GenerateFn     func(add GeneratorAddFn)
)

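// WorkerPool runs a fixed number of worker goroutines that consume work items
// from Input and report a Result for each on Output.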
type WorkerPool struct {
	parallelism int
	Input       chan string
	Output      chan Result
	wg          sync.WaitGroup
	done        chan struct{}
}

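// NewWorkerPool returns a pool that will run parallelism workers once Start is
// called. Input and Output are unbuffered.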
func NewWorkerPool(parallelism int) *WorkerPool {
	return &WorkerPool{
		parallelism: parallelism,
		Input:       make(chan string),
		Output:      make(chan Result),
		done:        make(chan struct{}),
	}
}

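// Start launches the worker goroutines. A wait group and a channel act as a
// barrier so that no worker calls workFn before all of them are running; once
// every worker has returned, a value is sent on the pool's done channel.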
func (p *WorkerPool) Start(workFn WorkFn) {
	// spawn workers
	p.wg.Add(p.parallelism)

	// use a wait group and a channel as a barrier so all goroutines start calling the worker together
	var startWG sync.WaitGroup
	startWG.Add(p.parallelism)
	startCh := make(chan struct{})

	for i := 0; i < p.parallelism; i++ {
		go func() {
			defer p.wg.Done()
			startWG.Done()
			<-startCh
			workFn(p.Input, p.Output) // call the worker we were given
		}()
	}
	startWG.Wait()
	close(startCh)

	go func() {
		p.wg.Wait()
		p.done <- struct{}{}
	}()
}

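// Done returns a channel that receives a single value once all workers have
// finished.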
func (p *WorkerPool) Done() chan struct{} {
	return p.done
}

// Generator sets up a pool and a result collector
type Generator struct {
	name          string
	pool          *WorkerPool
	collector     *ResultCollector
	handleSignals []os.Signal
}

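// GeneratorOption customizes a Generator created by NewGenerator.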
type GeneratorOption func(*Generator)

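// WithSignalHandlersFor makes Run stop collecting (and print its summary) when
// any of the given OS signals is received.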
func WithSignalHandlersFor(sigs ...os.Signal) GeneratorOption {
	return func(generator *Generator) {
		generator.handleSignals = sigs
	}
}

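// NewGenerator builds a named Generator with a worker pool of the given
// parallelism and a collector that aggregates results from the pool's Output
// channel.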
func NewGenerator(name string, parallelism int, opts ...GeneratorOption) *Generator {
	pool := NewWorkerPool(parallelism)
	collector := NewResultCollector(pool.Output)
	g := &Generator{
		name:          name,
		pool:          pool,
		collector:     collector,
		handleSignals: []os.Signal{},
	}
	for _, opt := range opts {
		opt(g)
	}
	return g
}

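// addResult pushes a single work item onto the pool's input channel; it is the
// add function handed to the caller's GenerateFn.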
func (g *Generator) addResult(s string) {
	g.pool.Input <- s
}

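// Setup runs fn in the background to generate the work items, closing the
// pool's input channel once fn returns so the workers know no more work is
// coming.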
func (g *Generator) Setup(fn GenerateFn) {
	go func() {
		fn(g.addResult)
		close(g.pool.Input)
	}()
}

// Run starts the worker goroutines and prints their progress every second.
// Upon completion (or when a registered signal such as SIGTERM is received),
// it also prints a latency histogram.
func (g *Generator) Run(fn WorkFn) {
	go g.collector.Collect()
	g.pool.Start(fn)

	termSignal := make(chan os.Signal, 1)
	if len(g.handleSignals) > 0 {
		signal.Notify(termSignal, g.handleSignals...)
	}

	collecting := true
	ticker := time.NewTicker(time.Second)
	for collecting {
		select {
		case <-ticker.C:
			fmt.Printf("%s - %s\n", g.name, g.collector.Stats())
		case <-g.pool.Done():
			collecting = false
		case <-termSignal:
			collecting = false
		}
	}
	fmt.Printf("%s\n\n", g.collector.Stats())
	fmt.Printf("Histogram (ms):\n%s\n", g.collector.Histogram())
}
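
// A minimal usage sketch (illustrative only; the generator name, the item
// names, and doWork are assumptions, not part of this package):
//
//	g := NewGenerator("list-objects", 10, WithSignalHandlersFor(os.Interrupt))
//	g.Setup(func(add GeneratorAddFn) {
//		for i := 0; i < 1000; i++ {
//			add(fmt.Sprintf("item-%d", i))
//		}
//	})
//	g.Run(func(input chan string, output chan Result) {
//		for item := range input {
//			start := time.Now()
//			err := doWork(item) // hypothetical unit of work
//			output <- Result{Error: err, Took: time.Since(start)}
//		}
//	})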