github.com/containers/libpod@v1.9.4-0.20220419124438-4284fd425507/cmd/podman/shared/workers.go

package shared

import (
	"reflect"
	"runtime"
	"strings"
	"sync"

	"github.com/sirupsen/logrus"
)

// JobFunc provides the function signature for the pooled functions
type JobFunc func() error

// Job defines the function to run
type Job struct {
	ID string
	Fn JobFunc
}

// JobResult defines the result of a completed Job
type JobResult struct {
	Job Job
	Err error
}

// Pool defines the worker pool and its queues
type Pool struct {
	id       string
	wg       *sync.WaitGroup
	jobs     chan Job
	results  chan JobResult
	size     int // number of worker goroutines started by Run
	capacity int // buffer size of the jobs and results channels
}

// NewPool creates and initializes a new Pool
func NewPool(id string, size int, capacity int) *Pool {
	var wg sync.WaitGroup

	// Clamp the worker count to the queue capacity (a hand-rolled min for
	// ints); extra workers would only sit idle.
	s := size
	if s > capacity {
		s = capacity
	}

	return &Pool{
		id:       id,
		wg:       &wg,
		jobs:     make(chan Job, capacity),
		results:  make(chan JobResult, capacity),
		size:     s,
		capacity: capacity,
	}
}
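
// Because NewPool clamps size to capacity, a call such as NewPool("rm", 16, 8)
// (illustrative values) would run 8 workers over channels buffered to 8 entries.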

// Add Job to pool for parallel processing. Workers drain the jobs channel
// only once Run is called, so at most capacity jobs can be queued ahead of
// Run without blocking.
func (p *Pool) Add(job Job) {
	p.wg.Add(1)
	p.jobs <- job
}

// Run the Jobs in the pool, gather and return the results
func (p *Pool) Run() ([]string, map[string]error, error) {
	var (
		ok       = []string{}
		failures = map[string]error{}
	)

	for w := 0; w < p.size; w++ {
		go p.newWorker(w)
	}
	close(p.jobs)
	p.wg.Wait()

	// Each worker sends its result before calling wg.Done, so once Wait
	// returns it is safe to close and drain the results channel.
	close(p.results)
	for r := range p.results {
		if r.Err == nil {
			ok = append(ok, r.Job.ID)
		} else {
			failures[r.Job.ID] = r.Err
		}
	}

	if logrus.GetLevel() == logrus.DebugLevel {
		for id, err := range failures {
			logrus.Debugf("Pool[%s, %s: %s]", p.id, id, err.Error())
		}
	}

	return ok, failures, nil
}
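
// A minimal sketch of the Add/Run lifecycle; the containers slice and the
// stopContainer helper below are hypothetical stand-ins for a real caller:
//
//	pool := NewPool("stop", DefaultPoolSize("stop"), len(containers))
//	for _, c := range containers {
//		c := c // capture the per-iteration value for the closure
//		pool.Add(Job{ID: c.ID, Fn: func() error { return stopContainer(c) }})
//	}
//	ok, failures, _ := pool.Run()
//	logrus.Debugf("%d stopped, %d failed", len(ok), len(failures))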

// newWorker runs a single parallel worker that consumes the jobs channel
// from the Pool until it is closed
func (p *Pool) newWorker(slot int) {
	for job := range p.jobs {
		err := job.Fn()
		p.results <- JobResult{job, err}
		if logrus.GetLevel() == logrus.DebugLevel {
			// Log only the trailing element of the dotted function name;
			// slicing at a fixed offset could panic on short names.
			n := strings.Split(runtime.FuncForPC(reflect.ValueOf(job.Fn).Pointer()).Name(), ".")
			logrus.Debugf("Worker#%d finished job %s/%s (%v)", slot, n[len(n)-1], job.ID, err)
		}
		p.wg.Done()
	}
}

// DefaultPoolSize provides the maximum number of parallel workers (int) as calculated by a basic
// heuristic. This can be overridden by the global --max-workers option to podman.
func DefaultPoolSize(name string) int {
	numCpus := runtime.NumCPU()
	switch name {
	case "init", "kill", "pause", "rm", "unpause":
		if numCpus <= 3 {
			return numCpus * 3
		}
		return numCpus * 4
	case "ps":
		return 8
	case "restart":
		return numCpus * 2
	case "stop":
		if numCpus <= 2 {
			return 4
		}
		return numCpus * 3
	}
	return 3
}
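
// Worked examples of the heuristic above (illustrative): on a 4-CPU machine,
// DefaultPoolSize("rm") returns 16 (numCpus > 3, so 4*4), DefaultPoolSize("stop")
// returns 12 (numCpus > 2, so 4*3), DefaultPoolSize("ps") is a flat 8, and any
// unlisted name falls back to 3.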