github.com/containers/libpod@v1.9.4-0.20220419124438-4284fd425507/cmd/podman/shared/parallel.go (about)

     1  package shared
     2  
     3  import (
     4  	"runtime"
     5  	"sync"
     6  )
     7  
// pFunc is the unit of work run against a single container: a no-argument
// operation that reports success or failure via its error return.
type pFunc func() error
     9  
// ParallelWorkerInput is a struct used to pass in a slice of parallel funcs to be
// performed on a container ID
type ParallelWorkerInput struct {
	// ContainerID identifies the container the function operates on; it is
	// used as the key in the result map returned by ParallelExecuteWorkerPool.
	ContainerID  string
	// ParallelFunc is the operation to execute for this container.
	ParallelFunc pFunc
}
    16  
// containerError pairs a container ID with the error (possibly nil) produced
// by running its ParallelFunc; it is the per-job result sent back to
// ParallelExecuteWorkerPool over the results channel.
type containerError struct {
	ContainerID string
	Err         error
}
    21  
    22  // ParallelWorker is a "threaded" worker that takes jobs from the channel "queue"
    23  func ParallelWorker(wg *sync.WaitGroup, jobs <-chan ParallelWorkerInput, results chan<- containerError) {
    24  	for j := range jobs {
    25  		err := j.ParallelFunc()
    26  		results <- containerError{ContainerID: j.ContainerID, Err: err}
    27  		wg.Done()
    28  	}
    29  }
    30  
    31  // ParallelExecuteWorkerPool takes container jobs and performs them in parallel.  The worker
    32  // int determines how many workers/threads should be premade.
    33  func ParallelExecuteWorkerPool(workers int, functions []ParallelWorkerInput) (map[string]error, int) {
    34  	var (
    35  		wg         sync.WaitGroup
    36  		errorCount int
    37  	)
    38  
    39  	resultChan := make(chan containerError, len(functions))
    40  	results := make(map[string]error)
    41  	paraJobs := make(chan ParallelWorkerInput, len(functions))
    42  
    43  	// If we have more workers than functions, match up the number of workers and functions
    44  	if workers > len(functions) {
    45  		workers = len(functions)
    46  	}
    47  
    48  	// Create the workers
    49  	for w := 1; w <= workers; w++ {
    50  		go ParallelWorker(&wg, paraJobs, resultChan)
    51  	}
    52  
    53  	// Add jobs to the workers
    54  	for _, j := range functions {
    55  		j := j
    56  		wg.Add(1)
    57  		paraJobs <- j
    58  	}
    59  
    60  	close(paraJobs)
    61  	wg.Wait()
    62  
    63  	close(resultChan)
    64  	for ctrError := range resultChan {
    65  		results[ctrError.ContainerID] = ctrError.Err
    66  		if ctrError.Err != nil {
    67  			errorCount += 1
    68  		}
    69  	}
    70  
    71  	return results, errorCount
    72  }
    73  
    74  // Parallelize provides the maximum number of parallel workers (int) as calculated by a basic
    75  // heuristic. This can be overridden by the --max-workers primary switch to podman.
    76  func Parallelize(job string) int {
    77  	numCpus := runtime.NumCPU()
    78  	switch job {
    79  	case "kill":
    80  		if numCpus <= 3 {
    81  			return numCpus * 3
    82  		}
    83  		return numCpus * 4
    84  	case "pause":
    85  		if numCpus <= 3 {
    86  			return numCpus * 3
    87  		}
    88  		return numCpus * 4
    89  	case "ps":
    90  		return 8
    91  	case "restart":
    92  		return numCpus * 2
    93  	case "rm":
    94  		if numCpus <= 3 {
    95  			return numCpus * 3
    96  		} else {
    97  			return numCpus * 4
    98  		}
    99  	case "stop":
   100  		if numCpus <= 2 {
   101  			return 4
   102  		} else {
   103  			return numCpus * 3
   104  		}
   105  	case "unpause":
   106  		if numCpus <= 3 {
   107  			return numCpus * 3
   108  		}
   109  		return numCpus * 4
   110  	}
   111  	return 3
   112  }