github.com/tooploox/oya@v0.0.21-0.20230524103240-1cda1861aad6/pkg/mvs/internal/work.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package internal implements parallel execution helpers.
package internal

import (
	"math/rand"
	"sync"
	"sync/atomic"
)

// A Job is a single item in a Work set. ID identifies the item and must be a
// valid map key, since Work deduplicates items by ID; Payload carries the
// item's data for the function processing it.
type Job interface {
	ID() interface{}
	Payload() interface{}
}
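
// The type below is not part of the upstream file: it is a minimal sketch of
// one way a caller might satisfy Job. The moduleJob name and its fields are
// assumptions chosen purely for illustration.

// moduleJob identifies a piece of work by module path and carries the
// version to resolve as its payload.
type moduleJob struct {
	path    string
	version string
}

func (j moduleJob) ID() interface{}      { return j.path }
func (j moduleJob) Payload() interface{} { return j.version }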

// Work manages a set of work items to be executed in parallel, at most once each.
// The item IDs must all be valid map keys.
type Work struct {
	f       func(Job) // function to run for each item
	running int       // total number of runners

	mu      sync.Mutex
	added   map[interface{}]bool // item IDs added to set
	todo    []Job                // items yet to be run
	wait    sync.Cond            // wait when todo is empty
	waiting int                  // number of runners waiting for todo
}

func (w *Work) init() {
	if w.added == nil {
		w.added = make(map[interface{}]bool)
	}
}

// Add adds item to the work set, if an item with the same ID hasn't already been added.
func (w *Work) Add(item Job) {
	w.mu.Lock()
	w.init()
	if !w.added[item.ID()] {
		w.added[item.ID()] = true
		w.todo = append(w.todo, item)
		if w.waiting > 0 {
			w.wait.Signal()
		}
	}
	w.mu.Unlock()
}

// Do runs f in parallel on items from the work set,
// with at most n invocations of f running at a time.
// It returns when everything added to the work set has been processed.
// At least one item should have been added to the work set
// before calling Do (or else Do returns immediately),
// but it is allowed for f(item) to add new items to the set.
// Do should only be used once on a given Work.
func (w *Work) Do(n int, f func(item Job)) {
	if n < 1 {
		panic("internal.Work.Do: n < 1")
	}
	if w.running >= 1 {
		panic("internal.Work.Do: already called Do")
	}

	w.running = n
	w.f = f
	w.wait.L = &w.mu

	for i := 0; i < n-1; i++ {
		go w.runner()
	}
	w.runner()
}
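
// The function below is not part of the upstream file: it is a usage sketch
// for Work, assuming something like the hypothetical moduleJob type sketched
// after the Job interface. It seeds the set with root items, lets the worker
// function add newly discovered items back into the set, and runs at most
// four invocations of the worker at a time. The deps callback is an
// assumption for illustration only.
func exampleWorkUsage(roots []moduleJob, deps func(moduleJob) []moduleJob) {
	var w Work
	for _, r := range roots {
		w.Add(r)
	}
	w.Do(4, func(item Job) {
		j := item.(moduleJob)
		// Discovered items are added back into the set; items with an
		// ID that was already added are processed only once.
		for _, d := range deps(j) {
			w.Add(d)
		}
	})
}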

// runner executes work in w until both nothing is left to do
// and all the runners are waiting for work.
// (Then all the runners return.)
func (w *Work) runner() {
	for {
		// Wait for something to do.
		w.mu.Lock()
		for len(w.todo) == 0 {
			w.waiting++
			if w.waiting == w.running {
				// All done.
				w.wait.Broadcast()
				w.mu.Unlock()
				return
			}
			w.wait.Wait()
			w.waiting--
		}

		// Pick something to do at random,
		// to eliminate pathological contention
		// in case items added at about the same time
		// are most likely to contend.
		i := rand.Intn(len(w.todo))
		item := w.todo[i]
		w.todo[i] = w.todo[len(w.todo)-1]
		w.todo = w.todo[:len(w.todo)-1]
		w.mu.Unlock()

		w.f(item)
	}
}

// Cache runs an action once per key and caches the result.
type Cache struct {
	m sync.Map
}

type cacheEntry struct {
	done   uint32
	mu     sync.Mutex
	result interface{}
}

// Do calls the function f if and only if Do is being called for the first time with this key.
// No call to Do with a given key returns until the one call to f returns.
// Do returns the value returned by the one call to f.
func (c *Cache) Do(key interface{}, f func() interface{}) interface{} {
	entryIface, ok := c.m.Load(key)
	if !ok {
		entryIface, _ = c.m.LoadOrStore(key, new(cacheEntry))
	}
	e := entryIface.(*cacheEntry)
	if atomic.LoadUint32(&e.done) == 0 {
		e.mu.Lock()
		if atomic.LoadUint32(&e.done) == 0 {
			e.result = f()
			atomic.StoreUint32(&e.done, 1)
		}
		e.mu.Unlock()
	}
	return e.result
}
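
// The function below is not part of the upstream file: it is a minimal sketch
// of using Cache.Do to compute an expensive value at most once per key, even
// when called from many goroutines. The loadManifest callback is an
// assumption for illustration only.
func exampleCacheDo(c *Cache, path string, loadManifest func(string) []byte) []byte {
	v := c.Do(path, func() interface{} {
		// Runs only for the first Do call with this path; concurrent
		// calls with the same path block until it finishes and then
		// all return the cached result.
		return loadManifest(path)
	})
	return v.([]byte)
}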

// Get returns the cached result associated with key.
// It returns nil if there is no such result.
// If the result for key is being computed, Get does not wait for the computation to finish.
func (c *Cache) Get(key interface{}) interface{} {
	entryIface, ok := c.m.Load(key)
	if !ok {
		return nil
	}
	e := entryIface.(*cacheEntry)
	if atomic.LoadUint32(&e.done) == 0 {
		return nil
	}
	return e.result
}
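
// The function below is not part of the upstream file: it is a sketch of how
// Get differs from Do. Get never triggers or waits for a computation; it
// returns nil both for unknown keys and for keys whose computation has not
// finished yet. It assumes results were stored as []byte, e.g. by something
// like exampleCacheDo above.
func exampleCacheGet(c *Cache, path string) ([]byte, bool) {
	v := c.Get(path)
	if v == nil {
		// Either nothing was ever computed for path, or a Do call for
		// path is still running.
		return nil, false
	}
	return v.([]byte), true
}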