github.com/derat/nup@v0.0.0-20230418113745-15592ba7c620/cmd/nup/client/task_cache.go

// Copyright 2022 Daniel Erat.
// All rights reserved.

package client

import (
	"fmt"
	"sync"
)

// TaskCache runs tasks that each produce one or more key-value pairs
// and caches the resulting values by key.
type TaskCache struct {
	items    map[string]interface{} // cached values
	tasks    map[string]struct{}    // keys of in-progress tasks
	maxTasks int                    // maximum number of simultaneous tasks
	mu       sync.Mutex             // guards items and tasks
	cond     *sync.Cond             // broadcast when a task finishes
}

// NewTaskCache returns a TaskCache that will run up to maxTasks simultaneous tasks.
func NewTaskCache(maxTasks int) *TaskCache {
	c := TaskCache{
		items:    make(map[string]interface{}),
		tasks:    make(map[string]struct{}),
		maxTasks: maxTasks,
	}
	c.cond = sync.NewCond(&c.mu)
	return &c
}
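
// A minimal construction sketch (illustrative only): a cache that runs at
// most two simultaneous tasks starts out empty.
//
//	c := NewTaskCache(2)
//	fmt.Println(c.Size()) // prints 0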

// Size returns the number of cached values.
func (c *TaskCache) Size() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return len(c.items)
}

// Task produces one or more key-value pairs.
type Task func() (map[string]interface{}, error)
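
// A hedged sketch of a Task (illustrative only; the album keys and titles are
// invented): one task can populate several cache entries at once, which is
// useful when a single expensive operation yields multiple results.
//
//	var loadAlbums Task = func() (map[string]interface{}, error) {
//		return map[string]interface{}{
//			"album/1": "Abbey Road",
//			"album/2": "Revolver",
//		}, nil
//	}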

// Get returns the item with the supplied key from the cache.
// If the item is not already in the cache, Get waits until no other task
// with the same task key is running and a task slot is free, then executes
// task and saves the resulting items to the cache. If the item appears in
// the cache while waiting, it is returned without running task.
func (c *TaskCache) Get(itemKey, taskKey string, task Task) (interface{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Wait until the item is ready or we're allowed to run the task.
	for !c.ready(itemKey, taskKey) {
		c.cond.Wait()
	}

	if v, ok := c.items[itemKey]; ok {
		return v, nil
	}

	if _, ok := c.tasks[taskKey]; ok {
		return nil, fmt.Errorf("task %q already running", taskKey)
	}
	c.tasks[taskKey] = struct{}{}

	// Drop the lock while the task runs so other goroutines can make progress.
	c.mu.Unlock()
	m, err := task()
	c.mu.Lock() // relock; the deferred Unlock() above releases it on return

	delete(c.tasks, taskKey) // the task is no longer in flight
	defer c.cond.Broadcast() // wake goroutines blocked in the wait loop above

	if err != nil {
		return nil, err
	}
	for k, v := range m {
		if _, ok := c.items[k]; ok {
			return nil, fmt.Errorf("task %q produced already-present item %q", taskKey, k)
		}
		c.items[k] = v
	}
	if v, ok := c.items[itemKey]; ok {
		return v, nil
	}
	return nil, fmt.Errorf("task %q didn't produce item %q", taskKey, itemKey)
}
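
// A hedged usage sketch (illustrative only; the keys and the anonymous task
// are hypothetical): concurrent Get calls that share a task key wait for the
// in-flight task instead of launching a duplicate, so the task body below
// runs once even though eight goroutines ask for the same item.
//
//	c := NewTaskCache(4)
//	var wg sync.WaitGroup
//	for i := 0; i < 8; i++ {
//		wg.Add(1)
//		go func() {
//			defer wg.Done()
//			v, err := c.Get("cover/123", "fetch-covers", func() (map[string]interface{}, error) {
//				return map[string]interface{}{"cover/123": "image-bytes"}, nil
//			})
//			_, _ = v, err // use the value and error in real code
//		}()
//	}
//	wg.Wait()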

// ready returns true if either itemKey is in the cache or a new
// task identified by taskKey can be launched. c.mu must be held.
func (c *TaskCache) ready(itemKey, taskKey string) bool {
	if _, ok := c.items[itemKey]; ok {
		return true
	}
	if _, ok := c.tasks[taskKey]; ok {
		return false
	}
	return len(c.tasks) < c.maxTasks
}

// GetIfExists returns the item with the supplied key only if it's already been
// computed. It never runs a task; the second return value reports whether the
// item was found.
func (c *TaskCache) GetIfExists(itemKey string) (interface{}, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	v, ok := c.items[itemKey]
	return v, ok
}
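
// A hedged sketch of GetIfExists (illustrative only; the key matches the Get
// sketch above): it only consults the cache, which is handy when a caller
// wants a value opportunistically but cannot afford to run a task.
//
//	if v, ok := c.GetIfExists("cover/123"); ok {
//		fmt.Println("already cached:", v)
//	}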