github.com/xmidt-org/webpa-common@v1.11.9/secure/key/cache.go (about)

     1  package key
     2  
     3  import (
     4  	"github.com/xmidt-org/webpa-common/concurrent"
     5  	"sync"
     6  	"sync/atomic"
     7  	"time"
     8  )
     9  
const (
	// dummyKeyId is passed to delegate Resolvers that ignore the keyID
	// argument (see singleCache.UpdateKeys), where no real key identifier
	// is available or necessary.
	dummyKeyId = ""
)
    14  
// Cache is a Resolver type which provides caching for keys based on keyID.
//
// All implementations will block the first time a particular key is accessed
// and will initialize the value for that key.  Thereafter, all updates happen
// in a separate goroutine.  This allows HTTP transactions to avoid paying
// the cost of loading a key after the initial fetch.
type Cache interface {
	// Cache embeds Resolver: ResolveKey serves keys from the cache,
	// falling back to the underlying delegate on a miss.
	Resolver

	// UpdateKeys updates all keys known to this cache.  This method makes
	// a best-effort to avoid blocking other goroutines which use ResolveKey,
	// which may mean copy-on-write semantics.
	//
	// The first return value is the count of keys for which attempts were
	// made to update.
	//
	// UpdateKeys may run multiple I/O operations.  The second return value is a slice of
	// errors that occurred while it attempted to update each key.  Exactly one (1)
	// attempt will be made to update each key present in the cache, regardless
	// of any update errors for each individual key.  This slice may be nil if no
	// errors occurred.
	UpdateKeys() (int, []error)
}
    38  
// basicCache contains the internal members common to all cache implementations.
// It must not be copied after first use (it embeds a sync.Mutex), so it is
// always used through pointer receivers.
type basicCache struct {
	// delegate is the underlying Resolver consulted on cache misses and updates.
	delegate Resolver

	// value holds the cached state (a Pair or a map[string]Pair, depending on
	// the concrete cache type).  atomic.Value makes lock-free reads possible.
	value atomic.Value

	// updateLock serializes all mutations of value; readers never take it.
	updateLock sync.Mutex
}
    45  
    46  func (b *basicCache) load() interface{} {
    47  	return b.value.Load()
    48  }
    49  
    50  func (b *basicCache) store(newValue interface{}) {
    51  	b.value.Store(newValue)
    52  }
    53  
    54  // update provides a critical section for an update operation
    55  func (b *basicCache) update(operation func()) {
    56  	b.updateLock.Lock()
    57  	defer b.updateLock.Unlock()
    58  	operation()
    59  }
    60  
// singleCache assumes that the delegate Resolver
// only returns (1) key.  The cached state stored in basicCache.value
// is a single Pair; the keyID passed to ResolveKey is effectively ignored
// by the delegate.
type singleCache struct {
	basicCache
}
    66  
    67  func (cache *singleCache) ResolveKey(keyID string) (pair Pair, err error) {
    68  	var ok bool
    69  	pair, ok = cache.load().(Pair)
    70  	if !ok {
    71  		cache.update(func() {
    72  			pair, ok = cache.load().(Pair)
    73  			if !ok {
    74  				pair, err = cache.delegate.ResolveKey(keyID)
    75  				if err == nil {
    76  					cache.store(pair)
    77  				}
    78  			}
    79  		})
    80  	}
    81  
    82  	return
    83  }
    84  
    85  func (cache *singleCache) UpdateKeys() (count int, errors []error) {
    86  	count = 1
    87  	cache.update(func() {
    88  		// this type of cache is specifically for resolvers which don't use the keyID,
    89  		// so just pass an empty string in
    90  		if pair, err := cache.delegate.ResolveKey(dummyKeyId); err == nil {
    91  			cache.store(pair)
    92  		} else {
    93  			errors = []error{err}
    94  		}
    95  	})
    96  
    97  	return
    98  }
    99  
// multiCache uses an atomic map reference to store keys.
// Once created, each internal map instance will never be written
// to again, thus removing the need to lock for reads.  This approach
// does consume more memory, however.  The updateLock ensures that only
// (1) goroutine will ever be updating the map at anytime.
//
// The cached state stored in basicCache.value is a map[string]Pair
// keyed by keyID; mutations replace the whole map (copy-on-write).
type multiCache struct {
	basicCache
}
   108  
   109  // fetchPair uses the atomic reference to the keys map and attempts
   110  // to fetch the key from the cache.
   111  func (cache *multiCache) fetchPair(keyID string) (pair Pair, ok bool) {
   112  	pairs, ok := cache.load().(map[string]Pair)
   113  	if ok {
   114  		pair, ok = pairs[keyID]
   115  	}
   116  
   117  	return
   118  }
   119  
   120  // copyPairs creates a copy of the current key cache.  If no keys are present
   121  // yet, this method returns a non-nil empty map.
   122  func (cache *multiCache) copyPairs() map[string]Pair {
   123  	pairs, _ := cache.load().(map[string]Pair)
   124  
   125  	// make the capacity 1 larger, since this method is almost always
   126  	// going to be invoked prior to doing a copy-on-write update.
   127  	newPairs := make(map[string]Pair, len(pairs)+1)
   128  
   129  	for keyID, pair := range pairs {
   130  		newPairs[keyID] = pair
   131  	}
   132  
   133  	return newPairs
   134  }
   135  
   136  func (cache *multiCache) ResolveKey(keyID string) (pair Pair, err error) {
   137  	var ok bool
   138  	pair, ok = cache.fetchPair(keyID)
   139  	if !ok {
   140  		cache.update(func() {
   141  			pair, ok = cache.fetchPair(keyID)
   142  			if !ok {
   143  				pair, err = cache.delegate.ResolveKey(keyID)
   144  				if err == nil {
   145  					newPairs := cache.copyPairs()
   146  					newPairs[keyID] = pair
   147  					cache.store(newPairs)
   148  				}
   149  			}
   150  		})
   151  	}
   152  
   153  	return
   154  }
   155  
   156  func (cache *multiCache) UpdateKeys() (count int, errors []error) {
   157  	if existingPairs, ok := cache.load().(map[string]Pair); ok {
   158  		count = len(existingPairs)
   159  		cache.update(func() {
   160  			newCount := 0
   161  			newPairs := make(map[string]Pair, len(existingPairs))
   162  			for keyID, oldPair := range existingPairs {
   163  				if newPair, err := cache.delegate.ResolveKey(keyID); err == nil {
   164  					newCount++
   165  					newPairs[keyID] = newPair
   166  				} else {
   167  					// keep the old key in the event of an error
   168  					newPairs[keyID] = oldPair
   169  					errors = append(errors, err)
   170  				}
   171  			}
   172  
   173  			// small optimization: don't bother doing the atomic swap
   174  			// if every key operation failed
   175  			if newCount > 0 {
   176  				cache.store(newPairs)
   177  			}
   178  		})
   179  	}
   180  
   181  	return
   182  }
   183  
   184  // NewUpdater conditionally creates a Runnable which will update the keys in
   185  // the given resolver on the configured updateInterval.  If both (1) the
   186  // updateInterval is positive, and (2) resolver implements Cache, then this
   187  // method returns a non-nil function that will spawn a goroutine to update
   188  // the cache in the background.  Otherwise, this method returns nil.
   189  func NewUpdater(updateInterval time.Duration, resolver Resolver) (updater concurrent.Runnable) {
   190  	if updateInterval < 1 {
   191  		return
   192  	}
   193  
   194  	if keyCache, ok := resolver.(Cache); ok {
   195  		updater = concurrent.RunnableFunc(func(waitGroup *sync.WaitGroup, shutdown <-chan struct{}) error {
   196  			waitGroup.Add(1)
   197  
   198  			go func() {
   199  				defer waitGroup.Done()
   200  
   201  				ticker := time.NewTicker(updateInterval)
   202  				defer ticker.Stop()
   203  
   204  				for {
   205  					select {
   206  					case <-shutdown:
   207  						return
   208  					case <-ticker.C:
   209  						keyCache.UpdateKeys()
   210  					}
   211  				}
   212  			}()
   213  
   214  			return nil
   215  		})
   216  	}
   217  
   218  	return
   219  }