github.com/elfadel/cilium@v1.6.12/pkg/allocator/cache.go

// Copyright 2016-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package allocator

import (
	"context"
	"sync"
	"time"

	"github.com/cilium/cilium/pkg/idpool"
	"github.com/cilium/cilium/pkg/kvstore"
	"github.com/cilium/cilium/pkg/lock"
)

// backendOpTimeout is the time allowed for operations sent to backends in
// response to events such as create/modify/delete.
const backendOpTimeout = 10 * time.Second

// idMap provides mapping from ID to an AllocatorKey
type idMap map[idpool.ID]AllocatorKey

// keyMap provides mapping from AllocatorKey to ID
type keyMap map[string]idpool.ID

type cache struct {
	allocator *Allocator

	stopChan chan struct{}

	// mutex protects all cache data structures
	mutex lock.RWMutex

	// cache is a local cache of all IDs allocated in the kvstore. It is
	// maintained by watching for kvstore events and can thus lag behind.
	cache idMap

	// keyCache shadows cache and allows access by key
	keyCache keyMap

	// nextCache is the cache that is constantly being filled by
	// startWatch(). Once startWatch() has successfully performed the
	// initial fill using ListPrefix, the cache above is pointed to
	// nextCache. If startWatch() fails to perform the initial list, the
	// cache is never pointed to nextCache. This guarantees that a valid
	// cache is kept at all times.
	nextCache idMap

	// nextKeyCache follows the same logic as nextCache but for keyCache
	nextKeyCache keyMap

	// listDone is closed once the initial list has been received and
	// promoted to the live cache
	listDone waitChan

	// stopWatchWg is a wait group that is incremented when a watcher is
	// started and marked as done when the watcher has exited
	stopWatchWg sync.WaitGroup
}

func newCache(a *Allocator) cache {
	return cache{
		allocator: a,
		cache:     idMap{},
		keyCache:  keyMap{},
		stopChan:  make(chan struct{}),
	}
}

type waitChan chan bool

// CacheMutations are the operations given to a Backend's ListAndWatch command.
// They are called on changes to identities.
type CacheMutations interface {
	// OnListDone is called when the initial full-sync is complete.
	OnListDone()

	// OnAdd is called when a new key->ID appears.
	OnAdd(id idpool.ID, key AllocatorKey)

	// OnModify is called when a key->ID mapping is modified. This may happen
	// when leases are updated, and does not mean the actual mapping has
	// changed.
	OnModify(id idpool.ID, key AllocatorKey)

	// OnDelete is called when a key->ID mapping is removed. This may trigger
	// master-key protection, if enabled, where the local allocator will
	// recreate the key->ID association because the local node is still
	// using it.
	OnDelete(id idpool.ID, key AllocatorKey)
}
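
// Illustrative sketch (not part of the upstream file): the call sequence a
// Backend's ListAndWatch implementation is expected to follow when driving a
// CacheMutations handler such as the cache above. The replayInitialState name
// and the initialState map are hypothetical; a real backend streams events
// from the kvstore instead.
func replayInitialState(handler CacheMutations, initialState map[idpool.ID]AllocatorKey, stopChan chan struct{}) {
	// Replay the state found by the initial list as OnAdd calls so the
	// handler can populate its caches.
	for id, key := range initialState {
		handler.OnAdd(id, key)
	}

	// Signal that the initial list is complete. For the cache above this
	// promotes nextCache/nextKeyCache to the live caches and closes
	// listDone.
	handler.OnListDone()

	// A real backend keeps watching and translates subsequent kvstore
	// events into OnAdd/OnModify/OnDelete calls until stopChan is closed.
	<-stopChan
}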

// sendEvent emits an AllocatorEvent on the allocator's event channel, if one
// is configured.
func (c *cache) sendEvent(typ kvstore.EventType, id idpool.ID, key AllocatorKey) {
	if events := c.allocator.events; events != nil {
		events <- AllocatorEvent{Typ: typ, ID: id, Key: key}
	}
}
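
// Illustrative sketch (not part of the upstream file): how a consumer might
// drain the event channel that sendEvent publishes to. The
// drainAllocatorEvents name and the stop channel are hypothetical.
func drainAllocatorEvents(events chan AllocatorEvent, stop chan struct{}) {
	for {
		select {
		case e := <-events:
			// e.Typ mirrors the callback that produced the event:
			// kvstore.EventTypeCreate for OnAdd, EventTypeModify
			// for OnModify and EventTypeDelete for OnDelete.
			_ = e
		case <-stop:
			return
		}
	}
}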

func (c *cache) OnListDone() {
	c.mutex.Lock()
	// nextCache is valid, point the live cache to it
	c.cache = c.nextCache
	c.keyCache = c.nextKeyCache
	c.mutex.Unlock()

	log.Debug("Initial list of identities received")

	// report that the list operation has been completed and the allocator
	// is ready to use
	close(c.listDone)
}

func (c *cache) OnAdd(id idpool.ID, key AllocatorKey) {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	c.nextCache[id] = key
	if key != nil {
		c.nextKeyCache[c.allocator.encodeKey(key)] = id
	}
	c.allocator.idPool.Remove(id)

	c.sendEvent(kvstore.EventTypeCreate, id, key)
}

func (c *cache) OnModify(id idpool.ID, key AllocatorKey) {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	if k, ok := c.nextCache[id]; ok {
		delete(c.nextKeyCache, c.allocator.encodeKey(k))
	}

	c.nextCache[id] = key
	if key != nil {
		c.nextKeyCache[c.allocator.encodeKey(key)] = id
	}

	c.sendEvent(kvstore.EventTypeModify, id, key)
}

func (c *cache) OnDelete(id idpool.ID, key AllocatorKey) {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	a := c.allocator
	if a.enableMasterKeyProtection {
		// If the local node still uses this ID, re-create the key in
		// the backend instead of releasing the ID (master-key
		// protection).
		if value := a.localKeys.lookupID(id); value != nil {
			ctx, cancel := context.WithTimeout(context.TODO(), backendOpTimeout)
			defer cancel()
			a.backend.UpdateKey(ctx, id, value, true)
			return
		}
	}

	if k, ok := c.nextCache[id]; ok && k != nil {
		delete(c.nextKeyCache, c.allocator.encodeKey(k))
	}

	delete(c.nextCache, id)
	a.idPool.Insert(id)

	c.sendEvent(kvstore.EventTypeDelete, id, key)
}

// start requests a LIST operation from the kvstore and starts watching the
// prefix in a goroutine.
func (c *cache) start() waitChan {
	c.listDone = make(waitChan)

	c.mutex.Lock()

	// start with a fresh nextCache
	c.nextCache = idMap{}
	c.nextKeyCache = keyMap{}
	c.mutex.Unlock()

	c.stopWatchWg.Add(1)

	go func() {
		c.allocator.backend.ListAndWatch(c, c.stopChan)
		c.stopWatchWg.Done()
	}()

	return c.listDone
}

// stop stops the watcher and waits for it to exit.
func (c *cache) stop() {
	close(c.stopChan)
	c.stopWatchWg.Wait()
}
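
// Illustrative sketch (not part of the upstream file): the intended lifecycle
// of a cache, assuming an *Allocator with a configured backend. The
// runCacheUntilSynced name is hypothetical; the real allocator performs these
// steps itself when it is constructed.
func runCacheUntilSynced(a *Allocator) {
	c := newCache(a)

	// start() kicks off ListAndWatch in a goroutine and returns a channel
	// that is closed once the initial list has been promoted to the live
	// cache by OnListDone.
	listDone := c.start()
	<-listDone

	// From here on, lookups reflect at least the state seen by the
	// initial list ("example-key" is a hypothetical encoded key).
	_ = c.get("example-key")

	// stop() closes stopChan, which ends ListAndWatch, and waits for the
	// watcher goroutine to exit.
	c.stop()
}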

// get returns the ID of the given encoded key, or idpool.NoID if the key is
// not in the live cache.
func (c *cache) get(key string) idpool.ID {
	c.mutex.RLock()
	if id, ok := c.keyCache[key]; ok {
		c.mutex.RUnlock()
		return id
	}
	c.mutex.RUnlock()

	return idpool.NoID
}

// getByID returns the key associated with an ID, or nil if the ID is not in
// the live cache.
func (c *cache) getByID(id idpool.ID) AllocatorKey {
	c.mutex.RLock()
	if v, ok := c.cache[id]; ok {
		c.mutex.RUnlock()
		return v
	}
	c.mutex.RUnlock()

	return nil
}

// foreach invokes cb for each entry of the live cache while holding the read
// lock.
func (c *cache) foreach(cb RangeFunc) {
	c.mutex.RLock()
	for k, v := range c.cache {
		cb(k, v)
	}
	c.mutex.RUnlock()
}

// insert adds a key->ID mapping directly into the next caches.
func (c *cache) insert(key AllocatorKey, val idpool.ID) {
	c.mutex.Lock()
	c.nextCache[val] = key
	c.nextKeyCache[c.allocator.encodeKey(key)] = val
	c.mutex.Unlock()
}