github.com/cilium/cilium@v1.16.2/pkg/kvstore/allocator/allocator.go

// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium

package allocator

import (
	"context"
	"fmt"
	"path"
	"strconv"
	"strings"

	"github.com/sirupsen/logrus"

	"github.com/cilium/cilium/pkg/allocator"
	"github.com/cilium/cilium/pkg/idpool"
	"github.com/cilium/cilium/pkg/kvstore"
	"github.com/cilium/cilium/pkg/logging"
	"github.com/cilium/cilium/pkg/logging/logfields"
	"github.com/cilium/cilium/pkg/rate"
)

var (
	log = logging.DefaultLogger.WithField(logfields.LogSubsys, "kvstorebackend")
)

// kvstoreBackend is an implementation of pkg/allocator.Backend. It stores
// identities in the following format:
//
// Slave keys:
//
// Slave keys are owned by individual nodes:
//   - basePath/value/key1/node1 => 1001
//   - basePath/value/key1/node2 => 1001
//   - basePath/value/key2/node1 => 1002
//   - basePath/value/key2/node2 => 1002
//
// If at least one key exists with the prefix basePath/value/keyN then that
// key must be considered to be in use in the allocation space.
//
// Slave keys are protected by a lease and will automatically get removed
// after ~ option.Config.KVstoreLeaseTTL if the node does not renew in time.
//
// Master key:
//   - basePath/id/1001 => key1
//   - basePath/id/1002 => key2
//
// Master keys provide the mapping from ID to key. As long as a master key
// for an ID exists, the ID is still in use. However, if a master key is no
// longer backed by at least one slave key, the garbage collector will
// eventually release the master key and return it to the pool.
type kvstoreBackend struct {
	// basePrefix is the prefix in the kvstore that all keys share which
	// are being managed by this allocator. The basePrefix typically
	// consists of something like: "space/project/allocatorName"
	basePrefix string

	// idPrefix is the kvstore key prefix for all master keys. It is
	// derived from the basePrefix.
	idPrefix string

	// valuePrefix is the kvstore key prefix for all slave keys. It is
	// derived from the basePrefix.
	valuePrefix string

	// lockPrefix is the prefix to use for all kvstore locks. This prefix
	// is different from the idPrefix and valuePrefix to simplify watching
	// for ID and key changes.
	lockPrefix string

	// suffix is the suffix attached to keys which must be node specific;
	// this is typically set to the node's IP address.
	suffix string

	backend kvstore.BackendOperations

	keyType allocator.AllocatorKey
}
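
// The sketch below is purely illustrative and not part of the package API:
// with an assumed base path, node suffix, key and ID, it shows how the master
// and slave key paths described above are composed. The real paths are built
// the same way in NewKVStoreBackend and in the methods further down.
func examplePaths() (master, slave string) {
	base := "cilium/state/identities/v1" // assumed base path
	id := idpool.ID(1001).String()       // assumed allocated ID
	key := "k8s:app=foo;"                // assumed (already encoded) key
	node := "192.0.2.10"                 // assumed node suffix

	master = path.Join(base, "id", id)          // => .../id/1001
	slave = path.Join(base, "value", key, node) // => .../value/k8s:app=foo;/192.0.2.10
	return master, slave
}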

func prefixMatchesKey(prefix, key string) bool {
	// A slave key looks like:
	// cilium/state/identities/v1/value/label;foo;bar;/172.0.124.60
	// The prefix matches the key only if the key's last '/' falls exactly
	// at the end of the prefix.
	lastSlash := strings.LastIndex(key, "/")
	return len(prefix) == lastSlash
}
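
// A minimal sketch of the check above, using the values from the comment in
// Get further down: only the key whose final '/' sits exactly at the end of
// the prefix belongs to this allocator key.
func examplePrefixMatch() {
	prefix := "cilium/state/identities/v1/value/label;foo;"
	key1 := "cilium/state/identities/v1/value/label;foo;/172.0.124.60"
	key2 := "cilium/state/identities/v1/value/label;foo;bar;/172.0.124.60"
	fmt.Println(prefixMatchesKey(prefix, key1)) // true: '/' sits at len(prefix)
	fmt.Println(prefixMatchesKey(prefix, key2)) // false: '/' comes later
}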

// NewKVStoreBackend creates a pkg/allocator.Backend compatible instance. The
// specific kvstore used is configured in pkg/kvstore.
func NewKVStoreBackend(basePath, suffix string, typ allocator.AllocatorKey, backend kvstore.BackendOperations) (*kvstoreBackend, error) {
	if backend == nil {
		return nil, fmt.Errorf("kvstore client not configured")
	}

	return &kvstoreBackend{
		basePrefix:  basePath,
		idPrefix:    path.Join(basePath, "id"),
		valuePrefix: path.Join(basePath, "value"),
		lockPrefix:  path.Join(basePath, "locks"),
		suffix:      suffix,
		keyType:     typ,
		backend:     backend,
	}, nil
}
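
// A hedged usage sketch (comment only): the base path below is the one used
// for security identities elsewhere in Cilium, while the node suffix, key
// type and kvstore client are placeholders the caller must supply.
//
//	backend, err := NewKVStoreBackend("cilium/state/identities/v1",
//		nodeIP, keyType, kvstoreClient)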

// lockPath locks a key in the scope of an allocator
func (k *kvstoreBackend) lockPath(ctx context.Context, key string) (*kvstore.Lock, error) {
	suffix := strings.TrimPrefix(key, k.basePrefix)
	return kvstore.LockPath(ctx, k.backend, path.Join(k.lockPrefix, suffix))
}

// DeleteAllKeys will delete all keys below the base prefix
func (k *kvstoreBackend) DeleteAllKeys(ctx context.Context) {
	k.backend.DeletePrefix(ctx, k.basePrefix)
}

// AllocateID allocates a key->ID mapping in the kvstore.
func (k *kvstoreBackend) AllocateID(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (allocator.AllocatorKey, error) {
	// create /id/<ID> and fail if it already exists
	keyPath := path.Join(k.idPrefix, id.String())
	keyEncoded := []byte(k.backend.Encode([]byte(key.GetKey())))
	success, err := k.backend.CreateOnly(ctx, keyPath, keyEncoded, false)
	if err != nil || !success {
		return nil, fmt.Errorf("unable to create master key '%s': %w", keyPath, err)
	}

	return key, nil
}

// AllocateIDIfLocked allocates a key->ID mapping in the kvstore if the
// caller is still holding the given lock.
func (k *kvstoreBackend) AllocateIDIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) (allocator.AllocatorKey, error) {
	// create /id/<ID> and fail if it already exists
	keyPath := path.Join(k.idPrefix, id.String())
	keyEncoded := []byte(k.backend.Encode([]byte(key.GetKey())))
	success, err := k.backend.CreateOnlyIfLocked(ctx, keyPath, keyEncoded, false, lock)
	if err != nil || !success {
		return nil, fmt.Errorf("unable to create master key '%s': %w", keyPath, err)
	}

	return key, nil
}

// AcquireReference marks that this node is using this key->ID mapping in the kvstore.
func (k *kvstoreBackend) AcquireReference(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) error {
	keyString := k.backend.Encode([]byte(key.GetKey()))
	if err := k.createValueNodeKey(ctx, keyString, id, lock); err != nil {
		return fmt.Errorf("unable to create slave key '%s': %w", keyString, err)
	}
	return nil
}

// createValueNodeKey records that this "node" is using this key->ID mapping
func (k *kvstoreBackend) createValueNodeKey(ctx context.Context, key string, newID idpool.ID, lock kvstore.KVLocker) error {
	// add a new key /value/<key>/<node> to account for the reference
	// The key is protected with a TTL/lease and will expire after LeaseTTL
	valueKey := path.Join(k.valuePrefix, key, k.suffix)
	if _, err := k.backend.UpdateIfDifferentIfLocked(ctx, valueKey, []byte(newID.String()), true, lock); err != nil {
		return fmt.Errorf("unable to create value-node key '%s': %w", valueKey, err)
	}

	return nil
}

// lock locks a key in the scope of an allocator
func (k *kvstoreBackend) lock(ctx context.Context, key string) (*kvstore.Lock, error) {
	suffix := strings.TrimPrefix(key, k.basePrefix)
	return kvstore.LockPath(ctx, k.backend, path.Join(k.lockPrefix, suffix))
}

// Lock locks a key in the scope of an allocator
func (k *kvstoreBackend) Lock(ctx context.Context, key allocator.AllocatorKey) (kvstore.KVLocker, error) {
	return k.lock(ctx, k.backend.Encode([]byte(key.GetKey())))
}

// Get returns the ID which is allocated to a key in the kvstore
func (k *kvstoreBackend) Get(ctx context.Context, key allocator.AllocatorKey) (idpool.ID, error) {
	// ListPrefix() will return all keys matching the prefix, and the prefix
	// can cover multiple different keys, for example:
	//
	// key1 := label1;label2;
	// key2 := label1;label2;label3;
	//
	// In order to retrieve the correct key, the position of the last '/'
	// is significant, e.g.
	//
	// prefix := cilium/state/identities/v1/value/label;foo;
	//
	// key1 := cilium/state/identities/v1/value/label;foo;/172.0.124.60
	// key2 := cilium/state/identities/v1/value/label;foo;bar;/172.0.124.60
	//
	// Only key1 should match
	prefix := path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey())))
	pairs, err := k.backend.ListPrefix(ctx, prefix)
	kvstore.Trace("ListPrefix", err, logrus.Fields{logfields.Prefix: prefix, logfields.Entries: len(pairs)})
	if err != nil {
		return 0, err
	}

	for k, v := range pairs {
		if prefixMatchesKey(prefix, k) {
			id, err := strconv.ParseUint(string(v.Data), 10, 64)
			if err == nil {
				return idpool.ID(id), nil
			}
		}
	}

	return idpool.NoID, nil
}

// GetIfLocked returns the ID which is allocated to a key in the kvstore
// if the client is still holding the given lock.
func (k *kvstoreBackend) GetIfLocked(ctx context.Context, key allocator.AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error) {
	// ListPrefixIfLocked() will return all keys matching the prefix, and the
	// prefix can cover multiple different keys, for example:
	//
	// key1 := label1;label2;
	// key2 := label1;label2;label3;
	//
	// In order to retrieve the correct key, the position of the last '/'
	// is significant, e.g.
	//
	// prefix := cilium/state/identities/v1/value/label;foo;
	//
	// key1 := cilium/state/identities/v1/value/label;foo;/172.0.124.60
	// key2 := cilium/state/identities/v1/value/label;foo;bar;/172.0.124.60
	//
	// Only key1 should match
	prefix := path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey())))
	pairs, err := k.backend.ListPrefixIfLocked(ctx, prefix, lock)
	kvstore.Trace("ListPrefixIfLocked", err, logrus.Fields{logfields.Prefix: prefix, logfields.Entries: len(pairs)})
	if err != nil {
		return 0, err
	}

	for k, v := range pairs {
		if prefixMatchesKey(prefix, k) {
			id, err := strconv.ParseUint(string(v.Data), 10, 64)
			if err == nil {
				return idpool.ID(id), nil
			}
		}
	}

	return idpool.NoID, nil
}

// GetByID returns the key associated with an ID. Returns nil if no key is
// associated with the ID.
func (k *kvstoreBackend) GetByID(ctx context.Context, id idpool.ID) (allocator.AllocatorKey, error) {
	v, err := k.backend.Get(ctx, path.Join(k.idPrefix, id.String()))
	if err != nil {
		return nil, err
	}

	if v == nil {
		return nil, nil
	}

	s, err := k.backend.Decode(string(v))
	if err != nil {
		return nil, err
	}

	return k.keyType.PutKey(string(s)), nil
}

// UpdateKey refreshes the record that this node is using this key -> id
// mapping. When reliablyMissing is set it will also recreate missing master or
// slave keys.
func (k *kvstoreBackend) UpdateKey(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool) error {
	var (
		err        error
		recreated  bool
		keyPath    = path.Join(k.idPrefix, id.String())
		keyEncoded = []byte(k.backend.Encode([]byte(key.GetKey())))
		valueKey   = path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey())), k.suffix)
	)

	// Use of CreateOnly() ensures that any existing potentially
	// conflicting key is never overwritten.
	success, err := k.backend.CreateOnly(ctx, keyPath, keyEncoded, false)
	switch {
	case err != nil:
		return fmt.Errorf("unable to re-create missing master key \"%s\": %w", keyPath, err)
	case success:
		log.WithField(logfields.Key, keyPath).Warning("Re-created missing master key")
	}

	// Also re-create the slave key in case it has been deleted. This will
	// ensure that the next garbage collection cycle of any participating
	// node does not remove the master key again.
	if reliablyMissing {
		recreated, err = k.backend.CreateOnly(ctx, valueKey, []byte(id.String()), true)
	} else {
		recreated, err = k.backend.UpdateIfDifferent(ctx, valueKey, []byte(id.String()), true)
	}
	switch {
	case err != nil:
		return fmt.Errorf("unable to re-create missing slave key \"%s\": %w", valueKey, err)
	case recreated:
		log.WithField(logfields.Key, valueKey).Warning("Re-created missing slave key")
	}

	return nil
}

// UpdateKeyIfLocked refreshes the record that this node is using this key -> id
// mapping. When reliablyMissing is set it will also recreate missing master or
// slave keys.
func (k *kvstoreBackend) UpdateKeyIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool, lock kvstore.KVLocker) error {
	var (
		err        error
		recreated  bool
		keyPath    = path.Join(k.idPrefix, id.String())
		keyEncoded = []byte(k.backend.Encode([]byte(key.GetKey())))
		valueKey   = path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey())), k.suffix)
	)

	// Use of CreateOnly() ensures that any existing potentially
	// conflicting key is never overwritten.
	success, err := k.backend.CreateOnlyIfLocked(ctx, keyPath, keyEncoded, false, lock)
	switch {
	case err != nil:
		return fmt.Errorf("unable to re-create missing master key \"%s\": %w", keyPath, err)
	case success:
		log.WithField(logfields.Key, keyPath).Warning("Re-created missing master key")
	}

	// Also re-create the slave key in case it has been deleted. This will
	// ensure that the next garbage collection cycle of any participating
	// node does not remove the master key again.
	// The lock is ignored here since the key does not exist yet.
	if reliablyMissing {
		recreated, err = k.backend.CreateOnly(ctx, valueKey, []byte(id.String()), true)
	} else {
		recreated, err = k.backend.UpdateIfDifferentIfLocked(ctx, valueKey, []byte(id.String()), true, lock)
	}
	switch {
	case err != nil:
		return fmt.Errorf("unable to re-create missing slave key \"%s\": %w", valueKey, err)
	case recreated:
		log.WithField(logfields.Key, valueKey).Warning("Re-created missing slave key")
	}

	return nil
}

// Release releases the use of an ID associated with the provided key. It does
// not guard against concurrent releases. This is currently guarded by
// Allocator.slaveKeysMutex when called from pkg/allocator.Allocator.Release.
func (k *kvstoreBackend) Release(ctx context.Context, _ idpool.ID, key allocator.AllocatorKey) (err error) {
	valueKey := path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey())), k.suffix)
	log.WithField(logfields.Key, key).Info("Released last local use of key, invoking global release")

	// does not need to be deleted with a lock as it is protected by the
	// Allocator.slaveKeysMutex
	if err := k.backend.Delete(ctx, valueKey); err != nil {
		log.WithError(err).WithFields(logrus.Fields{logfields.Key: key}).Warning("Unable to delete node-specific slave key")
		return err
	}

	// if k.lockless {
	// FIXME: etcd 3.3 will make it possible to do a lockless
	// cleanup of the ID and release it right away. For now we rely
	// on the GC to kick in and release unused IDs.
	// }

	return nil
}

// RunLocksGC scans the kvstore for unused locks and removes them. Returns
// a map of locks that are currently being held, including the ones that have
// failed to be GCed.
func (k *kvstoreBackend) RunLocksGC(ctx context.Context, staleKeysPrevRound map[string]kvstore.Value) (map[string]kvstore.Value, error) {
	// fetch list of all /../locks keys
	allocated, err := k.backend.ListPrefix(ctx, k.lockPrefix)
	if err != nil {
		return nil, fmt.Errorf("list failed: %w", err)
	}

	staleKeys := map[string]kvstore.Value{}

	// iterate over /../locks
	for key, v := range allocated {
		scopedLog := log.WithFields(logrus.Fields{
			logfields.Key:     key,
			logfields.LeaseID: strconv.FormatUint(uint64(v.LeaseID), 16),
		})
		// Only delete if this key was previously marked as to be deleted
		if modRev, ok := staleKeysPrevRound[key]; ok &&
			// comparing ModRevision ensures the same client is still holding
			// this lock since the last GC was called.
			modRev.ModRevision == v.ModRevision &&
			modRev.LeaseID == v.LeaseID &&
			modRev.SessionID == v.SessionID {
			// capture the delete error so the failure path logs it rather
			// than the (nil) error from the earlier ListPrefix call
			delErr := k.backend.Delete(ctx, key)
			if delErr == nil {
				scopedLog.Warning("Forcefully removed distributed lock due to client staleness." +
					" Please check the connectivity between the KVStore and the client with that lease ID.")
				continue
			}
			scopedLog.WithError(delErr).
				Warning("Unable to remove distributed lock due to client staleness." +
					" Please check the connectivity between the KVStore and the client with that lease ID.")
		}
		// Mark the key so that it can be considered for deletion in the
		// next GC round.
		staleKeys[key] = kvstore.Value{
			ModRevision: v.ModRevision,
			LeaseID:     v.LeaseID,
			SessionID:   v.SessionID,
		}
	}

	return staleKeys, nil
}
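
// A minimal sketch of the two-round staleness handshake used above, with the
// previous round's snapshot passed in: a lock is only force-removed when the
// exact same ModRevision, LeaseID and SessionID were already seen in the
// previous round, i.e. the same client has held it unchanged across two GC
// runs. Purely illustrative; RunLocksGC performs this check inline.
func exampleStaleLockCheck(prevRound map[string]kvstore.Value, key string, cur kvstore.Value) bool {
	old, seen := prevRound[key]
	return seen &&
		old.ModRevision == cur.ModRevision &&
		old.LeaseID == cur.LeaseID &&
		old.SessionID == cur.SessionID
}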

// RunGC scans the kvstore for unused master keys and removes them
func (k *kvstoreBackend) RunGC(
	ctx context.Context,
	rateLimit *rate.Limiter,
	staleKeysPrevRound map[string]uint64,
	minID, maxID idpool.ID,
) (map[string]uint64, *allocator.GCStats, error) {

	// fetch list of all /id/ keys
	allocated, err := k.backend.ListPrefix(ctx, k.idPrefix)
	if err != nil {
		return nil, nil, fmt.Errorf("list failed: %w", err)
	}

	totalEntries := len(allocated)
	deletedEntries := 0

	staleKeys := map[string]uint64{}

	min := uint64(minID)
	max := uint64(maxID)
	reasonOutOfRange := "out of local cluster identity range [" + strconv.FormatUint(min, 10) + "," + strconv.FormatUint(max, 10) + "]"

	// iterate over /id/
	for key, v := range allocated {
		// if k.lockless {
		// FIXME: Add DeleteOnZeroCount support
		// }

		// Parse identity ID
		items := strings.Split(key, "/")
		if len(items) == 0 {
			log.WithField(logfields.Key, key).Warning("Unknown identity key found, skipping")
			continue
		}

		if identityID, err := strconv.ParseUint(items[len(items)-1], 10, 64); err != nil {
			log.WithField(logfields.Key, key).WithError(err).Warning("Parse identity failed, skipping")
			continue
		} else {
			// We should not GC identities that are out of our scope
			if identityID < min || identityID > max {
				log.WithFields(logrus.Fields{
					logfields.Key:    key,
					logfields.Reason: reasonOutOfRange,
				}).Debug("Skipping this key")
				continue
			}
		}

		lock, err := k.lockPath(ctx, key)
		if err != nil {
			log.WithError(err).WithField(logfields.Key, key).Warning("allocator garbage collector was unable to lock key")
			continue
		}

		// fetch list of all /value/<key> keys
		valueKeyPrefix := path.Join(k.valuePrefix, string(v.Data))
		pairs, err := k.backend.ListPrefixIfLocked(ctx, valueKeyPrefix, lock)
		if err != nil {
			log.WithError(err).WithField(logfields.Prefix, valueKeyPrefix).Warning("allocator garbage collector was unable to list keys")
			lock.Unlock(context.Background())
			continue
		}

		hasUsers := false
		for prefix := range pairs {
			if prefixMatchesKey(valueKeyPrefix, prefix) {
				hasUsers = true
				break
			}
		}

		var deleted bool
		// if the ID has no users, delete it
		if !hasUsers {
			scopedLog := log.WithFields(logrus.Fields{
				logfields.Key:      key,
				logfields.Identity: path.Base(key),
			})
			// Only delete if this key was previously marked as to be deleted
			if modRev, ok := staleKeysPrevRound[key]; ok {
				// if v.ModRevision is different from modRev (which is
				// the last seen v.ModRevision) then this key was re-used in
				// between GC calls. We should not mark it as stale yet,
				// but the next GC call will do it.
				if modRev == v.ModRevision {
					if err := k.backend.DeleteIfLocked(ctx, key, lock); err != nil {
						scopedLog.WithError(err).Warning("Unable to delete unused allocator master key")
					} else {
						deletedEntries++
						scopedLog.Info("Deleted unused allocator master key")
					}
					// consider the key deleted regardless of whether the
					// kvstore returned an error. We want to rate limit the
					// number of requests done to the KVStore.
					deleted = true
				}
			} else {
				// Mark the key to be deleted in the next RunGC round
				staleKeys[key] = v.ModRevision
			}
		}

		lock.Unlock(context.Background())
		if deleted {
			// Wait after deleting the key. This is not ideal because we have
			// done the operation that should be rate limited before checking the
			// rate limit. We have to do this here to avoid holding the global lock
			// for a long period of time.
			err = rateLimit.Wait(ctx)
			if err != nil {
				return nil, nil, err
			}
		}
	}

	gcStats := &allocator.GCStats{
		Alive:   totalEntries - deletedEntries,
		Deleted: deletedEntries,
	}
	return staleKeys, gcStats, nil
}

func (k *kvstoreBackend) keyToID(key string) (id idpool.ID, err error) {
	if !strings.HasPrefix(key, k.idPrefix) {
		return idpool.NoID, fmt.Errorf("found invalid key \"%s\" outside of prefix \"%s\"", key, k.idPrefix)
	}

	suffix := strings.TrimPrefix(key, k.idPrefix)
	if len(suffix) > 0 && suffix[0] == '/' {
		suffix = suffix[1:]
	}

	idParsed, err := strconv.ParseUint(suffix, 10, 64)
	if err != nil {
		return idpool.NoID, fmt.Errorf("cannot parse key suffix \"%s\"", suffix)
	}

	return idpool.ID(idParsed), nil
}
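
// A small illustrative sketch of keyToID with an assumed prefix and key: it
// strips the prefix and the separating '/', then parses the trailing ID.
func exampleKeyToID() {
	k := &kvstoreBackend{idPrefix: "cilium/state/identities/v1/id"}
	id, err := k.keyToID("cilium/state/identities/v1/id/1001")
	fmt.Println(id, err) // 1001 <nil>
}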

// ListAndWatch lists and watches the master-key prefix, forwarding list and
// watch events to the given handler until stopChan is closed or the context
// is cancelled.
func (k *kvstoreBackend) ListAndWatch(ctx context.Context, handler allocator.CacheMutations, stopChan chan struct{}) {
	ctx, cancel := context.WithCancel(ctx)
	watcher := k.backend.ListAndWatch(ctx, k.idPrefix, 512)

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				goto abort
			}
			if event.Typ == kvstore.EventTypeListDone {
				handler.OnListDone()
				continue
			}

			id, err := k.keyToID(event.Key)
			switch {
			case err != nil:
				log.WithError(err).WithField(logfields.Key, event.Key).Warning("Invalid key")

			case id != idpool.NoID:
				var key allocator.AllocatorKey

				if len(event.Value) > 0 {
					s, err := k.backend.Decode(string(event.Value))
					if err != nil {
						log.WithError(err).WithFields(logrus.Fields{
							logfields.Key:   event.Key,
							logfields.Value: event.Value,
						}).Warning("Unable to decode key value")
						continue
					}

					key = k.keyType.PutKey(string(s))
				} else {
					if event.Typ != kvstore.EventTypeDelete {
						log.WithFields(logrus.Fields{
							logfields.Key:       event.Key,
							logfields.EventType: event.Typ,
						}).Error("Received a key with an empty value")
						continue
					}
				}

				switch event.Typ {
				case kvstore.EventTypeCreate, kvstore.EventTypeModify:
					handler.OnUpsert(id, key)

				case kvstore.EventTypeDelete:
					handler.OnDelete(id, key)
				}
			}

		case <-stopChan:
			goto abort
		}
	}

abort:
	cancel()
	watcher.Stop()
}

func (k *kvstoreBackend) Status() (string, error) {
	return k.backend.Status()
}

func (k *kvstoreBackend) Encode(v string) string {
	return k.backend.Encode([]byte(v))
}