github.com/datadog/cilium@v1.6.12/daemon/policy.go

// Copyright 2016-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"strings"
	"sync"
	"time"

	"github.com/cilium/cilium/api/v1/models"
	. "github.com/cilium/cilium/api/v1/server/restapi/policy"
	"github.com/cilium/cilium/pkg/api"
	"github.com/cilium/cilium/pkg/endpoint"
	"github.com/cilium/cilium/pkg/endpoint/regeneration"
	"github.com/cilium/cilium/pkg/endpointmanager"
	"github.com/cilium/cilium/pkg/eventqueue"
	"github.com/cilium/cilium/pkg/identity/cache"
	"github.com/cilium/cilium/pkg/ipcache"
	"github.com/cilium/cilium/pkg/labels"
	"github.com/cilium/cilium/pkg/logging/logfields"
	bpfIPCache "github.com/cilium/cilium/pkg/maps/ipcache"
	"github.com/cilium/cilium/pkg/metrics"
	monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
	"github.com/cilium/cilium/pkg/option"
	"github.com/cilium/cilium/pkg/policy"
	policyAPI "github.com/cilium/cilium/pkg/policy/api"
	"github.com/cilium/cilium/pkg/safetime"
	"github.com/cilium/cilium/pkg/uuid"

	"github.com/go-openapi/runtime/middleware"
	"github.com/op/go-logging"
)

type policyTriggerMetrics struct{}

func (p *policyTriggerMetrics) QueueEvent(reason string) {
	if option.Config.MetricsConfig.TriggerPolicyUpdateTotal {
		metrics.TriggerPolicyUpdateTotal.WithLabelValues(reason).Inc()
	}
}

func (p *policyTriggerMetrics) PostRun(duration, latency time.Duration, folds int) {
	if option.Config.MetricsConfig.TriggerPolicyUpdateCallDuration {
		metrics.TriggerPolicyUpdateCallDuration.WithLabelValues("duration").Observe(duration.Seconds())
		metrics.TriggerPolicyUpdateCallDuration.WithLabelValues("latency").Observe(latency.Seconds())
	}
	if option.Config.MetricsConfig.TriggerPolicyUpdateFolds {
		metrics.TriggerPolicyUpdateFolds.Set(float64(folds))
	}
}

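// A minimal sketch of how this observer might be wired into pkg/trigger when
// the daemon starts up; the Parameters fields used here are assumptions about
// that package's API, and the real wiring lives elsewhere in the daemon:
//
//	t, err := trigger.NewTrigger(trigger.Parameters{
//		Name:            "policy_update",
//		MetricsObserver: &policyTriggerMetrics{},
//		MinInterval:     option.Config.PolicyTriggerInterval,
//		TriggerFunc:     d.policyUpdateTrigger,
//	})
//	if err != nil {
//		log.WithError(err).Fatal("Unable to initialize policy update trigger")
//	}
//	d.policyTrigger = t
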
func (d *Daemon) policyUpdateTrigger(reasons []string) {
	log.Debugf("Regenerating all endpoints")
	reason := strings.Join(reasons, ", ")

	regenerationMetadata := &regeneration.ExternalRegenerationMetadata{Reason: reason}
	endpointmanager.RegenerateAllEndpoints(regenerationMetadata)
}

// TriggerPolicyUpdates triggers policy updates for all of the daemon's
// endpoints. This may be called in a variety of situations: after policy
// changes, changes in agent configuration, changes in endpoint labels, and
// changes of security identities.
func (d *Daemon) TriggerPolicyUpdates(force bool, reason string) {
	if force {
		log.Debugf("Artificially increasing policy revision to enforce policy recalculation")
		d.policy.BumpRevision()
	}

	d.policyTrigger.TriggerWithReason(reason)
}

// UpdateIdentities informs the policy package of all identity changes
// and also triggers policy updates.
//
// The caller is responsible for making sure the same identity is not
// present in both 'added' and 'deleted'.
func (d *Daemon) UpdateIdentities(added, deleted cache.IdentityCache) {
	d.policy.GetSelectorCache().UpdateIdentities(added, deleted)
	d.TriggerPolicyUpdates(false, "one or more identities created or deleted")
}

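// A minimal sketch of how the identity allocator might invoke this as
// identities churn; the numeric identity and labels are made up, and
// cache.IdentityCache is assumed to map numeric identities to label arrays:
//
//	added := cache.IdentityCache{
//		identity.NumericIdentity(12345): labels.ParseLabelArray("k8s:app=web"),
//	}
//	d.UpdateIdentities(added, nil)
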
type getPolicyResolve struct {
	daemon *Daemon
}

func NewGetPolicyResolveHandler(d *Daemon) GetPolicyResolveHandler {
	return &getPolicyResolve{daemon: d}
}

func (h *getPolicyResolve) Handle(params GetPolicyResolveParams) middleware.Responder {
	log.WithField(logfields.Params, logfields.Repr(params)).Debug("GET /policy/resolve request")

	d := h.daemon

	var policyEnforcementMsg string
	isPolicyEnforcementEnabled := true

	d.policy.Mutex.RLock()

	// If policy enforcement isn't enabled, then traffic is allowed.
	if policy.GetPolicyEnabled() == option.NeverEnforce {
		policyEnforcementMsg = "Policy enforcement is disabled for the daemon."
		isPolicyEnforcementEnabled = false
	} else if policy.GetPolicyEnabled() == option.DefaultEnforcement {
		// If no rules match the sets of from / to labels provided in the API
		// request, then policy enforcement is not enabled for the endpoints
		// corresponding to those labels; we allow traffic between these sets
		// of labels and do not enforce policy between them.
		fromIngress, fromEgress := d.policy.GetRulesMatching(labels.NewSelectLabelArrayFromModel(params.TraceSelector.From.Labels))
		toIngress, toEgress := d.policy.GetRulesMatching(labels.NewSelectLabelArrayFromModel(params.TraceSelector.To.Labels))
		if !fromIngress && !fromEgress && !toIngress && !toEgress {
			policyEnforcementMsg = "Policy enforcement is disabled because " +
				"no rules in the policy repository match any endpoint selector " +
				"from the provided sets of labels."
			isPolicyEnforcementEnabled = false
		}
	}

	d.policy.Mutex.RUnlock()

	// Return allowed verdict if policy enforcement isn't enabled between the two sets of labels.
	if !isPolicyEnforcementEnabled {
		buffer := new(bytes.Buffer)
		ctx := params.TraceSelector
		searchCtx := policy.SearchContext{
			From:    labels.NewSelectLabelArrayFromModel(ctx.From.Labels),
			Trace:   policy.TRACE_ENABLED,
			To:      labels.NewSelectLabelArrayFromModel(ctx.To.Labels),
			DPorts:  ctx.To.Dports,
			Logging: logging.NewLogBackend(buffer, "", 0),
		}
		if ctx.Verbose {
			searchCtx.Trace = policy.TRACE_VERBOSE
		}
		verdict := policyAPI.Allowed.String()
		searchCtx.PolicyTrace("Label verdict: %s\n", verdict)
		msg := fmt.Sprintf("%s\n  %s\n%s", searchCtx.String(), policyEnforcementMsg, buffer.String())
		return NewGetPolicyResolveOK().WithPayload(&models.PolicyTraceResult{
			Log:     msg,
			Verdict: verdict,
		})
	}

	// If we hit the following code, policy enforcement is enabled for at least
	// one of the endpoints corresponding to the provided sets of labels, or for
	// the daemon.
	ingressBuffer := new(bytes.Buffer)

	ctx := params.TraceSelector
	ingressSearchCtx := policy.SearchContext{
		Trace:   policy.TRACE_ENABLED,
		Logging: logging.NewLogBackend(ingressBuffer, "", 0),
		From:    labels.NewSelectLabelArrayFromModel(ctx.From.Labels),
		To:      labels.NewSelectLabelArrayFromModel(ctx.To.Labels),
		DPorts:  ctx.To.Dports,
	}
	if ctx.Verbose {
		ingressSearchCtx.Trace = policy.TRACE_VERBOSE
	}

	// TODO: GH-3394 (add egress trace to API for policy trace).
	egressBuffer := new(bytes.Buffer)
	egressSearchCtx := ingressSearchCtx
	egressSearchCtx.Logging = logging.NewLogBackend(egressBuffer, "", 0)

	d.policy.Mutex.RLock()

	ingressVerdict := d.policy.AllowsIngressRLocked(&ingressSearchCtx)

	d.policy.Mutex.RUnlock()

	result := models.PolicyTraceResult{
		Verdict: ingressVerdict.String(),
		Log:     ingressBuffer.String(),
	}

	return NewGetPolicyResolveOK().WithPayload(&result)
}

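// An illustrative request body for GET /policy/resolve. The field names are
// assumptions based on the api/v1 models generated from the OpenAPI spec:
//
//	{
//	  "from":    {"labels": ["k8s:app=frontend"]},
//	  "to":      {"labels": ["k8s:app=backend"], "dports": [{"port": 80, "protocol": "TCP"}]},
//	  "verbose": true
//	}
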
// AddOptions are options which can be passed to PolicyAdd
type AddOptions struct {
	// Replace if true indicates that existing rules with identical labels should be replaced
	Replace bool
	// ReplaceWithLabels if present indicates that existing rules with the
	// given LabelArray should be deleted.
	ReplaceWithLabels labels.LabelArray
	// Generated should be set to true to signal that the policy being
	// inserted was generated by cilium-agent, e.g. the DNS poller.
	Generated bool

	// The source of this policy, one of api, fqdn or k8s
	Source string
}

// PolicyAddEvent is a wrapper around the parameters for policyAdd.
type PolicyAddEvent struct {
	rules policyAPI.Rules
	opts  *AddOptions
	d     *Daemon
}

// Handle implements pkg/eventqueue/EventHandler interface.
func (p *PolicyAddEvent) Handle(res chan interface{}) {
	p.d.policyAdd(p.rules, p.opts, res)
}

// PolicyAddResult is a wrapper around the values returned by policyAdd. It
// contains the new revision of a policy repository after adding a list of rules
// to it, and any error associated with adding rules to said repository.
type PolicyAddResult struct {
	newRev uint64
	err    error
}

// PolicyAdd adds a slice of rules to the policy repository owned by the
// daemon. Eventual changes in policy rules are propagated to all locally
// managed endpoints. Returns the policy revision number of the repository
// after adding the rules to it, or an error if the updated policy could not
// be imported.
func (d *Daemon) PolicyAdd(rules policyAPI.Rules, opts *AddOptions) (newRev uint64, err error) {
	p := &PolicyAddEvent{
		rules: rules,
		opts:  opts,
		d:     d,
	}
	polAddEvent := eventqueue.NewEvent(p)
	resChan, err := d.policy.RepositoryChangeQueue.Enqueue(polAddEvent)
	if err != nil {
		return 0, fmt.Errorf("enqueue of PolicyAddEvent failed: %s", err)
	}

	res, ok := <-resChan
	if ok {
		pRes := res.(*PolicyAddResult)
		return pRes.newRev, pRes.err
	}
	return 0, fmt.Errorf("policy addition event was cancelled")
}

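// A minimal sketch of a caller importing rules through PolicyAdd. The rule,
// selector and labels are made up for illustration; callers are expected to
// Sanitize() rules first, as the PUT /policy handler below does:
//
//	rules := policyAPI.Rules{&policyAPI.Rule{
//		EndpointSelector: policyAPI.NewESFromLabels(labels.ParseSelectLabel("app=web")),
//		Labels:           labels.ParseLabelArray("policy=web-ingress"),
//	}}
//	rev, err := d.PolicyAdd(rules, &AddOptions{
//		Replace: true, // drop existing rules carrying identical labels first
//		Source:  metrics.LabelEventSourceAPI,
//	})
//	if err != nil {
//		log.WithError(err).Warn("Policy import failed")
//	} else {
//		log.WithField(logfields.PolicyRevision, rev).Debug("Policy imported")
//	}
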
// policyAdd adds a slice of rules to the policy repository owned by the
// daemon. Eventual changes in policy rules are propagated to all locally
// managed endpoints. Returns the policy revision number of the repository
// after adding the rules to it, or an error if the updated policy could not
// be imported.
func (d *Daemon) policyAdd(sourceRules policyAPI.Rules, opts *AddOptions, resChan chan interface{}) {
	policyAddStartTime := time.Now()
	logger := log.WithField("policyAddRequest", uuid.NewUUID().String())

	if opts != nil && opts.Generated {
		logger.WithField(logfields.CiliumNetworkPolicy, sourceRules.String()).Debug("Policy Add Request")
	} else {
		logger.WithField(logfields.CiliumNetworkPolicy, sourceRules.String()).Info("Policy Add Request")
	}

	prefixes := policy.GetCIDRPrefixes(sourceRules)
	logger.WithField("prefixes", prefixes).Debug("Policy imported via API, found CIDR prefixes...")

	newPrefixLengths, err := d.prefixLengths.Add(prefixes)
	if err != nil {
		metrics.PolicyImportErrors.Inc()
		logger.WithError(err).WithField("prefixes", prefixes).Warn(
			"Failed to reference-count prefix lengths in CIDR policy")
		resChan <- &PolicyAddResult{
			newRev: 0,
			err:    api.Error(PutPolicyFailureCode, err),
		}
		return
	}
	if newPrefixLengths && !bpfIPCache.BackedByLPM() {
		// Only recompile if the configuration has changed.
		logger.Debug("CIDR policy has changed; recompiling base programs")
		if err := d.compileBase(); err != nil {
			_ = d.prefixLengths.Delete(prefixes)
			metrics.PolicyImportErrors.Inc()
			err2 := fmt.Errorf("Unable to recompile base programs: %s", err)
			logger.WithError(err2).WithField("prefixes", prefixes).Warn(
				"Failed to recompile base programs due to prefix length count change")
			resChan <- &PolicyAddResult{
				newRev: 0,
				err:    api.Error(PutPolicyFailureCode, err),
			}
			return
		}
	}

	if _, err := ipcache.AllocateCIDRs(bpfIPCache.IPCache, prefixes); err != nil {
		_ = d.prefixLengths.Delete(prefixes)
		metrics.PolicyImportErrors.Inc()
		logger.WithError(err).WithField("prefixes", prefixes).Warn(
			"Failed to allocate identities for CIDRs during policy add")
		resChan <- &PolicyAddResult{
			newRev: 0,
			err:    err,
		}
		return
	}

	// No errors past this point!

	d.policy.Mutex.Lock()

	// removedPrefixes tracks prefixes that we replace in the rules. It is used
	// after we release the policy repository lock.
	var removedPrefixes []*net.IPNet

	// policySelectionWG is used to signal when the endpoint caches of all
	// rules which were added / updated have been updated.
	var policySelectionWG sync.WaitGroup

	// Get all endpoints at the time rules were added / updated so we can figure
	// out which endpoints to regenerate / bump policy revision.
	allEndpoints := endpointmanager.GetPolicyEndpoints()

	// Start with all endpoints in the set of endpoints whose revision needs
	// to be bumped.
	endpointsToBumpRevision := policy.NewEndpointSet(allEndpoints)

	endpointsToRegen := policy.NewEndpointSet(nil)

	if opts != nil {
		if opts.Replace {
			for _, r := range sourceRules {
				oldRules := d.policy.SearchRLocked(r.Labels)
				removedPrefixes = append(removedPrefixes, policy.GetCIDRPrefixes(oldRules)...)
				if len(oldRules) > 0 {
					deletedRules, _, _ := d.policy.DeleteByLabelsLocked(r.Labels)
					deletedRules.UpdateRulesEndpointsCaches(endpointsToBumpRevision, endpointsToRegen, &policySelectionWG)
				}
			}
		}
		if len(opts.ReplaceWithLabels) > 0 {
			oldRules := d.policy.SearchRLocked(opts.ReplaceWithLabels)
			removedPrefixes = append(removedPrefixes, policy.GetCIDRPrefixes(oldRules)...)
			if len(oldRules) > 0 {
				deletedRules, _, _ := d.policy.DeleteByLabelsLocked(opts.ReplaceWithLabels)
				deletedRules.UpdateRulesEndpointsCaches(endpointsToBumpRevision, endpointsToRegen, &policySelectionWG)
			}
		}
	}

	addedRules, newRev := d.policy.AddListLocked(sourceRules)

	// The information needed by the caller is available at this point, signal
	// accordingly.
	resChan <- &PolicyAddResult{
		newRev: newRev,
		err:    nil,
	}

	addedRules.UpdateRulesEndpointsCaches(endpointsToBumpRevision, endpointsToRegen, &policySelectionWG)

	d.policy.Mutex.Unlock()

	// Begin tracking the time taken to deploy newRev to the datapath. The start
	// time is from before the locking above, and thus includes all waits and
	// processing in this function.
	source := ""
	if opts != nil {
		source = opts.Source
	}
	endpointmanager.CallbackForEndpointsAtPolicyRev(context.Background(), newRev, func(now time.Time) {
		duration, _ := safetime.TimeSinceSafe(policyAddStartTime, logger)
		metrics.PolicyImplementationDelay.WithLabelValues(source).Observe(duration.Seconds())
	})

	// Remove the prefixes of replaced rules above. This potentially blocks on
	// the kvstore and should happen without holding the policy lock. Refcounts
	// have been incremented above, so any decrements here will be no-ops for
	// CIDRs that are re-added, and will trigger deletions for those that are
	// no longer used.
	if len(removedPrefixes) > 0 {
		logger.WithField("prefixes", removedPrefixes).Debug("Decrementing replaced CIDR refcounts when adding rules")
		ipcache.ReleaseCIDRs(removedPrefixes)
		d.prefixLengths.Delete(removedPrefixes)
	}

	logger.WithField(logfields.PolicyRevision, newRev).Info("Policy imported via API, recalculating...")

	labels := make([]string, 0, len(sourceRules))
	for _, r := range sourceRules {
		labels = append(labels, r.Labels.GetModel()...)
	}
	repr, err := monitorAPI.PolicyUpdateRepr(len(sourceRules), labels, newRev)
	if err != nil {
		logger.WithField(logfields.PolicyRevision, newRev).Warn("Failed to represent policy update as monitor notification")
	} else {
		d.SendNotification(monitorAPI.AgentNotifyPolicyUpdated, repr)
	}

	if option.Config.SelectiveRegeneration {
		// Only regenerate endpoints which need to be regenerated as a result
		// of the rule update. The rules which were imported most likely do not
		// select all endpoints in the policy repository (and may not select
		// any at all). Reacting to the rule updates enqueues events for all
		// endpoints; once every endpoint has an event queued up, this function
		// returns.

		r := &PolicyReactionEvent{
			d:                 d,
			wg:                &policySelectionWG,
			epsToBumpRevision: endpointsToBumpRevision,
			endpointsToRegen:  endpointsToRegen,
			newRev:            newRev,
		}

		ev := eventqueue.NewEvent(r)
		// This event may block if the RuleReactionQueue is full. We don't care
		// about when it finishes, just that the work it does is done in a serial
		// order.
		_, err := d.policy.RuleReactionQueue.Enqueue(ev)
		if err != nil {
			log.WithField(logfields.PolicyRevision, newRev).Errorf("enqueue of RuleReactionEvent failed: %s", err)
		}
	} else {
		// Regenerate all endpoints unconditionally.
		d.TriggerPolicyUpdates(false, "policy rules added")
	}
}

// PolicyReactionEvent is an event which needs to be serialized after changes
// to a policy repository for a daemon. This currently consists of endpoint
// regenerations / policy revision incrementing for a given endpoint.
type PolicyReactionEvent struct {
	d                 *Daemon
	wg                *sync.WaitGroup
	epsToBumpRevision *policy.EndpointSet
	endpointsToRegen  *policy.EndpointSet
	newRev            uint64
}

// Handle implements pkg/eventqueue/EventHandler interface.
func (r *PolicyReactionEvent) Handle(res chan interface{}) {
	// Wait until we have calculated which endpoints need to be selected
	// across multiple goroutines.
	r.wg.Wait()
	r.d.ReactToRuleUpdates(r.epsToBumpRevision, r.endpointsToRegen, r.newRev)
}

// ReactToRuleUpdates does the following:
// * regenerate all endpoints in epsToRegen
// * bump the policy revision of all endpoints in epsToBumpRevision (those
//   which do not need regeneration) to revision rev.
func (d *Daemon) ReactToRuleUpdates(epsToBumpRevision, epsToRegen *policy.EndpointSet, rev uint64) {
	var enqueueWaitGroup sync.WaitGroup

	// Bump revision of endpoints which don't need to be regenerated.
	epsToBumpRevision.ForEachGo(&enqueueWaitGroup, func(epp policy.Endpoint) {
		if epp == nil {
			return
		}
		epp.PolicyRevisionBumpEvent(rev)
	})

	// Regenerate all other endpoints.
	regenMetadata := &regeneration.ExternalRegenerationMetadata{Reason: "policy rules added"}
	epsToRegen.ForEachGo(&enqueueWaitGroup, func(ep policy.Endpoint) {
		if ep != nil {
			switch e := ep.(type) {
			case *endpoint.Endpoint:
				// Do not wait for the returned channel as we want this to be
				// asynchronous.
				e.RegenerateIfAlive(regenMetadata)
			default:
				log.Errorf("BUG: endpoint not of type *endpoint.Endpoint, received '%T' instead", e)
			}
		}
	})

	enqueueWaitGroup.Wait()
}

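// For example, if the daemon manages endpoints {A, B, C} and a rule update
// only selects B, the caller passes epsToRegen = {B} and epsToBumpRevision =
// {A, C}: B is regenerated, while A and C merely record that they already
// comply with revision rev, avoiding two needless regenerations.
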
// PolicyDeleteEvent is a wrapper around deletion of policy rules with a given
// set of labels from the policy repository in the daemon.
type PolicyDeleteEvent struct {
	labels labels.LabelArray
	d      *Daemon
}

// Handle implements pkg/eventqueue/EventHandler interface.
func (p *PolicyDeleteEvent) Handle(res chan interface{}) {
	p.d.policyDelete(p.labels, res)
}

// PolicyDeleteResult is a wrapper around the values returned by policyDelete.
// It contains the new revision of a policy repository after deleting a list
// of rules from it, and any error associated with deleting rules from said
// repository.
type PolicyDeleteResult struct {
	newRev uint64
	err    error
}

// PolicyDelete deletes the policy rules with the provided set of labels from
// the policy repository of the daemon.
// Returns the revision number and an error in case it was not possible to
// delete the policy.
func (d *Daemon) PolicyDelete(labels labels.LabelArray) (newRev uint64, err error) {
	p := &PolicyDeleteEvent{
		labels: labels,
		d:      d,
	}
	policyDeleteEvent := eventqueue.NewEvent(p)
	resChan, err := d.policy.RepositoryChangeQueue.Enqueue(policyDeleteEvent)
	if err != nil {
		return 0, fmt.Errorf("enqueue of PolicyDeleteEvent failed: %s", err)
	}

	res, ok := <-resChan
	if ok {
		ress := res.(*PolicyDeleteResult)
		return ress.newRev, ress.err
	}
	return 0, fmt.Errorf("policy deletion event cancelled")
}

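// A minimal sketch of deleting rules by label, mirroring what the DELETE
// /policy handler below does; the label is made up for illustration:
//
//	lbls := labels.ParseLabelArray("policy=web-ingress")
//	rev, err := d.PolicyDelete(lbls)
//	if err != nil {
//		log.WithError(err).Warn("Policy deletion failed")
//	} else {
//		log.WithField(logfields.PolicyRevision, rev).Debug("Policy deleted")
//	}
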
func (d *Daemon) policyDelete(labels labels.LabelArray, res chan interface{}) {
	log.WithField(logfields.IdentityLabels, logfields.Repr(labels)).Debug("Policy Delete Request")

	d.policy.Mutex.Lock()

	// First, find rules by the label. We'll use this set of rules to
	// determine which CIDR identities we need to release.
	rules := d.policy.SearchRLocked(labels)

	// Return an error if a label filter was provided and there are no
	// rules matching it. A deletion request for all policy entries should
	// not fail if no policies are loaded.
	if len(rules) == 0 && len(labels) != 0 {
		rev := d.policy.GetRevision()
		d.policy.Mutex.Unlock()

		err := api.New(DeletePolicyNotFoundCode, "policy not found")

		res <- &PolicyDeleteResult{
			newRev: rev,
			err:    err,
		}
		return
	}

	// policySelectionWG is used to signal when the endpoint caches of all
	// deleted rules have been updated.
	var policySelectionWG sync.WaitGroup

	// Get all endpoints at the time rules were deleted so we can figure
	// out which endpoints to regenerate / bump policy revision.
	allEndpoints := endpointmanager.GetPolicyEndpoints()
	// Initially keep all endpoints in the set of endpoints which need to have
	// their revision bumped.
	epsToBumpRevision := policy.NewEndpointSet(allEndpoints)

	endpointsToRegen := policy.NewEndpointSet(nil)

	deletedRules, rev, deleted := d.policy.DeleteByLabelsLocked(labels)
	deletedRules.UpdateRulesEndpointsCaches(epsToBumpRevision, endpointsToRegen, &policySelectionWG)

	res <- &PolicyDeleteResult{
		newRev: rev,
		err:    nil,
	}

	d.policy.Mutex.Unlock()

	// Now that the policies are deleted, we can also attempt to remove
	// all CIDR identities referenced by the deleted rules.
	//
	// We don't treat failures to clean up identities as API failures,
	// because the policy can still successfully be updated. We're just
	// not appropriately performing garbage collection.
	prefixes := policy.GetCIDRPrefixes(rules)
	log.WithField("prefixes", prefixes).Debug("Policy deleted via API, found prefixes...")
	ipcache.ReleaseCIDRs(prefixes)

	prefixesChanged := d.prefixLengths.Delete(prefixes)
	if !bpfIPCache.BackedByLPM() && prefixesChanged {
		// Only recompile if the configuration has changed.
		log.Debug("CIDR policy has changed; recompiling base programs")
		if err := d.compileBase(); err != nil {
			log.WithError(err).Error("Unable to recompile base programs")
		}
	}

	if option.Config.SelectiveRegeneration {
		r := &PolicyReactionEvent{
			d:                 d,
			wg:                &policySelectionWG,
			epsToBumpRevision: epsToBumpRevision,
			endpointsToRegen:  endpointsToRegen,
			newRev:            rev,
		}

		ev := eventqueue.NewEvent(r)
		// This event may block if the RuleReactionQueue is full. We don't care
		// about when it finishes, just that the work it does is done in a serial
		// order.
		_, err := d.policy.RuleReactionQueue.Enqueue(ev)
		if err != nil {
			log.WithField(logfields.PolicyRevision, rev).Errorf("enqueue of RuleReactionEvent failed: %s", err)
		}
	} else {
		d.TriggerPolicyUpdates(true, "policy rules deleted")
	}

	repr, err := monitorAPI.PolicyDeleteRepr(deleted, labels.GetModel(), rev)
	if err != nil {
		log.WithField(logfields.PolicyRevision, rev).Warn("Failed to represent policy update as monitor notification")
	} else {
		d.SendNotification(monitorAPI.AgentNotifyPolicyDeleted, repr)
	}
}

type deletePolicy struct {
	daemon *Daemon
}

func newDeletePolicyHandler(d *Daemon) DeletePolicyHandler {
	return &deletePolicy{daemon: d}
}

func (h *deletePolicy) Handle(params DeletePolicyParams) middleware.Responder {
	d := h.daemon
	lbls := labels.ParseSelectLabelArrayFromArray(params.Labels)
	rev, err := d.PolicyDelete(lbls)
	if err != nil {
		return api.Error(DeletePolicyFailureCode, err)
	}

	// Hold the repository read lock while searching and marshaling the
	// remaining rules, as SearchRLocked requires.
	d.policy.Mutex.RLock()
	ruleList := d.policy.SearchRLocked(labels.LabelArray{})
	policy := &models.Policy{
		Revision: int64(rev),
		Policy:   policy.JSONMarshalRules(ruleList),
	}
	d.policy.Mutex.RUnlock()
	return NewDeletePolicyOK().WithPayload(policy)
}

type putPolicy struct {
	daemon *Daemon
}

func newPutPolicyHandler(d *Daemon) PutPolicyHandler {
	return &putPolicy{daemon: d}
}

func (h *putPolicy) Handle(params PutPolicyParams) middleware.Responder {
	d := h.daemon

	var rules policyAPI.Rules
	if err := json.Unmarshal([]byte(params.Policy), &rules); err != nil {
		return NewPutPolicyInvalidPolicy()
	}

	for _, r := range rules {
		if err := r.Sanitize(); err != nil {
			return api.Error(PutPolicyFailureCode, err)
		}
	}

	rev, err := d.PolicyAdd(rules, &AddOptions{Source: metrics.LabelEventSourceAPI})
	if err != nil {
		return api.Error(PutPolicyFailureCode, err)
	}

	policy := &models.Policy{
		Revision: int64(rev),
		Policy:   policy.JSONMarshalRules(rules),
	}
	return NewPutPolicyOK().WithPayload(policy)
}

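// An illustrative JSON body for PUT /policy; the selector and labels are made
// up, and the label objects follow pkg/labels' {key, value, source} encoding:
//
//	[{
//	  "endpointSelector": {"matchLabels": {"app": "backend"}},
//	  "ingress": [{"fromEndpoints": [{"matchLabels": {"app": "frontend"}}]}],
//	  "labels": [{"key": "policy", "value": "backend-ingress", "source": "unspec"}]
//	}]
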
type getPolicy struct {
	daemon *Daemon
}

func newGetPolicyHandler(d *Daemon) GetPolicyHandler {
	return &getPolicy{daemon: d}
}

func (h *getPolicy) Handle(params GetPolicyParams) middleware.Responder {
	d := h.daemon
	d.policy.Mutex.RLock()
	defer d.policy.Mutex.RUnlock()

	lbls := labels.ParseSelectLabelArrayFromArray(params.Labels)
	ruleList := d.policy.SearchRLocked(lbls)

	// Return an error if labels were specified but no entries were found;
	// otherwise return the (possibly empty) list.
	if len(ruleList) == 0 && len(lbls) != 0 {
		return NewGetPolicyNotFound()
	}

	policy := &models.Policy{
		Revision: int64(d.policy.GetRevision()),
		Policy:   policy.JSONMarshalRules(ruleList),
	}
	return NewGetPolicyOK().WithPayload(policy)
}

type getPolicySelectors struct {
	daemon *Daemon
}

func newGetPolicyCacheHandler(d *Daemon) GetPolicySelectorsHandler {
	return &getPolicySelectors{daemon: d}
}

func (h *getPolicySelectors) Handle(params GetPolicySelectorsParams) middleware.Responder {
	return NewGetPolicySelectorsOK().WithPayload(h.daemon.policy.GetSelectorCache().GetModel())
}