github.com/imran-kn/cilium-fork@v1.6.9/pkg/policy/repository.go

     1  // Copyright 2016-2019 Authors of Cilium
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package policy
    16  
    17  import (
    18  	"encoding/json"
    19  	"sync"
    20  	"sync/atomic"
    21  
    22  	"github.com/cilium/cilium/api/v1/models"
    23  	"github.com/cilium/cilium/pkg/eventqueue"
    24  	"github.com/cilium/cilium/pkg/identity"
    25  	"github.com/cilium/cilium/pkg/identity/cache"
    26  	"github.com/cilium/cilium/pkg/labels"
    27  	"github.com/cilium/cilium/pkg/lock"
    28  	"github.com/cilium/cilium/pkg/logging/logfields"
    29  	"github.com/cilium/cilium/pkg/metrics"
    30  	"github.com/cilium/cilium/pkg/option"
    31  	"github.com/cilium/cilium/pkg/policy/api"
    32  )
    33  
    34  // Repository is a list of policy rules which in combination form the security
    35  // policy. Concurrent access to the repository is protected by Mutex.
    36  type Repository struct {
    37  	// Mutex protects the whole policy tree
    38  	Mutex lock.RWMutex
    39  	rules ruleSlice
    40  
    41  	// revision is the revision of the policy repository. It will be
    42  	// incremented whenever the policy repository is changed.
    43  	// Always positive (>0).
    44  	revision uint64
    45  
    46  	// RepositoryChangeQueue is a queue which serializes changes to the policy
    47  	// repository.
    48  	RepositoryChangeQueue *eventqueue.EventQueue
    49  
    50  	// RuleReactionQueue is a queue which serializes the resultant events that
    51  	// need to occur after updating the state of the policy repository. This
    52  	// can include queueing endpoint regenerations, policy revision increments
    53  	// for endpoints, etc.
    54  	RuleReactionQueue *eventqueue.EventQueue
    55  
    56  	// SelectorCache tracks the selectors used in the policies
    57  	// resolved from the repository.
    58  	selectorCache *SelectorCache
    59  
    60  	// PolicyCache tracks the selector policies created from this repo
    61  	policyCache *PolicyCache
    62  }
    63  
    64  // GetSelectorCache returns the selector cache used by the Repository.
    65  func (p *Repository) GetSelectorCache() *SelectorCache {
    66  	return p.selectorCache
    67  }
    68  
    69  // GetPolicyCache returns the policy cache used by the Repository.
    70  func (p *Repository) GetPolicyCache() *PolicyCache {
    71  	return p.policyCache
    72  }
    73  
    74  // NewPolicyRepository allocates a new policy repository
    75  func NewPolicyRepository() *Repository {
    76  	repoChangeQueue := eventqueue.NewEventQueueBuffered("repository-change-queue", option.Config.PolicyQueueSize)
    77  	ruleReactionQueue := eventqueue.NewEventQueueBuffered("repository-reaction-queue", option.Config.PolicyQueueSize)
    78  	repoChangeQueue.Run()
    79  	ruleReactionQueue.Run()
    80  	repo := &Repository{
    81  		revision:              1,
    82  		RepositoryChangeQueue: repoChangeQueue,
    83  		RuleReactionQueue:     ruleReactionQueue,
    84  		selectorCache:         NewSelectorCache(cache.GetIdentityCache()),
    85  	}
    86  	repo.policyCache = NewPolicyCache(repo, true)
    87  	return repo
    88  }
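
// Example (illustrative sketch): a minimal flow for constructing a repository
// and inserting a rule. The rule contents are made up, and the helper
// constructors (api.NewESFromLabels, labels.ParseSelectLabel,
// labels.ParseLabelArray) are assumed from the imported api and labels
// packages; rules must be sanitized before insertion.
//
//	repo := NewPolicyRepository()
//	r := api.Rule{
//		EndpointSelector: api.NewESFromLabels(labels.ParseSelectLabel("app=frontend")),
//		Labels:           labels.ParseLabelArray("policy=example"),
//	}
//	if err := r.Sanitize(); err != nil {
//		// handle the invalid rule
//	}
//	_, rev := repo.AddList(api.Rules{&r})
//	// rev is the repository revision after the insertion.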
    89  
    90  // traceState is an internal structure used to collect information
    91  // while determining a policy decision
    92  type traceState struct {
    93  	// selectedRules is the number of rules with matching EndpointSelector
    94  	selectedRules int
    95  
    96  	// matchedRules is the number of rules that have allowed traffic
    97  	matchedRules int
    98  
    99  	// constrainedRules counts how many "FromRequires" constraints are
   100  	// unsatisfied
   101  	constrainedRules int
   102  
   103  	// ruleID is the rule ID currently being evaluated
   104  	ruleID int
   105  }
   106  
   107  func (state *traceState) trace(rules int, ctx *SearchContext) {
   108  	ctx.PolicyTrace("%d/%d rules selected\n", state.selectedRules, rules)
   109  	if state.constrainedRules > 0 {
   110  		ctx.PolicyTrace("Found unsatisfied FromRequires constraint\n")
   111  	} else if state.matchedRules > 0 {
   112  		ctx.PolicyTrace("Found allow rule\n")
   113  	} else {
   114  		ctx.PolicyTrace("Found no allow rule\n")
   115  	}
   116  }
   117  
   118  // This belongs in l4.go, as it manipulates L4Filters.
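//
// Sketch of the effect (api.ProtoTCP is assumed from the imported api
// package): wildcardL3L4Rule(api.ProtoTCP, 80, endpoints, ...) adds an
// allow-all L7 entry (an empty PortRuleHTTP, a sanitized empty PortRuleKafka,
// or a MatchPattern "*" DNS rule, depending on the parser) for each selector
// in endpoints, on every existing 80/TCP filter that has an L7 parser. A port
// of 0 matches filters on any port of the given protocol.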
   119  func wildcardL3L4Rule(proto api.L4Proto, port int, endpoints api.EndpointSelectorSlice,
   120  	ruleLabels labels.LabelArray, l4Policy L4PolicyMap, selectorCache *SelectorCache) {
   121  	for _, filter := range l4Policy {
   122  		if proto != filter.Protocol || (port != 0 && port != filter.Port) {
   123  			continue
   124  		}
   125  		switch filter.L7Parser {
   126  		case ParserTypeNone:
   127  			continue
   128  		case ParserTypeHTTP:
   129  			// Wildcard at L7 for all the endpoints allowed at L3 or L4.
   130  			for _, sel := range endpoints {
   131  				cs := filter.cacheIdentitySelector(sel, selectorCache)
   132  				filter.L7RulesPerEp[cs] = api.L7Rules{
   133  					HTTP: []api.PortRuleHTTP{{}},
   134  				}
   135  			}
   136  		case ParserTypeKafka:
   137  			// Wildcard at L7 for all the endpoints allowed at L3 or L4.
   138  			for _, sel := range endpoints {
   139  				rule := api.PortRuleKafka{}
   140  				rule.Sanitize()
   141  				cs := filter.cacheIdentitySelector(sel, selectorCache)
   142  				filter.L7RulesPerEp[cs] = api.L7Rules{
   143  					Kafka: []api.PortRuleKafka{rule},
   144  				}
   145  			}
   146  		case ParserTypeDNS:
   147  			// Wildcard at L7 for all the endpoints allowed at L3 or L4.
   148  			for _, sel := range endpoints {
   149  				// Wildcarding at L7 for DNS is expressed by allowing
   150  				// everything via MatchPattern.
   151  				rule := api.PortRuleDNS{
   152  					MatchPattern: "*",
   153  				}
   154  				rule.Sanitize()
   155  				cs := filter.cacheIdentitySelector(sel, selectorCache)
   156  				filter.L7RulesPerEp[cs] = api.L7Rules{
   157  					DNS: []api.PortRuleDNS{rule},
   158  				}
   159  			}
   160  		default:
   161  			// Wildcard at L7 for all the endpoints allowed at L3 or L4.
   162  			for _, sel := range endpoints {
   163  				cs := filter.cacheIdentitySelector(sel, selectorCache)
   164  				filter.L7RulesPerEp[cs] = api.L7Rules{
   165  					L7Proto: filter.L7Parser.String(),
   166  					L7:      []api.PortRuleL7{},
   167  				}
   168  			}
   169  		}
   170  		filter.DerivedFromRules = append(filter.DerivedFromRules, ruleLabels)
   171  	}
   172  }
   173  
   174  // ResolveL4IngressPolicy resolves the L4 ingress policy for a set of endpoints
   175  // by searching the policy repository for `PortRule` rules that are attached to
   176  // a `Rule` where the EndpointSelector matches `ctx.To`. `ctx.From` has no effect
   177  // and is ignored in the search. If multiple `PortRule` rules are found, all rules
   178  // are merged together. If the rules contain overlapping port definitions, the
   179  // first rule found in the repository takes precedence.
   180  //
   181  // TODO: Coalesce l7 rules?
   182  //
   183  // Caller must release resources by calling Detach() on the returned map!
   184  //
   185  // Note: Only used for policy tracing
   186  func (p *Repository) ResolveL4IngressPolicy(ctx *SearchContext) (L4PolicyMap, error) {
   187  
   188  	result, err := p.rules.resolveL4IngressPolicy(ctx, p.GetRevision(), p.GetSelectorCache())
   189  	if err != nil {
   190  		return nil, err
   191  	}
   192  
   193  	return result, nil
   194  }
   195  
   196  // ResolveL4EgressPolicy resolves the L4 egress policy for a set of endpoints
   197  // by searching the policy repository for `PortRule` rules that are attached to
   198  // a `Rule` where the EndpointSelector matches `ctx.From`. `ctx.To` has no effect
   199  // and is ignored in the search. If multiple `PortRule` rules are found, all rules
   200  // are merged together. If the rules contain overlapping port definitions, the
   201  // first rule found in the repository takes precedence.
   202  //
   203  // Caller must release resources by calling Detach() on the returned map!
   204  //
   205  // NOTE: This is only called from unit tests, but from multiple packages.
   206  func (p *Repository) ResolveL4EgressPolicy(ctx *SearchContext) (L4PolicyMap, error) {
   207  	result, err := p.rules.resolveL4EgressPolicy(ctx, p.GetRevision(), p.GetSelectorCache())
   208  
   209  	if err != nil {
   210  		return nil, err
   211  	}
   212  
   213  	return result, nil
   214  }
   215  
   216  // AllowsIngressRLocked evaluates the policy repository for the provided search
   217  // context and returns the verdict for ingress. If no matching policy allows for
   218  // the  connection, the request will be denied. The policy repository mutex must
   219  // be held.
   220  func (p *Repository) AllowsIngressRLocked(ctx *SearchContext) api.Decision {
   221  	// Lack of DPorts in the SearchContext means L3-only search
   222  	if len(ctx.DPorts) == 0 {
   223  		newCtx := *ctx
   224  		newCtx.DPorts = []*models.Port{{
   225  			Port:     0,
   226  			Protocol: models.PortProtocolANY,
   227  		}}
   228  		ctx = &newCtx
   229  	}
   230  
   231  	ctx.PolicyTrace("Tracing %s", ctx.String())
   232  	ingressPolicy, err := p.ResolveL4IngressPolicy(ctx)
   233  	if err != nil {
   234  		log.WithError(err).Warn("Evaluation error while resolving L4 ingress policy")
   235  	}
   236  
   237  	verdict := api.Denied
   238  	if err == nil && len(ingressPolicy) > 0 {
   239  		verdict = ingressPolicy.IngressCoversContext(ctx)
   240  	}
   241  
   242  	ctx.PolicyTrace("Ingress verdict: %s", verdict.String())
   243  	ingressPolicy.Detach(p.GetSelectorCache())
   244  
   245  	return verdict
   246  }
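
// Example (illustrative sketch, tracing only): computing an ingress verdict
// for a hypothetical frontend->backend flow on TCP/80. The label strings are
// made up; models.PortProtocolTCP is assumed from the imported models package.
//
//	p.Mutex.RLock()
//	ctx := SearchContext{
//		From:   labels.ParseSelectLabelArrayFromArray([]string{"app=frontend"}),
//		To:     labels.ParseSelectLabelArrayFromArray([]string{"app=backend"}),
//		DPorts: []*models.Port{{Port: 80, Protocol: models.PortProtocolTCP}},
//	}
//	verdict := p.AllowsIngressRLocked(&ctx)
//	p.Mutex.RUnlock()
//	// verdict is an api.Decision (api.Allowed or api.Denied).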
   247  
   248  // AllowsEgressRLocked evaluates the policy repository for the provided search
   249  // context and returns the verdict. If no matching policy allows for the
   250  // connection, the request will be denied. The policy repository mutex must be
   251  // held.
   252  //
   253  // NOTE: This is only called from unit tests, but from multiple packages.
   254  func (p *Repository) AllowsEgressRLocked(ctx *SearchContext) api.Decision {
   255  	// Lack of DPorts in the SearchContext means L3-only search
   256  	if len(ctx.DPorts) == 0 {
   257  		newCtx := *ctx
   258  		newCtx.DPorts = []*models.Port{{
   259  			Port:     0,
   260  			Protocol: models.PortProtocolANY,
   261  		}}
   262  		ctx = &newCtx
   263  	}
   264  
   265  	ctx.PolicyTrace("Tracing %s\n", ctx.String())
   266  	egressPolicy, err := p.ResolveL4EgressPolicy(ctx)
   267  	if err != nil {
   268  		log.WithError(err).Warn("Evaluation error while resolving L4 egress policy")
   269  	}
   270  	verdict := api.Denied
   271  	if err == nil && len(egressPolicy) > 0 {
   272  		verdict = egressPolicy.EgressCoversContext(ctx)
   273  	}
   274  
   275  	ctx.PolicyTrace("Egress verdict: %s", verdict.String())
   276  	egressPolicy.Detach(p.GetSelectorCache())
   277  	return verdict
   278  }
   279  
   280  // SearchRLocked searches the policy repository for rules which match the
   281  // specified labels and returns a slice of all rules which match.
   282  func (p *Repository) SearchRLocked(labels labels.LabelArray) api.Rules {
   283  	result := api.Rules{}
   284  
   285  	for _, r := range p.rules {
   286  		if r.Labels.Contains(labels) {
   287  			result = append(result, &r.Rule)
   288  		}
   289  	}
   290  
   291  	return result
   292  }
   293  
   294  // Add inserts a rule into the policy repository.
   295  // This is just a helper function for unit testing.
   296  // TODO: this should be in a test_helpers.go file or something similar
   297  // so we can clearly delineate what helpers are for testing.
   298  // NOTE: This is only called from unit tests, but from multiple packages.
   299  func (p *Repository) Add(r api.Rule, localRuleConsumers []Endpoint) (uint64, map[uint16]struct{}, error) {
   300  	p.Mutex.Lock()
   301  	defer p.Mutex.Unlock()
   302  
   303  	if err := r.Sanitize(); err != nil {
   304  		return p.GetRevision(), nil, err
   305  	}
   306  
   307  	newList := make([]*api.Rule, 1)
   308  	newList[0] = &r
   309  	_, rev := p.AddListLocked(newList)
   310  	return rev, map[uint16]struct{}{}, nil
   311  }
   312  
   313  // AddListLocked inserts rules into the policy repository with the repository already locked.
   314  // Expects that the entire rule list has already been sanitized.
   315  func (p *Repository) AddListLocked(rules api.Rules) (ruleSlice, uint64) {
   316  
   317  	newList := make(ruleSlice, len(rules))
   318  	for i := range rules {
   319  		newRule := &rule{
   320  			Rule:     *rules[i],
   321  			metadata: newRuleMetadata(),
   322  		}
   323  		newList[i] = newRule
   324  	}
   325  
   326  	p.rules = append(p.rules, newList...)
   327  	p.BumpRevision()
   328  	metrics.PolicyCount.Add(float64(len(newList)))
   329  
   330  	return newList, p.GetRevision()
   331  }
   332  
   333  // removeIdentityFromRuleCaches removes the identity from the selector cache
   334  // in each rule in the repository.
   335  //
   336  // Returns a sync.WaitGroup that blocks until the policy operation is complete.
   337  // The repository read lock must be held until the waitgroup is complete.
   338  func (p *Repository) removeIdentityFromRuleCaches(identity *identity.Identity) *sync.WaitGroup {
   339  	var wg sync.WaitGroup
   340  	wg.Add(len(p.rules))
   341  	for _, r := range p.rules {
   342  		go func(rr *rule, wgg *sync.WaitGroup) {
   343  			rr.metadata.delete(identity)
   344  			wgg.Done()
   345  		}(r, &wg)
   346  	}
   347  	return &wg
   348  }
   349  
   350  // LocalEndpointIdentityAdded handles local identity add events.
   351  func (p *Repository) LocalEndpointIdentityAdded(*identity.Identity) {
   352  	// no-op for now.
   353  }
   354  
   355  // LocalEndpointIdentityRemoved handles local identity removal events to
   356  // remove references from rules in the repository to the specified identity.
   357  func (p *Repository) LocalEndpointIdentityRemoved(identity *identity.Identity) {
   358  	go func() {
   359  		scopedLog := log.WithField(logfields.Identity, identity)
   360  		scopedLog.Debug("Removing identity references from policy cache")
   361  		p.Mutex.RLock()
   362  		wg := p.removeIdentityFromRuleCaches(identity)
   363  		wg.Wait()
   364  		p.Mutex.RUnlock()
   365  		scopedLog.Debug("Finished cleaning policy cache")
   366  	}()
   367  }
   368  
   369  // AddList inserts rules into the policy repository. It is used for
   370  // unit-testing purposes only.
   371  func (p *Repository) AddList(rules api.Rules) (ruleSlice, uint64) {
   372  	p.Mutex.Lock()
   373  	defer p.Mutex.Unlock()
   374  	return p.AddListLocked(rules)
   375  }
   376  
   377  // UpdateRulesEndpointsCaches updates the caches within each rule in r that
   378  // specify whether the rule selects the endpoints in eps. If any rule matches
   379  // the endpoints, it is added to the provided IDSet, and removed from the
   380  // provided EndpointSet. The provided WaitGroup is signaled for a given endpoint
   381  // when it is finished being processed.
   382  func (r ruleSlice) UpdateRulesEndpointsCaches(endpointsToBumpRevision, endpointsToRegenerate *EndpointSet, policySelectionWG *sync.WaitGroup) {
   383  	// No need to check whether endpoints need to be regenerated here since we
   384  	// will unconditionally regenerate all endpoints later.
   385  	if !option.Config.SelectiveRegeneration {
   386  		return
   387  	}
   388  
   389  	endpointsToBumpRevision.ForEachGo(policySelectionWG, func(epp Endpoint) {
   390  		endpointSelected, err := r.updateEndpointsCaches(epp)
   391  		if endpointSelected {
   392  			endpointsToRegenerate.Insert(epp)
   393  		}
   394  		// If we could not evaluate the rules against the current endpoint, or
   395  		// the endpoint is not selected by the rules, remove it from the set
   396  		// of endpoints to bump the revision. If the error is non-nil, the
   397  		// endpoint is no longer in either set (endpointsToBumpRevision or
   398  		// endpointsToRegenerate, as we could not determine what to do for the
   399  		// endpoint). This is usually the case when the endpoint is no longer
   400  		// alive (i.e., it has been marked to be deleted).
   401  		if endpointSelected || err != nil {
   402  			if err != nil {
   403  				log.WithError(err).Debug("could not determine whether endpoint was selected by rule")
   404  			}
   405  			endpointsToBumpRevision.Delete(epp)
   406  		}
   407  	})
   408  }
   409  
   410  // DeleteByLabelsLocked deletes all rules in the policy repository which
   411  // contain the specified labels. Returns the revision of the policy repository
   412  // after deleting the rules, as well as how many rules were deleted.
   413  func (p *Repository) DeleteByLabelsLocked(labels labels.LabelArray) (ruleSlice, uint64, int) {
   414  
   415  	deleted := 0
   416  	new := p.rules[:0]
   417  	deletedRules := ruleSlice{}
   418  
   419  	for _, r := range p.rules {
   420  		if !r.Labels.Contains(labels) {
   421  			new = append(new, r)
   422  		} else {
   423  			deletedRules = append(deletedRules, r)
   424  			deleted++
   425  		}
   426  	}
   427  
   428  	if deleted > 0 {
   429  		p.BumpRevision()
   430  		p.rules = new
   431  		metrics.PolicyCount.Sub(float64(deleted))
   432  	}
   433  
   434  	return deletedRules, p.GetRevision(), deleted
   435  }
   436  
   437  // DeleteByLabels deletes all rules in the policy repository which contain the
   438  // specified labels.
   439  func (p *Repository) DeleteByLabels(labels labels.LabelArray) (uint64, int) {
   440  	p.Mutex.Lock()
   441  	defer p.Mutex.Unlock()
   442  	_, rev, numDeleted := p.DeleteByLabelsLocked(labels)
   443  	return rev, numDeleted
   444  }
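
// Example (illustrative sketch): removing every rule that carries a given set
// of labels, e.g. the "policy=example" labels used in the sketch above. The
// revision is only bumped when at least one rule was deleted.
//
//	lbls := labels.ParseLabelArray("policy=example")
//	rev, numDeleted := repo.DeleteByLabels(lbls)
//	_ = rev
//	// numDeleted is 0 if no rule contained the labels.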
   445  
   446  // JSONMarshalRules returns a slice of policy rules as a string in JSON
   447  // representation
   448  func JSONMarshalRules(rules api.Rules) string {
   449  	b, err := json.MarshalIndent(rules, "", "  ")
   450  	if err != nil {
   451  		return err.Error()
   452  	}
   453  	return string(b)
   454  }
   455  
   456  // GetJSON returns all rules of the policy repository as a string in JSON
   457  // representation
   458  func (p *Repository) GetJSON() string {
   459  	p.Mutex.RLock()
   460  	defer p.Mutex.RUnlock()
   461  
   462  	result := api.Rules{}
   463  	for _, r := range p.rules {
   464  		result = append(result, &r.Rule)
   465  	}
   466  
   467  	return JSONMarshalRules(result)
   468  }
   469  
   470  // GetRulesMatching returns whether any rule in the repository selects the labels
   471  // in the provided LabelArray, reported separately for ingress and egress rules.
   472  //
   473  // Must be called with p.Mutex held
   474  func (p *Repository) GetRulesMatching(labels labels.LabelArray) (ingressMatch bool, egressMatch bool) {
   475  	ingressMatch = false
   476  	egressMatch = false
   477  	for _, r := range p.rules {
   478  		rulesMatch := r.EndpointSelector.Matches(labels)
   479  		if rulesMatch {
   480  			if len(r.Ingress) > 0 {
   481  				ingressMatch = true
   482  			}
   483  			if len(r.Egress) > 0 {
   484  				egressMatch = true
   485  			}
   486  		}
   487  
   488  		if ingressMatch && egressMatch {
   489  			return
   490  		}
   491  	}
   492  	return
   493  }
   494  
   495  // getMatchingRules returns whether any rule in the repository selects the given
   496  // security identity, reported separately for ingress and egress, as well as a
   497  // slice of all rules which match.
   498  //
   499  // Must be called with p.Mutex held
   500  func (p *Repository) getMatchingRules(securityIdentity *identity.Identity) (ingressMatch bool, egressMatch bool, matchingRules ruleSlice) {
   501  	matchingRules = []*rule{}
   502  	ingressMatch = false
   503  	egressMatch = false
   504  	for _, r := range p.rules {
   505  		if ruleMatches := r.matches(securityIdentity); ruleMatches {
   506  			// No need to update ingressMatch once it has already been
   507  			// determined to be true; this avoids having to re-check the
   508  			// length of the slice.
   509  			if !ingressMatch && len(r.Ingress) > 0 {
   510  				ingressMatch = true
   511  			}
   512  			if !egressMatch && len(r.Egress) > 0 {
   513  				egressMatch = true
   514  			}
   515  			matchingRules = append(matchingRules, r)
   516  		}
   517  	}
   518  	return
   519  }
   520  
   521  // NumRules returns the number of rules in the policy repository.
   522  //
   523  // Must be called with p.Mutex held
   524  func (p *Repository) NumRules() int {
   525  	return len(p.rules)
   526  }
   527  
   528  // GetRevision returns the revision of the policy repository
   529  func (p *Repository) GetRevision() uint64 {
   530  	return atomic.LoadUint64(&p.revision)
   531  }
   532  
   533  // Empty returns 'true' if the repository has no rules, 'false' otherwise.
   534  //
   535  // Must be called without p.Mutex held
   536  func (p *Repository) Empty() bool {
   537  	p.Mutex.Lock()
   538  	defer p.Mutex.Unlock()
   539  	return p.NumRules() == 0
   540  }
   541  
   542  // TranslationResult contains the results of the rule translation
   543  type TranslationResult struct {
   544  	// NumToServicesRules is the number of ToServices rules processed while
   545  	// translating the rules
   546  	NumToServicesRules int
   547  }
   548  
   549  // TranslateRules traverses the rules and applies the provided translator to each rule
   550  //
   551  // Note: Only used by the k8s watcher.
   552  func (p *Repository) TranslateRules(translator Translator) (*TranslationResult, error) {
   553  	p.Mutex.Lock()
   554  	defer p.Mutex.Unlock()
   555  
   556  	result := &TranslationResult{}
   557  
   558  	for ruleIndex := range p.rules {
   559  		if err := translator.Translate(&p.rules[ruleIndex].Rule, result); err != nil {
   560  			return nil, err
   561  		}
   562  	}
   563  	return result, nil
   564  }
   565  
   566  // BumpRevision allows forcing policy regeneration
   567  func (p *Repository) BumpRevision() {
   568  	metrics.PolicyRevision.Inc()
   569  	atomic.AddUint64(&p.revision, 1)
   570  }
   571  
   572  // GetRulesList returns the current policy
   573  func (p *Repository) GetRulesList() *models.Policy {
   574  	p.Mutex.RLock()
   575  	defer p.Mutex.RUnlock()
   576  
   577  	lbls := labels.ParseSelectLabelArrayFromArray([]string{})
   578  	ruleList := p.SearchRLocked(lbls)
   579  
   580  	return &models.Policy{
   581  		Revision: int64(p.GetRevision()),
   582  		Policy:   JSONMarshalRules(ruleList),
   583  	}
   584  }
   585  
   586  // resolvePolicyLocked returns the selectorPolicy for the provided
   587  // identity from the set of rules in the repository.  If the policy
   588  // cannot be generated due to conflicts at L4 or L7, returns an error.
   589  //
   590  // Must be performed while holding the Repository lock.
   591  func (p *Repository) resolvePolicyLocked(securityIdentity *identity.Identity) (*selectorPolicy, error) {
   592  	// First obtain whether policy applies in both traffic directions, as well
   593  	// as list of rules which actually select this endpoint. This allows us
   594  	// to not have to iterate through the entire rule list multiple times and
   595  	// perform the matching decision again when computing policy for each
   596  	// protocol layer, which is quite costly in terms of performance.
   597  	ingressEnabled, egressEnabled, matchingRules := p.computePolicyEnforcementAndRules(securityIdentity)
   598  
   599  	calculatedPolicy := &selectorPolicy{
   600  		Revision:             p.GetRevision(),
   601  		SelectorCache:        p.GetSelectorCache(),
   602  		L4Policy:             NewL4Policy(p.GetRevision()),
   603  		CIDRPolicy:           NewCIDRPolicy(),
   604  		IngressPolicyEnabled: ingressEnabled,
   605  		EgressPolicyEnabled:  egressEnabled,
   606  	}
   609  
   610  	labels := securityIdentity.LabelArray
   611  	ingressCtx := SearchContext{
   612  		To:          labels,
   613  		rulesSelect: true,
   614  	}
   615  
   616  	egressCtx := SearchContext{
   617  		From:        labels,
   618  		rulesSelect: true,
   619  	}
   620  
   621  	if option.Config.TracingEnabled() {
   622  		ingressCtx.Trace = TRACE_ENABLED
   623  		egressCtx.Trace = TRACE_ENABLED
   624  	}
   625  
   626  	if ingressEnabled {
   627  		newL4IngressPolicy, err := matchingRules.resolveL4IngressPolicy(&ingressCtx, p.GetRevision(), p.GetSelectorCache())
   628  		if err != nil {
   629  			return nil, err
   630  		}
   631  
   632  		newCIDRIngressPolicy := matchingRules.resolveCIDRPolicy(&ingressCtx)
   633  		if err := newCIDRIngressPolicy.Validate(); err != nil {
   634  			return nil, err
   635  		}
   636  
   637  		calculatedPolicy.CIDRPolicy.Ingress = newCIDRIngressPolicy.Ingress
   638  		calculatedPolicy.L4Policy.Ingress = newL4IngressPolicy
   639  	}
   640  
   641  	if egressEnabled {
   642  		newL4EgressPolicy, err := matchingRules.resolveL4EgressPolicy(&egressCtx, p.GetRevision(), p.GetSelectorCache())
   643  		if err != nil {
   644  			return nil, err
   645  		}
   646  
   647  		newCIDREgressPolicy := matchingRules.resolveCIDRPolicy(&egressCtx)
   648  		if err := newCIDREgressPolicy.Validate(); err != nil {
   649  			return nil, err
   650  		}
   651  
   652  		calculatedPolicy.CIDRPolicy.Egress = newCIDREgressPolicy.Egress
   653  		calculatedPolicy.L4Policy.Egress = newL4EgressPolicy
   654  	}
   655  
   656  	// Make the calculated policy ready for incremental updates
   657  	calculatedPolicy.Attach()
   658  
   659  	return calculatedPolicy, nil
   660  }
   661  
   662  // computePolicyEnforcementAndRules returns whether policy applies at ingress or egress
   663  // for the given security identity, as well as a list of any rules which select
   664  // the set of labels of the given security identity.
   665  //
   666  // Must be called with repo mutex held for reading.
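//
// Summary of the decision (sketch; mode constants are those from the option
// package used in the switch below):
//
//	option.AlwaysEnforce:       ingress and egress are always enforced
//	option.DefaultEnforcement:  enforced per direction only if a rule selects
//	                            the identity; always enforced while the
//	                            identity still carries reserved:init
//	any other mode:             never enforced, no matching rules returned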
   667  func (p *Repository) computePolicyEnforcementAndRules(securityIdentity *identity.Identity) (ingress bool, egress bool, matchingRules ruleSlice) {
   668  
   669  	lbls := securityIdentity.LabelArray
   670  	// Check if policy enforcement should be enabled at the daemon level.
   671  	switch GetPolicyEnabled() {
   672  	case option.AlwaysEnforce:
   673  		_, _, matchingRules = p.getMatchingRules(securityIdentity)
   674  		// If policy enforcement is enabled for the daemon, then it has to be
   675  		// enabled for the endpoint.
   676  		return true, true, matchingRules
   677  	case option.DefaultEnforcement:
   678  		ingress, egress, matchingRules = p.getMatchingRules(securityIdentity)
   679  		// If the endpoint has the reserved:init label, i.e. if it has not yet
   680  		// received any labels, always enforce policy (default deny).
   681  		if lbls.Has(labels.IDNameInit) {
   682  			return true, true, matchingRules
   683  		}
   684  
   685  		// Default mode means that if rules contain labels that match this
   686  		// endpoint, then enable policy enforcement for this endpoint.
   687  		return ingress, egress, matchingRules
   688  	default:
   689  		// If policy enforcement isn't enabled, we do not enable policy
   690  		// enforcement for the endpoint. We don't care about returning any
   691  		// rules that match.
   692  		return false, false, nil
   693  	}
   694  }