github.com/fafucoder/cilium@v1.6.11/pkg/endpoint/bpf.go

     1  // Copyright 2016-2020 Authors of Cilium
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package endpoint
    16  
    17  import (
    18  	"bufio"
    19  	"context"
    20  	"fmt"
    21  	"io"
    22  	"os"
    23  	"path/filepath"
    24  	"syscall"
    25  	"time"
    26  
    27  	"github.com/cilium/cilium/api/v1/models"
    28  	"github.com/cilium/cilium/common"
    29  	"github.com/cilium/cilium/pkg/bpf"
    30  	"github.com/cilium/cilium/pkg/completion"
    31  	"github.com/cilium/cilium/pkg/controller"
    32  	"github.com/cilium/cilium/pkg/datapath/loader"
    33  	"github.com/cilium/cilium/pkg/endpoint/regeneration"
    34  	"github.com/cilium/cilium/pkg/loadinfo"
    35  	"github.com/cilium/cilium/pkg/logging/logfields"
    36  	bpfconfig "github.com/cilium/cilium/pkg/maps/configmap"
    37  	"github.com/cilium/cilium/pkg/maps/ctmap"
    38  	"github.com/cilium/cilium/pkg/maps/eppolicymap"
    39  	"github.com/cilium/cilium/pkg/maps/lxcmap"
    40  	"github.com/cilium/cilium/pkg/maps/policymap"
    41  	"github.com/cilium/cilium/pkg/option"
    42  	"github.com/cilium/cilium/pkg/policy"
    43  	"github.com/cilium/cilium/pkg/policy/trafficdirection"
    44  	"github.com/cilium/cilium/pkg/revert"
    45  	"github.com/cilium/cilium/pkg/version"
    46  
    47  	"github.com/sirupsen/logrus"
    48  )
    49  
    50  const (
    51  	// EndpointGenerationTimeout specifies the timeout for the proxy completion context
    52  	EndpointGenerationTimeout = 330 * time.Second
    53  )
    54  
    55  // PolicyMapPathLocked returns the path to the policy map of the endpoint.
    56  func (e *Endpoint) PolicyMapPathLocked() string {
    57  	return bpf.LocalMapPath(policymap.MapName, e.ID)
    58  }
    59  
    60  // CallsMapPathLocked returns the path to the cilium tail calls map of an endpoint.
    61  func (e *Endpoint) CallsMapPathLocked() string {
    62  	return bpf.LocalMapPath(loader.CallsMapName, e.ID)
    63  }
    64  
    65  // BPFConfigMapPath returns the path to the BPF config map of the endpoint.
    66  func (e *Endpoint) BPFConfigMapPath() string {
    67  	return bpf.LocalMapPath(bpfconfig.MapNamePrefix, e.ID)
    68  }
    69  
    70  // BPFIpvlanMapPath returns the path to the ipvlan tail call map of an endpoint.
    71  func (e *Endpoint) BPFIpvlanMapPath() string {
    72  	return bpf.LocalMapPath(IpvlanMapName, e.ID)
    73  }
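
        // A hedged illustration of how these per-endpoint paths compose:
        // assuming the default bpffs mount under /sys/fs/bpf and Cilium's
        // five-digit endpoint ID suffix (both assumptions, not guarantees
        // made by this file), the policy map path of endpoint 42 would be:
        //
        //	bpf.LocalMapPath(policymap.MapName, 42)
        //	// → "/sys/fs/bpf/tc/globals/cilium_policy_00042" (assumed layout)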
    74  
    75  // writeInformationalComments writes annotations to the specified writer,
    76  // including a base64 encoding of the endpoint object, and human-readable
    77  // strings describing the configuration of the datapath.
    78  //
    79  // For configuration of actual datapath behavior, see WriteEndpointConfig().
    80  //
    81  // e.Mutex must be held
    82  func (e *Endpoint) writeInformationalComments(w io.Writer) error {
    83  	fw := bufio.NewWriter(w)
    84  
    85  	fmt.Fprint(fw, "/*\n")
    86  
    87  	epStr64, err := e.base64()
    88  	if err == nil {
    89  		var verBase64 string
    90  		verBase64, err = version.Base64()
    91  		if err == nil {
    92  			fmt.Fprintf(fw, " * %s%s:%s\n * \n", common.CiliumCHeaderPrefix,
    93  				verBase64, epStr64)
    94  		}
    95  	}
    96  	if err != nil {
    97  		e.logStatusLocked(BPF, Warning, fmt.Sprintf("Unable to create a base64 encoding: %s", err))
    98  	}
    99  
   100  	if e.ContainerID == "" {
   101  		fmt.Fprintf(fw, " * Docker Network ID: %s\n", e.DockerNetworkID)
   102  		fmt.Fprintf(fw, " * Docker Endpoint ID: %s\n", e.DockerEndpointID)
   103  	} else {
   104  		fmt.Fprintf(fw, " * Container ID: %s\n", e.ContainerID)
   105  	}
   106  
   107  	fmt.Fprintf(fw, ""+
   108  		" * IPv6 address: %s\n"+
   109  		" * IPv4 address: %s\n"+
   110  		" * Identity: %d\n"+
   111  		" * PolicyMap: %s\n"+
   112  		" * NodeMAC: %s\n"+
   113  		" */\n\n",
   114  		e.IPv6.String(), e.IPv4.String(),
   115  		e.getIdentity(), bpf.LocalMapName(policymap.MapName, e.ID),
   116  		e.NodeMAC)
   117  
   118  	fw.WriteString("/*\n")
   119  	fw.WriteString(" * Labels:\n")
   120  	if e.SecurityIdentity != nil {
   121  		if len(e.SecurityIdentity.Labels) == 0 {
   122  			fmt.Fprintf(fw, " * - %s\n", "(no labels)")
   123  		} else {
   124  			for _, v := range e.SecurityIdentity.Labels {
   125  				fmt.Fprintf(fw, " * - %s\n", v)
   126  			}
   127  		}
   128  	}
   129  	fw.WriteString(" */\n\n")
   130  
   131  	return fw.Flush()
   132  }
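
        // For reference, a hedged sketch of the comment block emitted above
        // (all values are placeholders; the exact base64 prefix and field
        // values are illustrative assumptions):
        //
        //	/*
        //	 * CILIUM_BASE64_<version>:<endpoint>
        //	 *
        //	 * Container ID: 3f2a9c...
        //	 * IPv6 address: f00d::a0f:0:0:ed50
        //	 * IPv4 address: 10.15.237.80
        //	 * Identity: 4242
        //	 * PolicyMap: cilium_policy_00042
        //	 * NodeMAC: 3e:9f:12:34:56:78
        //	 */
        //
        //	/*
        //	 * Labels:
        //	 * - k8s:app=example
        //	 */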
   133  
   134  // writeHeaderfile writes the lxc_config.h header file of an endpoint
   135  //
   136  // e.Mutex must be held.
   137  func (e *Endpoint) writeHeaderfile(prefix string) error {
   138  	headerPath := filepath.Join(prefix, common.CHeaderFileName)
   139  	e.getLogger().WithFields(logrus.Fields{
   140  		logfields.Path: headerPath,
   141  	}).Debug("writing header file")
   142  	f, err := os.Create(headerPath)
   143  	if err != nil {
   144  		return fmt.Errorf("failed to open file %s for writing: %s", headerPath, err)
   145  
   146  	}
   147  	defer f.Close()
   148  
   149  	if err = e.writeInformationalComments(f); err != nil {
   150  		return err
   151  	}
   152  	return e.owner.Datapath().WriteEndpointConfig(f, e)
   153  }
   154  
   155  // addNewRedirectsFromMap must be called while holding the endpoint lock for
   156  // writing. On success, returns nil; otherwise, returns an error indicating the
   157  // problem that occurred while adding an L7 redirect for the specified policy.
   158  // Must be called with endpoint.Mutex held.
   159  func (e *Endpoint) addNewRedirectsFromMap(m policy.L4PolicyMap, desiredRedirects map[string]bool, proxyWaitGroup *completion.WaitGroup) (error, revert.FinalizeFunc, revert.RevertFunc) {
   160  	if option.Config.DryMode {
   161  		return nil, nil, nil
   162  	}
   163  
   164  	var finalizeList revert.FinalizeList
   165  	var revertStack revert.RevertStack
   166  	var updatedStats []*models.ProxyStatistics
   167  	insertedDesiredMapState := make(map[policy.Key]struct{})
   168  	updatedDesiredMapState := make(policy.MapState)
   169  
   170  	for _, l4 := range m {
   171  		if l4.IsRedirect() {
   172  			var redirectPort uint16
   173  			var err error
   174  			// Only create a redirect if the proxy is NOT running in a sidecar
   175  			// container. If running in a sidecar container, just allow traffic
   176  			// to the port at L4 by setting the proxy port to 0.
   177  			if !e.hasSidecarProxy || l4.L7Parser != policy.ParserTypeHTTP {
   178  				var finalizeFunc revert.FinalizeFunc
   179  				var revertFunc revert.RevertFunc
   180  				redirectPort, err, finalizeFunc, revertFunc = e.owner.UpdateProxyRedirect(e, l4, proxyWaitGroup)
   181  				if err != nil {
   182  					revertStack.Revert() // Ignore errors while reverting. This is best-effort.
   183  					return err, nil, nil
   184  				}
   185  				finalizeList.Append(finalizeFunc)
   186  				revertStack.Push(revertFunc)
   187  
   188  				proxyID := e.ProxyID(l4)
   189  				if e.realizedRedirects == nil {
   190  					e.realizedRedirects = make(map[string]uint16)
   191  				}
   192  				if _, found := e.realizedRedirects[proxyID]; !found {
   193  					revertStack.Push(func() error {
   194  						delete(e.realizedRedirects, proxyID)
   195  						return nil
   196  					})
   197  				}
   198  				e.realizedRedirects[proxyID] = redirectPort
   199  
   200  				desiredRedirects[proxyID] = true
   201  
   202  				// Update the endpoint API model to report that Cilium manages a
   203  				// redirect for that port.
   204  				e.proxyStatisticsMutex.Lock()
   205  				proxyStats := e.getProxyStatisticsLocked(proxyID, string(l4.L7Parser), uint16(l4.Port), l4.Ingress)
   206  				proxyStats.AllocatedProxyPort = int64(redirectPort)
   207  				e.proxyStatisticsMutex.Unlock()
   208  
   209  				updatedStats = append(updatedStats, proxyStats)
   210  			}
   211  
   212  			// Set the proxy port in the policy map.
   213  			var direction trafficdirection.TrafficDirection
   214  			if l4.Ingress {
   215  				direction = trafficdirection.Ingress
   216  			} else {
   217  				direction = trafficdirection.Egress
   218  			}
   219  
   220  			keysFromFilter := l4.ToKeys(direction)
   221  
   222  			for _, keyFromFilter := range keysFromFilter {
   223  				if oldEntry, ok := e.desiredPolicy.PolicyMapState[keyFromFilter]; ok {
   224  					updatedDesiredMapState[keyFromFilter] = oldEntry
   225  				} else {
   226  					insertedDesiredMapState[keyFromFilter] = struct{}{}
   227  				}
   228  
   229  				e.desiredPolicy.PolicyMapState[keyFromFilter] = policy.MapStateEntry{ProxyPort: redirectPort}
   230  			}
   231  
   232  		}
   233  	}
   234  
   235  	revertStack.Push(func() error {
   236  		// Restore the proxy stats.
   237  		e.proxyStatisticsMutex.Lock()
   238  		for _, stats := range updatedStats {
   239  			stats.AllocatedProxyPort = 0
   240  		}
   241  		e.proxyStatisticsMutex.Unlock()
   242  
   243  		// Restore the desired policy map state.
   244  		for key := range insertedDesiredMapState {
   245  			delete(e.desiredPolicy.PolicyMapState, key)
   246  		}
   247  		for key, entry := range updatedDesiredMapState {
   248  			e.desiredPolicy.PolicyMapState[key] = entry
   249  		}
   250  		return nil
   251  	})
   252  
   253  	return nil, finalizeList.Finalize, revertStack.Revert
   254  }
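
        // exampleFinalizeRevert is an illustrative sketch, not part of the
        // original file, of the finalize/revert pattern used above and
        // throughout this file: each reversible step contributes an undo
        // function, a failure unwinds the earlier steps in LIFO order, and
        // only overall success runs the irreversible finalizers.
        func exampleFinalizeRevert(steps []func() (revert.FinalizeFunc, revert.RevertFunc, error)) error {
        	var finalizeList revert.FinalizeList
        	var revertStack revert.RevertStack
        	for _, step := range steps {
        		finalizeFunc, revertFunc, err := step()
        		if err != nil {
        			revertStack.Revert() // best-effort unwind of the steps so far
        			return err
        		}
        		finalizeList.Append(finalizeFunc)
        		revertStack.Push(revertFunc)
        	}
        	finalizeList.Finalize() // success: run the irreversible cleanup
        	return nil
        }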
   255  
   256  // addNewRedirects must be called while holding the endpoint lock for writing.
   257  // On success, returns nil; otherwise, returns an error indicating the problem
   258  // that occurred while adding an L7 redirect for the specified policy.
   259  // The returned map contains the exact set of IDs of proxy redirects that is
   260  // required to implement the given L4 policy.
   261  // Must be called with endpoint.Mutex held.
   262  func (e *Endpoint) addNewRedirects(m *policy.L4Policy, proxyWaitGroup *completion.WaitGroup) (desiredRedirects map[string]bool, err error, finalizeFunc revert.FinalizeFunc, revertFunc revert.RevertFunc) {
   263  	desiredRedirects = make(map[string]bool)
   264  	var finalizeList revert.FinalizeList
   265  	var revertStack revert.RevertStack
   266  
   267  	var ff revert.FinalizeFunc
   268  	var rf revert.RevertFunc
   269  
   270  	err, ff, rf = e.addNewRedirectsFromMap(m.Ingress, desiredRedirects, proxyWaitGroup)
   271  	if err != nil {
   272  		return desiredRedirects, fmt.Errorf("unable to allocate ingress redirects: %s", err), nil, nil
   273  	}
   274  	finalizeList.Append(ff)
   275  	revertStack.Push(rf)
   276  
   277  	err, ff, rf = e.addNewRedirectsFromMap(m.Egress, desiredRedirects, proxyWaitGroup)
   278  	if err != nil {
   279  		revertStack.Revert() // Ignore errors while reverting. This is best-effort.
   280  		return desiredRedirects, fmt.Errorf("unable to allocate egress redirects: %s", err), nil, nil
   281  	}
   282  	finalizeList.Append(ff)
   283  	revertStack.Push(rf)
   284  
   285  	return desiredRedirects, nil, finalizeList.Finalize, func() error {
   286  		e.getLogger().Debug("Reverting proxy redirect additions")
   287  
   288  		err := revertStack.Revert()
   289  
   290  		e.getLogger().Debug("Finished reverting proxy redirect additions")
   291  
   292  		return err
   293  	}
   294  }
   295  
   296  // Must be called with endpoint.Mutex held.
   297  func (e *Endpoint) removeOldRedirects(desiredRedirects map[string]bool, proxyWaitGroup *completion.WaitGroup) (revert.FinalizeFunc, revert.RevertFunc) {
   298  	if option.Config.DryMode {
   299  		return nil, nil
   300  	}
   301  
   302  	var finalizeList revert.FinalizeList
   303  	var revertStack revert.RevertStack
   304  	removedRedirects := make(map[string]uint16, len(e.realizedRedirects))
   305  	updatedStats := make(map[uint16]*models.ProxyStatistics, len(e.realizedRedirects))
   306  
   307  	for id, redirectPort := range e.realizedRedirects {
   308  		// Remove only the redirects that are not required.
   309  		if desiredRedirects[id] {
   310  			continue
   311  		}
   312  
   313  		err, finalizeFunc, revertFunc := e.owner.RemoveProxyRedirect(e, id, proxyWaitGroup)
   314  		if err != nil {
   315  			e.getLogger().WithError(err).WithField(logfields.L4PolicyID, id).Warn("Error while removing proxy redirect")
   316  			continue
   317  		}
   318  		finalizeList.Append(finalizeFunc)
   319  		revertStack.Push(revertFunc)
   320  
   321  		delete(e.realizedRedirects, id)
   322  		removedRedirects[id] = redirectPort
   323  
   324  		// Update the endpoint API model to report that no redirect is
   325  		// active or known for that port anymore. We never delete stats
   326  		// until an endpoint is deleted, so we only set the redirect port
   327  		// to 0.
   328  		e.proxyStatisticsMutex.Lock()
   329  		if proxyStats, ok := e.proxyStatistics[id]; ok {
   330  			updatedStats[redirectPort] = proxyStats
   331  			proxyStats.AllocatedProxyPort = 0
   332  		} else {
   333  			e.getLogger().WithField(logfields.L4PolicyID, id).Warn("Proxy stats not found")
   334  		}
   335  		e.proxyStatisticsMutex.Unlock()
   336  	}
   337  
   338  	return finalizeList.Finalize,
   339  		func() error {
   340  			e.getLogger().Debug("Reverting proxy redirect removals")
   341  
   342  			// Restore the proxy stats.
   343  			e.proxyStatisticsMutex.Lock()
   344  			for redirectPort, stats := range updatedStats {
   345  				stats.AllocatedProxyPort = int64(redirectPort)
   346  			}
   347  			e.proxyStatisticsMutex.Unlock()
   348  
   349  			for id, redirectPort := range removedRedirects {
   350  				e.realizedRedirects[id] = redirectPort
   351  			}
   352  
   353  			err := revertStack.Revert()
   354  
   355  			e.getLogger().Debug("Finished reverting proxy redirect removals")
   356  
   357  			return err
   358  		}
   359  }
   360  
   361  // regenerateBPF rewrites all headers and updates all BPF maps to reflect the
   362  // specified endpoint.
   363  // ReloadDatapath forces the datapath programs to be reloaded. It does
   364  // not guarantee recompilation of the programs.
   365  // Must be called with endpoint.Mutex not held and endpoint.buildMutex held.
   366  //
   367  // Returns the policy revision number at which the regeneration was called,
   368  // whether the new state dir is populated with all new BPF state files,
   369  // and an error if something failed.
   370  func (e *Endpoint) regenerateBPF(regenContext *regenerationContext) (revnum uint64, stateDirComplete bool, reterr error) {
   371  	var (
   372  		err                 error
   373  		compilationExecuted bool
   374  		headerfileChanged   bool
   375  	)
   376  
   377  	stats := &regenContext.Stats
   378  	stats.waitingForLock.Start()
   379  
   380  	datapathRegenCtxt := regenContext.datapathRegenerationContext
   381  
   382  	// Make sure that owner is not compiling base programs while we are
   383  	// regenerating an endpoint.
   384  	e.owner.GetCompilationLock().RLock()
   385  	stats.waitingForLock.End(true)
   386  	defer e.owner.GetCompilationLock().RUnlock()
   387  
   388  	datapathRegenCtxt.prepareForProxyUpdates(regenContext.parentContext)
   389  	defer datapathRegenCtxt.completionCancel()
   390  
   391  	headerfileChanged, err = e.runPreCompilationSteps(regenContext)
   392  
   393  	// Keep track of the side-effects of the regeneration that need to be
   394  	// reverted in case of failure.
   395  	// Also keep track of the regeneration finalization code that can't be
   396  	// reverted, and execute it in case of regeneration success.
   397  	defer func() {
   398  		// Ignore finalizing of proxy state in dry mode.
   399  		if !option.Config.DryMode {
   400  			e.finalizeProxyState(regenContext, reterr)
   401  		}
   402  	}()
   403  
   404  	if err != nil {
   405  		return 0, compilationExecuted, err
   406  	}
   407  
   408  	// No need to compile BPF in dry mode.
   409  	if option.Config.DryMode {
   410  		return e.nextPolicyRevision, false, nil
   411  	}
   412  
   413  	// Wait for connection tracking cleaning to complete
   414  	stats.waitingForCTClean.Start()
   415  	<-datapathRegenCtxt.ctCleaned
   416  	stats.waitingForCTClean.End(true)
   417  
   418  	stats.prepareBuild.End(true)
   419  
   420  	compilationExecuted, err = e.realizeBPFState(regenContext)
   421  	if err != nil {
   422  		return datapathRegenCtxt.epInfoCache.revision, compilationExecuted, err
   423  	}
   424  
   425  	// Hook the endpoint into the endpoint table and the endpoint-to-policy map, then expose it
   426  	stats.mapSync.Start()
   427  	epErr := eppolicymap.WriteEndpoint(datapathRegenCtxt.epInfoCache.keys, e.PolicyMap)
   428  	err = lxcmap.WriteEndpoint(datapathRegenCtxt.epInfoCache)
   429  	stats.mapSync.End(err == nil)
   430  	if epErr != nil {
   431  		e.logStatusLocked(BPF, Warning, fmt.Sprintf("Unable to sync EpToPolicy map; continuing without sockmap support: %s", epErr))
   432  	}
   433  	if err != nil {
   434  		return 0, compilationExecuted, fmt.Errorf("Exposing new BPF failed: %s", err)
   435  	}
   436  
   437  	// Signal that BPF program has been generated.
   438  	// The endpoint has at least L3/L4 connectivity at this point.
   439  	e.CloseBPFProgramChannel()
   440  
   441  	// Allow another builder to start while we wait for the proxy
   442  	if regenContext.DoneFunc != nil {
   443  		regenContext.DoneFunc()
   444  	}
   445  
   446  	stats.proxyWaitForAck.Start()
   447  	err = e.WaitForProxyCompletions(datapathRegenCtxt.proxyWaitGroup)
   448  	stats.proxyWaitForAck.End(err == nil)
   449  	if err != nil {
   450  		return 0, compilationExecuted, fmt.Errorf("Error while configuring proxy redirects: %s", err)
   451  	}
   452  
   453  	stats.waitingForLock.Start()
   454  	err = e.LockAlive()
   455  	stats.waitingForLock.End(err == nil)
   456  	if err != nil {
   457  		return 0, compilationExecuted, err
   458  	}
   459  	defer e.Unlock()
   460  
   461  	e.ctCleaned = true
   462  
   463  	// Synchronously try to update PolicyMap for this endpoint. If any
   464  	// part of updating the PolicyMap fails, bail out.
   465  	// Unfortunately, this means that the map will be in an inconsistent
   466  	// state with the current program (if it exists) for this endpoint.
   467  	// GH-3897 would fix this by creating a new map to do an atomic swap
   468  	// with the old one.
   469  	//
   470  	// This must be done after allocating the new redirects, to update the
   471  	// policy map with the new proxy ports.
   472  	stats.mapSync.Start()
   473  	err = e.syncPolicyMap()
   474  	stats.mapSync.End(err == nil)
   475  	if err != nil {
   476  		return 0, compilationExecuted, fmt.Errorf("unable to regenerate policy because PolicyMap synchronization failed: %s", err)
   477  	}
   478  
   479  	stateDirComplete = headerfileChanged && compilationExecuted
   480  	return datapathRegenCtxt.epInfoCache.revision, stateDirComplete, err
   481  }
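
        // A condensed, hedged view of the successful path through
        // regenerateBPF above (names are the ones used in this file):
        //
        //	runPreCompilationSteps     // policy, headerfile, proxy redirects
        //	<-ctCleaned                // wait for conntrack scrubbing
        //	realizeBPFState            // compile/rewrite/reload the datapath
        //	eppolicymap + lxcmap sync  // expose the endpoint in the BPF tables
        //	CloseBPFProgramChannel     // signal L3/L4 connectivity
        //	WaitForProxyCompletions    // wait for proxy ACKs
        //	syncPolicyMap              // realize the desired policy map state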
   482  
   483  func (e *Endpoint) realizeBPFState(regenContext *regenerationContext) (compilationExecuted bool, err error) {
   484  	stats := &regenContext.Stats
   485  	datapathRegenCtxt := regenContext.datapathRegenerationContext
   486  
   487  	e.getLogger().WithField(fieldRegenLevel, datapathRegenCtxt.regenerationLevel).Debug("Preparing to compile BPF")
   488  
   489  	if datapathRegenCtxt.regenerationLevel > regeneration.RegenerateWithoutDatapath {
   490  		if e.Options.IsEnabled(option.Debug) {
   491  			debugFunc := log.WithFields(logrus.Fields{logfields.EndpointID: e.StringID()}).Debugf
   492  			ctx, cancel := context.WithCancel(regenContext.parentContext)
   493  			defer cancel()
   494  			loadinfo.LogPeriodicSystemLoad(ctx, debugFunc, time.Second)
   495  		}
   496  
   497  		// Compile and install BPF programs for this endpoint
   498  		if datapathRegenCtxt.regenerationLevel == regeneration.RegenerateWithDatapathRebuild {
   499  			err = loader.CompileAndLoad(datapathRegenCtxt.completionCtx, datapathRegenCtxt.epInfoCache, &stats.datapathRealization)
   500  			e.getLogger().WithError(err).Info("Regenerated endpoint BPF program")
   501  			compilationExecuted = true
   502  		} else if datapathRegenCtxt.regenerationLevel == regeneration.RegenerateWithDatapathRewrite {
   503  			err = loader.CompileOrLoad(datapathRegenCtxt.completionCtx, datapathRegenCtxt.epInfoCache, &stats.datapathRealization)
   504  			if err == nil {
   505  				e.getLogger().Info("Rewrote endpoint BPF program")
   506  			} else {
   507  				e.getLogger().WithError(err).Error("Error while rewriting endpoint BPF program")
   508  			}
   509  			compilationExecuted = true
   510  		} else { // RegenerateWithDatapathLoad
   511  			err = loader.ReloadDatapath(datapathRegenCtxt.completionCtx, datapathRegenCtxt.epInfoCache, &stats.datapathRealization)
   512  			if err == nil {
   513  				e.getLogger().Info("Reloaded endpoint BPF program")
   514  			} else {
   515  				e.getLogger().WithError(err).Error("Error while reloading endpoint BPF program")
   516  			}
   517  		}
   518  
   519  		if err != nil {
   520  			return compilationExecuted, err
   521  		}
   522  		e.bpfHeaderfileHash = datapathRegenCtxt.bpfHeaderfilesHash
   523  	} else {
   524  		e.getLogger().WithField(logfields.BPFHeaderfileHash, datapathRegenCtxt.bpfHeaderfilesHash).
   525  			Debug("BPF header file unchanged, skipping BPF compilation and installation")
   526  	}
   527  
   528  	return compilationExecuted, nil
   529  }
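
        // In summary (hedged, assuming the ordering of regeneration levels
        // implied by the branches above):
        //
        //	RegenerateWithDatapathRebuild → loader.CompileAndLoad (full recompile)
        //	RegenerateWithDatapathRewrite → loader.CompileOrLoad  (rewrite, compile if needed)
        //	RegenerateWithDatapathLoad    → loader.ReloadDatapath (reload only)
        //	RegenerateWithoutDatapath     → skip compilation and installation entirely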
   530  
   531  // runPreCompilationSteps runs all of the regeneration steps that are necessary
   532  // right before compiling the BPF for the given endpoint.
   533  // The endpoint mutex must not be held.
   534  //
   535  // Returns whether the headerfile changed and/or an error.
   536  func (e *Endpoint) runPreCompilationSteps(regenContext *regenerationContext) (headerfileChanged bool, preCompilationError error) {
   537  	stats := &regenContext.Stats
   538  	datapathRegenCtxt := regenContext.datapathRegenerationContext
   539  
   540  	stats.waitingForLock.Start()
   541  	err := e.LockAlive()
   542  	stats.waitingForLock.End(err == nil)
   543  	if err != nil {
   544  		return false, err
   545  	}
   546  
   547  	defer e.Unlock()
   548  
   549  	currentDir := datapathRegenCtxt.currentDir
   550  	nextDir := datapathRegenCtxt.nextDir
   551  
   552  	// In the first-ever regeneration of the endpoint, the conntrack table
   553  	// is cleaned of the new endpoint's IPs, as it is guaranteed that any
   554  	// pre-existing connections using those IPs are now invalid.
   555  	if !e.ctCleaned {
   556  		go func() {
   557  			if !option.Config.DryMode {
   558  				ipv4 := option.Config.EnableIPv4
   559  				ipv6 := option.Config.EnableIPv6
   560  				created := ctmap.Exists(nil, ipv4, ipv6)
   561  				if e.ConntrackLocal() {
   562  					created = ctmap.Exists(e, ipv4, ipv6)
   563  				}
   564  				if created {
   565  					e.scrubIPsInConntrackTable()
   566  				}
   567  			}
   568  			close(datapathRegenCtxt.ctCleaned)
   569  		}()
   570  	} else {
   571  		close(datapathRegenCtxt.ctCleaned)
   572  	}
   573  
   574  	// If dry mode is enabled, no further changes to BPF maps are performed
   575  	if option.Config.DryMode {
   576  
   577  		// Compute policy for this endpoint.
   578  		if err = e.regeneratePolicy(); err != nil {
   579  			return false, fmt.Errorf("Unable to regenerate policy: %s", err)
   580  		}
   581  
   582  		_ = e.updateAndOverrideEndpointOptions(nil)
   583  
   584  		// Dry mode needs Network Policy Updates, but the proxy wait group must
   585  		// not be initialized, as there is no proxy ACKing the changes.
   586  		if err, _ = e.updateNetworkPolicy(nil); err != nil {
   587  			return false, err
   588  		}
   589  
   590  		if err = e.writeHeaderfile(nextDir); err != nil {
   591  			return false, fmt.Errorf("Unable to write header file: %s", err)
   592  		}
   593  
   594  		log.WithField(logfields.EndpointID, e.ID).Debug("Skipping bpf updates due to dry mode")
   595  		return false, nil
   596  	}
   597  
   598  	if e.PolicyMap == nil {
   599  		e.PolicyMap, _, err = policymap.OpenOrCreate(e.PolicyMapPathLocked())
   600  		if err != nil {
   601  			return false, err
   602  		}
   603  		// Clean up map contents
   604  		e.getLogger().Debug("flushing old PolicyMap")
   605  		err = e.PolicyMap.DeleteAll()
   606  		if err != nil {
   607  			return false, err
   608  		}
   609  
   610  		// Also reset the in-memory copy of the realized state, as the
   611  		// BPF map content is guaranteed to be empty right now.
   612  		e.realizedPolicy.PolicyMapState = make(policy.MapState)
   613  	}
   614  
   615  	if e.bpfConfigMap == nil {
   616  		e.bpfConfigMap, _, err = bpfconfig.OpenMapWithName(e.BPFConfigMapPath())
   617  		if err != nil {
   618  			return false, err
   619  		}
   620  		// Also reset the in-memory copy of the realized state, as the
   621  		// BPF map content is guaranteed to be empty right now.
   622  		e.realizedBPFConfig = &bpfconfig.EndpointConfig{}
   623  	}
   624  
   625  	// Only generate & populate policy map if a security identity is set up for
   626  	// this endpoint.
   627  	if e.SecurityIdentity != nil {
   628  		stats.policyCalculation.Start()
   629  		err = e.regeneratePolicy()
   630  		stats.policyCalculation.End(err == nil)
   631  		if err != nil {
   632  			return false, fmt.Errorf("unable to regenerate policy for '%s': %s", e.StringID(), err)
   633  		}
   634  
   635  		_ = e.updateAndOverrideEndpointOptions(nil)
   636  
   637  		// Configure the new network policy with the proxies.
   638  		// Do this before updating the bpf policy maps, so that the proxy listeners have a chance to be
   639  		// ready when new traffic is redirected to them.
   640  		stats.proxyPolicyCalculation.Start()
   641  		err, networkPolicyRevertFunc := e.updateNetworkPolicy(datapathRegenCtxt.proxyWaitGroup)
   642  		stats.proxyPolicyCalculation.End(err == nil)
   643  		if err != nil {
   644  			return false, err
   645  		}
   646  		datapathRegenCtxt.revertStack.Push(networkPolicyRevertFunc)
   647  
   648  		// Walk the L4Policy to add new redirects and update the desired policy for existing redirects.
   649  		// Do this before updating the bpf policy maps, so that the proxies are ready when new traffic
   650  		// is redirected to them.
   651  		var desiredRedirects map[string]bool
   652  		var finalizeFunc revert.FinalizeFunc
   653  		var revertFunc revert.RevertFunc
   654  		if e.desiredPolicy != nil && e.desiredPolicy.L4Policy != nil && e.desiredPolicy.L4Policy.HasRedirect() {
   655  			stats.proxyConfiguration.Start()
   656  			desiredRedirects, err, finalizeFunc, revertFunc = e.addNewRedirects(e.desiredPolicy.L4Policy, datapathRegenCtxt.proxyWaitGroup)
   657  			stats.proxyConfiguration.End(err == nil)
   658  			if err != nil {
   659  				return false, err
   660  			}
   661  			datapathRegenCtxt.finalizeList.Append(finalizeFunc)
   662  			datapathRegenCtxt.revertStack.Push(revertFunc)
   663  		}
   664  
   665  		// desiredBPFConfig may be updated at any point after we figure out
   666  		// whether ingress/egress policy is enabled.
   667  		e.desiredBPFConfig = bpfconfig.GetConfig(e)
   668  
   669  		// Synchronously try to update PolicyMap for this endpoint. If any
   670  		// part of updating the PolicyMap fails, bail out and do not generate
   671  		// BPF. Unfortunately, this means that the map will be in an inconsistent
   672  		// state with the current program (if it exists) for this endpoint.
   673  		// GH-3897 would fix this by creating a new map to do an atomic swap
   674  		// with the old one.
   675  		stats.mapSync.Start()
   676  		err = e.syncPolicyMap()
   677  		stats.mapSync.End(err == nil)
   678  		if err != nil {
   679  			return false, fmt.Errorf("unable to regenerate policy because PolicyMap synchronization failed: %s", err)
   680  		}
   681  
   682  		// Synchronously update the BPF ConfigMap for this endpoint.
   683  		// This is unlikely to fail, but would have the same
   684  		// inconsistency issues as above if it did. Long term, the
   685  		// solution is to templatize this map in the ELF file, but
   686  		// that is not implemented just yet.
   687  		if err = e.bpfConfigMap.Update(e.desiredBPFConfig); err != nil {
   688  			e.getLogger().WithError(err).Error("unable to update BPF config map")
   689  			return false, err
   690  		}
   691  
   692  		datapathRegenCtxt.revertStack.Push(func() error {
   693  			return e.bpfConfigMap.Update(e.realizedBPFConfig)
   694  		})
   695  
   696  		// At this point, traffic is no longer redirected to the proxy for
   697  		// now-obsolete redirects, since we synced the updated policy map above.
   698  		// It's now safe to remove the redirects from the proxy's configuration.
   699  		stats.proxyConfiguration.Start()
   700  		finalizeFunc, revertFunc = e.removeOldRedirects(desiredRedirects, datapathRegenCtxt.proxyWaitGroup)
   701  		datapathRegenCtxt.finalizeList.Append(finalizeFunc)
   702  		datapathRegenCtxt.revertStack.Push(revertFunc)
   703  		stats.proxyConfiguration.End(true)
   704  	}
   705  
   706  	stats.prepareBuild.Start()
   707  	defer func() {
   708  		stats.prepareBuild.End(preCompilationError == nil)
   709  	}()
   710  
   711  	// Avoid BPF program compilation and installation if the headerfile for the endpoint
   712  	// or the node has not changed.
   713  	datapathRegenCtxt.bpfHeaderfilesHash, err = loader.EndpointHash(e)
   714  	if err != nil {
   715  		e.getLogger().WithError(err).Warn("Unable to hash header file")
   716  		datapathRegenCtxt.bpfHeaderfilesHash = ""
   717  		headerfileChanged = true
   718  	} else {
   719  		headerfileChanged = (datapathRegenCtxt.bpfHeaderfilesHash != e.bpfHeaderfileHash)
   720  		e.getLogger().WithField(logfields.BPFHeaderfileHash, datapathRegenCtxt.bpfHeaderfilesHash).
   721  			Debugf("BPF header file hashed (was: %q)", e.bpfHeaderfileHash)
   722  	}
   723  
   724  	if headerfileChanged {
   725  		datapathRegenCtxt.regenerationLevel = regeneration.RegenerateWithDatapathRewrite
   726  	}
   727  	if datapathRegenCtxt.regenerationLevel >= regeneration.RegenerateWithDatapathRewrite {
   728  		if err := e.writeHeaderfile(nextDir); err != nil {
   729  			return false, fmt.Errorf("unable to write header file: %s", err)
   730  		}
   731  	}
   732  
   733  	// Cache endpoint information so that we can release the endpoint lock.
   734  	if datapathRegenCtxt.regenerationLevel >= regeneration.RegenerateWithDatapathRewrite {
   735  		datapathRegenCtxt.epInfoCache = e.createEpInfoCache(nextDir)
   736  	} else {
   737  		datapathRegenCtxt.epInfoCache = e.createEpInfoCache(currentDir)
   738  	}
   739  	if datapathRegenCtxt.epInfoCache == nil {
   740  		return headerfileChanged, fmt.Errorf("Unable to cache endpoint information")
   741  	}
   742  
   743  	return headerfileChanged, nil
   744  }
   745  
   746  func (e *Endpoint) finalizeProxyState(regenContext *regenerationContext, err error) {
   747  	datapathRegenCtx := regenContext.datapathRegenerationContext
   748  	if err == nil {
   749  		// Always execute the finalization code, even if the endpoint is
   750  		// terminating, in order to properly release resources.
   751  		e.UnconditionalLock()
   752  		e.getLogger().Debug("Finalizing successful endpoint regeneration")
   753  		datapathRegenCtx.finalizeList.Finalize()
   754  		e.Unlock()
   755  	} else {
   756  		if err := e.LockAlive(); err != nil {
   757  			e.getLogger().WithError(err).Debug("Skipping unnecessary reverting of endpoint regeneration changes")
   758  			return
   759  		}
   760  		e.getLogger().Debug("Reverting endpoint changes after BPF regeneration failed")
   761  		if err := datapathRegenCtx.revertStack.Revert(); err != nil {
   762  			e.getLogger().WithError(err).Error("Reverting endpoint regeneration changes failed")
   763  		}
   764  		e.getLogger().Debug("Finished reverting endpoint changes after BPF regeneration failed")
   765  		e.Unlock()
   766  	}
   767  }
   768  
   769  // DeleteMapsLocked releases references to all BPF maps associated with this
   770  // endpoint.
   771  //
   772  // Any errors that occur while releasing these references are collected in
   773  // the returned error slice.
   774  //
   775  // Returns nil on success.
   776  func (e *Endpoint) DeleteMapsLocked() []error {
   777  	var errors []error
   778  
   779  	maps := map[string]string{
   780  		"config": e.BPFConfigMapPath(),
   781  		"policy": e.PolicyMapPathLocked(),
   782  		"calls":  e.CallsMapPathLocked(),
   783  		"egress": e.BPFIpvlanMapPath(),
   784  	}
   785  	for name, path := range maps {
   786  		if err := os.RemoveAll(path); err != nil {
   787  			errors = append(errors, fmt.Errorf("unable to remove %s map file %s: %s", name, path, err))
   788  		}
   789  	}
   790  
   791  	if e.ConntrackLocalLocked() {
   792  		// Remove local connection tracking maps
   793  		for _, m := range ctmap.LocalMaps(e, option.Config.EnableIPv4, option.Config.EnableIPv6) {
   794  			ctPath, err := m.Path()
   795  			if err == nil {
   796  				err = os.RemoveAll(ctPath)
   797  			}
   798  			if err != nil {
   799  				errors = append(errors, fmt.Errorf("unable to remove CT map %s: %s", ctPath, err))
   800  			}
   801  		}
   802  	}
   803  
   804  	// Remove handle_policy() tail call entry for EP
   805  	if err := policymap.RemoveGlobalMapping(uint32(e.ID)); err != nil {
   806  		errors = append(errors, fmt.Errorf("unable to remove endpoint from global policy map: %s", err))
   807  	}
   808  
   809  	return errors
   810  }
   811  
   812  // DeleteBPFProgramLocked deletes the BPF program associated with the endpoint's
   813  // veth interface.
   814  func (e *Endpoint) DeleteBPFProgramLocked() error {
   815  	e.getLogger().Debug("deleting bpf program from endpoint")
   816  	return loader.DeleteDatapath(context.TODO(), e.IfName, "ingress")
   817  }
   818  
   819  // garbageCollectConntrack runs ctmap.GC() on either the endpoint's
   820  // local conntrack table or the global conntrack table.
   821  //
   822  // The endpoint lock must be held.
   823  func (e *Endpoint) garbageCollectConntrack(filter *ctmap.GCFilter) {
   824  	var maps []*ctmap.Map
   825  
   826  	if e.ConntrackLocalLocked() {
   827  		maps = ctmap.LocalMaps(e, option.Config.EnableIPv4, option.Config.EnableIPv6)
   828  	} else {
   829  		maps = ctmap.GlobalMaps(option.Config.EnableIPv4, option.Config.EnableIPv6)
   830  	}
   831  	for _, m := range maps {
   832  		if err := m.Open(); err != nil {
   833  			// If the CT table doesn't exist, there's nothing to GC.
   834  			scopedLog := log.WithError(err).WithField(logfields.EndpointID, e.ID)
   835  			if os.IsNotExist(err) {
   836  				scopedLog.Debug("Skipping GC for endpoint")
   837  			} else {
   838  				scopedLog.Warn("Unable to open map")
   839  			}
   840  			continue
   841  		}
   842  		defer m.Close()
   843  
   844  		ctmap.GC(m, filter)
   845  	}
   846  }
   847  
   848  func (e *Endpoint) scrubIPsInConntrackTableLocked() {
   849  	e.garbageCollectConntrack(&ctmap.GCFilter{
   850  		MatchIPs: map[string]struct{}{
   851  			e.IPv4.String(): {},
   852  			e.IPv6.String(): {},
   853  		},
   854  	})
   855  }
   856  
   857  func (e *Endpoint) scrubIPsInConntrackTable() {
   858  	e.UnconditionalLock()
   859  	e.scrubIPsInConntrackTableLocked()
   860  	e.Unlock()
   861  }
   862  
   863  // SkipStateClean can be called on an endpoint before its first build to skip
   864  // the cleaning of state such as the conntrack table. This is useful when an
   865  // endpoint is being restored from state and the datapath state should not be
   866  // cleaned.
   867  //
   868  // The endpoint lock must NOT be held.
   869  func (e *Endpoint) SkipStateClean() {
   870  	// Mark conntrack as already cleaned
   871  	e.UnconditionalLock()
   872  	e.ctCleaned = true
   873  	e.Unlock()
   874  }
   875  
   876  // GetBPFKeys returns all keys which should represent this endpoint in the BPF
   877  // endpoints map
   878  func (e *Endpoint) GetBPFKeys() []*lxcmap.EndpointKey {
   879  	keys := []*lxcmap.EndpointKey{}
   880  	if e.IPv6.IsSet() {
   881  		keys = append(keys, lxcmap.NewEndpointKey(e.IPv6.IP()))
   882  	}
   883  
   884  	if e.IPv4.IsSet() {
   885  		keys = append(keys, lxcmap.NewEndpointKey(e.IPv4.IP()))
   886  	}
   887  
   888  	return keys
   889  }
   890  
   891  // GetBPFValue returns the value which should represent this endpoint in the
   892  // BPF endpoints map
   893  func (e *Endpoint) GetBPFValue() (*lxcmap.EndpointInfo, error) {
   894  	mac, err := e.LXCMAC.Uint64()
   895  	if err != nil {
   896  		return nil, fmt.Errorf("invalid LXC MAC: %v", err)
   897  	}
   898  
   899  	nodeMAC, err := e.NodeMAC.Uint64()
   900  	if err != nil {
   901  		return nil, fmt.Errorf("invalid node MAC: %v", err)
   902  	}
   903  
   904  	info := &lxcmap.EndpointInfo{
   905  		IfIndex: uint32(e.IfIndex),
   906  		// Store security identity in network byte order so it can be
   907  		// written into the packet without an additional byte order
   908  		// conversion.
   909  		LxcID:   e.ID,
   910  		MAC:     lxcmap.MAC(mac),
   911  		NodeMAC: lxcmap.MAC(nodeMAC),
   912  	}
   913  
   914  	return info, nil
   915  }
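
        // An illustrative sketch, not part of the original file, of how the
        // two accessors above pair up when populating the BPF endpoints map:
        // every IP-derived key maps to the same EndpointInfo value (the real
        // update is performed by lxcmap.WriteEndpoint during regeneration).
        //
        //	value, err := e.GetBPFValue()
        //	if err != nil {
        //		return err // invalid MACs abort the update
        //	}
        //	for _, key := range e.GetBPFKeys() { // one key per configured IP
        //		// write key → value into the lxcmap (sketch)
        //	}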
   916  
   917  // The bool pointed to by hadProxy, if not nil, will be set to 'true' if
   918  // the deleted entry had a proxy port assigned to it.  *hadProxy is
   919  // not otherwise changed (e.g., it is never set to 'false').
   920  func (e *Endpoint) deletePolicyKey(keyToDelete policy.Key, incremental bool, hadProxy *bool) bool {
   921  	// Convert from policy.Key to policymap.Key
   922  	policymapKey := policymap.PolicyKey{
   923  		Identity:         keyToDelete.Identity,
   924  		DestPort:         keyToDelete.DestPort,
   925  		Nexthdr:          keyToDelete.Nexthdr,
   926  		TrafficDirection: keyToDelete.TrafficDirection,
   927  	}
   928  
   929  	// Do not error out if the map entry was already deleted from the bpf map.
   930  	// Incremental updates depend on this being OK in cases where identity change
   931  	// events overlap with full policy computation.
   932  	// In other cases we only delete entries that exist, but even in that case it
   933  	// is better to not error out if somebody else has deleted the map entry in the
   934  	// meantime.
   935  	err, errno := e.PolicyMap.DeleteKeyWithErrno(policymapKey)
   936  	if err != nil && errno != syscall.ENOENT {
   937  		e.getLogger().WithError(err).WithField(logfields.BPFMapKey, policymapKey).Error("Failed to delete PolicyMap key")
   938  		return false
   939  	}
   940  
   941  	if hadProxy != nil {
   942  		if entry, ok := e.realizedPolicy.PolicyMapState[keyToDelete]; ok && entry.ProxyPort != 0 {
   943  			*hadProxy = true
   944  		}
   945  	}
   946  
   947  	// Operation was successful, remove from realized state.
   948  	delete(e.realizedPolicy.PolicyMapState, keyToDelete)
   949  
   950  	// Incremental updates need to update the desired state as well.
   951  	if incremental && e.desiredPolicy != e.realizedPolicy {
   952  		delete(e.desiredPolicy.PolicyMapState, keyToDelete)
   953  	}
   954  
   955  	return true
   956  }
   957  
   958  func (e *Endpoint) addPolicyKey(keyToAdd policy.Key, entry policy.MapStateEntry, incremental bool) bool {
   959  	// Convert from policy.Key to policymap.Key
   960  	policymapKey := policymap.PolicyKey{
   961  		Identity:         keyToAdd.Identity,
   962  		DestPort:         keyToAdd.DestPort,
   963  		Nexthdr:          keyToAdd.Nexthdr,
   964  		TrafficDirection: keyToAdd.TrafficDirection,
   965  	}
   966  
   967  	err := e.PolicyMap.AllowKey(policymapKey, entry.ProxyPort)
   968  	if err != nil {
   969  		e.getLogger().WithError(err).WithFields(logrus.Fields{
   970  			logfields.BPFMapKey: policymapKey,
   971  			logfields.Port:      entry.ProxyPort,
   972  		}).Error("Failed to add PolicyMap key")
   973  		return false
   974  	}
   975  
   976  	// Operation was successful, add to realized state.
   977  	e.realizedPolicy.PolicyMapState[keyToAdd] = entry
   978  
   979  	// Incremental updates need to update the desired state as well.
   980  	if incremental && e.desiredPolicy != e.realizedPolicy {
   981  		e.desiredPolicy.PolicyMapState[keyToAdd] = entry
   982  	}
   983  
   984  	return true
   985  }
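
        // policymapKeyFromPolicyKey is an illustrative helper, not part of
        // the original file, capturing the policy.Key → policymap.PolicyKey
        // conversion duplicated in deletePolicyKey and addPolicyKey above.
        func policymapKeyFromPolicyKey(k policy.Key) policymap.PolicyKey {
        	return policymap.PolicyKey{
        		Identity:         k.Identity,
        		DestPort:         k.DestPort,
        		Nexthdr:          k.Nexthdr,
        		TrafficDirection: k.TrafficDirection,
        	}
        }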
   986  
   987  // ApplyPolicyMapChanges updates the Endpoint's PolicyMap with the changes
   988  // that have accumulated for the PolicyMap via various outside events (e.g.,
   989  // identities added / deleted).
   990  // 'proxyWaitGroup' may not be nil.
   991  func (e *Endpoint) ApplyPolicyMapChanges(proxyWaitGroup *completion.WaitGroup) error {
   992  	if err := e.LockAlive(); err != nil {
   993  		return err
   994  	}
   995  	defer e.Unlock()
   996  
   997  	proxyChanges, err := e.applyPolicyMapChanges()
   998  	if err != nil {
   999  		return err
  1000  	}
  1001  
  1002  	if proxyChanges {
  1003  		// Ignoring the revertFunc; keep all successful changes even if some fail.
  1004  		err, _ = e.updateNetworkPolicy(proxyWaitGroup)
  1005  	} else {
  1006  		// Allow caller to wait for the current network policy to be acked
  1007  		e.useCurrentNetworkPolicy(proxyWaitGroup)
  1008  	}
  1009  
  1010  	return err
  1011  }
  1012  
  1013  // applyPolicyMapChanges applies any incremental policy map changes
  1014  // collected on the desired policy.
  1015  func (e *Endpoint) applyPolicyMapChanges() (proxyChanges bool, err error) {
  1016  	errors := 0
  1017  
  1018  	// Note that after a successful endpoint regeneration the
  1019  	// desired and realized policies are the same pointer. During
  1020  	// bpf regeneration, any incremental updates are collected on
  1021  	// the newly computed desired policy, which is not fully
  1022  	// realized yet. This is why we get the map changes from the
  1023  	// desired policy here.
  1024  	adds, deletes := e.desiredPolicy.PolicyMapChanges.ConsumeMapChanges()
  1025  
  1026  	// Add policy map entries before deleting to avoid transient drops
  1027  	for keyToAdd, entry := range adds {
  1028  		// Keep the existing proxy port, if any
  1029  		entry.ProxyPort = e.realizedRedirects[policy.ProxyIDFromKey(e.ID, keyToAdd)]
  1030  		if entry.ProxyPort != 0 {
  1031  			proxyChanges = true
  1032  		}
  1033  		if !e.addPolicyKey(keyToAdd, entry, true) {
  1034  			errors++
  1035  		}
  1036  	}
  1037  
  1038  	for keyToDelete := range deletes {
  1039  		if !e.deletePolicyKey(keyToDelete, true, &proxyChanges) {
  1040  			errors++
  1041  		}
  1042  	}
  1043  
  1044  	if errors > 0 {
  1045  		return proxyChanges, fmt.Errorf("updating desired PolicyMap state failed")
  1046  	} else if len(adds)+len(deletes) > 0 {
  1047  		e.getLogger().WithFields(logrus.Fields{
  1048  			logfields.AddedPolicyID:   adds,
  1049  			logfields.DeletedPolicyID: deletes,
  1050  		}).Debug("Applied policy map updates due to identity changes")
  1051  	}
  1052  
  1053  	return proxyChanges, nil
  1054  }
  1055  
  1056  // syncPolicyMap updates the bpf policy map state based on the
  1057  // difference between the realized and desired policy state without
  1058  // dumping the bpf policy map.
  1059  func (e *Endpoint) syncPolicyMap() error {
  1060  	// Nothing to do if the desired policy is already fully realized.
  1061  	if e.realizedPolicy != e.desiredPolicy {
  1062  		errors := 0
  1063  
  1064  		// Add policy map entries before deleting to avoid transient drops
  1065  		err := e.addPolicyMapDelta()
  1066  		if err != nil {
  1067  			errors++
  1068  		}
  1069  
  1070  		// Delete policy keys present in the realized state, but not present in the desired state
  1071  		for keyToDelete := range e.realizedPolicy.PolicyMapState {
  1072  			// If key that is in realized state is not in desired state, just remove it.
  1073  			if _, ok := e.desiredPolicy.PolicyMapState[keyToDelete]; !ok {
  1074  				if !e.deletePolicyKey(keyToDelete, false, nil) {
  1075  					errors++
  1076  				}
  1077  			}
  1078  		}
  1079  
  1080  		if errors > 0 {
  1081  			return fmt.Errorf("syncPolicyMap failed")
  1082  		}
  1083  	}
  1084  
  1085  	// Still may have changes due to identities added and/or
  1086  	// deleted after the desired policy was computed.
  1087  	_, err := e.applyPolicyMapChanges()
  1088  	return err
  1089  }
  1090  
  1091  // addPolicyMapDelta adds new or updates existing bpf policy map state based
  1092  // on the difference between the realized and desired policy state without
  1093  // dumping the bpf policy map.
  1094  func (e *Endpoint) addPolicyMapDelta() error {
  1095  	// Nothing to do if the desired policy is already fully realized.
  1096  	if e.realizedPolicy == e.desiredPolicy {
  1097  		return nil
  1098  	}
  1099  
  1100  	errors := 0
  1101  
  1102  	for keyToAdd, entry := range e.desiredPolicy.PolicyMapState {
  1103  		if oldEntry, ok := e.realizedPolicy.PolicyMapState[keyToAdd]; !ok || oldEntry != entry {
  1104  			if !e.addPolicyKey(keyToAdd, entry, false) {
  1105  				errors++
  1106  			}
  1107  		}
  1108  	}
  1109  
  1110  	if errors > 0 {
  1111  		return fmt.Errorf("updating desired PolicyMap state failed")
  1112  	}
  1113  
  1114  	return nil
  1115  }
  1116  
  1117  // syncPolicyMapWithDump attempts to synchronize the PolicyMap for this endpoint
  1118  // with the set of PolicyKeys in the endpoint's desired policy map state.
  1119  // It dumps the current contents of the endpoint's PolicyMap and deletes any
  1120  // PolicyKeys that are not present in the desired state. It then adds any keys
  1121  // that are not yet present in the map. When a key from the desired state is
  1122  // inserted successfully into the endpoint's BPF PolicyMap, it is added to the
  1123  // endpoint's realized state. Returns an error if the endpoint's BPF PolicyMap
  1124  // cannot be dumped, or if any update operation on the map fails.
  1125  // Must be called with e.Mutex locked.
  1126  func (e *Endpoint) syncPolicyMapWithDump() error {
  1127  
  1128  	if e.realizedPolicy.PolicyMapState == nil {
  1129  		e.realizedPolicy.PolicyMapState = make(policy.MapState)
  1130  	}
  1131  
  1132  	if e.desiredPolicy.PolicyMapState == nil {
  1133  		e.desiredPolicy.PolicyMapState = make(policy.MapState)
  1134  	}
  1135  
  1136  	if e.PolicyMap == nil {
  1137  		return fmt.Errorf("not syncing PolicyMap state for endpoint because PolicyMap is nil")
  1138  	}
  1139  
  1140  	currentMapContents, err := e.PolicyMap.DumpToSlice()
  1141  
  1142  	// If the map cannot be dumped, attempt to close it and open it again.
  1143  	// See GH-4229.
  1144  	if err != nil {
  1145  		e.getLogger().WithError(err).Error("unable to dump PolicyMap when trying to sync desired and realized PolicyMap state")
  1146  
  1147  		// Close to avoid leaking of file descriptors, but still continue in case
  1148  		// Close() does not succeed, because otherwise the map will never be
  1149  		// opened again unless the agent is restarted.
  1150  		err := e.PolicyMap.Close()
  1151  		if err != nil {
  1152  			e.getLogger().WithError(err).Error("unable to close PolicyMap which was not able to be dumped")
  1153  		}
  1154  
  1155  		e.PolicyMap, _, err = policymap.OpenOrCreate(e.PolicyMapPathLocked())
  1156  		if err != nil {
  1157  			return fmt.Errorf("unable to open PolicyMap for endpoint: %s", err)
  1158  		}
  1159  
  1160  		// Try to dump again, fail if error occurs.
  1161  		currentMapContents, err = e.PolicyMap.DumpToSlice()
  1162  		if err != nil {
  1163  			return err
  1164  		}
  1165  	}
  1166  
  1167  	errors := 0
  1168  
  1169  	for _, entry := range currentMapContents {
  1170  		// Convert key to host-byte order for lookup in the desiredMapState.
  1171  		keyHostOrder := entry.Key.ToHost()
  1172  
  1173  		// Convert from policymap.Key to policy.Key
  1174  		keyToDelete := policy.Key{
  1175  			Identity:         keyHostOrder.Identity,
  1176  			DestPort:         keyHostOrder.DestPort,
  1177  			Nexthdr:          keyHostOrder.Nexthdr,
  1178  			TrafficDirection: keyHostOrder.TrafficDirection,
  1179  		}
  1180  
  1181  		// If key that is in policy map is not in desired state, just remove it.
  1182  		if _, ok := e.desiredPolicy.PolicyMapState[keyToDelete]; !ok {
  1183  			e.getLogger().WithField(logfields.BPFMapKey, entry.Key.String()).Debug("syncPolicyMapWithDump removing a bpf policy entry not in the desired state")
  1184  			if !e.deletePolicyKey(keyToDelete, false, nil) {
  1185  				errors++
  1186  			}
  1187  		}
  1188  	}
  1189  
  1190  	err = e.addPolicyMapDelta()
  1191  
  1192  	if errors > 0 {
  1193  		return fmt.Errorf("synchronizing desired PolicyMap state failed")
  1194  	}
  1195  
  1196  	return err
  1197  }
  1198  
  1199  func (e *Endpoint) syncPolicyMapController() {
  1200  	ctrlName := fmt.Sprintf("sync-policymap-%d", e.ID)
  1201  	e.controllers.UpdateController(ctrlName,
  1202  		controller.ControllerParams{
  1203  			DoFunc: func(ctx context.Context) (reterr error) {
  1204  				// Failure to lock is not an error; it means
  1205  				// that the endpoint was disconnected and we
  1206  				// should exit gracefully.
  1207  				if err := e.LockAlive(); err != nil {
  1208  					return controller.NewExitReason("Endpoint disappeared")
  1209  				}
  1210  				defer e.Unlock()
  1211  				return e.syncPolicyMapWithDump()
  1212  			},
  1213  			RunInterval: 1 * time.Minute,
  1214  		},
  1215  	)
  1216  }
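
        // The dump-based resync scheduled above acts as a safety net for the
        // cheaper delta-based syncPolicyMap(): once a minute it reconciles
        // the BPF map against the desired state, catching any drift (for
        // example, entries changed outside the agent) that the delta path
        // cannot see.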
  1217  
  1218  // RequireARPPassthrough returns true if the datapath must implement ARP
  1219  // passthrough for this endpoint
  1220  func (e *Endpoint) RequireARPPassthrough() bool {
  1221  	return e.DatapathConfiguration.RequireArpPassthrough
  1222  }
  1223  
  1224  // RequireEgressProg returns true if the endpoint requires bpf_lxc with section
  1225  // "to-container" to be attached at egress on the host-facing veth pair
  1226  func (e *Endpoint) RequireEgressProg() bool {
  1227  	return e.DatapathConfiguration.RequireEgressProg
  1228  }
  1229  
  1230  // RequireRouting returns true if the endpoint requires BPF routing to be
  1231  // enabled; when disabled, routing is delegated to the Linux stack
  1232  func (e *Endpoint) RequireRouting() (required bool) {
  1233  	required = true
  1234  	if e.DatapathConfiguration.RequireRouting != nil {
  1235  		required = *e.DatapathConfiguration.RequireRouting
  1236  	}
  1237  	return
  1238  }
  1239  
  1240  // RequireEndpointRoute returns true if the endpoint requires a per-endpoint route
  1241  func (e *Endpoint) RequireEndpointRoute() bool {
  1242  	return e.DatapathConfiguration.InstallEndpointRoute
  1243  }