github.com/aporeto-inc/trireme-lib@v10.358.0+incompatible/monitor/internal/docker/monitor.go

package dockermonitor

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"runtime"
	"strconv"
	"sync"
	"time"

	"github.com/dchest/siphash"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	dockerClient "github.com/docker/docker/client"
	"go.aporeto.io/enforcerd/trireme-lib/common"
	tevents "go.aporeto.io/enforcerd/trireme-lib/common"
	"go.aporeto.io/enforcerd/trireme-lib/monitor/config"
	"go.aporeto.io/enforcerd/trireme-lib/monitor/constants"
	"go.aporeto.io/enforcerd/trireme-lib/monitor/extractors"
	"go.aporeto.io/enforcerd/trireme-lib/monitor/registerer"
	"go.aporeto.io/enforcerd/trireme-lib/policy"
	"go.aporeto.io/enforcerd/trireme-lib/utils/cgnetcls"
	"go.aporeto.io/enforcerd/trireme-lib/utils/portspec"
	"go.uber.org/zap"
)

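// lockedDockerClient guards access to the Docker client handle with a mutex,
// since the client is dropped (set to nil) and re-created whenever the
// connection to the docker daemon is lost and re-established.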
type lockedDockerClient struct {
	client           dockerClient.CommonAPIClient
	dockerClientLock sync.Mutex
}

// DockerMonitor implements the connection to Docker and monitoring based on docker events.
type DockerMonitor struct {
	clientHdl                  lockedDockerClient
	socketType                 string
	socketAddress              string
	metadataExtractor          extractors.DockerMetadataExtractor
	handlers                   map[Event]func(ctx context.Context, event *events.Message) error
	eventnotifications         []chan *events.Message
	stopprocessor              []chan bool
	numberOfQueues             int
	stoplistener               chan bool
	config                     *config.ProcessorConfig
	netcls                     cgnetcls.Cgroupnetcls
	syncAtStart                bool
	terminateStoppedContainers bool
	ignoreHostModeContainers   bool
}

// New returns a new docker monitor.
func New(context.Context) *DockerMonitor {
	return &DockerMonitor{}
}

// SetupConfig provides a configuration to implementations. Every implementation
// can have its own config type.
func (d *DockerMonitor) SetupConfig(registerer registerer.Registerer, cfg interface{}) (err error) {

	defaultConfig := DefaultConfig()

	if cfg == nil {
		cfg = defaultConfig
	}

	dockerConfig, ok := cfg.(*Config)
	if !ok {
		return fmt.Errorf("Invalid configuration specified")
	}

	// Setup defaults
	dockerConfig = SetupDefaultConfig(dockerConfig)

	d.socketType = dockerConfig.SocketType
	d.socketAddress = dockerConfig.SocketAddress
	d.metadataExtractor = dockerConfig.EventMetadataExtractor
	d.syncAtStart = dockerConfig.SyncAtStart
	d.handlers = make(map[Event]func(ctx context.Context, event *events.Message) error)
	d.stoplistener = make(chan bool)
	d.netcls = cgnetcls.NewDockerCgroupNetController()
	d.numberOfQueues = runtime.NumCPU() * 8
	d.eventnotifications = make([]chan *events.Message, d.numberOfQueues)
	d.stopprocessor = make([]chan bool, d.numberOfQueues)
	d.terminateStoppedContainers = dockerConfig.DestroyStoppedContainers
	d.ignoreHostModeContainers = dockerConfig.ignoreHostModeContainers
	for i := 0; i < d.numberOfQueues; i++ {
		d.eventnotifications[i] = make(chan *events.Message, 1000)
		d.stopprocessor[i] = make(chan bool)
	}

	// Add handlers for the events that we know how to process
	d.addHandler(EventCreate, d.handleCreateEvent)
	d.addHandler(EventStart, d.handleStartEvent)
	d.addHandler(EventDie, d.handleDieEvent)
	d.addHandler(EventDestroy, d.handleDestroyEvent)
	d.addHandler(EventPause, d.handlePauseEvent)
	d.addHandler(EventUnpause, d.handleUnpauseEvent)

	return nil
}

func (d *DockerMonitor) dockerClient() dockerClient.CommonAPIClient {
	d.clientHdl.dockerClientLock.Lock()
	defer d.clientHdl.dockerClientLock.Unlock()
	client := d.clientHdl.client
	return client
}

func (d *DockerMonitor) setDockerClient(client dockerClient.CommonAPIClient) {
	d.clientHdl.dockerClientLock.Lock()
	d.clientHdl.client = client
	d.clientHdl.dockerClientLock.Unlock()
}

// SetupHandlers sets up handlers for monitors to invoke for various events such as
// processing unit events and synchronization events. This will be called before Start()
// by the consumer of the monitor.
func (d *DockerMonitor) SetupHandlers(c *config.ProcessorConfig) {

	d.config = c
}

// Run will start the Docker policy enforcement.
// It applies a policy to each container that is already up and running,
// and it listens to all container events.
func (d *DockerMonitor) Run(ctx context.Context) error {

	if err := d.config.IsComplete(); err != nil {
		return fmt.Errorf("docker config issue: %s", err)
	}

	if err := d.waitForDockerDaemon(ctx); err != nil {
		zap.L().Error("Docker daemon is not running at startup - skipping container processing. Periodic retries will be attempted",
			zap.Error(err),
			zap.Duration("retry interval", dockerRetryTimer),
		)
		return nil
	}

	return nil
}

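// exampleRunDockerMonitor is a hypothetical usage sketch and not part of the
// original library: it shows how a consumer might wire the monitor together,
// assuming a fully populated *config.ProcessorConfig supplied by the caller
// and the default Docker socket at /var/run/docker.sock.
func exampleRunDockerMonitor(ctx context.Context, pc *config.ProcessorConfig) error {
	m := New(ctx)
	// The registerer argument is not used by SetupConfig, so nil is acceptable here.
	if err := m.SetupConfig(nil, &Config{
		SocketType:    "unix",
		SocketAddress: "/var/run/docker.sock",
		SyncAtStart:   true,
	}); err != nil {
		return err
	}
	// Handlers (the policy resolution callbacks) must be set before Run.
	m.SetupHandlers(pc)
	return m.Run(ctx)
}
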
// syncContainers resyncs the containers present at startup, provided that
// syncAtStart is enabled and a policy resolver is configured.
func (d *DockerMonitor) syncContainers(ctx context.Context) error {
	if d.syncAtStart && d.config.Policy != nil {
		options := types.ContainerListOptions{
			All: !d.terminateStoppedContainers,
		}
		client := d.dockerClient()
		if client == nil {
			return errors.New("unable to init monitor: nil clienthdl")
		}
		containers, err := client.ContainerList(ctx, options)
		if err != nil {
			return fmt.Errorf("unable to get container list: %s", err)
		}
		// Sync all existing containers, depending on the monitor settings.
		if err := d.resyncContainers(ctx, containers); err != nil {
			zap.L().Error("Unable to sync existing containers", zap.Error(err))
		}
	}
	return nil
}

// initMonitor starts the event listener and the event processors.
func (d *DockerMonitor) initMonitor(ctx context.Context) {

	// Start the eventListener and wait on the channel for it to become ready.
	// We are not doing a resync here; we just start the listener.
	listenerReady := make(chan struct{})
	go d.eventListener(ctx, listenerReady)
	<-listenerReady

	// Start processing the events
	go d.eventProcessors(ctx)
}

// addHandler adds a callback handler for the given docker event.
// Interesting event names include 'start' and 'die'. For more on events see
// https://docs.docker.com/engine/reference/api/docker_remote_api/
// under the section 'Docker Events'.
func (d *DockerMonitor) addHandler(event Event, handler EventHandler) {
	d.handlers[event] = handler
}

// getHashKey returns the key to load-balance on. This ensures that all
// events from a given pod/container fall onto the same queue.
func (d *DockerMonitor) getHashKey(r *events.Message) string {

	if isKubernetesContainer(r.Actor.Attributes) {
		return kubePodIdentifier(r.Actor.Attributes)
	}
	return r.ID
}

// sendRequestToQueue sends a request to a channel based on a hash function
func (d *DockerMonitor) sendRequestToQueue(r *events.Message) {

	key0 := uint64(256203161)
	key1 := uint64(982451653)

	key := d.getHashKey(r)
	h := siphash.Hash(key0, key1, []byte(key))

	d.eventnotifications[int(h%uint64(d.numberOfQueues))] <- r
}

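// exampleQueueIndex is a hypothetical sketch, not part of the original library,
// mirroring the hashing in sendRequestToQueue (same siphash keys): it shows that
// events sharing a hash key (the same container, or the same Kubernetes pod)
// always map to the same queue index, which preserves per-container ordering.
func exampleQueueIndex(d *DockerMonitor, msg *events.Message) int {
	h := siphash.Hash(uint64(256203161), uint64(982451653), []byte(d.getHashKey(msg)))
	return int(h % uint64(d.numberOfQueues))
}
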
// eventProcessors processes docker events. We process multiple
// queues in parallel so that we can activate containers as fast
// as possible.
func (d *DockerMonitor) eventProcessors(ctx context.Context) {

	for i := 0; i < d.numberOfQueues; i++ {
		go func(i int) {
			for {
				select {
				case event := <-d.eventnotifications[i]:
					if f, ok := d.handlers[Event(event.Action)]; ok {
						if err := f(ctx, event); err != nil {
							zap.L().Error("Unable to handle docker event",
								zap.String("action", event.Action),
								zap.Error(err),
							)
						}
						continue
					}
				case <-ctx.Done():
					return
				}
			}
		}(i)
	}
}

// eventListener listens to Docker events from the daemon and passes them
// to the processor through a buffered channel. This minimizes the chance
// that we will miss events because the processor is delayed.
func (d *DockerMonitor) eventListener(ctx context.Context, listenerReady chan struct{}) {

	// Signal readiness as soon as the listener goroutine starts; the loop
	// below (re)establishes the connection to the Docker daemon as needed.
	listenerReady <- struct{}{}

	for {
		select {
		case <-ctx.Done():
			return
		default:
			if d.dockerClient() == nil {
				zap.L().Debug("Trying to setup docker daemon")
				if err := d.setupDockerDaemon(ctx); err != nil {
					d.setDockerClient(nil)
					continue
				}
				// Reaching this point means the docker daemon (re)started,
				// so we need to resync.
				if err := d.Resync(ctx); err != nil {
					zap.L().Error("Unable to resync containers after reconnecting to docker daemon", zap.Error(err))
				}
			}
			d.listener(ctx)
		}
	}
}

// listener subscribes to container events from the Docker daemon and dispatches
// them to the processing queues until an error or cancellation occurs.
func (d *DockerMonitor) listener(ctx context.Context) {
	f := filters.NewArgs()
	f.Add("type", "container")
	options := types.EventsOptions{
		Filters: f,
	}
	client := d.dockerClient()
	if client == nil {
		return
	}
	messages, errs := client.Events(ctx, options)
	for {
		select {
		case message := <-messages:
			zap.L().Debug("Got message from docker client",
				zap.String("action", message.Action),
				zap.String("ID", message.ID),
			)
			d.sendRequestToQueue(&message)

		case err := <-errs:
			if err != nil && err != io.EOF {
				zap.L().Warn("Received docker event error",
					zap.Error(err),
				)
			}
			d.setDockerClient(nil)
			return

		case <-ctx.Done():
			return
		}
	}
}

// Resync resyncs all the existing containers on the host, using the
// same process as when a container is initially spawned.
func (d *DockerMonitor) Resync(ctx context.Context) error {

	if !d.syncAtStart || d.config.Policy == nil {
		zap.L().Debug("No synchronization of containers performed")
		return nil
	}

	zap.L().Debug("Syncing all existing containers")
	options := types.ContainerListOptions{
		All: !d.terminateStoppedContainers,
	}
	client := d.dockerClient()
	if client == nil {
		return errors.New("unable to resync: nil clienthdl")
	}
	containers, err := client.ContainerList(ctx, options)
	if err != nil {
		return fmt.Errorf("unable to get container list: %s", err)
	}

	return d.resyncContainers(ctx, containers)
}

func (d *DockerMonitor) resyncContainers(ctx context.Context, containers []types.Container) error {

	d.config.ResyncLock.RLock()
	defer d.config.ResyncLock.RUnlock()
	// Resync containers that share the host network first, unless we are
	// ignoring host mode containers.
	if !d.ignoreHostModeContainers {
		if err := d.resyncContainersByOrder(ctx, containers, true); err != nil {
			zap.L().Error("Unable to sync container", zap.Error(err))
		}
	}

	// resync remaining containers.
	if err := d.resyncContainersByOrder(ctx, containers, false); err != nil {
		zap.L().Error("Unable to sync container", zap.Error(err))
	}

	return nil
}

// resyncContainersByOrder resyncs the given containers. When syncHost is true,
// only containers with NetworkMode == constants.DockerHostMode are processed;
// when it is false, only the remaining containers are processed.
func (d *DockerMonitor) resyncContainersByOrder(ctx context.Context, containers []types.Container, syncHost bool) error {
	for _, c := range containers {
		client := d.dockerClient()
		if client == nil {
			return errors.New("unable to resync: nil clienthdl")
		}
		container, err := client.ContainerInspect(ctx, c.ID)
		if err != nil {
			continue
		}

		if (syncHost && container.HostConfig.NetworkMode != constants.DockerHostMode) ||
			(!syncHost && container.HostConfig.NetworkMode == constants.DockerHostMode) {
			continue
		}

		puID, _ := puIDFromDockerID(container.ID)

		runtime, err := d.extractMetadata(&container)
		if err != nil {
			continue
		}

		event := common.EventStop
		if container.State.Running {
			if !container.State.Paused {
				event = common.EventStart
			} else {
				event = common.EventPause
			}
		}

		// If it is a host container, we need to activate it as a Linux process. We will
		// override the options that the metadata extractor provided.
		if container.HostConfig.NetworkMode == constants.DockerHostMode {
			options := hostModeOptions(&container)
			options.PolicyExtensions = runtime.Options().PolicyExtensions
			runtime.SetOptions(*options)
			runtime.SetPUType(common.LinuxProcessPU)
		}

		runtime.SetOptions(runtime.Options())

		if err := d.config.Policy.HandlePUEvent(ctx, puID, event, runtime); err != nil {
			zap.L().Error("Unable to sync existing Container",
				zap.String("dockerID", c.ID),
				zap.Error(err),
			)
		}

		// if the container has hostnet set to true or is linked
		// to container with hostnet set to true, program the cgroup.
		if isHostNetworkContainer(runtime) {
			if err = d.setupHostMode(puID, runtime, &container); err != nil {
				return fmt.Errorf("unable to setup host mode for container %s: %s", puID, err)
			}
		}

	}

	return nil
}

// setupHostMode sets up the net_cls cgroup for host-mode containers.
func (d *DockerMonitor) setupHostMode(puID string, runtimeInfo policy.RuntimeReader, dockerInfo *types.ContainerJSON) (err error) {

	pausePUID := puID
	if dockerInfo.HostConfig.NetworkMode == constants.DockerHostMode {
		if err = d.netcls.Creategroup(puID); err != nil {
			return err
		}

		// Clean the cgroup on exit, if we have failed to activate.
		defer func() {
			if err != nil {
				if derr := d.netcls.DeleteCgroup(puID); derr != nil {
					zap.L().Warn("Failed to clean cgroup",
						zap.String("puID", puID),
						zap.Error(derr),
						zap.Error(err),
					)
				}
			}
		}()

		markval := runtimeInfo.Options().CgroupMark
		if markval == "" {
			return errors.New("mark value not found")
		}

		mark, _ := strconv.ParseUint(markval, 10, 32)
		if err := d.netcls.AssignMark(puID, mark); err != nil {
			return err
		}
	} else {
		// Add the container pid that is linked to hostnet to
		// the cgroup of the parent container.
		pausePUID = getPausePUID(policyExtensions(runtimeInfo))
	}

	return d.netcls.AddProcess(pausePUID, dockerInfo.State.Pid)
}

func (d *DockerMonitor) retrieveDockerInfo(ctx context.Context, event *events.Message) (*types.ContainerJSON, error) {
	client := d.dockerClient()
	if client == nil {
		return nil, errors.New("unable to get container info: nil clienthdl")
	}
	info, err := client.ContainerInspect(ctx, event.ID)
	if err != nil {
		return nil, fmt.Errorf("unable to read container information: container %s kept alive per policy: %s", event.ID, err)
	}
	return &info, nil
}

// extractMetadata generates the RuntimeInfo based on Docker primitives.
func (d *DockerMonitor) extractMetadata(dockerInfo *types.ContainerJSON) (*policy.PURuntime, error) {

	if dockerInfo == nil {
		return nil, errors.New("docker info is empty")
	}

	if d.metadataExtractor != nil {
		return d.metadataExtractor(dockerInfo)
	}

	return extractors.DefaultMetadataExtractor(dockerInfo)
}

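// exampleCustomExtractor is a hypothetical sketch, not part of the original
// library: a custom metadata extractor of this shape could be supplied via
// Config.EventMetadataExtractor, in which case extractMetadata prefers it over
// extractors.DefaultMetadataExtractor.
func exampleCustomExtractor(info *types.ContainerJSON) (*policy.PURuntime, error) {
	// Start from the default extraction; a real implementation would enrich
	// or override the resulting runtime metadata here.
	return extractors.DefaultMetadataExtractor(info)
}
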
// handleCreateEvent generates a create event type. We extract the metadata
// and start the policy resolution at the create event. No need to wait
// for the start event.
func (d *DockerMonitor) handleCreateEvent(ctx context.Context, event *events.Message) error {

	puID, err := puIDFromDockerID(event.ID)
	if err != nil {
		return err
	}

	container, err := d.retrieveDockerInfo(ctx, event)
	if err != nil {
		return err
	}

	runtime, err := d.extractMetadata(container)
	if err != nil {
		return err
	}

	// If it is a host container, we need to activate it as a Linux process. We will
	// override the options that the metadata extractor provided. We will maintain
	// any policy extensions in the object.
	if container.HostConfig.NetworkMode == constants.DockerHostMode {
		options := hostModeOptions(container)
		options.PolicyExtensions = runtime.Options().PolicyExtensions
		runtime.SetOptions(*options)
		runtime.SetPUType(common.LinuxProcessPU)
	}

	runtime.SetOptions(runtime.Options())

	return d.config.Policy.HandlePUEvent(ctx, puID, tevents.EventCreate, runtime)
}

// handleStartEvent will notify the policy engine immediately about the event in order
// to start the implementation of the functions. At this point we know the process ID
// that is needed for the remote enforcers.
func (d *DockerMonitor) handleStartEvent(ctx context.Context, event *events.Message) error {

	container, err := d.retrieveDockerInfo(ctx, event)
	if err != nil {
		return err
	}

	if !container.State.Running {
		return nil
	}

	puID, err := puIDFromDockerID(container.ID)
	if err != nil {
		return err
	}

	runtime, err := d.extractMetadata(container)
	if err != nil {
		return err
	}

	// If it is a host container, we need to activate it as a Linux process. We will
	// override the options that the metadata extractor provided.
	if container.HostConfig.NetworkMode == constants.DockerHostMode {
		options := hostModeOptions(container)
		options.PolicyExtensions = runtime.Options().PolicyExtensions
		runtime.SetOptions(*options)
		runtime.SetPUType(common.LinuxProcessPU)
	}

	runtime.SetOptions(runtime.Options())

	if err = d.config.Policy.HandlePUEvent(ctx, puID, tevents.EventStart, runtime); err != nil {
		return fmt.Errorf("unable to set policy: container %s kept alive per policy: %s", puID, err)
	}

	// if the container has hostnet set to true or is linked
	// to container with hostnet set to true, program the cgroup.
	if isHostNetworkContainer(runtime) && !d.ignoreHostModeContainers {
		if err = d.setupHostMode(puID, runtime, container); err != nil {
			return fmt.Errorf("unable to setup host mode for container %s: %s", puID, err)
		}
	}
	return nil
}

// handleDieEvent is called when a container dies. It generates a "stop" event.
func (d *DockerMonitor) handleDieEvent(ctx context.Context, event *events.Message) error {

	puID, err := puIDFromDockerID(event.ID)
	if err != nil {
		return err
	}

	runtime := policy.NewPURuntimeWithDefaults()
	runtime.SetOptions(runtime.Options())

	if err := d.config.Policy.HandlePUEvent(ctx, puID, tevents.EventStop, runtime); err != nil && !d.terminateStoppedContainers {
		return err
	}

	if d.terminateStoppedContainers {
		return d.config.Policy.HandlePUEvent(ctx, puID, tevents.EventDestroy, runtime)
	}
	return nil
}

// handleDestroyEvent handles destroy events from Docker. It generates a "destroy" event.
func (d *DockerMonitor) handleDestroyEvent(ctx context.Context, event *events.Message) error {

	puID, err := puIDFromDockerID(event.ID)
	if err != nil {
		return err
	}
	runtime := policy.NewPURuntimeWithDefaults()
	runtime.SetOptions(runtime.Options())

	if err = d.config.Policy.HandlePUEvent(ctx, puID, tevents.EventDestroy, runtime); err != nil {
		zap.L().Error("Failed to handle delete event",
			zap.Error(err),
		)
	}

	if err = d.netcls.DeleteCgroup(puID); err != nil {
		zap.L().Warn("Failed to clean netcls group",
			zap.String("puID", puID),
			zap.Error(err),
		)
	}

	return nil
}

// handlePauseEvent generates a pause event type.
func (d *DockerMonitor) handlePauseEvent(ctx context.Context, event *events.Message) error {
	zap.L().Info("Pause Event for nativeID", zap.String("ID", event.ID))

	puID, err := puIDFromDockerID(event.ID)
	if err != nil {
		return err
	}

	runtime := policy.NewPURuntimeWithDefaults()
	runtime.SetOptions(runtime.Options())

	return d.config.Policy.HandlePUEvent(ctx, puID, tevents.EventPause, runtime)
}

// handleUnpauseEvent generates an unpause event type.
func (d *DockerMonitor) handleUnpauseEvent(ctx context.Context, event *events.Message) error {

	puID, err := puIDFromDockerID(event.ID)
	if err != nil {
		return err
	}

	runtime := policy.NewPURuntimeWithDefaults()
	runtime.SetOptions(runtime.Options())

	return d.config.Policy.HandlePUEvent(ctx, puID, tevents.EventUnpause, runtime)
}

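// puIDFromDockerID derives the processing unit (PU) ID from a Docker container
// ID by taking its first 12 characters (the short container ID); IDs shorter
// than 12 characters are rejected.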
func puIDFromDockerID(dockerID string) (string, error) {

	if dockerID == "" {
		return "", errors.New("unable to generate context id: empty docker id")
	}

	if len(dockerID) < 12 {
		return "", fmt.Errorf("unable to generate context id: docker id shorter than 12 characters: %s", dockerID)
	}

	return dockerID[:12], nil
}

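// initDockerClient creates a Docker API client for the given socket type and
// address: "tcp" addresses are reached over HTTPS, while "unix" socket paths
// (for example /var/run/docker.sock) must exist on disk.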
func initDockerClient(socketType string, socketAddress string) (*dockerClient.Client, error) {

	var socket string

	switch socketType {
	case "tcp":
		socket = "https://" + socketAddress
	case "unix":
		// Sanity check that this path exists
		if _, oserr := os.Stat(socketAddress); os.IsNotExist(oserr) {
			return nil, oserr
		}
		socket = "unix://" + socketAddress
	default:
		return nil, fmt.Errorf("bad socket type: %s", socketType)
	}

	defaultHeaders := map[string]string{"User-Agent": "engine-api-dockerClient-1.0"}

	dc, err := dockerClient.NewClient(socket, DockerClientVersion, nil, defaultHeaders)
	if err != nil {
		return nil, fmt.Errorf("unable to create docker client: %s", err)
	}

	return dc, nil
}

func (d *DockerMonitor) setupDockerDaemon(ctx context.Context) (err error) {

	if d.dockerClient() == nil {
		// Initialize client
		dockerClient, err := initDockerClient(d.socketType, d.socketAddress)
		if err != nil {
			// Return here rather than storing a partially initialized client:
			// the interface == nil check elsewhere would not catch it, and
			// this is cheaper than checking with reflection later.
			return err
		}
		d.setDockerClient(dockerClient)
	}

	subctx, cancel := context.WithTimeout(ctx, dockerPingTimeout)
	defer cancel()

	client := d.dockerClient()
	if client == nil {
		return errors.New("unable to Ping: nil clienthdl")
	}
	_, err = client.Ping(subctx)
	return err
}

// waitForDockerDaemon is a blocking call that tries to bring up the docker
// client; it returns an error if the daemon cannot be reached within the
// initialization timeout, while connection retries continue in the background.
func (d *DockerMonitor) waitForDockerDaemon(ctx context.Context) (err error) {

	done := make(chan bool)
	zap.L().Info("Trying to initialize docker monitor")
	go func(gctx context.Context) {

		for {
			errg := d.setupDockerDaemon(gctx)
			if errg == nil {
				d.initMonitor(gctx)
				break
			}

			select {
			case <-gctx.Done():
				return
			case <-time.After(dockerRetryTimer):
				continue
			}

		}
		done <- true
	}(ctx)

	select {
	case <-ctx.Done():
		return nil
	case <-time.After(dockerInitializationWait):
		return fmt.Errorf("Unable to connect to docker daemon")
	case <-done:
		if err := d.syncContainers(ctx); err != nil {
			zap.L().Error("Failed to sync containers at start", zap.Error(err))
		}
		zap.L().Info("Started Docker Monitor")
	}

	return nil
}

// hostModeOptions creates the default options for a host-mode container. The
// container must be activated as a Linux process.
func hostModeOptions(dockerInfo *types.ContainerJSON) *policy.OptionsType {

	options := policy.OptionsType{
		CgroupName:        strconv.Itoa(dockerInfo.State.Pid),
		CgroupMark:        strconv.FormatUint(cgnetcls.MarkVal(), 10),
		ConvertedDockerPU: true,
		AutoPort:          true,
	}

	for p := range dockerInfo.Config.ExposedPorts {
		if p.Proto() == "tcp" {
			s, err := portspec.NewPortSpecFromString(p.Port(), nil)
			if err != nil {
				continue
			}

			options.Services = append(options.Services, common.Service{
				Protocol: uint8(6),
				Ports:    s,
			})
		}
	}

	return &options
}