go.ligato.io/vpp-agent/v3@v3.5.0/tests/e2e/e2etest/containerruntime.go

// Copyright (c) 2020 Pantheon.tech
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package e2etest

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/docker/docker/pkg/stringid"
	docker "github.com/fsouza/go-dockerclient"
	"github.com/go-errors/errors"
	"github.com/segmentio/textio"
	"go.ligato.io/cn-infra/v2/logging"
)

const containerExecTimeout = 10 * time.Second

// ContainerRuntime represents the docker container environment for one component of the test topology.
type ContainerRuntime struct {
	ctx         *TestCtx
	container   *docker.Container
	logIdentity string
	stopTimeout uint
}

// ContainerStartOptions are the options for the ComponentRuntime.Start(option) method as implemented by ContainerRuntime.
type ContainerStartOptions struct {
	ContainerOptions *docker.CreateContainerOptions
	Pull             bool
	AttachLogs       bool
}

// Start creates and starts the container.
func (c *ContainerRuntime) Start(options interface{}) error {
	// get options
	if options == nil {
		return errors.Errorf("can't start container without any information")
	}
	opts, ok := options.(*ContainerStartOptions)
	if !ok {
		return errors.Errorf("provided runtime start options "+
			"are not for container component runtime (%v)", options)
	}

	// create and start container
	_, err := c.createContainer(opts.ContainerOptions, opts.Pull)
	if err != nil {
		return errors.Errorf("can't create %s container due to: %v", c.logIdentity, err)
	}
	log := logging.DefaultLogger.WithField("name", c.logIdentity)
	log.Debugf("starting container: %+v", *opts)
	if err := c.startContainer(); err != nil {
		return errors.Errorf("can't start %s container due to: %v", c.logIdentity, err)
	}
	log = log.WithField("container", c.container.Name)
	log = log.WithField("cid", stringid.TruncateID(c.container.ID))
	log.Debugf("container started")

	// attach logs (all containers share one writer from the test context, so their logs are merged together)
	if opts.AttachLogs {
		logOutput := textio.NewPrefixWriter(c.ctx.logWriter, fmt.Sprintf("[container::%s/%v] ", c.container.Name, stringid.TruncateID(c.container.ID)))
		if err = c.attachLoggingToContainer(logOutput); err != nil {
			return errors.Errorf("can't attach logging to %s container due to: %v", c.logIdentity, err)
		}
	}
	return nil
}

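// exampleStartContainer is a usage sketch (not part of the original file) showing how a
// ContainerRuntime could be started with ContainerStartOptions. The container name, image
// and command used here are illustrative assumptions, not values taken from the test suite.
func exampleStartContainer(ctx *TestCtx) (*ContainerRuntime, error) {
	c := &ContainerRuntime{
		ctx:         ctx,
		logIdentity: "Example",
		stopTimeout: 3, // seconds to wait before the container is killed on Stop()
	}
	err := c.Start(&ContainerStartOptions{
		ContainerOptions: &docker.CreateContainerOptions{
			Name: "e2e-example-container", // hypothetical container name
			Config: &docker.Config{
				Image: "busybox:1.31",            // hypothetical image
				Cmd:   []string{"sleep", "3600"}, // keep the container running
			},
		},
		Pull:       true, // pull the image before creating the container
		AttachLogs: true, // merge container logs into the shared test log writer
	})
	if err != nil {
		return nil, err
	}
	return c, nil
}
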
// Stop stops and removes the container.
func (c *ContainerRuntime) Stop(options ...interface{}) error {
	if err := c.stopContainer(); err != nil {
		if errors.Is(err, &docker.NoSuchContainer{}) {
			// container no longer exists -> nothing to do (the state is the same
			// as after a successful termination)
			return nil
		}
		return err
	}
	if err := c.removeContainer(); err != nil {
		return err
	}
	return nil
}

// ExecCmd executes a command inside the docker container.
func (c *ContainerRuntime) ExecCmd(cmd string, args ...string) (string, string, error) {
	c.ctx.Logger.Printf("[container:%v] ExecCmd(%s, %v)", c.container.ID, cmd, args)

	opts := docker.CreateExecOptions{
		Context:      c.ctx.ctx,
		Container:    c.container.ID,
		Cmd:          append([]string{cmd}, args...),
		AttachStdout: true,
		AttachStderr: true,
	}
	exec, err := c.ctx.dockerClient.CreateExec(opts)
	if err != nil {
		err = errors.Errorf("failed to create docker exec for command %v due to: %v", cmd, err)
		return "", "", err
	}

	ctx, cancel := context.WithTimeout(c.ctx.ctx, containerExecTimeout)
	defer cancel()

	var stdoutBuf, stderrBuf bytes.Buffer
	err = c.ctx.dockerClient.StartExec(exec.ID, docker.StartExecOptions{
		Context:      ctx,
		OutputStream: &stdoutBuf,
		ErrorStream:  &stderrBuf,
	})
	stdout := stdoutBuf.String()
	stderr := stderrBuf.String()

	cmdStr := fmt.Sprintf("`%s %s`", cmd, strings.Join(args, " "))

	// do not log the output of `vppctl show trace` (it can be very large)
	if cmdStr != "`vppctl -s /run/vpp/cli.sock show trace`" {
		c.ctx.Logger.Printf("docker exec: %v:\nstdout(%d): %v\nstderr(%d): %v", cmdStr, len(stdout), stdout, len(stderr), stderr)
	}

	if err != nil {
		errMsg := fmt.Sprintf("exec command %v failed due to: %v", cmdStr, err)
		c.ctx.Logger.Printf("%s", errMsg)
		err = errors.Errorf(errMsg)
		return stdout, stderr, err
	}

	if info, e := c.ctx.dockerClient.InspectExec(exec.ID); e != nil {
		c.ctx.t.Logf("exec inspect failed (ID %v, Cmd %s): %v", exec.ID, cmdStr, e)
		err = errors.Errorf("inspect exec error: %v", e)
	} else {
		c.ctx.Logger.Printf("exec details (ID %v, Cmd %s): %+v", exec.ID, cmdStr, info)
		if info.ExitCode != 0 {
			err = errors.Errorf("exec error (exit code %v): %v", info.ExitCode, stderr)
		}
	}

	return stdout, stderr, err
}

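// exampleExecInContainer is a usage sketch (not part of the original file) showing how the
// three return values of ExecCmd might be consumed. The vppctl binary and the "show version"
// command are illustrative assumptions about the container being executed into.
func exampleExecInContainer(c *ContainerRuntime) (string, error) {
	stdout, stderr, err := c.ExecCmd("vppctl", "-s", "/run/vpp/cli.sock", "show", "version")
	if err != nil {
		// stderr is returned even on failure, so it can be included in the error report
		return "", errors.Errorf("show version failed: %v (stderr: %s)", err, stderr)
	}
	return strings.TrimSpace(stdout), nil
}
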
// IPAddress provides the IP address for connecting to the component.
func (c *ContainerRuntime) IPAddress() string {
	return c.container.NetworkSettings.IPAddress
}

// PID provides the process ID of the main process in the component.
func (c *ContainerRuntime) PID() int {
	return c.container.State.Pid
}

func (c *ContainerRuntime) createContainer(containerOptions *docker.CreateContainerOptions,
	pull bool) (*docker.Container, error) {
	// pull image
	if pull {
		repo, tag, err := c.parseImageName(containerOptions.Config.Image)
		if err != nil {
			return nil, errors.Errorf("can't parse docker image %s "+
				"due to: %v", containerOptions.Config.Image, err)
		}

		err = c.ctx.dockerClient.PullImage(docker.PullImageOptions{
			Repository: repo,
			Tag:        tag,
		}, docker.AuthConfiguration{})
		if err != nil {
			return nil, errors.Errorf("failed to pull %s image: %v", c.logIdentity, err)
		}
	}

	// create container
	var err error
	c.container, err = c.ctx.dockerClient.CreateContainer(*containerOptions)
	if err != nil {
		return nil, errors.Errorf("failed to create %s container: %v", c.logIdentity, err)
	}
	return c.container, nil
}

func (c *ContainerRuntime) startContainer() error {
	if c.container == nil {
		return errors.Errorf("reference to docker container is nil, " +
			"use createContainer() before startContainer()")
	}

	// start container
	err := c.ctx.dockerClient.StartContainer(c.container.ID, nil)
	if err != nil {
		errRemove := c.ctx.dockerClient.RemoveContainer(docker.RemoveContainerOptions{
			ID:    c.container.ID,
			Force: true,
		})
		if errRemove != nil {
			return errors.Errorf("failed to remove %s container: %v "+
				"(after failed start due to: %v)", c.logIdentity, errRemove, err)
		}
		return errors.Errorf("failed to start %s container: %v", c.logIdentity, err)
	}
	c.ctx.t.Logf("started %s container %v", c.logIdentity, c.container.ID)

	// update the container reference (some attributes change once the container is started)
	id := c.container.ID
	c.container, err = c.inspectContainer(id)
	if err != nil {
		return errors.Errorf("can't update inner %s container reference for id %s "+
			"because container inspect failed: %v", c.logIdentity, id, err)
	}
	return nil
}

func (c *ContainerRuntime) stopContainer() error {
	err := c.ctx.dockerClient.StopContainer(c.container.ID, c.stopTimeout)
	if errors.Is(err, &docker.NoSuchContainer{}) {
		return err
	} else if err != nil {
		return errors.Errorf("failed to stop %s container: %v", c.logIdentity, err)
	}
	return nil
}

func (c *ContainerRuntime) removeContainer() error {
	err := c.ctx.dockerClient.RemoveContainer(docker.RemoveContainerOptions{
		ID:    c.container.ID,
		Force: true,
	})
	if err != nil {
		return errors.Errorf("failed to remove %s container: %v", c.logIdentity, err)
	}
	c.ctx.t.Logf("removed %s container %v", c.logIdentity, c.container.ID)
	return nil
}

// attachLoggingToContainer attaches non-blocking logging to the current container. The logging does
// not use the standard log output; it writes to the provided logOutput argument instead, which gives
// the caller of this method more flexibility in how the log output is handled. The only exception is
// the final container exit status, which is logged using the standard output.
func (c *ContainerRuntime) attachLoggingToContainer(logOutput io.Writer) error {
	closeWaiter, err := c.ctx.dockerClient.AttachToContainerNonBlocking(docker.AttachToContainerOptions{
		Container:    c.container.ID,
		Stdout:       true,
		Stderr:       true,
		Stream:       true,
		Logs:         true,
		OutputStream: logOutput,
		ErrorStream:  logOutput,
	})
	if err != nil {
		return errors.Errorf("failed to attach logging to %s container: %v", c.logIdentity, err)
	}

	log := logging.DefaultLogger.WithField("name", c.logIdentity)
	log = log.WithField("container", c.container.Name)
	log = log.WithField("cid", stringid.TruncateID(c.container.ID))

	go func() {
		err := closeWaiter.Wait()
		if err != nil {
			log.Warnf("%s container exited: %v", c.logIdentity, err)
		} else {
			log.Debugf("%s container exited OK", c.logIdentity)
		}
	}()
	return nil
}

func (c *ContainerRuntime) inspectContainer(containerID string) (*docker.Container, error) {
	container, err := c.ctx.dockerClient.InspectContainerWithOptions(docker.InspectContainerOptions{
		Context: c.ctx.ctx,
		ID:      containerID,
	})
	if err != nil {
		return nil, errors.Errorf("failed to inspect %s container with ID %v due to: %v",
			c.logIdentity, containerID, err)
	}
	return container, nil
}

func (c *ContainerRuntime) parseImageName(imageName string) (repo, tag string, err error) {
	repo = imageName
	tag = "latest"
	if strings.Contains(imageName, ":") {
		split := strings.Split(imageName, ":")
		if len(split) != 2 {
			return repo, tag, errors.Errorf("image %s is not valid "+
				"due to too many repo-tag separator characters", imageName)
		}
		repo = split[0]
		tag = split[1]
	}
	return
}
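
// exampleParseImage is a usage sketch (not part of the original file) demonstrating how
// parseImageName splits an image reference into repository and tag, and how the tag
// defaults to "latest" when the reference contains no separator. The image names used
// here are illustrative assumptions.
func exampleParseImage(c *ContainerRuntime) {
	repo, tag, err := c.parseImageName("ligato/vpp-agent:v3.5.0")
	if err == nil {
		fmt.Printf("repo=%s tag=%s\n", repo, tag) // repo=ligato/vpp-agent tag=v3.5.0
	}
	repo, tag, _ = c.parseImageName("busybox")
	fmt.Printf("repo=%s tag=%s\n", repo, tag) // repo=busybox tag=latest
}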