github.com/docker/compose-on-kubernetes@v0.5.0/cmd/e2e_benchmark/main.go

package main

import (
	"context"
	"fmt"
	"io"
	"os"
	"strings"
	"text/tabwriter"
	"time"

	"github.com/containerd/console"
	clientset "github.com/docker/compose-on-kubernetes/api/client/clientset/typed/compose/v1beta2"
	"github.com/docker/compose-on-kubernetes/api/constants"
	"github.com/docker/compose-on-kubernetes/install"
	e2ewait "github.com/docker/compose-on-kubernetes/internal/e2e/wait"
	"github.com/morikuni/aec"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/sync/errgroup"
	appstypes "k8s.io/api/apps/v1"
	coretypes "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	k8sclientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)
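
// getPhaseTimings returns the duration of each completed phase for this worker,
// each measured from the end of the previous phase (or from start for the first one).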
func (s *workerState) getPhaseTimings(start time.Time) map[string]time.Duration {
	last := start
	res := make(map[string]time.Duration)
	for _, p := range s.PreviousPhases {
		res[p.Name] = p.DoneTime.Sub(last)
		last = p.DoneTime
	}
	return res
}
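
// computePhaseTimingAverages averages the per-phase durations across all workers,
// following the phase order recorded by the first worker.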
func computePhaseTimingAverages(start time.Time, states []*workerState) []timedPhase {
	if len(states) == 0 {
		return nil
	}
	timings := make([]map[string]time.Duration, len(states))
	for ix, s := range states {
		timings[ix] = s.getPhaseTimings(start)
	}
	var result []timedPhase
	for _, phase := range states[0].PreviousPhases {
		count := 0
		var total time.Duration
		for _, t := range timings {
			if v, ok := t[phase.Name]; ok {
				count++
				total += v
			}
		}
		result = append(result, timedPhase{
			duration: time.Duration(int64(total) / int64(count)),
			name:     phase.Name,
		})
	}
	return result
}
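
// statusPrinter renders worker progress as a table and remembers how many lines
// it printed so the next refresh can erase them on an interactive console.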
type statusPrinter struct {
	out               io.Writer
	previousLineCount int
}
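
// print writes the status table for all workers. When withConsole is true,
// it first erases the block written by the previous call.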
func (r *statusPrinter) print(states []*workerState, start time.Time, withConsole bool) {
	if withConsole {
		b := aec.EmptyBuilder
		for ix := 0; ix < r.previousLineCount; ix++ {
			b = b.Up(1).EraseLine(aec.EraseModes.All)
		}
		fmt.Fprint(r.out, b.ANSI.Apply(""))
	}
	tw := tabwriter.NewWriter(r.out, 5, 1, 4, ' ', 0)
	defer tw.Flush()
	count := 0
	// headers
	fmt.Fprint(tw, " ")
	maxPrevious := 0
	for _, s := range states {
		s.Lock()
		fmt.Fprintf(tw, "\t%s", strings.ToUpper(s.ID))
		if l := len(s.PreviousPhases); l > maxPrevious {
			maxPrevious = l
		}
		s.Unlock()
	}
	fmt.Fprint(tw, "\n")
	count++
	for ix := 0; ix < len(states)+1; ix++ {
		fmt.Fprint(tw, "---\t")
	}
	fmt.Fprint(tw, "\n")
	count++

	// previous steps
	for ix := 0; ix < maxPrevious; ix++ {
		if ix == 0 {
			fmt.Fprint(tw, "PREVIOUS STEPS")
		} else {
			fmt.Fprint(tw, " ")
		}
		for _, s := range states {
			s.Lock()
			fmt.Fprint(tw, "\t")
			if len(s.PreviousPhases) > ix {
				baseDate := start
				if ix > 0 {
					baseDate = s.PreviousPhases[ix-1].DoneTime
				}
				duration := s.PreviousPhases[ix].DoneTime.Sub(baseDate)
				fmt.Fprintf(tw, "%s: %v", s.PreviousPhases[ix].Name, duration)
			} else {
				fmt.Fprint(tw, " ")
			}
			s.Unlock()
		}
		fmt.Fprint(tw, "\n")
		count++
	}

	for ix := 0; ix < len(states)+1; ix++ {
		fmt.Fprint(tw, "---\t")
	}
	fmt.Fprint(tw, "\n")
	count++
	// current step
	fmt.Fprint(tw, "CURRENT STEP")
	for _, s := range states {
		s.Lock()
		fmt.Fprintf(tw, "\t%s", s.CurrentPhase)
		s.Unlock()
	}
	fmt.Fprint(tw, "\n")
	count++

	fmt.Fprint(tw, " ")
	for _, s := range states {
		s.Lock()
		fmt.Fprintf(tw, "\t%s", s.CurrentMessage)
		s.Unlock()
	}
	fmt.Fprint(tw, "\n")
	count++
	r.previousLineCount = count
}
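
// main declares the e2e_benchmark command and its flags, then executes it.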
func main() {
	opts := &options{}
	cmd := &cobra.Command{
		Use: "e2e_benchmark [options]",
		RunE: func(_ *cobra.Command, _ []string) error {
			return run(opts)
		},
	}
	cmd.Flags().StringVar(&opts.kubeconfig, "kubeconfig", "", "kubeconfig path")
	cmd.Flags().IntVar(&opts.workerCount, "worker-count", 5, "number of benchmark workers")
	cmd.Flags().IntVar(&opts.totalStacks, "total-stacks", 200, "total number of stacks created/removed across all workers")
	cmd.Flags().StringVarP(&opts.format, "format", "f", "auto", "output format: auto|json|interactive|report")
	cmd.Flags().StringVar(&opts.collectLogsNamespace, "logs-namespace", "", "namespace to collect Compose on Kubernetes logs from")
	cmd.Flags().DurationVar(&opts.maxDuration, "max-duration", 0, "maximum duration of the benchmark (fails if exceeded)")

	if err := cmd.Execute(); err != nil {
		panic(err)
	}
}
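
// run sets up output and Kubernetes clients, makes sure the Stacks API is installed,
// splits the requested stack count across workers, runs them concurrently,
// and reports progress until they all finish.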
func run(opts *options) error {
	ctx := context.Background()
	if opts.maxDuration > 0 {
		var cancel func()
		ctx, cancel = context.WithTimeout(ctx, opts.maxDuration)
		defer cancel()
	}
	var (
		out io.Writer
		err error
	)
	if out, opts.format, err = configureOutput(opts.format); err != nil {
		return err
	}
	restCfg, err := configureRest(opts.kubeconfig)
	if err != nil {
		return err
	}
	if opts.collectLogsNamespace != "" {
		defer collectLogsToStderr(restCfg, opts.collectLogsNamespace)
	}
	if err := ensureInstalled(restCfg); err != nil {
		return err
	}

	start := time.Now()

	eg, _ := errgroup.WithContext(ctx)
	var states []*workerState
	stacksPerWorker := opts.totalStacks / opts.workerCount
	for workerIX := 0; workerIX < opts.workerCount; workerIX++ {
		workerID := fmt.Sprintf("bench-worker-%d", workerIX)
		state := &workerState{
			ID: workerID,
		}
		states = append(states, state)
		stacksForThisWorker := stacksPerWorker
		if workerIX < (opts.totalStacks % opts.workerCount) {
			stacksForThisWorker++
		}
		eg.Go(func() error {
			return benchmarkRun(restCfg, workerID, stacksForThisWorker, func(u stateUpdater) {
				state.Lock()
				defer state.Unlock()
				u(state)
			})
		})
	}
	finishedC := make(chan error)

	go func() {
		defer close(finishedC)
		finishedC <- eg.Wait()
	}()
	return reportBenchStatus(ctx, out, finishedC, start, opts.format, states)
}
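
// configureOutput resolves the requested output format and returns the writer to use
// along with the effective format: "auto" becomes "interactive" when stdout is a
// console and falls back to "report" otherwise.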
func configureOutput(format string) (io.Writer, string, error) {
	switch format {
	case "interactive", "auto":
		c, err := console.ConsoleFromFile(os.Stdout)
		if err != nil {
			if format == "auto" {
				return os.Stdout, "report", nil
			}
			return nil, "", errors.Wrap(err, "unable to set interactive console")
		}
		return c, "interactive", nil
	case "json", "report":
		return os.Stdout, format, nil
	}
	return nil, "", errors.Errorf("unexpected format %s. must be auto, json, interactive or report", format)
}
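
// configureRest builds a Kubernetes REST client config from the given kubeconfig path,
// falling back to the default loading rules when the path is empty.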
func configureRest(kubeconfig string) (*rest.Config, error) {
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	loadingRules.ExplicitPath = kubeconfig
	cmdConfig, err := loadingRules.Load()
	if err != nil {
		return nil, err
	}
	clientCfg := clientcmd.NewDefaultClientConfig(*cmdConfig, &clientcmd.ConfigOverrides{})
	return clientCfg.ClientConfig()
}
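
// collectLogsToStderr dumps the current logs of every container in the given namespace
// to stderr; run defers it when --logs-namespace is set, to help diagnose failures.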
func collectLogsToStderr(cfg *rest.Config, ns string) {
	client, err := k8sclientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	pods, err := client.CoreV1().Pods(ns).List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, pod := range pods.Items {
		for _, cont := range pod.Status.ContainerStatuses {
			fmt.Fprintf(os.Stderr, "\nCurrent logs for %s/%s\n", pod.Name, cont.Name)
			data, err := client.CoreV1().Pods(ns).GetLogs(pod.Name, &coretypes.PodLogOptions{Container: cont.Name}).Stream()
			if err != nil {
				panic(err)
			}
			io.Copy(os.Stderr, data)
			data.Close()
		}
	}
}
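
// ensureInstalled returns nil if the Stacks API already answers; otherwise it installs
// Compose on Kubernetes into a "benchmark" namespace using the image tag from the TAG
// environment variable and waits for the API to become available.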
func ensureInstalled(config *rest.Config) error {
	stackclient, err := clientset.NewForConfig(config)
	if err != nil {
		return err
	}
	if _, err := stackclient.Stacks(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil {
		// installed
		return nil
	}

	tag := os.Getenv("TAG")
	if tag == "" {
		return errors.New("stacks API is not installed and TAG env var is not set. Cannot install")
	}

	k8sclient, err := k8sclientset.NewForConfig(config)
	if err != nil {
		return err
	}
	if _, err := k8sclient.CoreV1().Namespaces().Get("benchmark", metav1.GetOptions{}); err != nil {
		if kerrors.IsNotFound(err) {
			if _, err := k8sclient.CoreV1().Namespaces().Create(&coretypes.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: "benchmark",
				},
			}); err != nil {
				return err
			}
		} else {
			return err
		}
	}

	if err := install.Do(context.Background(), config,
		install.WithUnsafe(install.UnsafeOptions{
			OptionsCommon: install.OptionsCommon{
				Namespace:              "benchmark",
				Tag:                    tag,
				ReconciliationInterval: constants.DefaultFullSyncInterval,
			}}),
		install.WithObjectFilter(func(o runtime.Object) (bool, error) {
			switch v := o.(type) {
			case *appstypes.Deployment:
				// Change the image pull policy from Always to Never: the image is already loaded locally and not yet on Docker Hub.
				// Only the 1st container in the pod is changed (the API's 2nd container is etcd, which may still need to be pulled).
				v.Spec.Template.Spec.Containers[0].ImagePullPolicy = coretypes.PullNever
			}
			return true, nil
		}),
	); err != nil {
		return err
	}
	if err = install.WaitNPods(config, "benchmark", 2, 2*time.Minute); err != nil {
		return err
	}
	return e2ewait.For(300, func() (bool, error) {
		_, err := stackclient.Stacks("default").List(metav1.ListOptions{})
		return err == nil, err
	})
}