github.com/yrj2011/jx-test-infra@v0.0.0-20190529031832-7a2065ee98eb/kubetest/e2e.go

     1  /*
     2  Copyright 2014 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package main
    18  
    19  import (
    20  	"fmt"
    21  	"io/ioutil"
    22  	"log"
    23  	"os"
    24  	"os/exec"
    25  	"path/filepath"
    26  	"regexp"
    27  	"strings"
    28  	"time"
    29  
    30  	"k8s.io/test-infra/kubetest/e2e"
    31  	"k8s.io/test-infra/kubetest/process"
    32  	"k8s.io/test-infra/kubetest/util"
    33  )
    34  
    35  // argFields splits args into fields and, when dump or ipRange are set, fills in default --report-dir, --disable-log-dump, and --cluster-ip-range values. Add more default --test_args as we migrate them.
    36  func argFields(args, dump, ipRange string) []string {
    37  	f := strings.Fields(args)
    38  	if dump != "" {
    39  		f = util.SetFieldDefault(f, "--report-dir", dump)
    40  		// Disable log dumping within ginkgo, since kubetest itself handles it now.
    41  		f = util.SetFieldDefault(f, "--disable-log-dump", "true")
    42  	}
    43  	if ipRange != "" {
    44  		f = util.SetFieldDefault(f, "--cluster-ip-range", ipRange)
    45  	}
    46  	return f
    47  }
    48  
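        // run drives a single kubetest invocation: it optionally tears down any
        // previous cluster, brings the cluster (and federation) up, runs the
        // configured tests, dumps logs, checks for leaked resources, tears
        // everything down, and aggregates all errors encountered along the way.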
    49  func run(deploy deployer, o options) error {
    50  	if o.checkSkew {
    51  		os.Setenv("KUBECTL", "./cluster/kubectl.sh --match-server-version")
    52  	} else {
    53  		os.Setenv("KUBECTL", "./cluster/kubectl.sh")
    54  	}
    55  	os.Setenv("KUBE_CONFIG_FILE", "config-test.sh")
    56  	os.Setenv("KUBE_RUNTIME_CONFIG", o.runtimeConfig)
    57  
    58  	dump := o.dump
    59  	if dump != "" {
    60  		if !filepath.IsAbs(dump) { // Directory may change
    61  			wd, err := os.Getwd()
    62  			if err != nil {
    63  				return fmt.Errorf("failed to os.Getwd(): %v", err)
    64  			}
    65  			dump = filepath.Join(wd, dump)
    66  		}
    67  	}
    68  
    69  	if o.up {
    70  		if o.federation {
    71  			if err := control.XMLWrap(&suite, "Federation TearDown Previous", fedDown); err != nil {
    72  				return fmt.Errorf("error tearing down previous federation control plane: %v", err)
    73  			}
    74  		}
    75  		if err := control.XMLWrap(&suite, "TearDown Previous", deploy.Down); err != nil {
    76  			return fmt.Errorf("error tearing down previous cluster: %s", err)
    77  		}
    78  	}
    79  
    80  	var err error
    81  	var errs []error
    82  
    83  	// Ensures that the cleanup/down action is performed exactly once.
    84  	var (
    85  		downDone           = false
    86  		federationDownDone = false
    87  	)
    88  
    89  	var (
    90  		beforeResources []byte
    91  		upResources     []byte
    92  		downResources   []byte
    93  		afterResources  []byte
    94  	)
    95  
    96  	if o.checkLeaks {
    97  		errs = util.AppendError(errs, control.XMLWrap(&suite, "listResources Before", func() error {
    98  			beforeResources, err = listResources()
    99  			return err
   100  		}))
   101  	}
   102  
   103  	if o.up {
   104  		// If we tried to bring the cluster up, make a courtesy
   105  		// attempt to bring it down so we're not leaving resources around.
   106  		if o.down {
   107  			defer control.XMLWrap(&suite, "Deferred TearDown", func() error {
   108  				if !downDone {
   109  					return deploy.Down()
   110  				}
   111  				return nil
   112  			})
   113  			// Deferred statements are executed in last-in-first-out order, so the
   114  			// federation teardown defer must appear after the cluster teardown
   115  			// defer in order to run before it.
   116  			if o.federation {
   117  				defer control.XMLWrap(&suite, "Deferred Federation TearDown", func() error {
   118  					if !federationDownDone {
   119  						return fedDown()
   120  					}
   121  					return nil
   122  				})
   123  			}
   124  		}
   125  		// Start the cluster using this version.
   126  		if err := control.XMLWrap(&suite, "Up", deploy.Up); err != nil {
   127  			if dump != "" {
   128  				control.XMLWrap(&suite, "DumpClusterLogs (--up failed)", func() error {
   129  					// This frequently means the cluster does not exist.
   130  					// Thus DumpClusterLogs() typically fails.
   131  					// Therefore always return nil in this scenario.
   132  					// TODO(fejta): report a green E in testgrid if it errors.
   133  					deploy.DumpClusterLogs(dump, o.logexporterGCSPath)
   134  					return nil
   135  				})
   136  			}
   137  			return fmt.Errorf("starting e2e cluster: %s", err)
   138  		}
   139  		if o.federation {
   140  			if err := control.XMLWrap(&suite, "Federation Up", fedUp); err != nil {
   141  				control.XMLWrap(&suite, "dumpFederationLogs", func() error {
   142  					return dumpFederationLogs(dump)
   143  				})
   144  				return fmt.Errorf("error starting federation: %s", err)
   145  			}
   146  		}
   147  
   148  		// The dind deployer checks that the control plane is healthy.
   149  		if !o.nodeTests && o.deployment != "dind" {
   150  			// Check that the api is reachable before proceeding with further steps.
   151  			errs = util.AppendError(errs, control.XMLWrap(&suite, "Check APIReachability", getKubectlVersion))
   152  			if dump != "" {
   153  				errs = util.AppendError(errs, control.XMLWrap(&suite, "list nodes", func() error {
   154  					return listNodes(dump)
   155  				}))
   156  			}
   157  		}
   158  	}
   159  
   160  	if o.checkLeaks {
   161  		errs = util.AppendError(errs, control.XMLWrap(&suite, "listResources Up", func() error {
   162  			upResources, err = listResources()
   163  			return err
   164  		}))
   165  	}
   166  
   167  	if o.upgradeArgs != "" {
   168  		if err := control.XMLWrap(&suite, "test setup", deploy.TestSetup); err != nil {
   169  			errs = util.AppendError(errs, err)
   170  		} else {
   171  			errs = util.AppendError(errs, control.XMLWrap(&suite, "UpgradeTest", func() error {
   172  				// upgrade tests really only run one spec
   173  				var env []string
   174  				for _, v := range os.Environ() {
   175  					if !strings.HasPrefix(v, "GINKGO_PARALLEL") {
   176  						env = append(env, v)
   177  					}
   178  				}
   179  				return skewTestEnv(env, argFields(o.upgradeArgs, dump, o.clusterIPRange), "upgrade", o.checkSkew)
   180  			}))
   181  		}
   182  	}
   183  
   184  	testArgs := argFields(o.testArgs, dump, o.clusterIPRange)
   185  	if o.test {
   186  		if err := control.XMLWrap(&suite, "test setup", deploy.TestSetup); err != nil {
   187  			errs = util.AppendError(errs, err)
   188  		} else if o.nodeTests {
   189  			nodeArgs := strings.Fields(o.nodeArgs)
   190  			errs = util.AppendError(errs, control.XMLWrap(&suite, "Node Tests", func() error {
   191  				return nodeTest(nodeArgs, o.testArgs, o.nodeTestArgs, o.gcpProject, o.gcpZone)
   192  			}))
   193  		} else if err := control.XMLWrap(&suite, "IsUp", deploy.IsUp); err != nil {
   194  			errs = util.AppendError(errs, err)
   195  		} else if o.federation {
   196  			errs = util.AppendError(errs, control.XMLWrap(&suite, "FederationTest", func() error {
   197  				return federationTest(testArgs)
   198  			}))
   199  		} else {
   200  			if o.deployment != "dind" && o.deployment != "conformance" {
   201  				errs = util.AppendError(errs, control.XMLWrap(&suite, "kubectl version", getKubectlVersion))
   202  			}
   203  
   204  			if o.skew {
   205  				errs = util.AppendError(errs, control.XMLWrap(&suite, "SkewTest", func() error {
   206  					return skewTest(testArgs, "skew", o.checkSkew)
   207  				}))
   208  			} else {
   209  				var tester e2e.Tester
   210  				tester = &GinkgoScriptTester{}
   211  				if testBuilder, ok := deploy.(e2e.TestBuilder); ok {
   212  					tester, err = testBuilder.BuildTester(toBuildTesterOptions(&o))
   213  					errs = util.AppendError(errs, err)
   214  				}
   215  				if tester != nil {
   216  					errs = util.AppendError(errs, control.XMLWrap(&suite, "Test", func() error {
   217  						return tester.Run(control, testArgs)
   218  					}))
   219  				}
   220  			}
   221  		}
   222  	}
   223  
   224  	if o.testCmd != "" {
   225  		if err := control.XMLWrap(&suite, "test setup", deploy.TestSetup); err != nil {
   226  			errs = util.AppendError(errs, err)
   227  		} else {
   228  			errs = util.AppendError(errs, control.XMLWrap(&suite, o.testCmdName, func() error {
   229  				cmdLine := os.ExpandEnv(o.testCmd)
   230  				return control.FinishRunning(exec.Command(cmdLine, o.testCmdArgs...))
   231  			}))
   232  		}
   233  	}
   234  
   235  	// TODO(bentheelder): consider remapping charts, etc to testCmd
   236  
   237  	if o.kubemark {
   238  		errs = util.AppendError(errs, control.XMLWrap(&suite, "Kubemark Overall", func() error {
   239  			return kubemarkTest(testArgs, dump, o, deploy)
   240  		}))
   241  	}
   242  
   243  	if o.charts {
   244  		errs = util.AppendError(errs, control.XMLWrap(&suite, "Helm Charts", chartsTest))
   245  	}
   246  
   247  	if o.perfTests {
   248  		errs = util.AppendError(errs, control.XMLWrap(&suite, "Perf Tests", perfTest))
   249  	}
   250  
   251  	if dump != "" {
   252  		errs = util.AppendError(errs, control.XMLWrap(&suite, "DumpClusterLogs", func() error {
   253  			return deploy.DumpClusterLogs(dump, o.logexporterGCSPath)
   254  		}))
   255  		if o.federation {
   256  			errs = util.AppendError(errs, control.XMLWrap(&suite, "dumpFederationLogs", func() error {
   257  				return dumpFederationLogs(dump)
   258  			}))
   259  		}
   260  	}
   261  
   262  	if o.checkLeaks {
   263  		errs = util.AppendError(errs, control.XMLWrap(&suite, "listResources Down", func() error {
   264  			downResources, err = listResources()
   265  			return err
   266  		}))
   267  	}
   268  
   269  	if o.down {
   270  		if o.federation {
   271  			errs = util.AppendError(errs, control.XMLWrap(&suite, "Federation TearDown", func() error {
   272  				if !federationDownDone {
   273  					err := fedDown()
   274  					if err != nil {
   275  						return err
   276  					}
   277  					federationDownDone = true
   278  				}
   279  				return nil
   280  			}))
   281  		}
   282  		errs = util.AppendError(errs, control.XMLWrap(&suite, "TearDown", func() error {
   283  			if !downDone {
   284  				err := deploy.Down()
   285  				if err != nil {
   286  					return err
   287  				}
   288  				downDone = true
   289  			}
   290  			return nil
   291  		}))
   292  	}
   293  
   294  	// Save the cluster state if we brought up a new cluster without tearing it
   295  	// down, or if we are turning up federated clusters without turning up
   296  	// the federation control plane.
   297  	if o.save != "" && ((!o.down && o.up) || (!o.federation && o.up && o.deployment != "none")) {
   298  		errs = util.AppendError(errs, control.XMLWrap(&suite, "Save Cluster State", func() error {
   299  			return saveState(o.save)
   300  		}))
   301  	}
   302  
   303  	if o.checkLeaks {
   304  		log.Print("Sleeping for 30 seconds...") // Wait for eventually consistent listing
   305  		time.Sleep(30 * time.Second)
   306  		if err := control.XMLWrap(&suite, "listResources After", func() error {
   307  			afterResources, err = listResources()
   308  			return err
   309  		}); err != nil {
   310  			errs = append(errs, err)
   311  		} else {
   312  			errs = util.AppendError(errs, control.XMLWrap(&suite, "diffResources", func() error {
   313  				return diffResources(beforeResources, upResources, downResources, afterResources, dump)
   314  			}))
   315  		}
   316  	}
   317  	if len(errs) == 0 {
   318  		if pub, ok := deploy.(publisher); ok {
   319  			errs = util.AppendError(errs, pub.Publish())
   320  		}
   321  	}
   322  	if len(errs) == 0 && o.publish != "" {
   323  		errs = util.AppendError(errs, control.XMLWrap(&suite, "Publish version", func() error {
   324  			// Use plaintext version file packaged with kubernetes.tar.gz
   325  			v, err := ioutil.ReadFile("version")
   326  			if err != nil {
   327  				return err
   328  			}
   329  			log.Printf("Set %s version to %s", o.publish, string(v))
   330  			return gcsWrite(o.publish, v)
   331  		}))
   332  	}
   333  
   334  	if len(errs) != 0 {
   335  		return fmt.Errorf("encountered %d errors: %v", len(errs), errs)
   336  	}
   337  	return nil
   338  }
   339  
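        // getKubectlVersion checks that the API server is reachable by running
        // `kubectl version`, retrying up to five times with a 10-second pause
        // between attempts.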
   340  func getKubectlVersion() error {
   341  	retries := 5
   342  	for {
   343  		_, err := control.Output(exec.Command("./cluster/kubectl.sh", "--match-server-version=false", "version"))
   344  		if err == nil {
   345  			return nil
   346  		}
   347  		retries--
   348  		if retries == 0 {
   349  			return err
   350  		}
   351  		log.Print("Failed to reach API. Sleeping for 10 seconds before retrying...")
   352  		time.Sleep(10 * time.Second)
   353  	}
   354  }
   355  
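        // listNodes dumps the YAML description of all cluster nodes to nodes.yaml
        // in the dump directory.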
   356  func listNodes(dump string) error {
   357  	b, err := control.Output(exec.Command("./cluster/kubectl.sh", "--match-server-version=false", "get", "nodes", "-oyaml"))
   358  	if err != nil {
   359  		return err
   360  	}
   361  	return ioutil.WriteFile(filepath.Join(dump, "nodes.yaml"), b, 0644)
   362  }
   363  
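        // listKubemarkNodes dumps the YAML description of all kubemark nodes to
        // kubemark_nodes.yaml in the dump directory.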
   364  func listKubemarkNodes(dump string) error {
   365  	b, err := control.Output(exec.Command("./cluster/kubectl.sh", "--match-server-version=false", "--kubeconfig=./test/kubemark/resources/kubeconfig.kubemark", "get", "nodes", "-oyaml"))
   366  	if err != nil {
   367  		return err
   368  	}
   369  	return ioutil.WriteFile(filepath.Join(dump, "kubemark_nodes.yaml"), b, 0644)
   370  }
   371  
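        // diffResources writes the before/up/down/after resource listings to files
        // under location and diffs the before and after snapshots, returning an
        // error that names any resources that appear to have leaked.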
   372  func diffResources(before, clusterUp, clusterDown, after []byte, location string) error {
   373  	if location == "" {
   374  		var err error
   375  		location, err = ioutil.TempDir("", "e2e-check-resources")
   376  		if err != nil {
   377  			return fmt.Errorf("Could not create e2e-check-resources temp dir: %s", err)
   378  		}
   379  	}
   380  
   381  	var mode os.FileMode = 0664
   382  	bp := filepath.Join(location, "gcp-resources-before.txt")
   383  	up := filepath.Join(location, "gcp-resources-cluster-up.txt")
   384  	cdp := filepath.Join(location, "gcp-resources-cluster-down.txt")
   385  	ap := filepath.Join(location, "gcp-resources-after.txt")
   386  	dp := filepath.Join(location, "gcp-resources-diff.txt")
   387  
   388  	if err := ioutil.WriteFile(bp, before, mode); err != nil {
   389  		return err
   390  	}
   391  	if err := ioutil.WriteFile(up, clusterUp, mode); err != nil {
   392  		return err
   393  	}
   394  	if err := ioutil.WriteFile(cdp, clusterDown, mode); err != nil {
   395  		return err
   396  	}
   397  	if err := ioutil.WriteFile(ap, after, mode); err != nil {
   398  		return err
   399  	}
   400  
   401  	stdout, cerr := control.Output(exec.Command("diff", "-sw", "-U0", "-F^\\[.*\\]$", bp, ap))
   402  	if err := ioutil.WriteFile(dp, stdout, mode); err != nil {
   403  		return err
   404  	}
   405  	if cerr == nil { // No diffs
   406  		return nil
   407  	}
   408  	lines := strings.Split(string(stdout), "\n")
   409  	if len(lines) < 3 { // Ignore the +++ and --- header lines
   410  		return nil
   411  	}
   412  	lines = lines[2:]
   413  
   414  	var added, report []string
   415  	resourceTypeRE := regexp.MustCompile(`^@@.+\s(\[\s\S+\s\])$`)
   416  	for _, l := range lines {
   417  		if matches := resourceTypeRE.FindStringSubmatch(l); matches != nil {
   418  			report = append(report, matches[1])
   419  		}
   420  		if strings.HasPrefix(l, "+") && len(strings.TrimPrefix(l, "+")) > 0 {
   421  			added = append(added, l)
   422  			report = append(report, l)
   423  		}
   424  	}
   425  	if len(added) > 0 {
   426  		return fmt.Errorf("Error: %d leaked resources\n%v", len(added), strings.Join(report, "\n"))
   427  	}
   428  	return nil
   429  }
   430  
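        // listResources runs cluster/gce/list-resources.sh and returns its output.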
   431  func listResources() ([]byte, error) {
   432  	log.Printf("Listing resources...")
   433  	stdout, err := control.Output(exec.Command("./cluster/gce/list-resources.sh"))
   434  	if err != nil {
   435  		return stdout, fmt.Errorf("Failed to list resources (%s):\n%s", err, string(stdout))
   436  	}
   437  	return stdout, err
   438  }
   439  
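        // clusterSize returns the number of nodes reported by `kubectl get nodes`.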
   440  func clusterSize(deploy deployer) (int, error) {
   441  	if err := deploy.TestSetup(); err != nil {
   442  		return -1, err
   443  	}
   444  	o, err := control.Output(exec.Command("kubectl", "get", "nodes", "--no-headers"))
   445  	if err != nil {
   446  		log.Printf("kubectl get nodes failed: %s\n%s", wrapError(err).Error(), string(o))
   447  		return -1, err
   448  	}
   449  	stdout := strings.TrimSpace(string(o))
   450  	log.Printf("Cluster nodes:\n%s", stdout)
   451  	return len(strings.Split(stdout, "\n")), nil
   452  }
   453  
   454  // commandError will provide stderr output (if available) from structured
   455  // exit errors
   456  type commandError struct {
   457  	err error
   458  }
   459  
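        // wrapError wraps err in a commandError so that stderr from exec.ExitError
        // values is surfaced; it returns nil when err is nil.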
   460  func wrapError(err error) *commandError {
   461  	if err == nil {
   462  		return nil
   463  	}
   464  	return &commandError{err: err}
   465  }
   466  
   467  func (e *commandError) Error() string {
   468  	if e == nil {
   469  		return ""
   470  	}
   471  	exitErr, ok := e.err.(*exec.ExitError)
   472  	if !ok {
   473  		return e.err.Error()
   474  	}
   475  
   476  	stderr := ""
   477  	if exitErr.Stderr != nil {
   478  		stderr = string(exitErr.Stderr)
   479  	}
   480  	return fmt.Sprintf("%q: %q", exitErr.Error(), stderr)
   481  }
   482  
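        // isUp returns an error unless the cluster reports at least one node.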
   483  func isUp(d deployer) error {
   484  	n, err := clusterSize(d)
   485  	if err != nil {
   486  		return err
   487  	}
   488  	if n <= 0 {
   489  		return fmt.Errorf("cluster found, but %d nodes reported", n)
   490  	}
   491  	return nil
   492  }
   493  
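        // defaultDumpClusterLogs runs cluster/log-dump/log-dump.sh, uploading logs
        // to GCS via logexporter when logexporterGCSPath is set and writing them to
        // localArtifactsDir otherwise.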
   494  func defaultDumpClusterLogs(localArtifactsDir, logexporterGCSPath string) error {
   495  	logDumpPath := "./cluster/log-dump/log-dump.sh"
   496  	// cluster/log-dump/log-dump.sh only exists in the Kubernetes tree
   497  	// post-1.3. If it doesn't exist, print a debug log but do not report an error.
   498  	if _, err := os.Stat(logDumpPath); err != nil {
   499  		log.Printf("Could not find %s. This is expected if running tests against a Kubernetes 1.3 or older tree.", logDumpPath)
   500  		if cwd, err := os.Getwd(); err == nil {
   501  			log.Printf("CWD: %v", cwd)
   502  		}
   503  		return nil
   504  	}
   505  	var cmd *exec.Cmd
   506  	if logexporterGCSPath != "" {
   507  		log.Printf("Dumping logs from nodes to GCS directly at path: %v", logexporterGCSPath)
   508  		cmd = exec.Command(logDumpPath, localArtifactsDir, logexporterGCSPath)
   509  	} else {
   510  		log.Printf("Dumping logs locally to: %v", localArtifactsDir)
   511  		cmd = exec.Command(logDumpPath, localArtifactsDir)
   512  	}
   513  	return control.FinishRunning(cmd)
   514  }
   515  
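        // dumpFederationLogs collects federation control plane logs into location
        // using the appropriate federation log-dump.sh script, if present.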
   516  func dumpFederationLogs(location string) error {
   517  	// TODO(shashidharatd): Remove below logic of choosing the scripts to run from federation
   518  	// repo once the k8s deployment in federation jobs moves to kubernetes-anywhere
   519  	var logDumpPath string
   520  	if useFederationRepo() {
   521  		logDumpPath = "../federation/deploy/cluster/log-dump.sh"
   522  	} else {
   523  		logDumpPath = "./federation/cluster/log-dump.sh"
   524  	}
   525  	// federation/cluster/log-dump.sh only exists in the Kubernetes tree
   526  	// post-1.6. If it doesn't exist, do nothing and do not report an error.
   527  	if _, err := os.Stat(logDumpPath); err == nil {
   528  		log.Printf("Dumping Federation logs to: %v", location)
   529  		return control.FinishRunning(exec.Command(logDumpPath, location))
   530  	}
   531  	log.Printf("Could not find %s. This is expected if running tests against a Kubernetes 1.6 or older tree.", logDumpPath)
   532  	return nil
   533  }
   534  
   535  func perfTest() error {
   536  	// Run perf tests
   537  	cmdline := util.K8s("perf-tests", "clusterloader", "run-e2e.sh")
   538  	if err := control.FinishRunning(exec.Command(cmdline)); err != nil {
   539  		return err
   540  	}
   541  	return nil
   542  }
   543  
   544  func chartsTest() error {
   545  	// Run helm tests.
   546  	cmdline := util.K8s("charts", "test", "helm-test-e2e.sh")
   547  	return control.FinishRunning(exec.Command(cmdline))
   548  }
   549  
   550  func nodeTest(nodeArgs []string, testArgs, nodeTestArgs, project, zone string) error {
   551  	// Run node e2e tests.
   552  	// TODO(krzyzacy): remove once nodeTest is stable
   553  	if wd, err := os.Getwd(); err == nil {
   554  		log.Printf("cwd : %s", wd)
   555  	}
   556  
   557  	sshKeyPath := os.Getenv("JENKINS_GCE_SSH_PRIVATE_KEY_FILE")
   558  	if _, err := os.Stat(sshKeyPath); err != nil {
   559  		return fmt.Errorf("Cannot find ssh key at %v: %v", sshKeyPath, err)
   560  	}
   561  
   562  	// prep node args
   563  	runner := []string{
   564  		"run",
   565  		util.K8s("kubernetes", "test", "e2e_node", "runner", "remote", "run_remote.go"),
   566  		"--cleanup",
   567  		"--logtostderr",
   568  		"--vmodule=*=4",
   569  		"--ssh-env=gce",
   570  		fmt.Sprintf("--results-dir=%s/_artifacts", os.Getenv("WORKSPACE")),
   571  		fmt.Sprintf("--project=%s", project),
   572  		fmt.Sprintf("--zone=%s", zone),
   573  		fmt.Sprintf("--ssh-user=%s", os.Getenv("USER")),
   574  		fmt.Sprintf("--ssh-key=%s", sshKeyPath),
   575  		fmt.Sprintf("--ginkgo-flags=%s", testArgs),
   576  		fmt.Sprintf("--test_args=%s", nodeTestArgs),
   577  		fmt.Sprintf("--test-timeout=%s", timeout.String()),
   578  	}
   579  
   580  	runner = append(runner, nodeArgs...)
   581  
   582  	return control.FinishRunning(exec.Command("go", runner...))
   583  }
   584  
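        // kubemarkTest stops any previous kubemark cluster, starts a new one on top
        // of the running cluster, runs the e2e tests against it, dumps master logs,
        // and finally tears the kubemark cluster down.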
   585  func kubemarkTest(testArgs []string, dump string, o options, deploy deployer) error {
   586  	// Stop previously running kubemark cluster (if any).
   587  	if err := control.XMLWrap(&suite, "Kubemark TearDown Previous", func() error {
   588  		return control.FinishRunning(exec.Command("./test/kubemark/stop-kubemark.sh"))
   589  	}); err != nil {
   590  		return err
   591  	}
   592  	// If we tried to bring the Kubemark cluster up, make a courtesy
   593  	// attempt to bring it down so we're not leaving resources around.
   594  	//
   595  	// TODO: We should call stop-kubemark exactly once. For now, to avoid
   596  	// leaking resources, we stay on the safe side and also call it in this
   597  	// defer in case the explicit teardown below does not run.
   598  	defer control.XMLWrap(&suite, "Kubemark TearDown (Deferred)", func() error {
   599  		return control.FinishRunning(exec.Command("./test/kubemark/stop-kubemark.sh"))
   600  	})
   601  
   602  	if err := control.XMLWrap(&suite, "IsUp", deploy.IsUp); err != nil {
   603  		return err
   604  	}
   605  
   606  	// Start kubemark cluster.
   607  	if err := control.XMLWrap(&suite, "Kubemark Up", func() error {
   608  		return control.FinishRunning(exec.Command("./test/kubemark/start-kubemark.sh"))
   609  	}); err != nil {
   610  		if dump != "" {
   611  			control.XMLWrap(&suite, "Kubemark MasterLogDump (--up failed)", func() error {
   612  				return control.FinishRunning(exec.Command("./test/kubemark/master-log-dump.sh", dump))
   613  			})
   614  		}
   615  		return err
   616  	}
   617  
   618  	// Check kubemark apiserver reachability by listing all nodes.
   619  	if dump != "" {
   620  		control.XMLWrap(&suite, "list kubemark nodes", func() error {
   621  			return listKubemarkNodes(dump)
   622  		})
   623  	}
   624  
   625  	// Run tests on the kubemark cluster.
   626  	if err := control.XMLWrap(&suite, "Kubemark Test", func() error {
   627  		testArgs = util.SetFieldDefault(testArgs, "--ginkgo.focus", "starting\\s30\\spods")
   628  
   629  		// detect master IP
   630  		if err := os.Setenv("MASTER_NAME", os.Getenv("INSTANCE_PREFIX")+"-kubemark-master"); err != nil {
   631  			return err
   632  		}
   633  
   634  		masterIP, err := control.Output(exec.Command(
   635  			"gcloud", "compute", "addresses", "describe",
   636  			os.Getenv("MASTER_NAME")+"-ip",
   637  			"--project="+o.gcpProject,
   638  			"--region="+o.gcpZone[:len(o.gcpZone)-2],
   639  			"--format=value(address)"))
   640  		if err != nil {
   641  			return fmt.Errorf("failed to get masterIP: %v", err)
   642  		}
   643  		if err := os.Setenv("KUBE_MASTER_IP", strings.TrimSpace(string(masterIP))); err != nil {
   644  			return err
   645  		}
   646  
   647  		if os.Getenv("ENABLE_KUBEMARK_CLUSTER_AUTOSCALER") == "true" {
   648  			testArgs = append(testArgs, "--kubemark-external-kubeconfig="+os.Getenv("DEFAULT_KUBECONFIG"))
   649  		}
   650  
   651  		cwd, err := os.Getwd()
   652  		if err != nil {
   653  			return err
   654  		}
   655  
   656  		// TODO(krzyzacy): unsure whether the envs in kubemark/util.sh make a difference to e2e tests;
   657  		//                 will verify, then remove (or uncomment) the lines below
   658  		//util := os.Getenv("WORKSPACE") + "/kubernetes/cluster/kubemark/util.sh"
   659  		//testArgs = append([]string{"-c", "source", util, " ; ./hack/ginkgo-e2e.sh"}, testArgs...)
   660  		cmd := exec.Command("./hack/ginkgo-e2e.sh", testArgs...)
   661  		cmd.Env = append(
   662  			os.Environ(),
   663  			"KUBERNETES_PROVIDER=kubemark",
   664  			"KUBE_CONFIG_FILE=config-default.sh",
   665  			fmt.Sprintf("KUBECONFIG=%s/test/kubemark/resources/kubeconfig.kubemark", cwd),
   666  			"KUBE_MASTER_URL=https://"+os.Getenv("KUBE_MASTER_IP"),
   667  		)
   668  
   669  		return control.FinishRunning(cmd)
   670  	}); err != nil {
   671  		if dump != "" {
   672  			control.XMLWrap(&suite, "Kubemark MasterLogDump (--test failed)", func() error {
   673  				return control.FinishRunning(exec.Command("./test/kubemark/master-log-dump.sh", dump))
   674  			})
   675  		}
   676  		return err
   677  	}
   678  
   679  	// Dump logs from kubemark master.
   680  	control.XMLWrap(&suite, "Kubemark MasterLogDump", func() error {
   681  		return control.FinishRunning(exec.Command("./test/kubemark/master-log-dump.sh", dump))
   682  	})
   683  
   684  	// Stop the kubemark cluster.
   685  	if err := control.XMLWrap(&suite, "Kubemark TearDown", func() error {
   686  		return control.FinishRunning(exec.Command("./test/kubemark/stop-kubemark.sh"))
   687  	}); err != nil {
   688  		return err
   689  	}
   690  
   691  	return nil
   692  }
   693  
   694  // skewTest runs tests in the kubernetes_skew directory, appending the --report-prefix flag to the run.
   695  func skewTest(args []string, prefix string, checkSkew bool) error {
   696  	return skewTestEnv(nil, args, prefix, checkSkew)
   697  }
   698  
   699  // skewTestEnv runs tests in the kubernetes_skew directory with the given environment, appending the --report-prefix flag to the run.
   700  func skewTestEnv(env, args []string, prefix string, checkSkew bool) error {
   701  	// TODO(fejta): run this inside this kubetest process, do not spawn a new one.
   702  	popS, err := util.Pushd("../kubernetes_skew")
   703  	if err != nil {
   704  		return err
   705  	}
   706  	defer popS()
   707  	args = util.AppendField(args, "--report-prefix", prefix)
   708  	cmd := exec.Command(
   709  		"kubetest",
   710  		"--test",
   711  		"--test_args="+strings.Join(args, " "),
   712  		fmt.Sprintf("--check-version-skew=%t", checkSkew),
   713  	)
   714  	cmd.Env = env
   715  	return control.FinishRunning(cmd)
   716  }
   717  
   718  // GinkgoScriptTester implements Tester by calling the hack/ginkgo-e2e.sh script
   719  type GinkgoScriptTester struct {
   720  }
   721  
   722  // Run executes ./hack/ginkgo-e2e.sh
   723  func (t *GinkgoScriptTester) Run(control *process.Control, testArgs []string) error {
   724  	return control.FinishRunning(exec.Command("./hack/ginkgo-e2e.sh", testArgs...))
   725  }
   726  
   727  // toBuildTesterOptions builds the BuildTesterOptions data structure for passing to BuildTester
   728  func toBuildTesterOptions(o *options) *e2e.BuildTesterOptions {
   729  	return &e2e.BuildTesterOptions{
   730  		FocusRegex:  o.focusRegex,
   731  		SkipRegex:   o.skipRegex,
   732  		Parallelism: o.ginkgoParallel.Get(),
   733  	}
   734  }