github.com/getgauge/gauge@v1.6.9/execution/parallelExecution.go

/*----------------------------------------------------------------
 *  Copyright (c) ThoughtWorks, Inc.
 *  Licensed under the Apache License, Version 2.0
 *  See LICENSE in the project root for license information.
 *----------------------------------------------------------------*/

package execution

import (
	"fmt"
	"os"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/getgauge/common"
	"github.com/getgauge/gauge-proto/go/gauge_messages"
	"github.com/getgauge/gauge/config"
	"github.com/getgauge/gauge/conn"
	"github.com/getgauge/gauge/env"
	"github.com/getgauge/gauge/execution/event"
	"github.com/getgauge/gauge/execution/result"
	"github.com/getgauge/gauge/filter"
	"github.com/getgauge/gauge/gauge"
	"github.com/getgauge/gauge/logger"
	"github.com/getgauge/gauge/manifest"
	"github.com/getgauge/gauge/plugin"
	"github.com/getgauge/gauge/runner"
)

// Strategy is the parallelization strategy for execution. It can be either 'Eager' or 'Lazy'.
var Strategy string

// Eager is a parallelization strategy for execution. Specs are distributed among the streams before execution starts, so each stream gets an equal share of the specs.
const Eager string = "eager"

// Lazy is a parallelization strategy for execution. Specs are assigned dynamically during execution, i.e. the next spec in line goes to the stream that has completed its previous execution and is waiting for more work.
const Lazy string = "lazy"

const (
	gaugeAPIPortsEnv            = "GAUGE_API_PORTS"
	gaugeParallelStreamCountEnv = "GAUGE_PARALLEL_STREAMS_COUNT"
)

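// parallelExecution coordinates the execution of a spec collection across multiple parallel streams.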
type parallelExecution struct {
	wg                       sync.WaitGroup
	manifest                 *manifest.Manifest
	specCollection           *gauge.SpecCollection
	pluginHandler            plugin.Handler
	runners                  []runner.Runner
	suiteResult              *result.SuiteResult
	numberOfExecutionStreams int
	tagsToFilter             string
	errMaps                  *gauge.BuildErrors
	startTime                time.Time
	resultChan               chan *result.SuiteResult
}

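// newParallelExecution builds a parallelExecution from the given executionInfo, seeding the runner list with the runner that is already started and creating the channel on which each stream reports its suite result.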
func newParallelExecution(e *executionInfo) *parallelExecution {
	return &parallelExecution{
		manifest:                 e.manifest,
		specCollection:           e.specs,
		runners:                  []runner.Runner{e.runner},
		pluginHandler:            e.pluginHandler,
		numberOfExecutionStreams: e.numberOfStreams,
		tagsToFilter:             e.tagsToFilter,
		errMaps:                  e.errMaps,
		resultChan:               make(chan *result.SuiteResult),
	}
}

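// streamExecError is reported when an execution stream cannot run; it carries the names of the specs that were skipped as a result.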
type streamExecError struct {
	specsSkipped []string
	message      string
}

func (s streamExecError) Error() string {
	var specNames string
	for _, spec := range s.specsSkipped {
		specNames += fmt.Sprintf("%s\n", spec)
	}
	return fmt.Sprintf("The following specifications could not be executed:\n%sReason : %s.", specNames, s.message)
}

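// numberOfStreams caps the configured number of execution streams at the number of specs, since additional streams would have nothing to run.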
func (e *parallelExecution) numberOfStreams() int {
	nStreams := e.numberOfExecutionStreams
	size := e.specCollection.Size()
	if nStreams > size {
		nStreams = size
	}
	return nStreams
}

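// start records the suite start time, notifies listeners that suite execution has started and starts the plugins declared in the manifest.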
func (e *parallelExecution) start() {
	e.startTime = time.Now()
	event.Notify(event.NewExecutionEvent(event.SuiteStart, nil, nil, 0, &gauge_messages.ExecutionInfo{}))
	e.pluginHandler = plugin.StartPlugins(e.manifest)
}

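// startRunnersForRemainingStreams starts a runner for each stream beyond the first (which reuses the runner created upfront) and appends them to e.runners. A stream whose runner fails to start reports the errors on resultChan instead.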
func (e *parallelExecution) startRunnersForRemainingStreams() {
	totalStreams := e.numberOfStreams()
	rChan := make(chan runner.Runner, totalStreams-1)
	for i := 2; i <= totalStreams; i++ {
		go func(stream int) {
			r, err := e.startRunner(e.specCollection, stream)
			if len(err) > 0 {
				e.resultChan <- &result.SuiteResult{UnhandledErrors: err}
				return
			}
			rChan <- r
		}(i)
	}
	for i := 1; i < totalStreams; i++ {
		e.runners = append(e.runners, <-rChan)
	}
}

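// run executes the spec collection and returns the aggregated suite result. When filtered parallel execution is allowed and filter tags are set, the specs are split into a serial set and a parallel set; the serial set is executed first on a single stream, and the remaining specs are executed in parallel using the multithreaded, lazy or eager mode as configured.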
func (e *parallelExecution) run() *result.SuiteResult {
	e.start()
	var res []*result.SuiteResult
	if env.AllowFilteredParallelExecution() && e.tagsToFilter != "" {
		parallelSpecs, serialSpecs := filter.FilterSpecForParallelRun(e.specCollection.Specs(), e.tagsToFilter)
		if Verbose {
			logger.Infof(true, "Applied tags '%s' to filter specs for parallel execution", e.tagsToFilter)
			logger.Infof(true, "Number of specs to be executed in serial: %d", len(serialSpecs))
			logger.Infof(true, "Number of specs to be executed in parallel: %d", len(parallelSpecs))
		}
		if len(serialSpecs) > 0 {
			logger.Infof(true, "Executing %d specs in serial.", len(serialSpecs))
			e.specCollection = gauge.NewSpecCollection(parallelSpecs, false)
			res = append(res, e.executeSpecsInSerial(gauge.NewSpecCollection(serialSpecs, true)))
		}
	}

	if e.specCollection.Size() > 0 {
		logger.Infof(true, "Executing in %d parallel streams.", e.numberOfStreams())
		// skipcq CRT-A0013
		if e.isMultithreaded() {
			logger.Debugf(true, "Using multithreading for parallel execution.")
			if e.runners[0].Info().GRPCSupport {
				go e.executeGrpcMultithreaded()
			} else {
				go e.executeLegacyMultithreaded()
			}
		} else if isLazy() {
			go e.executeLazily()
		} else {
			go e.executeEagerly()
		}

		for r := range e.resultChan {
			res = append(res, r)
		}
	} else {
		logger.Infof(true, "No specs remain to be executed in parallel.")
	}
	e.aggregateResults(res)
	e.finish()
	return e.suiteResult
}

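// executeLazily runs the shared spec collection with one goroutine per stream; each stream picks up the next spec from the collection as soon as it finishes its previous one.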
func (e *parallelExecution) executeLazily() {
	defer close(e.resultChan)
	e.wg.Add(e.numberOfStreams())
	e.startRunnersForRemainingStreams()

	for i := 1; i <= len(e.runners); i++ {
		go func(stream int) {
			defer e.wg.Done()
			e.startSpecsExecutionWithRunner(e.specCollection, e.runners[stream-1], stream)
		}(i)
	}
	e.wg.Wait()
}

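// executeLegacyMultithreaded starts a single legacy runner process and opens one Gauge API connection per stream (the ports are handed to the runner via GAUGE_API_PORTS), executes the spec collection over those connections, and kills the runner process once all streams are done.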
func (e *parallelExecution) executeLegacyMultithreaded() {
	defer close(e.resultChan)
	totalStreams := e.numberOfStreams()
	e.wg.Add(totalStreams)
	handlers := make([]*conn.GaugeConnectionHandler, 0)
	var ports []string
	for i := 0; i < totalStreams; i++ {
		port, err := conn.GetPortFromEnvironmentVariable(common.GaugePortEnvName)
		if err != nil {
			port = 0
		}
		handler, err := conn.NewGaugeConnectionHandler(port, nil)
		if err != nil {
			logger.Errorf(true, "failed to create handler. %s", err.Error())
		}
		ports = append(ports, strconv.Itoa(handler.ConnectionPortNumber()))
		handlers = append(handlers, handler)
	}
	os.Setenv(gaugeAPIPortsEnv, strings.Join(ports, ","))
	writer := logger.NewLogWriter(e.manifest.Language, true, 0)
	r, err := runner.StartLegacyRunner(e.manifest, "0", writer, make(chan bool), false)
	if err != nil {
		logger.Fatalf(true, "failed to start runner. %s", err.Error())
	}
	for i := 0; i < totalStreams; i++ {
		connection, err := handlers[i].AcceptConnection(config.RunnerConnectionTimeout(), make(chan error))
		if err != nil {
			logger.Error(true, err.Error())
		}
		crapRunner := &runner.MultithreadedRunner{}
		crapRunner.SetConnection(connection)
		go e.startMultithreaded(crapRunner, e.resultChan, i+1)
	}
	e.wg.Wait()
	err = r.Cmd.Process.Kill()
	if err != nil {
		logger.Infof(true, "unable to kill runner: %s", err.Error())
	}
}

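// startMultithreaded executes the shared spec collection on the given runner connection as one stream and marks the stream as done on the wait group.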
func (e *parallelExecution) startMultithreaded(r runner.Runner, resChan chan *result.SuiteResult, stream int) {
	defer e.wg.Done()
	e.startSpecsExecutionWithRunner(e.specCollection, r, stream)
}

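// executeEagerly distributes the spec collection upfront into one chunk per stream and runs each chunk on its own runner in a separate goroutine.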
func (e *parallelExecution) executeEagerly() {
	defer close(e.resultChan)
	distributions := e.numberOfStreams()
	specs := filter.DistributeSpecs(e.specCollection.Specs(), distributions)
	e.wg.Add(distributions)
	e.startRunnersForRemainingStreams()

	for i, s := range specs {
		i, s := i, s
		go func(j int) {
			defer e.wg.Done()
			e.startSpecsExecutionWithRunner(s, e.runners[j], j+1)
		}(i)
	}
	e.wg.Wait()
}

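// startRunner starts a language runner for the given stream. If the runner fails to start, the returned errors describe the failure and, for eager execution, carry the names of the specs that will be skipped.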
func (e *parallelExecution) startRunner(s *gauge.SpecCollection, stream int) (runner.Runner, []error) {
	if os.Getenv("GAUGE_CUSTOM_BUILD_PATH") == "" {
		os.Setenv("GAUGE_CUSTOM_BUILD_PATH", path.Join(os.Getenv("GAUGE_PROJECT_ROOT"), "gauge_bin"))
	}
	runner, err := runner.Start(e.manifest, stream, make(chan bool), false)
	if err != nil {
		logger.Errorf(true, "Failed to start runner. %s", err.Error())
		logger.Debugf(true, "Skipping %d specifications", s.Size())
		if isLazy() {
			return nil, []error{fmt.Errorf("Failed to start runner. %s", err.Error())}
		}
		return nil, []error{streamExecError{specsSkipped: s.SpecNames(), message: fmt.Sprintf("Failed to start runner. %s", err.Error())}}
	}
	return runner, nil
}

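// startSpecsExecutionWithRunner executes the given specs on the given runner as a single stream, kills the runner afterwards and publishes the stream's suite result on resultChan.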
func (e *parallelExecution) startSpecsExecutionWithRunner(s *gauge.SpecCollection, runner runner.Runner, stream int) {
	executionInfo := newExecutionInfo(s, runner, e.pluginHandler, e.errMaps, false, stream)
	se := newSimpleExecution(executionInfo, false, false)
	se.execute()
	err := runner.Kill()
	if err != nil {
		logger.Errorf(true, "Failed to kill runner. %s", err.Error())
	}
	e.resultChan <- se.suiteResult
}

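// executeSpecsInSerial runs the given specs one after another on a single, dedicated runner and returns the resulting suite result.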
func (e *parallelExecution) executeSpecsInSerial(s *gauge.SpecCollection) *result.SuiteResult {
	runner, err := e.startRunner(s, 1)
	if err != nil {
		return &result.SuiteResult{UnhandledErrors: err}
	}
	executionInfo := newExecutionInfo(s, runner, e.pluginHandler, e.errMaps, false, 1)
	se := newSimpleExecution(executionInfo, false, false)
	se.execute()
	er := runner.Kill()
	if er != nil {
		logger.Errorf(true, "Failed to kill runner. %s", er.Error())
	}

	return se.suiteResult
}

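// finish merges data table spec results into the suite result, notifies listeners that the suite has ended, sends the final suite execution result to plugins and shuts the plugins down.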
func (e *parallelExecution) finish() {
	e.suiteResult = mergeDataTableSpecResults(e.suiteResult)
	event.Notify(event.NewExecutionEvent(event.SuiteEnd, nil, e.suiteResult, 0, &gauge_messages.ExecutionInfo{}))
	message := &gauge_messages.Message{
		MessageType: gauge_messages.Message_SuiteExecutionResult,
		SuiteExecutionResult: &gauge_messages.SuiteExecutionResult{
			SuiteResult: gauge.ConvertToProtoSuiteResult(e.suiteResult),
		},
	}
	e.pluginHandler.NotifyPlugins(message)
	e.pluginHandler.GracefullyKillPlugins()
}

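// aggregateResults combines the per-stream suite results into a single suite result, accumulating spec results, failure counts, suite hook results and unhandled errors, and records the total execution time in milliseconds.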
func (e *parallelExecution) aggregateResults(suiteResults []*result.SuiteResult) {
	r := result.NewSuiteResult(ExecuteTags, e.startTime)
	for _, result := range suiteResults {
		r.SpecsFailedCount += result.SpecsFailedCount
		r.SpecResults = append(r.SpecResults, result.SpecResults...)
		if result.IsFailed {
			r.IsFailed = true
		}
		if result.PreSuite != nil {
			r.PreSuite = result.PreSuite
		}
		if result.PostSuite != nil {
			r.PostSuite = result.PostSuite
		}
		if result.UnhandledErrors != nil {
			r.UnhandledErrors = append(r.UnhandledErrors, result.UnhandledErrors...)
		}
	}
	r.ExecutionTime = int64(time.Since(e.startTime) / 1e6)
	e.suiteResult = r
	e.suiteResult.SetSpecsSkippedCount()
}

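// isLazy reports whether the configured strategy assigns specs to streams lazily.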
func isLazy() bool {
	return strings.ToLower(Strategy) == Lazy
}

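// isValidStrategy reports whether the given strategy is one of the supported values, 'eager' or 'lazy' (case-insensitive).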
func isValidStrategy(strategy string) bool {
	strategy = strings.ToLower(strategy)
	return strategy == Lazy || strategy == Eager
}

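// isMultithreaded reports whether multithreaded execution is enabled and the runner supports it; otherwise parallel execution falls back to multiple runner processes.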
func (e *parallelExecution) isMultithreaded() bool {
	if !env.EnableMultiThreadedExecution() {
		return false
	}
	if !e.runners[0].IsMultithreaded() {
		logger.Warningf(true, "Runner doesn't support multithreading, using multi-process parallel execution.")
		return false
	}
	return true
}