github.com/onsi/ginkgo@v1.16.6-0.20211118180735-4e1925ba4c95/internal/suite.go

     1  package internal
     2  
     3  import (
     4  	"fmt"
     5  	"time"
     6  
     7  	"github.com/onsi/ginkgo/formatter"
     8  	"github.com/onsi/ginkgo/internal/interrupt_handler"
     9  	"github.com/onsi/ginkgo/internal/parallel_support"
    10  	"github.com/onsi/ginkgo/reporters"
    11  	"github.com/onsi/ginkgo/types"
    12  )
    13  
    14  type Phase uint
    15  
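         // A Suite moves through these phases in order.  A rough, illustrative sketch of the
         // expected calling sequence (the driver that actually makes these calls lives outside
         // this file, in the public ginkgo package, so the sequence below is an assumption):
         //
         //	suite := NewSuite()           // starts in PhaseBuildTopLevel
         //	suite.PushNode(container)     // top level containers are recorded, not entered
         //	suite.BuildTree()             // PhaseBuildTree: containers are entered and the spec tree is built
         //	suite.Run(...)                // PhaseRun: specs are generated from the tree and executed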
    16  const (
    17  	PhaseBuildTopLevel Phase = iota
    18  	PhaseBuildTree
    19  	PhaseRun
    20  )
    21  
    22  type Suite struct {
    23  	tree               *TreeNode
    24  	topLevelContainers Nodes
    25  
    26  	phase Phase
    27  
    28  	suiteNodes   Nodes
    29  	cleanupNodes Nodes
    30  
    31  	failer            *Failer
    32  	reporter          reporters.Reporter
    33  	writer            WriterInterface
    34  	outputInterceptor OutputInterceptor
    35  	interruptHandler  interrupt_handler.InterruptHandlerInterface
    36  	config            types.SuiteConfig
    37  
    38  	skipAll           bool
    39  	report            types.Report
    40  	currentSpecReport types.SpecReport
    41  	currentNode       Node
    42  
    43  	client parallel_support.Client
    44  }
    45  
    46  func NewSuite() *Suite {
    47  	return &Suite{
    48  		tree:  &TreeNode{},
    49  		phase: PhaseBuildTopLevel,
    50  	}
    51  }
    52  
    53  func (suite *Suite) BuildTree() error {
     54  	// During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelContainers but not entered.
     55  	// We now enter PhaseBuildTree, where these top level containers are entered and added to the spec tree.
    56  	suite.phase = PhaseBuildTree
    57  	for _, topLevelContainer := range suite.topLevelContainers {
    58  		err := suite.PushNode(topLevelContainer)
    59  		if err != nil {
    60  			return err
    61  		}
    62  	}
    63  	return nil
    64  }
    65  
    66  func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, suiteConfig types.SuiteConfig) (bool, bool) {
    67  	if suite.phase != PhaseBuildTree {
     68  		panic("cannot run before building the tree: call suite.BuildTree() first")
    69  	}
    70  	ApplyNestedFocusPolicyToTree(suite.tree)
    71  	specs := GenerateSpecsFromTreeRoot(suite.tree)
    72  	specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)
    73  
    74  	suite.phase = PhaseRun
    75  	suite.client = client
    76  	suite.failer = failer
    77  	suite.reporter = reporter
    78  	suite.writer = writer
    79  	suite.outputInterceptor = outputInterceptor
    80  	suite.interruptHandler = interruptHandler
    81  	suite.config = suiteConfig
    82  
    83  	success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)
    84  
    85  	return success, hasProgrammaticFocus
    86  }
    87  
    88  /*
    89    Tree Construction methods
    90  
    91    PushNode is used during PhaseBuildTopLevel and PhaseBuildTree
    92  */
    93  
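         // PushNode routes a node to the right home: cleanup nodes go to pushCleanupNode, suite-level
         // nodes go to pushSuiteNode, container nodes are either recorded (PhaseBuildTopLevel) or
         // entered (PhaseBuildTree), and all other nodes are appended as children of the container
         // currently being entered.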
    94  func (suite *Suite) PushNode(node Node) error {
    95  	if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
    96  		return suite.pushCleanupNode(node)
    97  	}
    98  
    99  	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) {
   100  		return suite.pushSuiteNode(node)
   101  	}
   102  
   103  	if suite.phase == PhaseRun {
   104  		return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation)
   105  	}
   106  
   107  	if node.MarkedSerial {
   108  		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
   109  		if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial {
   110  			return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType)
   111  		}
   112  	}
   113  
   114  	if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
   115  		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
   116  		if firstOrderedNode.IsZero() {
   117  			return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType)
   118  		}
   119  	}
   120  
   121  	if node.NodeType == types.NodeTypeContainer {
   122  		// During PhaseBuildTopLevel we only track the top level containers without entering them
   123  		// We only enter the top level container nodes during PhaseBuildTree
   124  		//
    125  		// This ensures the tree is only constructed after `go test` has called `flag.Parse()` and gives
    126  		// the user an opportunity to load suite configuration information in the `TestX` go test hook just before `RunSpecs`
   127  		// is invoked.  This makes the lifecycle easier to reason about and solves issues like #693.
   128  		if suite.phase == PhaseBuildTopLevel {
   129  			suite.topLevelContainers = append(suite.topLevelContainers, node)
   130  			return nil
   131  		}
   132  		if suite.phase == PhaseBuildTree {
   133  			parentTree := suite.tree
   134  			suite.tree = &TreeNode{Node: node}
   135  			parentTree.AppendChild(suite.tree)
   136  			err := func() (err error) {
   137  				defer func() {
   138  					if e := recover(); e != nil {
   139  						err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
   140  					}
   141  				}()
   142  				node.Body()
   143  				return err
   144  			}()
   145  			suite.tree = parentTree
   146  			return err
   147  		}
   148  	} else {
   149  		suite.tree.AppendChild(&TreeNode{Node: node})
   150  		return nil
   151  	}
   152  
   153  	return nil
   154  }
   155  
   156  func (suite *Suite) pushSuiteNode(node Node) error {
   157  	if suite.phase == PhaseBuildTree {
   158  		return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation)
   159  	}
   160  
   161  	if suite.phase == PhaseRun {
   162  		return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation)
   163  	}
   164  
   165  	switch node.NodeType {
   166  	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
   167  		existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
   168  		if len(existingBefores) > 0 {
   169  			return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation)
   170  		}
   171  	case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
   172  		existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
   173  		if len(existingAfters) > 0 {
   174  			return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation)
   175  		}
   176  	}
   177  
   178  	suite.suiteNodes = append(suite.suiteNodes, node)
   179  	return nil
   180  }
   181  
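         // pushCleanupNode handles nodes generated by DeferCleanup.  The cleanup node's type is
         // derived from the node that is currently running when DeferCleanup is called.  A rough,
         // user-level sketch of the mapping (the dsl names are assumptions, not defined in this file):
         //
         //	BeforeEach(func() {
         //		f, _ := os.CreateTemp("", "scratch")
         //		DeferCleanup(func() { os.Remove(f.Name()) }) // becomes a NodeTypeCleanupAfterEach node
         //	})
         //	BeforeAll(func() {
         //		DeferCleanup(cleanupSharedFixture) // becomes a NodeTypeCleanupAfterAll node
         //	})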
   182  func (suite *Suite) pushCleanupNode(node Node) error {
   183  	if suite.phase != PhaseRun || suite.currentNode.IsZero() {
   184  		return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation)
   185  	}
   186  
   187  	switch suite.currentNode.NodeType {
   188  	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
   189  		node.NodeType = types.NodeTypeCleanupAfterSuite
   190  	case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
   191  		node.NodeType = types.NodeTypeCleanupAfterAll
   192  	case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite:
   193  		return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
   194  	case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
   195  		return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
   196  	default:
   197  		node.NodeType = types.NodeTypeCleanupAfterEach
   198  	}
   199  
   200  	node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID
   201  	node.NestingLevel = suite.currentNode.NestingLevel
   202  	suite.cleanupNodes = append(suite.cleanupNodes, node)
   203  
   204  	return nil
   205  }
   206  
   207  /*
   208    Spec Running methods - used during PhaseRun
   209  */
   210  func (suite *Suite) CurrentSpecReport() types.SpecReport {
   211  	report := suite.currentSpecReport
   212  	if suite.writer != nil {
   213  		report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
   214  	}
   215  	return report
   216  }
   217  
   218  func (suite *Suite) AddReportEntry(entry ReportEntry) error {
   219  	if suite.phase != PhaseRun {
   220  		return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
   221  	}
   222  	suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
   223  	return nil
   224  }
   225  
   226  func (suite *Suite) isRunningInParallel() bool {
   227  	return suite.config.ParallelTotal > 1
   228  }
   229  
   230  func (suite *Suite) processCurrentSpecReport() {
   231  	suite.reporter.DidRun(suite.currentSpecReport)
   232  	if suite.isRunningInParallel() {
   233  		suite.client.PostDidRun(suite.currentSpecReport)
   234  	}
   235  	suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport)
   236  
   237  	if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
   238  		suite.report.SuiteSucceeded = false
   239  		if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) {
   240  			suite.skipAll = true
   241  			if suite.isRunningInParallel() {
   242  				suite.client.PostAbort()
   243  			}
   244  		}
   245  	}
   246  }
   247  
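         // runSpecs drives a full pass over the suite: it initializes the report, runs the BeforeSuite
         // node, then repeatedly fetches the next group index (locally, or from the parallel-support
         // server when running in parallel) and runs that group.  Serial groups run last and only on
         // process 1, after the other processes have finished.  Finally it runs the AfterSuite and
         // suite-level cleanup nodes, records any interrupt as a suite failure, and (on process 1)
         // runs the ReportAfterSuite nodes.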
   248  func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
   249  	numSpecsThatWillBeRun := specs.CountWithoutSkip()
   250  
   251  	suite.report = types.Report{
   252  		SuitePath:                 suitePath,
   253  		SuiteDescription:          description,
   254  		SuiteLabels:               suiteLabels,
   255  		SuiteConfig:               suite.config,
   256  		SuiteHasProgrammaticFocus: hasProgrammaticFocus,
   257  		PreRunStats: types.PreRunStats{
   258  			TotalSpecs:       len(specs),
   259  			SpecsThatWillRun: numSpecsThatWillBeRun,
   260  		},
   261  		StartTime: time.Now(),
   262  	}
   263  
   264  	suite.reporter.SuiteWillBegin(suite.report)
   265  	if suite.isRunningInParallel() {
   266  		suite.client.PostSuiteWillBegin(suite.report)
   267  	}
   268  
   269  	suite.report.SuiteSucceeded = true
   270  	suite.runBeforeSuite(numSpecsThatWillBeRun)
   271  
   272  	if suite.report.SuiteSucceeded {
   273  		groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
   274  		nextIndex := MakeIncrementingIndexCounter()
   275  		if suite.isRunningInParallel() {
   276  			nextIndex = suite.client.FetchNextCounter
   277  		}
   278  
   279  		for {
   280  			groupedSpecIdx, err := nextIndex()
   281  			if err != nil {
   282  				suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error()))
   283  				suite.report.SuiteSucceeded = false
   284  				break
   285  			}
   286  
   287  			if groupedSpecIdx >= len(groupedSpecIndices) {
   288  				if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 {
   289  					groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter()
   290  					suite.client.BlockUntilNonprimaryProcsHaveFinished()
   291  					continue
   292  				}
   293  				break
   294  			}
   295  
   296  			suite.runGroup(specs.AtIndices(groupedSpecIndices[groupedSpecIdx]))
   297  		}
   298  
   299  		if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending {
   300  			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set")
   301  			suite.report.SuiteSucceeded = false
   302  		}
   303  	}
   304  
   305  	suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
   306  
   307  	interruptStatus := suite.interruptHandler.Status()
   308  	if interruptStatus.Interrupted {
   309  		suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String())
   310  		suite.report.SuiteSucceeded = false
   311  	}
   312  	suite.report.EndTime = time.Now()
   313  	suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime)
   314  
   315  	if suite.config.ParallelProcess == 1 {
   316  		suite.runReportAfterSuite()
   317  	}
   318  	suite.reporter.SuiteDidEnd(suite.report)
   319  	if suite.isRunningInParallel() {
   320  		suite.client.PostSuiteDidEnd(suite.report)
   321  	}
   322  
   323  	return suite.report.SuiteSucceeded
   324  }
   325  
   326  func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
   327  	interruptStatus := suite.interruptHandler.Status()
   328  	beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
   329  	if !beforeSuiteNode.IsZero() && !interruptStatus.Interrupted && numSpecsThatWillBeRun > 0 {
   330  		suite.currentSpecReport = types.SpecReport{
   331  			LeafNodeType:     beforeSuiteNode.NodeType,
   332  			LeafNodeLocation: beforeSuiteNode.CodeLocation,
   333  			ParallelProcess:  suite.config.ParallelProcess,
   334  		}
   335  		suite.reporter.WillRun(suite.currentSpecReport)
   336  		suite.runSuiteNode(beforeSuiteNode, interruptStatus.Channel)
   337  		if suite.currentSpecReport.State.Is(types.SpecStateSkipped) {
   338  			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite")
   339  			suite.skipAll = true
   340  		}
   341  		suite.processCurrentSpecReport()
   342  	}
   343  }
   344  
   345  func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
   346  	afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
   347  	if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
   348  		suite.currentSpecReport = types.SpecReport{
   349  			LeafNodeType:     afterSuiteNode.NodeType,
   350  			LeafNodeLocation: afterSuiteNode.CodeLocation,
   351  			ParallelProcess:  suite.config.ParallelProcess,
   352  		}
   353  		suite.reporter.WillRun(suite.currentSpecReport)
   354  		suite.runSuiteNode(afterSuiteNode, suite.interruptHandler.Status().Channel)
   355  		suite.processCurrentSpecReport()
   356  	}
   357  
   358  	afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse()
   359  	if len(afterSuiteCleanup) > 0 {
   360  		for _, cleanupNode := range afterSuiteCleanup {
   361  			suite.currentSpecReport = types.SpecReport{
   362  				LeafNodeType:     cleanupNode.NodeType,
   363  				LeafNodeLocation: cleanupNode.CodeLocation,
   364  				ParallelProcess:  suite.config.ParallelProcess,
   365  			}
   366  			suite.reporter.WillRun(suite.currentSpecReport)
   367  			suite.runSuiteNode(cleanupNode, suite.interruptHandler.Status().Channel)
   368  			suite.processCurrentSpecReport()
   369  		}
   370  	}
   371  }
   372  
   373  func (suite *Suite) runReportAfterSuite() {
   374  	for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) {
   375  		suite.currentSpecReport = types.SpecReport{
   376  			LeafNodeType:     node.NodeType,
   377  			LeafNodeLocation: node.CodeLocation,
   378  			LeafNodeText:     node.Text,
   379  			ParallelProcess:  suite.config.ParallelProcess,
   380  		}
   381  		suite.reporter.WillRun(suite.currentSpecReport)
   382  		suite.runReportAfterSuiteNode(node, suite.report)
   383  		suite.processCurrentSpecReport()
   384  	}
   385  }
   386  
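         // runGroup runs one group of specs.  Specs that share an Ordered container are grouped so
         // that BeforeAll nodes run once before the first spec that needs them and AfterAll nodes run
         // after the last spec that contains them; nodeState remembers which of these have already run.
         // A rough, user-level sketch of the construct this supports (the dsl names are assumptions
         // drawn from the node types handled here, not definitions from this file):
         //
         //	Describe("a stack", Ordered, func() {
         //		BeforeAll(func() { /* runs once, before the group's first spec */ })
         //		It("pushes", func() { /* ... */ })
         //		It("pops", func() { /* ... */ })
         //		AfterAll(func() { /* runs once, after the group's last spec */ })
         //	})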
   387  func (suite *Suite) runGroup(specs Specs) {
   388  	nodeState := map[uint]types.SpecState{}
   389  	groupSucceeded := true
   390  
   391  	indexOfLastSpecContainingNodeID := func(id uint) int {
   392  		lastIdx := -1
   393  		for idx := range specs {
   394  			if specs[idx].Nodes.ContainsNodeID(id) && !specs[idx].Skip {
   395  				lastIdx = idx
   396  			}
   397  		}
   398  		return lastIdx
   399  	}
   400  
   401  	for i, spec := range specs {
   402  		suite.currentSpecReport = types.SpecReport{
   403  			ContainerHierarchyTexts:     spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
   404  			ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
   405  			ContainerHierarchyLabels:    spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
   406  			LeafNodeLocation:            spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
   407  			LeafNodeType:                types.NodeTypeIt,
   408  			LeafNodeText:                spec.FirstNodeWithType(types.NodeTypeIt).Text,
   409  			LeafNodeLabels:              []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
   410  			ParallelProcess:             suite.config.ParallelProcess,
   411  			IsSerial:                    spec.Nodes.HasNodeMarkedSerial(),
   412  			IsInOrderedContainer:        !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
   413  		}
   414  
   415  		skip := spec.Skip
   416  		if spec.Nodes.HasNodeMarkedPending() {
   417  			skip = true
   418  			suite.currentSpecReport.State = types.SpecStatePending
   419  		} else {
   420  			if suite.interruptHandler.Status().Interrupted || suite.skipAll {
   421  				skip = true
   422  			}
   423  			if !groupSucceeded {
   424  				skip = true
   425  				suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
   426  					"Spec skipped because an earlier spec in an ordered container failed")
   427  			}
   428  			for _, node := range spec.Nodes.WithType(types.NodeTypeBeforeAll) {
   429  				if nodeState[node.ID] == types.SpecStateSkipped {
   430  					skip = true
   431  					suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
   432  						"Spec skipped because Skip() was called in BeforeAll")
   433  					break
   434  				}
   435  			}
   436  			if skip {
   437  				suite.currentSpecReport.State = types.SpecStateSkipped
   438  			}
   439  		}
   440  
   441  		if suite.config.DryRun && !skip {
   442  			skip = true
   443  			suite.currentSpecReport.State = types.SpecStatePassed
   444  		}
   445  
   446  		suite.reporter.WillRun(suite.currentSpecReport)
   447  		//send the spec report to any attached ReportBeforeEach blocks - this will update suite.currentSpecReport if failures occur in these blocks
   448  		suite.reportEach(spec, types.NodeTypeReportBeforeEach)
   449  		if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
   450  			//the reportEach failed, skip this spec
   451  			skip = true
   452  		}
   453  
   454  		suite.currentSpecReport.StartTime = time.Now()
   455  		maxAttempts := max(1, spec.FlakeAttempts())
   456  		if suite.config.FlakeAttempts > 0 {
   457  			maxAttempts = suite.config.FlakeAttempts
   458  		}
   459  
   460  		for attempt := 0; !skip && (attempt < maxAttempts); attempt++ {
   461  			suite.currentSpecReport.NumAttempts = attempt + 1
   462  			suite.writer.Truncate()
   463  			suite.outputInterceptor.StartInterceptingOutput()
   464  			if attempt > 0 {
   465  				fmt.Fprintf(suite.writer, "\nGinkgo: Attempt #%d Failed.  Retrying...\n", attempt)
   466  			}
   467  			isFinalAttempt := (attempt == maxAttempts-1)
   468  
   469  			interruptStatus := suite.interruptHandler.Status()
   470  			deepestNestingLevelAttained := -1
   471  			var nodes = spec.Nodes.WithType(types.NodeTypeBeforeAll).Filter(func(n Node) bool {
   472  				return nodeState[n.ID] != types.SpecStatePassed
   473  			})
   474  			nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel()
   475  			nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...)
   476  			nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeIt)...)
   477  
   478  			var terminatingNode Node
   479  			for j := range nodes {
   480  				deepestNestingLevelAttained = max(deepestNestingLevelAttained, nodes[j].NestingLevel)
   481  				suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(nodes[j], interruptStatus.Channel, spec.Nodes.BestTextFor(nodes[j]))
   482  				suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime)
   483  				nodeState[nodes[j].ID] = suite.currentSpecReport.State
   484  				if suite.currentSpecReport.State != types.SpecStatePassed {
   485  					terminatingNode = nodes[j]
   486  					break
   487  				}
   488  			}
   489  
   490  			afterAllNodesThatRan := map[uint]bool{}
    491  			// pull out some shared code so we aren't repeating ourselves down below. this just runs the After and cleanup nodes
   492  			runAfterAndCleanupNodes := func(nodes Nodes) {
   493  				for j := range nodes {
   494  					state, failure := suite.runNode(nodes[j], suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(nodes[j]))
   495  					suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime)
   496  					nodeState[nodes[j].ID] = state
   497  					if suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted {
   498  						suite.currentSpecReport.State = state
   499  						suite.currentSpecReport.Failure = failure
   500  						if state != types.SpecStatePassed {
   501  							terminatingNode = nodes[j]
   502  						}
   503  					}
   504  					if nodes[j].NodeType.Is(types.NodeTypeAfterAll) {
   505  						afterAllNodesThatRan[nodes[j].ID] = true
   506  					}
   507  				}
   508  			}
   509  
    510  			// pull out a helper that captures the logic of whether or not we should run a given After node.
    511  			// the complexity here stems from the fact that we allow nested ordered containers and flaky-test retries
   512  			shouldRunAfterNode := func(n Node) bool {
   513  				if n.NodeType.Is(types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
   514  					return true
   515  				}
   516  				var id uint
   517  				if n.NodeType.Is(types.NodeTypeAfterAll) {
   518  					id = n.ID
   519  					if afterAllNodesThatRan[id] { //we've already run on this attempt. don't run again.
   520  						return false
   521  					}
   522  				}
   523  				if n.NodeType.Is(types.NodeTypeCleanupAfterAll) {
   524  					id = n.NodeIDWhereCleanupWasGenerated
   525  				}
   526  				isLastSpecWithNode := indexOfLastSpecContainingNodeID(id) == i
   527  
   528  				switch suite.currentSpecReport.State {
   529  				case types.SpecStatePassed: //we've passed so far...
   530  					return isLastSpecWithNode //... and we're the last spec with this AfterNode, so we should run it
   531  				case types.SpecStateSkipped: //the spec was skipped by the user...
   532  					if isLastSpecWithNode {
   533  						return true //...we're the last spec, so we should run the AfterNode
   534  					}
   535  					if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) && terminatingNode.NestingLevel == n.NestingLevel {
   536  						return true //...or, a BeforeAll was skipped and it's at our nesting level, so our subgroup is going to skip
   537  					}
   538  				case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed...
   539  					if isFinalAttempt {
   540  						return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run
   541  					}
   542  					if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) {
   543  						//...we'll be rerunning a BeforeAll so we should cleanup after it if...
   544  						if n.NodeType.Is(types.NodeTypeAfterAll) && terminatingNode.NestingLevel == n.NestingLevel {
   545  							return true //we're at the same nesting level
   546  						}
   547  						if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated {
   548  							return true //we're a DeferCleanup generated by it
   549  						}
   550  					}
   551  					if terminatingNode.NodeType.Is(types.NodeTypeAfterAll) {
   552  						//...we'll be rerunning an AfterAll so we should cleanup after it if...
   553  						if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated {
   554  							return true //we're a DeferCleanup generated by it
   555  						}
   556  					}
   557  				case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted
   558  					return true //...that means the test run is over and we should clean up the stack.  Run the AfterNode
   559  				}
   560  				return false
   561  			}
   562  
    563  			// first pass - run all the JustAfterEach, AfterEach, and AfterAll nodes.  Our shouldRunAfterNode filter function decides which AfterAlls actually need to run.
   564  			afterNodes := spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel()
   565  			afterNodes = afterNodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterEach).CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()...)
   566  			afterNodes = afterNodes.WithinNestingLevel(deepestNestingLevelAttained)
   567  			afterNodes = afterNodes.Filter(shouldRunAfterNode)
   568  			runAfterAndCleanupNodes(afterNodes)
   569  
    570  			// second pass - perhaps we didn't run the AfterAlls, but a state change due to an AfterEach now requires us to run them:
   571  			afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode)
   572  			runAfterAndCleanupNodes(afterNodes)
   573  
   574  			// now we run any DeferCleanups
   575  			afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()
   576  			afterNodes = append(afterNodes, suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Filter(shouldRunAfterNode).Reverse()...)
   577  			runAfterAndCleanupNodes(afterNodes)
   578  
    579  			// third pass - perhaps a DeferCleanup failed and now we need to run the AfterAlls.
   580  			afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode)
   581  			runAfterAndCleanupNodes(afterNodes)
   582  
   583  			// and finally - running AfterAlls may have generated some new DeferCleanup nodes, let's run them to finish up
   584  			afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse().Filter(shouldRunAfterNode)
   585  			runAfterAndCleanupNodes(afterNodes)
   586  
   587  			suite.currentSpecReport.EndTime = time.Now()
   588  			suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
   589  			suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
   590  			suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
   591  
   592  			if suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
   593  				break
   594  			}
   595  		}
   596  
   597  		//send the spec report to any attached ReportAfterEach blocks - this will update suite.currentSpecReport if failures occur in these blocks
   598  		suite.reportEach(spec, types.NodeTypeReportAfterEach)
   599  		suite.processCurrentSpecReport()
   600  		if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
   601  			groupSucceeded = false
   602  		}
   603  		suite.currentSpecReport = types.SpecReport{}
   604  	}
   605  }
   606  
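         // reportEach invokes any ReportBeforeEach/ReportAfterEach nodes attached to the spec, handing
         // each one a copy of the current spec report.  These nodes are not interrupted mid-run (an
         // interrupt placeholder message is installed instead), and a failure or abort inside a
         // reporting node overrides an otherwise passing spec state.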
   607  func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
   608  	if suite.config.DryRun {
   609  		return
   610  	}
   611  
   612  	nodes := spec.Nodes.WithType(nodeType)
   613  	if nodeType == types.NodeTypeReportAfterEach {
   614  		nodes = nodes.SortedByDescendingNestingLevel()
   615  	}
   616  	if nodeType == types.NodeTypeReportBeforeEach {
   617  		nodes = nodes.SortedByAscendingNestingLevel()
   618  	}
   619  	if len(nodes) == 0 {
   620  		return
   621  	}
   622  
   623  	for i := range nodes {
   624  		suite.writer.Truncate()
   625  		suite.outputInterceptor.StartInterceptingOutput()
   626  		report := suite.currentSpecReport
   627  		nodes[i].Body = func() {
   628  			nodes[i].ReportEachBody(report)
   629  		}
   630  		suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS,
   631  			"{{yellow}}Ginkgo received an interrupt signal but is currently running a %s node.  To avoid an invalid report the %s node will not be interrupted however subsequent tests will be skipped.{{/}}\n\n{{bold}}The running %s node is at:\n%s.{{/}}",
   632  			nodeType, nodeType, nodeType,
   633  			nodes[i].CodeLocation,
   634  		))
   635  		state, failure := suite.runNode(nodes[i], nil, spec.Nodes.BestTextFor(nodes[i]))
   636  		suite.interruptHandler.ClearInterruptPlaceholderMessage()
   637  		// If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state.
    638  		// Also, if the reporter ever aborts, always override the state to propagate the abort
   639  		if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) {
   640  			suite.currentSpecReport.State = state
   641  			suite.currentSpecReport.Failure = failure
   642  		}
   643  		suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
   644  		suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
   645  	}
   646  }
   647  
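         // runSuiteNode runs a suite-level node.  For SynchronizedBeforeSuite, process 1 runs the
         // first body and posts the returned []byte to the parallel-support server; the other
         // processes block until that data arrives, then every process runs the second body.  A
         // rough, user-level sketch of the shape this implements (dsl and helper names are assumptions):
         //
         //	SynchronizedBeforeSuite(func() []byte {
         //		return setUpSharedFixtureOnProcessOne() // runs on proc 1 only
         //	}, func(data []byte) {
         //		connectToSharedFixture(data) // runs on every process, with proc 1's data
         //	})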
   648  func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) {
   649  	if suite.config.DryRun {
   650  		suite.currentSpecReport.State = types.SpecStatePassed
   651  		return
   652  	}
   653  
   654  	suite.writer.Truncate()
   655  	suite.outputInterceptor.StartInterceptingOutput()
   656  	suite.currentSpecReport.StartTime = time.Now()
   657  
   658  	var err error
   659  	switch node.NodeType {
   660  	case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite:
   661  		suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
   662  	case types.NodeTypeCleanupAfterSuite:
   663  		if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 {
   664  			err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
   665  		}
   666  		if err == nil {
   667  			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
   668  		}
   669  	case types.NodeTypeSynchronizedBeforeSuite:
   670  		var data []byte
   671  		var runAllProcs bool
   672  		if suite.config.ParallelProcess == 1 {
   673  			if suite.config.ParallelTotal > 1 {
   674  				suite.outputInterceptor.StopInterceptingAndReturnOutput()
   675  				suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
   676  			}
   677  			node.Body = func() { data = node.SynchronizedBeforeSuiteProc1Body() }
   678  			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
   679  			if suite.config.ParallelTotal > 1 {
   680  				suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
   681  				suite.outputInterceptor.StartInterceptingOutput()
   682  				if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
   683  					err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data)
   684  				} else {
   685  					err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil)
   686  				}
   687  			}
   688  			runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil
   689  		} else {
   690  			var proc1State types.SpecState
   691  			proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData()
   692  			switch proc1State {
   693  			case types.SpecStatePassed:
   694  				runAllProcs = true
   695  			case types.SpecStateFailed, types.SpecStatePanicked:
   696  				err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1()
   697  			case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped:
   698  				suite.currentSpecReport.State = proc1State
   699  			}
   700  		}
   701  		if runAllProcs {
   702  			node.Body = func() { node.SynchronizedBeforeSuiteAllProcsBody(data) }
   703  			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
   704  		}
   705  	case types.NodeTypeSynchronizedAfterSuite:
   706  		node.Body = node.SynchronizedAfterSuiteAllProcsBody
   707  		suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "")
   708  		if suite.config.ParallelProcess == 1 {
   709  			if suite.config.ParallelTotal > 1 {
   710  				err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
   711  			}
   712  			if err == nil {
   713  				if suite.config.ParallelTotal > 1 {
   714  					suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
   715  					suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
   716  				}
   717  
   718  				node.Body = node.SynchronizedAfterSuiteProc1Body
   719  				state, failure := suite.runNode(node, interruptChannel, "")
   720  				if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
   721  					suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure
   722  				}
   723  			}
   724  		}
   725  	}
   726  
   727  	if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
   728  		suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
   729  	}
   730  
   731  	suite.currentSpecReport.EndTime = time.Now()
   732  	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
   733  	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
   734  	suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
   735  
   736  	return
   737  }
   738  
   739  func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) {
   740  	if suite.config.DryRun {
   741  		suite.currentSpecReport.State = types.SpecStatePassed
   742  		return
   743  	}
   744  
   745  	suite.writer.Truncate()
   746  	suite.outputInterceptor.StartInterceptingOutput()
   747  	suite.currentSpecReport.StartTime = time.Now()
   748  
   749  	if suite.config.ParallelTotal > 1 {
   750  		aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
   751  		if err != nil {
   752  			suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
   753  			return
   754  		}
   755  		report = report.Add(aggregatedReport)
   756  	}
   757  
   758  	node.Body = func() { node.ReportAfterSuiteBody(report) }
   759  	suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS,
   760  		"{{yellow}}Ginkgo received an interrupt signal but is currently running a ReportAfterSuite node.  To avoid an invalid report the ReportAfterSuite node will not be interrupted.{{/}}\n\n{{bold}}The running ReportAfterSuite node is at:\n%s.{{/}}",
   761  		node.CodeLocation,
   762  	))
   763  	suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, nil, "")
   764  	suite.interruptHandler.ClearInterruptPlaceholderMessage()
   765  
   766  	suite.currentSpecReport.EndTime = time.Now()
   767  	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
   768  	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
   769  	suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
   770  
   771  	return
   772  }
   773  
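         // runNode executes a node's body on a fresh goroutine and then waits on a select: either the
         // body finishes (and the failer is drained for the resulting state and failure) or the
         // interrupt channel fires first and the node is reported as interrupted.  The deferred
         // recover catches panics, and the finished flag catches bodies that exit without returning
         // normally.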
   774  func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text string) (types.SpecState, types.Failure) {
   775  	if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
   776  		suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node)
   777  	}
   778  
   779  	suite.currentNode = node
   780  	defer func() {
   781  		suite.currentNode = Node{}
   782  	}()
   783  
   784  	if suite.config.EmitSpecProgress {
   785  		if text == "" {
   786  			text = "TOP-LEVEL"
   787  		}
   788  		s := fmt.Sprintf("[%s] %s\n  %s\n", node.NodeType.String(), text, node.CodeLocation.String())
   789  		suite.writer.Write([]byte(s))
   790  	}
   791  
   792  	var failure types.Failure
   793  	failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
   794  	if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) {
   795  		failure.FailureNodeContext = types.FailureNodeIsLeafNode
   796  	} else if node.NestingLevel <= 0 {
   797  		failure.FailureNodeContext = types.FailureNodeAtTopLevel
   798  	} else {
   799  		failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1
   800  	}
   801  
   802  	outcomeC := make(chan types.SpecState)
   803  	failureC := make(chan types.Failure)
   804  
   805  	go func() {
   806  		finished := false
   807  		defer func() {
   808  			if e := recover(); e != nil || !finished {
   809  				suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e)
   810  			}
   811  
   812  			outcome, failureFromRun := suite.failer.Drain()
   813  			outcomeC <- outcome
   814  			failureC <- failureFromRun
   815  		}()
   816  
   817  		node.Body()
   818  		finished = true
   819  	}()
   820  
   821  	select {
   822  	case outcome := <-outcomeC:
   823  		failureFromRun := <-failureC
   824  		if outcome == types.SpecStatePassed {
   825  			return outcome, types.Failure{}
   826  		}
   827  		failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
   828  		return outcome, failure
   829  	case <-interruptChannel:
   830  		failure.Message, failure.Location = suite.interruptHandler.InterruptMessageWithStackTraces(), node.CodeLocation
   831  		return types.SpecStateInterrupted, failure
   832  	}
   833  }
   834  
   835  func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
   836  	return types.Failure{
   837  		Message:             message,
   838  		Location:            node.CodeLocation,
   839  		FailureNodeContext:  types.FailureNodeIsLeafNode,
   840  		FailureNodeType:     node.NodeType,
   841  		FailureNodeLocation: node.CodeLocation,
   842  	}
   843  }
   844  
   845  func max(a, b int) int {
   846  	if a > b {
   847  		return a
   848  	}
   849  	return b
   850  }