github.com/onsi/ginkgo@v1.16.6-0.20211118180735-4e1925ba4c95/internal/ordering.go

package internal

import (
	"math/rand"
	"sort"

	"github.com/onsi/ginkgo/types"
)

type GroupedSpecIndices []SpecIndices
type SpecIndices []int

func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) {
	/*
		Ginkgo has sophisticated support for randomizing specs. Specs are guaranteed to have the same
		order for a given seed across test runs.

		By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging
		experience - specs within a given container run in the order they appear in the file.

		Developers can set -randomizeAllSpecs to shuffle _all_ specs.

		In addition, spec containers can be marked as Ordered. Specs within an Ordered container are never shuffled.

		Finally, specs and spec containers can be marked as Serial. When running in parallel, serial specs run on Process #1 _after_ all other processes have finished.
	*/

	// Seed a new random source based on the configured random seed.
	r := rand.New(rand.NewSource(suiteConfig.RandomSeed))

	// Decide how to group specs for shuffling. By default we shuffle top-level containers,
	// but setting --randomize-all-specs causes us to shuffle all specs (except those in Ordered containers).
	nodeTypesToGroup := types.NodeTypesForContainerAndIt
	if suiteConfig.RandomizeAllSpecs {
		nodeTypesToGroup = types.NodeTypeIt
	}

	// Go through all specs and build the permutable groups. These are groupings that can be shuffled.
	// Along the way we extract sort keys to ensure a consistent order of specs before we permute them.
	permutableGroups := map[uint]SpecIndices{}
	groupIsMarkedOrdered := map[uint]bool{}
	groupSortKeys := map[uint]string{}
	groupIDs := []uint{}
	for idx, spec := range specs {
		groupingNode := spec.Nodes.FirstNodeMarkedOrdered()
		if groupingNode.IsZero() {
			// If a spec is not in an Ordered container...
			// ...we group based on the first node with a node type satisfying `nodeTypesToGroup`.
			groupingNode = spec.Nodes.FirstNodeWithType(nodeTypesToGroup)
		} else {
			// If a spec is in an Ordered container...
			// ...we group based on the outermost Ordered container.
			groupIsMarkedOrdered[groupingNode.ID] = true
		}
		// We've figured out which group we're in, so we add this spec's index to the group.
		permutableGroups[groupingNode.ID] = append(permutableGroups[groupingNode.ID], idx)
		// And, while we're at it, extract the sort key for this group if we haven't already.
		if groupSortKeys[groupingNode.ID] == "" {
			groupSortKeys[groupingNode.ID] = groupingNode.CodeLocation.String()
			groupIDs = append(groupIDs, groupingNode.ID)
		}
	}

	// Now sort the groups by the sort key.
	// We use the grouping node's code location and break ties using group ID.
	sort.SliceStable(groupIDs, func(i, j int) bool {
		keyA := groupSortKeys[groupIDs[i]]
		keyB := groupSortKeys[groupIDs[j]]
		if keyA == keyB {
			return groupIDs[i] < groupIDs[j]
		}
		return keyA < keyB
	})

	// Now permute the sorted group IDs and build the ordered groups.
	orderedGroups := GroupedSpecIndices{}
	permutation := r.Perm(len(groupIDs))
	for _, j := range permutation {
		if groupIsMarkedOrdered[groupIDs[j]] {
			// If the group is marked Ordered, we preserve the grouping to ensure ordered specs always run on the same Ginkgo process.
			orderedGroups = append(orderedGroups, permutableGroups[groupIDs[j]])
		} else {
			// If the group is _not_ marked Ordered, we expand the grouping (it has served its purpose for permutation) in order to allow parallelizing across the specs in the group.
			for _, idx := range permutableGroups[groupIDs[j]] {
				orderedGroups = append(orderedGroups, SpecIndices{idx})
			}
		}
	}

	// If we're running in series, we're done.
	if suiteConfig.ParallelTotal == 1 {
		return orderedGroups, GroupedSpecIndices{}
	}

	// We're running in parallel, so we need to partition the ordered groups into a parallelizable set and a serial set.
	// The parallelizable groups will run across all Ginkgo processes...
	// ...the serial groups will only run on Process #1 after all other processes have exited.
	parallelizableGroups, serialGroups := GroupedSpecIndices{}, GroupedSpecIndices{}
	for _, specIndices := range orderedGroups {
		if specs[specIndices[0]].Nodes.HasNodeMarkedSerial() {
			serialGroups = append(serialGroups, specIndices)
		} else {
			parallelizableGroups = append(parallelizableGroups, specIndices)
		}
	}

	return parallelizableGroups, serialGroups
}
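
The sketch below isolates the two stdlib mechanisms OrderSpecs leans on: a stable sort keyed on a string with an ID tie-break, followed by a seeded rand.Perm, which is deterministic for a given seed and therefore reproducible across runs. It is a standalone toy, not part of Ginkgo's API; the group IDs and code-location keys are made-up stand-ins for the real groupingNode data.

package main

import (
	"fmt"
	"math/rand"
	"sort"
)

func main() {
	// Hypothetical groups: ID -> sort key. In Ginkgo proper the key is the
	// grouping node's code location, so two groups can share a key.
	groupSortKeys := map[uint]string{
		3: "suite_test.go:40",
		1: "suite_test.go:10",
		2: "suite_test.go:10",
	}
	groupIDs := []uint{3, 1, 2}

	// Sort by key, breaking ties by group ID, mirroring OrderSpecs.
	sort.SliceStable(groupIDs, func(i, j int) bool {
		keyA, keyB := groupSortKeys[groupIDs[i]], groupSortKeys[groupIDs[j]]
		if keyA == keyB {
			return groupIDs[i] < groupIDs[j]
		}
		return keyA < keyB
	})

	// A rand.Rand seeded with a fixed value yields the same permutation on
	// every run, which is what makes --seed reproducible.
	r := rand.New(rand.NewSource(17))
	for _, j := range r.Perm(len(groupIDs)) {
		fmt.Println(groupIDs[j])
	}
}

Running this twice prints the identical sequence both times; changing the seed changes the permutation but keeps it reproducible. Note that sorting before permuting matters: it pins down a canonical input order so the seeded shuffle is stable even if the groups were discovered in a different order.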