k8s.io/apiserver@v0.31.1/pkg/util/flowcontrol/controller_test.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flowcontrol

import (
	"context"
	"fmt"
	"math/rand"
	"os"
	"reflect"
	"sync"
	"testing"
	"time"

	flowcontrol "k8s.io/api/flowcontrol/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	fcboot "k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap"
	"k8s.io/apiserver/pkg/util/flowcontrol/debug"
	fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing"
	fcfmt "k8s.io/apiserver/pkg/util/flowcontrol/format"
	"k8s.io/apiserver/pkg/util/flowcontrol/metrics"
	fcrequest "k8s.io/apiserver/pkg/util/flowcontrol/request"
	"k8s.io/client-go/informers"
	clientsetfake "k8s.io/client-go/kubernetes/fake"
	fcclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1"
	"k8s.io/klog/v2"
	"k8s.io/utils/clock"
	"k8s.io/utils/ptr"
)

// Some tests print a lot of debug logs, which slows them down considerably
// and can even cause them to time out.
var testDebugLogs = false

func TestMain(m *testing.M) {
	klog.InitFlags(nil)
	os.Exit(m.Run())
}

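// mandPLs maps the name of each mandatory priority level configuration
// to that configuration object.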
var mandPLs = func() map[string]*flowcontrol.PriorityLevelConfiguration {
	ans := make(map[string]*flowcontrol.PriorityLevelConfiguration)
	for _, mand := range fcboot.MandatoryPriorityLevelConfigurations {
		ans[mand.Name] = mand
	}
	return ans
}()

// hasPriorityLevelState reports whether the controller currently has state
// for the named priority level. In general usage, the boolean returned may
// be inaccurate by the time the caller examines it.
func (cfgCtlr *configController) hasPriorityLevelState(plName string) bool {
	cfgCtlr.lock.Lock()
	defer cfgCtlr.lock.Unlock()
	return cfgCtlr.priorityLevelStates[plName] != nil
}

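// ctlrTestState is the fixture shared by the tests in this file. It
// implements fq.QueueSetFactory, so the controller under test creates
// ctlrTestQueueSet instances instead of real queue sets, and it tracks
// the requests that a test chooses to hold open.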
type ctlrTestState struct {
	t               *testing.T
	cfgCtlr         *configController
	fcIfc           fcclient.FlowcontrolV1Interface
	existingPLs     map[string]*flowcontrol.PriorityLevelConfiguration
	existingFSs     map[string]*flowcontrol.FlowSchema
	heldRequestsMap map[string][]heldRequest
	requestWG       sync.WaitGroup
	lock            sync.Mutex
	queues          map[string]*ctlrTestQueueSet
}

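// heldRequest is a request whose execution is blocked until finishCh is closed.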
type heldRequest struct {
	rd       RequestDigest
	finishCh chan struct{}
}

var _ fq.QueueSetFactory = (*ctlrTestState)(nil)

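// ctlrTestQueueSetCompleter is the fq.QueueSetCompleter handed out by the
// fake factory; cqs is nil when the completer comes from BeginConstruction.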
type ctlrTestQueueSetCompleter struct {
	cts *ctlrTestState
	cqs *ctlrTestQueueSet
	qc  fq.QueuingConfig
}

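// ctlrTestQueueSet is a fake fq.QueueSet that merely counts the requests
// currently executing.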
type ctlrTestQueueSet struct {
	cts         *ctlrTestState
	qc          fq.QueuingConfig
	dc          fq.DispatchingConfig
	countActive int
}

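// ctlrTestRequest is the fake fq.Request returned by ctlrTestQueueSet.StartRequest.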
type ctlrTestRequest struct {
	cqs            *ctlrTestQueueSet
	qsName         string
	descr1, descr2 interface{}
}

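// BeginConstruction implements fq.QueueSetFactory.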
func (cts *ctlrTestState) BeginConstruction(qc fq.QueuingConfig, rip metrics.RatioedGaugePair, eso metrics.RatioedGauge, sdi metrics.Gauge) (fq.QueueSetCompleter, error) {
	return ctlrTestQueueSetCompleter{cts, nil, qc}, nil
}

func (cqs *ctlrTestQueueSet) BeginConfigChange(qc fq.QueuingConfig) (fq.QueueSetCompleter, error) {
	return ctlrTestQueueSetCompleter{cqs.cts, cqs, qc}, nil
}

func (cqs *ctlrTestQueueSet) Dump(bool) debug.QueueSetDump {
	return debug.QueueSetDump{}
}

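// Complete either registers a newly constructed fake queue set under its
// QueuingConfig's name or applies the new configuration to the existing one.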
func (cqc ctlrTestQueueSetCompleter) Complete(dc fq.DispatchingConfig) fq.QueueSet {
	cqc.cts.lock.Lock()
	defer cqc.cts.lock.Unlock()
	qs := cqc.cqs
	if qs == nil {
		qs = &ctlrTestQueueSet{cts: cqc.cts, qc: cqc.qc, dc: dc}
		cqc.cts.queues[cqc.qc.Name] = qs
	} else {
		qs.qc, qs.dc = cqc.qc, dc
	}
	return qs
}

func (cqs *ctlrTestQueueSet) IsIdle() bool {
	cqs.cts.lock.Lock()
	defer cqs.cts.lock.Unlock()
	klog.V(7).Infof("For %p QS %s, countActive==%d", cqs, cqs.qc.Name, cqs.countActive)
	return cqs.countActive == 0
}

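// StartRequest accepts every request, merely incrementing the count of
// requests active in this fake queue set.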
func (cqs *ctlrTestQueueSet) StartRequest(ctx context.Context, width *fcrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) (req fq.Request, idle bool) {
	cqs.cts.lock.Lock()
	defer cqs.cts.lock.Unlock()
	cqs.countActive++
	if testDebugLogs {
		cqs.cts.t.Logf("Queued %q %#+v %#+v for %p QS=%s, countActive:=%d", fsName, descr1, descr2, cqs, cqs.qc.Name, cqs.countActive)
	}
	return &ctlrTestRequest{cqs, cqs.qc.Name, descr1, descr2}, false
}

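// Finish runs the request's execute function, decrements the active count,
// and reports whether the fake queue set is now idle.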
func (ctr *ctlrTestRequest) Finish(execute func()) bool {
	execute()
	ctr.cqs.cts.lock.Lock()
	defer ctr.cqs.cts.lock.Unlock()
	ctr.cqs.countActive--
	if testDebugLogs {
		ctr.cqs.cts.t.Logf("Finished %#+v %#+v for %p QS=%s, countActive:=%d", ctr.descr1, ctr.descr2, ctr.cqs, ctr.cqs.qc.Name, ctr.cqs.countActive)
	}
	return ctr.cqs.countActive == 0
}

func (cts *ctlrTestState) getQueueSetNames() sets.String {
	cts.lock.Lock()
	defer cts.lock.Unlock()
	return sets.StringKeySet(cts.queues)
}

func (cts *ctlrTestState) getNonIdleQueueSetNames() sets.String {
	cts.lock.Lock()
	defer cts.lock.Unlock()
	ans := sets.NewString()
	for name, qs := range cts.queues {
		if qs.countActive > 0 {
			ans.Insert(name)
		}
	}
	return ans
}

func (cts *ctlrTestState) hasNonIdleQueueSet(name string) bool {
	cts.lock.Lock()
	defer cts.lock.Unlock()
	qs := cts.queues[name]
	return qs != nil && qs.countActive > 0
}

func (cts *ctlrTestState) addHeldRequest(plName string, rd RequestDigest, finishCh chan struct{}) {
	cts.lock.Lock()
	defer cts.lock.Unlock()
	hrs := cts.heldRequestsMap[plName]
	hrs = append(hrs, heldRequest{rd, finishCh})
	cts.heldRequestsMap[plName] = hrs
	if testDebugLogs {
		cts.t.Logf("Holding %#+v for %s, count:=%d", rd, plName, len(hrs))
	}
}

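// popHeldRequest removes and returns one held request from some arbitrary
// priority level, along with the number of requests still held for that
// level; hr is nil when nothing is held.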
func (cts *ctlrTestState) popHeldRequest() (plName string, hr *heldRequest, nCount int) {
	cts.lock.Lock()
	defer cts.lock.Unlock()
	var hrs []heldRequest
	for {
		for plName, hrs = range cts.heldRequestsMap {
			goto GotOne
		}
		return "", nil, 0
	GotOne:
		if nhr := len(hrs); nhr > 0 {
			hrv := hrs[nhr-1]
			hrs = hrs[:nhr-1]
			hr = &hrv
		}
		if len(hrs) == 0 {
			delete(cts.heldRequestsMap, plName)
		} else {
			cts.heldRequestsMap[plName] = hrs
		}
		if hr != nil {
			nCount = len(hrs)
			return
		}
	}
}

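// mandQueueSetNames holds the names of the mandatory priority levels, whose
// queue sets are always expected to exist.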
var mandQueueSetNames = func() sets.String {
	mandQueueSetNames := sets.NewString()
	for _, mpl := range fcboot.MandatoryPriorityLevelConfigurations {
		mandQueueSetNames.Insert(mpl.Name)
	}
	return mandQueueSetNames
}()

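// TestConfigConsumer runs several trials. Each trial repeatedly generates a
// random collection of PriorityLevelConfiguration and FlowSchema objects,
// has the controller digest them, and checks that queue sets are created,
// retained, and retired as expected while test requests flow through the
// fake queue sets.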
func TestConfigConsumer(t *testing.T) {
	rngOuter := rand.New(rand.NewSource(1234567890123456789))
	for i := 1; i <= 10; i++ {
		rng := rand.New(rand.NewSource(int64(rngOuter.Uint64())))
		t.Run(fmt.Sprintf("trial%d:", i), func(t *testing.T) {
			clientset := clientsetfake.NewSimpleClientset()
			informerFactory := informers.NewSharedInformerFactory(clientset, 0)
			flowcontrolClient := clientset.FlowcontrolV1()
			cts := &ctlrTestState{t: t,
				fcIfc:           flowcontrolClient,
				existingFSs:     map[string]*flowcontrol.FlowSchema{},
				existingPLs:     map[string]*flowcontrol.PriorityLevelConfiguration{},
				heldRequestsMap: map[string][]heldRequest{},
				queues:          map[string]*ctlrTestQueueSet{},
			}
			ctlr := newTestableController(TestableConfig{
				Name:                   "Controller",
				Clock:                  clock.RealClock{},
				AsFieldManager:         ConfigConsumerAsFieldManager,
				FoundToDangling:        func(found bool) bool { return !found },
				InformerFactory:        informerFactory,
				FlowcontrolClient:      flowcontrolClient,
				ServerConcurrencyLimit: 100, // server concurrency limit
				ReqsGaugeVec:           metrics.PriorityLevelConcurrencyGaugeVec,
				ExecSeatsGaugeVec:      metrics.PriorityLevelExecutionSeatsGaugeVec,
				QueueSetFactory:        cts,
			})
			cts.cfgCtlr = ctlr
			persistingPLNames := sets.NewString()
			trialStep := fmt.Sprintf("trial%d-0", i)
			_, _, desiredPLNames, newBadPLNames := genPLs(rng, trialStep, persistingPLNames, 0)
			_, _, newFTRs, newCatchAlls := genFSs(t, rng, trialStep, desiredPLNames, newBadPLNames, 0)
			for j := 0; ; {
				if testDebugLogs {
					t.Logf("For %s, desiredPLNames=%#+v", trialStep, desiredPLNames)
					t.Logf("For %s, newFTRs=%#+v", trialStep, newFTRs)
				}
				// Check that the latest digestion did the right thing
				nextPLNames := sets.NewString()
				for oldPLName := range persistingPLNames {
					if mandPLs[oldPLName] != nil || cts.hasNonIdleQueueSet(oldPLName) {
						nextPLNames.Insert(oldPLName)
					}
				}
				persistingPLNames = nextPLNames.Union(desiredPLNames)
				expectedQueueSetNames := persistingPLNames.Union(mandQueueSetNames)
				allQueueSetNames := cts.getQueueSetNames()
				missingQueueSetNames := expectedQueueSetNames.Difference(allQueueSetNames)
				if len(missingQueueSetNames) > 0 {
					t.Errorf("Fail: missing QueueSets %v", missingQueueSetNames)
				}
				nonIdleQueueSetNames := cts.getNonIdleQueueSetNames()
				extraQueueSetNames := nonIdleQueueSetNames.Difference(expectedQueueSetNames)
				if len(extraQueueSetNames) > 0 {
					t.Errorf("Fail: unexpected QueueSets %v", extraQueueSetNames)
				}
				for plName, hr, nCount := cts.popHeldRequest(); hr != nil; plName, hr, nCount = cts.popHeldRequest() {
					desired := desiredPLNames.Has(plName) || mandPLs[plName] != nil
					if testDebugLogs {
						t.Logf("Releasing held request %#+v, desired=%v, plName=%s, count:=%d", hr.rd, desired, plName, nCount)
					}
					close(hr.finishCh)
				}
				cts.requestWG.Wait()
				for _, ftr := range newFTRs {
					checkNewFS(cts, rng, trialStep, ftr, newCatchAlls)
				}

				j++
				if j > 20 {
					break
				}

				// Calculate expected survivors

				// Now create a new config and digest it
				trialStep = fmt.Sprintf("trial%d-%d", i, j)
				var newPLs []*flowcontrol.PriorityLevelConfiguration
				var newFSs []*flowcontrol.FlowSchema
				newPLs, _, desiredPLNames, newBadPLNames = genPLs(rng, trialStep, persistingPLNames, 1+rng.Intn(4))
				newFSs, _, newFTRs, newCatchAlls = genFSs(t, rng, trialStep, desiredPLNames, newBadPLNames, 1+rng.Intn(6))

				if testDebugLogs {
					for _, newPL := range newPLs {
						t.Logf("For %s, digesting newPL=%s", trialStep, fcfmt.Fmt(newPL))
					}
					for _, newFS := range newFSs {
						t.Logf("For %s, digesting newFS=%s", trialStep, fcfmt.Fmt(newFS))
					}
				}
				_ = ctlr.lockAndDigestConfigObjects(newPLs, newFSs)
			}
			for plName, hr, nCount := cts.popHeldRequest(); hr != nil; plName, hr, nCount = cts.popHeldRequest() {
				if testDebugLogs {
					desired := desiredPLNames.Has(plName) || mandPLs[plName] != nil
					t.Logf("Releasing held request %#+v, desired=%v, plName=%s, count:=%d", hr.rd, desired, plName, nCount)
				}
				close(hr.finishCh)
			}
			cts.requestWG.Wait()
		})
	}
}

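// TestAPFControllerWithGracefulShutdown checks that the controller's Run
// function returns, without error, after its stop channel is closed.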
func TestAPFControllerWithGracefulShutdown(t *testing.T) {
	const plName = "test-ps"
	fs := &flowcontrol.FlowSchema{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-fs",
		},
		Spec: flowcontrol.FlowSchemaSpec{
			MatchingPrecedence: 100,
			PriorityLevelConfiguration: flowcontrol.PriorityLevelConfigurationReference{
				Name: plName,
			},
			DistinguisherMethod: &flowcontrol.FlowDistinguisherMethod{
				Type: flowcontrol.FlowDistinguisherMethodByUserType,
			},
		},
	}

	pl := &flowcontrol.PriorityLevelConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name: plName,
		},
		Spec: flowcontrol.PriorityLevelConfigurationSpec{
			Type: flowcontrol.PriorityLevelEnablementLimited,
			Limited: &flowcontrol.LimitedPriorityLevelConfiguration{
				NominalConcurrencyShares: ptr.To(int32(10)),
				LimitResponse: flowcontrol.LimitResponse{
					Type: flowcontrol.LimitResponseTypeReject,
				},
			},
		},
	}

	clientset := clientsetfake.NewSimpleClientset(fs, pl)
	informerFactory := informers.NewSharedInformerFactory(clientset, time.Second)
	flowcontrolClient := clientset.FlowcontrolV1()
	cts := &ctlrTestState{t: t,
		fcIfc:           flowcontrolClient,
		existingFSs:     map[string]*flowcontrol.FlowSchema{},
		existingPLs:     map[string]*flowcontrol.PriorityLevelConfiguration{},
		heldRequestsMap: map[string][]heldRequest{},
		queues:          map[string]*ctlrTestQueueSet{},
	}
	controller := newTestableController(TestableConfig{
		Name:                   "Controller",
		Clock:                  clock.RealClock{},
		AsFieldManager:         ConfigConsumerAsFieldManager,
		FoundToDangling:        func(found bool) bool { return !found },
		InformerFactory:        informerFactory,
		FlowcontrolClient:      flowcontrolClient,
		ServerConcurrencyLimit: 100,
		ReqsGaugeVec:           metrics.PriorityLevelConcurrencyGaugeVec,
		ExecSeatsGaugeVec:      metrics.PriorityLevelExecutionSeatsGaugeVec,
		QueueSetFactory:        cts,
	})

	stopCh, controllerCompletedCh := make(chan struct{}), make(chan struct{})
	var controllerErr error

	informerFactory.Start(stopCh)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	status := informerFactory.WaitForCacheSync(ctx.Done())
	if names := unsynced(status); len(names) > 0 {
		t.Fatalf("WaitForCacheSync did not successfully complete, resources=%#v", names)
	}

	go func() {
		defer close(controllerCompletedCh)
		controllerErr = controller.Run(stopCh)
	}()

	// ensure that the controller has run its first loop.
	err := wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (done bool, err error) {
		return controller.hasPriorityLevelState(plName), nil
	})
	if err != nil {
		t.Errorf("expected the controller to reconcile the priority level configuration object: %s, error: %s", plName, err)
	}

	close(stopCh)
	t.Log("waiting for the controller Run function to shutdown gracefully")
	<-controllerCompletedCh

	if controllerErr != nil {
		t.Errorf("expected nil error from controller Run function, but got: %#v", controllerErr)
	}
}

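// unsynced returns the names of the informer types that have not synced.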
func unsynced(status map[reflect.Type]bool) []string {
	names := make([]string, 0)

	for objType, synced := range status {
		if !synced {
			names = append(names, objType.Name())
		}
	}

	return names
}

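// checkNewFS sends the FlowSchema's test digests through the controller's
// Handle method and checks that each request is matched to the expected
// FlowSchema and priority level. About 20% of the requests are held open so
// that their queue sets stay busy across the next reconfiguration.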
func checkNewFS(cts *ctlrTestState, rng *rand.Rand, trialName string, ftr *fsTestingRecord, catchAlls map[bool]*flowcontrol.FlowSchema) {
	t := cts.t
	ctlr := cts.cfgCtlr
	fs := ftr.fs
	expectedPLName := fs.Spec.PriorityLevelConfiguration.Name
	ctx := context.Background()
	// Use this to make sure all these requests have started executing
	// before the next reconfiguration
	var startWG sync.WaitGroup
	for matches, digests1 := range ftr.digests {
		for isResource, digests2 := range digests1 {
			for _, rd := range digests2 {
				finishCh := make(chan struct{})
				rdu := uniqify(rd)
				cts.requestWG.Add(1)
				startWG.Add(1)
				go func(matches, isResource bool, rdu RequestDigest) {
					expectedMatch := matches && ftr.wellFormed && (fsPrecedes(fs, catchAlls[isResource]) || fs.Name == catchAlls[isResource].Name)
					ctlr.Handle(ctx, rdu, func(matchFS *flowcontrol.FlowSchema, matchPL *flowcontrol.PriorityLevelConfiguration, _ string) {
						matchIsExempt := matchPL.Spec.Type == flowcontrol.PriorityLevelEnablementExempt
						if testDebugLogs {
							t.Logf("Considering FlowSchema %s, expectedMatch=%v, isResource=%v: Handle(%#+v) => note(fs=%s, pl=%s, isExempt=%v)", fs.Name, expectedMatch, isResource, rdu, matchFS.Name, matchPL.Name, matchIsExempt)
						}
						if a := matchFS.Name == fs.Name; expectedMatch != a {
							t.Errorf("Fail at %s/%s: rd=%#+v, expectedMatch=%v, actualMatch=%v, matchFSName=%q, catchAlls=%#+v", trialName, fs.Name, rdu, expectedMatch, a, matchFS.Name, catchAlls)
						}
						if matchFS.Name == fs.Name {
							if fs.Spec.PriorityLevelConfiguration.Name != matchPL.Name {
								t.Errorf("Fail at %s/%s: expected=%v, actual=%v", trialName, fs.Name, fs.Spec.PriorityLevelConfiguration.Name, matchPL.Name)
							}
						}
					}, func() fcrequest.WorkEstimate {
						return fcrequest.WorkEstimate{InitialSeats: 1}
					}, func(inQueue bool) {
					}, func() {
						startWG.Done()
						<-finishCh
					})
					cts.requestWG.Done()
				}(matches, isResource, rdu)
				if rng.Float32() < 0.8 {
					if testDebugLogs {
						t.Logf("Immediate request %#+v, plName=%s", rdu, expectedPLName)
					}
					close(finishCh)
				} else {
					cts.addHeldRequest(expectedPLName, rdu, finishCh)
				}
			}
		}
	}
	startWG.Wait()
}

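// genPLs generates n new random priority levels for the trial, randomly
// revives some of the previously persisting ones, and sometimes throws in a
// mandatory one. badNames are names that FlowSchemas may reference but for
// which no priority level is created.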
func genPLs(rng *rand.Rand, trial string, oldPLNames sets.String, n int) (pls []*flowcontrol.PriorityLevelConfiguration, plMap map[string]*flowcontrol.PriorityLevelConfiguration, goodNames, badNames sets.String) {
	pls = make([]*flowcontrol.PriorityLevelConfiguration, 0, n)
	plMap = make(map[string]*flowcontrol.PriorityLevelConfiguration, n)
	goodNames = sets.NewString()
	badNames = sets.NewString(trial+"-nopl1", trial+"-nopl2")
	addGood := func(pl *flowcontrol.PriorityLevelConfiguration) {
		pls = append(pls, pl)
		plMap[pl.Name] = pl
		goodNames.Insert(pl.Name)
	}
	for i := 1; i <= n; i++ {
		pl := genPL(rng, fmt.Sprintf("%s-pl%d", trial, i))
		addGood(pl)
	}
	for oldPLName := range oldPLNames {
		if _, has := mandPLs[oldPLName]; has {
			continue
		}
		if rng.Float32() < 0.67 {
			pl := genPL(rng, oldPLName)
			addGood(pl)
		}
	}
	for _, pl := range mandPLs {
		if n > 0 && rng.Float32() < 0.5 && !(goodNames.Has(pl.Name) || badNames.Has(pl.Name)) {
			addGood(pl)
		}
	}
	return
}

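// genFSs generates n random FlowSchemas referencing the given good and bad
// priority level names, possibly along with the mandatory catch-all and
// exempt schemas, and tracks which well-formed schema is expected to catch
// otherwise-unmatched resource (true) and non-resource (false) requests.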
func genFSs(t *testing.T, rng *rand.Rand, trial string, goodPLNames, badPLNames sets.String, n int) (newFSs []*flowcontrol.FlowSchema, newFSMap map[string]*flowcontrol.FlowSchema, newFTRs map[string]*fsTestingRecord, catchAlls map[bool]*flowcontrol.FlowSchema) {
	newFTRs = map[string]*fsTestingRecord{}
	catchAlls = map[bool]*flowcontrol.FlowSchema{
		false: fcboot.MandatoryFlowSchemaCatchAll,
		true:  fcboot.MandatoryFlowSchemaCatchAll}
	newFSMap = map[string]*flowcontrol.FlowSchema{}
	add := func(ftr *fsTestingRecord) {
		newFSs = append(newFSs, ftr.fs)
		newFSMap[ftr.fs.Name] = ftr.fs
		newFTRs[ftr.fs.Name] = ftr
		if ftr.wellFormed {
			if ftr.matchesAllNonResourceRequests && fsPrecedes(ftr.fs, catchAlls[false]) {
				catchAlls[false] = ftr.fs
			}
			if ftr.matchesAllResourceRequests && fsPrecedes(ftr.fs, catchAlls[true]) {
				catchAlls[true] = ftr.fs
			}
		}
		if testDebugLogs {
			t.Logf("For trial %s, adding wf=%v FlowSchema %s", trial, ftr.wellFormed, fcfmt.Fmt(ftr.fs))
		}
	}
	if n == 0 || rng.Float32() < 0.5 {
		add(mandFTRCatchAll)
	}
	for i := 1; i <= n; i++ {
		ftr := genFS(t, rng, fmt.Sprintf("%s-fs%d", trial, i), false, goodPLNames, badPLNames)
		add(ftr)
	}
	if n == 0 || rng.Float32() < 0.5 {
		add(mandFTRExempt)
	}
	return
}

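// fsPrecedes reports whether FlowSchema a wins over b when both match a
// request: lower MatchingPrecedence wins, with ties broken by name.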
func fsPrecedes(a, b *flowcontrol.FlowSchema) bool {
	if a.Spec.MatchingPrecedence < b.Spec.MatchingPrecedence {
		return true
	}
	if a.Spec.MatchingPrecedence == b.Spec.MatchingPrecedence {
		return a.Name < b.Name
	}
	return false
}