k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/controller/garbagecollector/garbagecollector_test.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package garbagecollector

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"reflect"
	"strings"
	"sync"
	"testing"
	"time"

	"golang.org/x/time/rate"

	"k8s.io/klog/v2"

	"github.com/golang/groupcache/lru"
	"github.com/google/go-cmp/cmp"
	"github.com/stretchr/testify/assert"

	_ "k8s.io/kubernetes/pkg/apis/core/install"
	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
	"k8s.io/utils/pointer"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/json"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/metadata"
	fakemetadata "k8s.io/client-go/metadata/fake"
	"k8s.io/client-go/metadata/metadatainformer"
	restclient "k8s.io/client-go/rest"
	clientgotesting "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/controller-manager/pkg/informerfactory"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	c "k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/test/utils/ktesting"
)

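// testRESTMapper wraps a RESTMapper so the garbage collector can call Reset
// on it. meta.MaybeResetRESTMapper resets the wrapped mapper only if it
// implements meta.ResettableRESTMapper, and is a no-op otherwise.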
type testRESTMapper struct {
	meta.RESTMapper
}

func (m *testRESTMapper) Reset() {
	meta.MaybeResetRESTMapper(m.RESTMapper)
}

func TestGarbageCollectorConstruction(t *testing.T) {
	config := &restclient.Config{}
	tweakableRM := meta.NewDefaultRESTMapper(nil)
	rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}
	metadataClient, err := metadata.NewForConfig(config)
	if err != nil {
		t.Fatal(err)
	}

	podResource := map[schema.GroupVersionResource]struct{}{
		{Version: "v1", Resource: "pods"}: {},
	}
	twoResources := map[schema.GroupVersionResource]struct{}{
		{Version: "v1", Resource: "pods"}:                     {},
		{Group: "tpr.io", Version: "v1", Resource: "unknown"}: {},
	}
	client := fake.NewSimpleClientset()

	sharedInformers := informers.NewSharedInformerFactory(client, 0)
	metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 0)
	// No monitor will be constructed for the non-core resource, but the GC
	// construction will not fail.
	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	logger, tCtx := ktesting.NewTestContext(t)
	gc, err := NewGarbageCollector(tCtx, client, metadataClient, rm, map[schema.GroupResource]struct{}{},
		informerfactory.NewInformerFactory(sharedInformers, metadataInformers), alwaysStarted)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, 0, len(gc.dependencyGraphBuilder.monitors))

	// Make sure resource monitor syncing creates and stops resource monitors.
	tweakableRM.Add(schema.GroupVersionKind{Group: "tpr.io", Version: "v1", Kind: "unknown"}, nil)
	err = gc.resyncMonitors(logger, twoResources)
	if err != nil {
		t.Errorf("Failed adding a monitor: %v", err)
	}
	assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors))

	err = gc.resyncMonitors(logger, podResource)
	if err != nil {
		t.Errorf("Failed removing a monitor: %v", err)
	}
	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))

	go gc.Run(tCtx, 1)

	err = gc.resyncMonitors(logger, twoResources)
	if err != nil {
		t.Errorf("Failed adding a monitor: %v", err)
	}
	assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors))

	err = gc.resyncMonitors(logger, podResource)
	if err != nil {
		t.Errorf("Failed removing a monitor: %v", err)
	}
	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
}

// fakeAction records information about requests to aid in testing.
type fakeAction struct {
	method string
	path   string
	query  string
}

// String returns method=path to aid in testing
func (f *fakeAction) String() string {
	return strings.Join([]string{f.method, f.path}, "=")
}

type FakeResponse struct {
	statusCode int
	content    []byte
}

// fakeActionHandler holds a list of fakeActions received
type fakeActionHandler struct {
	// statusCode and content returned by this handler for different method + path.
	response map[string]FakeResponse

	lock    sync.Mutex
	actions []fakeAction
}

// ServeHTTP logs the action that occurred and always returns the associated status code
func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
	func() {
		f.lock.Lock()
		defer f.lock.Unlock()

		f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path, query: request.URL.RawQuery})
		fakeResponse, ok := f.response[request.Method+request.URL.Path]
		if !ok {
			fakeResponse.statusCode = 200
			fakeResponse.content = []byte(`{"apiVersion": "v1", "kind": "List"}`)
		}
		response.Header().Set("Content-Type", "application/json")
		response.WriteHeader(fakeResponse.statusCode)
		response.Write(fakeResponse.content)
	}()

	// This is to allow the fakeActionHandler to simulate a watch being opened
	if strings.Contains(request.URL.RawQuery, "watch=true") {
		hijacker, ok := response.(http.Hijacker)
		if !ok {
			return
		}
		connection, _, err := hijacker.Hijack()
		if err != nil {
			return
		}
		defer connection.Close()
		time.Sleep(30 * time.Second)
	}
}

// testServerAndClientConfig returns a server that listens and a config that can reference it
func testServerAndClientConfig(handler func(http.ResponseWriter, *http.Request)) (*httptest.Server, *restclient.Config) {
	srv := httptest.NewServer(http.HandlerFunc(handler))
	config := &restclient.Config{
		Host: srv.URL,
	}
	return srv, config
}

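// garbageCollector bundles a GarbageCollector with the stop channel feeding
// its shared informers; tests shut everything down with close(gc.stop).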
type garbageCollector struct {
	*GarbageCollector
	stop chan struct{}
}

func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
	_, ctx := ktesting.NewTestContext(t)
	metadataClient, err := metadata.NewForConfig(config)
	if err != nil {
		t.Fatal(err)
	}

	client := fake.NewSimpleClientset()
	sharedInformers := informers.NewSharedInformerFactory(client, 0)
	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	gc, err := NewGarbageCollector(ctx, client, metadataClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, ignoredResources, sharedInformers, alwaysStarted)
	if err != nil {
		t.Fatal(err)
	}
	stop := make(chan struct{})
	go sharedInformers.Start(stop)
	return garbageCollector{gc, stop}
}

func getPod(podName string, ownerReferences []metav1.OwnerReference) *v1.Pod {
	return &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            podName,
			Namespace:       "ns1",
			UID:             "456",
			OwnerReferences: ownerReferences,
		},
	}
}

func serializeOrDie(t *testing.T, object interface{}) []byte {
	data, err := json.Marshal(object)
	if err != nil {
		t.Fatal(err)
	}
	return data
}

// TestAttemptToDeleteItem verifies that attemptToDeleteItem issues the
// expected requests against the API server.
func TestAttemptToDeleteItem(t *testing.T) {
	pod := getPod("ToBeDeletedPod", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "owner1",
			UID:        "123",
			APIVersion: "v1",
		},
	})
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/owner1": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/pods/ToBeDeletedPod": {
				200,
				serializeOrDie(t, pod),
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()

	gc := setupGC(t, clientConfig)
	defer close(gc.stop)

	item := &node{
		identity: objectReference{
			OwnerReference: metav1.OwnerReference{
				Kind:       pod.Kind,
				APIVersion: pod.APIVersion,
				Name:       pod.Name,
				UID:        pod.UID,
			},
			Namespace: pod.Namespace,
		},
		// owners are intentionally left empty. The attemptToDeleteItem routine should get the latest item from the server.
		owners:  nil,
		virtual: true,
	}
	err := gc.attemptToDeleteItem(context.TODO(), item)
	if err != nil {
		t.Errorf("Unexpected Error: %v", err)
	}
	if !item.virtual {
		t.Errorf("attemptToDeleteItem changed virtual to false unexpectedly")
	}
	expectedActionSet := sets.NewString()
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/replicationcontrollers/owner1")
	expectedActionSet.Insert("DELETE=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")

	actualActionSet := sets.NewString()
	for _, action := range testHandler.actions {
		actualActionSet.Insert(action.String())
	}
	if !expectedActionSet.Equal(actualActionSet) {
		t.Errorf("expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet,
			actualActionSet, expectedActionSet.Difference(actualActionSet))
	}
}

// verifyGraphInvariants verifies that all of a node's owners list the node as a
// dependent and vice versa. uidToNode has all the nodes in the graph.
func verifyGraphInvariants(scenario string, uidToNode map[types.UID]*node, t *testing.T) {
	for myUID, node := range uidToNode {
		for dependentNode := range node.dependents {
			found := false
			for _, owner := range dependentNode.owners {
				if owner.UID == myUID {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("scenario: %s: node %s has node %s as a dependent, but it's not present in the latter node's owners list", scenario, node.identity, dependentNode.identity)
			}
		}

		for _, owner := range node.owners {
			ownerNode, ok := uidToNode[owner.UID]
			if !ok {
				// It's possible that the owner node doesn't exist
				continue
			}
			if _, ok := ownerNode.dependents[node]; !ok {
				t.Errorf("node %s has node %s as an owner, but it's not present in the latter node's dependents list", node.identity, ownerNode.identity)
			}
		}
	}
}
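
// The invariant above can be seen on a minimal hand-built graph. This is an
// illustrative sketch (not part of the upstream suite): a single owner whose
// dependents set mirrors the dependent's owners list passes the check.
func TestVerifyGraphInvariantsExample(t *testing.T) {
	owner := &node{
		identity:   objectReference{OwnerReference: metav1.OwnerReference{UID: "owner-uid"}},
		dependents: make(map[*node]struct{}),
	}
	dependent := &node{
		identity: objectReference{OwnerReference: metav1.OwnerReference{UID: "dependent-uid"}},
		owners:   []metav1.OwnerReference{{UID: "owner-uid"}},
	}
	// Wire both directions; dropping either line would make
	// verifyGraphInvariants report an error.
	owner.dependents[dependent] = struct{}{}
	uidToNode := map[types.UID]*node{
		"owner-uid":     owner,
		"dependent-uid": dependent,
	}
	verifyGraphInvariants("example", uidToNode, t)
}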

func createEvent(eventType eventType, selfUID string, owners []string) event {
	var ownerReferences []metav1.OwnerReference
	for i := 0; i < len(owners); i++ {
		ownerReferences = append(ownerReferences, metav1.OwnerReference{UID: types.UID(owners[i])})
	}
	return event{
		eventType: eventType,
		obj: &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				UID:             types.UID(selfUID),
				OwnerReferences: ownerReferences,
			},
		},
	}
}

func TestProcessEvent(t *testing.T) {
	var testScenarios = []struct {
		name string
		// a series of events that will be supplied to the
		// GraphBuilder.graphChanges.
		events []event
	}{
		{
			name: "test1",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
			},
		},
		{
			name: "test2",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "4", []string{"2"}),
				createEvent(deleteEvent, "2", []string{"doesn't matter"}),
			},
		},
		{
			name: "test3",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "4", []string{"3"}),
				createEvent(updateEvent, "2", []string{"4"}),
			},
		},
		{
			name: "reverse test2",
			events: []event{
				createEvent(addEvent, "4", []string{"2"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "1", []string{}),
				createEvent(deleteEvent, "2", []string{"doesn't matter"}),
			},
		},
	}

	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	for _, scenario := range testScenarios {
		logger, _ := ktesting.NewTestContext(t)

		dependencyGraphBuilder := &GraphBuilder{
			informersStarted: alwaysStarted,
			graphChanges:     workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[*event]()),
			uidToNode: &concurrentUIDToNode{
				uidToNodeLock: sync.RWMutex{},
				uidToNode:     make(map[types.UID]*node),
			},
			attemptToDelete:  workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[*node]()),
			absentOwnerCache: NewReferenceCache(2),
		}
		for i := 0; i < len(scenario.events); i++ {
			dependencyGraphBuilder.graphChanges.Add(&scenario.events[i])
			dependencyGraphBuilder.processGraphChanges(logger)
			verifyGraphInvariants(scenario.name, dependencyGraphBuilder.uidToNode.uidToNode, t)
		}
	}
}

func BenchmarkReferencesDiffs(b *testing.B) {
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		oldRefs := []metav1.OwnerReference{{UID: "1"}, {UID: "2"}}
		newRefs := []metav1.OwnerReference{{UID: "2"}, {UID: "3"}}
		referencesDiffs(oldRefs, newRefs)
	}
}

// TestDependentsRace relies on Go's data race detector to detect data races
// on the dependents field.
func TestDependentsRace(t *testing.T) {
	logger, _ := ktesting.NewTestContext(t)

	gc := setupGC(t, &restclient.Config{})
	defer close(gc.stop)

	const updates = 100
	owner := &node{dependents: make(map[*node]struct{})}
	ownerUID := types.UID("owner")
	gc.dependencyGraphBuilder.uidToNode.Write(owner)
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		for i := 0; i < updates; i++ {
			dependent := &node{}
			gc.dependencyGraphBuilder.addDependentToOwners(logger, dependent, []metav1.OwnerReference{{UID: ownerUID}})
			gc.dependencyGraphBuilder.removeDependentFromOwners(dependent, []metav1.OwnerReference{{UID: ownerUID}})
		}
	}()
	go func() {
		defer wg.Done()
		for i := 0; i < updates; i++ {
			gc.attemptToOrphan.Add(owner)
			gc.processAttemptToOrphanWorker(logger)
		}
	}()
	wg.Wait()
}

func podToGCNode(pod *v1.Pod) *node {
	return &node{
		identity: objectReference{
			OwnerReference: metav1.OwnerReference{
				Kind:       pod.Kind,
				APIVersion: pod.APIVersion,
				Name:       pod.Name,
				UID:        pod.UID,
			},
			Namespace: pod.Namespace,
		},
		// owners are intentionally left empty. The attemptToDeleteItem routine should get the latest item from the server.
		owners: nil,
	}
}

func TestAbsentOwnerCache(t *testing.T) {
	rc1Pod1 := getPod("rc1Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc1",
			UID:        "1",
			APIVersion: "v1",
			Controller: pointer.Bool(true),
		},
	})
	rc1Pod2 := getPod("rc1Pod2", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc1",
			UID:        "1",
			APIVersion: "v1",
			Controller: pointer.Bool(false),
		},
	})
	rc2Pod1 := getPod("rc2Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc2",
			UID:        "2",
			APIVersion: "v1",
		},
	})
	rc3Pod1 := getPod("rc3Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc3",
			UID:        "3",
			APIVersion: "v1",
		},
	})
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/namespaces/ns1/pods/rc1Pod1": {
				200,
				serializeOrDie(t, rc1Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc1Pod2": {
				200,
				serializeOrDie(t, rc1Pod2),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc2Pod1": {
				200,
				serializeOrDie(t, rc2Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc3Pod1": {
				200,
				serializeOrDie(t, rc3Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc1": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc2": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc3": {
				404,
				[]byte{},
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	gc := setupGC(t, clientConfig)
	defer close(gc.stop)
	gc.absentOwnerCache = NewReferenceCache(2)
	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc1Pod1))
	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc2Pod1))
	// rc1 should already be in the cache, so no request should be sent. rc1 should be promoted in the LRU cache.
	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc1Pod2))
	// after this call, rc2 should be evicted from the cache
	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc3Pod1))
	// check cache
	if !gc.absentOwnerCache.Has(objectReference{Namespace: "ns1", OwnerReference: metav1.OwnerReference{Kind: "ReplicationController", Name: "rc1", UID: "1", APIVersion: "v1"}}) {
		t.Errorf("expected rc1 to be in the cache")
	}
	if gc.absentOwnerCache.Has(objectReference{Namespace: "ns1", OwnerReference: metav1.OwnerReference{Kind: "ReplicationController", Name: "rc2", UID: "2", APIVersion: "v1"}}) {
		t.Errorf("expected rc2 to not exist in the cache")
	}
	if !gc.absentOwnerCache.Has(objectReference{Namespace: "ns1", OwnerReference: metav1.OwnerReference{Kind: "ReplicationController", Name: "rc3", UID: "3", APIVersion: "v1"}}) {
		t.Errorf("expected rc3 to be in the cache")
	}
	// check the requests sent to the server
	count := 0
	for _, action := range testHandler.actions {
		if action.String() == "GET=/api/v1/namespaces/ns1/replicationcontrollers/rc1" {
			count++
		}
	}
	if count != 1 {
		t.Errorf("expected only 1 GET rc1 request, got %d", count)
	}
}

func TestDeleteOwnerRefPatch(t *testing.T) {
	original := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1"},
				{UID: "2"},
				{UID: "3"},
			},
		},
	}
	originalData := serializeOrDie(t, original)
	expected := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1"},
			},
		},
	}
	p, err := c.GenerateDeleteOwnerRefStrategicMergeBytes("100", []types.UID{"2", "3"})
	if err != nil {
		t.Fatal(err)
	}
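	// The generated patch is a strategic merge patch keyed by the dependent's
	// UID that deletes owner references by UID. Illustratively (exact bytes
	// and field order unverified here), it looks like:
	//   {"metadata":{"ownerReferences":[{"$patch":"delete","uid":"2"},{"$patch":"delete","uid":"3"}],"uid":"100"}}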
	patched, err := strategicpatch.StrategicMergePatch(originalData, p, v1.Pod{})
	if err != nil {
		t.Fatal(err)
	}
	var got v1.Pod
	if err := json.Unmarshal(patched, &got); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(expected, got) {
		t.Errorf("expected: %#v,\ngot: %#v", expected, got)
	}
}

func TestUnblockOwnerReference(t *testing.T) {
	trueVar := true
	falseVar := false
	original := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1", BlockOwnerDeletion: &trueVar},
				{UID: "2", BlockOwnerDeletion: &falseVar},
				{UID: "3"},
			},
		},
	}
	originalData := serializeOrDie(t, original)
	expected := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1", BlockOwnerDeletion: &falseVar},
				{UID: "2", BlockOwnerDeletion: &falseVar},
				{UID: "3"},
			},
		},
	}
	accessor, err := meta.Accessor(&original)
	if err != nil {
		t.Fatal(err)
	}
	n := node{
		owners: accessor.GetOwnerReferences(),
	}
	patch, err := n.unblockOwnerReferencesStrategicMergePatch()
	if err != nil {
		t.Fatal(err)
	}
	patched, err := strategicpatch.StrategicMergePatch(originalData, patch, v1.Pod{})
	if err != nil {
		t.Fatal(err)
	}
	var got v1.Pod
	if err := json.Unmarshal(patched, &got); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(expected, got) {
		t.Errorf("expected: %#v,\ngot: %#v", expected, got)
		t.Errorf("expected: %#v,\ngot: %#v", expected.OwnerReferences, got.OwnerReferences)
		for _, ref := range got.OwnerReferences {
			t.Errorf("ref.UID=%s, ref.BlockOwnerDeletion=%v", ref.UID, *ref.BlockOwnerDeletion)
		}
	}
}

func TestOrphanDependentsFailure(t *testing.T) {
	logger, _ := ktesting.NewTestContext(t)

	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"PATCH" + "/api/v1/namespaces/ns1/pods/pod": {
				409,
				[]byte{},
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()

	gc := setupGC(t, clientConfig)
	defer close(gc.stop)

	dependents := []*node{
		{
			identity: objectReference{
				OwnerReference: metav1.OwnerReference{
					Kind:       "Pod",
					APIVersion: "v1",
					Name:       "pod",
				},
				Namespace: "ns1",
			},
		},
	}
	err := gc.orphanDependents(logger, objectReference{}, dependents)
	expected := `the server reported a conflict`
	if err == nil || !strings.Contains(err.Error(), expected) {
		if err != nil {
			t.Errorf("expected error containing text %q, got %q", expected, err.Error())
		} else {
			t.Errorf("expected error containing text %q, got nil", expected)
		}
	}
}

// TestGetDeletableResources ensures GetDeletableResources always returns
// something usable regardless of discovery output.
func TestGetDeletableResources(t *testing.T) {
	tests := map[string]struct {
		serverResources    []*metav1.APIResourceList
		err                error
		deletableResources map[schema.GroupVersionResource]struct{}
	}{
		"no error": {
			serverResources: []*metav1.APIResourceList{
				{
					// Valid GroupVersion
					GroupVersion: "apps/v1",
					APIResources: []metav1.APIResource{
						{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
						{Name: "services", Namespaced: true, Kind: "Service"},
					},
				},
				{
					// Invalid GroupVersion, should be ignored
					GroupVersion: "foo//whatever",
					APIResources: []metav1.APIResource{
						{Name: "bars", Namespaced: true, Kind: "Bar", Verbs: metav1.Verbs{"delete", "list", "watch"}},
					},
				},
				{
					// Valid GroupVersion, missing required verbs, should be ignored
					GroupVersion: "acme/v1",
					APIResources: []metav1.APIResource{
						{Name: "widgets", Namespaced: true, Kind: "Widget", Verbs: metav1.Verbs{"delete"}},
					},
				},
			},
			err: nil,
			deletableResources: map[schema.GroupVersionResource]struct{}{
				{Group: "apps", Version: "v1", Resource: "pods"}: {},
			},
		},
		"nonspecific failure, includes usable results": {
			serverResources: []*metav1.APIResourceList{
				{
					GroupVersion: "apps/v1",
					APIResources: []metav1.APIResource{
						{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
						{Name: "services", Namespaced: true, Kind: "Service"},
					},
				},
			},
			err: fmt.Errorf("internal error"),
			deletableResources: map[schema.GroupVersionResource]struct{}{
				{Group: "apps", Version: "v1", Resource: "pods"}: {},
			},
		},
		"partial discovery failure, includes usable results": {
			serverResources: []*metav1.APIResourceList{
				{
					GroupVersion: "apps/v1",
					APIResources: []metav1.APIResource{
						{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
						{Name: "services", Namespaced: true, Kind: "Service"},
					},
				},
			},
			err: &discovery.ErrGroupDiscoveryFailed{
				Groups: map[schema.GroupVersion]error{
					{Group: "foo", Version: "v1"}: fmt.Errorf("discovery failure"),
				},
			},
			deletableResources: map[schema.GroupVersionResource]struct{}{
				{Group: "apps", Version: "v1", Resource: "pods"}: {},
			},
		},
		"discovery failure, no results": {
			serverResources:    nil,
			err:                fmt.Errorf("internal error"),
			deletableResources: map[schema.GroupVersionResource]struct{}{},
		},
	}

	logger, _ := ktesting.NewTestContext(t)
	for name, test := range tests {
		t.Logf("testing %q", name)
		client := &fakeServerResources{
			PreferredResources: test.serverResources,
			Error:              test.err,
		}
		actual, actualErr := GetDeletableResources(logger, client)
		if !reflect.DeepEqual(test.deletableResources, actual) {
			t.Errorf("expected resources:\n%v\ngot:\n%v", test.deletableResources, actual)
		}
		if !reflect.DeepEqual(test.err, actualErr) {
			t.Errorf("expected error:\n%v\ngot:\n%v", test.err, actualErr)
		}
	}
}

// TestGarbageCollectorSync ensures that a discovery client error
// will not cause the garbage collector to block infinitely.
func TestGarbageCollectorSync(t *testing.T) {
	serverResources := []*metav1.APIResourceList{
		{
			GroupVersion: "v1",
			APIResources: []metav1.APIResource{
				{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
			},
		},
		{
			GroupVersion: "apps/v1",
			APIResources: []metav1.APIResource{
				{Name: "deployments", Namespaced: true, Kind: "Deployment", Verbs: metav1.Verbs{"delete", "list", "watch"}},
			},
		},
	}
	appsV1Error := &discovery.ErrGroupDiscoveryFailed{Groups: map[schema.GroupVersion]error{{Group: "apps", Version: "v1"}: fmt.Errorf(":-/")}}

	unsyncableServerResources := []*metav1.APIResourceList{
		{
			GroupVersion: "v1",
			APIResources: []metav1.APIResource{
				{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
				{Name: "secrets", Namespaced: true, Kind: "Secret", Verbs: metav1.Verbs{"delete", "list", "watch"}},
			},
		},
	}
	fakeDiscoveryClient := &fakeServerResources{
		PreferredResources: serverResources,
		Error:              nil,
		Lock:               sync.Mutex{},
		InterfaceUsedCount: 0,
	}

	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/pods": {
				200,
				[]byte("{}"),
			},
			"GET" + "/apis/apps/v1/deployments": {
				200,
				[]byte("{}"),
			},
			"GET" + "/api/v1/secrets": {
				404,
				[]byte("{}"),
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	clientConfig.ContentConfig.NegotiatedSerializer = nil
	client, err := kubernetes.NewForConfig(clientConfig)
	if err != nil {
		t.Fatal(err)
	}

	tweakableRM := meta.NewDefaultRESTMapper(nil)
	tweakableRM.AddSpecific(schema.GroupVersionKind{Version: "v1", Kind: "Pod"}, schema.GroupVersionResource{Version: "v1", Resource: "pods"}, schema.GroupVersionResource{Version: "v1", Resource: "pod"}, meta.RESTScopeNamespace)
	tweakableRM.AddSpecific(schema.GroupVersionKind{Version: "v1", Kind: "Secret"}, schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, schema.GroupVersionResource{Version: "v1", Resource: "secret"}, meta.RESTScopeNamespace)
	tweakableRM.AddSpecific(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployment"}, meta.RESTScopeNamespace)
	rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}
	metadataClient, err := metadata.NewForConfig(clientConfig)
	if err != nil {
		t.Fatal(err)
	}

	sharedInformers := informers.NewSharedInformerFactory(client, 0)

	tCtx := ktesting.Init(t)
	defer tCtx.Cancel("test has completed")
	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	gc, err := NewGarbageCollector(tCtx, client, metadataClient, rm, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
	if err != nil {
		t.Fatal(err)
	}

	go gc.Run(tCtx, 1)
	// The pseudo-code of GarbageCollector.Sync():
	// GarbageCollector.Sync(client, period, stopCh):
	//    wait.Until() loops with `period` until the `stopCh` is closed:
	//        wait.PollImmediateUntil() loops every 100ms (hardcoded) until the `stopCh` is closed:
	//            GetDeletableResources()
	//            gc.resyncMonitors()
	//            cache.WaitForNamedCacheSync() loops with `syncedPollPeriod` (hardcoded to 100ms), until either its stop channel is closed after `period`, or all caches synced.
	//
	// Setting the period to 200ms allows the WaitForCacheSync() to check
	// for cache sync ~2 times in every wait.PollImmediateUntil() loop.
	//
	// The 1s sleep in the test allows GetDeletableResources and
	// gc.resyncMonitors to run ~5 times to ensure the changes to the
	// fakeDiscoveryClient are picked up.
	go gc.Sync(tCtx, fakeDiscoveryClient, 200*time.Millisecond)

	// Wait until the sync discovers the initial resources
	time.Sleep(1 * time.Second)

	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
	if err != nil {
		t.Fatalf("Expected garbagecollector.Sync to be running but it is blocked: %v", err)
	}
	assertMonitors(t, gc, "pods", "deployments")

	// Simulate the discovery client returning an error
	fakeDiscoveryClient.setPreferredResources(nil, fmt.Errorf("error calling discoveryClient.ServerPreferredResources()"))

	// Wait until sync discovers the change
	time.Sleep(1 * time.Second)
	// No monitor changes
	assertMonitors(t, gc, "pods", "deployments")

	// Remove the error from being returned and see if the garbage collector sync is still working
	fakeDiscoveryClient.setPreferredResources(serverResources, nil)

	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
	if err != nil {
		t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
	}
	assertMonitors(t, gc, "pods", "deployments")

	// Simulate the discovery client returning a resource the restmapper can resolve, but will not sync caches
	fakeDiscoveryClient.setPreferredResources(unsyncableServerResources, nil)

	// Wait until sync discovers the change
	time.Sleep(1 * time.Second)
	assertMonitors(t, gc, "pods", "secrets")

	// Put the resources back to normal and ensure garbage collector sync recovers
	fakeDiscoveryClient.setPreferredResources(serverResources, nil)

	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
	if err != nil {
		t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
	}
	assertMonitors(t, gc, "pods", "deployments")

	// Partial discovery failure
	fakeDiscoveryClient.setPreferredResources(unsyncableServerResources, appsV1Error)
	// Wait until sync discovers the change
	time.Sleep(1 * time.Second)
	// Deployments monitor kept
	assertMonitors(t, gc, "pods", "deployments", "secrets")

	// Put the resources back to normal and ensure garbage collector sync recovers
	fakeDiscoveryClient.setPreferredResources(serverResources, nil)
	// Wait until sync discovers the change
	time.Sleep(1 * time.Second)
	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
	if err != nil {
		t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
	}
	// Unsyncable monitor removed
	assertMonitors(t, gc, "pods", "deployments")
}

func assertMonitors(t *testing.T, gc *GarbageCollector, resources ...string) {
	t.Helper()
	expected := sets.NewString(resources...)
	actual := sets.NewString()
	for m := range gc.dependencyGraphBuilder.monitors {
		actual.Insert(m.Resource)
	}
	if !actual.Equal(expected) {
		t.Fatalf("expected monitors %v, got %v", expected.List(), actual.List())
	}
}

func expectSyncNotBlocked(fakeDiscoveryClient *fakeServerResources, workerLock *sync.RWMutex) error {
	before := fakeDiscoveryClient.getInterfaceUsedCount()
	t := 1 * time.Second
	time.Sleep(t)
	after := fakeDiscoveryClient.getInterfaceUsedCount()
	if before == after {
		return fmt.Errorf("discoveryClient.ServerPreferredResources() called %d times over %v", after-before, t)
	}

	workerLockAcquired := make(chan struct{})
	go func() {
		workerLock.Lock()
		defer workerLock.Unlock()
		close(workerLockAcquired)
	}()
	select {
	case <-workerLockAcquired:
		return nil
	case <-time.After(t):
		return fmt.Errorf("workerLock blocked for at least %v", t)
	}
}

type fakeServerResources struct {
	PreferredResources []*metav1.APIResourceList
	Error              error
	Lock               sync.Mutex
	InterfaceUsedCount int
}

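// Compile-time check (an illustrative assertion, not from the original file):
// fakeServerResources is passed where a discovery client is expected
// (GetDeletableResources, gc.Sync), so assert here that it satisfies
// discovery.ServerResourcesInterface.
var _ discovery.ServerResourcesInterface = &fakeServerResources{}
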
func (*fakeServerResources) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
	return nil, nil
}

func (*fakeServerResources) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) {
	return nil, nil, nil
}

func (f *fakeServerResources) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	f.InterfaceUsedCount++
	return f.PreferredResources, f.Error
}

func (f *fakeServerResources) setPreferredResources(resources []*metav1.APIResourceList, err error) {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	f.PreferredResources = resources
	f.Error = err
}

func (f *fakeServerResources) getInterfaceUsedCount() int {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	return f.InterfaceUsedCount
}

func (*fakeServerResources) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
	return nil, nil
}

  1048  func TestConflictingData(t *testing.T) {
  1049  	pod1ns1 := makeID("v1", "Pod", "ns1", "podname1", "poduid1")
  1050  	pod2ns1 := makeID("v1", "Pod", "ns1", "podname2", "poduid2")
  1051  	pod2ns2 := makeID("v1", "Pod", "ns2", "podname2", "poduid2")
  1052  	node1 := makeID("v1", "Node", "", "nodename", "nodeuid1")
  1053  
  1054  	role1v1beta1 := makeID("rbac.authorization.k8s.io/v1beta1", "Role", "ns1", "role1", "roleuid1")
  1055  	role1v1 := makeID("rbac.authorization.k8s.io/v1", "Role", "ns1", "role1", "roleuid1")
  1056  
  1057  	deployment1apps := makeID("apps/v1", "Deployment", "ns1", "deployment1", "deploymentuid1")
  1058  	deployment1extensions := makeID("extensions/v1beta1", "Deployment", "ns1", "deployment1", "deploymentuid1") // not served, still referenced
  1059  
  1060  	// when a reference is made to node1 from a namespaced resource, the virtual node inserted has namespace coordinates
  1061  	node1WithNamespace := makeID("v1", "Node", "ns1", "nodename", "nodeuid1")
  1062  
  1063  	// when a reference is made to pod1 from a cluster-scoped resource, the virtual node inserted has no namespace
  1064  	pod1nonamespace := makeID("v1", "Pod", "", "podname1", "poduid1")
  1065  
  1066  	badSecretReferenceWithDeploymentUID := makeID("v1", "Secret", "ns1", "secretname", string(deployment1apps.UID))
  1067  	badChildPod := makeID("v1", "Pod", "ns1", "badpod", "badpoduid")
  1068  	goodChildPod := makeID("v1", "Pod", "ns1", "goodpod", "goodpoduid")
  1069  
  1070  	var testScenarios = []struct {
  1071  		name           string
  1072  		initialObjects []runtime.Object
  1073  		steps          []step
  1074  	}{
  1075  		{
  1076  			name: "good child in ns1 -> cluster-scoped owner",
  1077  			steps: []step{
  1078  				// setup
  1079  				createObjectInClient("", "v1", "nodes", "", makeMetadataObj(node1)),
  1080  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, node1)),
  1081  				// observe namespaced child with not-yet-observed cluster-scoped parent
  1082  				processEvent(makeAddEvent(pod1ns1, node1)),
  1083  				assertState(state{
  1084  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(node1)), makeNode(node1WithNamespace, virtual)}, // virtual node1 (matching child namespace)
  1085  					pendingAttemptToDelete: []*node{makeNode(node1WithNamespace, virtual)},                                       // virtual node1 queued for attempted delete
  1086  				}),
  1087  				// handle queued delete of virtual node
  1088  				processAttemptToDelete(1),
  1089  				assertState(state{
  1090  					clientActions:          []string{"get /v1, Resource=nodes name=nodename"},
  1091  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(node1)), makeNode(node1WithNamespace, virtual)}, // virtual node1 (matching child namespace)
  1092  					pendingAttemptToDelete: []*node{makeNode(node1WithNamespace, virtual)},                                       // virtual node1 still not observed, got requeued
  1093  				}),
  1094  				// observe cluster-scoped parent
  1095  				processEvent(makeAddEvent(node1)),
  1096  				assertState(state{
  1097  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(node1)), makeNode(node1)}, // node1 switched to observed, fixed namespace coordinate
  1098  					pendingAttemptToDelete: []*node{makeNode(node1WithNamespace, virtual)},                 // virtual node1 queued for attempted delete
  1099  				}),
  1100  				// handle queued delete of virtual node
  1101  				// final state: child and parent present in graph, no queued actions
  1102  				processAttemptToDelete(1),
  1103  				assertState(state{
  1104  					graphNodes: []*node{makeNode(pod1ns1, withOwners(node1)), makeNode(node1)},
  1105  				}),
  1106  			},
  1107  		},
  1108  		// child in namespace A with owner reference to namespaced type in namespace B
  1109  		// * should be deleted immediately
  1110  		// * event should be logged in namespace A with involvedObject of bad-child indicating the error
  1111  		{
  1112  			name: "bad child in ns1 -> owner in ns2 (child first)",
  1113  			steps: []step{
  1114  				// 0,1: setup
  1115  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, pod2ns1)),
  1116  				createObjectInClient("", "v1", "pods", "ns2", makeMetadataObj(pod2ns2)),
  1117  				// 2,3: observe namespaced child with not-yet-observed namespace-scoped parent
  1118  				processEvent(makeAddEvent(pod1ns1, pod2ns2)),
  1119  				assertState(state{
  1120  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(pod2ns2)), makeNode(pod2ns1, virtual)}, // virtual pod2 (matching child namespace)
  1121  					pendingAttemptToDelete: []*node{makeNode(pod2ns1, virtual)},                                         // virtual pod2 queued for attempted delete
  1122  				}),
  1123  				// 4,5: observe parent
  1124  				processEvent(makeAddEvent(pod2ns2)),
  1125  				assertState(state{
  1126  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(pod2ns2)), makeNode(pod2ns2)}, // pod2 is no longer virtual, namespace coordinate is corrected
  1127  					pendingAttemptToDelete: []*node{makeNode(pod2ns1, virtual), makeNode(pod1ns1)},             // virtual pod2 still queued for attempted delete, bad child pod1 queued because it disagreed with observed parent
  1128  					events:                 []string{`Warning OwnerRefInvalidNamespace ownerRef [v1/Pod, namespace: ns1, name: podname2, uid: poduid2] does not exist in namespace "ns1" involvedObject{kind=Pod,apiVersion=v1}`},
  1129  				}),
  1130  				// 6,7: handle queued delete of virtual parent
  1131  				processAttemptToDelete(1),
  1132  				assertState(state{
  1133  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(pod2ns2)), makeNode(pod2ns2)},
  1134  					pendingAttemptToDelete: []*node{makeNode(pod1ns1)}, // bad child pod1 queued because it disagreed with observed parent
  1135  				}),
  1136  				// 8,9: handle queued delete of bad child
  1137  				processAttemptToDelete(1),
  1138  				assertState(state{
  1139  					clientActions: []string{
  1140  						"get /v1, Resource=pods ns=ns1 name=podname1",    // lookup of pod1 pre-delete
  1141  						"get /v1, Resource=pods ns=ns1 name=podname2",    // verification bad parent reference is absent
  1142  						"delete /v1, Resource=pods ns=ns1 name=podname1", // pod1 delete
  1143  					},
  1144  					graphNodes:       []*node{makeNode(pod1ns1, withOwners(pod2ns2)), makeNode(pod2ns2)},
  1145  					absentOwnerCache: []objectReference{pod2ns1}, // cached absence of bad parent
  1146  				}),
  1147  				// 10,11: observe delete issued in step 8
  1148  				// final state: parent present in graph, no queued actions
  1149  				processEvent(makeDeleteEvent(pod1ns1)),
  1150  				assertState(state{
  1151  					graphNodes:       []*node{makeNode(pod2ns2)}, // only good parent remains
  1152  					absentOwnerCache: []objectReference{pod2ns1}, // cached absence of bad parent
  1153  				}),
  1154  			},
  1155  		},
  1156  		{
  1157  			name: "bad child in ns1 -> owner in ns2 (owner first)",
  1158  			steps: []step{
  1159  				// 0,1: setup
  1160  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, pod2ns1)),
  1161  				createObjectInClient("", "v1", "pods", "ns2", makeMetadataObj(pod2ns2)),
  1162  				// 2,3: observe parent
  1163  				processEvent(makeAddEvent(pod2ns2)),
  1164  				assertState(state{
  1165  					graphNodes: []*node{makeNode(pod2ns2)},
  1166  				}),
  1167  				// 4,5: observe namespaced child with invalid cross-namespace reference to parent
  1168  				processEvent(makeAddEvent(pod1ns1, pod2ns1)),
  1169  				assertState(state{
  1170  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(pod2ns1)), makeNode(pod2ns2)},
  1171  					pendingAttemptToDelete: []*node{makeNode(pod1ns1)}, // bad child queued for attempted delete
  1172  					events:                 []string{`Warning OwnerRefInvalidNamespace ownerRef [v1/Pod, namespace: ns1, name: podname2, uid: poduid2] does not exist in namespace "ns1" involvedObject{kind=Pod,apiVersion=v1}`},
  1173  				}),
  1174  				// 6,7: handle queued delete of bad child
  1175  				processAttemptToDelete(1),
  1176  				assertState(state{
  1177  					clientActions: []string{
  1178  						"get /v1, Resource=pods ns=ns1 name=podname1",    // lookup of pod1 pre-delete
  1179  						"get /v1, Resource=pods ns=ns1 name=podname2",    // verification bad parent reference is absent
  1180  						"delete /v1, Resource=pods ns=ns1 name=podname1", // pod1 delete
  1181  					},
  1182  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(pod2ns1)), makeNode(pod2ns2)},
  1183  					pendingAttemptToDelete: []*node{},
  1184  					absentOwnerCache:       []objectReference{pod2ns1}, // cached absence of bad parent
  1185  				}),
  1186  				// 8,9: observe delete issued in step 6
  1187  				// final state: parent present in graph, no queued actions
  1188  				processEvent(makeDeleteEvent(pod1ns1)),
  1189  				assertState(state{
  1190  					graphNodes:       []*node{makeNode(pod2ns2)}, // only good parent remains
  1191  					absentOwnerCache: []objectReference{pod2ns1}, // cached absence of bad parent
  1192  				}),
  1193  			},
  1194  		},
  1195  		// child that is cluster-scoped with owner reference to namespaced type in namespace B
  1196  		// * should not be deleted
  1197  		// * event should be logged in namespace kube-system with involvedObject of bad-child indicating the error
  1198  		{
  1199  			name: "bad cluster-scoped child -> owner in ns1 (child first)",
  1200  			steps: []step{
  1201  				// setup
  1202  				createObjectInClient("", "v1", "nodes", "", makeMetadataObj(node1, pod1ns1)),
  1203  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1)),
  1204  				// 2,3: observe cluster-scoped child with not-yet-observed namespaced parent
  1205  				processEvent(makeAddEvent(node1, pod1ns1)),
  1206  				assertState(state{
  1207  					graphNodes:             []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1nonamespace, virtual)}, // virtual pod1 (with no namespace)
  1208  					pendingAttemptToDelete: []*node{makeNode(pod1nonamespace, virtual)},                                               // virtual pod1 queued for attempted delete
  1209  				}),
  1210  				// 4,5: handle queued delete of virtual pod1
  1211  				processAttemptToDelete(1),
  1212  				assertState(state{
  1213  					graphNodes:             []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1nonamespace, virtual)}, // virtual pod1 (with no namespace)
  1214  					pendingAttemptToDelete: []*node{},                                                                                 // namespace-scoped virtual object without a namespace coordinate not re-queued
  1215  				}),
  1216  				// 6,7: observe namespace-scoped parent
  1217  				processEvent(makeAddEvent(pod1ns1)),
  1218  				assertState(state{
  1219  					graphNodes:             []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1ns1)}, // pod1 namespace coordinate corrected, made non-virtual
  1220  					events:                 []string{`Warning OwnerRefInvalidNamespace ownerRef [v1/Pod, namespace: , name: podname1, uid: poduid1] does not exist in namespace "" involvedObject{kind=Node,apiVersion=v1}`},
  1221  					pendingAttemptToDelete: []*node{makeNode(node1, withOwners(pod1ns1))}, // bad cluster-scoped child added to attemptToDelete queue
  1222  				}),
  1223  				// 8,9: handle queued attempted delete of bad cluster-scoped child
  1224  				// final state: parent and child present in graph, no queued actions
  1225  				processAttemptToDelete(1),
  1226  				assertState(state{
  1227  					clientActions: []string{
  1228  						"get /v1, Resource=nodes name=nodename", // lookup of node pre-delete
  1229  					},
  1230  					graphNodes: []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1ns1)},
  1231  				}),
  1232  			},
  1233  		},
  1234  		{
  1235  			name: "bad cluster-scoped child -> owner in ns1 (owner first)",
  1236  			steps: []step{
  1237  				// setup
  1238  				createObjectInClient("", "v1", "nodes", "", makeMetadataObj(node1, pod1ns1)),
  1239  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1)),
  1240  				// 2,3: observe namespace-scoped parent
  1241  				processEvent(makeAddEvent(pod1ns1)),
  1242  				assertState(state{
  1243  					graphNodes: []*node{makeNode(pod1ns1)},
  1244  				}),
  1245  				// 4,5: observe cluster-scoped child
  1246  				processEvent(makeAddEvent(node1, pod1ns1)),
  1247  				assertState(state{
  1248  					graphNodes:             []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1ns1)},
  1249  					events:                 []string{`Warning OwnerRefInvalidNamespace ownerRef [v1/Pod, namespace: , name: podname1, uid: poduid1] does not exist in namespace "" involvedObject{kind=Node,apiVersion=v1}`},
  1250  					pendingAttemptToDelete: []*node{makeNode(node1, withOwners(pod1ns1))}, // bad cluster-scoped child added to attemptToDelete queue
  1251  				}),
  1252  				// 6,7: handle queued attempted delete of bad cluster-scoped child
  1253  				// final state: parent and child present in graph, no queued actions
  1254  				processAttemptToDelete(1),
  1255  				assertState(state{
  1256  					clientActions: []string{
  1257  						"get /v1, Resource=nodes name=nodename", // lookup of node pre-delete
  1258  					},
  1259  					graphNodes: []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1ns1)},
  1260  				}),
  1261  			},
  1262  		},
  1263  		// child pointing at non-preferred still-served apiVersion of parent object (e.g. rbac/v1beta1)
  1264  		// * should not be deleted prematurely
  1265  		// * should not repeatedly poll attemptToDelete while waiting
  1266  		// * should be deleted when the actual parent is deleted
  1267  		{
  1268  			name: "good child -> existing owner with non-preferred accessible API version",
  1269  			steps: []step{
  1270  				// setup
  1271  				createObjectInClient("rbac.authorization.k8s.io", "v1", "roles", "ns1", makeMetadataObj(role1v1)),
  1272  				createObjectInClient("rbac.authorization.k8s.io", "v1beta1", "roles", "ns1", makeMetadataObj(role1v1beta1)),
  1273  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, role1v1beta1)),
  1274  				// 3,4: observe child
  1275  				processEvent(makeAddEvent(pod1ns1, role1v1beta1)),
  1276  				assertState(state{
  1277  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1beta1, virtual)},
  1278  					pendingAttemptToDelete: []*node{makeNode(role1v1beta1, virtual)}, // virtual parent enqueued for delete attempt
  1279  				}),
  1280  				// 5,6: handle queued attempted delete of virtual parent
  1281  				processAttemptToDelete(1),
  1282  				assertState(state{
  1283  					clientActions: []string{
  1284  						"get rbac.authorization.k8s.io/v1beta1, Resource=roles ns=ns1 name=role1", // lookup of node pre-delete
  1285  					},
  1286  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1beta1, virtual)},
  1287  					pendingAttemptToDelete: []*node{makeNode(role1v1beta1, virtual)}, // not yet observed, still in the attemptToDelete queue
  1288  				}),
  1289  				// 7,8: observe parent via v1
  1290  				processEvent(makeAddEvent(role1v1)),
  1291  				assertState(state{
  1292  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1)},               // parent version/virtual state gets corrected
  1293  					pendingAttemptToDelete: []*node{makeNode(role1v1beta1, virtual), makeNode(pod1ns1, withOwners(role1v1beta1))}, // virtual parent and mismatched child enqueued for delete attempt
  1294  				}),
  1295  				// 9,10: process attemptToDelete
   1296  				// virtual node dropped from attemptToDelete with no further action because the real node has now been observed
  1297  				processAttemptToDelete(1),
  1298  				assertState(state{
  1299  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1)},
  1300  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(role1v1beta1))}, // mismatched child enqueued for delete attempt
  1301  				}),
   1302  				// 11,12: process attemptToDelete for mismatched child
  1303  				processAttemptToDelete(1),
  1304  				assertState(state{
  1305  					clientActions: []string{
  1306  						"get /v1, Resource=pods ns=ns1 name=podname1",                             // lookup of child pre-delete
  1307  						"get rbac.authorization.k8s.io/v1beta1, Resource=roles ns=ns1 name=role1", // verifying parent is solid
  1308  					},
  1309  					graphNodes: []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1)},
  1310  				}),
  1311  				// 13,14: teardown
  1312  				deleteObjectFromClient("rbac.authorization.k8s.io", "v1", "roles", "ns1", "role1"),
  1313  				deleteObjectFromClient("rbac.authorization.k8s.io", "v1beta1", "roles", "ns1", "role1"),
  1314  				// 15,16: observe delete via v1
  1315  				processEvent(makeDeleteEvent(role1v1)),
  1316  				assertState(state{
  1317  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(role1v1beta1))}, // only child remains
  1318  					absentOwnerCache:       []objectReference{role1v1},                           // cached absence of parent via v1
  1319  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(role1v1beta1))},
  1320  				}),
  1321  				// 17,18: process attemptToDelete for child
  1322  				processAttemptToDelete(1),
  1323  				assertState(state{
  1324  					clientActions: []string{
  1325  						"get /v1, Resource=pods ns=ns1 name=podname1",                             // lookup of child pre-delete
  1326  						"get rbac.authorization.k8s.io/v1beta1, Resource=roles ns=ns1 name=role1", // verifying parent is solid
  1327  						"delete /v1, Resource=pods ns=ns1 name=podname1",
  1328  					},
  1329  					absentOwnerCache: []objectReference{role1v1, role1v1beta1}, // cached absence of v1beta1 role
  1330  					graphNodes:       []*node{makeNode(pod1ns1, withOwners(role1v1beta1))},
  1331  				}),
  1332  				// 19,20: observe delete issued in step 17
  1333  				// final state: empty graph, no queued actions
  1334  				processEvent(makeDeleteEvent(pod1ns1)),
  1335  				assertState(state{
  1336  					absentOwnerCache: []objectReference{role1v1, role1v1beta1},
  1337  				}),
  1338  			},
  1339  		},
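         		// The scenario above relies on graph nodes being keyed by UID: the child's
         		// rbac/v1beta1 ownerRef and the parent observed via rbac/v1 coalesce into a
         		// single node once the real object is seen, so version skew alone never
         		// deletes anything. A minimal sketch of that matching rule (hypothetical
         		// helper, not the controller's actual code):
         		//
         		//	func sameOwner(ref metav1.OwnerReference, owner metav1.Object) bool {
         		//		// UID match is authoritative; apiVersion may legitimately differ
         		//		// across served versions of the same resource.
         		//		return ref.UID == owner.GetUID()
         		//	}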
  1340  		// child pointing at no-longer-served apiVersion of still-existing parent object (e.g. extensions/v1beta1 deployment)
  1341  		// * should not be deleted (this is indistinguishable from referencing an unknown kind/version)
  1342  		// * virtual parent should not repeatedly poll attemptToDelete once real parent is observed
  1343  		{
  1344  			name: "child -> existing owner with inaccessible API version (child first)",
  1345  			steps: []step{
  1346  				// setup
  1347  				createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)),
  1348  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, deployment1extensions)),
  1349  				// 2,3: observe child
  1350  				processEvent(makeAddEvent(pod1ns1, deployment1extensions)),
  1351  				assertState(state{
  1352  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)},
  1353  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)}, // virtual parent enqueued for delete attempt
  1354  				}),
  1355  				// 4,5: handle queued attempted delete of virtual parent
  1356  				processAttemptToDelete(1),
  1357  				assertState(state{
  1358  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)},
  1359  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)}, // requeued on restmapper error
  1360  				}),
  1361  				// 6,7: observe parent via v1
  1362  				processEvent(makeAddEvent(deployment1apps)),
  1363  				assertState(state{
  1364  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1apps)},                // parent version/virtual state gets corrected
  1365  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual), makeNode(pod1ns1, withOwners(deployment1extensions))}, // virtual parent and mismatched child enqueued for delete attempt
  1366  				}),
  1367  				// 8,9: process attemptToDelete
   1368  				// virtual node dropped from attemptToDelete with no further action because the real node has now been observed
  1369  				processAttemptToDelete(1),
  1370  				assertState(state{
  1371  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1apps)},
  1372  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child enqueued for delete attempt
  1373  				}),
  1374  				// 10,11: process attemptToDelete for mismatched child
  1375  				processAttemptToDelete(1),
  1376  				assertState(state{
  1377  					clientActions: []string{
  1378  						"get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete
  1379  					},
  1380  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1apps)},
  1381  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child still enqueued - restmapper error
  1382  				}),
  1383  				// 12: teardown
  1384  				deleteObjectFromClient("apps", "v1", "deployments", "ns1", "deployment1"),
  1385  				// 13,14: observe delete via v1
  1386  				processEvent(makeDeleteEvent(deployment1apps)),
  1387  				assertState(state{
  1388  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // only child remains
  1389  					absentOwnerCache:       []objectReference{deployment1apps},                            // cached absence of parent via v1
  1390  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))},
  1391  				}),
   1392  				// 15,16: process attemptToDelete for child
  1393  				processAttemptToDelete(1),
  1394  				assertState(state{
  1395  					clientActions: []string{
  1396  						"get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete
  1397  					},
  1398  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // only child remains
  1399  					absentOwnerCache:       []objectReference{deployment1apps},
  1400  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child still enqueued - restmapper error
  1401  				}),
  1402  			},
  1403  		},
  1404  		{
  1405  			name: "child -> existing owner with inaccessible API version (owner first)",
  1406  			steps: []step{
  1407  				// setup
  1408  				createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)),
  1409  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, deployment1extensions)),
  1410  				// 2,3: observe parent via v1
  1411  				processEvent(makeAddEvent(deployment1apps)),
  1412  				assertState(state{
  1413  					graphNodes: []*node{makeNode(deployment1apps)},
  1414  				}),
  1415  				// 4,5: observe child
  1416  				processEvent(makeAddEvent(pod1ns1, deployment1extensions)),
  1417  				assertState(state{
  1418  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1apps)},
  1419  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child enqueued for delete attempt
  1420  				}),
  1421  				// 6,7: process attemptToDelete for mismatched child
  1422  				processAttemptToDelete(1),
  1423  				assertState(state{
  1424  					clientActions: []string{
  1425  						"get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete
  1426  					},
  1427  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1apps)},
  1428  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child still enqueued - restmapper error
  1429  				}),
  1430  				// 8: teardown
  1431  				deleteObjectFromClient("apps", "v1", "deployments", "ns1", "deployment1"),
  1432  				// 9,10: observe delete via v1
  1433  				processEvent(makeDeleteEvent(deployment1apps)),
  1434  				assertState(state{
  1435  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // only child remains
  1436  					absentOwnerCache:       []objectReference{deployment1apps},                            // cached absence of parent via v1
  1437  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))},
  1438  				}),
  1439  				// 11,12: process attemptToDelete for child
   1440  				// final state: child with unresolvable ownerRef remains, queued in pendingAttemptToDelete
  1441  				processAttemptToDelete(1),
  1442  				assertState(state{
  1443  					clientActions: []string{
  1444  						"get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete
  1445  					},
  1446  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // only child remains
  1447  					absentOwnerCache:       []objectReference{deployment1apps},
  1448  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child still enqueued - restmapper error
  1449  				}),
  1450  			},
  1451  		},
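         		// Both inaccessible-apiVersion scenarios above hinge on the restmapper: an
         		// ownerRef whose apiVersion has no REST mapping cannot be verified, and that
         		// is indistinguishable from a CRD that is temporarily unavailable, so the GC
         		// requeues instead of deleting. A hedged sketch of the decision
         		// (illustrative only, not the controller's actual code):
         		//
         		//	gk := schema.GroupKind{Group: "extensions", Kind: "Deployment"}
         		//	if _, err := restMapper.RESTMapping(gk, "v1beta1"); err != nil {
         		//		// owner coordinates cannot be resolved: keep the child and
         		//		// requeue the delete attempt instead of deleting
         		//	}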
  1452  		// child pointing at no-longer-served apiVersion of no-longer-existing parent object (e.g. extensions/v1beta1 deployment)
  1453  		// * should not be deleted (this is indistinguishable from referencing an unknown kind/version)
  1454  		// * should repeatedly poll attemptToDelete
  1455  		// * should not block deletion of legitimate children of missing deployment
  1456  		{
  1457  			name: "child -> non-existent owner with inaccessible API version (inaccessible parent apiVersion first)",
  1458  			steps: []step{
  1459  				// setup
  1460  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, deployment1extensions)),
  1461  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod2ns1, deployment1apps)),
  1462  				// 2,3: observe child pointing at no-longer-served apiVersion
  1463  				processEvent(makeAddEvent(pod1ns1, deployment1extensions)),
  1464  				assertState(state{
  1465  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)},
  1466  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)}, // virtual parent enqueued for delete attempt
  1467  				}),
  1468  				// 4,5: observe child pointing at served apiVersion where owner does not exist
  1469  				processEvent(makeAddEvent(pod2ns1, deployment1apps)),
  1470  				assertState(state{
  1471  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual), makeNode(pod2ns1, withOwners(deployment1apps))},
  1472  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual), makeNode(pod2ns1, withOwners(deployment1apps))}, // mismatched child enqueued for delete attempt
  1473  				}),
  1474  				// 6,7: handle attempt to delete virtual parent for inaccessible apiVersion
  1475  				processAttemptToDelete(1),
  1476  				assertState(state{
  1477  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual), makeNode(pod2ns1, withOwners(deployment1apps))},
  1478  					pendingAttemptToDelete: []*node{makeNode(pod2ns1, withOwners(deployment1apps)), makeNode(deployment1extensions, virtual)}, // inaccessible parent requeued to end
  1479  				}),
  1480  				// 8,9: handle attempt to delete mismatched child
  1481  				processAttemptToDelete(1),
  1482  				assertState(state{
  1483  					clientActions: []string{
  1484  						"get /v1, Resource=pods ns=ns1 name=podname2",               // lookup of child pre-delete
  1485  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of parent
  1486  						"delete /v1, Resource=pods ns=ns1 name=podname2",            // delete child
  1487  					},
  1488  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual), makeNode(pod2ns1, withOwners(deployment1apps))},
  1489  					absentOwnerCache:       []objectReference{deployment1apps},                // verifiably absent parent remembered
  1490  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)}, // mismatched child with verifiably absent parent deleted
  1491  				}),
  1492  				// 10,11: observe delete issued in step 8
  1493  				processEvent(makeDeleteEvent(pod2ns1)),
  1494  				assertState(state{
  1495  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)},
  1496  					absentOwnerCache:       []objectReference{deployment1apps},
  1497  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)},
  1498  				}),
  1499  				// 12,13: final state: inaccessible parent requeued in attemptToDelete
  1500  				processAttemptToDelete(1),
  1501  				assertState(state{
  1502  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)},
  1503  					absentOwnerCache:       []objectReference{deployment1apps},
  1504  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)},
  1505  				}),
  1506  			},
  1507  		},
  1508  
  1509  		{
  1510  			name: "child -> non-existent owner with inaccessible API version (accessible parent apiVersion first)",
  1511  			steps: []step{
  1512  				// setup
  1513  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, deployment1extensions)),
  1514  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod2ns1, deployment1apps)),
  1515  				// 2,3: observe child pointing at served apiVersion where owner does not exist
  1516  				processEvent(makeAddEvent(pod2ns1, deployment1apps)),
  1517  				assertState(state{
  1518  					graphNodes: []*node{
  1519  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1520  						makeNode(deployment1apps, virtual)},
  1521  					pendingAttemptToDelete: []*node{
  1522  						makeNode(deployment1apps, virtual)}, // virtual parent enqueued for delete attempt
  1523  				}),
  1524  				// 4,5: observe child pointing at no-longer-served apiVersion
  1525  				processEvent(makeAddEvent(pod1ns1, deployment1extensions)),
  1526  				assertState(state{
  1527  					graphNodes: []*node{
  1528  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1529  						makeNode(deployment1apps, virtual),
  1530  						makeNode(pod1ns1, withOwners(deployment1extensions))},
  1531  					pendingAttemptToDelete: []*node{
  1532  						makeNode(deployment1apps, virtual),
  1533  						makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child enqueued for delete attempt
  1534  				}),
  1535  				// 6,7: handle attempt to delete virtual parent for accessible apiVersion
  1536  				processAttemptToDelete(1),
  1537  				assertState(state{
  1538  					clientActions: []string{
  1539  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of parent, gets 404
  1540  					},
  1541  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(deployment1apps)}, // virtual parent not found, queued virtual delete event
  1542  					graphNodes: []*node{
  1543  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1544  						makeNode(deployment1apps, virtual),
  1545  						makeNode(pod1ns1, withOwners(deployment1extensions))},
  1546  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))},
  1547  				}),
  1548  				// 8,9: handle attempt to delete mismatched child
  1549  				processAttemptToDelete(1),
  1550  				assertState(state{
  1551  					clientActions: []string{
  1552  						"get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete
  1553  					},
  1554  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(deployment1apps)},
  1555  					graphNodes: []*node{
  1556  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1557  						makeNode(deployment1apps, virtual),
  1558  						makeNode(pod1ns1, withOwners(deployment1extensions))},
   1559  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // restmapper error on inaccessible parent, requeued
  1560  				}),
  1561  				// 10,11: handle queued virtual delete event
  1562  				processPendingGraphChanges(1),
  1563  				assertState(state{
  1564  					graphNodes: []*node{
  1565  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1566  						makeNode(deployment1extensions, virtual), // deployment node changed identity to alternative virtual identity
  1567  						makeNode(pod1ns1, withOwners(deployment1extensions)),
  1568  					},
  1569  					absentOwnerCache: []objectReference{deployment1apps}, // absent apps/v1 parent remembered
  1570  					pendingAttemptToDelete: []*node{
  1571  						makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion
  1572  						makeNode(pod2ns1, withOwners(deployment1apps)),       // children of absent apps/v1 parent queued for delete attempt
  1573  						makeNode(deployment1extensions, virtual),             // new virtual parent queued for delete attempt
  1574  					},
  1575  				}),
  1576  
  1577  				// 12,13: handle attempt to delete child referencing inaccessible apiVersion
  1578  				processAttemptToDelete(1),
  1579  				assertState(state{
  1580  					clientActions: []string{
  1581  						"get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete
  1582  					},
  1583  					graphNodes: []*node{
  1584  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1585  						makeNode(deployment1extensions, virtual),
  1586  						makeNode(pod1ns1, withOwners(deployment1extensions))},
  1587  					absentOwnerCache: []objectReference{deployment1apps},
  1588  					pendingAttemptToDelete: []*node{
  1589  						makeNode(pod2ns1, withOwners(deployment1apps)),       // children of absent apps/v1 parent queued for delete attempt
  1590  						makeNode(deployment1extensions, virtual),             // new virtual parent queued for delete attempt
  1591  						makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion - requeued to end
  1592  					},
  1593  				}),
  1594  
  1595  				// 14,15: handle attempt to delete child referencing accessible apiVersion
  1596  				processAttemptToDelete(1),
  1597  				assertState(state{
  1598  					clientActions: []string{
  1599  						"get /v1, Resource=pods ns=ns1 name=podname2",    // lookup of child pre-delete
  1600  						"delete /v1, Resource=pods ns=ns1 name=podname2", // parent absent, delete
  1601  					},
  1602  					graphNodes: []*node{
  1603  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1604  						makeNode(deployment1extensions, virtual),
  1605  						makeNode(pod1ns1, withOwners(deployment1extensions))},
  1606  					absentOwnerCache: []objectReference{deployment1apps},
  1607  					pendingAttemptToDelete: []*node{
  1608  						makeNode(deployment1extensions, virtual),             // new virtual parent queued for delete attempt
  1609  						makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion
  1610  					},
  1611  				}),
  1612  
  1613  				// 16,17: handle attempt to delete virtual parent in inaccessible apiVersion
  1614  				processAttemptToDelete(1),
  1615  				assertState(state{
  1616  					graphNodes: []*node{
  1617  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1618  						makeNode(deployment1extensions, virtual),
  1619  						makeNode(pod1ns1, withOwners(deployment1extensions))},
  1620  					absentOwnerCache: []objectReference{deployment1apps},
  1621  					pendingAttemptToDelete: []*node{
  1622  						makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion
  1623  						makeNode(deployment1extensions, virtual),             // virtual parent with inaccessible apiVersion - requeued to end
  1624  					},
  1625  				}),
  1626  
  1627  				// 18,19: observe delete of pod2 from step 14
  1628  				// final state: virtual parent for inaccessible apiVersion and child of that parent remain in graph, queued for delete attempts with backoff
  1629  				processEvent(makeDeleteEvent(pod2ns1)),
  1630  				assertState(state{
  1631  					graphNodes: []*node{
  1632  						makeNode(deployment1extensions, virtual),
  1633  						makeNode(pod1ns1, withOwners(deployment1extensions))},
  1634  					absentOwnerCache: []objectReference{deployment1apps},
  1635  					pendingAttemptToDelete: []*node{
  1636  						makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion
  1637  						makeNode(deployment1extensions, virtual),             // virtual parent with inaccessible apiVersion
  1638  					},
  1639  				}),
  1640  			},
  1641  		},
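         		// A virtual node stands in for owner coordinates the GC has never observed
         		// directly. Once a lookup proves those coordinates absent, the queued
         		// virtual delete event records the absence and, if dependents still
         		// reference the owner under other coordinates, the node survives under one
         		// of those alternative identities. Rough shape of the bookkeeping
         		// (hypothetical names, for illustration only):
         		//
         		//	absentOwnerCache.Add(verifiedAbsent)                   // e.g. deployment1apps
         		//	node.identity = alternateIdentityFromDependents(node)  // e.g. deployment1extensions
         		//	attemptToDelete.Add(node)                              // retry under the new identity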
  1642  		// child pointing at incorrect apiVersion/kind of still-existing parent object (e.g. core/v1 Secret with uid=123, where an apps/v1 Deployment with uid=123 exists)
  1643  		// * should be deleted immediately
  1644  		// * should not trigger deletion of legitimate children of parent
  1645  		{
  1646  			name: "bad child -> existing owner with incorrect API version (bad child, good child, bad parent delete, good parent)",
  1647  			steps: []step{
  1648  				// setup
  1649  				createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)),
  1650  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(badChildPod, badSecretReferenceWithDeploymentUID)),
  1651  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(goodChildPod, deployment1apps)),
  1652  				// 3,4: observe bad child
  1653  				processEvent(makeAddEvent(badChildPod, badSecretReferenceWithDeploymentUID)),
  1654  				assertState(state{
  1655  					graphNodes: []*node{
  1656  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1657  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1658  					pendingAttemptToDelete: []*node{
  1659  						makeNode(badSecretReferenceWithDeploymentUID, virtual)}, // virtual parent enqueued for delete attempt
  1660  				}),
  1661  
  1662  				// 5,6: observe good child
  1663  				processEvent(makeAddEvent(goodChildPod, deployment1apps)),
  1664  				assertState(state{
  1665  					graphNodes: []*node{
  1666  						makeNode(goodChildPod, withOwners(deployment1apps)), // good child added
  1667  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1668  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1669  					pendingAttemptToDelete: []*node{
  1670  						makeNode(badSecretReferenceWithDeploymentUID, virtual), // virtual parent enqueued for delete attempt
  1671  						makeNode(goodChildPod, withOwners(deployment1apps)),    // good child enqueued for delete attempt
  1672  					},
  1673  				}),
  1674  
  1675  				// 7,8: process pending delete of virtual parent
  1676  				processAttemptToDelete(1),
  1677  				assertState(state{
  1678  					clientActions: []string{
  1679  						"get /v1, Resource=secrets ns=ns1 name=secretname", // lookup of bad parent reference
  1680  					},
  1681  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent not found, queued virtual delete event
  1682  					graphNodes: []*node{
  1683  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1684  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1685  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1686  					pendingAttemptToDelete: []*node{
  1687  						makeNode(goodChildPod, withOwners(deployment1apps)), // good child enqueued for delete attempt
  1688  					},
  1689  				}),
  1690  
  1691  				// 9,10: process pending delete of good child, gets 200, remains
  1692  				processAttemptToDelete(1),
  1693  				assertState(state{
  1694  					clientActions: []string{
  1695  						"get /v1, Resource=pods ns=ns1 name=goodpod",                // lookup of child pre-delete
  1696  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent reference, returns 200
  1697  					},
  1698  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent not found, queued virtual delete event
  1699  					graphNodes: []*node{
  1700  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1701  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1702  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1703  				}),
  1704  
  1705  				// 11,12: process virtual delete event of bad parent reference
  1706  				processPendingGraphChanges(1),
  1707  				assertState(state{
  1708  					graphNodes: []*node{
  1709  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1710  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1711  						makeNode(deployment1apps, virtual)}, // parent node switched to alternate identity, still virtual
  1712  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, // remember absence of bad parent coordinates
  1713  					pendingAttemptToDelete: []*node{
  1714  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), // child of bad parent coordinates enqueued for delete attempt
  1715  						makeNode(deployment1apps, virtual),                                     // new alternate virtual parent identity queued for delete attempt
  1716  					},
  1717  				}),
  1718  
  1719  				// 13,14: process pending delete of bad child
  1720  				processAttemptToDelete(1),
  1721  				assertState(state{
  1722  					clientActions: []string{
  1723  						"get /v1, Resource=pods ns=ns1 name=badpod",    // lookup of child pre-delete
  1724  						"delete /v1, Resource=pods ns=ns1 name=badpod", // delete of bad child (absence of bad parent is cached)
  1725  					},
  1726  					graphNodes: []*node{
  1727  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1728  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1729  						makeNode(deployment1apps, virtual)}, // parent node switched to alternate identity, still virtual
  1730  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1731  					pendingAttemptToDelete: []*node{
  1732  						makeNode(deployment1apps, virtual), // new alternate virtual parent identity queued for delete attempt
  1733  					},
  1734  				}),
  1735  
  1736  				// 15,16: process pending delete of new virtual parent
  1737  				processAttemptToDelete(1),
  1738  				assertState(state{
  1739  					clientActions: []string{
  1740  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of virtual parent, returns 200
  1741  					},
  1742  					graphNodes: []*node{
  1743  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1744  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1745  						makeNode(deployment1apps, virtual)}, // parent node switched to alternate identity, still virtual
  1746  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1747  					pendingAttemptToDelete: []*node{
  1748  						makeNode(deployment1apps, virtual), // requeued, not yet observed
  1749  					},
  1750  				}),
  1751  
  1752  				// 17,18: observe good parent
  1753  				processEvent(makeAddEvent(deployment1apps)),
  1754  				assertState(state{
  1755  					graphNodes: []*node{
  1756  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1757  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1758  						makeNode(deployment1apps)}, // parent node made non-virtual
  1759  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1760  					pendingAttemptToDelete: []*node{
  1761  						makeNode(deployment1apps), // still queued, no longer virtual
  1762  					},
  1763  				}),
  1764  
  1765  				// 19,20: observe delete of bad child from step 13
  1766  				processEvent(makeDeleteEvent(badChildPod, badSecretReferenceWithDeploymentUID)),
  1767  				assertState(state{
  1768  					graphNodes: []*node{
  1769  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1770  						// bad child node removed
  1771  						makeNode(deployment1apps)},
  1772  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1773  					pendingAttemptToDelete: []*node{
  1774  						makeNode(deployment1apps), // still queued, no longer virtual
  1775  					},
  1776  				}),
  1777  
  1778  				// 21,22: process pending delete of good parent
  1779  				// final state: good parent in graph with correct coordinates, good children remain, no pending deletions
  1780  				processAttemptToDelete(1),
  1781  				assertState(state{
  1782  					clientActions: []string{
  1783  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent, returns 200
  1784  					},
  1785  					graphNodes: []*node{
  1786  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1787  						makeNode(deployment1apps)},
  1788  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1789  				}),
  1790  			},
  1791  		},
  1792  		{
  1793  			name: "bad child -> existing owner with incorrect API version (bad child, good child, good parent, bad parent delete)",
  1794  			steps: []step{
  1795  				// setup
  1796  				createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)),
  1797  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(badChildPod, badSecretReferenceWithDeploymentUID)),
  1798  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(goodChildPod, deployment1apps)),
  1799  				// 3,4: observe bad child
  1800  				processEvent(makeAddEvent(badChildPod, badSecretReferenceWithDeploymentUID)),
  1801  				assertState(state{
  1802  					graphNodes: []*node{
  1803  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1804  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1805  					pendingAttemptToDelete: []*node{
  1806  						makeNode(badSecretReferenceWithDeploymentUID, virtual)}, // virtual parent enqueued for delete attempt
  1807  				}),
  1808  
  1809  				// 5,6: observe good child
  1810  				processEvent(makeAddEvent(goodChildPod, deployment1apps)),
  1811  				assertState(state{
  1812  					graphNodes: []*node{
  1813  						makeNode(goodChildPod, withOwners(deployment1apps)), // good child added
  1814  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1815  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1816  					pendingAttemptToDelete: []*node{
  1817  						makeNode(badSecretReferenceWithDeploymentUID, virtual), // virtual parent enqueued for delete attempt
  1818  						makeNode(goodChildPod, withOwners(deployment1apps)),    // good child enqueued for delete attempt
  1819  					},
  1820  				}),
  1821  
  1822  				// 7,8: process pending delete of virtual parent
  1823  				processAttemptToDelete(1),
  1824  				assertState(state{
  1825  					clientActions: []string{
  1826  						"get /v1, Resource=secrets ns=ns1 name=secretname", // lookup of bad parent reference
  1827  					},
  1828  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent not found, queued virtual delete event
  1829  					graphNodes: []*node{
  1830  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1831  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1832  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1833  					pendingAttemptToDelete: []*node{
  1834  						makeNode(goodChildPod, withOwners(deployment1apps)), // good child enqueued for delete attempt
  1835  					},
  1836  				}),
  1837  
  1838  				// 9,10: process pending delete of good child, gets 200, remains
  1839  				processAttemptToDelete(1),
  1840  				assertState(state{
  1841  					clientActions: []string{
  1842  						"get /v1, Resource=pods ns=ns1 name=goodpod",                // lookup of child pre-delete
  1843  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent reference, returns 200
  1844  					},
  1845  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent not found, queued virtual delete event
  1846  					graphNodes: []*node{
  1847  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1848  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1849  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1850  				}),
  1851  
  1852  				// 11,12: good parent add event
  1853  				insertEvent(makeAddEvent(deployment1apps)),
  1854  				assertState(state{
  1855  					pendingGraphChanges: []*event{
   1856  						makeAddEvent(deployment1apps),                                // good parent observation inserted ahead of the queued virtual delete
  1857  						makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent not found, queued virtual delete event
  1858  					graphNodes: []*node{
  1859  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1860  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1861  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1862  				}),
  1863  
  1864  				// 13,14: process good parent add
  1865  				processPendingGraphChanges(1),
  1866  				assertState(state{
  1867  					pendingGraphChanges: []*event{
  1868  						makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent still queued virtual delete event
  1869  					graphNodes: []*node{
  1870  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1871  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1872  						makeNode(deployment1apps)}, // parent node gets fixed, no longer virtual
  1873  					pendingAttemptToDelete: []*node{
  1874  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID))}, // child of bad parent coordinates enqueued for delete attempt
  1875  				}),
  1876  
  1877  				// 15,16: process virtual delete event of bad parent reference
  1878  				processPendingGraphChanges(1),
  1879  				assertState(state{
  1880  					graphNodes: []*node{
  1881  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1882  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1883  						makeNode(deployment1apps)},
  1884  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, // remember absence of bad parent coordinates
  1885  					pendingAttemptToDelete: []*node{
  1886  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), // child of bad parent coordinates enqueued for delete attempt
  1887  					},
  1888  				}),
  1889  
  1890  				// 17,18: process pending delete of bad child
  1891  				processAttemptToDelete(1),
  1892  				assertState(state{
  1893  					clientActions: []string{
  1894  						"get /v1, Resource=pods ns=ns1 name=badpod",    // lookup of child pre-delete
  1895  						"delete /v1, Resource=pods ns=ns1 name=badpod", // delete of bad child (absence of bad parent is cached)
  1896  					},
  1897  					graphNodes: []*node{
  1898  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1899  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1900  						makeNode(deployment1apps)},
  1901  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1902  				}),
  1903  
  1904  				// 19,20: observe delete of bad child from step 17
  1905  				// final state: good parent in graph with correct coordinates, good children remain, no pending deletions
  1906  				processEvent(makeDeleteEvent(badChildPod, badSecretReferenceWithDeploymentUID)),
  1907  				assertState(state{
  1908  					graphNodes: []*node{
  1909  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1910  						// bad child node removed
  1911  						makeNode(deployment1apps)},
  1912  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1913  				}),
  1914  			},
  1915  		},
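         		// The scenario above uses insertEvent to model an informer event that
         		// outruns an already-queued virtual delete: the real add is processed
         		// first, fixing the node in place, so the later virtual delete only records
         		// the absent coordinates and enqueues the bad child. Ordering sketch
         		// (illustrative):
         		//
         		//	pendingGraphChanges: [addEvent(deployment1apps), virtualDelete(badSecretReferenceWithDeploymentUID)]
         		//	// process add    -> node becomes non-virtual at the good coordinates
         		//	// process delete -> only absentOwnerCache update + bad child enqueued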
  1916  		{
  1917  			name: "bad child -> existing owner with incorrect API version (good child, bad child, good parent)",
  1918  			steps: []step{
  1919  				// setup
  1920  				createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)),
  1921  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(badChildPod, badSecretReferenceWithDeploymentUID)),
  1922  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(goodChildPod, deployment1apps)),
  1923  				// 3,4: observe good child
  1924  				processEvent(makeAddEvent(goodChildPod, deployment1apps)),
  1925  				assertState(state{
  1926  					graphNodes: []*node{
  1927  						makeNode(goodChildPod, withOwners(deployment1apps)), // good child added
  1928  						makeNode(deployment1apps, virtual)},                 // virtual parent added
  1929  					pendingAttemptToDelete: []*node{
  1930  						makeNode(deployment1apps, virtual), // virtual parent enqueued for delete attempt
  1931  					},
  1932  				}),
  1933  
  1934  				// 5,6: observe bad child
  1935  				processEvent(makeAddEvent(badChildPod, badSecretReferenceWithDeploymentUID)),
  1936  				assertState(state{
  1937  					graphNodes: []*node{
  1938  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1939  						makeNode(deployment1apps, virtual),
  1940  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID))}, // bad child added
  1941  					pendingAttemptToDelete: []*node{
  1942  						makeNode(deployment1apps, virtual),                                     // virtual parent enqueued for delete attempt
  1943  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), // bad child enqueued for delete attempt
  1944  					},
  1945  				}),
  1946  
  1947  				// 7,8: process pending delete of virtual parent
  1948  				processAttemptToDelete(1),
  1949  				assertState(state{
  1950  					clientActions: []string{
  1951  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent reference, returns 200
  1952  					},
  1953  					graphNodes: []*node{
  1954  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1955  						makeNode(deployment1apps, virtual),
  1956  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID))},
  1957  					pendingAttemptToDelete: []*node{
  1958  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), // bad child enqueued for delete attempt
  1959  						makeNode(deployment1apps, virtual),                                     // virtual parent requeued to end, still virtual
  1960  					},
  1961  				}),
  1962  
  1963  				// 9,10: process pending delete of bad child
  1964  				processAttemptToDelete(1),
  1965  				assertState(state{
  1966  					clientActions: []string{
  1967  						"get /v1, Resource=pods ns=ns1 name=badpod",        // lookup of child pre-delete
  1968  						"get /v1, Resource=secrets ns=ns1 name=secretname", // lookup of bad parent reference, returns 404
  1969  						"delete /v1, Resource=pods ns=ns1 name=badpod",     // delete of bad child
  1970  					},
  1971  					graphNodes: []*node{
  1972  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1973  						makeNode(deployment1apps, virtual),
  1974  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID))},
  1975  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, // remember absence of bad parent
  1976  					pendingAttemptToDelete: []*node{
  1977  						makeNode(deployment1apps, virtual), // virtual parent requeued to end, still virtual
  1978  					},
  1979  				}),
  1980  
  1981  				// 11,12: observe good parent
  1982  				processEvent(makeAddEvent(deployment1apps)),
  1983  				assertState(state{
  1984  					graphNodes: []*node{
  1985  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1986  						makeNode(deployment1apps), // good parent no longer virtual
  1987  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID))},
  1988  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1989  					pendingAttemptToDelete: []*node{
  1990  						makeNode(deployment1apps), // parent requeued to end, no longer virtual
  1991  					},
  1992  				}),
  1993  
  1994  				// 13,14: observe delete of bad child from step 9
  1995  				processEvent(makeDeleteEvent(badChildPod, badSecretReferenceWithDeploymentUID)),
  1996  				assertState(state{
  1997  					graphNodes: []*node{
  1998  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1999  						// bad child node removed
  2000  						makeNode(deployment1apps)},
  2001  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  2002  					pendingAttemptToDelete: []*node{
  2003  						makeNode(deployment1apps), // parent requeued to end, no longer virtual
  2004  					},
  2005  				}),
  2006  
  2007  				// 15,16: process pending delete of good parent
  2008  				// final state: good parent in graph with correct coordinates, good children remain, no pending deletions
  2009  				processAttemptToDelete(1),
  2010  				assertState(state{
  2011  					clientActions: []string{
  2012  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent, returns 200
  2013  					},
  2014  					graphNodes: []*node{
  2015  						makeNode(goodChildPod, withOwners(deployment1apps)),
  2016  						makeNode(deployment1apps)},
  2017  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  2018  				}),
  2019  			},
  2020  		},
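         		// In the incorrect-apiVersion scenarios above, coordinates and UID must
         		// both line up: a Secret reference carrying a Deployment's UID resolves to
         		// "owner absent", so only the bad child is deleted and legitimate children
         		// are untouched. Hedged sketch of the verification (illustrative; assumes
         		// k8s.io/apimachinery/pkg/api/errors imported as apierrors):
         		//
         		//	owner, err := metadataClient.Resource(refGVR).Namespace(ns).Get(ctx, ref.Name, metav1.GetOptions{})
         		//	ownerAbsent := apierrors.IsNotFound(err) || (err == nil && owner.GetUID() != ref.UID)
         		//	// ownerAbsent -> cache the coordinates, then delete their dependents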
  2021  		{
  2022  			// https://github.com/kubernetes/kubernetes/issues/98040
  2023  			name: "cluster-scoped bad child, namespaced good child, missing parent",
  2024  			steps: []step{
  2025  				// setup
  2026  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod2ns1, pod1ns1)),     // good child
  2027  				createObjectInClient("", "v1", "nodes", "", makeMetadataObj(node1, pod1nonamespace)), // bad child
  2028  
  2029  				// 2,3: observe bad child
  2030  				processEvent(makeAddEvent(node1, pod1nonamespace)),
  2031  				assertState(state{
  2032  					graphNodes: []*node{
  2033  						makeNode(node1, withOwners(pod1nonamespace)),
  2034  						makeNode(pod1nonamespace, virtual)},
  2035  					pendingAttemptToDelete: []*node{
  2036  						makeNode(pod1nonamespace, virtual)}, // virtual parent queued for deletion
  2037  				}),
  2038  
  2039  				// 4,5: observe good child
  2040  				processEvent(makeAddEvent(pod2ns1, pod1ns1)),
  2041  				assertState(state{
  2042  					graphNodes: []*node{
  2043  						makeNode(node1, withOwners(pod1nonamespace)),
  2044  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2045  						makeNode(pod1nonamespace, virtual)},
  2046  					pendingAttemptToDelete: []*node{
  2047  						makeNode(pod1nonamespace, virtual),     // virtual parent queued for deletion
  2048  						makeNode(pod2ns1, withOwners(pod1ns1)), // mismatched child queued for deletion
  2049  					},
  2050  				}),
  2051  
  2052  				// 6,7: process attemptToDelete of bad virtual parent coordinates
  2053  				processAttemptToDelete(1),
  2054  				assertState(state{
  2055  					graphNodes: []*node{
  2056  						makeNode(node1, withOwners(pod1nonamespace)),
  2057  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2058  						makeNode(pod1nonamespace, virtual)},
  2059  					pendingAttemptToDelete: []*node{
  2060  						makeNode(pod2ns1, withOwners(pod1ns1))}, // mismatched child queued for deletion
  2061  				}),
  2062  
  2063  				// 8,9: process attemptToDelete of good child
  2064  				processAttemptToDelete(1),
  2065  				assertState(state{
  2066  					clientActions: []string{
  2067  						"get /v1, Resource=pods ns=ns1 name=podname2",    // get good child, returns 200
  2068  						"get /v1, Resource=pods ns=ns1 name=podname1",    // get missing parent, returns 404
  2069  						"delete /v1, Resource=pods ns=ns1 name=podname2", // delete good child
  2070  					},
  2071  					graphNodes: []*node{
  2072  						makeNode(node1, withOwners(pod1nonamespace)),
  2073  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2074  						makeNode(pod1nonamespace, virtual)},
  2075  					absentOwnerCache: []objectReference{pod1ns1}, // missing parent cached
  2076  				}),
  2077  
  2078  				// 10,11: observe deletion of good child
  2079  				// steady-state is bad cluster child and bad virtual parent coordinates, with no retries
  2080  				processEvent(makeDeleteEvent(pod2ns1, pod1ns1)),
  2081  				assertState(state{
  2082  					graphNodes: []*node{
  2083  						makeNode(node1, withOwners(pod1nonamespace)),
  2084  						makeNode(pod1nonamespace, virtual)},
  2085  					absentOwnerCache: []objectReference{pod1ns1},
  2086  				}),
  2087  			},
  2088  		},
  2089  		{
  2090  			// https://github.com/kubernetes/kubernetes/issues/98040
  2091  			name: "cluster-scoped bad child, namespaced good child, late observed parent",
  2092  			steps: []step{
  2093  				// setup
  2094  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1)),              // good parent
  2095  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod2ns1, pod1ns1)),     // good child
  2096  				createObjectInClient("", "v1", "nodes", "", makeMetadataObj(node1, pod1nonamespace)), // bad child
  2097  
  2098  				// 3,4: observe bad child
  2099  				processEvent(makeAddEvent(node1, pod1nonamespace)),
  2100  				assertState(state{
  2101  					graphNodes: []*node{
  2102  						makeNode(node1, withOwners(pod1nonamespace)),
  2103  						makeNode(pod1nonamespace, virtual)},
  2104  					pendingAttemptToDelete: []*node{
  2105  						makeNode(pod1nonamespace, virtual)}, // virtual parent queued for deletion
  2106  				}),
  2107  
  2108  				// 5,6: observe good child
  2109  				processEvent(makeAddEvent(pod2ns1, pod1ns1)),
  2110  				assertState(state{
  2111  					graphNodes: []*node{
  2112  						makeNode(node1, withOwners(pod1nonamespace)),
  2113  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2114  						makeNode(pod1nonamespace, virtual)},
  2115  					pendingAttemptToDelete: []*node{
  2116  						makeNode(pod1nonamespace, virtual),      // virtual parent queued for deletion
  2117  						makeNode(pod2ns1, withOwners(pod1ns1))}, // mismatched child queued for deletion
  2118  				}),
  2119  
  2120  				// 7,8: process attemptToDelete of bad virtual parent coordinates
  2121  				processAttemptToDelete(1),
  2122  				assertState(state{
  2123  					graphNodes: []*node{
  2124  						makeNode(node1, withOwners(pod1nonamespace)),
  2125  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2126  						makeNode(pod1nonamespace, virtual)},
  2127  					pendingAttemptToDelete: []*node{
  2128  						makeNode(pod2ns1, withOwners(pod1ns1))}, // mismatched child queued for deletion
  2129  				}),
  2130  
  2131  				// 9,10: process attemptToDelete of good child
  2132  				processAttemptToDelete(1),
  2133  				assertState(state{
  2134  					clientActions: []string{
  2135  						"get /v1, Resource=pods ns=ns1 name=podname2", // get good child, returns 200
  2136  						"get /v1, Resource=pods ns=ns1 name=podname1", // get late-observed parent, returns 200
  2137  					},
  2138  					graphNodes: []*node{
  2139  						makeNode(node1, withOwners(pod1nonamespace)),
  2140  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2141  						makeNode(pod1nonamespace, virtual)},
  2142  				}),
  2143  
  2144  				// 11,12: late observe good parent
  2145  				processEvent(makeAddEvent(pod1ns1)),
  2146  				assertState(state{
  2147  					graphNodes: []*node{
  2148  						makeNode(node1, withOwners(pod1nonamespace)),
  2149  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2150  						makeNode(pod1ns1)},
   2151  					// warn about the node's invalid ownerRef
  2152  					events: []string{`Warning OwnerRefInvalidNamespace ownerRef [v1/Pod, namespace: , name: podname1, uid: poduid1] does not exist in namespace "" involvedObject{kind=Node,apiVersion=v1}`},
  2153  					pendingAttemptToDelete: []*node{
  2154  						makeNode(node1, withOwners(pod1nonamespace))}, // queue bad cluster-scoped child for delete attempt
  2155  				}),
  2156  
  2157  				// 13,14: process attemptToDelete of bad child
  2158  				// steady state is bad cluster-scoped child remaining with no retries, good parent and good child in graph
  2159  				processAttemptToDelete(1),
  2160  				assertState(state{
  2161  					clientActions: []string{
  2162  						"get /v1, Resource=nodes name=nodename", // get bad child, returns 200
  2163  					},
  2164  					graphNodes: []*node{
  2165  						makeNode(node1, withOwners(pod1nonamespace)),
  2166  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2167  						makeNode(pod1ns1)},
  2168  				}),
  2169  			},
  2170  		},
  2171  		{
  2172  			// https://github.com/kubernetes/kubernetes/issues/98040
  2173  			name: "namespaced good child, cluster-scoped bad child, missing parent",
  2174  			steps: []step{
  2175  				// setup
  2176  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod2ns1, pod1ns1)),     // good child
  2177  				createObjectInClient("", "v1", "nodes", "", makeMetadataObj(node1, pod1nonamespace)), // bad child
  2178  
  2179  				// 2,3: observe good child
  2180  				processEvent(makeAddEvent(pod2ns1, pod1ns1)),
  2181  				assertState(state{
  2182  					graphNodes: []*node{
  2183  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2184  						makeNode(pod1ns1, virtual)},
  2185  					pendingAttemptToDelete: []*node{
  2186  						makeNode(pod1ns1, virtual)}, // virtual parent queued for deletion
  2187  				}),
  2188  
  2189  				// 4,5: observe bad child
  2190  				processEvent(makeAddEvent(node1, pod1nonamespace)),
  2191  				assertState(state{
  2192  					graphNodes: []*node{
  2193  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2194  						makeNode(node1, withOwners(pod1nonamespace)),
  2195  						makeNode(pod1ns1, virtual)},
  2196  					pendingAttemptToDelete: []*node{
  2197  						makeNode(pod1ns1, virtual),                   // virtual parent queued for deletion
  2198  						makeNode(node1, withOwners(pod1nonamespace)), // mismatched child queued for deletion
  2199  					},
  2200  				}),
  2201  
  2202  				// 6,7: process attemptToDelete of good virtual parent coordinates
  2203  				processAttemptToDelete(1),
  2204  				assertState(state{
  2205  					clientActions: []string{
  2206  						"get /v1, Resource=pods ns=ns1 name=podname1", // lookup of missing parent, returns 404
  2207  					},
  2208  					graphNodes: []*node{
  2209  						makeNode(node1, withOwners(pod1nonamespace)),
  2210  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2211  						makeNode(pod1ns1, virtual)},
  2212  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(pod1ns1)}, // virtual parent not found, queued virtual delete event
  2213  					pendingAttemptToDelete: []*node{
  2214  						makeNode(node1, withOwners(pod1nonamespace)), // mismatched child still queued for deletion
  2215  					},
  2216  				}),
  2217  
  2218  				// 8,9: process attemptToDelete of bad cluster child
  2219  				processAttemptToDelete(1),
  2220  				assertState(state{
  2221  					clientActions: []string{
  2222  						"get /v1, Resource=nodes name=nodename", // lookup of existing node
  2223  					},
  2224  					graphNodes: []*node{
  2225  						makeNode(node1, withOwners(pod1nonamespace)),
  2226  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2227  						makeNode(pod1ns1, virtual)},
  2228  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(pod1ns1)}, // virtual parent virtual delete event still enqueued
  2229  				}),
  2230  
  2231  				// 10,11: process virtual delete event for good virtual parent coordinates
  2232  				processPendingGraphChanges(1),
  2233  				assertState(state{
  2234  					graphNodes: []*node{
  2235  						makeNode(node1, withOwners(pod1nonamespace)),
  2236  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2237  						makeNode(pod1nonamespace, virtual)}, // missing virtual parent replaced with alternate coordinates, still virtual
  2238  					absentOwnerCache: []objectReference{pod1ns1}, // cached absence of missing parent
  2239  					pendingAttemptToDelete: []*node{
  2240  						makeNode(pod2ns1, withOwners(pod1ns1)), // good child of missing parent enqueued for deletion
  2241  						makeNode(pod1nonamespace, virtual),     // new virtual parent coordinates enqueued for deletion
  2242  					},
  2243  				}),
  2244  
  2245  				// 12,13: process attemptToDelete of good child
  2246  				processAttemptToDelete(1),
  2247  				assertState(state{
  2248  					clientActions: []string{
  2249  						"get /v1, Resource=pods ns=ns1 name=podname2",    // lookup of good child
  2250  						"delete /v1, Resource=pods ns=ns1 name=podname2", // delete of good child
  2251  					},
  2252  					graphNodes: []*node{
  2253  						makeNode(node1, withOwners(pod1nonamespace)),
  2254  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2255  						makeNode(pod1nonamespace, virtual)},
  2256  					absentOwnerCache: []objectReference{pod1ns1},
  2257  					pendingAttemptToDelete: []*node{
  2258  						makeNode(pod1nonamespace, virtual), // new virtual parent coordinates enqueued for deletion
  2259  					},
  2260  				}),
  2261  
  2262  				// 14,15: observe deletion of good child
  2263  				processEvent(makeDeleteEvent(pod2ns1, pod1ns1)),
  2264  				assertState(state{
  2265  					graphNodes: []*node{
  2266  						makeNode(node1, withOwners(pod1nonamespace)),
  2267  						makeNode(pod1nonamespace, virtual)},
  2268  					absentOwnerCache: []objectReference{pod1ns1},
  2269  					pendingAttemptToDelete: []*node{
  2270  						makeNode(pod1nonamespace, virtual), // new virtual parent coordinates enqueued for deletion
  2271  					},
  2272  				}),
  2273  
  2274  				// 16,17: process attemptToDelete of bad virtual parent coordinates
  2275  				// steady-state is bad cluster child and bad virtual parent coordinates, with no retries
  2276  				processAttemptToDelete(1),
  2277  				assertState(state{
  2278  					graphNodes: []*node{
  2279  						makeNode(node1, withOwners(pod1nonamespace)),
  2280  						makeNode(pod1nonamespace, virtual)},
  2281  					absentOwnerCache: []objectReference{pod1ns1},
  2282  				}),
  2283  			},
  2284  		},
  2285  	}
  2286  
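        // informersStarted is normally closed once all informers have synced; a
        // pre-closed channel makes the graph builder treat them as started from
        // the outset, so no test step ever blocks waiting for sync.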
  2287  	alwaysStarted := make(chan struct{})
  2288  	close(alwaysStarted)
  2289  	for _, scenario := range testScenarios {
  2290  		t.Run(scenario.name, func(t *testing.T) {
  2291  
  2292  			absentOwnerCache := NewReferenceCache(100)
  2293  
  2294  			eventRecorder := record.NewFakeRecorder(100)
  2295  			eventRecorder.IncludeObject = true
  2296  
  2297  			metadataClient := fakemetadata.NewSimpleMetadataClient(fakemetadata.NewTestScheme())
  2298  
  2299  			tweakableRM := meta.NewDefaultRESTMapper(nil)
  2300  			tweakableRM.AddSpecific(
  2301  				schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "Role"},
  2302  				schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "roles"},
  2303  				schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "role"},
  2304  				meta.RESTScopeNamespace,
  2305  			)
  2306  			tweakableRM.AddSpecific(
  2307  				schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "Role"},
  2308  				schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "roles"},
  2309  				schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "role"},
  2310  				meta.RESTScopeNamespace,
  2311  			)
  2312  			tweakableRM.AddSpecific(
  2313  				schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"},
  2314  				schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"},
  2315  				schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployment"},
  2316  				meta.RESTScopeNamespace,
  2317  			)
  2318  			restMapper := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}
  2319  
  2320  			// set up our workqueues
  2321  			attemptToDelete := newTrackingWorkqueue[*node]()
  2322  			attemptToOrphan := newTrackingWorkqueue[*node]()
  2323  			graphChanges := newTrackingWorkqueue[*event]()
  2324  
  2325  			gc := &GarbageCollector{
  2326  				metadataClient:   metadataClient,
  2327  				restMapper:       restMapper,
  2328  				attemptToDelete:  attemptToDelete,
  2329  				attemptToOrphan:  attemptToOrphan,
  2330  				absentOwnerCache: absentOwnerCache,
  2331  				dependencyGraphBuilder: &GraphBuilder{
  2332  					eventRecorder:    eventRecorder,
  2333  					metadataClient:   metadataClient,
  2334  					informersStarted: alwaysStarted,
  2335  					graphChanges:     graphChanges,
  2336  					uidToNode: &concurrentUIDToNode{
  2337  						uidToNodeLock: sync.RWMutex{},
  2338  						uidToNode:     make(map[types.UID]*node),
  2339  					},
  2340  					attemptToDelete:  attemptToDelete,
  2341  					absentOwnerCache: absentOwnerCache,
  2342  				},
  2343  			}
  2344  
  2345  			logger, _ := ktesting.NewTestContext(t)
  2346  
  2347  			ctx := stepContext{
  2348  				t:               t,
  2349  				logger:          logger,
  2350  				gc:              gc,
  2351  				eventRecorder:   eventRecorder,
  2352  				metadataClient:  metadataClient,
  2353  				attemptToDelete: attemptToDelete,
  2354  				attemptToOrphan: attemptToOrphan,
  2355  				graphChanges:    graphChanges,
  2356  			}
  2357  			for i, s := range scenario.steps {
  2358  				ctx.t.Logf("%d: %s", i, s.name)
  2359  				s.check(ctx)
  2360  				if ctx.t.Failed() {
  2361  					return
  2362  				}
  2363  				verifyGraphInvariants(fmt.Sprintf("after step %d", i), gc.dependencyGraphBuilder.uidToNode.uidToNode, t)
  2364  				if ctx.t.Failed() {
  2365  					return
  2366  				}
  2367  			}
  2368  		})
  2369  	}
  2370  }
  2371  
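        // makeID assembles an objectReference from an API version ("group/version"
        // or bare "version"), kind, namespace, name, and uid.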
  2372  func makeID(groupVersion string, kind string, namespace, name, uid string) objectReference {
  2373  	return objectReference{
  2374  		OwnerReference: metav1.OwnerReference{APIVersion: groupVersion, Kind: kind, Name: name, UID: types.UID(uid)},
  2375  		Namespace:      namespace,
  2376  	}
  2377  }
  2378  
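        // A nodeTweak adjusts a node under construction. virtual marks the node as
        // virtual (known only from owner references, never actually observed), and
        // withOwners replaces its owner references.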
  2379  type nodeTweak func(*node) *node
  2380  
  2381  func virtual(n *node) *node {
  2382  	n.virtual = true
  2383  	return n
  2384  }
  2385  func withOwners(ownerReferences ...objectReference) nodeTweak {
  2386  	return func(n *node) *node {
  2387  		var owners []metav1.OwnerReference
  2388  		for _, o := range ownerReferences {
  2389  			owners = append(owners, o.OwnerReference)
  2390  		}
  2391  		n.owners = owners
  2392  		return n
  2393  	}
  2394  }
  2395  
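        // makeNode builds a graph node with the given identity and applies each
        // tweak in order; e.g. makeNode(pod1ns1, virtual, withOwners(node1)) yields
        // a virtual node whose only owner reference is node1.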
  2396  func makeNode(identity objectReference, tweaks ...nodeTweak) *node {
  2397  	n := &node{identity: identity}
  2398  	for _, tweak := range tweaks {
  2399  		n = tweak(n)
  2400  	}
  2401  	return n
  2402  }
  2403  
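        // makeAddEvent returns the addEvent an informer would deliver for an object
        // with the given identity and owner references. It panics on a malformed
        // APIVersion, which is fine for test fixtures.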
  2404  func makeAddEvent(identity objectReference, owners ...objectReference) *event {
  2405  	gv, err := schema.ParseGroupVersion(identity.APIVersion)
  2406  	if err != nil {
  2407  		panic(err)
  2408  	}
  2409  	return &event{
  2410  		eventType: addEvent,
  2411  		gvk:       gv.WithKind(identity.Kind),
  2412  		obj:       makeObj(identity, owners...),
  2413  	}
  2414  }
  2415  
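        // makeVirtualDeleteEvent is makeDeleteEvent with the virtual flag set,
        // mirroring the delete event the collector synthesizes when a virtual
        // node's object cannot be found on the server.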
  2416  func makeVirtualDeleteEvent(identity objectReference, owners ...objectReference) *event {
  2417  	e := makeDeleteEvent(identity, owners...)
  2418  	e.virtual = true
  2419  	return e
  2420  }
  2421  
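        // makeDeleteEvent returns the deleteEvent an informer would deliver for an
        // object with the given identity and owner references.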
  2422  func makeDeleteEvent(identity objectReference, owners ...objectReference) *event {
  2423  	gv, err := schema.ParseGroupVersion(identity.APIVersion)
  2424  	if err != nil {
  2425  		panic(err)
  2426  	}
  2427  	return &event{
  2428  		eventType: deleteEvent,
  2429  		gvk:       gv.WithKind(identity.Kind),
  2430  		obj:       makeObj(identity, owners...),
  2431  	}
  2432  }
  2433  
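        // makeObj builds the metadata-only representation of an object, the shape
        // the graph builder sees in watch events.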
  2434  func makeObj(identity objectReference, owners ...objectReference) *metaonly.MetadataOnlyObject {
  2435  	obj := &metaonly.MetadataOnlyObject{
  2436  		TypeMeta:   metav1.TypeMeta{APIVersion: identity.APIVersion, Kind: identity.Kind},
  2437  		ObjectMeta: metav1.ObjectMeta{Namespace: identity.Namespace, UID: identity.UID, Name: identity.Name},
  2438  	}
  2439  	for _, owner := range owners {
  2440  		obj.ObjectMeta.OwnerReferences = append(obj.ObjectMeta.OwnerReferences, owner.OwnerReference)
  2441  	}
  2442  	return obj
  2443  }
  2444  
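        // makeMetadataObj builds a PartialObjectMetadata suitable for seeding the
        // fake metadata client via createObjectInClient.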
  2445  func makeMetadataObj(identity objectReference, owners ...objectReference) *metav1.PartialObjectMetadata {
  2446  	obj := &metav1.PartialObjectMetadata{
  2447  		TypeMeta:   metav1.TypeMeta{APIVersion: identity.APIVersion, Kind: identity.Kind},
  2448  		ObjectMeta: metav1.ObjectMeta{Namespace: identity.Namespace, UID: identity.UID, Name: identity.Name},
  2449  	}
  2450  	for _, owner := range owners {
  2451  		obj.ObjectMeta.OwnerReferences = append(obj.ObjectMeta.OwnerReferences, owner.OwnerReference)
  2452  	}
  2453  	return obj
  2454  }
  2455  
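        // stepContext carries everything a test step may need: the collector under
        // test, its fake clients, and the tracking workqueues the step can inspect.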
  2456  type stepContext struct {
  2457  	t               *testing.T
  2458  	logger          klog.Logger
  2459  	gc              *GarbageCollector
  2460  	eventRecorder   *record.FakeRecorder
  2461  	metadataClient  *fakemetadata.FakeMetadataClient
  2462  	attemptToDelete *trackingWorkqueue[*node]
  2463  	attemptToOrphan *trackingWorkqueue[*node]
  2464  	graphChanges    *trackingWorkqueue[*event]
  2465  }
  2466  
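        // A step is one named action or assertion within a scenario; check reports
        // any mismatch through ctx.t.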
  2467  type step struct {
  2468  	name  string
  2469  	check func(stepContext)
  2470  }
  2471  
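        // processPendingGraphChanges runs the graph builder over count queued
        // events, or over all queued events when count <= 0.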
  2472  func processPendingGraphChanges(count int) step {
  2473  	return step{
  2474  		name: "processPendingGraphChanges",
  2475  		check: func(ctx stepContext) {
  2476  			ctx.t.Helper()
  2477  			if count <= 0 {
  2478  				// process all
  2479  				for ctx.gc.dependencyGraphBuilder.graphChanges.Len() != 0 {
  2480  					ctx.gc.dependencyGraphBuilder.processGraphChanges(ctx.logger)
  2481  				}
  2482  			} else {
  2483  				for i := 0; i < count; i++ {
  2484  					if ctx.gc.dependencyGraphBuilder.graphChanges.Len() == 0 {
  2485  						ctx.t.Errorf("expected at least %d pending changes, got %d", count, i)
  2486  						return
  2487  					}
  2488  					ctx.gc.dependencyGraphBuilder.processGraphChanges(ctx.logger)
  2489  				}
  2490  			}
  2491  		},
  2492  	}
  2493  }
  2494  
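        // processAttemptToDelete runs the deletion worker over count queued nodes,
        // or over every queued node when count <= 0.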
  2495  func processAttemptToDelete(count int) step {
  2496  	return step{
  2497  		name: "processAttemptToDelete",
  2498  		check: func(ctx stepContext) {
  2499  			ctx.t.Helper()
  2500  			if count <= 0 {
  2501  				// process all
  2502  				for ctx.gc.dependencyGraphBuilder.attemptToDelete.Len() != 0 {
  2503  					ctx.gc.processAttemptToDeleteWorker(context.TODO())
  2504  				}
  2505  			} else {
  2506  				for i := 0; i < count; i++ {
  2507  					if ctx.gc.dependencyGraphBuilder.attemptToDelete.Len() == 0 {
  2508  						ctx.t.Errorf("expected at least %d pending deletions, got %d", count, i)
  2509  						return
  2510  					}
  2511  					ctx.gc.processAttemptToDeleteWorker(context.TODO())
  2512  				}
  2513  			}
  2514  		},
  2515  	}
  2516  }
  2517  
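        // insertEvent pushes e to the front of the graphChanges queue by draining
        // the queue, adding e, and re-adding the drained items in their original
        // order.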
  2518  func insertEvent(e *event) step {
  2519  	return step{
  2520  		name: "insertEvent",
  2521  		check: func(ctx stepContext) {
  2522  			ctx.t.Helper()
  2523  			// drain queue into items
  2524  			var items []*event
  2525  			for ctx.gc.dependencyGraphBuilder.graphChanges.Len() > 0 {
  2526  				item, _ := ctx.gc.dependencyGraphBuilder.graphChanges.Get()
  2527  				ctx.gc.dependencyGraphBuilder.graphChanges.Done(item)
  2528  				items = append(items, item)
  2529  			}
  2530  
  2531  			// add the new event
  2532  			ctx.gc.dependencyGraphBuilder.graphChanges.Add(e)
  2533  
  2534  			// reappend the items
  2535  			for _, item := range items {
  2536  				ctx.gc.dependencyGraphBuilder.graphChanges.Add(item)
  2537  			}
  2538  		},
  2539  	}
  2540  }
  2541  
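        // processEvent enqueues e and processes it immediately; it requires an
        // empty graphChanges queue so there is no ambiguity about which event was
        // processed.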
  2542  func processEvent(e *event) step {
  2543  	return step{
  2544  		name: "processEvent",
  2545  		check: func(ctx stepContext) {
  2546  			ctx.t.Helper()
  2547  			if ctx.gc.dependencyGraphBuilder.graphChanges.Len() != 0 {
  2548  				ctx.t.Fatalf("events present in graphChanges, must process pending graphChanges before calling processEvent")
  2549  			}
  2550  			ctx.gc.dependencyGraphBuilder.graphChanges.Add(e)
  2551  			ctx.gc.dependencyGraphBuilder.processGraphChanges(ctx.logger)
  2552  		},
  2553  	}
  2554  }
  2555  
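        // createObjectInClient stores obj in the fake metadata client (cluster-scoped
        // when namespace is empty) and clears the recorded actions, so later
        // assertions see only actions taken by the collector itself.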
  2556  func createObjectInClient(group, version, resource, namespace string, obj *metav1.PartialObjectMetadata) step {
  2557  	return step{
  2558  		name: "createObjectInClient",
  2559  		check: func(ctx stepContext) {
  2560  			ctx.t.Helper()
  2561  			if len(ctx.metadataClient.Actions()) > 0 {
  2562  				ctx.t.Fatal("cannot call createObjectInClient with pending client actions, call assertClientActions to check and clear first")
  2563  			}
  2564  			gvr := schema.GroupVersionResource{Group: group, Version: version, Resource: resource}
  2565  			var c fakemetadata.MetadataClient
  2566  			if namespace == "" {
  2567  				c = ctx.metadataClient.Resource(gvr).(fakemetadata.MetadataClient)
  2568  			} else {
  2569  				c = ctx.metadataClient.Resource(gvr).Namespace(namespace).(fakemetadata.MetadataClient)
  2570  			}
  2571  			if _, err := c.CreateFake(obj, metav1.CreateOptions{}); err != nil {
  2572  				ctx.t.Fatal(err)
  2573  			}
  2574  			ctx.metadataClient.ClearActions()
  2575  		},
  2576  	}
  2577  }
  2578  
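        // deleteObjectFromClient removes the named object from the fake metadata
        // client, likewise clearing the recorded bookkeeping actions.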
  2579  func deleteObjectFromClient(group, version, resource, namespace, name string) step {
  2580  	return step{
  2581  		name: "deleteObjectFromClient",
  2582  		check: func(ctx stepContext) {
  2583  			ctx.t.Helper()
  2584  			if len(ctx.metadataClient.Actions()) > 0 {
  2585  				ctx.t.Fatal("cannot call deleteObjectFromClient with pending client actions, call assertClientActions to check and clear first")
  2586  			}
  2587  			gvr := schema.GroupVersionResource{Group: group, Version: version, Resource: resource}
  2588  			var c fakemetadata.MetadataClient
  2589  			if namespace == "" {
  2590  				c = ctx.metadataClient.Resource(gvr).(fakemetadata.MetadataClient)
  2591  			} else {
  2592  				c = ctx.metadataClient.Resource(gvr).Namespace(namespace).(fakemetadata.MetadataClient)
  2593  			}
  2594  			if err := c.Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil {
  2595  				ctx.t.Fatal(err)
  2596  			}
  2597  			ctx.metadataClient.ClearActions()
  2598  		},
  2599  	}
  2600  }
  2601  
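        // state describes the expected observable state at an assertion point:
        // emitted events, metadata client actions, graph contents, queued work, and
        // cached absent owners. A nil field means none are expected.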
  2602  type state struct {
  2603  	events                 []string
  2604  	clientActions          []string
  2605  	graphNodes             []*node
  2606  	pendingGraphChanges    []*event
  2607  	pendingAttemptToDelete []*node
  2608  	pendingAttemptToOrphan []*node
  2609  	absentOwnerCache       []objectReference
  2610  }
  2611  
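        // assertState verifies each field of the expected state, draining recorded
        // events and client actions so every assertion starts from a clean slate.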
  2612  func assertState(s state) step {
  2613  	return step{
  2614  		name: "assertState",
  2615  		check: func(ctx stepContext) {
  2616  			ctx.t.Helper()
  2617  
  2618  			{
  2619  				for _, absent := range s.absentOwnerCache {
  2620  					if !ctx.gc.absentOwnerCache.Has(absent) {
  2621  						ctx.t.Errorf("expected absent owner %s was not in the absentOwnerCache", absent)
  2622  					}
  2623  				}
  2624  				if len(s.absentOwnerCache) != ctx.gc.absentOwnerCache.cache.Len() {
  2625  					// only way to inspect is to drain them all, but that's ok because we're failing the test anyway
  2626  					ctx.gc.absentOwnerCache.cache.OnEvicted = func(key lru.Key, item interface{}) {
  2627  						found := false
  2628  						for _, absent := range s.absentOwnerCache {
  2629  							if absent == key {
  2630  								found = true
  2631  								break
  2632  							}
  2633  						}
  2634  						if !found {
  2635  							ctx.t.Errorf("unexpected item in absent owner cache: %s", key)
  2636  						}
  2637  					}
  2638  					ctx.gc.absentOwnerCache.cache.Clear()
  2639  					ctx.t.Error("unexpected items in absent owner cache")
  2640  				}
  2641  			}
  2642  
  2643  			{
  2644  				var actualEvents []string
  2645  				// drain sent events
  2646  			loop:
  2647  				for {
  2648  					select {
  2649  					case event := <-ctx.eventRecorder.Events:
  2650  						actualEvents = append(actualEvents, event)
  2651  					default:
  2652  						break loop
  2653  					}
  2654  				}
  2655  				if !reflect.DeepEqual(actualEvents, s.events) {
  2656  					ctx.t.Logf("expected:\n%s", strings.Join(s.events, "\n"))
  2657  					ctx.t.Logf("actual:\n%s", strings.Join(actualEvents, "\n"))
  2658  					ctx.t.Fatalf("did not get expected events")
  2659  				}
  2660  			}
  2661  
  2662  			{
  2663  				var actualClientActions []string
  2664  				for _, action := range ctx.metadataClient.Actions() {
  2665  					s := fmt.Sprintf("%s %s", action.GetVerb(), action.GetResource())
  2666  					if action.GetNamespace() != "" {
  2667  						s += " ns=" + action.GetNamespace()
  2668  					}
  2669  					if get, ok := action.(clientgotesting.GetAction); ok && get.GetName() != "" {
  2670  						s += " name=" + get.GetName()
  2671  					}
  2672  					actualClientActions = append(actualClientActions, s)
  2673  				}
  2674  				if (len(s.clientActions) > 0 || len(actualClientActions) > 0) && !reflect.DeepEqual(s.clientActions, actualClientActions) {
  2675  					ctx.t.Logf("expected:\n%s", strings.Join(s.clientActions, "\n"))
  2676  					ctx.t.Logf("actual:\n%s", strings.Join(actualClientActions, "\n"))
  2677  					ctx.t.Fatalf("did not get expected client actions")
  2678  				}
  2679  				ctx.metadataClient.ClearActions()
  2680  			}
  2681  
  2682  			{
  2683  				if l := len(ctx.gc.dependencyGraphBuilder.uidToNode.uidToNode); l != len(s.graphNodes) {
  2684  					ctx.t.Errorf("expected %d nodes, got %d", len(s.graphNodes), l)
  2685  				}
  2686  				for _, n := range s.graphNodes {
  2687  					graphNode, ok := ctx.gc.dependencyGraphBuilder.uidToNode.Read(n.identity.UID)
  2688  					if !ok {
  2689  						ctx.t.Errorf("%s: no node in graph", n.identity.UID)
  2690  						continue
  2691  					}
  2692  					if graphNode.identity != n.identity {
  2693  						ctx.t.Errorf("%s: expected identity %v, got %v", n.identity.UID, n.identity, graphNode.identity)
  2694  					}
  2695  					if graphNode.virtual != n.virtual {
  2696  						ctx.t.Errorf("%s: expected virtual %v, got %v", n.identity.UID, n.virtual, graphNode.virtual)
  2697  					}
  2698  					if (len(graphNode.owners) > 0 || len(n.owners) > 0) && !reflect.DeepEqual(graphNode.owners, n.owners) {
  2699  						expectedJSON, _ := json.Marshal(n.owners)
  2700  						actualJSON, _ := json.Marshal(graphNode.owners)
  2701  						ctx.t.Errorf("%s: expected owners %s, got %s", n.identity.UID, expectedJSON, actualJSON)
  2702  					}
  2703  				}
  2704  			}
  2705  
  2706  			{
  2707  				for i := range s.pendingGraphChanges {
  2708  					e := s.pendingGraphChanges[i]
  2709  					if len(ctx.graphChanges.pendingList) < i+1 {
  2710  						ctx.t.Errorf("graphChanges: expected %d events, got %d", len(s.pendingGraphChanges), ctx.graphChanges.Len())
  2711  						break
  2712  					}
  2713  
  2714  					a := ctx.graphChanges.pendingList[i]
  2715  					if !reflect.DeepEqual(e, a) {
  2716  						objectDiff := ""
  2717  						if !reflect.DeepEqual(e.obj, a.obj) {
  2718  							objectDiff = "\nobjectDiff:\n" + cmp.Diff(e.obj, a.obj)
  2719  						}
  2720  						oldObjectDiff := ""
  2721  						if !reflect.DeepEqual(e.oldObj, a.oldObj) {
  2722  							oldObjectDiff = "\noldObjectDiff:\n" + cmp.Diff(e.oldObj, a.oldObj)
  2723  						}
  2724  						ctx.t.Errorf("graphChanges[%d]: expected\n%#v\ngot\n%#v%s%s", i, e, a, objectDiff, oldObjectDiff)
  2725  					}
  2726  				}
  2727  				if ctx.graphChanges.Len() > len(s.pendingGraphChanges) {
  2728  					for i, a := range ctx.graphChanges.pendingList[len(s.pendingGraphChanges):] {
  2729  						ctx.t.Errorf("graphChanges[%d]: unexpected event: %v", len(s.pendingGraphChanges)+i, a)
  2730  					}
  2731  				}
  2732  			}
  2733  
  2734  			{
  2735  				for i := range s.pendingAttemptToDelete {
  2736  					e := s.pendingAttemptToDelete[i].identity
  2737  					eVirtual := s.pendingAttemptToDelete[i].virtual
  2738  					if ctx.attemptToDelete.Len() < i+1 {
  2739  						ctx.t.Errorf("attemptToDelete: expected %d events, got %d", len(s.pendingAttemptToDelete), ctx.attemptToDelete.Len())
  2740  						break
  2741  					}
  2742  					a := ctx.attemptToDelete.pendingList[i].identity
  2743  					aVirtual := ctx.attemptToDelete.pendingList[i].virtual
  2744  					if !reflect.DeepEqual(e, a) {
  2745  						ctx.t.Errorf("attemptToDelete[%d]: expected %v, got %v", i, e, a)
  2746  					}
  2747  					if eVirtual != aVirtual {
  2748  						ctx.t.Errorf("attemptToDelete[%d]: expected node %v with virtual=%v, got virtual=%v", i, e, eVirtual, aVirtual)
  2749  					}
  2750  				}
  2751  				if ctx.attemptToDelete.Len() > len(s.pendingAttemptToDelete) {
  2752  					for i, a := range ctx.attemptToDelete.pendingList[len(s.pendingAttemptToDelete):] {
  2753  						ctx.t.Errorf("attemptToDelete[%d]: unexpected node: %v", len(s.pendingAttemptToDelete)+i, a.identity)
  2754  					}
  2755  				}
  2756  			}
  2757  
  2758  			{
  2759  				for i := range s.pendingAttemptToOrphan {
  2760  					e := s.pendingAttemptToOrphan[i].identity
  2761  					if ctx.attemptToOrphan.Len() < i+1 {
  2762  						ctx.t.Errorf("attemptToOrphan: expected %d events, got %d", len(s.pendingAttemptToOrphan), ctx.attemptToOrphan.Len())
  2763  						break
  2764  					}
  2765  					a := ctx.attemptToOrphan.pendingList[i].identity
  2766  					if !reflect.DeepEqual(e, a) {
  2767  						ctx.t.Errorf("attemptToOrphan[%d]: expected %v, got %v", i, e, a)
  2768  					}
  2769  				}
  2770  				if ctx.attemptToOrphan.Len() > len(s.pendingAttemptToOrphan) {
  2771  					for i, a := range ctx.attemptToOrphan.pendingList[len(s.pendingAttemptToOrphan):] {
  2772  						ctx.t.Errorf("attemptToOrphan[%d]: unexpected node: %v", len(s.pendingAttemptToOrphan)+i, a.identity)
  2773  					}
  2774  				}
  2775  			}
  2776  		},
  2777  	}
  2778  
  2779  }
  2780  
  2781  // trackingWorkqueue implements TypedRateLimitingInterface,
  2782  // allows introspection of the items in the queue,
  2783  // and treats AddAfter and AddRateLimited the same as Add
  2784  // so they are always synchronous.
  2785  type trackingWorkqueue[T comparable] struct {
  2786  	limiter     workqueue.TypedRateLimitingInterface[T]
  2787  	pendingList []T
  2788  	pendingMap  map[T]struct{}
  2789  }
  2790  
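        // Compile-time assertion that trackingWorkqueue satisfies the workqueue
        // interface.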
  2791  var _ workqueue.TypedRateLimitingInterface[string] = &trackingWorkqueue[string]{}
  2792  
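        // newTrackingWorkqueue wraps a real workqueue with an effectively unlimited
        // rate.Inf limiter, so rate limiting never delays a test step.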
  2793  func newTrackingWorkqueue[T comparable]() *trackingWorkqueue[T] {
  2794  	return &trackingWorkqueue[T]{
  2795  		limiter:    workqueue.NewTypedRateLimitingQueue[T](&workqueue.TypedBucketRateLimiter[T]{Limiter: rate.NewLimiter(rate.Inf, 100)}),
  2796  		pendingMap: map[T]struct{}{},
  2797  	}
  2798  }
  2799  
  2800  func (t *trackingWorkqueue[T]) Add(item T) {
  2801  	t.queue(item)
  2802  	t.limiter.Add(item)
  2803  }
  2804  func (t *trackingWorkqueue[T]) AddAfter(item T, duration time.Duration) {
  2805  	t.Add(item)
  2806  }
  2807  func (t *trackingWorkqueue[T]) AddRateLimited(item T) {
  2808  	t.Add(item)
  2809  }
  2810  func (t *trackingWorkqueue[T]) Get() (T, bool) {
  2811  	item, shutdown := t.limiter.Get()
  2812  	t.dequeue(item)
  2813  	return item, shutdown
  2814  }
  2815  func (t *trackingWorkqueue[T]) Done(item T) {
  2816  	t.limiter.Done(item)
  2817  }
  2818  func (t *trackingWorkqueue[T]) Forget(item T) {
  2819  	t.limiter.Forget(item)
  2820  }
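        // NumRequeues always reports zero; retry counts are meaningless here because
        // AddAfter and AddRateLimited enqueue immediately.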
  2821  func (t *trackingWorkqueue[T]) NumRequeues(item T) int {
  2822  	return 0
  2823  }
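        // Len cross-checks the three views of pending work and panics on any drift,
        // since a mismatch indicates a bug in the tracking itself.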
  2824  func (t *trackingWorkqueue[T]) Len() int {
  2825  	if e, a := len(t.pendingList), len(t.pendingMap); e != a {
  2826  		panic(fmt.Errorf("pendingList != pendingMap: %d / %d", e, a))
  2827  	}
  2828  	if e, a := len(t.pendingList), t.limiter.Len(); e != a {
  2829  		panic(fmt.Errorf("pendingList != limiter.Len(): %d / %d", e, a))
  2830  	}
  2831  	return len(t.pendingList)
  2832  }
  2833  func (t *trackingWorkqueue[T]) ShutDown() {
  2834  	t.limiter.ShutDown()
  2835  }
  2836  func (t *trackingWorkqueue[T]) ShutDownWithDrain() {
  2837  	t.limiter.ShutDownWithDrain()
  2838  }
  2839  func (t *trackingWorkqueue[T]) ShuttingDown() bool {
  2840  	return t.limiter.ShuttingDown()
  2841  }
  2842  
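        // queue records item as pending, preserving insertion order and ignoring
        // re-adds of items already pending, matching the dedup behavior of the
        // underlying workqueue.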
  2843  func (t *trackingWorkqueue[T]) queue(item T) {
  2844  	if _, queued := t.pendingMap[item]; queued {
  2845  		// fmt.Printf("already queued: %#v\n", item)
  2846  		return
  2847  	}
  2848  	t.pendingMap[item] = struct{}{}
  2849  	t.pendingList = append(t.pendingList, item)
  2850  }
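        // dequeue removes item from the pending bookkeeping with a linear scan,
        // which is fine at test scale.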
  2851  func (t *trackingWorkqueue[T]) dequeue(item T) {
  2852  	if _, queued := t.pendingMap[item]; !queued {
  2853  		// fmt.Printf("not queued: %#v\n", item)
  2854  		return
  2855  	}
  2856  	delete(t.pendingMap, item)
  2857  	newPendingList := []T{}
  2858  	for _, p := range t.pendingList {
  2859  		if p == item {
  2860  			continue
  2861  		}
  2862  		newPendingList = append(newPendingList, p)
  2863  	}
  2864  	t.pendingList = newPendingList
  2865  }