k8s.io/kubernetes@v1.29.3/pkg/controller/garbagecollector/garbagecollector_test.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package garbagecollector

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"reflect"
	"strings"
	"sync"
	"testing"
	"time"

	"golang.org/x/time/rate"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"

	"github.com/golang/groupcache/lru"
	"github.com/google/go-cmp/cmp"
	"github.com/stretchr/testify/assert"

	_ "k8s.io/kubernetes/pkg/apis/core/install"
	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
	"k8s.io/utils/pointer"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/json"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/metadata"
	fakemetadata "k8s.io/client-go/metadata/fake"
	"k8s.io/client-go/metadata/metadatainformer"
	restclient "k8s.io/client-go/rest"
	clientgotesting "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/controller-manager/pkg/informerfactory"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	c "k8s.io/kubernetes/pkg/controller"
)

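// testRESTMapper wraps a RESTMapper so tests can reset the underlying mapper
// (when it supports resetting) via Reset.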
type testRESTMapper struct {
	meta.RESTMapper
}

func (m *testRESTMapper) Reset() {
	meta.MaybeResetRESTMapper(m.RESTMapper)
}

func TestGarbageCollectorConstruction(t *testing.T) {
	config := &restclient.Config{}
	tweakableRM := meta.NewDefaultRESTMapper(nil)
	rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}
	metadataClient, err := metadata.NewForConfig(config)
	if err != nil {
		t.Fatal(err)
	}

	podResource := map[schema.GroupVersionResource]struct{}{
		{Version: "v1", Resource: "pods"}: {},
	}
	twoResources := map[schema.GroupVersionResource]struct{}{
		{Version: "v1", Resource: "pods"}:                     {},
		{Group: "tpr.io", Version: "v1", Resource: "unknown"}: {},
	}
	client := fake.NewSimpleClientset()

	sharedInformers := informers.NewSharedInformerFactory(client, 0)
	metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 0)
	// No monitor will be constructed for the non-core resource, but the GC
	// construction will not fail.
	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	gc, err := NewGarbageCollector(client, metadataClient, rm, map[schema.GroupResource]struct{}{},
		informerfactory.NewInformerFactory(sharedInformers, metadataInformers), alwaysStarted)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, 0, len(gc.dependencyGraphBuilder.monitors))

	logger, _ := ktesting.NewTestContext(t)

	// Make sure resource monitor syncing creates and stops resource monitors.
	tweakableRM.Add(schema.GroupVersionKind{Group: "tpr.io", Version: "v1", Kind: "unknown"}, nil)
	err = gc.resyncMonitors(logger, twoResources)
	if err != nil {
		t.Errorf("Failed adding a monitor: %v", err)
	}
	assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors))

	err = gc.resyncMonitors(logger, podResource)
	if err != nil {
		t.Errorf("Failed removing a monitor: %v", err)
	}
	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))

	// Make sure the syncing mechanism also works after Run() has been called.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go gc.Run(ctx, 1)

	err = gc.resyncMonitors(logger, twoResources)
	if err != nil {
		t.Errorf("Failed adding a monitor: %v", err)
	}
	assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors))

	err = gc.resyncMonitors(logger, podResource)
	if err != nil {
		t.Errorf("Failed removing a monitor: %v", err)
	}
	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
}

// fakeAction records information about requests to aid in testing.
type fakeAction struct {
	method string
	path   string
	query  string
}

// String returns method=path to aid in testing.
func (f *fakeAction) String() string {
	return strings.Join([]string{f.method, f.path}, "=")
}

type FakeResponse struct {
	statusCode int
	content    []byte
}

// fakeActionHandler holds a list of fakeActions received.
type fakeActionHandler struct {
	// statusCode and content returned by this handler for different method + path.
	response map[string]FakeResponse

	lock    sync.Mutex
	actions []fakeAction
}

// ServeHTTP logs the action that occurred and always returns the associated status code.
func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
	func() {
		f.lock.Lock()
		defer f.lock.Unlock()

		f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path, query: request.URL.RawQuery})
		fakeResponse, ok := f.response[request.Method+request.URL.Path]
		if !ok {
			fakeResponse.statusCode = 200
			fakeResponse.content = []byte(`{"apiVersion": "v1", "kind": "List"}`)
		}
		response.Header().Set("Content-Type", "application/json")
		response.WriteHeader(fakeResponse.statusCode)
		response.Write(fakeResponse.content)
	}()

	// This allows the fakeActionHandler to simulate a watch being opened.
	if strings.Contains(request.URL.RawQuery, "watch=true") {
		hijacker, ok := response.(http.Hijacker)
		if !ok {
			return
		}
		connection, _, err := hijacker.Hijack()
		if err != nil {
			return
		}
		defer connection.Close()
		time.Sleep(30 * time.Second)
	}
}

// testServerAndClientConfig returns a server that listens and a config that can reference it.
func testServerAndClientConfig(handler func(http.ResponseWriter, *http.Request)) (*httptest.Server, *restclient.Config) {
	srv := httptest.NewServer(http.HandlerFunc(handler))
	config := &restclient.Config{
		Host: srv.URL,
	}
	return srv, config
}

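// A minimal sketch of how the helpers above combine in the tests below: prime
// a fakeActionHandler with canned responses keyed by method+path, point a
// client config at it, and inspect handler.actions afterwards. (The key and
// response here are illustrative, not taken from a specific test.)
//
//	handler := &fakeActionHandler{response: map[string]FakeResponse{
//		"GET" + "/api/v1/namespaces/ns1/pods/mypod": {200, []byte(`{}`)},
//	}}
//	srv, cfg := testServerAndClientConfig(handler.ServeHTTP)
//	defer srv.Close()

// garbageCollector bundles a GarbageCollector with the stop channel used to
// shut down the shared informers started by setupGC.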
type garbageCollector struct {
	*GarbageCollector
	stop chan struct{}
}

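// setupGC constructs a garbage collector backed by a fake clientset and a
// metadata client for the given config, and starts its shared informers.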
func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
	metadataClient, err := metadata.NewForConfig(config)
	if err != nil {
		t.Fatal(err)
	}

	client := fake.NewSimpleClientset()
	sharedInformers := informers.NewSharedInformerFactory(client, 0)
	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	gc, err := NewGarbageCollector(client, metadataClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, ignoredResources, sharedInformers, alwaysStarted)
	if err != nil {
		t.Fatal(err)
	}
	stop := make(chan struct{})
	go sharedInformers.Start(stop)
	return garbageCollector{gc, stop}
}

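// getPod returns a pod in namespace ns1 with the given name and owner
// references.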
func getPod(podName string, ownerReferences []metav1.OwnerReference) *v1.Pod {
	return &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            podName,
			Namespace:       "ns1",
			UID:             "456",
			OwnerReferences: ownerReferences,
		},
	}
}

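// serializeOrDie marshals the object to JSON, failing the test on error.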
func serializeOrDie(t *testing.T, object interface{}) []byte {
	data, err := json.Marshal(object)
	if err != nil {
		t.Fatal(err)
	}
	return data
}

// TestAttemptToDeleteItem verifies that attemptToDeleteItem makes the
// expected requests against the API server.
func TestAttemptToDeleteItem(t *testing.T) {
	pod := getPod("ToBeDeletedPod", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "owner1",
			UID:        "123",
			APIVersion: "v1",
		},
	})
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/owner1": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/pods/ToBeDeletedPod": {
				200,
				serializeOrDie(t, pod),
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()

	gc := setupGC(t, clientConfig)
	defer close(gc.stop)

	item := &node{
		identity: objectReference{
			OwnerReference: metav1.OwnerReference{
				Kind:       pod.Kind,
				APIVersion: pod.APIVersion,
				Name:       pod.Name,
				UID:        pod.UID,
			},
			Namespace: pod.Namespace,
		},
		// owners are intentionally left empty. The attemptToDeleteItem routine should get the latest item from the server.
		owners:  nil,
		virtual: true,
	}
	err := gc.attemptToDeleteItem(context.TODO(), item)
	if err != nil {
		t.Errorf("Unexpected Error: %v", err)
	}
	if !item.virtual {
		t.Errorf("attemptToDeleteItem changed virtual to false unexpectedly")
	}
	expectedActionSet := sets.NewString()
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/replicationcontrollers/owner1")
	expectedActionSet.Insert("DELETE=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")

	actualActionSet := sets.NewString()
	for _, action := range testHandler.actions {
		actualActionSet.Insert(action.String())
	}
	if !expectedActionSet.Equal(actualActionSet) {
		t.Errorf("expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet,
			actualActionSet, expectedActionSet.Difference(actualActionSet))
	}
}

// verifyGraphInvariants verifies that all of a node's owners list the node as a
// dependent and vice versa. uidToNode has all the nodes in the graph.
func verifyGraphInvariants(scenario string, uidToNode map[types.UID]*node, t *testing.T) {
	for myUID, node := range uidToNode {
		for dependentNode := range node.dependents {
			found := false
			for _, owner := range dependentNode.owners {
				if owner.UID == myUID {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("scenario: %s: node %s has node %s as a dependent, but it's not present in the latter node's owners list", scenario, node.identity, dependentNode.identity)
			}
		}

		for _, owner := range node.owners {
			ownerNode, ok := uidToNode[owner.UID]
			if !ok {
				// It's possible that the owner node doesn't exist.
				continue
			}
			if _, ok := ownerNode.dependents[node]; !ok {
				t.Errorf("node %s has node %s as an owner, but it's not present in the latter node's dependents list", node.identity, ownerNode.identity)
			}
		}
	}
}

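// createEvent returns a graph-change event for a pod with the given UID and
// owner UIDs.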
func createEvent(eventType eventType, selfUID string, owners []string) event {
	var ownerReferences []metav1.OwnerReference
	for i := 0; i < len(owners); i++ {
		ownerReferences = append(ownerReferences, metav1.OwnerReference{UID: types.UID(owners[i])})
	}
	return event{
		eventType: eventType,
		obj: &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				UID:             types.UID(selfUID),
				OwnerReferences: ownerReferences,
			},
		},
	}
}

func TestProcessEvent(t *testing.T) {
	var testScenarios = []struct {
		name string
		// a series of events that will be supplied to the
		// GraphBuilder.graphChanges.
		events []event
	}{
		{
			name: "test1",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
			},
		},
		{
			name: "test2",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "4", []string{"2"}),
				createEvent(deleteEvent, "2", []string{"doesn't matter"}),
			},
		},
		{
			name: "test3",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "4", []string{"3"}),
				createEvent(updateEvent, "2", []string{"4"}),
			},
		},
		{
			name: "reverse test2",
			events: []event{
				createEvent(addEvent, "4", []string{"2"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "1", []string{}),
				createEvent(deleteEvent, "2", []string{"doesn't matter"}),
			},
		},
	}

	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	for _, scenario := range testScenarios {
		logger, _ := ktesting.NewTestContext(t)

		dependencyGraphBuilder := &GraphBuilder{
			informersStarted: alwaysStarted,
			graphChanges:     workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
			uidToNode: &concurrentUIDToNode{
				uidToNodeLock: sync.RWMutex{},
				uidToNode:     make(map[types.UID]*node),
			},
			attemptToDelete:  workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
			absentOwnerCache: NewReferenceCache(2),
		}
		for i := 0; i < len(scenario.events); i++ {
			dependencyGraphBuilder.graphChanges.Add(&scenario.events[i])
			dependencyGraphBuilder.processGraphChanges(logger)
			verifyGraphInvariants(scenario.name, dependencyGraphBuilder.uidToNode.uidToNode, t)
		}
	}
}

func BenchmarkReferencesDiffs(b *testing.B) {
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		oldRefs := []metav1.OwnerReference{{UID: "1"}, {UID: "2"}}
		newRefs := []metav1.OwnerReference{{UID: "2"}, {UID: "3"}}
		referencesDiffs(oldRefs, newRefs)
	}
}

// TestDependentsRace relies on Go's data race detector to check for data
// races on the dependents field.
func TestDependentsRace(t *testing.T) {
	logger, _ := ktesting.NewTestContext(t)

	gc := setupGC(t, &restclient.Config{})
	defer close(gc.stop)

	const updates = 100
	owner := &node{dependents: make(map[*node]struct{})}
	ownerUID := types.UID("owner")
	gc.dependencyGraphBuilder.uidToNode.Write(owner)
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		for i := 0; i < updates; i++ {
			dependent := &node{}
			gc.dependencyGraphBuilder.addDependentToOwners(logger, dependent, []metav1.OwnerReference{{UID: ownerUID}})
			gc.dependencyGraphBuilder.removeDependentFromOwners(dependent, []metav1.OwnerReference{{UID: ownerUID}})
		}
	}()
	go func() {
		defer wg.Done()
		for i := 0; i < updates; i++ {
			gc.attemptToOrphan.Add(owner)
			gc.processAttemptToOrphanWorker(logger)
		}
	}()
	wg.Wait()
}

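// podToGCNode converts a pod into the graph node the garbage collector would
// track for it.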
func podToGCNode(pod *v1.Pod) *node {
	return &node{
		identity: objectReference{
			OwnerReference: metav1.OwnerReference{
				Kind:       pod.Kind,
				APIVersion: pod.APIVersion,
				Name:       pod.Name,
				UID:        pod.UID,
			},
			Namespace: pod.Namespace,
		},
		// owners are intentionally left empty. The attemptToDeleteItem routine should get the latest item from the server.
		owners: nil,
	}
}

func TestAbsentOwnerCache(t *testing.T) {
	rc1Pod1 := getPod("rc1Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc1",
			UID:        "1",
			APIVersion: "v1",
			Controller: pointer.Bool(true),
		},
	})
	rc1Pod2 := getPod("rc1Pod2", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc1",
			UID:        "1",
			APIVersion: "v1",
			Controller: pointer.Bool(false),
		},
	})
	rc2Pod1 := getPod("rc2Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc2",
			UID:        "2",
			APIVersion: "v1",
		},
	})
	rc3Pod1 := getPod("rc3Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc3",
			UID:        "3",
			APIVersion: "v1",
		},
	})
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/namespaces/ns1/pods/rc1Pod1": {
				200,
				serializeOrDie(t, rc1Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc1Pod2": {
				200,
				serializeOrDie(t, rc1Pod2),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc2Pod1": {
				200,
				serializeOrDie(t, rc2Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc3Pod1": {
				200,
				serializeOrDie(t, rc3Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc1": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc2": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc3": {
				404,
				[]byte{},
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	gc := setupGC(t, clientConfig)
	defer close(gc.stop)
	gc.absentOwnerCache = NewReferenceCache(2)
	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc1Pod1))
	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc2Pod1))
	// rc1 should already be in the cache, so no request should be sent. rc1 should be promoted in the absentOwnerCache.
	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc1Pod2))
	// after this call, rc2 should be evicted from the absentOwnerCache
	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc3Pod1))
	// check cache
	if !gc.absentOwnerCache.Has(objectReference{Namespace: "ns1", OwnerReference: metav1.OwnerReference{Kind: "ReplicationController", Name: "rc1", UID: "1", APIVersion: "v1"}}) {
		t.Errorf("expected rc1 to be in the cache")
	}
	if gc.absentOwnerCache.Has(objectReference{Namespace: "ns1", OwnerReference: metav1.OwnerReference{Kind: "ReplicationController", Name: "rc2", UID: "2", APIVersion: "v1"}}) {
		t.Errorf("expected rc2 to not exist in the cache")
	}
	if !gc.absentOwnerCache.Has(objectReference{Namespace: "ns1", OwnerReference: metav1.OwnerReference{Kind: "ReplicationController", Name: "rc3", UID: "3", APIVersion: "v1"}}) {
		t.Errorf("expected rc3 to be in the cache")
	}
	// check the requests sent to the server
	count := 0
	for _, action := range testHandler.actions {
		if action.String() == "GET=/api/v1/namespaces/ns1/replicationcontrollers/rc1" {
			count++
		}
	}
	if count != 1 {
		t.Errorf("expected only 1 GET rc1 request, got %d", count)
	}
}

func TestDeleteOwnerRefPatch(t *testing.T) {
	original := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1"},
				{UID: "2"},
				{UID: "3"},
			},
		},
	}
	originalData := serializeOrDie(t, original)
	expected := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1"},
			},
		},
	}
	p, err := c.GenerateDeleteOwnerRefStrategicMergeBytes("100", []types.UID{"2", "3"})
	if err != nil {
		t.Fatal(err)
	}
	patched, err := strategicpatch.StrategicMergePatch(originalData, p, v1.Pod{})
	if err != nil {
		t.Fatal(err)
	}
	var got v1.Pod
	if err := json.Unmarshal(patched, &got); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(expected, got) {
		t.Errorf("expected: %#v,\ngot: %#v", expected, got)
	}
}

func TestUnblockOwnerReference(t *testing.T) {
	trueVar := true
	falseVar := false
	original := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1", BlockOwnerDeletion: &trueVar},
				{UID: "2", BlockOwnerDeletion: &falseVar},
				{UID: "3"},
			},
		},
	}
	originalData := serializeOrDie(t, original)
	expected := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1", BlockOwnerDeletion: &falseVar},
				{UID: "2", BlockOwnerDeletion: &falseVar},
				{UID: "3"},
			},
		},
	}
	accessor, err := meta.Accessor(&original)
	if err != nil {
		t.Fatal(err)
	}
	n := node{
		owners: accessor.GetOwnerReferences(),
	}
	patch, err := n.unblockOwnerReferencesStrategicMergePatch()
	if err != nil {
		t.Fatal(err)
	}
	patched, err := strategicpatch.StrategicMergePatch(originalData, patch, v1.Pod{})
	if err != nil {
		t.Fatal(err)
	}
	var got v1.Pod
	if err := json.Unmarshal(patched, &got); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(expected, got) {
		t.Errorf("expected: %#v,\ngot: %#v", expected, got)
		t.Errorf("expected: %#v,\ngot: %#v", expected.OwnerReferences, got.OwnerReferences)
		for _, ref := range got.OwnerReferences {
			t.Errorf("ref.UID=%s, ref.BlockOwnerDeletion=%v", ref.UID, *ref.BlockOwnerDeletion)
		}
	}
}

func TestOrphanDependentsFailure(t *testing.T) {
	logger, _ := ktesting.NewTestContext(t)

	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"PATCH" + "/api/v1/namespaces/ns1/pods/pod": {
				409,
				[]byte{},
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()

	gc := setupGC(t, clientConfig)
	defer close(gc.stop)

	dependents := []*node{
		{
			identity: objectReference{
				OwnerReference: metav1.OwnerReference{
					Kind:       "Pod",
					APIVersion: "v1",
					Name:       "pod",
				},
				Namespace: "ns1",
			},
		},
	}
	err := gc.orphanDependents(logger, objectReference{}, dependents)
	expected := `the server reported a conflict`
	if err == nil || !strings.Contains(err.Error(), expected) {
		if err != nil {
			t.Errorf("expected error to contain text %q, got %q", expected, err.Error())
		} else {
			t.Errorf("expected error to contain text %q, got nil", expected)
		}
	}
}

// TestGetDeletableResources ensures GetDeletableResources always returns
// something usable regardless of discovery output.
func TestGetDeletableResources(t *testing.T) {
	tests := map[string]struct {
		serverResources    []*metav1.APIResourceList
		err                error
		deletableResources map[schema.GroupVersionResource]struct{}
	}{
		"no error": {
			serverResources: []*metav1.APIResourceList{
				{
					// Valid GroupVersion
					GroupVersion: "apps/v1",
					APIResources: []metav1.APIResource{
						{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
						{Name: "services", Namespaced: true, Kind: "Service"},
					},
				},
				{
					// Invalid GroupVersion, should be ignored
					GroupVersion: "foo//whatever",
					APIResources: []metav1.APIResource{
						{Name: "bars", Namespaced: true, Kind: "Bar", Verbs: metav1.Verbs{"delete", "list", "watch"}},
					},
				},
				{
					// Valid GroupVersion, missing required verbs, should be ignored
					GroupVersion: "acme/v1",
					APIResources: []metav1.APIResource{
						{Name: "widgets", Namespaced: true, Kind: "Widget", Verbs: metav1.Verbs{"delete"}},
					},
				},
			},
			err: nil,
			deletableResources: map[schema.GroupVersionResource]struct{}{
				{Group: "apps", Version: "v1", Resource: "pods"}: {},
			},
		},
		"nonspecific failure, includes usable results": {
			serverResources: []*metav1.APIResourceList{
				{
					GroupVersion: "apps/v1",
					APIResources: []metav1.APIResource{
						{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
						{Name: "services", Namespaced: true, Kind: "Service"},
					},
				},
			},
			err: fmt.Errorf("internal error"),
			deletableResources: map[schema.GroupVersionResource]struct{}{
				{Group: "apps", Version: "v1", Resource: "pods"}: {},
			},
		},
		"partial discovery failure, includes usable results": {
			serverResources: []*metav1.APIResourceList{
				{
					GroupVersion: "apps/v1",
					APIResources: []metav1.APIResource{
						{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
						{Name: "services", Namespaced: true, Kind: "Service"},
					},
				},
			},
			err: &discovery.ErrGroupDiscoveryFailed{
				Groups: map[schema.GroupVersion]error{
					{Group: "foo", Version: "v1"}: fmt.Errorf("discovery failure"),
				},
			},
			deletableResources: map[schema.GroupVersionResource]struct{}{
				{Group: "apps", Version: "v1", Resource: "pods"}: {},
			},
		},
		"discovery failure, no results": {
			serverResources:    nil,
			err:                fmt.Errorf("internal error"),
			deletableResources: map[schema.GroupVersionResource]struct{}{},
		},
	}

	logger, _ := ktesting.NewTestContext(t)
	for name, test := range tests {
		t.Logf("testing %q", name)
		client := &fakeServerResources{
			PreferredResources: test.serverResources,
			Error:              test.err,
		}
		actual, actualErr := GetDeletableResources(logger, client)
		if !reflect.DeepEqual(test.deletableResources, actual) {
			t.Errorf("expected resources:\n%v\ngot:\n%v", test.deletableResources, actual)
		}
		if !reflect.DeepEqual(test.err, actualErr) {
			t.Errorf("expected error:\n%v\ngot:\n%v", test.err, actualErr)
		}
	}
}

// TestGarbageCollectorSync ensures that a discovery client error
// will not cause the garbage collector to block indefinitely.
func TestGarbageCollectorSync(t *testing.T) {
	serverResources := []*metav1.APIResourceList{
		{
			GroupVersion: "v1",
			APIResources: []metav1.APIResource{
				{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
			},
		},
		{
			GroupVersion: "apps/v1",
			APIResources: []metav1.APIResource{
				{Name: "deployments", Namespaced: true, Kind: "Deployment", Verbs: metav1.Verbs{"delete", "list", "watch"}},
			},
		},
	}
	appsV1Error := &discovery.ErrGroupDiscoveryFailed{Groups: map[schema.GroupVersion]error{{Group: "apps", Version: "v1"}: fmt.Errorf(":-/")}}

	unsyncableServerResources := []*metav1.APIResourceList{
		{
			GroupVersion: "v1",
			APIResources: []metav1.APIResource{
				{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
				{Name: "secrets", Namespaced: true, Kind: "Secret", Verbs: metav1.Verbs{"delete", "list", "watch"}},
			},
		},
	}
	fakeDiscoveryClient := &fakeServerResources{
		PreferredResources: serverResources,
		Error:              nil,
		Lock:               sync.Mutex{},
		InterfaceUsedCount: 0,
	}

	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/pods": {
				200,
				[]byte("{}"),
			},
			"GET" + "/apis/apps/v1/deployments": {
				200,
				[]byte("{}"),
			},
			"GET" + "/api/v1/secrets": {
				404,
				[]byte("{}"),
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	clientConfig.ContentConfig.NegotiatedSerializer = nil
	client, err := kubernetes.NewForConfig(clientConfig)
	if err != nil {
		t.Fatal(err)
	}

	tweakableRM := meta.NewDefaultRESTMapper(nil)
	tweakableRM.AddSpecific(schema.GroupVersionKind{Version: "v1", Kind: "Pod"}, schema.GroupVersionResource{Version: "v1", Resource: "pods"}, schema.GroupVersionResource{Version: "v1", Resource: "pod"}, meta.RESTScopeNamespace)
	tweakableRM.AddSpecific(schema.GroupVersionKind{Version: "v1", Kind: "Secret"}, schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, schema.GroupVersionResource{Version: "v1", Resource: "secret"}, meta.RESTScopeNamespace)
	tweakableRM.AddSpecific(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployment"}, meta.RESTScopeNamespace)
	rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}
	metadataClient, err := metadata.NewForConfig(clientConfig)
	if err != nil {
		t.Fatal(err)
	}

	sharedInformers := informers.NewSharedInformerFactory(client, 0)
	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	gc, err := NewGarbageCollector(client, metadataClient, rm, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
	if err != nil {
		t.Fatal(err)
	}

	_, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	go gc.Run(ctx, 1)
	// The pseudo-code of GarbageCollector.Sync():
	// GarbageCollector.Sync(client, period, stopCh):
	//    wait.Until() loops with `period` until the `stopCh` is closed:
	//        wait.PollImmediateUntil() loops every 100ms (hardcoded) until the `stopCh` is closed:
	//            GetDeletableResources()
	//            gc.resyncMonitors()
	//            cache.WaitForNamedCacheSync() loops with `syncedPollPeriod` (hardcoded to 100ms), until either its stop channel is closed after `period`, or all caches synced.
	//
	// Setting the period to 200ms allows the WaitForCacheSync() to check
	// for cache sync ~2 times in every wait.PollImmediateUntil() loop.
	//
	// The 1s sleep in the test allows GetDeletableResources and
	// gc.resyncMonitors to run ~5 times to ensure the changes to the
	// fakeDiscoveryClient are picked up.
	go gc.Sync(ctx, fakeDiscoveryClient, 200*time.Millisecond)

	// Wait until the sync discovers the initial resources
	time.Sleep(1 * time.Second)

	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
	if err != nil {
		t.Fatalf("Expected garbagecollector.Sync to be running but it is blocked: %v", err)
	}
	assertMonitors(t, gc, "pods", "deployments")

	// Simulate the discovery client returning an error
	fakeDiscoveryClient.setPreferredResources(nil, fmt.Errorf("error calling discoveryClient.ServerPreferredResources()"))

	// Wait until sync discovers the change
	time.Sleep(1 * time.Second)
	// No monitor changes
	assertMonitors(t, gc, "pods", "deployments")

	// Remove the error from being returned and see if the garbage collector sync is still working
	fakeDiscoveryClient.setPreferredResources(serverResources, nil)

	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
	if err != nil {
		t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
	}
	assertMonitors(t, gc, "pods", "deployments")

	// Simulate the discovery client returning a resource the restmapper can resolve, but will not sync caches
	fakeDiscoveryClient.setPreferredResources(unsyncableServerResources, nil)

	// Wait until sync discovers the change
	time.Sleep(1 * time.Second)
	assertMonitors(t, gc, "pods", "secrets")

	// Put the resources back to normal and ensure garbage collector sync recovers
	fakeDiscoveryClient.setPreferredResources(serverResources, nil)

	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
	if err != nil {
		t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
	}
	assertMonitors(t, gc, "pods", "deployments")

	// Partial discovery failure
	fakeDiscoveryClient.setPreferredResources(unsyncableServerResources, appsV1Error)
	// Wait until sync discovers the change
	time.Sleep(1 * time.Second)
	// Deployments monitor kept
	assertMonitors(t, gc, "pods", "deployments", "secrets")

	// Put the resources back to normal and ensure garbage collector sync recovers
	fakeDiscoveryClient.setPreferredResources(serverResources, nil)
	// Wait until sync discovers the change
	time.Sleep(1 * time.Second)
	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
	if err != nil {
		t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
	}
	// Unsyncable monitor removed
	assertMonitors(t, gc, "pods", "deployments")
}

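// assertMonitors verifies that the garbage collector has exactly one monitor
// per named resource.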
func assertMonitors(t *testing.T, gc *GarbageCollector, resources ...string) {
	t.Helper()
	expected := sets.NewString(resources...)
	actual := sets.NewString()
	for m := range gc.dependencyGraphBuilder.monitors {
		actual.Insert(m.Resource)
	}
	if !actual.Equal(expected) {
		t.Fatalf("expected monitors %v, got %v", expected.List(), actual.List())
	}
}

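// expectSyncNotBlocked returns an error if the discovery client has stopped
// being polled or the workerLock cannot be acquired, both signs that Sync is
// blocked.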
func expectSyncNotBlocked(fakeDiscoveryClient *fakeServerResources, workerLock *sync.RWMutex) error {
	before := fakeDiscoveryClient.getInterfaceUsedCount()
	t := 1 * time.Second
	time.Sleep(t)
	after := fakeDiscoveryClient.getInterfaceUsedCount()
	if before == after {
		return fmt.Errorf("discoveryClient.ServerPreferredResources() called %d times over %v", after-before, t)
	}

	workerLockAcquired := make(chan struct{})
	go func() {
		workerLock.Lock()
		defer workerLock.Unlock()
		close(workerLockAcquired)
	}()
	select {
	case <-workerLockAcquired:
		return nil
	case <-time.After(t):
		return fmt.Errorf("workerLock blocked for at least %v", t)
	}
}

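// fakeServerResources implements the discovery interface consumed by
// GetDeletableResources, counting ServerPreferredResources calls so tests can
// tell whether Sync is still polling discovery.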
type fakeServerResources struct {
	PreferredResources []*metav1.APIResourceList
	Error              error
	Lock               sync.Mutex
	InterfaceUsedCount int
}

func (*fakeServerResources) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
	return nil, nil
}

func (*fakeServerResources) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) {
	return nil, nil, nil
}

func (f *fakeServerResources) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	f.InterfaceUsedCount++
	return f.PreferredResources, f.Error
}

func (f *fakeServerResources) setPreferredResources(resources []*metav1.APIResourceList, err error) {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	f.PreferredResources = resources
	f.Error = err
}

func (f *fakeServerResources) getInterfaceUsedCount() int {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	return f.InterfaceUsedCount
}

func (*fakeServerResources) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
	return nil, nil
}

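// TestConflictingData walks the graph builder and attemptToDelete workers
// through scenarios where the observed objects conflict with the references
// made to them (wrong namespaces, non-preferred or no-longer-served
// apiVersions).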
  1051  func TestConflictingData(t *testing.T) {
  1052  	pod1ns1 := makeID("v1", "Pod", "ns1", "podname1", "poduid1")
  1053  	pod2ns1 := makeID("v1", "Pod", "ns1", "podname2", "poduid2")
  1054  	pod2ns2 := makeID("v1", "Pod", "ns2", "podname2", "poduid2")
  1055  	node1 := makeID("v1", "Node", "", "nodename", "nodeuid1")
  1056  
  1057  	role1v1beta1 := makeID("rbac.authorization.k8s.io/v1beta1", "Role", "ns1", "role1", "roleuid1")
  1058  	role1v1 := makeID("rbac.authorization.k8s.io/v1", "Role", "ns1", "role1", "roleuid1")
  1059  
  1060  	deployment1apps := makeID("apps/v1", "Deployment", "ns1", "deployment1", "deploymentuid1")
  1061  	deployment1extensions := makeID("extensions/v1beta1", "Deployment", "ns1", "deployment1", "deploymentuid1") // not served, still referenced
  1062  
  1063  	// when a reference is made to node1 from a namespaced resource, the virtual node inserted has namespace coordinates
  1064  	node1WithNamespace := makeID("v1", "Node", "ns1", "nodename", "nodeuid1")
  1065  
  1066  	// when a reference is made to pod1 from a cluster-scoped resource, the virtual node inserted has no namespace
  1067  	pod1nonamespace := makeID("v1", "Pod", "", "podname1", "poduid1")
  1068  
  1069  	badSecretReferenceWithDeploymentUID := makeID("v1", "Secret", "ns1", "secretname", string(deployment1apps.UID))
  1070  	badChildPod := makeID("v1", "Pod", "ns1", "badpod", "badpoduid")
  1071  	goodChildPod := makeID("v1", "Pod", "ns1", "goodpod", "goodpoduid")
  1072  
  1073  	var testScenarios = []struct {
  1074  		name           string
  1075  		initialObjects []runtime.Object
  1076  		steps          []step
  1077  	}{
  1078  		{
  1079  			name: "good child in ns1 -> cluster-scoped owner",
  1080  			steps: []step{
  1081  				// setup
  1082  				createObjectInClient("", "v1", "nodes", "", makeMetadataObj(node1)),
  1083  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, node1)),
  1084  				// observe namespaced child with not-yet-observed cluster-scoped parent
  1085  				processEvent(makeAddEvent(pod1ns1, node1)),
  1086  				assertState(state{
  1087  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(node1)), makeNode(node1WithNamespace, virtual)}, // virtual node1 (matching child namespace)
  1088  					pendingAttemptToDelete: []*node{makeNode(node1WithNamespace, virtual)},                                       // virtual node1 queued for attempted delete
  1089  				}),
  1090  				// handle queued delete of virtual node
  1091  				processAttemptToDelete(1),
  1092  				assertState(state{
  1093  					clientActions:          []string{"get /v1, Resource=nodes name=nodename"},
  1094  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(node1)), makeNode(node1WithNamespace, virtual)}, // virtual node1 (matching child namespace)
  1095  					pendingAttemptToDelete: []*node{makeNode(node1WithNamespace, virtual)},                                       // virtual node1 still not observed, got requeued
  1096  				}),
  1097  				// observe cluster-scoped parent
  1098  				processEvent(makeAddEvent(node1)),
  1099  				assertState(state{
  1100  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(node1)), makeNode(node1)}, // node1 switched to observed, fixed namespace coordinate
  1101  					pendingAttemptToDelete: []*node{makeNode(node1WithNamespace, virtual)},                 // virtual node1 queued for attempted delete
  1102  				}),
  1103  				// handle queued delete of virtual node
  1104  				// final state: child and parent present in graph, no queued actions
  1105  				processAttemptToDelete(1),
  1106  				assertState(state{
  1107  					graphNodes: []*node{makeNode(pod1ns1, withOwners(node1)), makeNode(node1)},
  1108  				}),
  1109  			},
  1110  		},
  1111  		// child in namespace A with owner reference to namespaced type in namespace B
  1112  		// * should be deleted immediately
  1113  		// * event should be logged in namespace A with involvedObject of bad-child indicating the error
  1114  		{
  1115  			name: "bad child in ns1 -> owner in ns2 (child first)",
  1116  			steps: []step{
  1117  				// 0,1: setup
  1118  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, pod2ns1)),
  1119  				createObjectInClient("", "v1", "pods", "ns2", makeMetadataObj(pod2ns2)),
  1120  				// 2,3: observe namespaced child with not-yet-observed namespace-scoped parent
  1121  				processEvent(makeAddEvent(pod1ns1, pod2ns2)),
  1122  				assertState(state{
  1123  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(pod2ns2)), makeNode(pod2ns1, virtual)}, // virtual pod2 (matching child namespace)
  1124  					pendingAttemptToDelete: []*node{makeNode(pod2ns1, virtual)},                                         // virtual pod2 queued for attempted delete
  1125  				}),
  1126  				// 4,5: observe parent
  1127  				processEvent(makeAddEvent(pod2ns2)),
  1128  				assertState(state{
  1129  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(pod2ns2)), makeNode(pod2ns2)}, // pod2 is no longer virtual, namespace coordinate is corrected
  1130  					pendingAttemptToDelete: []*node{makeNode(pod2ns1, virtual), makeNode(pod1ns1)},             // virtual pod2 still queued for attempted delete, bad child pod1 queued because it disagreed with observed parent
  1131  					events:                 []string{`Warning OwnerRefInvalidNamespace ownerRef [v1/Pod, namespace: ns1, name: podname2, uid: poduid2] does not exist in namespace "ns1" involvedObject{kind=Pod,apiVersion=v1}`},
  1132  				}),
  1133  				// 6,7: handle queued delete of virtual parent
  1134  				processAttemptToDelete(1),
  1135  				assertState(state{
  1136  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(pod2ns2)), makeNode(pod2ns2)},
  1137  					pendingAttemptToDelete: []*node{makeNode(pod1ns1)}, // bad child pod1 queued because it disagreed with observed parent
  1138  				}),
  1139  				// 8,9: handle queued delete of bad child
  1140  				processAttemptToDelete(1),
  1141  				assertState(state{
  1142  					clientActions: []string{
  1143  						"get /v1, Resource=pods ns=ns1 name=podname1",    // lookup of pod1 pre-delete
  1144  						"get /v1, Resource=pods ns=ns1 name=podname2",    // verification bad parent reference is absent
  1145  						"delete /v1, Resource=pods ns=ns1 name=podname1", // pod1 delete
  1146  					},
  1147  					graphNodes:       []*node{makeNode(pod1ns1, withOwners(pod2ns2)), makeNode(pod2ns2)},
  1148  					absentOwnerCache: []objectReference{pod2ns1}, // cached absence of bad parent
  1149  				}),
  1150  				// 10,11: observe delete issued in step 8
  1151  				// final state: parent present in graph, no queued actions
  1152  				processEvent(makeDeleteEvent(pod1ns1)),
  1153  				assertState(state{
  1154  					graphNodes:       []*node{makeNode(pod2ns2)}, // only good parent remains
  1155  					absentOwnerCache: []objectReference{pod2ns1}, // cached absence of bad parent
  1156  				}),
  1157  			},
  1158  		},
  1159  		{
  1160  			name: "bad child in ns1 -> owner in ns2 (owner first)",
  1161  			steps: []step{
  1162  				// 0,1: setup
  1163  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, pod2ns1)),
  1164  				createObjectInClient("", "v1", "pods", "ns2", makeMetadataObj(pod2ns2)),
  1165  				// 2,3: observe parent
  1166  				processEvent(makeAddEvent(pod2ns2)),
  1167  				assertState(state{
  1168  					graphNodes: []*node{makeNode(pod2ns2)},
  1169  				}),
  1170  				// 4,5: observe namespaced child with invalid cross-namespace reference to parent
  1171  				processEvent(makeAddEvent(pod1ns1, pod2ns1)),
  1172  				assertState(state{
  1173  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(pod2ns1)), makeNode(pod2ns2)},
  1174  					pendingAttemptToDelete: []*node{makeNode(pod1ns1)}, // bad child queued for attempted delete
  1175  					events:                 []string{`Warning OwnerRefInvalidNamespace ownerRef [v1/Pod, namespace: ns1, name: podname2, uid: poduid2] does not exist in namespace "ns1" involvedObject{kind=Pod,apiVersion=v1}`},
  1176  				}),
  1177  				// 6,7: handle queued delete of bad child
  1178  				processAttemptToDelete(1),
  1179  				assertState(state{
  1180  					clientActions: []string{
  1181  						"get /v1, Resource=pods ns=ns1 name=podname1",    // lookup of pod1 pre-delete
  1182  						"get /v1, Resource=pods ns=ns1 name=podname2",    // verification bad parent reference is absent
  1183  						"delete /v1, Resource=pods ns=ns1 name=podname1", // pod1 delete
  1184  					},
  1185  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(pod2ns1)), makeNode(pod2ns2)},
  1186  					pendingAttemptToDelete: []*node{},
  1187  					absentOwnerCache:       []objectReference{pod2ns1}, // cached absence of bad parent
  1188  				}),
  1189  				// 8,9: observe delete issued in step 6
  1190  				// final state: parent present in graph, no queued actions
  1191  				processEvent(makeDeleteEvent(pod1ns1)),
  1192  				assertState(state{
  1193  					graphNodes:       []*node{makeNode(pod2ns2)}, // only good parent remains
  1194  					absentOwnerCache: []objectReference{pod2ns1}, // cached absence of bad parent
  1195  				}),
  1196  			},
  1197  		},
  1198  		// child that is cluster-scoped with owner reference to namespaced type in namespace B
  1199  		// * should not be deleted
  1200  		// * event should be logged in namespace kube-system with involvedObject of bad-child indicating the error
  1201  		{
  1202  			name: "bad cluster-scoped child -> owner in ns1 (child first)",
  1203  			steps: []step{
  1204  				// setup
  1205  				createObjectInClient("", "v1", "nodes", "", makeMetadataObj(node1, pod1ns1)),
  1206  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1)),
  1207  				// 2,3: observe cluster-scoped child with not-yet-observed namespaced parent
  1208  				processEvent(makeAddEvent(node1, pod1ns1)),
  1209  				assertState(state{
  1210  					graphNodes:             []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1nonamespace, virtual)}, // virtual pod1 (with no namespace)
  1211  					pendingAttemptToDelete: []*node{makeNode(pod1nonamespace, virtual)},                                               // virtual pod1 queued for attempted delete
  1212  				}),
  1213  				// 4,5: handle queued delete of virtual pod1
  1214  				processAttemptToDelete(1),
  1215  				assertState(state{
  1216  					graphNodes:             []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1nonamespace, virtual)}, // virtual pod1 (with no namespace)
  1217  					pendingAttemptToDelete: []*node{},                                                                                 // namespace-scoped virtual object without a namespace coordinate not re-queued
  1218  				}),
  1219  				// 6,7: observe namespace-scoped parent
  1220  				processEvent(makeAddEvent(pod1ns1)),
  1221  				assertState(state{
  1222  					graphNodes:             []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1ns1)}, // pod1 namespace coordinate corrected, made non-virtual
  1223  					events:                 []string{`Warning OwnerRefInvalidNamespace ownerRef [v1/Pod, namespace: , name: podname1, uid: poduid1] does not exist in namespace "" involvedObject{kind=Node,apiVersion=v1}`},
  1224  					pendingAttemptToDelete: []*node{makeNode(node1, withOwners(pod1ns1))}, // bad cluster-scoped child added to attemptToDelete queue
  1225  				}),
  1226  				// 8,9: handle queued attempted delete of bad cluster-scoped child
  1227  				// final state: parent and child present in graph, no queued actions
  1228  				processAttemptToDelete(1),
  1229  				assertState(state{
  1230  					clientActions: []string{
  1231  						"get /v1, Resource=nodes name=nodename", // lookup of node pre-delete
  1232  					},
  1233  					graphNodes: []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1ns1)},
  1234  				}),
  1235  			},
  1236  		},
  1237  		{
  1238  			name: "bad cluster-scoped child -> owner in ns1 (owner first)",
  1239  			steps: []step{
  1240  				// setup
  1241  				createObjectInClient("", "v1", "nodes", "", makeMetadataObj(node1, pod1ns1)),
  1242  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1)),
  1243  				// 2,3: observe namespace-scoped parent
  1244  				processEvent(makeAddEvent(pod1ns1)),
  1245  				assertState(state{
  1246  					graphNodes: []*node{makeNode(pod1ns1)},
  1247  				}),
  1248  				// 4,5: observe cluster-scoped child
  1249  				processEvent(makeAddEvent(node1, pod1ns1)),
  1250  				assertState(state{
  1251  					graphNodes:             []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1ns1)},
  1252  					events:                 []string{`Warning OwnerRefInvalidNamespace ownerRef [v1/Pod, namespace: , name: podname1, uid: poduid1] does not exist in namespace "" involvedObject{kind=Node,apiVersion=v1}`},
  1253  					pendingAttemptToDelete: []*node{makeNode(node1, withOwners(pod1ns1))}, // bad cluster-scoped child added to attemptToDelete queue
  1254  				}),
  1255  				// 6,7: handle queued attempted delete of bad cluster-scoped child
  1256  				// final state: parent and child present in graph, no queued actions
  1257  				processAttemptToDelete(1),
  1258  				assertState(state{
  1259  					clientActions: []string{
  1260  						"get /v1, Resource=nodes name=nodename", // lookup of node pre-delete
  1261  					},
  1262  					graphNodes: []*node{makeNode(node1, withOwners(pod1nonamespace)), makeNode(pod1ns1)},
  1263  				}),
  1264  			},
  1265  		},
  1266  		// child pointing at non-preferred still-served apiVersion of parent object (e.g. rbac/v1beta1)
  1267  		// * should not be deleted prematurely
  1268  		// * should not repeatedly poll attemptToDelete while waiting
  1269  		// * should be deleted when the actual parent is deleted
  1270  		{
  1271  			name: "good child -> existing owner with non-preferred accessible API version",
  1272  			steps: []step{
  1273  				// setup
  1274  				createObjectInClient("rbac.authorization.k8s.io", "v1", "roles", "ns1", makeMetadataObj(role1v1)),
  1275  				createObjectInClient("rbac.authorization.k8s.io", "v1beta1", "roles", "ns1", makeMetadataObj(role1v1beta1)),
  1276  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, role1v1beta1)),
  1277  				// 3,4: observe child
  1278  				processEvent(makeAddEvent(pod1ns1, role1v1beta1)),
  1279  				assertState(state{
  1280  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1beta1, virtual)},
  1281  					pendingAttemptToDelete: []*node{makeNode(role1v1beta1, virtual)}, // virtual parent enqueued for delete attempt
  1282  				}),
  1283  				// 5,6: handle queued attempted delete of virtual parent
  1284  				processAttemptToDelete(1),
  1285  				assertState(state{
  1286  					clientActions: []string{
  1287  						"get rbac.authorization.k8s.io/v1beta1, Resource=roles ns=ns1 name=role1", // lookup of node pre-delete
  1288  					},
  1289  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1beta1, virtual)},
  1290  					pendingAttemptToDelete: []*node{makeNode(role1v1beta1, virtual)}, // not yet observed, still in the attemptToDelete queue
  1291  				}),
  1292  				// 7,8: observe parent via v1
  1293  				processEvent(makeAddEvent(role1v1)),
  1294  				assertState(state{
  1295  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1)},               // parent version/virtual state gets corrected
  1296  					pendingAttemptToDelete: []*node{makeNode(role1v1beta1, virtual), makeNode(pod1ns1, withOwners(role1v1beta1))}, // virtual parent and mismatched child enqueued for delete attempt
  1297  				}),
  1298  				// 9,10: process attemptToDelete
  1299  				// virtual node dropped from attemptToDelete with no further action because the real node has been observed now
  1300  				processAttemptToDelete(1),
  1301  				assertState(state{
  1302  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1)},
  1303  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(role1v1beta1))}, // mismatched child enqueued for delete attempt
  1304  				}),
  1305  				// 11,12: process attemptToDelete for mismatched child
  1306  				processAttemptToDelete(1),
  1307  				assertState(state{
  1308  					clientActions: []string{
  1309  						"get /v1, Resource=pods ns=ns1 name=podname1",                             // lookup of child pre-delete
  1310  						"get rbac.authorization.k8s.io/v1beta1, Resource=roles ns=ns1 name=role1", // verifying parent is solid
  1311  					},
  1312  					graphNodes: []*node{makeNode(pod1ns1, withOwners(role1v1beta1)), makeNode(role1v1)},
  1313  				}),
  1314  				// 13,14: teardown
  1315  				deleteObjectFromClient("rbac.authorization.k8s.io", "v1", "roles", "ns1", "role1"),
  1316  				deleteObjectFromClient("rbac.authorization.k8s.io", "v1beta1", "roles", "ns1", "role1"),
  1317  				// 15,16: observe delete via v1
  1318  				processEvent(makeDeleteEvent(role1v1)),
  1319  				assertState(state{
  1320  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(role1v1beta1))}, // only child remains
  1321  					absentOwnerCache:       []objectReference{role1v1},                           // cached absence of parent via v1
  1322  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(role1v1beta1))},
  1323  				}),
  1324  				// 17,18: process attemptToDelete for child
  1325  				processAttemptToDelete(1),
  1326  				assertState(state{
  1327  					clientActions: []string{
  1328  						"get /v1, Resource=pods ns=ns1 name=podname1",                             // lookup of child pre-delete
  1329  						"get rbac.authorization.k8s.io/v1beta1, Resource=roles ns=ns1 name=role1", // verifying parent is solid
  1330  						"delete /v1, Resource=pods ns=ns1 name=podname1",
  1331  					},
  1332  					absentOwnerCache: []objectReference{role1v1, role1v1beta1}, // cached absence of v1beta1 role
  1333  					graphNodes:       []*node{makeNode(pod1ns1, withOwners(role1v1beta1))},
  1334  				}),
  1335  				// 19,20: observe delete issued in step 17
  1336  				// final state: empty graph, no queued actions
  1337  				processEvent(makeDeleteEvent(pod1ns1)),
  1338  				assertState(state{
  1339  					absentOwnerCache: []objectReference{role1v1, role1v1beta1},
  1340  				}),
  1341  			},
  1342  		},
  1343  		// child pointing at no-longer-served apiVersion of still-existing parent object (e.g. extensions/v1beta1 deployment)
  1344  		// * should not be deleted (this is indistinguishable from referencing an unknown kind/version)
  1345  		// * virtual parent should not repeatedly poll attemptToDelete once real parent is observed
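        		// Mechanics: the restmapper has no mapping for the no-longer-served apiVersion,
        		// so a delete attempt cannot resolve the ownerRef; the child is requeued
        		// rather than deleted, because a stale reference is indistinguishable from a
        		// reference to a kind/version the server never knew about.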
  1346  		{
  1347  			name: "child -> existing owner with inaccessible API version (child first)",
  1348  			steps: []step{
  1349  				// setup
  1350  				createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)),
  1351  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, deployment1extensions)),
  1352  				// 2,3: observe child
  1353  				processEvent(makeAddEvent(pod1ns1, deployment1extensions)),
  1354  				assertState(state{
  1355  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)},
  1356  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)}, // virtual parent enqueued for delete attempt
  1357  				}),
  1358  				// 4,5: handle queued attempted delete of virtual parent
  1359  				processAttemptToDelete(1),
  1360  				assertState(state{
  1361  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)},
  1362  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)}, // requeued on restmapper error
  1363  				}),
  1364  				// 6,7: observe parent via v1
  1365  				processEvent(makeAddEvent(deployment1apps)),
  1366  				assertState(state{
  1367  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1apps)},                // parent version/virtual state gets corrected
  1368  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual), makeNode(pod1ns1, withOwners(deployment1extensions))}, // virtual parent and mismatched child enqueued for delete attempt
  1369  				}),
  1370  				// 8,9: process attemptToDelete
  1371  				// virtual node dropped from attemptToDelete with no further action because the real node has been observed now
  1372  				processAttemptToDelete(1),
  1373  				assertState(state{
  1374  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1apps)},
  1375  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child enqueued for delete attempt
  1376  				}),
  1377  				// 10,11: process attemptToDelete for mismatched child
  1378  				processAttemptToDelete(1),
  1379  				assertState(state{
  1380  					clientActions: []string{
  1381  						"get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete
  1382  					},
  1383  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1apps)},
  1384  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child still enqueued - restmapper error
  1385  				}),
  1386  				// 12: teardown
  1387  				deleteObjectFromClient("apps", "v1", "deployments", "ns1", "deployment1"),
  1388  				// 13,14: observe delete via v1
  1389  				processEvent(makeDeleteEvent(deployment1apps)),
  1390  				assertState(state{
  1391  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // only child remains
  1392  					absentOwnerCache:       []objectReference{deployment1apps},                            // cached absence of parent via v1
  1393  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))},
  1394  				}),
  1395  				// 15,16: process attemptToDelete for child
  1396  				processAttemptToDelete(1),
  1397  				assertState(state{
  1398  					clientActions: []string{
  1399  						"get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete
  1400  					},
  1401  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // only child remains
  1402  					absentOwnerCache:       []objectReference{deployment1apps},
  1403  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child still enqueued - restmapper error
  1404  				}),
  1405  			},
  1406  		},
  1407  		{
  1408  			name: "child -> existing owner with inaccessible API version (owner first)",
  1409  			steps: []step{
  1410  				// setup
  1411  				createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)),
  1412  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, deployment1extensions)),
  1413  				// 2,3: observe parent via v1
  1414  				processEvent(makeAddEvent(deployment1apps)),
  1415  				assertState(state{
  1416  					graphNodes: []*node{makeNode(deployment1apps)},
  1417  				}),
  1418  				// 4,5: observe child
  1419  				processEvent(makeAddEvent(pod1ns1, deployment1extensions)),
  1420  				assertState(state{
  1421  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1apps)},
  1422  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child enqueued for delete attempt
  1423  				}),
  1424  				// 6,7: process attemptToDelete for mismatched child
  1425  				processAttemptToDelete(1),
  1426  				assertState(state{
  1427  					clientActions: []string{
  1428  						"get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete
  1429  					},
  1430  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1apps)},
  1431  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child still enqueued - restmapper error
  1432  				}),
  1433  				// 8: teardown
  1434  				deleteObjectFromClient("apps", "v1", "deployments", "ns1", "deployment1"),
  1435  				// 9,10: observe delete via v1
  1436  				processEvent(makeDeleteEvent(deployment1apps)),
  1437  				assertState(state{
  1438  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // only child remains
  1439  					absentOwnerCache:       []objectReference{deployment1apps},                            // cached absence of parent via v1
  1440  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))},
  1441  				}),
  1442  				// 11,12: process attemptToDelete for child
  1443  				// final state: child with unresolvable ownerRef remains, queued in pendingAttemptToDelete
  1444  				processAttemptToDelete(1),
  1445  				assertState(state{
  1446  					clientActions: []string{
  1447  						"get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete
  1448  					},
  1449  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // only child remains
  1450  					absentOwnerCache:       []objectReference{deployment1apps},
  1451  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child still enqueued - restmapper error
  1452  				}),
  1453  			},
  1454  		},
  1455  		// child pointing at no-longer-served apiVersion of no-longer-existing parent object (e.g. extensions/v1beta1 deployment)
  1456  		// * should not be deleted (this is indistinguishable from referencing an unknown kind/version)
  1457  		// * should repeatedly poll attemptToDelete
  1458  		// * should not block deletion of legitimate children of missing deployment
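        		// Mechanics: absence can only be verified through a served apiVersion, so the
        		// virtual parent at the inaccessible coordinates is never confirmed absent and
        		// keeps cycling through attemptToDelete, while a sibling referencing the parent
        		// via a served apiVersion is verified absent and collected normally.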
  1459  		{
  1460  			name: "child -> non-existent owner with inaccessible API version (inaccessible parent apiVersion first)",
  1461  			steps: []step{
  1462  				// setup
  1463  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, deployment1extensions)),
  1464  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod2ns1, deployment1apps)),
  1465  				// 2,3: observe child pointing at no-longer-served apiVersion
  1466  				processEvent(makeAddEvent(pod1ns1, deployment1extensions)),
  1467  				assertState(state{
  1468  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)},
  1469  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)}, // virtual parent enqueued for delete attempt
  1470  				}),
  1471  				// 4,5: observe child pointing at served apiVersion where owner does not exist
  1472  				processEvent(makeAddEvent(pod2ns1, deployment1apps)),
  1473  				assertState(state{
  1474  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual), makeNode(pod2ns1, withOwners(deployment1apps))},
  1475  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual), makeNode(pod2ns1, withOwners(deployment1apps))}, // mismatched child enqueued for delete attempt
  1476  				}),
  1477  				// 6,7: handle attempt to delete virtual parent for inaccessible apiVersion
  1478  				processAttemptToDelete(1),
  1479  				assertState(state{
  1480  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual), makeNode(pod2ns1, withOwners(deployment1apps))},
  1481  					pendingAttemptToDelete: []*node{makeNode(pod2ns1, withOwners(deployment1apps)), makeNode(deployment1extensions, virtual)}, // inaccessible parent requeued to end
  1482  				}),
  1483  				// 8,9: handle attempt to delete mismatched child
  1484  				processAttemptToDelete(1),
  1485  				assertState(state{
  1486  					clientActions: []string{
  1487  						"get /v1, Resource=pods ns=ns1 name=podname2",               // lookup of child pre-delete
  1488  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of parent
  1489  						"delete /v1, Resource=pods ns=ns1 name=podname2",            // delete child
  1490  					},
  1491  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual), makeNode(pod2ns1, withOwners(deployment1apps))},
  1492  					absentOwnerCache:       []objectReference{deployment1apps},                // verifiably absent parent remembered
  1493  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)}, // mismatched child with verifiably absent parent deleted
  1494  				}),
  1495  				// 10,11: observe delete issued in step 8
  1496  				processEvent(makeDeleteEvent(pod2ns1)),
  1497  				assertState(state{
  1498  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)},
  1499  					absentOwnerCache:       []objectReference{deployment1apps},
  1500  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)},
  1501  				}),
  1502  				// 12,13: final state: inaccessible parent requeued in attemptToDelete
  1503  				processAttemptToDelete(1),
  1504  				assertState(state{
  1505  					graphNodes:             []*node{makeNode(pod1ns1, withOwners(deployment1extensions)), makeNode(deployment1extensions, virtual)},
  1506  					absentOwnerCache:       []objectReference{deployment1apps},
  1507  					pendingAttemptToDelete: []*node{makeNode(deployment1extensions, virtual)},
  1508  				}),
  1509  			},
  1510  		},
  1511  
  1512  		{
  1513  			name: "child -> non-existent owner with inaccessible API version (accessible parent apiVersion first)",
  1514  			steps: []step{
  1515  				// setup
  1516  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1, deployment1extensions)),
  1517  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod2ns1, deployment1apps)),
  1518  				// 2,3: observe child pointing at served apiVersion where owner does not exist
  1519  				processEvent(makeAddEvent(pod2ns1, deployment1apps)),
  1520  				assertState(state{
  1521  					graphNodes: []*node{
  1522  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1523  						makeNode(deployment1apps, virtual)},
  1524  					pendingAttemptToDelete: []*node{
  1525  						makeNode(deployment1apps, virtual)}, // virtual parent enqueued for delete attempt
  1526  				}),
  1527  				// 4,5: observe child pointing at no-longer-served apiVersion
  1528  				processEvent(makeAddEvent(pod1ns1, deployment1extensions)),
  1529  				assertState(state{
  1530  					graphNodes: []*node{
  1531  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1532  						makeNode(deployment1apps, virtual),
  1533  						makeNode(pod1ns1, withOwners(deployment1extensions))},
  1534  					pendingAttemptToDelete: []*node{
  1535  						makeNode(deployment1apps, virtual),
  1536  						makeNode(pod1ns1, withOwners(deployment1extensions))}, // mismatched child enqueued for delete attempt
  1537  				}),
  1538  				// 6,7: handle attempt to delete virtual parent for accessible apiVersion
  1539  				processAttemptToDelete(1),
  1540  				assertState(state{
  1541  					clientActions: []string{
  1542  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of parent, gets 404
  1543  					},
  1544  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(deployment1apps)}, // virtual parent not found, queued virtual delete event
  1545  					graphNodes: []*node{
  1546  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1547  						makeNode(deployment1apps, virtual),
  1548  						makeNode(pod1ns1, withOwners(deployment1extensions))},
  1549  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))},
  1550  				}),
  1551  				// 8,9: handle attempt to delete mismatched child
  1552  				processAttemptToDelete(1),
  1553  				assertState(state{
  1554  					clientActions: []string{
  1555  						"get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete
  1556  					},
  1557  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(deployment1apps)},
  1558  					graphNodes: []*node{
  1559  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1560  						makeNode(deployment1apps, virtual),
  1561  						makeNode(pod1ns1, withOwners(deployment1extensions))},
  1562  					pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))}, // restmapper error on inaccessible parent, requeued
  1563  				}),
  1564  				// 10,11: handle queued virtual delete event
  1565  				processPendingGraphChanges(1),
  1566  				assertState(state{
  1567  					graphNodes: []*node{
  1568  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1569  						makeNode(deployment1extensions, virtual), // deployment node changed identity to alternative virtual identity
  1570  						makeNode(pod1ns1, withOwners(deployment1extensions)),
  1571  					},
  1572  					absentOwnerCache: []objectReference{deployment1apps}, // absent apps/v1 parent remembered
  1573  					pendingAttemptToDelete: []*node{
  1574  						makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion
  1575  						makeNode(pod2ns1, withOwners(deployment1apps)),       // children of absent apps/v1 parent queued for delete attempt
  1576  						makeNode(deployment1extensions, virtual),             // new virtual parent queued for delete attempt
  1577  					},
  1578  				}),
  1579  
  1580  				// 12,13: handle attempt to delete child referencing inaccessible apiVersion
  1581  				processAttemptToDelete(1),
  1582  				assertState(state{
  1583  					clientActions: []string{
  1584  						"get /v1, Resource=pods ns=ns1 name=podname1", // lookup of child pre-delete
  1585  					},
  1586  					graphNodes: []*node{
  1587  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1588  						makeNode(deployment1extensions, virtual),
  1589  						makeNode(pod1ns1, withOwners(deployment1extensions))},
  1590  					absentOwnerCache: []objectReference{deployment1apps},
  1591  					pendingAttemptToDelete: []*node{
  1592  						makeNode(pod2ns1, withOwners(deployment1apps)),       // children of absent apps/v1 parent queued for delete attempt
  1593  						makeNode(deployment1extensions, virtual),             // new virtual parent queued for delete attempt
  1594  						makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion - requeued to end
  1595  					},
  1596  				}),
  1597  
  1598  				// 14,15: handle attempt to delete child referencing accessible apiVersion
  1599  				processAttemptToDelete(1),
  1600  				assertState(state{
  1601  					clientActions: []string{
  1602  						"get /v1, Resource=pods ns=ns1 name=podname2",    // lookup of child pre-delete
  1603  						"delete /v1, Resource=pods ns=ns1 name=podname2", // parent absent, delete
  1604  					},
  1605  					graphNodes: []*node{
  1606  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1607  						makeNode(deployment1extensions, virtual),
  1608  						makeNode(pod1ns1, withOwners(deployment1extensions))},
  1609  					absentOwnerCache: []objectReference{deployment1apps},
  1610  					pendingAttemptToDelete: []*node{
  1611  						makeNode(deployment1extensions, virtual),             // new virtual parent queued for delete attempt
  1612  						makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion
  1613  					},
  1614  				}),
  1615  
  1616  				// 16,17: handle attempt to delete virtual parent in inaccessible apiVersion
  1617  				processAttemptToDelete(1),
  1618  				assertState(state{
  1619  					graphNodes: []*node{
  1620  						makeNode(pod2ns1, withOwners(deployment1apps)),
  1621  						makeNode(deployment1extensions, virtual),
  1622  						makeNode(pod1ns1, withOwners(deployment1extensions))},
  1623  					absentOwnerCache: []objectReference{deployment1apps},
  1624  					pendingAttemptToDelete: []*node{
  1625  						makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion
  1626  						makeNode(deployment1extensions, virtual),             // virtual parent with inaccessible apiVersion - requeued to end
  1627  					},
  1628  				}),
  1629  
  1630  				// 18,19: observe delete of pod2 from step 14
  1631  				// final state: virtual parent for inaccessible apiVersion and child of that parent remain in graph, queued for delete attempts with backoff
  1632  				processEvent(makeDeleteEvent(pod2ns1)),
  1633  				assertState(state{
  1634  					graphNodes: []*node{
  1635  						makeNode(deployment1extensions, virtual),
  1636  						makeNode(pod1ns1, withOwners(deployment1extensions))},
  1637  					absentOwnerCache: []objectReference{deployment1apps},
  1638  					pendingAttemptToDelete: []*node{
  1639  						makeNode(pod1ns1, withOwners(deployment1extensions)), // child referencing inaccessible apiVersion
  1640  						makeNode(deployment1extensions, virtual),             // virtual parent with inaccessible apiVersion
  1641  					},
  1642  				}),
  1643  			},
  1644  		},
  1645  		// child pointing at incorrect apiVersion/kind of still-existing parent object (e.g. core/v1 Secret with uid=123, where an apps/v1 Deployment with uid=123 exists)
  1646  		// * should be deleted immediately
  1647  		// * should not trigger deletion of legitimate children of parent
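        		// Mechanics: owner identity is coordinates+uid. A 404 on the bad coordinates is
        		// remembered in absentOwnerCache and the graph node switches to the alternate
        		// (correct) identity reported by other children, so only children citing the bad
        		// coordinates are deleted.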
  1648  		{
  1649  			name: "bad child -> existing owner with incorrect API version (bad child, good child, bad parent delete, good parent)",
  1650  			steps: []step{
  1651  				// setup
  1652  				createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)),
  1653  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(badChildPod, badSecretReferenceWithDeploymentUID)),
  1654  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(goodChildPod, deployment1apps)),
  1655  				// 3,4: observe bad child
  1656  				processEvent(makeAddEvent(badChildPod, badSecretReferenceWithDeploymentUID)),
  1657  				assertState(state{
  1658  					graphNodes: []*node{
  1659  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1660  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1661  					pendingAttemptToDelete: []*node{
  1662  						makeNode(badSecretReferenceWithDeploymentUID, virtual)}, // virtual parent enqueued for delete attempt
  1663  				}),
  1664  
  1665  				// 5,6: observe good child
  1666  				processEvent(makeAddEvent(goodChildPod, deployment1apps)),
  1667  				assertState(state{
  1668  					graphNodes: []*node{
  1669  						makeNode(goodChildPod, withOwners(deployment1apps)), // good child added
  1670  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1671  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1672  					pendingAttemptToDelete: []*node{
  1673  						makeNode(badSecretReferenceWithDeploymentUID, virtual), // virtual parent enqueued for delete attempt
  1674  						makeNode(goodChildPod, withOwners(deployment1apps)),    // good child enqueued for delete attempt
  1675  					},
  1676  				}),
  1677  
  1678  				// 7,8: process pending delete of virtual parent
  1679  				processAttemptToDelete(1),
  1680  				assertState(state{
  1681  					clientActions: []string{
  1682  						"get /v1, Resource=secrets ns=ns1 name=secretname", // lookup of bad parent reference
  1683  					},
  1684  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent not found, queued virtual delete event
  1685  					graphNodes: []*node{
  1686  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1687  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1688  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1689  					pendingAttemptToDelete: []*node{
  1690  						makeNode(goodChildPod, withOwners(deployment1apps)), // good child enqueued for delete attempt
  1691  					},
  1692  				}),
  1693  
  1694  				// 9,10: process pending delete of good child, gets 200, remains
  1695  				processAttemptToDelete(1),
  1696  				assertState(state{
  1697  					clientActions: []string{
  1698  						"get /v1, Resource=pods ns=ns1 name=goodpod",                // lookup of child pre-delete
  1699  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent reference, returns 200
  1700  					},
  1701  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent not found, queued virtual delete event
  1702  					graphNodes: []*node{
  1703  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1704  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1705  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1706  				}),
  1707  
  1708  				// 11,12: process virtual delete event of bad parent reference
  1709  				processPendingGraphChanges(1),
  1710  				assertState(state{
  1711  					graphNodes: []*node{
  1712  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1713  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1714  						makeNode(deployment1apps, virtual)}, // parent node switched to alternate identity, still virtual
  1715  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, // remember absence of bad parent coordinates
  1716  					pendingAttemptToDelete: []*node{
  1717  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), // child of bad parent coordinates enqueued for delete attempt
  1718  						makeNode(deployment1apps, virtual),                                     // new alternate virtual parent identity queued for delete attempt
  1719  					},
  1720  				}),
  1721  
  1722  				// 13,14: process pending delete of bad child
  1723  				processAttemptToDelete(1),
  1724  				assertState(state{
  1725  					clientActions: []string{
  1726  						"get /v1, Resource=pods ns=ns1 name=badpod",    // lookup of child pre-delete
  1727  						"delete /v1, Resource=pods ns=ns1 name=badpod", // delete of bad child (absence of bad parent is cached)
  1728  					},
  1729  					graphNodes: []*node{
  1730  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1731  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1732  						makeNode(deployment1apps, virtual)}, // parent node switched to alternate identity, still virtual
  1733  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1734  					pendingAttemptToDelete: []*node{
  1735  						makeNode(deployment1apps, virtual), // new alternate virtual parent identity queued for delete attempt
  1736  					},
  1737  				}),
  1738  
  1739  				// 15,16: process pending delete of new virtual parent
  1740  				processAttemptToDelete(1),
  1741  				assertState(state{
  1742  					clientActions: []string{
  1743  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of virtual parent, returns 200
  1744  					},
  1745  					graphNodes: []*node{
  1746  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1747  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1748  						makeNode(deployment1apps, virtual)}, // parent node switched to alternate identity, still virtual
  1749  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1750  					pendingAttemptToDelete: []*node{
  1751  						makeNode(deployment1apps, virtual), // requeued, not yet observed
  1752  					},
  1753  				}),
  1754  
  1755  				// 17,18: observe good parent
  1756  				processEvent(makeAddEvent(deployment1apps)),
  1757  				assertState(state{
  1758  					graphNodes: []*node{
  1759  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1760  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1761  						makeNode(deployment1apps)}, // parent node made non-virtual
  1762  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1763  					pendingAttemptToDelete: []*node{
  1764  						makeNode(deployment1apps), // still queued, no longer virtual
  1765  					},
  1766  				}),
  1767  
  1768  				// 19,20: observe delete of bad child from step 13
  1769  				processEvent(makeDeleteEvent(badChildPod, badSecretReferenceWithDeploymentUID)),
  1770  				assertState(state{
  1771  					graphNodes: []*node{
  1772  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1773  						// bad child node removed
  1774  						makeNode(deployment1apps)},
  1775  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1776  					pendingAttemptToDelete: []*node{
  1777  						makeNode(deployment1apps), // still queued, no longer virtual
  1778  					},
  1779  				}),
  1780  
  1781  				// 21,22: process pending delete of good parent
  1782  				// final state: good parent in graph with correct coordinates, good children remain, no pending deletions
  1783  				processAttemptToDelete(1),
  1784  				assertState(state{
  1785  					clientActions: []string{
  1786  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent, returns 200
  1787  					},
  1788  					graphNodes: []*node{
  1789  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1790  						makeNode(deployment1apps)},
  1791  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1792  				}),
  1793  			},
  1794  		},
  1795  		{
  1796  			name: "bad child -> existing owner with incorrect API version (bad child, good child, good parent, bad parent delete)",
  1797  			steps: []step{
  1798  				// setup
  1799  				createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)),
  1800  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(badChildPod, badSecretReferenceWithDeploymentUID)),
  1801  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(goodChildPod, deployment1apps)),
  1802  				// 3,4: observe bad child
  1803  				processEvent(makeAddEvent(badChildPod, badSecretReferenceWithDeploymentUID)),
  1804  				assertState(state{
  1805  					graphNodes: []*node{
  1806  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1807  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1808  					pendingAttemptToDelete: []*node{
  1809  						makeNode(badSecretReferenceWithDeploymentUID, virtual)}, // virtual parent enqueued for delete attempt
  1810  				}),
  1811  
  1812  				// 5,6: observe good child
  1813  				processEvent(makeAddEvent(goodChildPod, deployment1apps)),
  1814  				assertState(state{
  1815  					graphNodes: []*node{
  1816  						makeNode(goodChildPod, withOwners(deployment1apps)), // good child added
  1817  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1818  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1819  					pendingAttemptToDelete: []*node{
  1820  						makeNode(badSecretReferenceWithDeploymentUID, virtual), // virtual parent enqueued for delete attempt
  1821  						makeNode(goodChildPod, withOwners(deployment1apps)),    // good child enqueued for delete attempt
  1822  					},
  1823  				}),
  1824  
  1825  				// 7,8: process pending delete of virtual parent
  1826  				processAttemptToDelete(1),
  1827  				assertState(state{
  1828  					clientActions: []string{
  1829  						"get /v1, Resource=secrets ns=ns1 name=secretname", // lookup of bad parent reference
  1830  					},
  1831  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent not found, queued virtual delete event
  1832  					graphNodes: []*node{
  1833  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1834  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1835  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1836  					pendingAttemptToDelete: []*node{
  1837  						makeNode(goodChildPod, withOwners(deployment1apps)), // good child enqueued for delete attempt
  1838  					},
  1839  				}),
  1840  
  1841  				// 9,10: process pending delete of good child, gets 200, remains
  1842  				processAttemptToDelete(1),
  1843  				assertState(state{
  1844  					clientActions: []string{
  1845  						"get /v1, Resource=pods ns=ns1 name=goodpod",                // lookup of child pre-delete
  1846  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent reference, returns 200
  1847  					},
  1848  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent not found, queued virtual delete event
  1849  					graphNodes: []*node{
  1850  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1851  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1852  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1853  				}),
  1854  
  1855  				// 11,12: good parent add event
  1856  				insertEvent(makeAddEvent(deployment1apps)),
  1857  				assertState(state{
  1858  					pendingGraphChanges: []*event{
  1859  						makeAddEvent(deployment1apps),                                // good parent observation sneaked in
  1860  						makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent not found, queued virtual delete event
  1861  					graphNodes: []*node{
  1862  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1863  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1864  						makeNode(badSecretReferenceWithDeploymentUID, virtual)},
  1865  				}),
  1866  
  1867  				// 13,14: process good parent add
  1868  				processPendingGraphChanges(1),
  1869  				assertState(state{
  1870  					pendingGraphChanges: []*event{
  1871  						makeVirtualDeleteEvent(badSecretReferenceWithDeploymentUID)}, // bad virtual parent still queued virtual delete event
  1872  					graphNodes: []*node{
  1873  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1874  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1875  						makeNode(deployment1apps)}, // parent node gets fixed, no longer virtual
  1876  					pendingAttemptToDelete: []*node{
  1877  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID))}, // child of bad parent coordinates enqueued for delete attempt
  1878  				}),
  1879  
  1880  				// 15,16: process virtual delete event of bad parent reference
  1881  				processPendingGraphChanges(1),
  1882  				assertState(state{
  1883  					graphNodes: []*node{
  1884  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1885  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1886  						makeNode(deployment1apps)},
  1887  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, // remember absence of bad parent coordinates
  1888  					pendingAttemptToDelete: []*node{
  1889  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), // child of bad parent coordinates enqueued for delete attempt
  1890  					},
  1891  				}),
  1892  
  1893  				// 17,18: process pending delete of bad child
  1894  				processAttemptToDelete(1),
  1895  				assertState(state{
  1896  					clientActions: []string{
  1897  						"get /v1, Resource=pods ns=ns1 name=badpod",    // lookup of child pre-delete
  1898  						"delete /v1, Resource=pods ns=ns1 name=badpod", // delete of bad child (absence of bad parent is cached)
  1899  					},
  1900  					graphNodes: []*node{
  1901  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1902  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)),
  1903  						makeNode(deployment1apps)},
  1904  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1905  				}),
  1906  
  1907  				// 19,20: observe delete of bad child from step 17
  1908  				// final state: good parent in graph with correct coordinates, good children remain, no pending deletions
  1909  				processEvent(makeDeleteEvent(badChildPod, badSecretReferenceWithDeploymentUID)),
  1910  				assertState(state{
  1911  					graphNodes: []*node{
  1912  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1913  						// bad child node removed
  1914  						makeNode(deployment1apps)},
  1915  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1916  				}),
  1917  			},
  1918  		},
  1919  		{
  1920  			name: "bad child -> existing owner with incorrect API version (good child, bad child, good parent)",
  1921  			steps: []step{
  1922  				// setup
  1923  				createObjectInClient("apps", "v1", "deployments", "ns1", makeMetadataObj(deployment1apps)),
  1924  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(badChildPod, badSecretReferenceWithDeploymentUID)),
  1925  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(goodChildPod, deployment1apps)),
  1926  				// 3,4: observe good child
  1927  				processEvent(makeAddEvent(goodChildPod, deployment1apps)),
  1928  				assertState(state{
  1929  					graphNodes: []*node{
  1930  						makeNode(goodChildPod, withOwners(deployment1apps)), // good child added
  1931  						makeNode(deployment1apps, virtual)},                 // virtual parent added
  1932  					pendingAttemptToDelete: []*node{
  1933  						makeNode(deployment1apps, virtual), // virtual parent enqueued for delete attempt
  1934  					},
  1935  				}),
  1936  
  1937  				// 5,6: observe bad child
  1938  				processEvent(makeAddEvent(badChildPod, badSecretReferenceWithDeploymentUID)),
  1939  				assertState(state{
  1940  					graphNodes: []*node{
  1941  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1942  						makeNode(deployment1apps, virtual),
  1943  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID))}, // bad child added
  1944  					pendingAttemptToDelete: []*node{
  1945  						makeNode(deployment1apps, virtual),                                     // virtual parent enqueued for delete attempt
  1946  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), // bad child enqueued for delete attempt
  1947  					},
  1948  				}),
  1949  
  1950  				// 7,8: process pending delete of virtual parent
  1951  				processAttemptToDelete(1),
  1952  				assertState(state{
  1953  					clientActions: []string{
  1954  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent reference, returns 200
  1955  					},
  1956  					graphNodes: []*node{
  1957  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1958  						makeNode(deployment1apps, virtual),
  1959  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID))},
  1960  					pendingAttemptToDelete: []*node{
  1961  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID)), // bad child enqueued for delete attempt
  1962  						makeNode(deployment1apps, virtual),                                     // virtual parent requeued to end, still virtual
  1963  					},
  1964  				}),
  1965  
  1966  				// 9,10: process pending delete of bad child
  1967  				processAttemptToDelete(1),
  1968  				assertState(state{
  1969  					clientActions: []string{
  1970  						"get /v1, Resource=pods ns=ns1 name=badpod",        // lookup of child pre-delete
  1971  						"get /v1, Resource=secrets ns=ns1 name=secretname", // lookup of bad parent reference, returns 404
  1972  						"delete /v1, Resource=pods ns=ns1 name=badpod",     // delete of bad child
  1973  					},
  1974  					graphNodes: []*node{
  1975  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1976  						makeNode(deployment1apps, virtual),
  1977  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID))},
  1978  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID}, // remember absence of bad parent
  1979  					pendingAttemptToDelete: []*node{
  1980  						makeNode(deployment1apps, virtual), // virtual parent requeued to end, still virtual
  1981  					},
  1982  				}),
  1983  
  1984  				// 11,12: observe good parent
  1985  				processEvent(makeAddEvent(deployment1apps)),
  1986  				assertState(state{
  1987  					graphNodes: []*node{
  1988  						makeNode(goodChildPod, withOwners(deployment1apps)),
  1989  						makeNode(deployment1apps), // good parent no longer virtual
  1990  						makeNode(badChildPod, withOwners(badSecretReferenceWithDeploymentUID))},
  1991  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  1992  					pendingAttemptToDelete: []*node{
  1993  						makeNode(deployment1apps), // parent requeued to end, no longer virtual
  1994  					},
  1995  				}),
  1996  
  1997  				// 13,14: observe delete of bad child from step 9
  1998  				processEvent(makeDeleteEvent(badChildPod, badSecretReferenceWithDeploymentUID)),
  1999  				assertState(state{
  2000  					graphNodes: []*node{
  2001  						makeNode(goodChildPod, withOwners(deployment1apps)),
  2002  						// bad child node removed
  2003  						makeNode(deployment1apps)},
  2004  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  2005  					pendingAttemptToDelete: []*node{
  2006  						makeNode(deployment1apps), // parent requeued to end, no longer virtual
  2007  					},
  2008  				}),
  2009  
  2010  				// 15,16: process pending delete of good parent
  2011  				// final state: good parent in graph with correct coordinates, good children remain, no pending deletions
  2012  				processAttemptToDelete(1),
  2013  				assertState(state{
  2014  					clientActions: []string{
  2015  						"get apps/v1, Resource=deployments ns=ns1 name=deployment1", // lookup of good parent, returns 200
  2016  					},
  2017  					graphNodes: []*node{
  2018  						makeNode(goodChildPod, withOwners(deployment1apps)),
  2019  						makeNode(deployment1apps)},
  2020  					absentOwnerCache: []objectReference{badSecretReferenceWithDeploymentUID},
  2021  				}),
  2022  			},
  2023  		},
  2024  		{
  2025  			// https://github.com/kubernetes/kubernetes/issues/98040
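        			// Regression coverage for the issue above: a cluster-scoped dependent can never
        			// have a namespaced owner, so such a child must be left alone (with a warning
        			// event) and must not hot-loop in attemptToDelete or block collection of
        			// legitimate children of the same owner.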
  2026  			name: "cluster-scoped bad child, namespaced good child, missing parent",
  2027  			steps: []step{
  2028  				// setup
  2029  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod2ns1, pod1ns1)),     // good child
  2030  				createObjectInClient("", "v1", "nodes", "", makeMetadataObj(node1, pod1nonamespace)), // bad child
  2031  
  2032  				// 2,3: observe bad child
  2033  				processEvent(makeAddEvent(node1, pod1nonamespace)),
  2034  				assertState(state{
  2035  					graphNodes: []*node{
  2036  						makeNode(node1, withOwners(pod1nonamespace)),
  2037  						makeNode(pod1nonamespace, virtual)},
  2038  					pendingAttemptToDelete: []*node{
  2039  						makeNode(pod1nonamespace, virtual)}, // virtual parent queued for deletion
  2040  				}),
  2041  
  2042  				// 4,5: observe good child
  2043  				processEvent(makeAddEvent(pod2ns1, pod1ns1)),
  2044  				assertState(state{
  2045  					graphNodes: []*node{
  2046  						makeNode(node1, withOwners(pod1nonamespace)),
  2047  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2048  						makeNode(pod1nonamespace, virtual)},
  2049  					pendingAttemptToDelete: []*node{
  2050  						makeNode(pod1nonamespace, virtual),     // virtual parent queued for deletion
  2051  						makeNode(pod2ns1, withOwners(pod1ns1)), // mismatched child queued for deletion
  2052  					},
  2053  				}),
  2054  
  2055  				// 6,7: process attemptToDelete of bad virtual parent coordinates
  2056  				processAttemptToDelete(1),
  2057  				assertState(state{
  2058  					graphNodes: []*node{
  2059  						makeNode(node1, withOwners(pod1nonamespace)),
  2060  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2061  						makeNode(pod1nonamespace, virtual)},
  2062  					pendingAttemptToDelete: []*node{
  2063  						makeNode(pod2ns1, withOwners(pod1ns1))}, // mismatched child queued for deletion
  2064  				}),
  2065  
  2066  				// 8,9: process attemptToDelete of good child
  2067  				processAttemptToDelete(1),
  2068  				assertState(state{
  2069  					clientActions: []string{
  2070  						"get /v1, Resource=pods ns=ns1 name=podname2",    // get good child, returns 200
  2071  						"get /v1, Resource=pods ns=ns1 name=podname1",    // get missing parent, returns 404
  2072  						"delete /v1, Resource=pods ns=ns1 name=podname2", // delete good child
  2073  					},
  2074  					graphNodes: []*node{
  2075  						makeNode(node1, withOwners(pod1nonamespace)),
  2076  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2077  						makeNode(pod1nonamespace, virtual)},
  2078  					absentOwnerCache: []objectReference{pod1ns1}, // missing parent cached
  2079  				}),
  2080  
  2081  				// 10,11: observe deletion of good child
  2082  				// steady-state is bad cluster child and bad virtual parent coordinates, with no retries
  2083  				processEvent(makeDeleteEvent(pod2ns1, pod1ns1)),
  2084  				assertState(state{
  2085  					graphNodes: []*node{
  2086  						makeNode(node1, withOwners(pod1nonamespace)),
  2087  						makeNode(pod1nonamespace, virtual)},
  2088  					absentOwnerCache: []objectReference{pod1ns1},
  2089  				}),
  2090  			},
  2091  		},
  2092  		{
  2093  			// https://github.com/kubernetes/kubernetes/issues/98040
  2094  			name: "cluster-scoped bad child, namespaced good child, late observed parent",
  2095  			steps: []step{
  2096  				// setup
  2097  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod1ns1)),              // good parent
  2098  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod2ns1, pod1ns1)),     // good child
  2099  				createObjectInClient("", "v1", "nodes", "", makeMetadataObj(node1, pod1nonamespace)), // bad child
  2100  
  2101  				// 3,4: observe bad child
  2102  				processEvent(makeAddEvent(node1, pod1nonamespace)),
  2103  				assertState(state{
  2104  					graphNodes: []*node{
  2105  						makeNode(node1, withOwners(pod1nonamespace)),
  2106  						makeNode(pod1nonamespace, virtual)},
  2107  					pendingAttemptToDelete: []*node{
  2108  						makeNode(pod1nonamespace, virtual)}, // virtual parent queued for deletion
  2109  				}),
  2110  
  2111  				// 5,6: observe good child
  2112  				processEvent(makeAddEvent(pod2ns1, pod1ns1)),
  2113  				assertState(state{
  2114  					graphNodes: []*node{
  2115  						makeNode(node1, withOwners(pod1nonamespace)),
  2116  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2117  						makeNode(pod1nonamespace, virtual)},
  2118  					pendingAttemptToDelete: []*node{
  2119  						makeNode(pod1nonamespace, virtual),      // virtual parent queued for deletion
  2120  						makeNode(pod2ns1, withOwners(pod1ns1))}, // mismatched child queued for deletion
  2121  				}),
  2122  
  2123  				// 7,8: process attemptToDelete of bad virtual parent coordinates
  2124  				processAttemptToDelete(1),
  2125  				assertState(state{
  2126  					graphNodes: []*node{
  2127  						makeNode(node1, withOwners(pod1nonamespace)),
  2128  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2129  						makeNode(pod1nonamespace, virtual)},
  2130  					pendingAttemptToDelete: []*node{
  2131  						makeNode(pod2ns1, withOwners(pod1ns1))}, // mismatched child queued for deletion
  2132  				}),
  2133  
  2134  				// 9,10: process attemptToDelete of good child
  2135  				processAttemptToDelete(1),
  2136  				assertState(state{
  2137  					clientActions: []string{
  2138  						"get /v1, Resource=pods ns=ns1 name=podname2", // get good child, returns 200
  2139  						"get /v1, Resource=pods ns=ns1 name=podname1", // get late-observed parent, returns 200
  2140  					},
  2141  					graphNodes: []*node{
  2142  						makeNode(node1, withOwners(pod1nonamespace)),
  2143  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2144  						makeNode(pod1nonamespace, virtual)},
  2145  				}),
  2146  
  2147  				// 11,12: late observe good parent
  2148  				processEvent(makeAddEvent(pod1ns1)),
  2149  				assertState(state{
  2150  					graphNodes: []*node{
  2151  						makeNode(node1, withOwners(pod1nonamespace)),
  2152  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2153  						makeNode(pod1ns1)},
  2154  					// warn about bad node reference
  2155  					events: []string{`Warning OwnerRefInvalidNamespace ownerRef [v1/Pod, namespace: , name: podname1, uid: poduid1] does not exist in namespace "" involvedObject{kind=Node,apiVersion=v1}`},
  2156  					pendingAttemptToDelete: []*node{
  2157  						makeNode(node1, withOwners(pod1nonamespace))}, // queue bad cluster-scoped child for delete attempt
  2158  				}),
  2159  
  2160  				// 13,14: process attemptToDelete of bad child
  2161  				// steady state is bad cluster-scoped child remaining with no retries, good parent and good child in graph
  2162  				processAttemptToDelete(1),
  2163  				assertState(state{
  2164  					clientActions: []string{
  2165  						"get /v1, Resource=nodes name=nodename", // get bad child, returns 200
  2166  					},
  2167  					graphNodes: []*node{
  2168  						makeNode(node1, withOwners(pod1nonamespace)),
  2169  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2170  						makeNode(pod1ns1)},
  2171  				}),
  2172  			},
  2173  		},
  2174  		{
  2175  			// https://github.com/kubernetes/kubernetes/issues/98040
  2176  			name: "namespaced good child, cluster-scoped bad child, missing parent",
  2177  			steps: []step{
  2178  				// setup
  2179  				createObjectInClient("", "v1", "pods", "ns1", makeMetadataObj(pod2ns1, pod1ns1)),     // good child
  2180  				createObjectInClient("", "v1", "nodes", "", makeMetadataObj(node1, pod1nonamespace)), // bad child
  2181  
  2182  				// 2,3: observe good child
  2183  				processEvent(makeAddEvent(pod2ns1, pod1ns1)),
  2184  				assertState(state{
  2185  					graphNodes: []*node{
  2186  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2187  						makeNode(pod1ns1, virtual)},
  2188  					pendingAttemptToDelete: []*node{
  2189  						makeNode(pod1ns1, virtual)}, // virtual parent queued for deletion
  2190  				}),
  2191  
  2192  				// 4,5: observe bad child
  2193  				processEvent(makeAddEvent(node1, pod1nonamespace)),
  2194  				assertState(state{
  2195  					graphNodes: []*node{
  2196  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2197  						makeNode(node1, withOwners(pod1nonamespace)),
  2198  						makeNode(pod1ns1, virtual)},
  2199  					pendingAttemptToDelete: []*node{
  2200  						makeNode(pod1ns1, virtual),                   // virtual parent queued for deletion
  2201  						makeNode(node1, withOwners(pod1nonamespace)), // mismatched child queued for deletion
  2202  					},
  2203  				}),
  2204  
  2205  				// 6,7: process attemptToDelete of good virtual parent coordinates
  2206  				processAttemptToDelete(1),
  2207  				assertState(state{
  2208  					clientActions: []string{
  2209  						"get /v1, Resource=pods ns=ns1 name=podname1", // lookup of missing parent, returns 404
  2210  					},
  2211  					graphNodes: []*node{
  2212  						makeNode(node1, withOwners(pod1nonamespace)),
  2213  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2214  						makeNode(pod1ns1, virtual)},
  2215  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(pod1ns1)}, // virtual parent not found, queued virtual delete event
  2216  					pendingAttemptToDelete: []*node{
  2217  						makeNode(node1, withOwners(pod1nonamespace)), // mismatched child still queued for deletion
  2218  					},
  2219  				}),
  2220  
  2221  				// 8,9: process attemptToDelete of bad cluster child
  2222  				processAttemptToDelete(1),
  2223  				assertState(state{
  2224  					clientActions: []string{
  2225  						"get /v1, Resource=nodes name=nodename", // lookup of existing node
  2226  					},
  2227  					graphNodes: []*node{
  2228  						makeNode(node1, withOwners(pod1nonamespace)),
  2229  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2230  						makeNode(pod1ns1, virtual)},
  2231  					pendingGraphChanges: []*event{makeVirtualDeleteEvent(pod1ns1)}, // virtual parent virtual delete event still enqueued
  2232  				}),
  2233  
  2234  				// 10,11: process virtual delete event for good virtual parent coordinates
  2235  				processPendingGraphChanges(1),
  2236  				assertState(state{
  2237  					graphNodes: []*node{
  2238  						makeNode(node1, withOwners(pod1nonamespace)),
  2239  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2240  						makeNode(pod1nonamespace, virtual)}, // missing virtual parent replaced with alternate coordinates, still virtual
  2241  					absentOwnerCache: []objectReference{pod1ns1}, // cached absence of missing parent
  2242  					pendingAttemptToDelete: []*node{
  2243  						makeNode(pod2ns1, withOwners(pod1ns1)), // good child of missing parent enqueued for deletion
  2244  						makeNode(pod1nonamespace, virtual),     // new virtual parent coordinates enqueued for deletion
  2245  					},
  2246  				}),
  2247  
  2248  				// 12,13: process attemptToDelete of good child
  2249  				processAttemptToDelete(1),
  2250  				assertState(state{
  2251  					clientActions: []string{
  2252  						"get /v1, Resource=pods ns=ns1 name=podname2",    // lookup of good child
  2253  						"delete /v1, Resource=pods ns=ns1 name=podname2", // delete of good child
  2254  					},
  2255  					graphNodes: []*node{
  2256  						makeNode(node1, withOwners(pod1nonamespace)),
  2257  						makeNode(pod2ns1, withOwners(pod1ns1)),
  2258  						makeNode(pod1nonamespace, virtual)},
  2259  					absentOwnerCache: []objectReference{pod1ns1},
  2260  					pendingAttemptToDelete: []*node{
  2261  						makeNode(pod1nonamespace, virtual), // new virtual parent coordinates enqueued for deletion
  2262  					},
  2263  				}),
  2264  
  2265  				// 14,15: observe deletion of good child
  2266  				processEvent(makeDeleteEvent(pod2ns1, pod1ns1)),
  2267  				assertState(state{
  2268  					graphNodes: []*node{
  2269  						makeNode(node1, withOwners(pod1nonamespace)),
  2270  						makeNode(pod1nonamespace, virtual)},
  2271  					absentOwnerCache: []objectReference{pod1ns1},
  2272  					pendingAttemptToDelete: []*node{
  2273  						makeNode(pod1nonamespace, virtual), // new virtual parent coordinates enqueued for deletion
  2274  					},
  2275  				}),
  2276  
  2277  				// 16,17: process attemptToDelete of bad virtual parent coordinates
  2278  				// steady-state is bad cluster child and bad virtual parent coordinates, with no retries
  2279  				processAttemptToDelete(1),
  2280  				assertState(state{
  2281  					graphNodes: []*node{
  2282  						makeNode(node1, withOwners(pod1nonamespace)),
  2283  						makeNode(pod1nonamespace, virtual)},
  2284  					absentOwnerCache: []objectReference{pod1ns1},
  2285  				}),
  2286  			},
  2287  		},
  2288  	}
  2289  
  2290  	alwaysStarted := make(chan struct{})
  2291  	close(alwaysStarted)
  2292  	for _, scenario := range testScenarios {
  2293  		t.Run(scenario.name, func(t *testing.T) {
  2294  
  2295  			absentOwnerCache := NewReferenceCache(100)
  2296  
  2297  			eventRecorder := record.NewFakeRecorder(100)
  2298  			eventRecorder.IncludeObject = true
  2299  
  2300  			metadataClient := fakemetadata.NewSimpleMetadataClient(fakemetadata.NewTestScheme())
  2301  
  2302  			tweakableRM := meta.NewDefaultRESTMapper(nil)
  2303  			tweakableRM.AddSpecific(
  2304  				schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "Role"},
  2305  				schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "roles"},
  2306  				schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "role"},
  2307  				meta.RESTScopeNamespace,
  2308  			)
  2309  			tweakableRM.AddSpecific(
  2310  				schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "Role"},
  2311  				schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "roles"},
  2312  				schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "role"},
  2313  				meta.RESTScopeNamespace,
  2314  			)
  2315  			tweakableRM.AddSpecific(
  2316  				schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"},
  2317  				schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"},
  2318  				schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployment"},
  2319  				meta.RESTScopeNamespace,
  2320  			)
  2321  			restMapper := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}
  2322  
  2323  			// set up our workqueues
  2324  			attemptToDelete := newTrackingWorkqueue()
  2325  			attemptToOrphan := newTrackingWorkqueue()
  2326  			graphChanges := newTrackingWorkqueue()
  2327  
  2328  			gc := &GarbageCollector{
  2329  				metadataClient:   metadataClient,
  2330  				restMapper:       restMapper,
  2331  				attemptToDelete:  attemptToDelete,
  2332  				attemptToOrphan:  attemptToOrphan,
  2333  				absentOwnerCache: absentOwnerCache,
  2334  				dependencyGraphBuilder: &GraphBuilder{
  2335  					eventRecorder:    eventRecorder,
  2336  					metadataClient:   metadataClient,
  2337  					informersStarted: alwaysStarted,
  2338  					graphChanges:     graphChanges,
  2339  					uidToNode: &concurrentUIDToNode{
  2340  						uidToNodeLock: sync.RWMutex{},
  2341  						uidToNode:     make(map[types.UID]*node),
  2342  					},
  2343  					attemptToDelete:  attemptToDelete,
  2344  					absentOwnerCache: absentOwnerCache,
  2345  				},
  2346  			}
  2347  
  2348  			logger, _ := ktesting.NewTestContext(t)
  2349  
  2350  			ctx := stepContext{
  2351  				t:               t,
  2352  				logger:          logger,
  2353  				gc:              gc,
  2354  				eventRecorder:   eventRecorder,
  2355  				metadataClient:  metadataClient,
  2356  				attemptToDelete: attemptToDelete,
  2357  				attemptToOrphan: attemptToOrphan,
  2358  				graphChanges:    graphChanges,
  2359  			}
  2360  			for i, s := range scenario.steps {
  2361  				ctx.t.Logf("%d: %s", i, s.name)
  2362  				s.check(ctx)
  2363  				if ctx.t.Failed() {
  2364  					return
  2365  				}
  2366  				verifyGraphInvariants(fmt.Sprintf("after step %d", i), gc.dependencyGraphBuilder.uidToNode.uidToNode, t)
  2367  				if ctx.t.Failed() {
  2368  					return
  2369  				}
  2370  			}
  2371  		})
  2372  	}
  2373  }
  2374  
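        // makeID constructs an objectReference from the given coordinates, combining the
        // owner-reference fields with the namespace used to locate the object.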
  2375  func makeID(groupVersion string, kind string, namespace, name, uid string) objectReference {
  2376  	return objectReference{
  2377  		OwnerReference: metav1.OwnerReference{APIVersion: groupVersion, Kind: kind, Name: name, UID: types.UID(uid)},
  2378  		Namespace:      namespace,
  2379  	}
  2380  }
  2381  
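        // nodeTweak mutates a node under construction; makeNode applies tweaks in order.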
  2382  type nodeTweak func(*node) *node
  2383  
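        // virtual is a nodeTweak that marks the node as virtual, i.e. known only from
        // owner references rather than from an observed informer event.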
  2384  func virtual(n *node) *node {
  2385  	n.virtual = true
  2386  	return n
  2387  }
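        // withOwners returns a nodeTweak that replaces the node's owner references with the given owners.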
  2388  func withOwners(ownerReferences ...objectReference) nodeTweak {
  2389  	return func(n *node) *node {
  2390  		var owners []metav1.OwnerReference
  2391  		for _, o := range ownerReferences {
  2392  			owners = append(owners, o.OwnerReference)
  2393  		}
  2394  		n.owners = owners
  2395  		return n
  2396  	}
  2397  }
  2398  
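        // makeNode builds a graph node with the given identity, then applies the supplied tweaks.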
  2399  func makeNode(identity objectReference, tweaks ...nodeTweak) *node {
  2400  	n := &node{identity: identity}
  2401  	for _, tweak := range tweaks {
  2402  		n = tweak(n)
  2403  	}
  2404  	return n
  2405  }
  2406  
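        // makeAddEvent returns an addEvent for the given object with the supplied owner references.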
  2407  func makeAddEvent(identity objectReference, owners ...objectReference) *event {
  2408  	gv, err := schema.ParseGroupVersion(identity.APIVersion)
  2409  	if err != nil {
  2410  		panic(err)
  2411  	}
  2412  	return &event{
  2413  		eventType: addEvent,
  2414  		gvk:       gv.WithKind(identity.Kind),
  2415  		obj:       makeObj(identity, owners...),
  2416  	}
  2417  }
  2418  
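        // makeVirtualDeleteEvent returns a deleteEvent flagged as virtual, mimicking the
        // events the garbage collector generates itself when it confirms an owner is absent.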
  2419  func makeVirtualDeleteEvent(identity objectReference, owners ...objectReference) *event {
  2420  	e := makeDeleteEvent(identity, owners...)
  2421  	e.virtual = true
  2422  	return e
  2423  }
  2424  
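        // makeDeleteEvent returns a deleteEvent for the given object with the supplied owner references.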
  2425  func makeDeleteEvent(identity objectReference, owners ...objectReference) *event {
  2426  	gv, err := schema.ParseGroupVersion(identity.APIVersion)
  2427  	if err != nil {
  2428  		panic(err)
  2429  	}
  2430  	return &event{
  2431  		eventType: deleteEvent,
  2432  		gvk:       gv.WithKind(identity.Kind),
  2433  		obj:       makeObj(identity, owners...),
  2434  	}
  2435  }
  2436  
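        // makeObj builds a metadata-only object with the given identity and owner references.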
  2437  func makeObj(identity objectReference, owners ...objectReference) *metaonly.MetadataOnlyObject {
  2438  	obj := &metaonly.MetadataOnlyObject{
  2439  		TypeMeta:   metav1.TypeMeta{APIVersion: identity.APIVersion, Kind: identity.Kind},
  2440  		ObjectMeta: metav1.ObjectMeta{Namespace: identity.Namespace, UID: identity.UID, Name: identity.Name},
  2441  	}
  2442  	for _, owner := range owners {
  2443  		obj.ObjectMeta.OwnerReferences = append(obj.ObjectMeta.OwnerReferences, owner.OwnerReference)
  2444  	}
  2445  	return obj
  2446  }
  2447  
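        // makeMetadataObj builds a PartialObjectMetadata with the given identity and owner
        // references, suitable for seeding the fake metadata client.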
  2448  func makeMetadataObj(identity objectReference, owners ...objectReference) *metav1.PartialObjectMetadata {
  2449  	obj := &metav1.PartialObjectMetadata{
  2450  		TypeMeta:   metav1.TypeMeta{APIVersion: identity.APIVersion, Kind: identity.Kind},
  2451  		ObjectMeta: metav1.ObjectMeta{Namespace: identity.Namespace, UID: identity.UID, Name: identity.Name},
  2452  	}
  2453  	for _, owner := range owners {
  2454  		obj.ObjectMeta.OwnerReferences = append(obj.ObjectMeta.OwnerReferences, owner.OwnerReference)
  2455  	}
  2456  	return obj
  2457  }
  2458  
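        // stepContext carries the shared fixtures that each test step operates on.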
  2459  type stepContext struct {
  2460  	t               *testing.T
  2461  	logger          klog.Logger
  2462  	gc              *GarbageCollector
  2463  	eventRecorder   *record.FakeRecorder
  2464  	metadataClient  *fakemetadata.FakeMetadataClient
  2465  	attemptToDelete *trackingWorkqueue
  2466  	attemptToOrphan *trackingWorkqueue
  2467  	graphChanges    *trackingWorkqueue
  2468  }
  2469  
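        // step is one named action or assertion in a scenario; check reports failures
        // through the *testing.T in the supplied stepContext.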
  2470  type step struct {
  2471  	name  string
  2472  	check func(stepContext)
  2473  }
  2474  
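        // processPendingGraphChanges processes count events from the graphChanges queue,
        // or every pending event if count <= 0.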
  2475  func processPendingGraphChanges(count int) step {
  2476  	return step{
  2477  		name: "processPendingGraphChanges",
  2478  		check: func(ctx stepContext) {
  2479  			ctx.t.Helper()
  2480  			if count <= 0 {
  2481  				// process all
  2482  				for ctx.gc.dependencyGraphBuilder.graphChanges.Len() != 0 {
  2483  					ctx.gc.dependencyGraphBuilder.processGraphChanges(ctx.logger)
  2484  				}
  2485  			} else {
  2486  				for i := 0; i < count; i++ {
  2487  					if ctx.gc.dependencyGraphBuilder.graphChanges.Len() == 0 {
  2488  						ctx.t.Errorf("expected at least %d pending graph changes, got only %d", count, i)
  2489  						return
  2490  					}
  2491  					ctx.gc.dependencyGraphBuilder.processGraphChanges(ctx.logger)
  2492  				}
  2493  			}
  2494  		},
  2495  	}
  2496  }
  2497  
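        // processAttemptToDelete runs the attemptToDelete worker count times,
        // or until the queue drains if count <= 0.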
  2498  func processAttemptToDelete(count int) step {
  2499  	return step{
  2500  		name: "processAttemptToDelete",
  2501  		check: func(ctx stepContext) {
  2502  			ctx.t.Helper()
  2503  			if count <= 0 {
  2504  				// process all
  2505  				for ctx.gc.dependencyGraphBuilder.attemptToDelete.Len() != 0 {
  2506  					ctx.gc.processAttemptToDeleteWorker(context.TODO())
  2507  				}
  2508  			} else {
  2509  				for i := 0; i < count; i++ {
  2510  					if ctx.gc.dependencyGraphBuilder.attemptToDelete.Len() == 0 {
  2511  						ctx.t.Errorf("expected at least %d pending attempts to delete, got only %d", count, i)
  2512  						return
  2513  					}
  2514  					ctx.gc.processAttemptToDeleteWorker(context.TODO())
  2515  				}
  2516  			}
  2517  		},
  2518  	}
  2519  }
  2520  
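        // insertEvent pushes e to the front of the graphChanges queue by draining the
        // queue and re-adding the existing items behind it.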
  2521  func insertEvent(e *event) step {
  2522  	return step{
  2523  		name: "insertEvent",
  2524  		check: func(ctx stepContext) {
  2525  			ctx.t.Helper()
  2526  			// drain queue into items
  2527  			var items []interface{}
  2528  			for ctx.gc.dependencyGraphBuilder.graphChanges.Len() > 0 {
  2529  				item, _ := ctx.gc.dependencyGraphBuilder.graphChanges.Get()
  2530  				ctx.gc.dependencyGraphBuilder.graphChanges.Done(item)
  2531  				items = append(items, item)
  2532  			}
  2533  
  2534  			// add the new event
  2535  			ctx.gc.dependencyGraphBuilder.graphChanges.Add(e)
  2536  
  2537  			// reappend the items
  2538  			for _, item := range items {
  2539  				ctx.gc.dependencyGraphBuilder.graphChanges.Add(item)
  2540  			}
  2541  		},
  2542  	}
  2543  }
  2544  
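        // processEvent enqueues e and processes it immediately; it requires the
        // graphChanges queue to be empty so the step observes exactly this event.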
  2545  func processEvent(e *event) step {
  2546  	return step{
  2547  		name: "processEvent",
  2548  		check: func(ctx stepContext) {
  2549  			ctx.t.Helper()
  2550  			if ctx.gc.dependencyGraphBuilder.graphChanges.Len() != 0 {
  2551  				ctx.t.Fatalf("events present in graphChanges, must process pending graphChanges before calling processEvent")
  2552  			}
  2553  			ctx.gc.dependencyGraphBuilder.graphChanges.Add(e)
  2554  			ctx.gc.dependencyGraphBuilder.processGraphChanges(ctx.logger)
  2555  		},
  2556  	}
  2557  }
  2558  
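        // createObjectInClient stores obj in the fake metadata client and clears the
        // recorded create action, so later assertions see only the collector's actions.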
  2559  func createObjectInClient(group, version, resource, namespace string, obj *metav1.PartialObjectMetadata) step {
  2560  	return step{
  2561  		name: "createObjectInClient",
  2562  		check: func(ctx stepContext) {
  2563  			ctx.t.Helper()
  2564  			if len(ctx.metadataClient.Actions()) > 0 {
  2565  				ctx.t.Fatal("cannot call createObjectInClient with pending client actions, call assertClientActions to check and clear first")
  2566  			}
  2567  			gvr := schema.GroupVersionResource{Group: group, Version: version, Resource: resource}
  2568  			var c fakemetadata.MetadataClient
  2569  			if namespace == "" {
  2570  				c = ctx.metadataClient.Resource(gvr).(fakemetadata.MetadataClient)
  2571  			} else {
  2572  				c = ctx.metadataClient.Resource(gvr).Namespace(namespace).(fakemetadata.MetadataClient)
  2573  			}
  2574  			if _, err := c.CreateFake(obj, metav1.CreateOptions{}); err != nil {
  2575  				ctx.t.Fatal(err)
  2576  			}
  2577  			ctx.metadataClient.ClearActions()
  2578  		},
  2579  	}
  2580  }
  2581  
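        // deleteObjectFromClient removes the named object from the fake metadata client
        // and clears the recorded delete action.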
  2582  func deleteObjectFromClient(group, version, resource, namespace, name string) step {
  2583  	return step{
  2584  		name: "deleteObjectFromClient",
  2585  		check: func(ctx stepContext) {
  2586  			ctx.t.Helper()
  2587  			if len(ctx.metadataClient.Actions()) > 0 {
  2588  				ctx.t.Fatal("cannot call deleteObjectFromClient with pending client actions, call assertClientActions to check and clear first")
  2589  			}
  2590  			gvr := schema.GroupVersionResource{Group: group, Version: version, Resource: resource}
  2591  			var c fakemetadata.MetadataClient
  2592  			if namespace == "" {
  2593  				c = ctx.metadataClient.Resource(gvr).(fakemetadata.MetadataClient)
  2594  			} else {
  2595  				c = ctx.metadataClient.Resource(gvr).Namespace(namespace).(fakemetadata.MetadataClient)
  2596  			}
  2597  			if err := c.Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil {
  2598  				ctx.t.Fatal(err)
  2599  			}
  2600  			ctx.metadataClient.ClearActions()
  2601  		},
  2602  	}
  2603  }
  2604  
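        // state describes the expected observable state of the collector after a step:
        // emitted events, client actions, graph contents, queued work, and the
        // absent-owner cache.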
  2605  type state struct {
  2606  	events                 []string
  2607  	clientActions          []string
  2608  	graphNodes             []*node
  2609  	pendingGraphChanges    []*event
  2610  	pendingAttemptToDelete []*node
  2611  	pendingAttemptToOrphan []*node
  2612  	absentOwnerCache       []objectReference
  2613  }
  2614  
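        // assertState verifies that the collector's current state matches s, draining
        // recorded events and client actions as it checks them.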
  2615  func assertState(s state) step {
  2616  	return step{
  2617  		name: "assertState",
  2618  		check: func(ctx stepContext) {
  2619  			ctx.t.Helper()
  2620  
  2621  			{
  2622  				for _, absent := range s.absentOwnerCache {
  2623  					if !ctx.gc.absentOwnerCache.Has(absent) {
  2624  						ctx.t.Errorf("expected absent owner %s was not in the absentOwnerCache", absent)
  2625  					}
  2626  				}
  2627  				if len(s.absentOwnerCache) != ctx.gc.absentOwnerCache.cache.Len() {
  2628  					// only way to inspect is to drain them all, but that's ok because we're failing the test anyway
  2629  					ctx.gc.absentOwnerCache.cache.OnEvicted = func(key lru.Key, item interface{}) {
  2630  						found := false
  2631  						for _, absent := range s.absentOwnerCache {
  2632  							if absent == key {
  2633  								found = true
  2634  								break
  2635  							}
  2636  						}
  2637  						if !found {
  2638  							ctx.t.Errorf("unexpected item in absent owner cache: %s", key)
  2639  						}
  2640  					}
  2641  					ctx.gc.absentOwnerCache.cache.Clear()
  2642  					ctx.t.Error("unexpected items in absent owner cache")
  2643  				}
  2644  			}
  2645  
  2646  			{
  2647  				var actualEvents []string
  2648  				// drain sent events
  2649  			loop:
  2650  				for {
  2651  					select {
  2652  					case event := <-ctx.eventRecorder.Events:
  2653  						actualEvents = append(actualEvents, event)
  2654  					default:
  2655  						break loop
  2656  					}
  2657  				}
  2658  				if !reflect.DeepEqual(actualEvents, s.events) {
  2659  					ctx.t.Logf("expected:\n%s", strings.Join(s.events, "\n"))
  2660  					ctx.t.Logf("actual:\n%s", strings.Join(actualEvents, "\n"))
  2661  					ctx.t.Fatalf("did not get expected events")
  2662  				}
  2663  			}
  2664  
  2665  			{
  2666  				var actualClientActions []string
  2667  				for _, action := range ctx.metadataClient.Actions() {
  2668  					s := fmt.Sprintf("%s %s", action.GetVerb(), action.GetResource())
  2669  					if action.GetNamespace() != "" {
  2670  						s += " ns=" + action.GetNamespace()
  2671  					}
  2672  					if get, ok := action.(clientgotesting.GetAction); ok && get.GetName() != "" {
  2673  						s += " name=" + get.GetName()
  2674  					}
  2675  					actualClientActions = append(actualClientActions, s)
  2676  				}
  2677  				if (len(s.clientActions) > 0 || len(actualClientActions) > 0) && !reflect.DeepEqual(s.clientActions, actualClientActions) {
  2678  					ctx.t.Logf("expected:\n%s", strings.Join(s.clientActions, "\n"))
  2679  					ctx.t.Logf("actual:\n%s", strings.Join(actualClientActions, "\n"))
  2680  					ctx.t.Fatalf("did not get expected client actions")
  2681  				}
  2682  				ctx.metadataClient.ClearActions()
  2683  			}
  2684  
  2685  			{
  2686  				if l := len(ctx.gc.dependencyGraphBuilder.uidToNode.uidToNode); l != len(s.graphNodes) {
  2687  					ctx.t.Errorf("expected %d nodes, got %d", len(s.graphNodes), l)
  2688  				}
  2689  				for _, n := range s.graphNodes {
  2690  					graphNode, ok := ctx.gc.dependencyGraphBuilder.uidToNode.Read(n.identity.UID)
  2691  					if !ok {
  2692  						ctx.t.Errorf("no node in graph with uid=%s", n.identity.UID)
  2693  						continue
  2694  					}
  2695  					if graphNode.identity != n.identity {
  2696  						ctx.t.Errorf("%s: expected identity %v, got %v", n.identity.UID, n.identity, graphNode.identity)
  2697  					}
  2698  					if graphNode.virtual != n.virtual {
  2699  						ctx.t.Errorf("%s: expected virtual %v, got %v", n.identity.UID, n.virtual, graphNode.virtual)
  2700  					}
  2701  					if (len(graphNode.owners) > 0 || len(n.owners) > 0) && !reflect.DeepEqual(graphNode.owners, n.owners) {
  2702  						expectedJSON, _ := json.Marshal(n.owners)
  2703  						actualJSON, _ := json.Marshal(graphNode.owners)
  2704  						ctx.t.Errorf("%s: expected owners %s, got %s", n.identity.UID, expectedJSON, actualJSON)
  2705  					}
  2706  				}
  2707  			}
  2708  
  2709  			{
  2710  				for i := range s.pendingGraphChanges {
  2711  					e := s.pendingGraphChanges[i]
  2712  					if len(ctx.graphChanges.pendingList) < i+1 {
  2713  						ctx.t.Errorf("graphChanges: expected %d events, got %d", len(s.pendingGraphChanges), ctx.graphChanges.Len())
  2714  						break
  2715  					}
  2716  
  2717  					a := ctx.graphChanges.pendingList[i].(*event)
  2718  					if !reflect.DeepEqual(e, a) {
  2719  						objectDiff := ""
  2720  						if !reflect.DeepEqual(e.obj, a.obj) {
  2721  							objectDiff = "\nobjectDiff:\n" + cmp.Diff(e.obj, a.obj)
  2722  						}
  2723  						oldObjectDiff := ""
  2724  						if !reflect.DeepEqual(e.oldObj, a.oldObj) {
  2725  							oldObjectDiff = "\noldObjectDiff:\n" + cmp.Diff(e.oldObj, a.oldObj)
  2726  						}
  2727  						ctx.t.Errorf("graphChanges[%d]: expected\n%#v\ngot\n%#v%s%s", i, e, a, objectDiff, oldObjectDiff)
  2728  					}
  2729  				}
  2730  				if ctx.graphChanges.Len() > len(s.pendingGraphChanges) {
  2731  					for i, a := range ctx.graphChanges.pendingList[len(s.pendingGraphChanges):] {
  2732  						ctx.t.Errorf("graphChanges[%d]: unexpected event: %v", len(s.pendingGraphChanges)+i, a)
  2733  					}
  2734  				}
  2735  			}
  2736  
  2737  			{
  2738  				for i := range s.pendingAttemptToDelete {
  2739  					e := s.pendingAttemptToDelete[i].identity
  2740  					eVirtual := s.pendingAttemptToDelete[i].virtual
  2741  					if ctx.attemptToDelete.Len() < i+1 {
  2742  						ctx.t.Errorf("attemptToDelete: expected %d nodes, got %d", len(s.pendingAttemptToDelete), ctx.attemptToDelete.Len())
  2743  						break
  2744  					}
  2745  					a := ctx.attemptToDelete.pendingList[i].(*node).identity
  2746  					aVirtual := ctx.attemptToDelete.pendingList[i].(*node).virtual
  2747  					if !reflect.DeepEqual(e, a) {
  2748  						ctx.t.Errorf("attemptToDelete[%d]: expected %v, got %v", i, e, a)
  2749  					}
  2750  					if eVirtual != aVirtual {
  2751  						ctx.t.Errorf("attemptToDelete[%d]: expected virtual=%v for node %v, got virtual=%v for node %v", i, eVirtual, e, aVirtual, a)
  2752  					}
  2753  				}
  2754  				if ctx.attemptToDelete.Len() > len(s.pendingAttemptToDelete) {
  2755  					for i, a := range ctx.attemptToDelete.pendingList[len(s.pendingAttemptToDelete):] {
  2756  						ctx.t.Errorf("attemptToDelete[%d]: unexpected node: %v", len(s.pendingAttemptToDelete)+i, a.(*node).identity)
  2757  					}
  2758  				}
  2759  			}
  2760  
  2761  			{
  2762  				for i := range s.pendingAttemptToOrphan {
  2763  					e := s.pendingAttemptToOrphan[i].identity
  2764  					if ctx.attemptToOrphan.Len() < i+1 {
  2765  						ctx.t.Errorf("attemptToOrphan: expected %d nodes, got %d", len(s.pendingAttemptToOrphan), ctx.attemptToOrphan.Len())
  2766  						break
  2767  					}
  2768  					a := ctx.attemptToOrphan.pendingList[i].(*node).identity
  2769  					if !reflect.DeepEqual(e, a) {
  2770  						ctx.t.Errorf("attemptToOrphan[%d]: expected %v, got %v", i, e, a)
  2771  					}
  2772  				}
  2773  				if ctx.attemptToOrphan.Len() > len(s.pendingAttemptToOrphan) {
  2774  					for i, a := range ctx.attemptToOrphan.pendingList[len(s.pendingAttemptToOrphan):] {
  2775  						ctx.t.Errorf("attemptToOrphan[%d]: unexpected node: %v", len(s.pendingAttemptToOrphan)+i, a.(*node).identity)
  2776  					}
  2777  				}
  2778  			}
  2779  		},
  2780  	}
  2782  }
  2783  
  2784  // trackingWorkqueue implements RateLimitingInterface,
  2785  // allows introspection of the items in the queue,
  2786  // and treats AddAfter and AddRateLimited the same as Add
  2787  // so they are always synchronous.
  2788  type trackingWorkqueue struct {
  2789  	limiter     workqueue.RateLimitingInterface
  2790  	pendingList []interface{}
  2791  	pendingMap  map[interface{}]struct{}
  2792  }
  2793  
  2794  var _ workqueue.RateLimitingInterface = &trackingWorkqueue{}
  2795  
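        // newTrackingWorkqueue returns a trackingWorkqueue backed by an effectively
        // unlimited rate limiter. A minimal usage sketch (illustrative only): because
        // AddAfter and AddRateLimited delegate to Add, items become visible immediately,
        // so a test can assert on queue contents without waiting out delays:
        //
        //	q := newTrackingWorkqueue()
        //	q.Add("a")
        //	q.AddAfter("b", time.Hour) // enqueued immediately, not after an hour
        //	// q.Len() == 2; q.pendingList == []interface{}{"a", "b"}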
  2796  func newTrackingWorkqueue() *trackingWorkqueue {
  2797  	return &trackingWorkqueue{
  2798  		limiter:    workqueue.NewRateLimitingQueue(&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Inf, 100)}),
  2799  		pendingMap: map[interface{}]struct{}{},
  2800  	}
  2801  }
  2802  
  2803  func (t *trackingWorkqueue) Add(item interface{}) {
  2804  	t.queue(item)
  2805  	t.limiter.Add(item)
  2806  }
  2807  func (t *trackingWorkqueue) AddAfter(item interface{}, duration time.Duration) {
  2808  	t.Add(item)
  2809  }
  2810  func (t *trackingWorkqueue) AddRateLimited(item interface{}) {
  2811  	t.Add(item)
  2812  }
  2813  func (t *trackingWorkqueue) Get() (interface{}, bool) {
  2814  	item, shutdown := t.limiter.Get()
  2815  	t.dequeue(item)
  2816  	return item, shutdown
  2817  }
  2818  func (t *trackingWorkqueue) Done(item interface{}) {
  2819  	t.limiter.Done(item)
  2820  }
  2821  func (t *trackingWorkqueue) Forget(item interface{}) {
  2822  	t.limiter.Forget(item)
  2823  }
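        // NumRequeues always reports zero; retry accounting is irrelevant to these tests.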
  2824  func (t *trackingWorkqueue) NumRequeues(item interface{}) int {
  2825  	return 0
  2826  }
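        // Len cross-checks the tracked bookkeeping against the underlying queue and
        // panics on divergence, turning bookkeeping bugs into immediate test failures.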
  2827  func (t *trackingWorkqueue) Len() int {
  2828  	if e, a := len(t.pendingList), len(t.pendingMap); e != a {
  2829  		panic(fmt.Errorf("pendingList != pendingMap: %d / %d", e, a))
  2830  	}
  2831  	if e, a := len(t.pendingList), t.limiter.Len(); e != a {
  2832  		panic(fmt.Errorf("pendingList != limiter.Len(): %d / %d", e, a))
  2833  	}
  2834  	return len(t.pendingList)
  2835  }
  2836  func (t *trackingWorkqueue) ShutDown() {
  2837  	t.limiter.ShutDown()
  2838  }
  2839  func (t *trackingWorkqueue) ShutDownWithDrain() {
  2840  	t.limiter.ShutDownWithDrain()
  2841  }
  2842  func (t *trackingWorkqueue) ShuttingDown() bool {
  2843  	return t.limiter.ShuttingDown()
  2844  }
  2845  
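        // queue records item as pending, deduplicating repeated adds to mirror the
        // underlying workqueue's semantics while preserving insertion order.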
  2846  func (t *trackingWorkqueue) queue(item interface{}) {
  2847  	if _, queued := t.pendingMap[item]; queued {
  2848  		// already queued; the underlying workqueue dedupes identical items, so keep our bookkeeping consistent with it
  2849  		return
  2850  	}
  2851  	t.pendingMap[item] = struct{}{}
  2852  	t.pendingList = append(t.pendingList, item)
  2853  }
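        // dequeue drops item from the pending bookkeeping; the O(n) rebuild of
        // pendingList is fine at test scale.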
  2854  func (t *trackingWorkqueue) dequeue(item interface{}) {
  2855  	if _, queued := t.pendingMap[item]; !queued {
  2856  		// not tracked as pending; nothing to remove
  2857  		return
  2858  	}
  2859  	delete(t.pendingMap, item)
  2860  	newPendingList := []interface{}{}
  2861  	for _, p := range t.pendingList {
  2862  		if p == item {
  2863  			continue
  2864  		}
  2865  		newPendingList = append(newPendingList, p)
  2866  	}
  2867  	t.pendingList = newPendingList
  2868  }