github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/k8s/fake_client.go

package k8s

import (
	"context"
	"fmt"
	"io"
	"os"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/distribution/reference"
	"github.com/google/uuid"
	"github.com/pkg/errors"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/client-go/tools/clientcmd/api"

	"github.com/tilt-dev/tilt/internal/container"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
	"github.com/tilt-dev/tilt/pkg/logger"
)

// MagicTestContainerID is a magic constant. If the docker client returns this
// ID, we always treat the container as a match, even if it doesn't have the
// correct image name.
const MagicTestContainerID = "tilt-testcontainer"

// MagicTestExplodingPort causes FakePortForwarder to fail to initialize (i.e. return an error as soon as ForwardPorts
// is called, without ever becoming ready).
const MagicTestExplodingPort = 34743
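
// A minimal sketch of how a test might exercise the exploding-port failure path
// (hypothetical usage; assumes a context.Context named ctx):
//
//	pf := NewFakePortForwarder(ctx, MagicTestExplodingPort, "default")
//	err := pf.ForwardPorts() // fails immediately, before ReadyCh ever fires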

var _ Client = &FakeK8sClient{}

// PodAndCName keys PodLogsByPodAndContainer.
type PodAndCName struct {
	PID   PodID
	CName container.Name
}

type FakeK8sClient struct {
	t            testing.TB
	mu           sync.Mutex
	ownerFetcher OwnerFetcher

	FakePortForwardClient

	Yaml string
	Lb   LoadBalancerSpec

	DeletedYaml string
	DeleteError error

	LastPodQueryNamespace Namespace
	LastPodQueryImage     reference.NamedTagged

	PodLogsByPodAndContainer map[PodAndCName]ReaderCloser
	LastPodLogStartTime      time.Time
	LastPodLogContext        context.Context
	LastPodLogPipeWriter     *io.PipeWriter
	ContainerLogsError       error

	podWatches     []fakePodWatch
	serviceWatches []fakeServiceWatch
	eventWatches   []fakeEventWatch
	events         map[types.NamespacedName]*v1.Event
	services       map[types.NamespacedName]*v1.Service
	pods           map[types.NamespacedName]*v1.Pod

	EventsWatchErr error

	UpsertError      error
	UpsertResult     []K8sEntity
	LastUpsertResult []K8sEntity
	UpsertTimeout    time.Duration

	Runtime    container.Runtime
	Registry   *v1alpha1.RegistryHosting
	FakeNodeIP NodeIP

	// entities are injected objects keyed by UID.
	entities map[types.UID]K8sEntity
	// currentVersions maps each object name to the UID of the most recently injected value.
	//
	// In real K8s, you'd need to delete the old object before being able to store a new one with the same name.
	// For testing purposes, it's useful to simulate out-of-order/stale-data scenarios, so the fake
	// client doesn't enforce name uniqueness for storage. Where appropriate (e.g. ListMeta), this map ensures that
	// multiple objects for the same name aren't returned.
	currentVersions         map[string]types.UID
	getByReferenceCallCount int
	listCallCount           int
	listReturnsEmpty        bool

	ExecCalls           []ExecCall
	ExecOutputs         []io.Reader
	ExecErrors          []error
	ClusterHealthStatus *ClusterHealth
	ClusterHealthError  error
	FakeAPIConfig       *api.Config
}
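
// A minimal sketch of the stale-data simulation described on currentVersions
// above (hypothetical entities oldEntity/newEntity sharing a name but carrying
// UIDs "a" and "b"):
//
//	cli := NewFakeK8sClient(t)
//	cli.Inject(oldEntity) // stored under UID "a"
//	cli.Inject(newEntity) // stored under UID "b"; ListMeta now returns only "b",
//	                      // but GetMetaByReference can still resolve UID "a"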

type ExecCall struct {
	PID   PodID
	CName container.Name
	Ns    Namespace
	Cmd   []string
	Stdin []byte
}

type fakeServiceWatch struct {
	cancel func()
	ns     Namespace
	ch     chan *v1.Service
}

type fakePodWatch struct {
	cancel func()
	ns     Namespace
	ch     chan ObjectUpdate
}

type fakeEventWatch struct {
	cancel func()
	ns     Namespace
	ch     chan *v1.Event
}

func (c *FakeK8sClient) UpsertService(s *v1.Service) {
	c.mu.Lock()
	defer c.mu.Unlock()

	s = s.DeepCopy()
	c.services[types.NamespacedName{Name: s.Name, Namespace: s.Namespace}] = s
	for _, w := range c.serviceWatches {
		if w.ns != Namespace(s.Namespace) {
			continue
		}

		w.ch <- s
	}
}

func (c *FakeK8sClient) UpsertPod(pod *v1.Pod) {
	c.mu.Lock()
	defer c.mu.Unlock()

	pod = pod.DeepCopy()
	c.pods[types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}] = pod
	for _, w := range c.podWatches {
		if w.ns != Namespace(pod.Namespace) {
			continue
		}

		w.ch <- ObjectUpdate{obj: pod}
	}
}

func (c *FakeK8sClient) UpsertEvent(event *v1.Event) {
	c.mu.Lock()
	defer c.mu.Unlock()

	event = event.DeepCopy()
	c.events[types.NamespacedName{Name: event.Name, Namespace: event.Namespace}] = event
	for _, w := range c.eventWatches {
		if w.ns != Namespace(event.Namespace) {
			continue
		}

		w.ch <- event
	}
}

func (c *FakeK8sClient) PodFromInformerCache(ctx context.Context, nn types.NamespacedName) (*v1.Pod, error) {
	if nn.Namespace == "" {
		return nil, fmt.Errorf("missing namespace from pod request")
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	pod, ok := c.pods[nn]
	if !ok {
		return nil, apierrors.NewNotFound(PodGVR.GroupResource(), nn.Name)
	}
	return pod, nil
}
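
// PodFromInformerCache returns a standard apierrors NotFound error for missing
// pods, so tests can assert on it exactly as production code would (a sketch;
// assumes cli and ctx are in scope):
//
//	_, err := cli.PodFromInformerCache(ctx, types.NamespacedName{Name: "p", Namespace: "default"})
//	if !apierrors.IsNotFound(err) {
//		t.Fatalf("expected NotFound, got %v", err)
//	}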

func (c *FakeK8sClient) WatchServices(ctx context.Context, ns Namespace) (<-chan *v1.Service, error) {
	if ns == "" {
		return nil, fmt.Errorf("missing namespace from watch request")
	}

	ctx, cancel := context.WithCancel(ctx)

	c.mu.Lock()
	ch := make(chan *v1.Service, 20)
	c.serviceWatches = append(c.serviceWatches, fakeServiceWatch{cancel, ns, ch})
	toEmit := []*v1.Service{}
	for _, service := range c.services {
		if Namespace(service.Namespace) == ns {
			toEmit = append(toEmit, service)
		}
	}
	c.mu.Unlock()

	go func() {
		// Initial list of objects
		for _, obj := range toEmit {
			ch <- obj
		}

		// When ctx is canceled, drop the watches for this namespace from the list.
		<-ctx.Done()

		c.mu.Lock()
		var newWatches []fakeServiceWatch
		for _, e := range c.serviceWatches {
			if e.ns != ns {
				newWatches = append(newWatches, e)
			}
		}
		c.serviceWatches = newWatches
		c.mu.Unlock()

		close(ch)
	}()
	return ch, nil
}
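
// A sketch of the subscribe-then-update flow (hypothetical test code; assumes
// cli from NewFakeK8sClient(t) and a ctx): services already stored in the
// namespace are replayed first, and later upserts fan out to every subscriber.
//
//	ch, _ := cli.WatchServices(ctx, "default")
//	cli.UpsertService(svc) // svc.Namespace == "default"
//	updated := <-ch        // the deep-copied service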

func (c *FakeK8sClient) WatchEvents(ctx context.Context, ns Namespace) (<-chan *v1.Event, error) {
	if ns == "" {
		return nil, fmt.Errorf("missing namespace from watch request")
	}

	if c.EventsWatchErr != nil {
		err := c.EventsWatchErr
		c.EventsWatchErr = nil
		return nil, err
	}

	ctx, cancel := context.WithCancel(ctx)

	c.mu.Lock()
	ch := make(chan *v1.Event, 20)
	c.eventWatches = append(c.eventWatches, fakeEventWatch{cancel, ns, ch})
	toEmit := []*v1.Event{}
	for _, event := range c.events {
		if Namespace(event.Namespace) == ns {
			toEmit = append(toEmit, event)
		}
	}
	c.mu.Unlock()

	go func() {
		// Initial list of objects
		for _, obj := range toEmit {
			ch <- obj
		}

		<-ctx.Done()

		c.mu.Lock()
		var newWatches []fakeEventWatch
		for _, e := range c.eventWatches {
			if e.ns != ns {
				newWatches = append(newWatches, e)
			}
		}
		c.eventWatches = newWatches
		c.mu.Unlock()

		close(ch)
	}()
	return ch, nil
}

// WatchMeta is a stub: the fake client never emits metadata updates, so the
// returned channel stays open but idle.
func (c *FakeK8sClient) WatchMeta(ctx context.Context, gvk schema.GroupVersionKind, ns Namespace) (<-chan metav1.Object, error) {
	if ns == "" {
		return nil, fmt.Errorf("missing namespace from watch request")
	}

	return make(chan metav1.Object), nil
}

func (c *FakeK8sClient) EmitPodDelete(p *v1.Pod) {
	c.mu.Lock()
	defer c.mu.Unlock()

	delete(c.pods, types.NamespacedName{Name: p.Name, Namespace: p.Namespace})
	for _, w := range c.podWatches {
		if w.ns != Namespace(p.Namespace) {
			continue
		}

		w.ch <- ObjectUpdate{obj: p, isDelete: true}
	}
}

func (c *FakeK8sClient) WatchPods(ctx context.Context, ns Namespace) (<-chan ObjectUpdate, error) {
	if ns == "" {
		return nil, fmt.Errorf("missing namespace from watch request")
	}

	ctx, cancel := context.WithCancel(ctx)

	c.mu.Lock()
	ch := make(chan ObjectUpdate, 20)
	c.podWatches = append(c.podWatches, fakePodWatch{cancel, ns, ch})
	toEmit := []*v1.Pod{}
	for _, pod := range c.pods {
		if Namespace(pod.Namespace) == ns {
			toEmit = append(toEmit, pod)
		}
	}
	c.mu.Unlock()

	go func() {
		// Initial list of objects
		for _, obj := range toEmit {
			ch <- ObjectUpdate{obj: obj}
		}

		<-ctx.Done()

		c.mu.Lock()
		var newWatches []fakePodWatch
		for _, e := range c.podWatches {
			if e.ns != ns {
				newWatches = append(newWatches, e)
			}
		}
		c.podWatches = newWatches
		c.mu.Unlock()

		close(ch)
	}()

	return ch, nil
}
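
// Deletes arrive on the same channel as upserts, distinguished by the
// ObjectUpdate's delete flag (a sketch; accessor names on ObjectUpdate are
// assumed, not defined in this file):
//
//	ch, _ := cli.WatchPods(ctx, "default")
//	cli.UpsertPod(pod)
//	cli.EmitPodDelete(pod)
//	first := <-ch  // wraps the upserted pod
//	second := <-ch // the same pod, flagged as a delete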

func NewFakeK8sClient(t testing.TB) *FakeK8sClient {
	cli := &FakeK8sClient{
		t:                        t,
		PodLogsByPodAndContainer: make(map[PodAndCName]ReaderCloser),
		pods:                     make(map[types.NamespacedName]*v1.Pod),
		services:                 make(map[types.NamespacedName]*v1.Service),
		events:                   make(map[types.NamespacedName]*v1.Event),
		entities:                 make(map[types.UID]K8sEntity),
		currentVersions:          make(map[string]types.UID),
		FakeAPIConfig: &api.Config{
			CurrentContext: "default",
			Contexts: map[string]*api.Context{
				"default": {
					Cluster:   "default",
					Namespace: "default",
				},
			},
			Clusters: map[string]*api.Cluster{
				"default": {},
			},
		},
	}
	ctx, cancel := context.WithCancel(logger.WithLogger(context.Background(), logger.NewTestLogger(os.Stdout)))
	t.Cleanup(cancel)
	t.Cleanup(cli.tearDown)
	cli.ownerFetcher = NewOwnerFetcher(ctx, cli)
	return cli
}

func (c *FakeK8sClient) tearDown() {
	c.mu.Lock()
	podWatches := append([]fakePodWatch{}, c.podWatches...)
	serviceWatches := append([]fakeServiceWatch{}, c.serviceWatches...)
	eventWatches := append([]fakeEventWatch{}, c.eventWatches...)
	c.mu.Unlock()

	// Cancel every open watch and drain its channel so that any blocked
	// sender goroutines can exit cleanly.
	for _, watch := range podWatches {
		watch.cancel()
		for range watch.ch {
		}
	}
	for _, watch := range serviceWatches {
		watch.cancel()
		for range watch.ch {
		}
	}
	for _, watch := range eventWatches {
		watch.cancel()
		for range watch.ch {
		}
	}
}

func (c *FakeK8sClient) Upsert(_ context.Context, entities []K8sEntity, timeout time.Duration) ([]K8sEntity, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.UpsertError != nil {
		return nil, c.UpsertError
	}

	var result []K8sEntity
	if c.UpsertResult != nil {
		result = c.UpsertResult
	} else {
		yaml, err := SerializeSpecYAML(entities)
		if err != nil {
			return nil, errors.Wrap(err, "kubectl apply")
		}
		c.Yaml = yaml

		for _, e := range entities {
			clone := e.DeepCopy()
			clone.SetUID(uuid.New().String())
			result = append(result, clone)
		}
	}

	c.LastUpsertResult = result
	c.UpsertTimeout = timeout

	return result, nil
}
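
// Upsert can be scripted per test (illustrative; canned is a hypothetical
// []K8sEntity): leave the override fields zeroed to get deep copies with fresh
// UIDs, or pre-set them to force a canned outcome.
//
//	cli.UpsertError = fmt.Errorf("boom") // next Upsert returns this error
//	cli.UpsertResult = canned            // or: next Upsert returns exactly this slice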

func (c *FakeK8sClient) Delete(_ context.Context, entities []K8sEntity, wait time.Duration) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.DeleteError != nil {
		err := c.DeleteError
		c.DeleteError = nil
		return err
	}

	yaml, err := SerializeSpecYAML(entities)
	if err != nil {
		return errors.Wrap(err, "kubectl delete")
	}
	c.DeletedYaml = yaml
	return nil
}

// Inject adds an entity or replaces it for subsequent retrieval.
//
// Entities are keyed by UID.
func (c *FakeK8sClient) Inject(entities ...K8sEntity) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.t.Helper()
	for i, entity := range entities {
		if entity.UID() == "" {
			c.t.Fatalf("Entity with name[%s] at index[%d] had no UID", entity.Name(), i)
		}
		c.entities[entity.UID()] = entity
		c.currentVersions[entity.Name()] = entity.UID()
	}
}

func (c *FakeK8sClient) GetMetaByReference(ctx context.Context, ref v1.ObjectReference) (metav1.Object, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.getByReferenceCallCount++
	resp, ok := c.entities[ref.UID]
	if !ok {
		logger.Get(ctx).Infof("FakeK8sClient.GetMetaByReference: resource not found: %s", ref.Name)
		return nil, apierrors.NewNotFound(v1.Resource(ref.Kind), ref.Name)
	}
	return resp.Meta(), nil
}

func (c *FakeK8sClient) ListMeta(_ context.Context, gvk schema.GroupVersionKind, ns Namespace) ([]metav1.Object, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.listCallCount++
	if c.listReturnsEmpty {
		return nil, nil
	}

	result := make([]metav1.Object, 0)
	for _, uid := range c.currentVersions {
		entity := c.entities[uid]
		if entity.Namespace().String() != ns.String() {
			continue
		}
		if entity.GVK() != gvk {
			continue
		}
		result = append(result, entity.Meta())
	}
	return result, nil
}
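
// A sketch combining Inject with the read paths (hypothetical deployment
// entity that already carries a UID):
//
//	cli.Inject(deployment)
//	metas, _ := cli.ListMeta(ctx, deployment.GVK(), "default")
//	// metas holds at most one object per name, per the currentVersions map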

func (c *FakeK8sClient) SetLogsForPodContainer(pID PodID, cName container.Name, logs string) {
	c.SetLogReaderForPodContainer(pID, cName, strings.NewReader(logs))
}

func (c *FakeK8sClient) SetLogReaderForPodContainer(pID PodID, cName container.Name, reader io.Reader) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.PodLogsByPodAndContainer[PodAndCName{pID, cName}] = ReaderCloser{Reader: reader}
}

func (c *FakeK8sClient) ContainerLogs(ctx context.Context, pID PodID, cName container.Name, n Namespace, startTime time.Time) (io.ReadCloser, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.ContainerLogsError != nil {
		return nil, c.ContainerLogsError
	}

	// metav1.Time truncates to the nearest second when serializing across the
	// wire, so truncate here to replicate that behavior.
	c.LastPodLogStartTime = startTime.Truncate(time.Second)
	c.LastPodLogContext = ctx

	// If we have specific logs for this pod/container combo, return those
	if buf, ok := c.PodLogsByPodAndContainer[PodAndCName{pID, cName}]; ok {
		return buf, nil
	}

	r, w := io.Pipe()
	c.LastPodLogPipeWriter = w

	return ReaderCloser{Reader: r}, nil
}
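
// Canned logs are keyed by pod/container pair; anything not seeded falls back
// to an io.Pipe that the test can drive through LastPodLogPipeWriter (a sketch):
//
//	cli.SetLogsForPodContainer("pod-a", "main", "hello\n")
//	rc, _ := cli.ContainerLogs(ctx, "pod-a", "main", "default", time.Now())
//	b, _ := io.ReadAll(rc) // b == []byte("hello\n")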

func (c *FakeK8sClient) APIConfig() *api.Config {
	return c.FakeAPIConfig
}

func (c *FakeK8sClient) ClusterHealth(_ context.Context, _ bool) (ClusterHealth, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.ClusterHealthStatus != nil {
		return *c.ClusterHealthStatus, nil
	}

	if c.ClusterHealthError != nil {
		return ClusterHealth{}, c.ClusterHealthError
	}

	return ClusterHealth{
		Live:  true,
		Ready: true,
		// these are not meant to be machine-readable, but they're formatted to
		// look like the actual output in K8s for consistency
		// https://kubernetes.io/docs/reference/using-api/health-checks/
		LiveOutput:  "[+]fake-tilt ok\nlivez check passed\n",
		ReadyOutput: "[+]fake-tilt ok\nreadyz check passed\n",
	}, nil
}
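
// Health responses follow the same override pattern as the other fakes; note
// that ClusterHealthStatus wins if both fields are set (illustrative):
//
//	cli.ClusterHealthStatus = &ClusterHealth{Live: true, Ready: false}
//	// or: cli.ClusterHealthError = fmt.Errorf("connection refused")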

func FakePodStatus(image reference.NamedTagged, phase string) v1.PodStatus {
	return v1.PodStatus{
		Phase: v1.PodPhase(phase),
		ContainerStatuses: []v1.ContainerStatus{
			{
				Name:        "main",
				ContainerID: "docker://" + MagicTestContainerID,
				Image:       image.String(),
				Ready:       true,
			},
			{
				Name:        "tilt-synclet",
				ContainerID: "docker://tilt-testsynclet",
				// can't use the constants in synclet because that would create a dep cycle
				Image: "gcr.io/windmill-public-containers/tilt-synclet:latest",
				Ready: true,
			},
		},
	}
}

func FakePodSpec(image reference.NamedTagged) v1.PodSpec {
	return v1.PodSpec{
		Containers: []v1.Container{
			{
				Name:  "main",
				Image: image.String(),
				Ports: []v1.ContainerPort{
					{
						ContainerPort: 8080,
					},
				},
			},
			{
				Name:  "tilt-synclet",
				Image: "gcr.io/windmill-public-containers/tilt-synclet:latest",
			},
		},
	}
}

func (c *FakeK8sClient) CreatePortForwarder(ctx context.Context, namespace Namespace, podID PodID, optionalLocalPort, remotePort int, host string) (PortForwarder, error) {
	return c.FakePortForwardClient.CreatePortForwarder(ctx, namespace, podID, optionalLocalPort, remotePort, host)
}

func (c *FakeK8sClient) ContainerRuntime(ctx context.Context) container.Runtime {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.Runtime != "" {
		return c.Runtime
	}
	return container.RuntimeDocker
}

func (c *FakeK8sClient) LocalRegistry(_ context.Context) *v1alpha1.RegistryHosting {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.Registry == nil {
		return nil
	}
	return c.Registry.DeepCopy()
}

func (c *FakeK8sClient) NodeIP(ctx context.Context) NodeIP {
	c.mu.Lock()
	defer c.mu.Unlock()

	return c.FakeNodeIP
}

func (c *FakeK8sClient) Exec(ctx context.Context, podID PodID, cName container.Name, n Namespace, cmd []string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	var stdinBytes []byte
	var err error
	if stdin != nil {
		stdinBytes, err = io.ReadAll(stdin)
		if err != nil {
			return errors.Wrap(err, "reading Exec stdin")
		}
	}

	c.ExecCalls = append(c.ExecCalls, ExecCall{
		PID:   podID,
		CName: cName,
		Ns:    n,
		Cmd:   cmd,
		Stdin: stdinBytes,
	})

	if len(c.ExecOutputs) > 0 {
		out := c.ExecOutputs[0]
		c.ExecOutputs = c.ExecOutputs[1:]
		_, _ = io.Copy(stdout, out)
	}

	if len(c.ExecErrors) > 0 {
		err = c.ExecErrors[0]
		c.ExecErrors = c.ExecErrors[1:]
		return err
	}
	return nil
}
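
// Exec is scripted with parallel FIFO queues: each call records an ExecCall,
// pops one reader from ExecOutputs into stdout, and pops one error from
// ExecErrors as its return value (a sketch; assumes bytes.Buffer out/errBuf):
//
//	cli.ExecOutputs = []io.Reader{strings.NewReader("ok")}
//	err := cli.Exec(ctx, "pod-a", "main", "default", []string{"ls"}, nil, &out, &errBuf)
//	// cli.ExecCalls[0].Cmd is []string{"ls"}; out contains "ok"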

func (c *FakeK8sClient) CheckConnected(ctx context.Context) (*version.Info, error) {
	return &version.Info{}, nil
}

func (c *FakeK8sClient) OwnerFetcher() OwnerFetcher {
	return c.ownerFetcher
}

type ReaderCloser struct {
	io.Reader
}

func (b ReaderCloser) Close() error {
	return nil
}

var _ io.ReadCloser = ReaderCloser{}

type FakePortForwarder struct {
	localPort int
	namespace Namespace
	ctx       context.Context
	ready     chan struct{}
	done      chan error
}

var _ PortForwarder = FakePortForwarder{}

func NewFakePortForwarder(ctx context.Context, localPort int, namespace Namespace) FakePortForwarder {
	return FakePortForwarder{
		localPort: localPort,
		namespace: namespace,
		ctx:       ctx,
		ready:     make(chan struct{}, 1),
		done:      make(chan error),
	}
}

func (pf FakePortForwarder) Addresses() []string {
	return []string{"127.0.0.1", "::1"}
}

func (pf FakePortForwarder) LocalPort() int {
	return pf.localPort
}

func (pf FakePortForwarder) Namespace() Namespace {
	return pf.namespace
}

func (pf FakePortForwarder) ForwardPorts() error {
	// In the real port forwarder, the binding/listening logic can fail before the
	// forwarder signals that it's ready. To simulate this in tests, there's a
	// magic port number.
	if pf.localPort == MagicTestExplodingPort {
		return errors.New("fake error starting port forwarding")
	}

	close(pf.ready)

	select {
	case <-pf.ctx.Done():
		// NOTE: the context error should NOT be returned here
		return nil
	case err := <-pf.done:
		return err
	}
}

func (pf FakePortForwarder) ReadyCh() <-chan struct{} {
	return pf.ready
}

// TriggerFailure allows tests to inject errors during forwarding that will be returned by ForwardPorts.
func (pf FakePortForwarder) TriggerFailure(err error) {
	pf.done <- err
}
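
// The intended lifecycle (a sketch; assumes an errCh chan error): run
// ForwardPorts on a goroutine, wait for readiness, then either cancel ctx
// (clean shutdown, nil error) or inject a failure.
//
//	pf := NewFakePortForwarder(ctx, 8080, "default")
//	go func() { errCh <- pf.ForwardPorts() }()
//	<-pf.ReadyCh()
//	pf.TriggerFailure(fmt.Errorf("conn reset")) // ForwardPorts returns this error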

type FakePortForwardClient struct {
	mu               sync.Mutex
	portForwardCalls []PortForwardCall
}

func NewFakePortForwardClient() *FakePortForwardClient {
	return &FakePortForwardClient{
		portForwardCalls: []PortForwardCall{},
	}
}

type PortForwardCall struct {
	PodID      PodID
	RemotePort int
	Host       string
	Forwarder  FakePortForwarder
	Context    context.Context
}

func (c *FakePortForwardClient) CreatePortForwarder(ctx context.Context, namespace Namespace, podID PodID, optionalLocalPort, remotePort int, host string) (PortForwarder, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	result := NewFakePortForwarder(ctx, optionalLocalPort, namespace)
	c.portForwardCalls = append(c.portForwardCalls, PortForwardCall{
		PodID:      podID,
		RemotePort: remotePort,
		Host:       host,
		Forwarder:  result,
		Context:    ctx,
	})

	return result, nil
}
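
// Tests typically drive the fake directly and assert on the recorded calls
// (hypothetical usage):
//
//	client := NewFakePortForwardClient()
//	_, _ = client.CreatePortForwarder(ctx, "default", "pod-a", 8080, 80, "localhost")
//	// client.CreatePortForwardCallCount() == 1
//	// client.LastForwardPortRemotePort() == 80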

func (c *FakePortForwardClient) CreatePortForwardCallCount() int {
	c.mu.Lock()
	defer c.mu.Unlock()

	return len(c.portForwardCalls)
}

func (c *FakePortForwardClient) LastForwardPortPodID() PodID {
	c.mu.Lock()
	defer c.mu.Unlock()

	if len(c.portForwardCalls) == 0 {
		return ""
	}
	return c.portForwardCalls[len(c.portForwardCalls)-1].PodID
}

func (c *FakePortForwardClient) LastForwardPortRemotePort() int {
	c.mu.Lock()
	defer c.mu.Unlock()

	if len(c.portForwardCalls) == 0 {
		return 0
	}
	return c.portForwardCalls[len(c.portForwardCalls)-1].RemotePort
}

func (c *FakePortForwardClient) LastForwardPortHost() string {
	c.mu.Lock()
	defer c.mu.Unlock()

	if len(c.portForwardCalls) == 0 {
		return ""
	}
	return c.portForwardCalls[len(c.portForwardCalls)-1].Host
}

func (c *FakePortForwardClient) LastForwarder() FakePortForwarder {
	c.mu.Lock()
	defer c.mu.Unlock()

	if len(c.portForwardCalls) == 0 {
		return FakePortForwarder{}
	}
	return c.portForwardCalls[len(c.portForwardCalls)-1].Forwarder
}

func (c *FakePortForwardClient) LastForwardContext() context.Context {
	c.mu.Lock()
	defer c.mu.Unlock()

	if len(c.portForwardCalls) == 0 {
		return nil
	}
	return c.portForwardCalls[len(c.portForwardCalls)-1].Context
}

func (c *FakePortForwardClient) PortForwardCalls() []PortForwardCall {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Return a copy so callers can't mutate the fake's internal state.
	return append([]PortForwardCall(nil), c.portForwardCalls...)
}