github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/controllers/core/portforward/reconciler.go (about)

     1  package portforward
     2  
     3  import (
     4  	"context"
     5  	"sort"
     6  	"sync"
     7  	"time"
     8  
     9  	"k8s.io/apimachinery/pkg/runtime"
    10  	"sigs.k8s.io/controller-runtime/pkg/builder"
    11  	"sigs.k8s.io/controller-runtime/pkg/client"
    12  
    13  	"github.com/tilt-dev/tilt/internal/controllers/apicmp"
    14  	"github.com/tilt-dev/tilt/internal/controllers/apis/cluster"
    15  	"github.com/tilt-dev/tilt/internal/controllers/indexer"
    16  	"github.com/tilt-dev/tilt/internal/timecmp"
    17  	"github.com/tilt-dev/tilt/pkg/apis"
    18  	"github.com/tilt-dev/tilt/pkg/logger"
    19  
    20  	"k8s.io/apimachinery/pkg/api/equality"
    21  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    22  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    23  	"k8s.io/apimachinery/pkg/types"
    24  	ctrl "sigs.k8s.io/controller-runtime"
    25  	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
    26  	"sigs.k8s.io/controller-runtime/pkg/reconcile"
    27  
    28  	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
    29  
    30  	"k8s.io/apimachinery/pkg/util/wait"
    31  
    32  	"github.com/tilt-dev/tilt/internal/k8s"
    33  	"github.com/tilt-dev/tilt/internal/store"
    34  )
    35  
// clusterGVK identifies the Cluster kind for indexer keys, so PortForwards
// get re-reconciled when the Cluster object they reference changes.
var clusterGVK = v1alpha1.SchemeGroupVersion.WithKind("Cluster")
    37  
// Reconciler manages the lifecycle of port-forward connections for
// PortForward API objects: it starts forwards when objects appear,
// restarts them when the spec or cluster connection changes, and tears
// them down when objects are deleted.
type Reconciler struct {
	store      store.RStore
	ctrlClient ctrlclient.Client
	clients    *cluster.ClientManager
	requeuer   *indexer.Requeuer
	indexer    *indexer.Indexer

	// map of PortForward object name --> running forward(s)
	activeForwards map[types.NamespacedName]*portForwardEntry
}
    48  
// Compile-time checks that Reconciler satisfies the interfaces it is
// registered under.
var _ store.TearDowner = &Reconciler{}
var _ reconcile.Reconciler = &Reconciler{}
    51  
    52  func NewReconciler(
    53  	ctrlClient ctrlclient.Client,
    54  	scheme *runtime.Scheme,
    55  	store store.RStore,
    56  	clients cluster.ClientProvider,
    57  ) *Reconciler {
    58  	return &Reconciler{
    59  		store:          store,
    60  		ctrlClient:     ctrlClient,
    61  		clients:        cluster.NewClientManager(clients),
    62  		requeuer:       indexer.NewRequeuer(),
    63  		indexer:        indexer.NewIndexer(scheme, indexPortForward),
    64  		activeForwards: make(map[types.NamespacedName]*portForwardEntry),
    65  	}
    66  }
    67  
    68  func (r *Reconciler) CreateBuilder(mgr ctrl.Manager) (*builder.Builder, error) {
    69  	b := ctrl.NewControllerManagedBy(mgr).
    70  		For(&PortForward{}).
    71  		WatchesRawSource(r.requeuer)
    72  
    73  	return b, nil
    74  }
    75  
    76  func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    77  	err := r.reconcile(ctx, req.NamespacedName)
    78  	return ctrl.Result{}, err
    79  }
    80  
// reconcile synchronizes the running forwards for a single PortForward:
// stops forwards for deleted objects, (re)starts forwards when the spec,
// manifest annotation, or cluster connection changed, and syncs status
// back to the API server.
func (r *Reconciler) reconcile(ctx context.Context, name types.NamespacedName) error {
	pf := &PortForward{}
	err := r.ctrlClient.Get(ctx, name, pf)
	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}

	r.indexer.OnReconcile(name, pf)
	if apierrors.IsNotFound(err) || pf.ObjectMeta.DeletionTimestamp != nil {
		// PortForward deleted in API server -- stop and remove it
		r.stop(name)
		return nil
	}

	var clusterObj v1alpha1.Cluster
	if err := r.ctrlClient.Get(ctx, clusterNN(pf), &clusterObj); err != nil {
		return err
	}
	// Refresh reports whether the cluster connection changed; if it did,
	// any existing forward is talking to a stale cluster and must be
	// recreated below.
	clusterUpToDate := !r.clients.Refresh(pf, &clusterObj)

	needsCreate := true
	if active, ok := r.activeForwards[name]; ok {
		if clusterUpToDate &&
			equality.Semantic.DeepEqual(active.spec, pf.Spec) &&
			equality.Semantic.DeepEqual(active.meta.Annotations[v1alpha1.AnnotationManifest],
				pf.ObjectMeta.Annotations[v1alpha1.AnnotationManifest]) {

			// No change needed.
			needsCreate = false
		} else {
			// An update to a PortForward we're already running -- stop the existing one
			r.stop(name)
		}
	}

	if needsCreate {
		kCli, err := r.clients.GetK8sClient(pf, &clusterObj)
		if err != nil {
			// TODO(milas): a top-level error field on PortForwardStatus is
			// 	likely warranted to report issues like this
			return err
		}

		// Create a new PortForward OR recreate a modified PortForward (stopped above)
		entry := newEntry(ctx, pf, kCli)
		r.activeForwards[name] = entry

		// Treat port-forwarding errors as part of the pod log
		ctx = store.MustObjectLogHandler(entry.ctx, r.store, pf)

		// One goroutine per declared forward; each runs until entry's
		// context is canceled by stop().
		for _, forward := range entry.spec.Forwards {
			go r.portForwardLoop(ctx, entry, forward)
		}
	}

	return r.maybeUpdateStatus(ctx, pf, r.activeForwards[name])
}
   138  
   139  func (r *Reconciler) portForwardLoop(ctx context.Context, entry *portForwardEntry, forward Forward) {
   140  	originalBackoff := wait.Backoff{
   141  		Steps:    1000,
   142  		Duration: 50 * time.Millisecond,
   143  		Factor:   2.0,
   144  		Jitter:   0.1,
   145  		Cap:      15 * time.Second,
   146  	}
   147  	currentBackoff := originalBackoff
   148  
   149  	for {
   150  		start := time.Now()
   151  		r.onePortForward(ctx, entry, forward)
   152  		if ctx.Err() != nil {
   153  			// If the context was canceled, there's nothing more to do;
   154  			// we cannot even update the status because we no longer have
   155  			// a valid context, but that's fine because that means this
   156  			// PortForward is being deleted.
   157  			return
   158  		}
   159  
   160  		// If this failed in less than a second, then we should advance the backoff.
   161  		// Otherwise, reset the backoff.
   162  		if time.Since(start) < time.Second {
   163  			time.Sleep(currentBackoff.Step())
   164  		} else {
   165  			currentBackoff = originalBackoff
   166  		}
   167  	}
   168  }
   169  
   170  func (r *Reconciler) maybeUpdateStatus(ctx context.Context, pf *v1alpha1.PortForward, entry *portForwardEntry) error {
   171  	newStatuses := entry.statuses()
   172  	if apicmp.DeepEqual(pf.Status.ForwardStatuses, newStatuses) {
   173  		// the forwards didn't actually change, so skip the update
   174  		return nil
   175  	}
   176  
   177  	update := pf.DeepCopy()
   178  	update.Status.ForwardStatuses = newStatuses
   179  	return client.IgnoreNotFound(r.ctrlClient.Status().Update(ctx, update))
   180  }
   181  
// onePortForward creates and runs a single port-forward connection,
// blocking until it terminates (by error or context cancellation). Status
// changes are recorded on the entry and the object is requeued so the
// reconciler publishes them to the API server.
func (r *Reconciler) onePortForward(ctx context.Context, entry *portForwardEntry, forward Forward) {
	logError := func(err error) {
		logger.Get(ctx).Infof("Reconnecting... Error port-forwarding %s (%d -> %d): %v",
			entry.meta.Annotations[v1alpha1.AnnotationManifest],
			forward.LocalPort, forward.ContainerPort, err)
	}

	pf, err := entry.client.CreatePortForwarder(
		ctx,
		k8s.Namespace(entry.spec.Namespace),
		k8s.PodID(entry.spec.PodName),
		int(forward.LocalPort),
		int(forward.ContainerPort),
		forward.Host)
	if err != nil {
		// Could not even construct the forwarder -- record the error and
		// requeue so the status reaches the API server.
		logError(err)
		entry.setStatus(forward, ForwardStatus{
			LocalPort:     forward.LocalPort,
			ContainerPort: forward.ContainerPort,
			Error:         err.Error(),
		})
		r.requeuer.Add(entry.name)
		return
	}

	// wait in the background for the port forwarder to signal that it's ready to update the status
	// the doneCh ensures we don't leak the goroutine if ForwardPorts() errors out early without
	// ever becoming ready
	doneCh := make(chan struct{}, 1)
	go func() {
		readyCh := pf.ReadyCh()
		if readyCh == nil {
			return
		}
		select {
		case <-ctx.Done():
			// context canceled before forward was ever ready
			return
		case <-doneCh:
			// forward initialization errored at start before ready
			return
		case <-readyCh:
			// Forward is up: record the local port and addresses reported
			// by the forwarder, then requeue to publish the status.
			entry.setStatus(forward, ForwardStatus{
				LocalPort:     int32(pf.LocalPort()),
				ContainerPort: forward.ContainerPort,
				Addresses:     pf.Addresses(),
				StartedAt:     apis.NowMicro(),
			})
			r.requeuer.Add(entry.name)
		}
	}()

	// ForwardPorts blocks for the lifetime of the connection; closing
	// doneCh afterward releases the readiness goroutine above.
	err = pf.ForwardPorts()
	close(doneCh)
	if err != nil {
		logError(err)
		entry.setStatus(forward, ForwardStatus{
			LocalPort:     int32(pf.LocalPort()),
			ContainerPort: forward.ContainerPort,
			Addresses:     pf.Addresses(),
			Error:         err.Error(),
		})
		r.requeuer.Add(entry.name)
		return
	}
}
   248  
   249  func (r *Reconciler) TearDown(_ context.Context) {
   250  	for name := range r.activeForwards {
   251  		r.stop(name)
   252  	}
   253  }
   254  
   255  func (r *Reconciler) stop(name types.NamespacedName) {
   256  	entry, ok := r.activeForwards[name]
   257  	if !ok {
   258  		return
   259  	}
   260  	entry.cancel()
   261  	delete(r.activeForwards, name)
   262  }
   263  
// portForwardEntry is the in-memory record of one running PortForward: a
// snapshot of the object's metadata/spec at creation time, plus the
// cancelable context its per-forward goroutines run under.
type portForwardEntry struct {
	name   types.NamespacedName
	meta   metav1.ObjectMeta
	spec   v1alpha1.PortForwardSpec
	ctx    context.Context
	cancel func()

	// mu guards status, which is written concurrently by the per-forward
	// goroutines and read by the reconciler via statuses().
	mu     sync.Mutex
	status map[Forward]ForwardStatus
	client k8s.Client
}
   275  
   276  func newEntry(ctx context.Context, pf *PortForward, cli k8s.Client) *portForwardEntry {
   277  	ctx, cancel := context.WithCancel(ctx)
   278  	return &portForwardEntry{
   279  		name:   types.NamespacedName{Name: pf.Name, Namespace: pf.Namespace},
   280  		meta:   pf.ObjectMeta,
   281  		spec:   pf.Spec,
   282  		ctx:    ctx,
   283  		cancel: cancel,
   284  		status: make(map[Forward]ForwardStatus),
   285  		client: cli,
   286  	}
   287  }
   288  
// setStatus records the latest status for a single Forward spec.
// Safe for concurrent use by the per-forward goroutines.
func (e *portForwardEntry) setStatus(spec Forward, status ForwardStatus) {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.status[spec] = status
}
   294  
   295  func (e *portForwardEntry) statuses() []ForwardStatus {
   296  	e.mu.Lock()
   297  	defer e.mu.Unlock()
   298  
   299  	var statuses []ForwardStatus
   300  	for _, s := range e.status {
   301  		statuses = append(statuses, *s.DeepCopy())
   302  	}
   303  	sort.SliceStable(statuses, func(i, j int) bool {
   304  		if statuses[i].ContainerPort < statuses[j].ContainerPort {
   305  			return true
   306  		}
   307  		if statuses[i].LocalPort < statuses[j].LocalPort {
   308  			return true
   309  		}
   310  		return timecmp.BeforeOrEqual(statuses[i].StartedAt, statuses[j].StartedAt)
   311  	})
   312  	return statuses
   313  }
   314  
   315  func indexPortForward(obj ctrlclient.Object) []indexer.Key {
   316  	var keys []indexer.Key
   317  	pf := obj.(*v1alpha1.PortForward)
   318  
   319  	if pf.Spec.Cluster != "" {
   320  		keys = append(keys, indexer.Key{
   321  			Name: clusterNN(pf),
   322  			GVK:  clusterGVK,
   323  		})
   324  	}
   325  
   326  	return keys
   327  }
   328  
   329  func clusterNN(pf *v1alpha1.PortForward) types.NamespacedName {
   330  	return types.NamespacedName{
   331  		Namespace: pf.ObjectMeta.Namespace,
   332  		Name:      pf.Spec.Cluster,
   333  	}
   334  }