github.com/webmeshproj/webmesh-cni@v0.0.27/internal/controllers/peercontainer_test.go

/*
Copyright 2023 Avi Zimmerman <avi.zimmerman@gmail.com>.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
	"context"
	"os"
	"testing"
	"time"

	"github.com/google/uuid"
	storagev1 "github.com/webmeshproj/storage-provider-k8s/api/storage/v1"
	storageprovider "github.com/webmeshproj/storage-provider-k8s/provider"
	meshconfig "github.com/webmeshproj/webmesh/pkg/config"
	meshnode "github.com/webmeshproj/webmesh/pkg/meshnode"
	"github.com/webmeshproj/webmesh/pkg/storage/testutil"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	ctrlconfig "sigs.k8s.io/controller-runtime/pkg/config"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"

	cniv1 "github.com/webmeshproj/webmesh-cni/api/v1"
	"github.com/webmeshproj/webmesh-cni/internal/config"
	"github.com/webmeshproj/webmesh-cni/internal/host"
)

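// TestReconciler exercises the PeerContainer reconciler end to end against
// an envtest control plane, substituting webmesh test nodes for real mesh
// nodes.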
func TestReconciler(t *testing.T) {
	// Substitute the test node constructor for both the reconciler and the
	// host so the test does not require real mesh connectivity.
	NewNode = meshnode.NewTestNodeWithLogger
	host.NewMeshNode = meshnode.NewTestNodeWithLogger

	t.Run("SingleNode", func(t *testing.T) {
		rs := newTestReconcilers(t, 1)
		r := rs[0]
		cli := r.Client

		t.Run("ValidContainer", func(t *testing.T) {
			container := newTestContainerFor(r)
			err := cli.Create(context.Background(), &container)
			if err != nil {
				t.Fatal("Failed to create container", err)
			}
			ValidateReconciledContainer(t, r, cli, client.ObjectKeyFromObject(&container))
		})
	})
}

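// ValidateReconciledContainer waits for the PeerContainer identified by key
// to be fully reconciled: the finalizer is set, the node is registered and
// started in the reconciler, the interface status reaches Running, and the
// remaining status fields are populated.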
func ValidateReconciledContainer(t *testing.T, r *PeerContainerReconciler, cli client.Client, key client.ObjectKey) {
	// The finalizer should eventually be set.
	var err error
	ok := testutil.Eventually[bool](func() bool {
		var container cniv1.PeerContainer
		err = cli.Get(context.Background(), key, &container)
		if err != nil {
			t.Log("Failed to get container", err)
			return false
		}
		return controllerutil.ContainsFinalizer(&container, cniv1.PeerContainerFinalizer)
	}).ShouldEqual(time.Second*10, time.Second, true)
	if !ok {
		t.Fatalf("Failed to see finalizer on peer container")
	}
	// The node should eventually be in the reconciler's node list.
	ok = testutil.Eventually[bool](func() bool {
		r.mu.Lock()
		defer r.mu.Unlock()
		_, ok := r.containerNodes[key]
		return ok
	}).ShouldEqual(time.Second*10, time.Second, true)
	if !ok {
		t.Fatalf("Failed to see node in reconciler")
	}
	// The node should eventually be started.
	ok = testutil.Eventually[bool](func() bool {
		r.mu.Lock()
		defer r.mu.Unlock()
		node, ok := r.containerNodes[key]
		if !ok {
			// Would be very strange at this point.
			t.Log("Failed to find node in reconciler")
			return false
		}
		return node.Started()
	}).ShouldEqual(time.Second*10, time.Second, true)
	if !ok {
		t.Fatalf("Failed to see node in started state")
	}
	// The peer container status should eventually be set to Running.
	var container cniv1.PeerContainer
	ok = testutil.Eventually[bool](func() bool {
		err = cli.Get(context.Background(), key, &container)
		if err != nil {
			t.Log("Failed to get container", err)
			return false
		}
		t.Log("Container status", container.Status)
		return container.Status.InterfaceStatus == cniv1.InterfaceStatusRunning
	}).ShouldEqual(time.Second*10, time.Second, true)
	if !ok {
		t.Fatalf("Failed to see container in running state")
	}
	// All status fields should be populated.
	if container.Status.InterfaceName != container.Spec.IfName {
		t.Fatal("Interface name not set correctly, got:", container.Status.InterfaceName, "expected:", container.Spec.IfName)
	}
	if !container.Status.HasNetworkInfo() {
		t.Fatalf("Network info not set")
	}
}

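// newTestContainerFor returns a PeerContainer assigned to the given
// reconciler's host node, using a random container ID for its name, node ID,
// and (truncated) interface name.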
func newTestContainerFor(r *PeerContainerReconciler) cniv1.PeerContainer {
	containerID := uuid.NewString()
	return cniv1.PeerContainer{
		TypeMeta: metav1.TypeMeta{
			Kind:       "PeerContainer",
			APIVersion: cniv1.GroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      containerID,
			Namespace: "default",
		},
		Spec: cniv1.PeerContainerSpec{
			NodeID: containerID,
			Netns:  "/proc/1/ns/net",
			// Truncate the ID so it fits in an interface name.
			IfName:   containerID[:min(9, len(containerID))] + "0",
			NodeName: r.Host.ID().String(),
			MTU:      1500,
		},
	}
}

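// newTestReconcilers creates count reconcilers sharing a single test manager,
// each backed by its own storage provider and host node. It starts the
// manager, the providers, and the host nodes, registering cleanup on t.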
func newTestReconcilers(t *testing.T, count int) []*PeerContainerReconciler {
	t.Helper()
	mgr := newTestManager(t)
	var out []*PeerContainerReconciler
	for i := 0; i < count; i++ {
		id := uuid.NewString()
		// Create the storage provider.
		t.Log("Creating webmesh storage provider for reconciler")
		storageOpts := storageprovider.Options{
			NodeID:                      id,
			Namespace:                   "default",
			ListenPort:                  0,
			LeaderElectionLeaseDuration: time.Second * 15,
			LeaderElectionRenewDeadline: time.Second * 10,
			LeaderElectionRetryPeriod:   time.Second * 2,
			ShutdownTimeout:             time.Second * 10,
		}
		provider, err := storageprovider.NewWithManager(mgr, storageOpts)
		if err != nil {
			t.Fatal("Failed to create storage provider", err)
		}
		t.Log("Creating test reconciler", id)
		// Named hostNode to avoid shadowing the host package.
		hostNode := host.NewNode(provider, host.Config{
			NodeID:             id,
			Namespace:          "default",
			LockDuration:       time.Second * 10,
			LockAcquireTimeout: time.Second * 5,
			ConnectTimeout:     time.Second * 30,
			Network: host.NetworkConfig{
				PodCIDR:       "10.42.0.0/16",
				ClusterDomain: "cluster.local",
				DisableIPv4:   false,
				DisableIPv6:   false,
			},
			Services: meshconfig.NewServiceOptions(true),
			LogLevel: "info",
		})
		r := &PeerContainerReconciler{
			Client:   mgr.GetClient(),
			Provider: provider,
			Host:     hostNode,
			Config: config.Config{
				Manager: config.ManagerConfig{
					ReconcileTimeout: time.Second * 15,
				},
				Storage: config.StorageConfig{
					LeaderElectLeaseDuration: time.Second * 15,
					LeaderElectRenewDeadline: time.Second * 10,
					LeaderElectRetryPeriod:   time.Second * 2,
					CacheSyncTimeout:         time.Second * 10,
				},
			},
		}
		t.Log("Setting up reconciler with manager")
		err = r.SetupWithManager(mgr)
		if err != nil {
			t.Fatal("Failed to setup reconciler", err)
		}
		out = append(out, r)
	}
	t.Log("Starting manager and storage provider")
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	go func() {
		err := mgr.Start(ctx)
		if err != nil {
			t.Log("Failed to start manager", err)
		}
	}()
	for _, r := range out {
		r := r // Capture the loop variable for the cleanup closures below.
		t.Log("Starting storage provider for reconciler")
		err := r.Provider.StartUnmanaged(context.Background())
		if err != nil {
			t.Fatal("Failed to start storage provider", err)
		}
		t.Cleanup(func() {
			err := r.Provider.Close()
			if err != nil {
				t.Log("Failed to stop storage provider", err)
			}
		})
		t.Log("Starting host node for reconciler")
		err = r.Host.Start(ctx, mgr.GetConfig())
		if err != nil {
			t.Fatal("Failed to start host node", err)
		}
		t.Cleanup(func() {
			err := r.Host.Stop(context.Background())
			if err != nil {
				t.Log("Failed to stop host node", err)
			}
		})
	}
	return out
}

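// newTestManager builds a controller-runtime manager for the envtest API
// server with the client-go, CNI, and storage schemes registered and the
// metrics and health probe listeners disabled.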
func newTestManager(t *testing.T) ctrl.Manager {
	t.Helper()
	cfg := newTestEnv(t)
	t.Log("Setting up test manager")
	scheme := runtime.NewScheme()
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(cniv1.AddToScheme(scheme))
	utilruntime.Must(storagev1.AddToScheme(scheme))
	shutdownTimeout := time.Second * 10
	needLeaderElection := false
	mgr, err := ctrl.NewManager(cfg, ctrl.Options{
		Scheme: scheme,
		// Bind addresses of "0" disable the metrics and health probe
		// listeners in tests.
		Metrics: metricsserver.Options{
			BindAddress: "0",
		},
		HealthProbeBindAddress:  "0",
		GracefulShutdownTimeout: &shutdownTimeout,
		Controller: ctrlconfig.Controller{
			GroupKindConcurrency: map[string]int{
				"PeerContainer.cni.webmesh.io": 1,
			},
			NeedLeaderElection: &needLeaderElection,
		},
		Client: client.Options{
			// Custom storage objects are read straight from the API server
			// rather than the cache.
			Cache: &client.CacheOptions{
				DisableFor: storagev1.CustomObjects,
			},
		},
	})
	if err != nil {
		t.Fatal("Failed to create manager", err)
	}
	return mgr
}

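// newTestEnv starts an envtest control plane serving the storage provider
// CRDs along with any CRD manifests named by the CRD_PATHS environment
// variable, and registers its shutdown on t.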
func newTestEnv(t *testing.T) *rest.Config {
	t.Helper()
	t.Log("Starting test environment")
	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&zap.Options{Development: true})))
	testenv := envtest.Environment{
		CRDs: storagev1.GetCustomResourceDefintions(),
		// CRD manifests for the CNI types are supplied via the CRD_PATHS
		// environment variable.
		CRDDirectoryPaths:        []string{os.Getenv("CRD_PATHS")},
		ErrorIfCRDPathMissing:    true,
		ControlPlaneStartTimeout: time.Second * 20,
		ControlPlaneStopTimeout:  time.Second * 10,
	}
	cfg, err := testenv.Start()
	if err != nil {
		t.Fatal("Failed to start test environment", err)
	}
	t.Cleanup(func() {
		t.Log("Stopping test environment")
		err := testenv.Stop()
		if err != nil {
			t.Log("Failed to stop test environment", err)
		}
	})
	return cfg
}