github.com/webmeshproj/webmesh-cni@v0.0.27/internal/cmd/e2e_test.go

//go:build e2e

/*
Copyright 2023 Avi Zimmerman <avi.zimmerman@gmail.com>.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e_test

import (
	"bytes"
	"context"
	"fmt"
	"net/netip"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/webmeshproj/webmesh/pkg/storage/testutil"
	"gopkg.in/yaml.v3"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	"github.com/webmeshproj/webmesh-cni/internal/types"
)

var (
	testImageEnvVar      = "E2E_TEST_IMAGE"
	kindExec             = os.Getenv("E2E_KIND_EXEC")
	kustomizeExec        = os.Getenv("E2E_KUSTOMIZE_EXEC")
	kubectlExec          = os.Getenv("E2E_KUBECTL_EXEC")
	defaultKindExec      = "kind"
	defaultKustomizeExec = "kustomize"
	defaultKubectlExec   = "kubectl"
	testImage            = "ghcr.io/webmeshproj/webmesh-cni:latest"
	defaultKustomization = mustAbsolute("../../deploy/kustomization.yaml")
	kustomizeImageName   = "ghcr.io/webmeshproj/webmesh-cni"
	testDirs             = []string{
		mustAbsolute("../../examples/single-cluster"),
	}
)

// E2ESpec is the spec for an end-to-end test.
type E2ESpec struct {
	// Clusters is the list of clusters to create for the test.
	Clusters []E2ECluster `yaml:"clusters,omitempty"`
}

func (e *E2ESpec) Default() {
	for i, cfg := range e.Clusters {
		c := cfg.Default()
		e.Clusters[i] = c
	}
}

// E2ECluster is the spec of a cluster in an end-to-end test.
type E2ECluster struct {
	// Name is the name of the cluster.
	Name string `yaml:"name,omitempty"`
	// CNINamespace is the namespace the kustomization installs the CNI in.
	// Defaults to kube-system.
	CNINamespace string `yaml:"cniNamespace,omitempty"`
	// KindConfig is the path to the kind config to use for the cluster.
	// If left empty, it will be automatically detected.
	KindConfig string `yaml:"kindConfig,omitempty"`
	// Kustomization is the path to the kustomization file to use for the test.
	// Empty or "default" means to use the default kustomization.
	Kustomization string `yaml:"kustomization,omitempty"`
	// NodeCount is the number of nodes the cluster creates. This will be used
	// to verify how many webmesh-nodes should become ready.
	NodeCount int `yaml:"nodeCount,omitempty"`
	// PodCIDR is the pod CIDR to use for the cluster. This will be used to verify
	// that all nodes in the cluster are assigned an IP address from the pod CIDR.
	PodCIDR Prefix `yaml:"podCIDR,omitempty"`
	// PodCount is the number of pods that the test will create.
	// This will be used to verify that all containers become ready
	// and are assigned an IP address from the pod CIDR. Defaults to
	// 3, which is the assumed number of coredns and local-path-provisioner pods.
	//
	// Any provided value must take coredns pods and any local-path-provisioners
	// into account. This can be set to -1 to skip the test.
	PodCount int `yaml:"podCount,omitempty"`
}

// Default applies defaults to the cluster spec and returns the result.
func (e *E2ECluster) Default() E2ECluster {
	if e.CNINamespace == "" {
		e.CNINamespace = "kube-system"
	}
	if e.Kustomization == "" || e.Kustomization == "default" {
		e.Kustomization = defaultKustomization
	}
	if e.PodCount == 0 {
		e.PodCount = 3
	}
	return *e
}

// Prefix wraps netip.Prefix with YAML marshaling support.
type Prefix struct{ netip.Prefix }

func (p Prefix) MarshalYAML() (interface{}, error) {
	return p.String(), nil
}

func (p *Prefix) UnmarshalYAML(value *yaml.Node) error {
	s := value.Value
	if s == "" {
		return nil
	}
	prefix, err := netip.ParsePrefix(s)
	if err != nil {
		return err
	}
	*p = Prefix{prefix}
	return nil
}

func TestWebmeshCNIEndToEnd(t *testing.T) {
	Init(t)
	for _, dir := range testDirs {
		t.Run(filepath.Base(dir), func(t *testing.T) {
			// Setup the test.
			t.Log("Changing directory to: ", dir)
			err := os.Chdir(dir)
			if err != nil {
				t.Fatalf("Failed to change directory to %s: %v", dir, err)
			}
			// Expect an e2e.yaml file in the test directory.
			e2eFile := filepath.Join(dir, "e2e.yaml")
			t.Logf("Reading e2e spec from file: %s", e2eFile)
			data, err := os.ReadFile(e2eFile)
			if err != nil {
				t.Fatalf("Failed to read e2e spec: %v", err)
			}
			var e2eSpec E2ESpec
			err = yaml.Unmarshal(data, &e2eSpec)
			if err != nil {
				t.Fatalf("Failed to unmarshal e2e spec: %v", err)
			}
			e2eSpec.Default()
			kubeConfigs := make(map[string]string, len(e2eSpec.Clusters))
			clusters := make(map[string]struct{}, len(e2eSpec.Clusters))
			t.Cleanup(func() {
				for _, kubeconf := range kubeConfigs {
					t.Log("Deleting kubeconf: ", kubeconf)
					err := os.Remove(kubeconf)
					if err != nil {
						t.Logf("Failed to remove kubeconf %s: %v", kubeconf, err)
					}
				}
				for cluster := range clusters {
					t.Log("Deleting kind cluster: ", cluster)
					execCmd(t, kindExec, "delete", "cluster", "--name", cluster)
				}
			})
			t.Logf("Parsed E2E spec: %+v", e2eSpec)

			t.Run("Setup", func(t *testing.T) {
				// Create the clusters for the test.
				for _, cfg := range e2eSpec.Clusters {
					t.Logf("Creating cluster for test: %s", cfg.Name)
					// Create a kind cluster for the kind config.
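					// When the spec does not name a kind config, fall back to the first
					// kind Cluster config found in the test directory.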
					kindConfig := cfg.KindConfig
					if kindConfig == "" {
						configs := findKindConfigs(t, dir)
						if len(configs) == 0 {
							t.Fatal("No kind configs found")
						}
						kindConfig = configs[0]
					}
					clusterName := fmt.Sprintf("cni-e2e-%s", filepath.Base(filepath.Dir(kindConfig)))
					clusters[clusterName] = struct{}{}
					kubeconfDir, err := os.MkdirTemp("", "webmesh-cni-e2e-*")
					if err != nil {
						t.Fatalf("Failed to create kubeconf: %v", err)
					}
					kubeConf := filepath.Join(kubeconfDir, "kubeconfig")
					kubeConfigs[cfg.Name] = kubeConf
					t.Logf("Using temporary kubeconf: %s", kubeConf)
					t.Logf("Creating kind cluster %q for config: %s", clusterName, kindConfig)
					execCmd(t,
						kindExec, "create", "cluster",
						"--config", kindConfig,
						"--name", clusterName,
						"--kubeconfig", kubeConf,
					)
					t.Logf("Importing image %q into kind cluster %q", testImage, clusterName)
					execCmd(t,
						kindExec, "load", "docker-image",
						"--name", clusterName,
						testImage,
					)
					kustomization, err := filepath.Abs(cfg.Kustomization)
					if err != nil {
						t.Fatalf("Failed to get absolute path for %s: %v", cfg.Kustomization, err)
					}
					kustomizationDir := filepath.Dir(kustomization)
					// Edit the kustomization to use the test image.
					t.Logf("Editing kustomization %s to use image %s", kustomization, testImage)
					doInDir(t, kustomizationDir, func() {
						execCmd(t,
							kustomizeExec, "edit", "set", "image", kustomizeImageName+"="+testImage,
						)
					})
					t.Logf("Installing webmesh-cni using kustomization %s", kustomization)
					execCmd(t,
						kubectlExec,
						"--kubeconfig", kubeConf,
						"apply", "-k", kustomizationDir,
					)
				}
			})

			// Run the test specs.

			t.Run("ReadyWebmeshCNIPods", func(t *testing.T) {
				// We should have a ready webmesh-node for each node in the cluster.
				ctx := context.Background()
				for _, cfg := range e2eSpec.Clusters {
					t.Run(cfg.Name, func(t *testing.T) {
						kubeconf := kubeConfigs[cfg.Name]
						cli := getClient(t, kubeconf)
						expectedNodes := cfg.NodeCount
						var got int
						// There should eventually be running CNI pods for each node in the cluster.
						var pods []client.ObjectKey
						ok := testutil.Eventually[int](func() int {
							var podList corev1.PodList
							err := cli.List(ctx, &podList, client.InNamespace(cfg.CNINamespace))
							if err != nil {
								t.Fatalf("Failed to list pods: %v", err)
								return -1
							}
							pods = make([]client.ObjectKey, 0, len(podList.Items))
						Pods:
							for _, pod := range podList.Items {
								// Ignore pods we've already seen
								for _, seen := range pods {
									if seen.Namespace == pod.Namespace && seen.Name == pod.Name {
										continue Pods
									}
								}
								if pod.GetDeletionTimestamp() != nil {
									continue
								}
								if !strings.HasPrefix(pod.GetName(), "webmesh-node-") {
									continue
								}
								t.Log("Found webmesh-node pod: ", pod.Name)
								if pod.Status.Phase != corev1.PodRunning {
									t.Log("Pod is not running: ", pod.Name)
									continue
								}
								t.Log("webmesh-node pod is running: ", pod.Name)
								pods = append(pods, client.ObjectKey{Namespace: pod.Namespace, Name: pod.Name})
							}
							got = len(pods)
							return got
						}).ShouldEqual(time.Minute, time.Second*2, expectedNodes)
						if !ok {
							t.Fatalf("Failed to get expected number of CNI pods: %d, got: %d", expectedNodes, got)
						}
						// Each of the above pods should eventually reach the ready state.
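						// Each pod gets its own readiness wait with a shorter timeout,
						// separate from the count check above.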
						for _, podKey := range pods {
							t.Log("Waiting for CNI pod to reach ready state: ", podKey)
							ok := testutil.Eventually[bool](func() bool {
								var pod corev1.Pod
								err := cli.Get(ctx, podKey, &pod)
								if err != nil {
									t.Fatalf("Failed to get CNI pod %s: %v", podKey, err)
									return false
								}
								for _, cond := range pod.Status.Conditions {
									if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
										return true
									}
								}
								return false
							}).ShouldEqual(time.Second*30, time.Second, true)
							if !ok {
								t.Error("Failed to get CNI pod to ready state: ", podKey)
							}
						}
					})
				}
			})

			t.Run("CoreDNSPodsAssignedIPs", func(t *testing.T) {
				// The clusters are assumed to use coredns and each coredns pod
				// should eventually be assigned an IP address from the pod CIDR.
				ctx := context.Background()
				for _, cfg := range e2eSpec.Clusters {
					t.Run(cfg.Name, func(t *testing.T) {
						kubeconf := kubeConfigs[cfg.Name]
						cli := getClient(t, kubeconf)
						expectedNodes := cfg.NodeCount
						var got int
						// There should eventually be running CoreDNS pods for each node in the cluster.
						var pods []client.ObjectKey
						ok := testutil.Eventually[int](func() int {
							var podList corev1.PodList
							err := cli.List(ctx, &podList, client.InNamespace("kube-system"), client.MatchingLabels{
								"k8s-app": "kube-dns",
							})
							if err != nil {
								t.Fatalf("Failed to list pods: %v", err)
								return -1
							}
							pods = make([]client.ObjectKey, 0, len(podList.Items))
						Pods:
							for _, pod := range podList.Items {
								// Ignore pods we've already seen
								for _, seen := range pods {
									if seen.Namespace == pod.Namespace && seen.Name == pod.Name {
										continue Pods
									}
								}
								if pod.GetDeletionTimestamp() != nil {
									continue
								}
								t.Log("Found CoreDNS pod: ", pod.Name)
								if pod.Status.Phase != corev1.PodRunning {
									t.Log("Pod is not running: ", pod.Name)
									continue
								}
								t.Log("CoreDNS pod is running: ", pod.Name)
								pods = append(pods, client.ObjectKey{Namespace: pod.Namespace, Name: pod.Name})
							}
							got = len(pods)
							return got
						}).ShouldEqual(time.Minute, time.Second, expectedNodes)
						if !ok {
							t.Fatalf("Failed to get expected number of CoreDNS pods: %d, got: %d", expectedNodes, got)
						}
						// Each of the above pods should eventually reach the ready state.
						for _, podKey := range pods {
							t.Log("Waiting for CoreDNS pod to reach ready state: ", podKey)
							ok := testutil.Eventually[bool](func() bool {
								var pod corev1.Pod
								err := cli.Get(ctx, podKey, &pod)
								if err != nil {
									t.Fatalf("Failed to get pod %s: %v", podKey, err)
									return false
								}
								for _, cond := range pod.Status.Conditions {
									if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
										return true
									}
								}
								return false
							}).ShouldEqual(time.Second*30, time.Second, true)
							if !ok {
								t.Error("Failed to get CoreDNS pod to ready state: ", podKey)
							}
						}
						// Each of the above pods' IP addresses should be in the pod CIDR.
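						// Track addresses across all CoreDNS pods so duplicate assignments
						// are reported as well.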
						seen := make(map[netip.Addr]struct{})
						for _, podKey := range pods {
							t.Logf("Checking that %s has an IP address from the pod CIDR", podKey)
							var pod corev1.Pod
							err := cli.Get(ctx, podKey, &pod)
							if err != nil {
								t.Fatalf("Failed to get pod %s: %v", podKey, err)
							}
							var hasIP bool
							for _, ip := range pod.Status.PodIPs {
								addr, err := netip.ParseAddr(ip.IP)
								if err != nil {
									t.Fatalf("Failed to parse IP address %s: %v", ip.IP, err)
								}
								if _, ok := seen[addr]; ok {
									t.Errorf("CoreDNS pod %s has duplicate IP address %s", podKey, ip.IP)
									continue
								}
								seen[addr] = struct{}{}
								if cfg.PodCIDR.Contains(addr) {
									t.Logf("CoreDNS pod %s has unique IP address %s from pod CIDR %s", podKey, ip.IP, cfg.PodCIDR)
									hasIP = true
									break
								}
							}
							if !hasIP {
								t.Errorf("Pod %s does not have an IP address from pod CIDR %s", podKey, cfg.PodCIDR)
							}
						}
					})
				}
			})

			t.Run("ReadyPods", func(t *testing.T) {
				// The number of containers provided in the spec should exist and
				// each should have a unique IP address from the pod CIDR.
				ctx := context.Background()
				for _, cfg := range e2eSpec.Clusters {
					t.Run(cfg.Name, func(t *testing.T) {
						if cfg.PodCount <= 0 {
							t.Skip("No pod count specified for cluster")
						}
						kubeconf := kubeConfigs[cfg.Name]
						cli := getClient(t, kubeconf)
						// There should eventually be the following running pods:
						// podCount +
						// cni-nodes (nodeCount) +
						// etcd (1) +
						// control-plane (3) +
						// kube-proxy (nodeCount)
						var got int
						expectedPods := cfg.PodCount + cfg.NodeCount + 1 + 3 + cfg.NodeCount
						var pods []client.ObjectKey
						ok := testutil.Eventually[int](func() int {
							var podList corev1.PodList
							err := cli.List(ctx, &podList, client.InNamespace(""))
							if err != nil {
								t.Fatalf("Failed to list pods: %v", err)
								return -1
							}
							pods = make([]client.ObjectKey, 0, len(podList.Items))
						Pods:
							for _, pod := range podList.Items {
								// Ignore pods we've already seen.
								for _, seen := range pods {
									if seen.Namespace == pod.Namespace && seen.Name == pod.Name {
										continue Pods
									}
								}
								if pod.GetDeletionTimestamp() != nil {
									continue
								}
								t.Log("Found pod: ", pod.Name)
								if pod.Status.Phase != corev1.PodRunning {
									t.Log("Pod is not running: ", pod.Name)
									continue
								}
								t.Log("Pod is running: ", pod.Name)
								pods = append(pods, client.ObjectKey{Namespace: pod.Namespace, Name: pod.Name})
							}
							got = len(pods)
							return got
						}).ShouldEqual(time.Minute, time.Second, expectedPods)
						if !ok {
							t.Fatalf("Failed to get expected number of running pods: %d, got: %d", expectedPods, got)
						}
						// The podCount of those pods should eventually be ready with unique IP addresses from the pod CIDR.
						var cniManagedPods []*corev1.Pod
						expectedPods = cfg.PodCount
						got = 0
						ok = testutil.Eventually[int](func() int {
							var podList corev1.PodList
							err := cli.List(ctx, &podList, client.InNamespace(""))
							if err != nil {
								t.Fatalf("Failed to list pods: %v", err)
								return -1
							}
						Pods:
							for _, pod := range podList.Items {
								// If we've already seen the pod, continue.
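								// Copy the loop variable so its address can be stored safely below
								// (pre-Go 1.22 range semantics reuse the variable across iterations).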
								p := pod
								for _, seen := range cniManagedPods {
									if seen.Name == pod.Name && seen.Namespace == pod.Namespace {
										continue Pods
									}
								}
								if pod.GetDeletionTimestamp() != nil {
									continue
								}
								name := fmt.Sprintf("%s/%s", pod.Namespace, pod.Name)
								t.Log("Found pod: ", pod.Name)
								if pod.Status.Phase != corev1.PodRunning {
									t.Log("Pod is not running: ", name)
									continue
								}
								t.Log("Pod is running: ", name)
								var isReady bool
								for _, cond := range pod.Status.Conditions {
									if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
										isReady = true
										break
									}
								}
								if !isReady {
									t.Log("Pod is not ready: ", name)
									continue
								}
								t.Logf("Checking that %s has an IP address from the pod CIDR", name)
								var hasIP bool
								for _, ip := range pod.Status.PodIPs {
									addr, err := netip.ParseAddr(ip.IP)
									if err != nil {
										t.Fatalf("Failed to parse IP address %s: %v", ip.IP, err)
									}
									if cfg.PodCIDR.Contains(addr) {
										t.Logf("Pod %s has IP address %s from pod CIDR %s", name, ip.IP, cfg.PodCIDR)
										hasIP = true
										break
									}
								}
								if !hasIP {
									t.Log("Ignoring pod without IP address from pod CIDR: ", name)
									continue
								}
								cniManagedPods = append(cniManagedPods, &p)
							}
							got = len(cniManagedPods)
							return got
						}).ShouldEqual(time.Second*30, time.Second, expectedPods)
						if !ok {
							t.Fatalf("Failed to get expected number of pods in the CNI network: %d, got: %d", expectedPods, got)
						}
						seen := make(map[netip.Addr]struct{})
					Pods:
						for _, pod := range cniManagedPods {
						IPs:
							for _, ip := range pod.Status.PodIPs {
								addr, err := netip.ParseAddr(ip.IP)
								if err != nil {
									t.Fatalf("Failed to parse IP address %s: %v", ip.IP, err)
								}
								if !cfg.PodCIDR.Contains(addr) {
									continue IPs
								}
								if _, ok := seen[addr]; ok {
									t.Errorf("Pod %s has duplicate IP address %s", pod.Name, ip.IP)
									continue IPs
								}
								t.Logf("Pod %s has unique IP address %s", pod.Name, ip.IP)
								seen[addr] = struct{}{}
								continue Pods
							}
						}
					})
				}
			})
		})
	}
}

// Init initializes the end-to-end test.
func Init(t *testing.T) {
	t.Helper()
	// Set the controller-runtime logger.
	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&zap.Options{Development: true})))
	// Set the path to the kind binary.
	var err error
	if kindExec == "" {
		kindExec = defaultKindExec
	}
	kindExec, err = exec.LookPath(kindExec)
	if err != nil {
		t.Fatal("Failed to resolve kind executable path:", err)
	}
	// Set the path to the kustomize binary.
	if kustomizeExec == "" {
		kustomizeExec = defaultKustomizeExec
	}
	kustomizeExec, err = exec.LookPath(kustomizeExec)
	if err != nil {
		t.Fatal("Failed to resolve kustomize executable path:", err)
	}
	// Set the path to the kubectl binary.
	if kubectlExec == "" {
		kubectlExec = defaultKubectlExec
	}
	kubectlExec, err = exec.LookPath(kubectlExec)
	if err != nil {
		t.Fatal("Failed to resolve kubectl executable path:", err)
	}
	envTestImage := os.Getenv(testImageEnvVar)
	if envTestImage != "" {
		testImage = envTestImage
	}
}

// findKindConfigs walks path and returns any YAML files that declare kind: Cluster.
func findKindConfigs(t *testing.T, path string) []string {
	t.Helper()
	var configs []string
	err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		if filepath.Ext(path) != ".yaml" {
			return nil
		}
		t.Log("Checking if path is a cluster config: ", path)
		data, err := os.ReadFile(path)
		if err != nil {
			return err
		}
		var config map[string]any
		err = yaml.Unmarshal(data, &config)
		if err != nil {
			return err
		}
		if config["kind"] == "Cluster" {
			t.Log("Found kind cluster config: ", path)
			configs = append(configs, path)
		}
		return nil
	})
	if err != nil {
		t.Fatalf("Failed to walk %s: %v", path, err)
	}
	return configs
}

// getClient builds a controller-runtime client from the given kubeconfig file.
func getClient(t *testing.T, kubeconfig string) client.Client {
	t.Helper()
	cfg, err := clientcmd.BuildConfigFromKubeconfigGetter("", func() (*clientcmdapi.Config, error) {
		conf, err := clientcmd.LoadFromFile(kubeconfig)
		if err != nil {
			return nil, fmt.Errorf("failed to load kubeconfig from file: %w", err)
		}
		return conf, nil
	})
	if err != nil {
		t.Fatalf("Failed to create REST config: %v", err)
	}
	cli, err := types.NewRawClientForConfig(cfg)
	if err != nil {
		t.Fatalf("Failed to create client: %v", err)
	}
	return cli
}

// doInDir runs fn with the working directory set to dir, restoring it afterwards.
func doInDir(t *testing.T, dir string, fn func()) {
	t.Helper()
	curdir := mustAbsolute(".")
	t.Log("Changing directory to: ", dir)
	err := os.Chdir(dir)
	if err != nil {
		t.Fatalf("Failed to change directory to %s: %v", dir, err)
	}
	defer func() {
		t.Log("Changing directory back to: ", curdir)
		err := os.Chdir(curdir)
		if err != nil {
			t.Fatalf("Failed to change directory to %s: %v", curdir, err)
		}
	}()
	fn()
}

// execCmd runs the given command and streams its output to the test log.
func execCmd(t *testing.T, cmd string, args ...string) {
	t.Helper()
	cmdStr := func() string {
		cmdStr := cmd + " "
		for _, arg := range args {
			cmdStr += fmt.Sprintf("%v ", arg)
		}
		return cmdStr
	}()
	t.Log("Running command: ", cmdStr)
	execCmd := exec.Command(cmd, args...)
	execCmd.Stdout = &testLogWriter{t: t}
	execCmd.Stderr = &testLogWriter{t: t}
	if err := execCmd.Run(); err != nil {
		t.Fatalf("Failed to run %q: %v", cmdStr, err)
	}
}

// mustAbsolute returns the absolute form of path or panics.
func mustAbsolute(path string) string {
	absPath, err := filepath.Abs(path)
	if err != nil {
		panic(err)
	}
	return absPath
}

// testLogWriter forwards process output to the test log.
type testLogWriter struct {
	t *testing.T
}

func (w *testLogWriter) Write(p []byte) (n int, err error) {
	n = len(p)
	data := bytes.TrimSpace(p)
	if len(data) == 0 {
		return
	}
	w.t.Log(string(data))
	return
}

// toAnySlice converts a typed slice to a slice of any.
func toAnySlice[T any](slice []T) []any {
	var anySlice []any
	for _, s := range slice {
		anySlice = append(anySlice, s)
	}
	return anySlice
}
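// For reference, a hypothetical e2e.yaml consumed by TestWebmeshCNIEndToEnd
// might look like the following. Field names follow the yaml tags on E2ESpec
// and E2ECluster; the cluster name, CIDR, and counts shown here are
// illustrative only and are not taken from the repository's example specs:
//
//	clusters:
//	  - name: single-cluster
//	    cniNamespace: kube-system
//	    kustomization: default
//	    nodeCount: 3
//	    podCIDR: 10.42.0.0/16
//	    podCount: 3
//
// Assuming kind, kustomize, kubectl, and a container runtime are available on
// PATH, the suite would typically be run from the module root with the e2e
// build tag, optionally overriding the image under test:
//
//	E2E_TEST_IMAGE=ghcr.io/webmeshproj/webmesh-cni:latest \
//		go test -v -tags e2e ./internal/cmd/...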