github.com/cilium/cilium@v1.16.2/test/k8s/updates.go (about) 1 // SPDX-License-Identifier: Apache-2.0 2 // Copyright Authors of Cilium 3 4 package k8sTest 5 6 import ( 7 "fmt" 8 "path/filepath" 9 "strconv" 10 "strings" 11 "time" 12 13 . "github.com/onsi/gomega" 14 15 "github.com/cilium/cilium/pkg/versioncheck" 16 . "github.com/cilium/cilium/test/ginkgo-ext" 17 "github.com/cilium/cilium/test/helpers" 18 ) 19 20 const ( 21 // cluster name and ID are set to provide coverage for changes to 22 // ClusterMesh, including updates to BPF map updates 23 clusterName = "test-upgrade-downgrade" 24 // clusterID of 170(10101010) was chosen to utilize all 8 bits of the 25 // default ClusterID size. 26 // note: It's possible to extend the size of ClusterID by setting 27 // MaxConnectedClusters, however this feature is only available to new 28 // clusters - thus upgrade and downgrade are not possible. Test coverage 29 // for this feature should be added after it is released. 30 clusterID = "170" 31 ) 32 33 var ( 34 // These are set in BeforeAll 35 demoPath string 36 l7Policy string 37 migrateSVCClient string 38 migrateSVCServer string 39 stableChartPath string 40 ) 41 42 var _ = Describe("K8sUpdates", func() { 43 44 // This test runs 8 steps as following: 45 // 1 - delete all pods. Clean cilium, this can be, and should be achieved by 46 // `clean-cilium-state: "true"` option that we have in configmap 47 // 2 - install cilium `cilium:v${LATEST_STABLE}` 48 // 3 - make endpoints talk with each other with policy 49 // 4 - upgrade cilium to `k8s1:5000/cilium/cilium-dev:latest` 50 // 5 - make endpoints talk with each other with policy 51 // 6 - downgrade cilium to `cilium:v${LATEST_STABLE}` 52 // 7 - make endpoints talk with each other with policy 53 // 8 - delete all pods. Clean cilium, this can be, and should be achieved by 54 // `clean-cilium-state: "true"` option that we have in configmap. 55 // This makes sure the upgrade tests won't affect any other test 56 // 9 - re install cilium:latest image for remaining tests. 57 58 var ( 59 kubectl *helpers.Kubectl 60 61 cleanupCallback = func() {} 62 ) 63 BeforeAll(func() { 64 SkipIfIntegration(helpers.CIIntegrationAKS) 65 canRun, err := helpers.CanRunK8sVersion(helpers.CiliumStableVersion, helpers.GetCurrentK8SEnv()) 66 ExpectWithOffset(1, err).To(BeNil(), "Unable to get k8s constraints for %s", helpers.CiliumStableVersion) 67 if !canRun { 68 Skip(fmt.Sprintf( 69 "Cilium %q is not supported in K8s %q. Skipping upgrade/downgrade tests.", 70 helpers.CiliumStableVersion, helpers.GetCurrentK8SEnv())) 71 return 72 } 73 74 kubectl = helpers.CreateKubectl(helpers.K8s1VMName(), logger) 75 76 demoPath = helpers.ManifestGet(kubectl.BasePath(), "demo.yaml") 77 l7Policy = helpers.ManifestGet(kubectl.BasePath(), "l7-policy.yaml") 78 migrateSVCClient = helpers.ManifestGet(kubectl.BasePath(), "migrate-svc-client.yaml") 79 migrateSVCServer = helpers.ManifestGet(kubectl.BasePath(), "migrate-svc-server.yaml") 80 81 kubectl.Delete(migrateSVCClient) 82 kubectl.Delete(migrateSVCServer) 83 kubectl.Delete(l7Policy) 84 kubectl.Delete(demoPath) 85 86 _ = kubectl.DeleteResource( 87 "deploy", fmt.Sprintf("-n %s cilium-operator", helpers.CiliumNamespace)) 88 // Sometimes PolicyGen has a lot of pods running around without delete 89 // it. Using this we are sure that we delete before this test start 90 kubectl.Exec(fmt.Sprintf( 91 "%s delete --all pods,svc,cnp -n %s", helpers.KubectlCmd, helpers.DefaultNamespace)) 92 93 By("Waiting for pods to be terminated") 94 ExpectAllPodsTerminated(kubectl) 95 96 // Download the stable helm chart from GitHub 97 versionPath := filepath.Join(kubectl.BasePath(), "../old-charts", helpers.CiliumStableVersion) 98 stableChartPath = filepath.Join(versionPath, fmt.Sprintf("cilium-%s/install/kubernetes/cilium", helpers.CiliumStableHelmChartVersion)) 99 100 cmd := kubectl.Exec(fmt.Sprintf("mkdir -p %s && "+ 101 "cd %s && "+ 102 "rm -rf * && "+ 103 "wget https://github.com/cilium/cilium/archive/refs/heads/%s.tar.gz && "+ 104 "tar -xf %s.tar.gz", 105 versionPath, 106 versionPath, 107 helpers.CiliumStableVersion, 108 helpers.CiliumStableVersion)) 109 ExpectWithOffset(1, cmd).To(helpers.CMDSuccess(), "Unable to download helm chart %s from GitHub", helpers.CiliumStableVersion) 110 }) 111 112 AfterAll(func() { 113 removeCilium(kubectl) 114 kubectl.CloseSSHClient() 115 }) 116 117 AfterFailed(func() { 118 kubectl.CiliumReport("cilium-dbg endpoint list") 119 }) 120 121 JustAfterEach(func() { 122 blacklist := helpers.GetBadLogMessages() 123 delete(blacklist, helpers.RemovingMapMsg) 124 kubectl.ValidateListOfErrorsInLogs(CurrentGinkgoTestDescription().Duration, blacklist) 125 }) 126 127 AfterEach(func() { 128 cleanupCallback() 129 ExpectAllPodsTerminated(kubectl) 130 }) 131 132 It("Tests upgrade and downgrade from a Cilium stable image to master", func() { 133 var assertUpgradeSuccessful func() 134 assertUpgradeSuccessful, cleanupCallback = 135 InstallAndValidateCiliumUpgrades( 136 kubectl, 137 helpers.CiliumStableHelmChartVersion, 138 helpers.CiliumStableVersion, 139 helpers.CiliumLatestHelmChartVersion, 140 helpers.GetLatestImageVersion(), 141 ) 142 assertUpgradeSuccessful() 143 }) 144 }) 145 146 func removeCilium(kubectl *helpers.Kubectl) { 147 _ = kubectl.ExecMiddle("helm delete cilium-preflight --namespace=" + helpers.CiliumNamespace) 148 _ = kubectl.ExecMiddle("helm delete cilium --namespace=" + helpers.CiliumNamespace) 149 150 kubectl.CleanupCiliumComponents() 151 ExpectAllPodsTerminated(kubectl) 152 } 153 154 // InstallAndValidateCiliumUpgrades installs and tests if the oldVersion can be 155 // upgrade to the newVersion and if the newVersion can be downgraded to the 156 // oldVersion. It returns two callbacks, the first one is the assertfunction 157 // that need to run, and the second one are the cleanup actions 158 func InstallAndValidateCiliumUpgrades(kubectl *helpers.Kubectl, oldHelmChartVersion, oldImageVersion, newHelmChartVersion, newImageVersion string) (func(), func()) { 159 var ( 160 err error 161 162 timeout = 5 * time.Minute 163 ) 164 165 canRun, err := helpers.CanRunK8sVersion(oldImageVersion, helpers.GetCurrentK8SEnv()) 166 ExpectWithOffset(1, err).To(BeNil(), "Unable to get k8s constraints for %s", oldImageVersion) 167 if !canRun { 168 Skip(fmt.Sprintf( 169 "Cilium %q is not supported in K8s %q. Skipping upgrade/downgrade tests.", 170 oldImageVersion, helpers.GetCurrentK8SEnv())) 171 return func() {}, func() {} 172 } 173 174 apps := []string{helpers.App1, helpers.App2, helpers.App3} 175 app1Service := "app1-service" 176 177 cleanupCiliumState := func(helmPath, chartVersion, imageName, imageTag, registry string) { 178 removeCilium(kubectl) 179 180 opts := map[string]string{ 181 "cleanState": "true", 182 "image.tag": imageTag, 183 "sleepAfterInit": "true", 184 "operator.enabled": "false ", 185 "hubble.tls.enabled": "false", 186 "cluster.name": clusterName, 187 "cluster.id": clusterID, 188 } 189 if imageName != "" { 190 opts["image.repository"] = imageName 191 opts["preflight.image.repository"] = imageName // preflight must match the target agent image 192 } 193 194 EventuallyWithOffset(1, func() (*helpers.CmdRes, error) { 195 return kubectl.RunHelm( 196 "install", 197 helmPath, 198 "cilium", 199 chartVersion, 200 helpers.CiliumNamespace, 201 opts, 202 ) 203 }, time.Second*30, time.Second*1).Should(helpers.CMDSuccess(), fmt.Sprintf("Cilium clean state %q was not able to be deployed", chartVersion)) 204 205 kubectl.WaitForCiliumReadiness(1, fmt.Sprintf("Cilium %q did not become ready in time", chartVersion)) 206 err = kubectl.WaitForCiliumInitContainerToFinish() 207 ExpectWithOffset(1, err).To(BeNil(), "Cilium %q was not able to be clean up environment", chartVersion) 208 cmd := kubectl.ExecMiddle("helm delete cilium --namespace=" + helpers.CiliumNamespace) 209 ExpectWithOffset(1, cmd).To(helpers.CMDSuccess(), "Cilium %q was not able to be deleted", chartVersion) 210 ExpectAllPodsTerminated(kubectl) 211 } 212 213 cleanupCallback := func() { 214 kubectl.Delete(migrateSVCClient) 215 kubectl.Delete(migrateSVCServer) 216 kubectl.Delete(l7Policy) 217 kubectl.Delete(demoPath) 218 219 if res := kubectl.DeleteResource("pod", fmt.Sprintf("-n %s -l k8s-app=kube-dns", helpers.KubeSystemNamespace)); !res.WasSuccessful() { 220 log.Warningf("Unable to delete DNS pods: %s", res.OutputPrettyPrint()) 221 } 222 223 // make sure we clean everything up before doing any other test 224 cleanupCiliumState(filepath.Join(kubectl.BasePath(), helpers.HelmTemplate), newHelmChartVersion, "", newImageVersion, "") 225 } 226 227 testfunc := func() { 228 By("Deleting Cilium and CoreDNS") 229 // Making sure that we deleted the cilium ds. No assert 230 // message because maybe is not present 231 if res := kubectl.DeleteResource("ds", fmt.Sprintf("-n %s cilium", helpers.CiliumNamespace)); !res.WasSuccessful() { 232 log.Warningf("Unable to delete Cilium DaemonSet: %s", res.OutputPrettyPrint()) 233 } 234 235 By("Waiting for pods to be terminated") 236 ExpectAllPodsTerminated(kubectl) 237 238 EventuallyWithOffset(1, func() *helpers.CmdRes { 239 return kubectl.HelmAddCiliumRepo() 240 }, time.Second*30, time.Second*1).Should(helpers.CMDSuccess(), "Unable to install helm repository") 241 242 // New version must come first given prior CI tests may have run on new Cilium version. 243 By("Cleaning Cilium state (%s)", newImageVersion) 244 cleanupCiliumState(filepath.Join(kubectl.BasePath(), helpers.HelmTemplate), newHelmChartVersion, "", newImageVersion, "") 245 246 By("Cleaning Cilium state (%s)", oldImageVersion) 247 cleanupCiliumState(stableChartPath, oldHelmChartVersion, "quay.io/cilium/cilium-ci", oldImageVersion, "") 248 249 By("Deploying Cilium %s", oldHelmChartVersion) 250 251 opts := map[string]string{ 252 "image.tag": oldImageVersion, 253 "operator.image.tag": oldImageVersion, 254 "hubble.relay.image.tag": oldImageVersion, 255 "clustermesh.apiserver.image.tag": oldImageVersion, 256 "image.repository": "quay.io/cilium/cilium-ci", 257 "operator.image.repository": "quay.io/cilium/operator", 258 "hubble.relay.image.repository": "quay.io/cilium/hubble-relay-ci", 259 "clustermesh.apiserver.image.repository": "quay.io/cilium/clustermesh-apiserver-ci", 260 "cluster.name": clusterName, 261 "cluster.id": clusterID, 262 } 263 264 hasNewHelmValues := versioncheck.MustCompile(">=1.12.90") 265 if hasNewHelmValues(versioncheck.MustVersion(newHelmChartVersion)) { 266 opts["bandwidthManager.enabled"] = "false " 267 } else { 268 opts["bandwidthManager"] = "false " 269 } 270 271 // Eventually allows multiple return values, and performs the assertion 272 // on the first return value, and expects that all other return values 273 // are zero values (nil, etc.). 274 EventuallyWithOffset(1, func() (*helpers.CmdRes, error) { 275 return kubectl.RunHelm( 276 "install", 277 stableChartPath, 278 "cilium", 279 oldHelmChartVersion, 280 helpers.CiliumNamespace, 281 opts) 282 }, time.Second*30, time.Second*1).Should(helpers.CMDSuccess(), fmt.Sprintf("Cilium %q was not able to be deployed", oldHelmChartVersion)) 283 284 // Cilium is only ready if kvstore is ready, the kvstore is ready if 285 // kube-dns is running. 286 ExpectCiliumReady(kubectl) 287 ExpectCiliumOperatorReady(kubectl) 288 By("Cilium %q is installed and running", oldHelmChartVersion) 289 290 By("Restarting DNS Pods") 291 if res := kubectl.DeleteResource("pod", fmt.Sprintf("-n %s -l k8s-app=kube-dns", helpers.KubeSystemNamespace)); !res.WasSuccessful() { 292 log.Warningf("Unable to delete DNS pods: %s", res.OutputPrettyPrint()) 293 } 294 ExpectKubeDNSReady(kubectl) 295 296 validatedImage := func(image string) { 297 By("Checking that installed image is %q", image) 298 299 filter := `{.items[*].status.containerStatuses[0].image}` 300 data, err := kubectl.GetPods( 301 helpers.CiliumNamespace, "-l k8s-app=cilium").Filter(filter) 302 ExpectWithOffset(1, err).To(BeNil(), "Cannot get cilium pods") 303 304 for _, val := range strings.Split(data.String(), " ") { 305 ExpectWithOffset(1, val).To(ContainSubstring(image), "Cilium image didn't update correctly") 306 } 307 } 308 309 validateEndpointsConnection := func() { 310 By("Validate that endpoints are ready before making any connection") 311 err := kubectl.CiliumEndpointWaitReady() 312 ExpectWithOffset(1, err).To(BeNil(), "Endpoints are not ready after timeout") 313 314 ExpectKubeDNSReady(kubectl) 315 316 err = kubectl.WaitForKubeDNSEntry(app1Service, helpers.DefaultNamespace) 317 ExpectWithOffset(1, err).To(BeNil(), "DNS entry is not ready after timeout") 318 319 appPods := helpers.GetAppPods(apps, helpers.DefaultNamespace, kubectl, "id") 320 321 err = kubectl.WaitForKubeDNSEntry(app1Service, helpers.DefaultNamespace) 322 ExpectWithOffset(1, err).To(BeNil(), "DNS entry is not ready after timeout") 323 324 By("Making L7 requests between endpoints") 325 res := kubectl.ExecPodCmd( 326 helpers.DefaultNamespace, appPods[helpers.App2], 327 helpers.CurlFail("http://%s/public", app1Service)) 328 ExpectWithOffset(1, res).Should(helpers.CMDSuccess(), "Cannot curl app1-service") 329 330 res = kubectl.ExecPodCmd( 331 helpers.DefaultNamespace, appPods[helpers.App2], 332 helpers.CurlFail("http://%s/private", app1Service)) 333 ExpectWithOffset(1, res).ShouldNot(helpers.CMDSuccess(), "Expect a 403 from app1-service") 334 } 335 336 // checkNoInteruptsInSVCFlows checks whether there are no 337 // interrupts in established connections to the migrate-svc service 338 // after Cilium has been upgraded / downgraded. 339 // 340 // The check is based on restart count of the Pods. We can do it so, because 341 // any interrupt in the flow makes a client to panic which makes the Pod 342 // to restart. 343 lastCount := -1 344 checkNoInteruptsInSVCFlows := func() { 345 By("No interrupts in migrated svc flows") 346 347 filter := `{.items[*].status.containerStatuses[0].restartCount}` 348 restartCount, err := kubectl.GetPods(helpers.DefaultNamespace, 349 "-l zgroup=migrate-svc").Filter(filter) 350 ExpectWithOffset(1, err).To(BeNil(), "Failed to query \"migrate-svc-server\" Pod") 351 352 currentCount := 0 353 for _, c := range strings.Split(restartCount.String(), " ") { 354 count, err := strconv.Atoi(c) 355 ExpectWithOffset(1, err).To(BeNil(), "Failed to convert count value") 356 currentCount += count 357 } 358 // The check is invoked for the first time 359 if lastCount == -1 { 360 lastCount = currentCount 361 } 362 Expect(lastCount).Should(BeIdenticalTo(currentCount), 363 "migrate-svc restart count values do not match") 364 } 365 366 By("Creating some endpoints and L7 policy") 367 368 res := kubectl.ApplyDefault(demoPath) 369 ExpectWithOffset(1, res).To(helpers.CMDSuccess(), "cannot apply dempo application") 370 371 err = kubectl.WaitforPods(helpers.DefaultNamespace, "-l zgroup=testapp", timeout) 372 Expect(err).Should(BeNil(), "Test pods are not ready after timeout") 373 374 _, err = kubectl.CiliumPolicyAction( 375 helpers.DefaultNamespace, l7Policy, helpers.KubectlApply, timeout) 376 Expect(err).Should(BeNil(), "cannot import l7 policy: %v", l7Policy) 377 378 By("Creating service and clients for migration") 379 380 res = kubectl.ApplyDefault(migrateSVCServer) 381 ExpectWithOffset(1, res).To(helpers.CMDSuccess(), "cannot apply migrate-svc-server") 382 err = kubectl.WaitforPods(helpers.DefaultNamespace, "-l app=migrate-svc-server", timeout) 383 Expect(err).Should(BeNil(), "migrate-svc-server pods are not ready after timeout") 384 385 res = kubectl.ApplyDefault(migrateSVCClient) 386 ExpectWithOffset(1, res).To(helpers.CMDSuccess(), "cannot apply migrate-svc-client") 387 err = kubectl.WaitforPods(helpers.DefaultNamespace, "-l app=migrate-svc-client", timeout) 388 Expect(err).Should(BeNil(), "migrate-svc-client pods are not ready after timeout") 389 390 validateEndpointsConnection() 391 checkNoInteruptsInSVCFlows() 392 393 waitForUpdateImage := func(image string) func() bool { 394 return func() bool { 395 pods, err := kubectl.GetCiliumPods() 396 if err != nil { 397 return false 398 } 399 400 filter := `{.items[*].status.containerStatuses[0].image}` 401 data, err := kubectl.GetPods( 402 helpers.CiliumNamespace, "-l k8s-app=cilium").Filter(filter) 403 if err != nil { 404 return false 405 } 406 number := strings.Count(data.String(), image) 407 if number == len(pods) { 408 return true 409 } 410 log.Infof("Only '%v' of '%v' cilium pods updated to the new image", 411 number, len(pods)) 412 return false 413 } 414 } 415 416 By("Install Cilium pre-flight check DaemonSet") 417 418 opts = map[string]string{ 419 "preflight.enabled": "true ", 420 "config.enabled": "false ", 421 "operator.enabled": "false ", 422 "preflight.image.tag": newImageVersion, 423 "nodeinit.enabled": "false", 424 "agent": "false ", 425 } 426 427 EventuallyWithOffset(1, func() (*helpers.CmdRes, error) { 428 return kubectl.RunHelm( 429 "install", 430 filepath.Join(kubectl.BasePath(), helpers.HelmTemplate), 431 "cilium-preflight", 432 newHelmChartVersion, 433 helpers.CiliumNamespace, 434 opts) 435 }, time.Second*30, time.Second*1).Should(helpers.CMDSuccess(), "Unable to deploy preflight manifest") 436 437 ExpectCiliumPreFlightInstallReady(kubectl) 438 439 // Once they are installed we can remove it 440 By("Removing Cilium pre-flight check DaemonSet") 441 cmd := kubectl.ExecMiddle("helm delete cilium-preflight --namespace=" + helpers.CiliumNamespace) 442 ExpectWithOffset(1, cmd).To(helpers.CMDSuccess(), "Unable to delete preflight") 443 444 kubectl.WaitForCiliumReadiness(1, "Cilium is not ready after timeout") 445 // Need to run using the kvstore-based allocator because upgrading from 446 // kvstore-based allocator to CRD-based allocator is not currently 447 // supported at this time. 448 By("Upgrading Cilium to %s", newHelmChartVersion) 449 opts = map[string]string{ 450 "image.tag": newImageVersion, 451 "operator.image.tag": newImageVersion, 452 "hubble.relay.image.tag": newImageVersion, 453 "cluster.name": clusterName, 454 "cluster.id": clusterID, 455 // Remove this after 1.16 is released 456 "envoy.enabled": "false", 457 } 458 459 upgradeCompatibilityVer := strings.TrimSuffix(oldHelmChartVersion, "-dev") 460 // Ensure compatibility in the ConfigMap. This tests the 461 // upgrade as instructed in the documentation 462 opts["upgradeCompatibility"] = upgradeCompatibilityVer 463 464 EventuallyWithOffset(1, func() (*helpers.CmdRes, error) { 465 return kubectl.RunHelm( 466 "upgrade", 467 filepath.Join(kubectl.BasePath(), helpers.HelmTemplate), 468 "cilium", 469 newHelmChartVersion, 470 helpers.CiliumNamespace, 471 opts) 472 }, time.Second*30, time.Second*1).Should(helpers.CMDSuccess(), fmt.Sprintf("Cilium %q was not able to be deployed", newHelmChartVersion)) 473 474 By("Validating pods have the right image version upgraded") 475 err = helpers.WithTimeout( 476 waitForUpdateImage(newImageVersion), 477 fmt.Sprintf("Cilium Pods are not updating correctly to %s", newImageVersion), 478 &helpers.TimeoutConfig{Timeout: timeout}) 479 ExpectWithOffset(1, err).To(BeNil(), "Pods are not updating") 480 481 err = kubectl.WaitforPods( 482 helpers.CiliumNamespace, "-l k8s-app=cilium", timeout) 483 ExpectWithOffset(1, err).Should(BeNil(), "Cilium is not ready after timeout") 484 485 validatedImage(newImageVersion) 486 ExpectCiliumReady(kubectl) 487 ExpectCiliumOperatorReady(kubectl) 488 489 validateEndpointsConnection() 490 checkNoInteruptsInSVCFlows() 491 492 nbMissedTailCalls, err := kubectl.CountMissedTailCalls() 493 ExpectWithOffset(1, err).Should(BeNil(), "Failed to retrieve number of missed tail calls") 494 ExpectWithOffset(1, nbMissedTailCalls).To(BeNumerically("==", 0), "Unexpected missed tail calls") 495 496 By("Check whether svc flows are not interrupted upon cilium-agent restart") 497 ciliumFilter := "k8s-app=cilium" 498 res = kubectl.Exec(fmt.Sprintf( 499 "%s -n %s delete pods -l %s", 500 helpers.KubectlCmd, helpers.CiliumNamespace, ciliumFilter)) 501 res.ExpectSuccess("Failed to delete cilium pods") 502 ExpectAllPodsTerminated(kubectl) 503 err = kubectl.WaitforPods( 504 helpers.CiliumNamespace, fmt.Sprintf("-l %s", ciliumFilter), helpers.HelperTimeout) 505 Expect(err).Should(BeNil(), "Pods are not ready after timeout") 506 err = kubectl.CiliumEndpointWaitReady() 507 Expect(err).To(BeNil(), "Endpoints are not ready after timeout") 508 checkNoInteruptsInSVCFlows() 509 510 By("Downgrading cilium to %s image", oldHelmChartVersion) 511 // rollback cilium 1 because it's the version that we have started 512 // cilium with in this updates test. 513 cmd = kubectl.ExecMiddle("helm rollback cilium 1 --namespace=" + helpers.CiliumNamespace) 514 ExpectWithOffset(1, cmd).To(helpers.CMDSuccess(), "Cilium %q was not able to be deployed", oldHelmChartVersion) 515 516 err = helpers.WithTimeout( 517 waitForUpdateImage(oldImageVersion), 518 "Cilium Pods are not updating correctly", 519 &helpers.TimeoutConfig{Timeout: timeout}) 520 ExpectWithOffset(1, err).To(BeNil(), "Pods are not updating") 521 522 err = kubectl.WaitforPods( 523 helpers.CiliumNamespace, "-l k8s-app=cilium", timeout) 524 ExpectWithOffset(1, err).Should(BeNil(), "Cilium is not ready after timeout") 525 526 validatedImage(oldImageVersion) 527 ExpectCiliumOperatorReady(kubectl) 528 529 validateEndpointsConnection() 530 checkNoInteruptsInSVCFlows() 531 532 nbMissedTailCalls, err = kubectl.CountMissedTailCalls() 533 ExpectWithOffset(1, err).Should(BeNil(), "Failed to retrieve number of missed tail calls") 534 ExpectWithOffset(1, nbMissedTailCalls).To(BeNumerically("==", 0), "Unexpected missed tail call") 535 } 536 return testfunc, cleanupCallback 537 }