k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/e2e/upgrades/apps/job.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apps

import (
	"context"
	"fmt"
	"strings"

	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
	"k8s.io/kubernetes/test/e2e/upgrades"

	"github.com/onsi/ginkgo/v2"
)

// JobUpgradeTest is a test harness for batch Jobs.
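// The harness creates a Job before the upgrade (Setup), verifies that its
// Pods are still running once the upgrade has finished (Test), and relies on
// namespace deletion for cleanup (Teardown).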
type JobUpgradeTest struct {
	job       *batchv1.Job
	namespace string
}

// Name returns the tracking name of the test.
func (JobUpgradeTest) Name() string { return "[sig-apps] job-upgrade" }

// Setup starts a Job with a parallelism of 2 and 2 completions running.
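// The Job's Pods never terminate on their own (the "notTerminate" fixture),
// so the same workload is expected to keep running across the upgrade.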
func (t *JobUpgradeTest) Setup(ctx context.Context, f *framework.Framework) {
	t.namespace = f.Namespace.Name

	ginkgo.By("Creating a job")
	t.job = e2ejob.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6)
	job, err := e2ejob.CreateJob(ctx, f.ClientSet, t.namespace, t.job)
	t.job = job
	framework.ExpectNoError(err)

	ginkgo.By("Ensuring active pods == parallelism")
	err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, t.namespace, job.Name, 2)
	framework.ExpectNoError(err)
}

// Test verifies that the Job's Pods are running after an upgrade.
func (t *JobUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
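	// Block until the upgrade framework signals that the upgrade has finished.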
	<-done
	ginkgo.By("Ensuring job is running")
	err := ensureJobRunning(ctx, f.ClientSet, t.namespace, t.job.Name)
	framework.ExpectNoError(err)
	ginkgo.By("Ensuring active pods == parallelism")
	err = ensureAllJobPodsRunning(ctx, f.ClientSet, t.namespace, t.job.Name, 2)
	framework.ExpectNoError(err)
}

// Teardown cleans up any remaining resources.
func (t *JobUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) {
	// rely on the namespace deletion to clean up everything
}

// ensureAllJobPodsRunning uses c to check if the Job named jobName in ns
// is running, returning an error if the expected parallelism is not
// satisfied.
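// The returned error includes a short per-pod summary (name, phase, and
// status message) to make failures easier to diagnose.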
func ensureAllJobPodsRunning(ctx context.Context, c clientset.Interface, ns, jobName string, parallelism int32) error {
	label := labels.SelectorFromSet(labels.Set(map[string]string{e2ejob.JobSelectorKey: jobName}))
	options := metav1.ListOptions{LabelSelector: label.String()}
	pods, err := c.CoreV1().Pods(ns).List(ctx, options)
	if err != nil {
		return err
	}
	podsSummary := make([]string, 0, parallelism)
	count := int32(0)
	for _, p := range pods.Items {
		if p.Status.Phase == v1.PodRunning {
			count++
		}
		podsSummary = append(podsSummary, fmt.Sprintf("%s (%s: %s)", p.ObjectMeta.Name, p.Status.Phase, p.Status.Message))
	}
	if count != parallelism {
		return fmt.Errorf("job has %d of %d expected running pods: %s", count, parallelism, strings.Join(podsSummary, ", "))
	}
	return nil
}

// ensureJobRunning uses c to check if the Job named jobName in ns is running
// (not completed, failed, nor suspended), returning an error if it can't
// read the Job or when it is not running.
func ensureJobRunning(ctx context.Context, c clientset.Interface, ns, jobName string) error {
	job, err := e2ejob.GetJob(ctx, c, ns, jobName)
	if err != nil {
		return err
	}
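	// The Job counts as not running if any of the Complete, Failed, or
	// Suspended conditions has been set to true.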
	for _, cond := range job.Status.Conditions {
		if (cond.Type == batchv1.JobComplete || cond.Type == batchv1.JobFailed || cond.Type == batchv1.JobSuspended) && cond.Status == v1.ConditionTrue {
			return fmt.Errorf("job is not running %#v", job)
		}
	}
	return nil
}