k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/controller/daemon/update_test.go

     1  /*
     2  Copyright 2017 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package daemon
    18  
    19  import (
    20  	"context"
    21  	"reflect"
    22  	"testing"
    23  	"time"
    24  
    25  	apps "k8s.io/api/apps/v1"
    26  	v1 "k8s.io/api/core/v1"
    27  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    28  	"k8s.io/apimachinery/pkg/labels"
    29  	"k8s.io/apimachinery/pkg/util/intstr"
    30  	"k8s.io/klog/v2/ktesting"
    31  	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    32  	"k8s.io/kubernetes/pkg/controller/daemon/util"
    33  	testingclock "k8s.io/utils/clock/testing"
    34  )
    35  
    36  func TestDaemonSetUpdatesPods(t *testing.T) {
    37  	_, ctx := ktesting.NewTestContext(t)
    38  	ds := newDaemonSet("foo")
    39  	manager, podControl, _, err := newTestController(ctx, ds)
    40  	if err != nil {
    41  		t.Fatalf("error creating DaemonSets controller: %v", err)
    42  	}
    43  	maxUnavailable := 2
    44  	addNodes(manager.nodeStore, 0, 5, nil)
    45  	manager.dsStore.Add(ds)
    46  	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
    47  	markPodsReady(podControl.podStore)
    48  
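        	// Switch the pod template image and adopt a RollingUpdate strategy that
        	// tolerates at most maxUnavailable (2) pods being down at a time.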
    49  	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
    50  	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
    51  	intStr := intstr.FromInt32(int32(maxUnavailable))
    52  	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
    53  	manager.dsStore.Update(ds)
    54  
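        	// Each rollout round below takes two syncs: the first deletes up to
        	// maxUnavailable old pods, the second recreates pods on the freed nodes.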
    55  	clearExpectations(t, manager, ds, podControl)
    56  	expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
    57  	clearExpectations(t, manager, ds, podControl)
    58  	expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
    59  	markPodsReady(podControl.podStore)
    60  
    61  	clearExpectations(t, manager, ds, podControl)
    62  	expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
    63  	clearExpectations(t, manager, ds, podControl)
    64  	expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
    65  	markPodsReady(podControl.podStore)
    66  
    67  	clearExpectations(t, manager, ds, podControl)
    68  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 0)
    69  	clearExpectations(t, manager, ds, podControl)
    70  	expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
    71  	markPodsReady(podControl.podStore)
    72  
    73  	clearExpectations(t, manager, ds, podControl)
    74  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
    75  	clearExpectations(t, manager, ds, podControl)
    76  }
    77  
    78  func TestDaemonSetUpdatesPodsWithMaxSurge(t *testing.T) {
    79  	_, ctx := ktesting.NewTestContext(t)
    80  	ds := newDaemonSet("foo")
    81  	manager, podControl, _, err := newTestController(ctx, ds)
    82  	if err != nil {
    83  		t.Fatalf("error creating DaemonSets controller: %v", err)
    84  	}
    85  	addNodes(manager.nodeStore, 0, 5, nil)
    86  	manager.dsStore.Add(ds)
    87  	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
    88  	markPodsReady(podControl.podStore)
    89  
    90  	// surge is the controlling amount
    91  	maxSurge := 2
    92  	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
    93  	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(int32(maxSurge)))
    94  	manager.dsStore.Update(ds)
    95  
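        	// With MaxSurge, updated pods are created alongside the old ones first;
        	// old pods are deleted only once their replacements become available.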
    96  	clearExpectations(t, manager, ds, podControl)
    97  	expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, 0, 0)
    98  	clearExpectations(t, manager, ds, podControl)
    99  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   100  	markPodsReady(podControl.podStore)
   101  
   102  	clearExpectations(t, manager, ds, podControl)
   103  	expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, maxSurge, 0)
   104  	clearExpectations(t, manager, ds, podControl)
   105  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   106  	markPodsReady(podControl.podStore)
   107  
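        	// Five nodes surged two at a time leave a remainder of 5%maxSurge == 1,
        	// so the rollout finishes with a single surge pod and, one round later,
        	// a single delete.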
   108  	clearExpectations(t, manager, ds, podControl)
   109  	expectSyncDaemonSets(t, manager, ds, podControl, 5%maxSurge, maxSurge, 0)
   110  	clearExpectations(t, manager, ds, podControl)
   111  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   112  	markPodsReady(podControl.podStore)
   113  
   114  	clearExpectations(t, manager, ds, podControl)
   115  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 5%maxSurge, 0)
   116  	clearExpectations(t, manager, ds, podControl)
   117  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   118  }
   119  
   120  func TestDaemonSetUpdatesPodsNotMatchTaintsWithMaxSurge(t *testing.T) {
   121  	_, ctx := ktesting.NewTestContext(t)
   122  
   123  	ds := newDaemonSet("foo")
   124  	maxSurge := 1
   125  	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(int32(maxSurge)))
   126  	tolerations := []v1.Toleration{
   127  		{Key: "node-role.kubernetes.io/control-plane", Operator: v1.TolerationOpExists},
   128  	}
   129  	setDaemonSetToleration(ds, tolerations)
   130  	manager, podControl, _, err := newTestController(ctx, ds)
   131  	if err != nil {
   132  		t.Fatalf("error creating DaemonSets controller: %v", err)
   133  	}
   134  	err = manager.dsStore.Add(ds)
   135  	if err != nil {
   136  		t.Fatal(err)
   137  	}
   138  
   139  	// Add five nodes and apply a taint to one of them
   140  	addNodes(manager.nodeStore, 0, 5, nil)
   141  	taints := []v1.Taint{
   142  		{Key: "node-role.kubernetes.io/control-plane", Effect: v1.TaintEffectNoSchedule},
   143  	}
   144  	node := newNode("node-0", nil)
   145  	setNodeTaint(node, taints)
   146  	err = manager.nodeStore.Update(node)
   147  	if err != nil {
   148  		t.Fatal(err)
   149  	}
   150  
   151  	// Create DaemonSet with toleration
   152  	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
   153  	markPodsReady(podControl.podStore)
   154  
   155  	// Roll out an updated template that no longer carries the toleration
   156  	ds.Spec.Template.Spec.Tolerations = nil
   157  	err = manager.dsStore.Update(ds)
   158  	if err != nil {
   159  		t.Fatal(err)
   160  	}
   161  
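        	// node-0 is tainted and the updated template no longer tolerates it, so
        	// its pod is deleted without a surge replacement, while the other four
        	// nodes are surged one pod per round (maxSurge == 1).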
   162  	clearExpectations(t, manager, ds, podControl)
   163  	expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, 1, 0)
   164  	clearExpectations(t, manager, ds, podControl)
   165  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   166  	markPodsReady(podControl.podStore)
   167  
   168  	clearExpectations(t, manager, ds, podControl)
   169  	expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, maxSurge, 0)
   170  	clearExpectations(t, manager, ds, podControl)
   171  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   172  	markPodsReady(podControl.podStore)
   173  
   174  	clearExpectations(t, manager, ds, podControl)
   175  	expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, maxSurge, 0)
   176  	clearExpectations(t, manager, ds, podControl)
   177  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   178  	markPodsReady(podControl.podStore)
   179  
   180  	clearExpectations(t, manager, ds, podControl)
   181  	expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, maxSurge, 0)
   182  	clearExpectations(t, manager, ds, podControl)
   183  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   184  	markPodsReady(podControl.podStore)
   185  
   186  	clearExpectations(t, manager, ds, podControl)
   187  	expectSyncDaemonSets(t, manager, ds, podControl, 0, maxSurge, 0)
   188  	clearExpectations(t, manager, ds, podControl)
   189  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   190  }
   191  
   192  func TestDaemonSetUpdatesWhenNewPodIsNotReady(t *testing.T) {
   193  	_, ctx := ktesting.NewTestContext(t)
   194  	ds := newDaemonSet("foo")
   195  	manager, podControl, _, err := newTestController(ctx, ds)
   196  	if err != nil {
   197  		t.Fatalf("error creating DaemonSets controller: %v", err)
   198  	}
   199  	maxUnavailable := 3
   200  	addNodes(manager.nodeStore, 0, 5, nil)
   201  	err = manager.dsStore.Add(ds)
   202  	if err != nil {
   203  		t.Fatal(err)
   204  	}
   205  	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
   206  	markPodsReady(podControl.podStore)
   207  
   208  	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
   209  	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
   210  	intStr := intstr.FromInt32(int32(maxUnavailable))
   211  	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
   212  	err = manager.dsStore.Update(ds)
   213  	if err != nil {
   214  		t.Fatal(err)
   215  	}
   216  
   217  	// new pods are not ready, so numUnavailable == maxUnavailable
   218  	clearExpectations(t, manager, ds, podControl)
   219  	expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
   220  
   221  	clearExpectations(t, manager, ds, podControl)
   222  	expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
   223  
   224  	clearExpectations(t, manager, ds, podControl)
   225  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   226  	clearExpectations(t, manager, ds, podControl)
   227  }
   228  
   229  func TestDaemonSetUpdatesSomeOldPodsNotReady(t *testing.T) {
   230  	_, ctx := ktesting.NewTestContext(t)
   231  	ds := newDaemonSet("foo")
   232  	manager, podControl, _, err := newTestController(ctx, ds)
   233  	if err != nil {
   234  		t.Fatalf("error creating DaemonSets controller: %v", err)
   235  	}
   236  	maxUnavailable := 2
   237  	addNodes(manager.nodeStore, 0, 5, nil)
   238  	err = manager.dsStore.Add(ds)
   239  	if err != nil {
   240  		t.Fatal(err)
   241  	}
   242  	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
   243  	markPodsReady(podControl.podStore)
   244  
   245  	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
   246  	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
   247  	intStr := intstr.FromInt32(int32(maxUnavailable))
   248  	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
   249  	err = manager.dsStore.Update(ds)
   250  	if err != nil {
   251  		t.Fatal(err)
   252  	}
   253  
   254  	// All old pods are available, so 2 of them should be updated
   255  	clearExpectations(t, manager, ds, podControl)
   256  	expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
   257  
   258  	clearExpectations(t, manager, ds, podControl)
   259  	expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
   260  
   261  	clearExpectations(t, manager, ds, podControl)
   262  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   263  	clearExpectations(t, manager, ds, podControl)
   264  
   265  	// Perform another update; verify that we delete and create 2 pods, while three available pods remain untouched
   266  
   267  	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar3"
   268  	err = manager.dsStore.Update(ds)
   269  	if err != nil {
   270  		t.Fatal(err)
   271  	}
   272  
   273  	clearExpectations(t, manager, ds, podControl)
   274  	expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
   275  
   276  	clearExpectations(t, manager, ds, podControl)
   277  	expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
   278  
   279  	clearExpectations(t, manager, ds, podControl)
   280  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   281  	clearExpectations(t, manager, ds, podControl)
   282  
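        	// Verify that the 5-maxUnavailable pods outside the in-flight round
        	// stayed ready throughout.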
   283  	readyCount := 0
   284  	expectedReadyCount := 5 - maxUnavailable
   285  	for _, obj := range podControl.podStore.List() {
   286  		pod := obj.(*v1.Pod)
   287  		n, condition := podutil.GetPodCondition(&pod.Status, v1.PodReady)
   288  		if n != -1 && condition.Status == v1.ConditionTrue {
   289  			readyCount++
   290  		}
   291  	}
   292  
   293  	if readyCount != expectedReadyCount {
   294  		t.Fatalf("Expected %d old ready pods, but found %d", expectedReadyCount, readyCount)
   295  	}
   296  }
   297  
   298  func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
   299  	_, ctx := ktesting.NewTestContext(t)
   300  	ds := newDaemonSet("foo")
   301  	manager, podControl, _, err := newTestController(ctx, ds)
   302  	if err != nil {
   303  		t.Fatalf("error creating DaemonSets controller: %v", err)
   304  	}
   305  	maxUnavailable := 3
   306  	addNodes(manager.nodeStore, 0, 5, nil)
   307  	err = manager.dsStore.Add(ds)
   308  	if err != nil {
   309  		t.Fatal(err)
   310  	}
   311  	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
   312  
   313  	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
   314  	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
   315  	intStr := intstr.FromInt32(int32(maxUnavailable))
   316  	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
   317  	err = manager.dsStore.Update(ds)
   318  	if err != nil {
   319  		t.Fatal(err)
   320  	}
   321  
   322  	// all old pods are unavailable, so they should be removed
   323  	clearExpectations(t, manager, ds, podControl)
   324  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 5, 0)
   325  
   326  	clearExpectations(t, manager, ds, podControl)
   327  	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
   328  
   329  	clearExpectations(t, manager, ds, podControl)
   330  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   331  	clearExpectations(t, manager, ds, podControl)
   332  }
   333  
   334  func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
   335  	_, ctx := ktesting.NewTestContext(t)
   336  	ds := newDaemonSet("foo")
   337  	manager, podControl, _, err := newTestController(ctx, ds)
   338  	if err != nil {
   339  		t.Fatalf("error creating DaemonSets controller: %v", err)
   340  	}
   341  	addNodes(manager.nodeStore, 0, 5, nil)
   342  	manager.dsStore.Add(ds)
   343  	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
   344  
   345  	maxSurge := 3
   346  	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
   347  	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(int32(maxSurge)))
   348  	manager.dsStore.Update(ds)
   349  
   350  	// all old pods are unavailable, so new pods should be surged onto every node
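        	// These tests drive availability by advancing the controller's fake
        	// clock: setPodReadiness stamps readiness transitions with it, and the
        	// MinReadySeconds checks below compare against it.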
   351  	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(100, 0))
   352  	clearExpectations(t, manager, ds, podControl)
   353  	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
   354  
   355  	// while waiting for the new pods to become ready, the old unavailable pods are deleted
   356  	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(200, 0))
   357  	clearExpectations(t, manager, ds, podControl)
   358  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 5, 0)
   359  
   360  	setPodReadiness(t, manager, true, 5, func(_ *v1.Pod) bool { return true })
   361  	ds.Spec.MinReadySeconds = 15
   362  	ds.Spec.Template.Spec.Containers[0].Image = "foo3/bar3"
   363  	manager.dsStore.Update(ds)
   364  
   365  	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(300, 0))
   366  	clearExpectations(t, manager, ds, podControl)
   367  	expectSyncDaemonSets(t, manager, ds, podControl, 3, 0, 0)
   368  
   369  	hash, err := currentDSHash(manager, ds)
   370  	if err != nil {
   371  		t.Fatal(err)
   372  	}
   373  	currentPods := podsByNodeMatchingHash(manager, hash)
   374  	// mark two updated pods as ready at time 300
   375  	setPodReadiness(t, manager, true, 2, func(pod *v1.Pod) bool {
   376  		return pod.Labels[apps.ControllerRevisionHashLabelKey] == hash
   377  	})
   378  	// mark one of the old pods that is on a node without an updated pod as unready
   379  	setPodReadiness(t, manager, false, 1, func(pod *v1.Pod) bool {
   380  		nodeName, err := util.GetTargetNodeName(pod)
   381  		if err != nil {
   382  			t.Fatal(err)
   383  		}
   384  		return pod.Labels[apps.ControllerRevisionHashLabelKey] != hash && len(currentPods[nodeName]) == 0
   385  	})
   386  
   387  	// the new pods should still be considered waiting to hit min readiness, so one pod should be created to replace
   388  	// the deleted old pod
   389  	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(310, 0))
   390  	clearExpectations(t, manager, ds, podControl)
   391  	expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
   392  
   393  	// the new pods are now considered available, so delete the old pods
   394  	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(320, 0))
   395  	clearExpectations(t, manager, ds, podControl)
   396  	expectSyncDaemonSets(t, manager, ds, podControl, 1, 3, 0)
   397  
   398  	// mark all updated pods as ready at time 320
   399  	currentPods = podsByNodeMatchingHash(manager, hash)
   400  	setPodReadiness(t, manager, true, 3, func(pod *v1.Pod) bool {
   401  		return pod.Labels[apps.ControllerRevisionHashLabelKey] == hash
   402  	})
   403  
   404  	// the remaining new pods are now available as well, so the last old pods are deleted
   405  	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(340, 0))
   406  	clearExpectations(t, manager, ds, podControl)
   407  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 2, 0)
   408  
   409  	// controller has completed upgrade
   410  	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(350, 0))
   411  	clearExpectations(t, manager, ds, podControl)
   412  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   413  }
   414  
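        // podsByNodeMatchingHash maps node name to the names of the pods on that
        // node whose controller-revision-hash label matches hash.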
   415  func podsByNodeMatchingHash(dsc *daemonSetsController, hash string) map[string][]string {
   416  	byNode := make(map[string][]string)
   417  	for _, obj := range dsc.podStore.List() {
   418  		pod := obj.(*v1.Pod)
   419  		if pod.Labels[apps.ControllerRevisionHashLabelKey] != hash {
   420  			continue
   421  		}
   422  		nodeName, err := util.GetTargetNodeName(pod)
   423  		if err != nil {
   424  			panic(err)
   425  		}
   426  		byNode[nodeName] = append(byNode[nodeName], pod.Name)
   427  	}
   428  	return byNode
   429  }
   430  
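        // setPodReadiness flips the PodReady condition on up to count pods accepted
        // by fn, stamping the transition time from the controller's fake clock so
        // that MinReadySeconds arithmetic in these tests stays deterministic.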
   431  func setPodReadiness(t *testing.T, dsc *daemonSetsController, ready bool, count int, fn func(*v1.Pod) bool) {
   432  	t.Helper()
   433  	logger, _ := ktesting.NewTestContext(t)
   434  	for _, obj := range dsc.podStore.List() {
   435  		if count <= 0 {
   436  			break
   437  		}
   438  		pod := obj.(*v1.Pod)
   439  		if pod.DeletionTimestamp != nil {
   440  			continue
   441  		}
   442  		if podutil.IsPodReady(pod) == ready {
   443  			continue
   444  		}
   445  		if !fn(pod) {
   446  			continue
   447  		}
   448  		condition := v1.PodCondition{Type: v1.PodReady}
   449  		if ready {
   450  			condition.Status = v1.ConditionTrue
   451  		} else {
   452  			condition.Status = v1.ConditionFalse
   453  		}
   454  		if !podutil.UpdatePodCondition(&pod.Status, &condition) {
   455  			t.Fatal("failed to update pod")
   456  		}
   457  		// TODO: work around UpdatePodCondition calling time.Now() directly
   458  		setCondition := podutil.GetPodReadyCondition(pod.Status)
   459  		setCondition.LastTransitionTime.Time = dsc.failedPodsBackoff.Clock.Now()
   460  		logger.Info("marked pod ready", "pod", pod.Name, "ready", ready)
   461  		count--
   462  	}
   463  	if count > 0 {
   464  		t.Fatalf("could not mark %d pods ready=%t", count, ready)
   465  	}
   466  }
   467  
   468  func currentDSHash(dsc *daemonSetsController, ds *apps.DaemonSet) (string, error) {
   469  	// Construct the DaemonSet's revision history and return the hash of the current revision
   470  	cur, _, err := dsc.constructHistory(context.TODO(), ds)
   471  	if err != nil {
   472  		return "", err
   473  	}
   474  	return cur.Labels[apps.DefaultDaemonSetUniqueLabelKey], nil
   476  }
   477  
   478  func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
   479  	_, ctx := ktesting.NewTestContext(t)
   480  	ds := newDaemonSet("foo")
   481  	manager, podControl, _, err := newTestController(ctx, ds)
   482  	if err != nil {
   483  		t.Fatalf("error creating DaemonSets controller: %v", err)
   484  	}
   485  	maxUnavailable := 3
   486  	addNodes(manager.nodeStore, 0, 5, nil)
   487  	manager.dsStore.Add(ds)
   488  	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
   489  
   490  	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
   491  	intStr := intstr.FromInt32(int32(maxUnavailable))
   492  	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
   493  	manager.dsStore.Update(ds)
   494  
   495  	// the template is unchanged, so no pods should be deleted or created
   496  	clearExpectations(t, manager, ds, podControl)
   497  	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   498  	clearExpectations(t, manager, ds, podControl)
   499  }
   500  
   501  func newUpdateSurge(value intstr.IntOrString) apps.DaemonSetUpdateStrategy {
   502  	zero := intstr.FromInt32(0)
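        	// DaemonSet validation requires MaxUnavailable to be zero whenever
        	// MaxSurge is non-zero, hence the explicit zero here.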
   503  	return apps.DaemonSetUpdateStrategy{
   504  		Type: apps.RollingUpdateDaemonSetStrategyType,
   505  		RollingUpdate: &apps.RollingUpdateDaemonSet{
   506  			MaxUnavailable: &zero,
   507  			MaxSurge:       &value,
   508  		},
   509  	}
   510  }
   511  
   512  func newUpdateUnavailable(value intstr.IntOrString) apps.DaemonSetUpdateStrategy {
   513  	return apps.DaemonSetUpdateStrategy{
   514  		Type: apps.RollingUpdateDaemonSetStrategyType,
   515  		RollingUpdate: &apps.RollingUpdateDaemonSet{
   516  			MaxUnavailable: &value,
   517  		},
   518  	}
   519  }
   520  
   521  func TestGetUnavailableNumbers(t *testing.T) {
   522  	cases := []struct {
   523  		name                   string
   524  		ManagerFunc            func(ctx context.Context) *daemonSetsController
   525  		ds                     *apps.DaemonSet
   526  		nodeToPods             map[string][]*v1.Pod
   527  		maxSurge               int
   528  		maxUnavailable         int
   529  		desiredNumberScheduled int
   530  		emptyNodes             int
   531  		Err                    error
   532  	}{
   533  		{
   534  			name: "No nodes",
   535  			ManagerFunc: func(ctx context.Context) *daemonSetsController {
   536  				manager, _, _, err := newTestController(ctx)
   537  				if err != nil {
   538  					t.Fatalf("error creating DaemonSets controller: %v", err)
   539  				}
   540  				return manager
   541  			},
   542  			ds: func() *apps.DaemonSet {
   543  				ds := newDaemonSet("x")
   544  				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt32(0))
   545  				return ds
   546  			}(),
   547  			nodeToPods:     make(map[string][]*v1.Pod),
   548  			maxUnavailable: 0,
   549  			emptyNodes:     0,
   550  		},
   551  		{
   552  			name: "Two nodes with ready pods",
   553  			ManagerFunc: func(ctx context.Context) *daemonSetsController {
   554  				manager, _, _, err := newTestController(ctx)
   555  				if err != nil {
   556  					t.Fatalf("error creating DaemonSets controller: %v", err)
   557  				}
   558  				addNodes(manager.nodeStore, 0, 2, nil)
   559  				return manager
   560  			},
   561  			ds: func() *apps.DaemonSet {
   562  				ds := newDaemonSet("x")
   563  				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt32(1))
   564  				return ds
   565  			}(),
   566  			nodeToPods: func() map[string][]*v1.Pod {
   567  				mapping := make(map[string][]*v1.Pod)
   568  				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
   569  				pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
   570  				markPodReady(pod0)
   571  				markPodReady(pod1)
   572  				mapping["node-0"] = []*v1.Pod{pod0}
   573  				mapping["node-1"] = []*v1.Pod{pod1}
   574  				return mapping
   575  			}(),
   576  			maxUnavailable:         1,
   577  			desiredNumberScheduled: 2,
   578  			emptyNodes:             0,
   579  		},
   580  		{
   581  			name: "Two nodes, one node without pods",
   582  			ManagerFunc: func(ctx context.Context) *daemonSetsController {
   583  				manager, _, _, err := newTestController(ctx)
   584  				if err != nil {
   585  					t.Fatalf("error creating DaemonSets controller: %v", err)
   586  				}
   587  				addNodes(manager.nodeStore, 0, 2, nil)
   588  				return manager
   589  			},
   590  			ds: func() *apps.DaemonSet {
   591  				ds := newDaemonSet("x")
   592  				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt32(0))
   593  				return ds
   594  			}(),
   595  			nodeToPods: func() map[string][]*v1.Pod {
   596  				mapping := make(map[string][]*v1.Pod)
   597  				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
   598  				markPodReady(pod0)
   599  				mapping["node-0"] = []*v1.Pod{pod0}
   600  				return mapping
   601  			}(),
   602  			maxUnavailable:         1,
   603  			desiredNumberScheduled: 2,
   604  			emptyNodes:             1,
   605  		},
   606  		{
   607  			name: "Two nodes, one node without pods, surge",
   608  			ManagerFunc: func(ctx context.Context) *daemonSetsController {
   609  				manager, _, _, err := newTestController(ctx)
   610  				if err != nil {
   611  					t.Fatalf("error creating DaemonSets controller: %v", err)
   612  				}
   613  				addNodes(manager.nodeStore, 0, 2, nil)
   614  				return manager
   615  			},
   616  			ds: func() *apps.DaemonSet {
   617  				ds := newDaemonSet("x")
   618  				ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(0))
   619  				return ds
   620  			}(),
   621  			nodeToPods: func() map[string][]*v1.Pod {
   622  				mapping := make(map[string][]*v1.Pod)
   623  				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
   624  				markPodReady(pod0)
   625  				mapping["node-0"] = []*v1.Pod{pod0}
   626  				return mapping
   627  			}(),
   628  			maxUnavailable:         1,
   629  			desiredNumberScheduled: 2,
   630  			emptyNodes:             1,
   631  		},
   632  		{
   633  			name: "Two nodes with pods, MaxUnavailable in percents",
   634  			ManagerFunc: func(ctx context.Context) *daemonSetsController {
   635  				manager, _, _, err := newTestController(ctx)
   636  				if err != nil {
   637  					t.Fatalf("error creating DaemonSets controller: %v", err)
   638  				}
   639  				addNodes(manager.nodeStore, 0, 2, nil)
   640  				return manager
   641  			},
   642  			ds: func() *apps.DaemonSet {
   643  				ds := newDaemonSet("x")
   644  				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromString("50%"))
   645  				return ds
   646  			}(),
   647  			nodeToPods: func() map[string][]*v1.Pod {
   648  				mapping := make(map[string][]*v1.Pod)
   649  				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
   650  				pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
   651  				markPodReady(pod0)
   652  				markPodReady(pod1)
   653  				mapping["node-0"] = []*v1.Pod{pod0}
   654  				mapping["node-1"] = []*v1.Pod{pod1}
   655  				return mapping
   656  			}(),
   657  			maxUnavailable:         1,
   658  			desiredNumberScheduled: 2,
   659  			emptyNodes:             0,
   660  		},
   661  		{
   662  			name: "Two nodes with pods, MaxUnavailable in percents, surge",
   663  			ManagerFunc: func(ctx context.Context) *daemonSetsController {
   664  				manager, _, _, err := newTestController(ctx)
   665  				if err != nil {
   666  					t.Fatalf("error creating DaemonSets controller: %v", err)
   667  				}
   668  				addNodes(manager.nodeStore, 0, 2, nil)
   669  				return manager
   670  			},
   671  			ds: func() *apps.DaemonSet {
   672  				ds := newDaemonSet("x")
   673  				ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromString("50%"))
   674  				return ds
   675  			}(),
   676  			nodeToPods: func() map[string][]*v1.Pod {
   677  				mapping := make(map[string][]*v1.Pod)
   678  				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
   679  				pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
   680  				markPodReady(pod0)
   681  				markPodReady(pod1)
   682  				mapping["node-0"] = []*v1.Pod{pod0}
   683  				mapping["node-1"] = []*v1.Pod{pod1}
   684  				return mapping
   685  			}(),
   686  			maxSurge:               1,
   687  			maxUnavailable:         0,
   688  			desiredNumberScheduled: 2,
   689  			emptyNodes:             0,
   690  		},
   691  		{
   692  			name: "Two nodes with pods, MaxUnavailable is 100%, surge",
   693  			ManagerFunc: func(ctx context.Context) *daemonSetsController {
   694  				manager, _, _, err := newTestController(ctx)
   695  				if err != nil {
   696  					t.Fatalf("error creating DaemonSets controller: %v", err)
   697  				}
   698  				addNodes(manager.nodeStore, 0, 2, nil)
   699  				return manager
   700  			},
   701  			ds: func() *apps.DaemonSet {
   702  				ds := newDaemonSet("x")
   703  				ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromString("100%"))
   704  				return ds
   705  			}(),
   706  			nodeToPods: func() map[string][]*v1.Pod {
   707  				mapping := make(map[string][]*v1.Pod)
   708  				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
   709  				pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
   710  				markPodReady(pod0)
   711  				markPodReady(pod1)
   712  				mapping["node-0"] = []*v1.Pod{pod0}
   713  				mapping["node-1"] = []*v1.Pod{pod1}
   714  				return mapping
   715  			}(),
   716  			maxSurge:               2,
   717  			maxUnavailable:         0,
   718  			desiredNumberScheduled: 2,
   719  			emptyNodes:             0,
   720  		},
   721  		{
   722  			name: "Two nodes with pods, MaxUnavailable in percents, pod terminating",
   723  			ManagerFunc: func(ctx context.Context) *daemonSetsController {
   724  				manager, _, _, err := newTestController(ctx)
   725  				if err != nil {
   726  					t.Fatalf("error creating DaemonSets controller: %v", err)
   727  				}
   728  				addNodes(manager.nodeStore, 0, 3, nil)
   729  				return manager
   730  			},
   731  			ds: func() *apps.DaemonSet {
   732  				ds := newDaemonSet("x")
   733  				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromString("50%"))
   734  				return ds
   735  			}(),
   736  			nodeToPods: func() map[string][]*v1.Pod {
   737  				mapping := make(map[string][]*v1.Pod)
   738  				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
   739  				pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
   740  				now := metav1.Now()
   741  				markPodReady(pod0)
   742  				markPodReady(pod1)
   743  				pod1.DeletionTimestamp = &now
   744  				mapping["node-0"] = []*v1.Pod{pod0}
   745  				mapping["node-1"] = []*v1.Pod{pod1}
   746  				return mapping
   747  			}(),
   748  			maxUnavailable:         2,
   749  			desiredNumberScheduled: 3,
   750  			emptyNodes:             1,
   751  		},
   752  	}
   753  
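        	// updatedDesiredNodeCounts resolves the strategy's intstr fields against
        	// the number of nodes that should run a daemon pod; percentages round up
        	// (50% of 3 nodes yields maxUnavailable == 2 in the last case above),
        	// roughly intstr.GetScaledValueFromIntOrPercent(value, desired, true).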
   754  	for _, c := range cases {
   755  		t.Run(c.name, func(t *testing.T) {
   756  			_, ctx := ktesting.NewTestContext(t)
   757  			manager := c.ManagerFunc(ctx)
   758  			manager.dsStore.Add(c.ds)
   759  			nodeList, err := manager.nodeLister.List(labels.Everything())
   760  			if err != nil {
   761  				t.Fatalf("error listing nodes: %v", err)
   762  			}
   763  			maxSurge, maxUnavailable, desiredNumberScheduled, err := manager.updatedDesiredNodeCounts(ctx, c.ds, nodeList, c.nodeToPods)
   764  			if err != nil && c.Err != nil {
   765  				if c.Err != err {
   766  					t.Fatalf("Expected error: %v but got: %v", c.Err, err)
   767  				}
   768  			} else if err != nil {
   770  				t.Fatalf("Unexpected error: %v", err)
   771  			}
   772  			if maxSurge != c.maxSurge || maxUnavailable != c.maxUnavailable || desiredNumberScheduled != c.desiredNumberScheduled {
   773  				t.Errorf("Wrong values. maxSurge: %d, expected %d, maxUnavailable: %d, expected: %d, desiredNumberScheduled: %d, expected: %d", maxSurge, c.maxSurge, maxUnavailable, c.maxUnavailable, desiredNumberScheduled, c.desiredNumberScheduled)
   774  			}
   775  			var emptyNodes int
   776  			for _, pods := range c.nodeToPods {
   777  				if len(pods) == 0 {
   778  					emptyNodes++
   779  				}
   780  			}
   781  			if emptyNodes != c.emptyNodes {
   782  				t.Errorf("expected %d empty nodes, found %d", c.emptyNodes, emptyNodes)
   783  			}
   784  		})
   785  	}
   786  }
   787  
   788  func TestControlledHistories(t *testing.T) {
   789  	ds1 := newDaemonSet("ds1")
   790  	crOfDs1 := newControllerRevision(ds1.GetName()+"-x1", ds1.GetNamespace(), ds1.Spec.Template.Labels,
   791  		[]metav1.OwnerReference{*metav1.NewControllerRef(ds1, controllerKind)})
   792  	orphanCrInSameNsWithDs1 := newControllerRevision(ds1.GetName()+"-x2", ds1.GetNamespace(), ds1.Spec.Template.Labels, nil)
   793  	orphanCrNotInSameNsWithDs1 := newControllerRevision(ds1.GetName()+"-x3", ds1.GetNamespace()+"-other", ds1.Spec.Template.Labels, nil)
   794  	cases := []struct {
   795  		name                      string
   796  		managerFunc               func(ctx context.Context) *daemonSetsController
   797  		historyCRAll              []*apps.ControllerRevision
   798  		expectControllerRevisions []*apps.ControllerRevision
   799  	}{
   800  		{
   801  			name: "controller revision in the same namespace",
   802  			managerFunc: func(ctx context.Context) *daemonSetsController {
   803  				manager, _, _, err := newTestController(ctx, ds1, crOfDs1, orphanCrInSameNsWithDs1)
   804  				if err != nil {
   805  					t.Fatalf("error creating DaemonSets controller: %v", err)
   806  				}
   807  				manager.dsStore.Add(ds1)
   808  				return manager
   809  			},
   810  			historyCRAll:              []*apps.ControllerRevision{crOfDs1, orphanCrInSameNsWithDs1},
   811  			expectControllerRevisions: []*apps.ControllerRevision{crOfDs1, orphanCrInSameNsWithDs1},
   812  		},
   813  		{
   814  			name: "Skip adopting a controller revision in a namespace other than the one the DaemonSet lives in",
   815  			managerFunc: func(ctx context.Context) *daemonSetsController {
   816  				manager, _, _, err := newTestController(ctx, ds1, orphanCrNotInSameNsWithDs1)
   817  				if err != nil {
   818  					t.Fatalf("error creating DaemonSets controller: %v", err)
   819  				}
   820  				manager.dsStore.Add(ds1)
   821  				return manager
   822  			},
   823  			historyCRAll:              []*apps.ControllerRevision{orphanCrNotInSameNsWithDs1},
   824  			expectControllerRevisions: []*apps.ControllerRevision{},
   825  		},
   826  	}
   827  	for _, c := range cases {
   828  		_, ctx := ktesting.NewTestContext(t)
   829  		manager := c.managerFunc(ctx)
   830  		for _, h := range c.historyCRAll {
   831  			manager.historyStore.Add(h)
   832  		}
   833  		crList, err := manager.controlledHistories(context.TODO(), ds1)
   834  		if err != nil {
   835  			t.Fatalf("Test case: %s. Unexpected error: %v", c.name, err)
   836  		}
   837  		if len(crList) != len(c.expectControllerRevisions) {
   838  			t.Errorf("Test case: %s, expected ControllerRevision count %d but got %d",
   839  				c.name, len(c.expectControllerRevisions), len(crList))
   840  		} else {
   841  			// check controller revisions match
   842  			for _, cr := range crList {
   843  				found := false
   844  				for _, expectCr := range c.expectControllerRevisions {
   845  					if reflect.DeepEqual(cr, expectCr) {
   846  						found = true
   847  						break
   848  					}
   849  				}
   850  				if !found {
   851  					t.Errorf("Test case: %s, unexpected ControllerRevision %v",
   852  						c.name, cr)
   853  				}
   854  			}
   855  			t.Logf("Test case: %s done", c.name)
   856  		}
   857  	}
   858  }