github.com/kaisenlinux/docker.io@v0.0.0-20230510090727-ea55db55fac7/swarmkit/manager/orchestrator/constraintenforcer/constraint_enforcer_test.go (about)

     1  package constraintenforcer
     2  
     3  import (
     4  	"testing"
     5  
     6  	"github.com/docker/swarmkit/api"
     7  	"github.com/docker/swarmkit/manager/orchestrator/testutils"
     8  	"github.com/docker/swarmkit/manager/state"
     9  	"github.com/docker/swarmkit/manager/state/store"
    10  	"github.com/stretchr/testify/assert"
    11  	"github.com/stretchr/testify/require"
    12  )
    13  
// TestConstraintEnforcer exercises the constraint enforcer end-to-end against
// an in-memory store: tasks whose placement constraints or resource
// reservations are not (or are no longer) satisfied by their assigned node
// must be moved to TaskStateRejected, both for violations present at startup
// and for violations introduced by later node updates.
func TestConstraintEnforcer(t *testing.T) {
	nodes := []*api.Node{
		// this node starts as a worker, but then is changed to a manager.
		{
			ID: "id1",
			Spec: api.NodeSpec{
				Annotations: api.Annotations{
					Name: "name1",
				},
				Availability: api.NodeAvailabilityActive,
			},
			Status: api.NodeStatus{
				State: api.NodeStatus_READY,
			},
			Role: api.NodeRoleWorker,
		},
		// this node advertises 1 CPU and 1e9 bytes of memory; its memory
		// is later reduced below the reservation of task id4.
		{
			ID: "id2",
			Spec: api.NodeSpec{
				Annotations: api.Annotations{
					Name: "name2",
				},
				Availability: api.NodeAvailabilityActive,
			},
			Status: api.NodeStatus{
				State: api.NodeStatus_READY,
			},
			Description: &api.NodeDescription{
				Resources: &api.Resources{
					NanoCPUs:    1e9,
					MemoryBytes: 1e9,
				},
			},
		},
	}

	tasks := []*api.Task{
		// This task should not run, because id1 is a worker
		{
			ID:           "id0",
			DesiredState: api.TaskStateRunning,
			Spec: api.TaskSpec{
				Placement: &api.Placement{
					Constraints: []string{"node.role == manager"},
				},
			},
			Status: api.TaskStatus{
				State: api.TaskStateNew,
			},
			NodeID: "id1",
		},
		// this task should run without question
		{
			ID:           "id1",
			DesiredState: api.TaskStateRunning,
			Status: api.TaskStatus{
				State: api.TaskStateNew,
			},
			NodeID: "id1",
		},
		// this task, which might belong to a job, should run.
		// (DesiredState is past Running, which the enforcer must tolerate.)
		{
			ID:           "id5",
			DesiredState: api.TaskStateCompleted,
			Status: api.TaskStatus{
				State: api.TaskStateNew,
			},
			NodeID: "id1",
		},
		// this task should run fine and not shut down at first, because node
		// id1 is correctly a worker. but when the node is updated to be a
		// manager, it should be rejected
		{
			ID:           "id2",
			DesiredState: api.TaskStateRunning,
			Spec: api.TaskSpec{
				Placement: &api.Placement{
					Constraints: []string{"node.role == worker"},
				},
			},
			Status: api.TaskStatus{
				State: api.TaskStateRunning,
			},
			NodeID: "id1",
		},
		// this task has not yet been scheduled to run, so the enforcer
		// should leave it alone.
		{
			ID:           "id3",
			DesiredState: api.TaskStateNew,
			Status: api.TaskStatus{
				State: api.TaskStateNew,
			},
			NodeID: "id2",
		},
		// this task reserves 9e8 bytes of memory on node id2 (which has
		// 1e9); it is rejected once the node's memory drops to 5e8 below
		// the reservation.
		{
			ID:           "id4",
			DesiredState: api.TaskStateReady,
			Spec: api.TaskSpec{
				Resources: &api.ResourceRequirements{
					Reservations: &api.Resources{
						MemoryBytes: 9e8,
					},
				},
			},
			Status: api.TaskStatus{
				State: api.TaskStatePending,
			},
			NodeID: "id2",
		},
	}

	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	err := s.Update(func(tx store.Tx) error {
		// Prepopulate nodes
		for _, n := range nodes {
			assert.NoError(t, store.CreateNode(tx, n))
		}

		// Prepopulate tasks
		for _, task := range tasks {
			assert.NoError(t, store.CreateTask(tx, task))
		}
		return nil
	})
	assert.NoError(t, err)

	// Watch task updates so we can observe the enforcer's rejections in order.
	watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{})
	defer cancel()

	constraintEnforcer := New(s)
	defer constraintEnforcer.Stop()

	go constraintEnforcer.Run()

	// id0 should be rejected immediately
	shutdown1 := testutils.WatchTaskUpdate(t, watch)
	assert.Equal(t, "id0", shutdown1.ID)
	assert.Equal(t, api.TaskStateRejected, shutdown1.Status.State)

	// Change node id1 to a manager
	err = s.Update(func(tx store.Tx) error {
		node := store.GetNode(tx, "id1")
		if node == nil {
			t.Fatal("could not get node id1")
		}
		node.Role = api.NodeRoleManager
		assert.NoError(t, store.UpdateNode(tx, node))
		return nil
	})
	assert.NoError(t, err)

	// since we've changed the node from a worker to a manager, this task
	// should now shut down
	shutdown2 := testutils.WatchTaskUpdate(t, watch)
	assert.Equal(t, "id2", shutdown2.ID)
	assert.Equal(t, api.TaskStateRejected, shutdown2.Status.State)

	// Change resources on node id2 so that task id4's 9e8-byte memory
	// reservation can no longer be satisfied.
	err = s.Update(func(tx store.Tx) error {
		node := store.GetNode(tx, "id2")
		if node == nil {
			t.Fatal("could not get node id2")
		}
		node.Description.Resources.MemoryBytes = 5e8
		assert.NoError(t, store.UpdateNode(tx, node))
		return nil
	})
	assert.NoError(t, err)

	shutdown3 := testutils.WatchTaskUpdate(t, watch)
	assert.Equal(t, "id4", shutdown3.ID)
	assert.Equal(t, api.TaskStateRejected, shutdown3.Status.State)
}
   189  
   190  // TestOutdatedPlacementConstraints tests the following scenario: If a task is
   191  // associacted with a service then we must use the constraints from the current
   192  // service spec rather than the constraints from the task spec because they may
   193  // be outdated. This will happen if the service was previously updated in a way
   194  // which only changes the placement constraints and the node matched the
   195  // placement constraints both before and after that update. In the case of such
   196  // updates, the tasks are not considered "dirty" and are not restarted but it
   197  // will mean that the task spec's placement constraints are outdated. Consider
   198  // this example:
   199  // - A service is created with no constraints and a task is scheduled
   200  //   to a node.
   201  // - The node is updated to add a label, this doesn't affect the task
   202  //   on that node because it has no constraints.
   203  // - The service is updated to add a node label constraint which
   204  //   matches the label which was just added to the node. The updater
   205  //   does not shut down the task because the only the constraints have
   206  //   changed and the node still matches the updated constraints.
   207  // This test initializes a new in-memory store with the expected state from
   208  // above, starts a new constraint enforcer, and then updates the node to remove
   209  // the node label. Since the node no longer satisfies the placement constraints
   210  // of the service spec, the task should be shutdown despite the fact that the
   211  // task's own spec still has the original placement constraints.
   212  func TestOutdatedTaskPlacementConstraints(t *testing.T) {
   213  	node := &api.Node{
   214  		ID: "id0",
   215  		Spec: api.NodeSpec{
   216  			Annotations: api.Annotations{
   217  				Name: "node1",
   218  				Labels: map[string]string{
   219  					"foo": "bar",
   220  				},
   221  			},
   222  			Availability: api.NodeAvailabilityActive,
   223  		},
   224  		Status: api.NodeStatus{
   225  			State: api.NodeStatus_READY,
   226  		},
   227  		Role: api.NodeRoleWorker,
   228  	}
   229  
   230  	service := &api.Service{
   231  		ID: "id1",
   232  		Spec: api.ServiceSpec{
   233  			Annotations: api.Annotations{
   234  				Name: "service1",
   235  			},
   236  			Task: api.TaskSpec{
   237  				Placement: &api.Placement{
   238  					Constraints: []string{
   239  						"node.labels.foo == bar",
   240  					},
   241  				},
   242  			},
   243  		},
   244  	}
   245  
   246  	task := &api.Task{
   247  		ID: "id2",
   248  		Spec: api.TaskSpec{
   249  			Placement: nil, // Note: No placement constraints.
   250  		},
   251  		ServiceID: service.ID,
   252  		NodeID:    node.ID,
   253  		Status: api.TaskStatus{
   254  			State: api.TaskStateRunning,
   255  		},
   256  		DesiredState: api.TaskStateRunning,
   257  	}
   258  
   259  	s := store.NewMemoryStore(nil)
   260  	require.NotNil(t, s)
   261  	defer s.Close()
   262  
   263  	require.NoError(t, s.Update(func(tx store.Tx) error {
   264  		// Prepoulate node, service, and task.
   265  		for _, err := range []error{
   266  			store.CreateNode(tx, node),
   267  			store.CreateService(tx, service),
   268  			store.CreateTask(tx, task),
   269  		} {
   270  			if err != nil {
   271  				return err
   272  			}
   273  		}
   274  		return nil
   275  	}))
   276  
   277  	watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{})
   278  	defer cancel()
   279  
   280  	constraintEnforcer := New(s)
   281  	defer constraintEnforcer.Stop()
   282  
   283  	go constraintEnforcer.Run()
   284  
   285  	// Update the node to remove the node label.
   286  	require.NoError(t, s.Update(func(tx store.Tx) error {
   287  		node = store.GetNode(tx, node.ID)
   288  		delete(node.Spec.Annotations.Labels, "foo")
   289  		return store.UpdateNode(tx, node)
   290  	}))
   291  
   292  	// The task should be rejected immediately.
   293  	task = testutils.WatchTaskUpdate(t, watch)
   294  	assert.Equal(t, api.TaskStateRejected, task.Status.State)
   295  }