github.com/kaisenlinux/docker@v0.0.0-20230510090727-ea55db55fac7/swarmkit/manager/orchestrator/task.go

package orchestrator

import (
	"reflect"
	"time"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/api/defaults"
	"github.com/docker/swarmkit/identity"
	"github.com/docker/swarmkit/manager/constraint"
	"github.com/docker/swarmkit/protobuf/ptypes"
	google_protobuf "github.com/gogo/protobuf/types"
)

// NewTask creates a new task.
func NewTask(cluster *api.Cluster, service *api.Service, slot uint64, nodeID string) *api.Task {
	var logDriver *api.Driver
	if service.Spec.Task.LogDriver != nil {
		// use the log driver specific to the task, if we have it.
		logDriver = service.Spec.Task.LogDriver
	} else if cluster != nil {
		// pick up the cluster default, if available.
		logDriver = cluster.Spec.TaskDefaults.LogDriver // nil is okay here.
	}

	taskID := identity.NewID()
	task := api.Task{
		ID:                 taskID,
		ServiceAnnotations: service.Spec.Annotations,
		Spec:               service.Spec.Task,
		SpecVersion:        service.SpecVersion,
		ServiceID:          service.ID,
		Slot:               slot,
		Status: api.TaskStatus{
			State:     api.TaskStateNew,
			Timestamp: ptypes.MustTimestampProto(time.Now()),
			Message:   "created",
		},
		Endpoint: &api.Endpoint{
			Spec: service.Spec.Endpoint.Copy(),
		},
		DesiredState: api.TaskStateRunning,
		LogDriver:    logDriver,
	}

	// In global mode we also set the NodeID
	if nodeID != "" {
		task.NodeID = nodeID
	}

	return &task
}
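
// newPendingTask is a minimal usage sketch (illustrative only; the name and
// wrapper are not part of this package): replicated-style callers create the
// task with an empty node ID and leave placement to the scheduler, while the
// global orchestrator passes the target node's ID so NewTask pins the task to
// that node.
func newPendingTask(cluster *api.Cluster, service *api.Service, slot uint64) *api.Task {
	return NewTask(cluster, service, slot, "")
}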

// RestartCondition returns the restart condition to apply to this task.
func RestartCondition(task *api.Task) api.RestartPolicy_RestartCondition {
	restartCondition := defaults.Service.Task.Restart.Condition
	if task.Spec.Restart != nil {
		restartCondition = task.Spec.Restart.Condition
	}
	return restartCondition
}

// IsTaskDirty determines whether a task matches the given service's spec and
// whether the given node satisfies the placement constraints.
// Returns false if the spec version didn't change; if only the task placement
// constraints changed and the assigned node satisfies the new constraints; or
// if the service task spec and the endpoint spec didn't change at all.
// Returns true otherwise.
// Note: for non-failed tasks with a container spec runtime that have already
// pulled the required image (i.e., current state is between READY and
// RUNNING inclusively), the value of `PullOptions` is ignored.
func IsTaskDirty(s *api.Service, t *api.Task, n *api.Node) bool {
	// If the spec version matches, we know the task is not dirty. However,
	// if it does not match, that doesn't mean the task is dirty, since
	// only a portion of the spec is included in the comparison.
	if t.SpecVersion != nil && s.SpecVersion != nil && *s.SpecVersion == *t.SpecVersion {
		return false
	}

	// Make a deep copy of the service's task spec for the comparison.
	serviceTaskSpec := *s.Spec.Task.Copy()

	// Task is not dirty if the placement constraints alone changed
	// and the node currently assigned can satisfy the changed constraints.
	if IsTaskDirtyPlacementConstraintsOnly(serviceTaskSpec, t) && nodeMatches(s, n) {
		return false
	}

	// For non-failed tasks with a container spec runtime that have already
	// pulled the required image (i.e., current state is between READY and
	// RUNNING inclusively), ignore the value of the `PullOptions` field by
	// setting the copied service to have the same PullOptions value as the
	// task. A difference in only the `PullOptions` field should not cause
	// a running (or ready to run) task to be considered 'dirty' when we
	// handle updates.
	// See https://github.com/docker/swarmkit/issues/971
	currentState := t.Status.State
	// Ignore PullOpts if the task is desired to be in a "runnable" state
	// and its last known current state is between READY and RUNNING in
	// which case we know that the task either successfully pulled its
	// container image or didn't need to.
	ignorePullOpts := t.DesiredState <= api.TaskStateRunning &&
		currentState >= api.TaskStateReady &&
		currentState <= api.TaskStateRunning
	if ignorePullOpts && serviceTaskSpec.GetContainer() != nil && t.Spec.GetContainer() != nil {
		// Modify the service's container spec.
		serviceTaskSpec.GetContainer().PullOptions = t.Spec.GetContainer().PullOptions
	}

	return !reflect.DeepEqual(serviceTaskSpec, t.Spec) ||
		(t.Endpoint != nil && !reflect.DeepEqual(s.Spec.Endpoint, t.Endpoint.Spec))
}
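
// isDirtyIgnoringNode is a minimal sketch (illustrative only; not part of this
// package's API): calling IsTaskDirty with a nil node means the
// placement-constraints-only shortcut never applies, because nodeMatches
// treats a nil node as not satisfying any constraints.
func isDirtyIgnoringNode(s *api.Service, t *api.Task) bool {
	return IsTaskDirty(s, t, nil)
}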

// nodeMatches checks if the currently assigned node matches the
// Placement.Constraints specified in the task spec for Updater.newService.
func nodeMatches(s *api.Service, n *api.Node) bool {
	if n == nil {
		return false
	}

	constraints, _ := constraint.Parse(s.Spec.Task.Placement.Constraints)
	return constraint.NodeMatches(constraints, n)
}

// IsTaskDirtyPlacementConstraintsOnly checks if the Placement field alone
// in the spec has changed.
func IsTaskDirtyPlacementConstraintsOnly(serviceTaskSpec api.TaskSpec, t *api.Task) bool {
	// Compare the task placement constraints.
	if reflect.DeepEqual(serviceTaskSpec.Placement, t.Spec.Placement) {
		return false
	}

	// Copy the task's placement into the spec copy so that the final
	// comparison covers every field other than the placement constraints.
	serviceTaskSpec.Placement = t.Spec.Placement
	return reflect.DeepEqual(serviceTaskSpec, t.Spec)
}

// InvalidNode is true if the node is nil, down, or drained
func InvalidNode(n *api.Node) bool {
	return n == nil ||
		n.Status.State == api.NodeStatus_DOWN ||
		n.Spec.Availability == api.NodeAvailabilityDrain
}

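// taskTimestamp returns the timestamp used to order tasks: the time the
// current status was applied on the node when available, otherwise the time
// the status was recorded.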
func taskTimestamp(t *api.Task) *google_protobuf.Timestamp {
	if t.Status.AppliedAt != nil {
		return t.Status.AppliedAt
	}

	return t.Status.Timestamp
}

// TasksByTimestamp sorts tasks by applied timestamp if available, otherwise
// status timestamp.
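//
// A minimal usage sketch (assuming the caller imports the standard library
// "sort" package):
//
//	sort.Sort(TasksByTimestamp(tasks)) // oldest tasks first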
type TasksByTimestamp []*api.Task

// Len implements the Len method for sorting.
func (t TasksByTimestamp) Len() int {
	return len(t)
}

// Swap implements the Swap method for sorting.
func (t TasksByTimestamp) Swap(i, j int) {
	t[i], t[j] = t[j], t[i]
}

// Less implements the Less method for sorting.
func (t TasksByTimestamp) Less(i, j int) bool {
	iTimestamp := taskTimestamp(t[i])
	jTimestamp := taskTimestamp(t[j])

	if iTimestamp == nil {
		return true
	}
	if jTimestamp == nil {
		return false
	}
	if iTimestamp.Seconds < jTimestamp.Seconds {
		return true
	}
	if iTimestamp.Seconds > jTimestamp.Seconds {
		return false
	}
	return iTimestamp.Nanos < jTimestamp.Nanos
}