github.com/kaisenlinux/docker.io@v0.0.0-20230510090727-ea55db55fac7/swarmkit/manager/orchestrator/replicated/slot.go

package replicated

import (
	"context"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/orchestrator"
	"github.com/docker/swarmkit/manager/state/store"
)

// slotsByRunningState implements sort.Interface and orders slots so that
// slots containing a running task sort before slots that have none.
type slotsByRunningState []orchestrator.Slot

func (is slotsByRunningState) Len() int      { return len(is) }
func (is slotsByRunningState) Swap(i, j int) { is[i], is[j] = is[j], is[i] }

// Less returns true if the first slot should be preferred over the second
// slot, all other things being equal in terms of node balance.
func (is slotsByRunningState) Less(i, j int) bool {
	iRunning := false
	jRunning := false

	for _, ii := range is[i] {
		if ii.Status.State == api.TaskStateRunning {
			iRunning = true
			break
		}
	}
	for _, ij := range is[j] {
		if ij.Status.State == api.TaskStateRunning {
			jRunning = true
			break
		}
	}

	if iRunning && !jRunning {
		return true
	}

	if !iRunning && jRunning {
		return false
	}

	// Use Slot number as a tie-breaker to prefer to remove tasks in reverse
	// order of Slot number. This helps avoid unnecessary master migration
	// when scaling down a stateful service, because the master task of a
	// stateful service is usually in a low-numbered Slot.
	return is[i][0].Slot < is[j][0].Slot
}
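
// The ordering above can be sanity-checked directly through the comparator.
// A minimal sketch (the helper below is hypothetical and not part of the
// original file): a slot with a running task sorts before a slot with a
// pending one, even when the running slot has the higher slot number.
func exampleRunningSlotsSortFirst() bool {
	running := orchestrator.Slot{
		&api.Task{Slot: 5, Status: api.TaskStatus{State: api.TaskStateRunning}},
	}
	pending := orchestrator.Slot{
		&api.Task{Slot: 1, Status: api.TaskStatus{State: api.TaskStatePending}},
	}
	slots := slotsByRunningState{running, pending}
	// Running state wins over the lower slot number; the slot number only
	// breaks ties between slots in the same running state.
	return slots.Less(0, 1) // true
}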

type slotWithIndex struct {
	slot orchestrator.Slot

	// index is a counter that counts this slot as the nth instance of
	// the service on its node. This is used for sorting the slots so that
	// when scaling down we leave tasks more evenly balanced across nodes.
	index int
}

// slotsByIndex implements sort.Interface and orders slots by their
// per-node instance index, placing slots with a negative index last.
type slotsByIndex []slotWithIndex

func (is slotsByIndex) Len() int      { return len(is) }
func (is slotsByIndex) Swap(i, j int) { is[i], is[j] = is[j], is[i] }

func (is slotsByIndex) Less(i, j int) bool {
	// Slots with a negative index sort after all slots with a valid
	// (non-negative) index, so they are preferred for removal when
	// scaling down.
	if is[i].index < 0 && is[j].index >= 0 {
		return false
	}
	if is[j].index < 0 && is[i].index >= 0 {
		return true
	}
	return is[i].index < is[j].index
}
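
// A minimal sketch of the resulting order (hypothetical helper, not part of
// the original file): indices compare in ascending order, except that a
// negative index always sorts after a non-negative one.
func exampleIndexOrdering() (bool, bool) {
	slots := slotsByIndex{
		{index: 0},  // first instance of the service on its node
		{index: -1}, // no valid per-node index
	}
	// The non-negative index sorts first, so Less(0, 1) is true and the
	// reverse comparison is false.
	return slots.Less(0, 1), slots.Less(1, 0) // true, false
}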

// updatableAndDeadSlots returns two maps of slots. The first contains slots
// that have at least one task with a desired state above NEW and less than
// or equal to RUNNING, or a task that shouldn't be restarted. The second
// contains all other slots with at least one task.
func (r *Orchestrator) updatableAndDeadSlots(ctx context.Context, service *api.Service) (map[uint64]orchestrator.Slot, map[uint64]orchestrator.Slot, error) {
	var (
		tasks []*api.Task
		err   error
	)
	r.store.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
	})
	if err != nil {
		return nil, nil, err
	}

	// Group every task for the service by slot number.
	updatableSlots := make(map[uint64]orchestrator.Slot)
	for _, t := range tasks {
		updatableSlots[t.Slot] = append(updatableSlots[t.Slot], t)
	}

	// A slot with no updatable tasks is moved to the dead map.
	deadSlots := make(map[uint64]orchestrator.Slot)
	for slotID, slot := range updatableSlots {
		updatable := r.restarts.UpdatableTasksInSlot(ctx, slot, service)
		if len(updatable) != 0 {
			updatableSlots[slotID] = updatable
		} else {
			delete(updatableSlots, slotID)
			deadSlots[slotID] = slot
		}
	}

	return updatableSlots, deadSlots, nil
}
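
// A minimal sketch of a caller (hypothetical helper, not part of the
// original file): updatable slots feed further reconciliation, while dead
// slots only remain interesting for cleanup.
func exampleCountSlots(ctx context.Context, r *Orchestrator, service *api.Service) (updatable, dead int, err error) {
	updatableSlots, deadSlots, err := r.updatableAndDeadSlots(ctx, service)
	if err != nil {
		return 0, 0, err
	}
	return len(updatableSlots), len(deadSlots), nil
}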

// SlotTuple returns a slot tuple for the replicated service task.
func (r *Orchestrator) SlotTuple(t *api.Task) orchestrator.SlotTuple {
	return orchestrator.SlotTuple{
		ServiceID: t.ServiceID,
		Slot:      t.Slot,
	}
}