github.com/projecteru2/core@v0.0.0-20240321043226-06bcc1c23f58/resource/plugins/cpumem/schedule/schedule.go

package schedule

import (
	"container/heap"
	"sort"

	"github.com/projecteru2/core/resource/plugins/cpumem/types"
	"github.com/projecteru2/core/utils"
)

// cpuCore is a single CPU core identified by ID, holding its remaining share pieces.
type cpuCore struct {
	ID     string
	pieces int
}

// Less orders cores by remaining pieces, breaking ties by ID.
func (c cpuCore) Less(c1 *cpuCore) bool {
	if c.pieces == c1.pieces {
		return c.ID < c1.ID
	}
	return c.pieces < c1.pieces
}

type cpuCoreHeap []*cpuCore

// Len implements heap.Interface.
func (c cpuCoreHeap) Len() int {
	return len(c)
}

// Less implements heap.Interface. It inverts cpuCore.Less so that the heap
// always pops the core with the most remaining pieces first (a max-heap).
func (c cpuCoreHeap) Less(i, j int) bool {
	return !c[i].Less(c[j])
}

// Swap implements heap.Interface.
func (c cpuCoreHeap) Swap(i, j int) {
	c[i], c[j] = c[j], c[i]
}

// Push implements heap.Interface.
func (c *cpuCoreHeap) Push(x any) {
	*c = append(*c, x.(*cpuCore))
}

// Pop implements heap.Interface.
func (c *cpuCoreHeap) Pop() any {
	old := *c
	n := len(old)
	x := old[n-1]
	*c = old[:n-1]
	return x
}

// host tracks the schedulable cores of a node: fullCores still hold whole
// multiples of shareBase pieces, fragmentCores are only partially available.
type host struct {
	shareBase        int
	maxFragmentCores int
	fullCores        []*cpuCore
	fragmentCores    []*cpuCore
	affinity         bool
}
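
// In this plugin a core is divided into shareBase pieces and CPU requests are
// expressed in pieces. Illustrative numbers (assumed, not taken from any config
// in this repo): with shareBase = 100, a request of 2.0 CPUs is 200 pieces and
// needs two full cores, 0.5 CPUs is 50 pieces and fits on a fragment core, and
// 1.5 CPUs is 150 pieces, i.e. one full core plus a 50-piece fragment.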

// GetCPUPlans returns all schedulable CPU allocation plans for the request.
// It first builds plans bound to a single NUMA node, then builds cross-NUMA
// plans from the remaining resources.
func GetCPUPlans(resourceInfo *types.NodeResourceInfo, originCPUMap types.CPUMap, shareBase int, maxFragmentCores int, req *types.WorkloadResourceRequest) []*types.CPUPlan {
	cpuPlans := []*types.CPUPlan{}
	availableResource := resourceInfo.GetAvailableResource()

	numaCPUMap := map[string]types.CPUMap{}
	for cpuID, numaNodeID := range resourceInfo.Capacity.NUMA {
		if _, ok := numaCPUMap[numaNodeID]; !ok {
			numaCPUMap[numaNodeID] = types.CPUMap{}
		}
		numaCPUMap[numaNodeID][cpuID] = availableResource.CPUMap[cpuID]
	}

	// get cpu plan for each numa node
	for numaNodeID, cpuMap := range numaCPUMap {
		numaCPUPlans := doGetCPUPlans(originCPUMap, cpuMap, availableResource.NUMAMemory[numaNodeID], shareBase, maxFragmentCores, req.CPURequest, req.MemRequest)
		for _, workloadCPUMap := range numaCPUPlans {
			cpuPlans = append(cpuPlans, &types.CPUPlan{
				NUMANode: numaNodeID,
				CPUMap:   workloadCPUMap,
			})
			availableResource.Sub(&types.NodeResource{
				CPU:        req.CPURequest,
				CPUMap:     workloadCPUMap,
				Memory:     req.MemRequest,
				NUMAMemory: types.NUMAMemory{numaNodeID: req.MemRequest},
			})
		}
	}

	// get cpu plan with the remaining resource
	crossNUMACPUPlans := doGetCPUPlans(originCPUMap, availableResource.CPUMap, availableResource.Memory, shareBase, maxFragmentCores, req.CPURequest, req.MemRequest)
	for _, workloadCPUMap := range crossNUMACPUPlans {
		cpuPlans = append(cpuPlans, &types.CPUPlan{
			CPUMap: workloadCPUMap,
		})
	}

	return cpuPlans
}
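
// A minimal usage sketch of GetCPUPlans (illustrative only; the shareBase of 100,
// the -1 "no configured limit" maxFragmentCores and the request values below are
// assumptions, not taken from this repo's configs or tests):
//
//	req := &types.WorkloadResourceRequest{CPURequest: 1.5, MemRequest: 512 * 1024 * 1024}
//	plans := GetCPUPlans(resourceInfo, nil, 100, -1, req)
//	for _, plan := range plans {
//		fmt.Println(plan.NUMANode, plan.CPUMap)
//	}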

// reorderByAffinity makes sure the cores used by the previous allocation are
// still preferred: cores present in the old host keep their old relative order
// and are placed ahead of the newly available ones.
func reorderByAffinity(oldH, newH *host) {
	oldFull := map[string]int{}
	oldFragment := map[string]int{}

	for i, core := range oldH.fullCores {
		oldFull[core.ID] = i + 1
	}
	for i, core := range oldH.fragmentCores {
		oldFragment[core.ID] = i + 1
	}

	sortFunc := func(orderMap map[string]int, cores []*cpuCore) func(i, j int) bool {
		return func(i, j int) bool {
			idxI := orderMap[cores[i].ID]
			idxJ := orderMap[cores[j].ID]

			// 0 means "not used by the old allocation"
			if idxI == 0 && idxJ == 0 {
				return i < j
			}
			if idxI == 0 || idxJ == 0 {
				return idxI > idxJ
			}
			return idxI < idxJ
		}
	}

	sort.SliceStable(newH.fullCores, sortFunc(oldFull, newH.fullCores))
	sort.SliceStable(newH.fragmentCores, sortFunc(oldFragment, newH.fragmentCores))
	newH.affinity = true
}
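
// For example (illustrative IDs, not from the repo): if the old allocation used
// full cores ["2", "5"] and the new host's full cores are ["1", "2", "5", "7"],
// the reordered slice is ["2", "5", "1", "7"]: the old cores first, in their old
// order, followed by the remaining cores in their existing order.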

// doGetCPUPlans builds CPU plans from the given available CPU map, then caps the
// number of plans by how many times the memory request fits into the available memory.
func doGetCPUPlans(originCPUMap, availableCPUMap types.CPUMap, availableMemory int64, shareBase int, maxFragmentCores int, cpuRequest float64, memoryRequest int64) []types.CPUMap {
	h := newHost(availableCPUMap, shareBase, maxFragmentCores)

	// affinity: keep previously used cores at the front
	if len(originCPUMap) > 0 {
		originH := newHost(originCPUMap, shareBase, maxFragmentCores)
		reorderByAffinity(originH, h)
	}

	cpuPlans := h.getCPUPlans(cpuRequest)
	if memoryRequest > 0 {
		memoryCapacity := int(availableMemory / memoryRequest)
		if memoryCapacity < len(cpuPlans) {
			cpuPlans = cpuPlans[:memoryCapacity]
		}
	}
	return cpuPlans
}
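
// The memory cap is plain integer division. Illustrative numbers (assumed, not
// from the repo): with 8 CPU plans, availableMemory = 3 GiB and memoryRequest =
// 1 GiB, memoryCapacity is 3 and only the first 3 plans are returned.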

// newHost classifies the cores of a CPU map into full and fragment cores and
// sorts each group so that the most loaded cores (fewest remaining pieces) come first.
func newHost(cpuMap types.CPUMap, shareBase int, maxFragmentCores int) *host {
	h := &host{
		shareBase:        shareBase,
		maxFragmentCores: maxFragmentCores,
		fullCores:        []*cpuCore{},
		fragmentCores:    []*cpuCore{},
	}

	for cpu, pieces := range cpuMap {
		if pieces >= shareBase && pieces%shareBase == 0 {
			h.fullCores = append(h.fullCores, &cpuCore{ID: cpu, pieces: pieces})
		} else if pieces > 0 {
			h.fragmentCores = append(h.fragmentCores, &cpuCore{ID: cpu, pieces: pieces})
		}
	}

	sortFunc := func(cores []*cpuCore) func(i, j int) bool {
		return func(i, j int) bool {
			// give priority to the CPU cores with higher load, i.e. fewer remaining pieces
			return cores[i].Less(cores[j])
		}
	}

	sort.SliceStable(h.fullCores, sortFunc(h.fullCores))
	sort.SliceStable(h.fragmentCores, sortFunc(h.fragmentCores))

	return h
}
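
// Classification example (illustrative values, assuming shareBase = 100): for
// CPUMap{"0": 200, "1": 70, "2": 0}, core "0" is a full core (an exact multiple
// of shareBase), core "1" is a fragment core, and core "2" is dropped. Note that
// a core with, say, 150 pieces also counts as a fragment core because it is not
// an exact multiple of shareBase.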

// getCPUPlans computes CPU maps of piecesRequest pieces each: whole shareBase
// blocks come from full cores, the remainder comes from a single fragment core.
func (h *host) getCPUPlans(cpuRequest float64) []types.CPUMap {
	piecesRequest := int(cpuRequest * float64(h.shareBase))
	full := piecesRequest / h.shareBase
	fragment := piecesRequest % h.shareBase

	maxFragmentCores := len(h.fullCores) + len(h.fragmentCores) - full
	if h.maxFragmentCores == -1 || h.maxFragmentCores > maxFragmentCores {
		h.maxFragmentCores = maxFragmentCores
	}

	if fragment == 0 {
		return h.getFullCPUPlans(h.fullCores, full)
	}

	if full == 0 {
		// convert full cores into fragment cores until maxFragmentCores is reached;
		// guard against a negative diff when there are already more fragment cores
		// than maxFragmentCores allows
		if diff := h.maxFragmentCores - len(h.fragmentCores); diff > 0 {
			h.fragmentCores = append(h.fragmentCores, h.fullCores[:diff]...)
			h.fullCores = h.fullCores[diff:]
		}
		return h.getFragmentCPUPlans(h.fragmentCores, fragment)
	}

	// mixed case: search for the split between full and fragment cores that
	// maximizes the number of complete plans
	fragmentCapacityMap := map[string]int{}
	totalFragmentCapacity := 0 // updated incrementally instead of recomputing fragment plans each round
	bestCPUPlans := [2][]types.CPUMap{h.getFullCPUPlans(h.fullCores, full), h.getFragmentCPUPlans(h.fragmentCores, fragment)}
	bestCapacity := utils.Min(len(bestCPUPlans[0]), len(bestCPUPlans[1]))

	for _, core := range h.fullCores {
		fragmentCapacityMap[core.ID] = core.pieces / fragment
	}

	for _, core := range h.fragmentCores {
		fragmentCapacityMap[core.ID] = core.pieces / fragment
		totalFragmentCapacity += fragmentCapacityMap[core.ID]
	}

	for len(h.fragmentCores) < h.maxFragmentCores {
		// convert a full core to a fragment core
		newFragmentCore := h.fullCores[0]
		h.fragmentCores = append(h.fragmentCores, newFragmentCore)
		h.fullCores = h.fullCores[1:]
		totalFragmentCapacity += fragmentCapacityMap[newFragmentCore.ID]

		fullCPUPlans := h.getFullCPUPlans(h.fullCores, full)
		capacity := utils.Min(len(fullCPUPlans), totalFragmentCapacity)
		if capacity > bestCapacity {
			bestCPUPlans[0] = fullCPUPlans
			bestCPUPlans[1] = h.getFragmentCPUPlans(h.fragmentCores, fragment)
			bestCapacity = capacity
		}
	}

	cpuPlans := []types.CPUMap{}
	for i := 0; i < bestCapacity; i++ {
		fullCPUPlans := bestCPUPlans[0]
		fragmentCPUPlans := bestCPUPlans[1]

		cpuMap := types.CPUMap{}
		cpuMap.Add(fullCPUPlans[i])
		cpuMap.Add(fragmentCPUPlans[i])

		cpuPlans = append(cpuPlans, cpuMap)
	}

	return cpuPlans
}
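
// Worked example (illustrative, assuming shareBase = 100): a request of 1.5 CPUs
// is 150 pieces, so full = 1 and fragment = 50. Every plan then takes 100 pieces
// from one full core plus 50 pieces from one fragment core. The loop above trades
// full cores for fragment cores one at a time and keeps whichever split yields the
// most complete plans, i.e. the largest min(full-plan count, fragment capacity).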

// getFullCPUPlans allocates full whole cores per plan, taking the cores with the
// most remaining pieces first via a max-heap.
func (h *host) getFullCPUPlans(cores []*cpuCore, full int) []types.CPUMap {
	if h.affinity {
		return h.getFullCPUPlansWithAffinity(cores, full)
	}

	result := []types.CPUMap{}
	cpuHeap := &cpuCoreHeap{}
	indexMap := map[string]int{}
	for i, core := range cores {
		indexMap[core.ID] = i
		cpuHeap.Push(&cpuCore{ID: core.ID, pieces: core.pieces})
	}
	heap.Init(cpuHeap)

	for cpuHeap.Len() >= full {
		plan := types.CPUMap{}
		resourcesToPush := []*cpuCore{}

		for i := 0; i < full; i++ {
			core := heap.Pop(cpuHeap).(*cpuCore)
			plan[core.ID] = h.shareBase

			core.pieces -= h.shareBase
			if core.pieces > 0 {
				resourcesToPush = append(resourcesToPush, core)
			}
		}

		result = append(result, plan)
		for _, core := range resourcesToPush {
			heap.Push(cpuHeap, core)
		}
	}

	// keep the original core priority as far as possible: plans built from
	// earlier (higher-priority) cores are returned first
	sumOfIDs := func(c types.CPUMap) int {
		sum := 0
		for ID := range c {
			sum += indexMap[ID]
		}
		return sum
	}

	sort.Slice(result, func(i, j int) bool { return sumOfIDs(result[i]) < sumOfIDs(result[j]) })

	return result
}
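
// Heap allocation example (illustrative, assuming shareBase = 100 and full = 2):
// for cores {"0": 200, "1": 100, "2": 100}, the max-heap yields the two plans
// {"0": 100, "2": 100} and {"0": 100, "1": 100}, which the final sort then orders
// so that plans built from higher-priority cores come first.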

func (h *host) getFullCPUPlansWithAffinity(cores []*cpuCore, full int) []types.CPUMap {
	result := []types.CPUMap{}

	for len(cores) >= full {
		count := len(cores) / full
		tempCores := []*cpuCore{}
		for i := 0; i < count; i++ {
			cpuMap := types.CPUMap{}
			for j := i * full; j < i*full+full; j++ {
				cpuMap[cores[j].ID] = h.shareBase

				remainingPieces := cores[j].pieces - h.shareBase
				if remainingPieces > 0 {
					tempCores = append(tempCores, &cpuCore{ID: cores[j].ID, pieces: remainingPieces})
				}
			}
			result = append(result, cpuMap)
		}

		cores = append(tempCores, cores[len(cores)/full*full:]...)
	}

	return result
}
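
// In the affinity path cores are packed in their given priority order instead of
// through the heap. Illustrative run (assuming shareBase = 100 and full = 2): with
// cores ["2": 100, "5": 200, "1": 100] in that order, the first pass produces
// {"2": 100, "5": 100}, core "5" keeps 100 pieces, and the second pass produces
// {"5": 100, "1": 100}.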

func (h *host) getFragmentCPUPlans(cores []*cpuCore, fragment int) []types.CPUMap {
	result := []types.CPUMap{}
	for _, core := range cores {
		for i := 0; i < core.pieces/fragment; i++ {
			result = append(result, types.CPUMap{core.ID: fragment})
		}
	}
	return result
}
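
// Each fragment plan takes fragment pieces from a single core. Illustrative
// example (assumed values): with fragment = 30, a core holding 70 pieces
// contributes two plans of {ID: 30}; the leftover 10 pieces stay unallocated.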