github.com/kaisenlinux/docker@v0.0.0-20230510090727-ea55db55fac7/swarmkit/manager/scheduler/filter.go

package scheduler

import (
	"fmt"
	"strings"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/api/genericresource"
	"github.com/docker/swarmkit/manager/constraint"
)

// Filter checks whether the given task can run on the given node.
// A filter may only operate on one task at a time.
type Filter interface {
	// SetTask returns true when the filter is enabled for a given task
	// and assigns the task to the filter. It returns false if the filter
	// isn't applicable to this task. For instance, a constraints filter
	// would return `false` if the task doesn't contain any constraints.
	SetTask(*api.Task) bool

	// Check returns true if the task assigned by SetTask can be scheduled
	// into the given node. This function should not be called if SetTask
	// returned false.
	Check(*NodeInfo) bool

	// Explain returns a human-readable explanation of what a failure of
	// this filter means.
	Explain(nodes int) string
}

// ReadyFilter checks that the node is ready to schedule tasks.
type ReadyFilter struct{}

// SetTask returns true when the filter is enabled for a given task.
func (f *ReadyFilter) SetTask(_ *api.Task) bool {
	return true
}

// Check returns true if the task can be scheduled into the given node.
func (f *ReadyFilter) Check(n *NodeInfo) bool {
	return n.Status.State == api.NodeStatus_READY &&
		n.Spec.Availability == api.NodeAvailabilityActive
}

// Explain returns an explanation of a failure.
func (f *ReadyFilter) Explain(nodes int) string {
	if nodes == 1 {
		return "1 node not available for new tasks"
	}
	return fmt.Sprintf("%d nodes not available for new tasks", nodes)
}
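// Illustrative sketch (hypothetical helper, not part of the upstream file):
// how a scheduler might drive the Filter protocol above. SetTask is called
// once per task so inapplicable filters can opt out; Check is then called
// per candidate node, but only on the filters that opted in.
func applyFilters(filters []Filter, t *api.Task, nodes []*NodeInfo) []*NodeInfo {
	var enabled []Filter
	for _, f := range filters {
		if f.SetTask(t) {
			enabled = append(enabled, f)
		}
	}

	var feasible []*NodeInfo
	for _, n := range nodes {
		ok := true
		for _, f := range enabled {
			if !f.Check(n) {
				ok = false
				break
			}
		}
		if ok {
			feasible = append(feasible, n)
		}
	}
	return feasible
}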
// ResourceFilter checks that the node has enough resources available to run
// the task.
type ResourceFilter struct {
	reservations *api.Resources
}

// SetTask returns true when the filter is enabled for a given task.
func (f *ResourceFilter) SetTask(t *api.Task) bool {
	r := t.Spec.Resources
	if r == nil || r.Reservations == nil {
		return false
	}

	res := r.Reservations
	if res.NanoCPUs == 0 && res.MemoryBytes == 0 && len(res.Generic) == 0 {
		return false
	}

	f.reservations = r.Reservations
	return true
}

// Check returns true if the task can be scheduled into the given node.
func (f *ResourceFilter) Check(n *NodeInfo) bool {
	if f.reservations.NanoCPUs > n.AvailableResources.NanoCPUs {
		return false
	}

	if f.reservations.MemoryBytes > n.AvailableResources.MemoryBytes {
		return false
	}

	for _, v := range f.reservations.Generic {
		enough, err := genericresource.HasEnough(n.AvailableResources.Generic, v)
		if err != nil || !enough {
			return false
		}
	}

	return true
}

// Explain returns an explanation of a failure.
func (f *ResourceFilter) Explain(nodes int) string {
	if nodes == 1 {
		return "insufficient resources on 1 node"
	}
	return fmt.Sprintf("insufficient resources on %d nodes", nodes)
}

// PluginFilter checks that the node has the volume, network, and log driver
// plugins the task requires installed.
type PluginFilter struct {
	t *api.Task
}

// referencesVolumePlugin returns true if the mount needs a volume driver
// other than the built-in "local" driver.
func referencesVolumePlugin(mount api.Mount) bool {
	return mount.Type == api.MountTypeVolume &&
		mount.VolumeOptions != nil &&
		mount.VolumeOptions.DriverConfig != nil &&
		mount.VolumeOptions.DriverConfig.Name != "" &&
		mount.VolumeOptions.DriverConfig.Name != "local"
}

// SetTask returns true when the filter is enabled for a given task.
func (f *PluginFilter) SetTask(t *api.Task) bool {
	c := t.Spec.GetContainer()

	var volumeTemplates bool
	if c != nil {
		for _, mount := range c.Mounts {
			if referencesVolumePlugin(mount) {
				volumeTemplates = true
				break
			}
		}
	}

	if (c != nil && volumeTemplates) || len(t.Networks) > 0 || t.Spec.LogDriver != nil {
		f.t = t
		return true
	}

	return false
}

// Check returns true if the task can be scheduled into the given node.
// TODO(amitshukla): investigate storing Plugins as a map so it can be easily probed
func (f *PluginFilter) Check(n *NodeInfo) bool {
	if n.Description == nil || n.Description.Engine == nil {
		// If the node is not running Engine, plugins are not
		// supported.
		return true
	}

	// Get the list of plugins on the node.
	nodePlugins := n.Description.Engine.Plugins

	// Check that every volume plugin required by the task is installed on
	// the node.
	container := f.t.Spec.GetContainer()
	if container != nil {
		for _, mount := range container.Mounts {
			if referencesVolumePlugin(mount) {
				if _, exists := f.pluginExistsOnNode("Volume", mount.VolumeOptions.DriverConfig.Name, nodePlugins); !exists {
					return false
				}
			}
		}
	}

	// Check that every network plugin required by the task is installed on
	// the node.
	for _, tn := range f.t.Networks {
		if tn.Network != nil && tn.Network.DriverState != nil && tn.Network.DriverState.Name != "" {
			if _, exists := f.pluginExistsOnNode("Network", tn.Network.DriverState.Name, nodePlugins); !exists {
				return false
			}
		}
	}

	// It's possible that the LogDriver object does not carry a name, just
	// some configuration options. In that case, the plugin filter shouldn't
	// fail to schedule the task.
	if f.t.Spec.LogDriver != nil && f.t.Spec.LogDriver.Name != "none" && f.t.Spec.LogDriver.Name != "" {
		// If there are no log driver types in the list at all, this is most
		// likely an older daemon that did not report this information. In
		// that case, don't filter.
		if typeFound, exists := f.pluginExistsOnNode("Log", f.t.Spec.LogDriver.Name, nodePlugins); !exists && typeFound {
			return false
		}
	}
	return true
}
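// Illustrative sketch, prompted by the TODO above (the pluginKey type and
// indexPlugins helper are hypothetical): if node plugins were indexed as a
// map keyed by (type, name), each probe in Check would be a constant-time
// lookup instead of a scan over the plugin list.
type pluginKey struct {
	pluginType string
	pluginName string
}

func indexPlugins(nodePlugins []api.PluginDescription) map[pluginKey]struct{} {
	idx := make(map[pluginKey]struct{}, len(nodePlugins))
	for _, np := range nodePlugins {
		idx[pluginKey{pluginType: np.Type, pluginName: np.Name}] = struct{}{}
	}
	return idx
}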
// pluginExistsOnNode reports two booleans: whether any plugin of the given
// type exists in nodePlugins, and whether the exact (pluginType, pluginName)
// pair is present.
func (f *PluginFilter) pluginExistsOnNode(pluginType string, pluginName string, nodePlugins []api.PluginDescription) (bool, bool) {
	var typeFound bool

	for _, np := range nodePlugins {
		if pluginType != np.Type {
			continue
		}
		typeFound = true

		if pluginName == np.Name {
			return true, true
		}
		// This does not use the reference package to avoid the
		// overhead of parsing references as part of the scheduling
		// loop. This is okay only because plugin names are a very
		// strict subset of the reference grammar that is always
		// name:tag.
		if strings.HasPrefix(np.Name, pluginName) && np.Name[len(pluginName):] == ":latest" {
			return true, true
		}
	}
	return typeFound, false
}

// Explain returns an explanation of a failure.
func (f *PluginFilter) Explain(nodes int) string {
	if nodes == 1 {
		return "missing plugin on 1 node"
	}
	return fmt.Sprintf("missing plugin on %d nodes", nodes)
}

// ConstraintFilter selects only nodes that match certain labels.
type ConstraintFilter struct {
	constraints []constraint.Constraint
}

// SetTask returns true when the filter is enabled for a given task.
func (f *ConstraintFilter) SetTask(t *api.Task) bool {
	if t.Spec.Placement == nil || len(t.Spec.Placement.Constraints) == 0 {
		return false
	}

	constraints, err := constraint.Parse(t.Spec.Placement.Constraints)
	if err != nil {
		// Constraints have already been validated at the controlapi
		// layer; if an error somehow surfaces here, treat the task as
		// if the constraint filter were disabled.
		return false
	}
	f.constraints = constraints
	return true
}

// Check returns true if the task's constraints are satisfied by the given node.
func (f *ConstraintFilter) Check(n *NodeInfo) bool {
	return constraint.NodeMatches(f.constraints, n.Node)
}

// Explain returns an explanation of a failure.
func (f *ConstraintFilter) Explain(nodes int) string {
	if nodes == 1 {
		return "scheduling constraints not satisfied on 1 node"
	}
	return fmt.Sprintf("scheduling constraints not satisfied on %d nodes", nodes)
}
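// Illustrative sketch (the helper and the constraint strings are examples
// only): placement constraints arrive as expression strings such as
// "node.role==worker" or "node.labels.region!=west". constraint.Parse turns
// them into Constraint values and constraint.NodeMatches evaluates them
// against a node, exactly as ConstraintFilter does above.
func nodeMatchesExampleConstraints(n *NodeInfo) bool {
	exprs := []string{"node.role==worker", "node.labels.region==east"}
	cs, err := constraint.Parse(exprs)
	if err != nil {
		return false
	}
	return constraint.NodeMatches(cs, n.Node)
}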
// PlatformFilter selects only nodes that run the required platform.
type PlatformFilter struct {
	supportedPlatforms []*api.Platform
}

// SetTask returns true when the filter is enabled for a given task.
func (f *PlatformFilter) SetTask(t *api.Task) bool {
	placement := t.Spec.Placement
	if placement != nil {
		// Copy the platform information.
		f.supportedPlatforms = placement.Platforms
		if len(placement.Platforms) > 0 {
			return true
		}
	}
	return false
}

// Check returns true if the task can be scheduled into the given node.
func (f *PlatformFilter) Check(n *NodeInfo) bool {
	// If the supportedPlatforms field is empty, then either it wasn't
	// provided or there are no constraints.
	if len(f.supportedPlatforms) == 0 {
		return true
	}
	// Check if the node's platform is in the supported set.
	if n.Description != nil {
		if nodePlatform := n.Description.Platform; nodePlatform != nil {
			for _, p := range f.supportedPlatforms {
				if f.platformEqual(*p, *nodePlatform) {
					return true
				}
			}
		}
	}
	return false
}

func (f *PlatformFilter) platformEqual(imgPlatform, nodePlatform api.Platform) bool {
	// Normalize "x86_64" architectures to "amd64".
	if imgPlatform.Architecture == "x86_64" {
		imgPlatform.Architecture = "amd64"
	}
	if nodePlatform.Architecture == "x86_64" {
		nodePlatform.Architecture = "amd64"
	}

	// Normalize "aarch64" architectures to "arm64".
	if imgPlatform.Architecture == "aarch64" {
		imgPlatform.Architecture = "arm64"
	}
	if nodePlatform.Architecture == "aarch64" {
		nodePlatform.Architecture = "arm64"
	}

	// An empty Architecture or OS on the image platform acts as a wildcard.
	return (imgPlatform.Architecture == "" || imgPlatform.Architecture == nodePlatform.Architecture) &&
		(imgPlatform.OS == "" || imgPlatform.OS == nodePlatform.OS)
}

// Explain returns an explanation of a failure.
func (f *PlatformFilter) Explain(nodes int) string {
	if nodes == 1 {
		return "unsupported platform on 1 node"
	}
	return fmt.Sprintf("unsupported platform on %d nodes", nodes)
}
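// Illustrative sketch (hypothetical helper): platformEqual normalizes
// "x86_64" to "amd64" and "aarch64" to "arm64" before comparing, and an
// empty Architecture or OS on the task's platform acts as a wildcard.
func examplePlatformMatches() []bool {
	var f PlatformFilter
	linuxAmd64 := api.Platform{Architecture: "amd64", OS: "linux"}
	return []bool{
		f.platformEqual(linuxAmd64, api.Platform{Architecture: "x86_64", OS: "linux"}), // true: arch normalized
		f.platformEqual(api.Platform{OS: "linux"}, linuxAmd64),                         // true: empty arch is a wildcard
		f.platformEqual(linuxAmd64, api.Platform{Architecture: "arm64", OS: "linux"}),  // false: arch mismatch
	}
}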
// HostPortFilter checks that the node has a specific port available.
type HostPortFilter struct {
	t *api.Task
}

// SetTask returns true when the filter is enabled for a given task.
func (f *HostPortFilter) SetTask(t *api.Task) bool {
	if t.Endpoint != nil {
		for _, port := range t.Endpoint.Ports {
			if port.PublishMode == api.PublishModeHost && port.PublishedPort != 0 {
				f.t = t
				return true
			}
		}
	}

	return false
}

// Check returns true if the task can be scheduled into the given node.
func (f *HostPortFilter) Check(n *NodeInfo) bool {
	for _, port := range f.t.Endpoint.Ports {
		if port.PublishMode == api.PublishModeHost && port.PublishedPort != 0 {
			portSpec := hostPortSpec{protocol: port.Protocol, publishedPort: port.PublishedPort}
			if _, ok := n.usedHostPorts[portSpec]; ok {
				return false
			}
		}
	}

	return true
}

// Explain returns an explanation of a failure.
func (f *HostPortFilter) Explain(nodes int) string {
	if nodes == 1 {
		return "host-mode port already in use on 1 node"
	}
	return fmt.Sprintf("host-mode port already in use on %d nodes", nodes)
}

// MaxReplicasFilter selects only nodes that do not exceed the task's
// max-replicas-per-node limit.
type MaxReplicasFilter struct {
	t *api.Task
}

// SetTask returns true when the task sets a max-replicas-per-node limit
// greater than zero.
func (f *MaxReplicasFilter) SetTask(t *api.Task) bool {
	if t.Spec.Placement != nil && t.Spec.Placement.MaxReplicas > 0 {
		f.t = t
		return true
	}

	return false
}

// Check returns true if the number of active (assigned or pre-assigned)
// tasks for this service on the current node is below the MaxReplicas limit.
func (f *MaxReplicasFilter) Check(n *NodeInfo) bool {
	return uint64(n.ActiveTasksCountByService[f.t.ServiceID]) < f.t.Spec.Placement.MaxReplicas
}

// Explain returns an explanation of a failure.
func (f *MaxReplicasFilter) Explain(nodes int) string {
	return "max replicas per node limit exceeded"
}
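// Illustrative sketch (hypothetical helper): with MaxReplicas set to 2, a
// node already running two active tasks for the task's service fails the
// check, while a node running one still passes.
func nodeBelowReplicaLimit(n *NodeInfo, t *api.Task) bool {
	var f MaxReplicasFilter
	if !f.SetTask(t) {
		// The task sets no per-node replica limit, so any node passes.
		return true
	}
	return f.Check(n)
}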