github.com/vmware/govmomi@v0.37.2/simulator/cluster_compute_resource.go

/*
Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package simulator

import (
	"log"
	"math/rand"
	"sync/atomic"
	"time"

	"github.com/google/uuid"

	"github.com/vmware/govmomi/simulator/esx"
	"github.com/vmware/govmomi/vim25/methods"
	"github.com/vmware/govmomi/vim25/mo"
	"github.com/vmware/govmomi/vim25/soap"
	"github.com/vmware/govmomi/vim25/types"
)

type ClusterComputeResource struct {
	mo.ClusterComputeResource

	ruleKey int32
}

func (c *ClusterComputeResource) RenameTask(ctx *Context, req *types.Rename_Task) soap.HasFault {
	return RenameTask(ctx, c, req)
}

type addHost struct {
	*ClusterComputeResource

	req *types.AddHost_Task
}

func (add *addHost) Run(task *Task) (types.AnyType, types.BaseMethodFault) {
	spec := add.req.Spec

	if spec.HostName == "" {
		return nil, &types.NoHost{}
	}

	cr := add.ClusterComputeResource
	template := esx.HostSystem

	if h := task.ctx.Map.FindByName(spec.UserName, cr.Host); h != nil {
		// "clone" an existing host from the inventory
		template = h.(*HostSystem).HostSystem
		template.Vm = nil
	} else {
		template.Network = cr.Network[:1] // VM Network
	}

	host := NewHostSystem(template)
	host.configure(task.ctx, spec, add.req.AsConnected)

	task.ctx.Map.PutEntity(cr, task.ctx.Map.NewEntity(host))
	host.Summary.Host = &host.Self
	host.Config.Host = host.Self

	task.ctx.Map.WithLock(task.ctx, *cr.EnvironmentBrowser, func() {
		eb := task.ctx.Map.Get(*cr.EnvironmentBrowser).(*EnvironmentBrowser)
		eb.addHost(task.ctx, host.Self)
	})

	cr.Host = append(cr.Host, host.Reference())
	addComputeResource(cr.Summary.GetComputeResourceSummary(), host)

	return host.Reference(), nil
}

func (c *ClusterComputeResource) AddHostTask(ctx *Context, add *types.AddHost_Task) soap.HasFault {
	return &methods.AddHost_TaskBody{
		Res: &types.AddHost_TaskResponse{
			Returnval: NewTask(&addHost{c, add}).Run(ctx),
		},
	}
}

func (c *ClusterComputeResource) update(cfg *types.ClusterConfigInfoEx, cspec *types.ClusterConfigSpecEx) types.BaseMethodFault {
	if cspec.DasConfig != nil {
		if val := cspec.DasConfig.Enabled; val != nil {
			cfg.DasConfig.Enabled = val
		}
		if val := cspec.DasConfig.AdmissionControlEnabled; val != nil {
			cfg.DasConfig.AdmissionControlEnabled = val
		}
	}
	if cspec.DrsConfig != nil {
		if val := cspec.DrsConfig.Enabled; val != nil {
			cfg.DrsConfig.Enabled = val
		}
	}

	return nil
}

func (c *ClusterComputeResource) updateRules(cfg *types.ClusterConfigInfoEx, cspec *types.ClusterConfigSpecEx) types.BaseMethodFault {
	for _, spec := range cspec.RulesSpec {
		var i int
		exists := false

		match := func(info types.BaseClusterRuleInfo) bool {
			return info.GetClusterRuleInfo().Name ==
				spec.Info.GetClusterRuleInfo().Name
		}

		if spec.Operation == types.ArrayUpdateOperationRemove {
			match = func(rule types.BaseClusterRuleInfo) bool {
				return rule.GetClusterRuleInfo().Key == spec.ArrayUpdateSpec.RemoveKey.(int32)
			}
		}

		for i = range cfg.Rule {
			if match(cfg.Rule[i].GetClusterRuleInfo()) {
				exists = true
				break
			}
		}

		switch spec.Operation {
		case types.ArrayUpdateOperationAdd:
			if exists {
				return new(types.InvalidArgument)
			}
			info := spec.Info.GetClusterRuleInfo()
			info.Key = atomic.AddInt32(&c.ruleKey, 1)
			info.RuleUuid = uuid.New().String()
			cfg.Rule = append(cfg.Rule, spec.Info)
		case types.ArrayUpdateOperationEdit:
			if !exists {
				return new(types.InvalidArgument)
			}
			cfg.Rule[i] = spec.Info
		case types.ArrayUpdateOperationRemove:
			if !exists {
				return new(types.InvalidArgument)
			}
			cfg.Rule = append(cfg.Rule[:i], cfg.Rule[i+1:]...)
		}
	}

	return nil
}

func (c *ClusterComputeResource) updateGroups(cfg *types.ClusterConfigInfoEx, cspec *types.ClusterConfigSpecEx) types.BaseMethodFault {
	for _, spec := range cspec.GroupSpec {
		var i int
		exists := false

		match := func(info types.BaseClusterGroupInfo) bool {
			return info.GetClusterGroupInfo().Name == spec.Info.GetClusterGroupInfo().Name
		}

		if spec.Operation == types.ArrayUpdateOperationRemove {
			match = func(info types.BaseClusterGroupInfo) bool {
				return info.GetClusterGroupInfo().Name == spec.ArrayUpdateSpec.RemoveKey.(string)
			}
		}

		for i = range cfg.Group {
			if match(cfg.Group[i].GetClusterGroupInfo()) {
				exists = true
				break
			}
		}

		switch spec.Operation {
		case types.ArrayUpdateOperationAdd:
			if exists {
				return new(types.InvalidArgument)
			}
			cfg.Group = append(cfg.Group, spec.Info)
		case types.ArrayUpdateOperationEdit:
			if !exists {
				return new(types.InvalidArgument)
			}
			cfg.Group[i] = spec.Info
		case types.ArrayUpdateOperationRemove:
			if !exists {
				return new(types.InvalidArgument)
			}
			cfg.Group = append(cfg.Group[:i], cfg.Group[i+1:]...)
		}
	}

	return nil
}

func (c *ClusterComputeResource) updateOverridesDAS(cfg *types.ClusterConfigInfoEx, cspec *types.ClusterConfigSpecEx) types.BaseMethodFault {
	for _, spec := range cspec.DasVmConfigSpec {
		var i int
		var key types.ManagedObjectReference
		exists := false

		if spec.Operation == types.ArrayUpdateOperationRemove {
			key = spec.RemoveKey.(types.ManagedObjectReference)
		} else {
			key = spec.Info.Key
		}

		for i = range cfg.DasVmConfig {
			if cfg.DasVmConfig[i].Key == key {
				exists = true
				break
			}
		}

		switch spec.Operation {
		case types.ArrayUpdateOperationAdd:
			if exists {
				return new(types.InvalidArgument)
			}
			cfg.DasVmConfig = append(cfg.DasVmConfig, *spec.Info)
		case types.ArrayUpdateOperationEdit:
			if !exists {
				return new(types.InvalidArgument)
			}
			src := spec.Info.DasSettings
			if src == nil {
				return new(types.InvalidArgument)
			}
			dst := cfg.DasVmConfig[i].DasSettings
			if src.RestartPriority != "" {
				dst.RestartPriority = src.RestartPriority
			}
			if src.RestartPriorityTimeout != 0 {
				dst.RestartPriorityTimeout = src.RestartPriorityTimeout
			}
		case types.ArrayUpdateOperationRemove:
			if !exists {
				return new(types.InvalidArgument)
			}
			cfg.DasVmConfig = append(cfg.DasVmConfig[:i], cfg.DasVmConfig[i+1:]...)
		}
	}

	return nil
}

func (c *ClusterComputeResource) updateOverridesDRS(cfg *types.ClusterConfigInfoEx, cspec *types.ClusterConfigSpecEx) types.BaseMethodFault {
	for _, spec := range cspec.DrsVmConfigSpec {
		var i int
		var key types.ManagedObjectReference
		exists := false

		if spec.Operation == types.ArrayUpdateOperationRemove {
			key = spec.RemoveKey.(types.ManagedObjectReference)
		} else {
			key = spec.Info.Key
		}

		for i = range cfg.DrsVmConfig {
			if cfg.DrsVmConfig[i].Key == key {
				exists = true
				break
			}
		}

		switch spec.Operation {
		case types.ArrayUpdateOperationAdd:
			if exists {
				return new(types.InvalidArgument)
			}
			cfg.DrsVmConfig = append(cfg.DrsVmConfig, *spec.Info)
		case types.ArrayUpdateOperationEdit:
			if !exists {
				return new(types.InvalidArgument)
			}
			if spec.Info.Enabled != nil {
				cfg.DrsVmConfig[i].Enabled = spec.Info.Enabled
			}
			if spec.Info.Behavior != "" {
				cfg.DrsVmConfig[i].Behavior = spec.Info.Behavior
			}
		case types.ArrayUpdateOperationRemove:
			if !exists {
				return new(types.InvalidArgument)
			}
			cfg.DrsVmConfig = append(cfg.DrsVmConfig[:i], cfg.DrsVmConfig[i+1:]...)
		}
	}

	return nil
}

func (c *ClusterComputeResource) updateOverridesVmOrchestration(cfg *types.ClusterConfigInfoEx, cspec *types.ClusterConfigSpecEx) types.BaseMethodFault {
	for _, spec := range cspec.VmOrchestrationSpec {
		var i int
		var key types.ManagedObjectReference
		exists := false

		if spec.Operation == types.ArrayUpdateOperationRemove {
			key = spec.RemoveKey.(types.ManagedObjectReference)
		} else {
			key = spec.Info.Vm
		}

		for i = range cfg.VmOrchestration {
			if cfg.VmOrchestration[i].Vm == key {
				exists = true
				break
			}
		}

		switch spec.Operation {
		case types.ArrayUpdateOperationAdd:
			if exists {
				return new(types.InvalidArgument)
			}
			cfg.VmOrchestration = append(cfg.VmOrchestration, *spec.Info)
		case types.ArrayUpdateOperationEdit:
			if !exists {
				return new(types.InvalidArgument)
			}
			if spec.Info.VmReadiness.ReadyCondition != "" {
				cfg.VmOrchestration[i].VmReadiness.ReadyCondition = spec.Info.VmReadiness.ReadyCondition
			}
			if spec.Info.VmReadiness.PostReadyDelay != 0 {
				cfg.VmOrchestration[i].VmReadiness.PostReadyDelay = spec.Info.VmReadiness.PostReadyDelay
			}
		case types.ArrayUpdateOperationRemove:
			if !exists {
				return new(types.InvalidArgument)
			}
			cfg.VmOrchestration = append(cfg.VmOrchestration[:i], cfg.VmOrchestration[i+1:]...)
		}
	}

	return nil
}

func (c *ClusterComputeResource) ReconfigureComputeResourceTask(ctx *Context, req *types.ReconfigureComputeResource_Task) soap.HasFault {
	task := CreateTask(c, "reconfigureCluster", func(*Task) (types.AnyType, types.BaseMethodFault) {
		spec, ok := req.Spec.(*types.ClusterConfigSpecEx)
		if !ok {
			return nil, new(types.InvalidArgument)
		}

		updates := []func(*types.ClusterConfigInfoEx, *types.ClusterConfigSpecEx) types.BaseMethodFault{
			c.update,
			c.updateRules,
			c.updateGroups,
			c.updateOverridesDAS,
			c.updateOverridesDRS,
			c.updateOverridesVmOrchestration,
		}

		for _, update := range updates {
			if err := update(c.ConfigurationEx.(*types.ClusterConfigInfoEx), spec); err != nil {
				return nil, err
			}
		}

		return nil, nil
	})

	return &methods.ReconfigureComputeResource_TaskBody{
		Res: &types.ReconfigureComputeResource_TaskResponse{
			Returnval: task.Run(ctx),
		},
	}
}

func (c *ClusterComputeResource) MoveIntoTask(ctx *Context, req *types.MoveInto_Task) soap.HasFault {
	task := CreateTask(c, "moveInto", func(*Task) (types.AnyType, types.BaseMethodFault) {
		for _, ref := range req.Host {
			host := ctx.Map.Get(ref).(*HostSystem)

			if *host.Parent == c.Self {
				return nil, new(types.DuplicateName) // host already in this cluster
			}

			switch parent := ctx.Map.Get(*host.Parent).(type) {
			case *ClusterComputeResource:
				if !host.Runtime.InMaintenanceMode {
					return nil, new(types.InvalidState)
				}

				RemoveReference(&parent.Host, ref)
			case *mo.ComputeResource:
				ctx.Map.Remove(ctx, parent.Self)
			}

			c.Host = append(c.Host, ref)
			host.Parent = &c.Self
		}

		return nil, nil
	})

	return &methods.MoveInto_TaskBody{
		Res: &types.MoveInto_TaskResponse{
			Returnval: task.Run(ctx),
		},
	}
}

func (c *ClusterComputeResource) PlaceVm(ctx *Context, req *types.PlaceVm) soap.HasFault {
	body := new(methods.PlaceVmBody)

	if len(c.Host) == 0 {
		body.Fault_ = Fault("", new(types.InvalidState))
		return body
	}

	res := types.ClusterRecommendation{
		Key:        "1",
		Type:       "V1",
		Time:       time.Now(),
		Rating:     1,
		Reason:     string(types.RecommendationReasonCodeXvmotionPlacement),
		ReasonText: string(types.RecommendationReasonCodeXvmotionPlacement),
		Target:     &c.Self,
	}

	hosts := req.PlacementSpec.Hosts
	if len(hosts) == 0 {
		hosts = c.Host
	}

	datastores := req.PlacementSpec.Datastores
	if len(datastores) == 0 {
		datastores = c.Datastore
	}

	// Pick a random candidate from the allowed host and datastore lists,
	// bounding rand.Intn by the candidate slices themselves so a caller-supplied
	// Hosts/Datastores list cannot cause an out-of-range index.
	spec := &types.VirtualMachineRelocateSpec{
		Datastore: &datastores[rand.Intn(len(datastores))],
		Host:      &hosts[rand.Intn(len(hosts))],
		Pool:      c.ResourcePool,
	}

	switch types.PlacementSpecPlacementType(req.PlacementSpec.PlacementType) {
	case types.PlacementSpecPlacementTypeClone, types.PlacementSpecPlacementTypeCreate:
		res.Action = append(res.Action, &types.PlacementAction{
			Vm:           req.PlacementSpec.Vm,
			TargetHost:   spec.Host,
			RelocateSpec: spec,
		})
	default:
		log.Printf("unsupported placement type: %s", req.PlacementSpec.PlacementType)
		body.Fault_ = Fault("", new(types.NotSupported))
		return body
	}

	body.Res = &types.PlaceVmResponse{
		Returnval: types.PlacementResult{
			Recommendations: []types.ClusterRecommendation{res},
		},
	}

	return body
}

func CreateClusterComputeResource(ctx *Context, f *Folder, name string, spec types.ClusterConfigSpecEx) (*ClusterComputeResource, types.BaseMethodFault) {
	if e := ctx.Map.FindByName(name, f.ChildEntity); e != nil {
		return nil, &types.DuplicateName{
			Name:   e.Entity().Name,
			Object: e.Reference(),
		}
	}

	cluster := &ClusterComputeResource{}
	cluster.EnvironmentBrowser = newEnvironmentBrowser(ctx)
	cluster.Name = name
	cluster.Network = ctx.Map.getEntityDatacenter(f).defaultNetwork()
	cluster.Summary = &types.ClusterComputeResourceSummary{
		UsageSummary: new(types.ClusterUsageSummary),
	}

	config := &types.ClusterConfigInfoEx{}
	cluster.ConfigurationEx = config

	config.VmSwapPlacement = string(types.VirtualMachineConfigInfoSwapPlacementTypeVmDirectory)
	config.DrsConfig.Enabled = types.NewBool(true)

	pool := NewResourcePool()
	ctx.Map.PutEntity(cluster, ctx.Map.NewEntity(pool))
	cluster.ResourcePool = &pool.Self

	folderPutChild(ctx, &f.Folder, cluster)
	pool.Owner = cluster.Self

	return cluster, nil
}
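A minimal client-side sketch of exercising the PlaceVm handler above, shown for illustration only. It would live in a separate _test file (package simulator_test), and it assumes the default simulator.Test VPX model, whose inventory includes a cluster named DC0_C0 and a VM named DC0_C0_RP0_VM0; those names are not defined in this file.

package simulator_test

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/find"
	"github.com/vmware/govmomi/simulator"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/methods"
	"github.com/vmware/govmomi/vim25/types"
)

func ExampleClusterComputeResource_placeVm() {
	simulator.Test(func(ctx context.Context, c *vim25.Client) {
		finder := find.NewFinder(c)

		dc, err := finder.DefaultDatacenter(ctx)
		if err != nil {
			panic(err)
		}
		finder.SetDatacenter(dc)

		// DC0_C0 and DC0_C0_RP0_VM0 are the default VPX model names (assumption).
		cluster, err := finder.ClusterComputeResource(ctx, "DC0_C0")
		if err != nil {
			panic(err)
		}

		vm, err := finder.VirtualMachine(ctx, "DC0_C0_RP0_VM0")
		if err != nil {
			panic(err)
		}
		vmRef := vm.Reference()

		// Ask the simulated cluster for a clone-placement recommendation.
		res, err := methods.PlaceVm(ctx, c, &types.PlaceVm{
			This: cluster.Reference(),
			PlacementSpec: types.PlacementSpec{
				Vm:            &vmRef,
				PlacementType: string(types.PlacementSpecPlacementTypeClone),
			},
		})
		if err != nil {
			panic(err)
		}

		// The simulator returns a single recommendation with a random host
		// and datastore from the cluster.
		fmt.Println(len(res.Returnval.Recommendations))
	})
	// Output: 1
}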