github.com/ferranbt/nomad@v0.9.3-0.20190607002617-85c449b7667c/plugins/drivers/utils.go

package drivers

import (
	"time"

	"github.com/golang/protobuf/ptypes"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/drivers/proto"
)

var taskStateToProtoMap = map[TaskState]proto.TaskState{
	TaskStateUnknown: proto.TaskState_UNKNOWN,
	TaskStateRunning: proto.TaskState_RUNNING,
	TaskStateExited:  proto.TaskState_EXITED,
}

var taskStateFromProtoMap = map[proto.TaskState]TaskState{
	proto.TaskState_UNKNOWN: TaskStateUnknown,
	proto.TaskState_RUNNING: TaskStateRunning,
	proto.TaskState_EXITED:  TaskStateExited,
}

func healthStateToProto(health HealthState) proto.FingerprintResponse_HealthState {
	switch health {
	case HealthStateUndetected:
		return proto.FingerprintResponse_UNDETECTED
	case HealthStateUnhealthy:
		return proto.FingerprintResponse_UNHEALTHY
	case HealthStateHealthy:
		return proto.FingerprintResponse_HEALTHY
	}
	return proto.FingerprintResponse_UNDETECTED
}

func healthStateFromProto(pb proto.FingerprintResponse_HealthState) HealthState {
	switch pb {
	case proto.FingerprintResponse_UNDETECTED:
		return HealthStateUndetected
	case proto.FingerprintResponse_UNHEALTHY:
		return HealthStateUnhealthy
	case proto.FingerprintResponse_HEALTHY:
		return HealthStateHealthy
	}
	return HealthStateUndetected
}

func taskConfigFromProto(pb *proto.TaskConfig) *TaskConfig {
	if pb == nil {
		return &TaskConfig{}
	}
	return &TaskConfig{
		ID:              pb.Id,
		JobName:         pb.JobName,
		TaskGroupName:   pb.TaskGroupName,
		Name:            pb.Name,
		Env:             pb.Env,
		DeviceEnv:       pb.DeviceEnv,
		rawDriverConfig: pb.MsgpackDriverConfig,
		Resources:       ResourcesFromProto(pb.Resources),
		Devices:         DevicesFromProto(pb.Devices),
		Mounts:          MountsFromProto(pb.Mounts),
		User:            pb.User,
		AllocDir:        pb.AllocDir,
		StdoutPath:      pb.StdoutPath,
		StderrPath:      pb.StderrPath,
		AllocID:         pb.AllocId,
	}
}

func taskConfigToProto(cfg *TaskConfig) *proto.TaskConfig {
	if cfg == nil {
		return &proto.TaskConfig{}
	}
	pb := &proto.TaskConfig{
		Id:                  cfg.ID,
		JobName:             cfg.JobName,
		TaskGroupName:       cfg.TaskGroupName,
		Name:                cfg.Name,
		Env:                 cfg.Env,
		DeviceEnv:           cfg.DeviceEnv,
		Resources:           ResourcesToProto(cfg.Resources),
		Devices:             DevicesToProto(cfg.Devices),
		Mounts:              MountsToProto(cfg.Mounts),
		User:                cfg.User,
		AllocDir:            cfg.AllocDir,
		MsgpackDriverConfig: cfg.rawDriverConfig,
		StdoutPath:          cfg.StdoutPath,
		StderrPath:          cfg.StderrPath,
		AllocId:             cfg.AllocID,
	}
	return pb
}
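// Illustrative sketch (not part of the upstream file): taskConfigToProto and
// taskConfigFromProto are effectively inverses for the fields they copy, so a
// config sent over the plugin boundary and decoded again should carry the same
// identifying fields. The helper name and the field comparison below are
// hypothetical and exist only to demonstrate that expectation.
func exampleTaskConfigRoundTrip(cfg *TaskConfig) bool {
	if cfg == nil {
		// Both converters map nil/empty to an empty struct.
		return true
	}
	restored := taskConfigFromProto(taskConfigToProto(cfg))
	// Compare a few representative scalar fields; nested Resources, Devices,
	// and Mounts are converted by the same helpers defined later in this file.
	return restored.ID == cfg.ID &&
		restored.Name == cfg.Name &&
		restored.AllocID == cfg.AllocID
}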
// ResourcesFromProto converts a proto Resources message into the driver's
// Resources struct. A nil input yields an empty, non-nil Resources.
func ResourcesFromProto(pb *proto.Resources) *Resources {
	var r Resources
	if pb == nil {
		return &r
	}

	if pb.AllocatedResources != nil {
		r.NomadResources = &structs.AllocatedTaskResources{}

		if pb.AllocatedResources.Cpu != nil {
			r.NomadResources.Cpu.CpuShares = pb.AllocatedResources.Cpu.CpuShares
		}

		if pb.AllocatedResources.Memory != nil {
			r.NomadResources.Memory.MemoryMB = pb.AllocatedResources.Memory.MemoryMb
		}

		for _, network := range pb.AllocatedResources.Networks {
			var n structs.NetworkResource
			n.Device = network.Device
			n.IP = network.Ip
			n.CIDR = network.Cidr
			n.MBits = int(network.Mbits)
			for _, port := range network.ReservedPorts {
				n.ReservedPorts = append(n.ReservedPorts, structs.Port{
					Label: port.Label,
					Value: int(port.Value),
				})
			}
			for _, port := range network.DynamicPorts {
				n.DynamicPorts = append(n.DynamicPorts, structs.Port{
					Label: port.Label,
					Value: int(port.Value),
				})
			}
			r.NomadResources.Networks = append(r.NomadResources.Networks, &n)
		}
	}

	if pb.LinuxResources != nil {
		r.LinuxResources = &LinuxResources{
			CPUPeriod:        pb.LinuxResources.CpuPeriod,
			CPUQuota:         pb.LinuxResources.CpuQuota,
			CPUShares:        pb.LinuxResources.CpuShares,
			MemoryLimitBytes: pb.LinuxResources.MemoryLimitBytes,
			OOMScoreAdj:      pb.LinuxResources.OomScoreAdj,
			CpusetCPUs:       pb.LinuxResources.CpusetCpus,
			CpusetMems:       pb.LinuxResources.CpusetMems,
			PercentTicks:     pb.LinuxResources.PercentTicks,
		}
	}

	return &r
}

// ResourcesToProto converts driver Resources to their proto representation.
// A nil input yields nil.
func ResourcesToProto(r *Resources) *proto.Resources {
	if r == nil {
		return nil
	}

	var pb proto.Resources
	if r.NomadResources != nil {
		pb.AllocatedResources = &proto.AllocatedTaskResources{
			Cpu: &proto.AllocatedCpuResources{
				CpuShares: r.NomadResources.Cpu.CpuShares,
			},
			Memory: &proto.AllocatedMemoryResources{
				MemoryMb: r.NomadResources.Memory.MemoryMB,
			},
			Networks: make([]*proto.NetworkResource, len(r.NomadResources.Networks)),
		}

		for i, network := range r.NomadResources.Networks {
			var n proto.NetworkResource
			n.Device = network.Device
			n.Ip = network.IP
			n.Cidr = network.CIDR
			n.Mbits = int32(network.MBits)
			n.ReservedPorts = []*proto.NetworkPort{}
			for _, port := range network.ReservedPorts {
				n.ReservedPorts = append(n.ReservedPorts, &proto.NetworkPort{
					Label: port.Label,
					Value: int32(port.Value),
				})
			}
			for _, port := range network.DynamicPorts {
				n.DynamicPorts = append(n.DynamicPorts, &proto.NetworkPort{
					Label: port.Label,
					Value: int32(port.Value),
				})
			}
			pb.AllocatedResources.Networks[i] = &n
		}
	}

	if r.LinuxResources != nil {
		pb.LinuxResources = &proto.LinuxResources{
			CpuPeriod:        r.LinuxResources.CPUPeriod,
			CpuQuota:         r.LinuxResources.CPUQuota,
			CpuShares:        r.LinuxResources.CPUShares,
			MemoryLimitBytes: r.LinuxResources.MemoryLimitBytes,
			OomScoreAdj:      r.LinuxResources.OOMScoreAdj,
			CpusetCpus:       r.LinuxResources.CpusetCPUs,
			CpusetMems:       r.LinuxResources.CpusetMems,
			PercentTicks:     r.LinuxResources.PercentTicks,
		}
	}

	return &pb
}

// DevicesFromProto converts a slice of proto Devices to DeviceConfigs,
// preserving nil.
func DevicesFromProto(devices []*proto.Device) []*DeviceConfig {
	if devices == nil {
		return nil
	}

	out := make([]*DeviceConfig, len(devices))
	for i, d := range devices {
		out[i] = DeviceFromProto(d)
	}

	return out
}

// DeviceFromProto converts a single proto Device to a DeviceConfig.
func DeviceFromProto(device *proto.Device) *DeviceConfig {
	if device == nil {
		return nil
	}

	return &DeviceConfig{
		TaskPath:    device.TaskPath,
		HostPath:    device.HostPath,
		Permissions: device.CgroupPermissions,
	}
}

// MountsFromProto converts a slice of proto Mounts to MountConfigs,
// preserving nil.
func MountsFromProto(mounts []*proto.Mount) []*MountConfig {
	if mounts == nil {
		return nil
	}

	out := make([]*MountConfig, len(mounts))
	for i, m := range mounts {
		out[i] = MountFromProto(m)
	}

	return out
}

// MountFromProto converts a single proto Mount to a MountConfig.
func MountFromProto(mount *proto.Mount) *MountConfig {
	if mount == nil {
		return nil
	}

	return &MountConfig{
		TaskPath: mount.TaskPath,
		HostPath: mount.HostPath,
		Readonly: mount.Readonly,
	}
}
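// Illustrative sketch (not part of the upstream file): the slice converters
// above deliberately preserve nil-ness — a nil slice stays nil rather than
// becoming an empty slice — and the per-element helpers return nil for nil
// input. The hypothetical helper below follows the same nil-in, nil-out
// convention while rendering mounts as docker-style bind strings.
func exampleMountLabels(mounts []*MountConfig) []string {
	if mounts == nil {
		return nil // mirror the nil-preserving pattern used above
	}
	labels := make([]string, 0, len(mounts))
	for _, m := range mounts {
		if m == nil {
			continue
		}
		mode := "rw"
		if m.Readonly {
			mode = "ro"
		}
		labels = append(labels, m.HostPath+":"+m.TaskPath+":"+mode)
	}
	return labels
}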
// DevicesToProto converts a slice of DeviceConfigs to proto Devices,
// preserving nil.
func DevicesToProto(devices []*DeviceConfig) []*proto.Device {
	if devices == nil {
		return nil
	}

	out := make([]*proto.Device, len(devices))
	for i, d := range devices {
		out[i] = DeviceToProto(d)
	}

	return out
}

// DeviceToProto converts a single DeviceConfig to a proto Device.
func DeviceToProto(device *DeviceConfig) *proto.Device {
	if device == nil {
		return nil
	}

	return &proto.Device{
		TaskPath:          device.TaskPath,
		HostPath:          device.HostPath,
		CgroupPermissions: device.Permissions,
	}
}

// MountsToProto converts a slice of MountConfigs to proto Mounts,
// preserving nil.
func MountsToProto(mounts []*MountConfig) []*proto.Mount {
	if mounts == nil {
		return nil
	}

	out := make([]*proto.Mount, len(mounts))
	for i, m := range mounts {
		out[i] = MountToProto(m)
	}

	return out
}

// MountToProto converts a single MountConfig to a proto Mount.
func MountToProto(mount *MountConfig) *proto.Mount {
	if mount == nil {
		return nil
	}

	return &proto.Mount{
		TaskPath: mount.TaskPath,
		HostPath: mount.HostPath,
		Readonly: mount.Readonly,
	}
}

func taskHandleFromProto(pb *proto.TaskHandle) *TaskHandle {
	if pb == nil {
		return &TaskHandle{}
	}
	return &TaskHandle{
		Version:     int(pb.Version),
		Config:      taskConfigFromProto(pb.Config),
		State:       taskStateFromProtoMap[pb.State],
		DriverState: pb.DriverState,
	}
}

func taskHandleToProto(handle *TaskHandle) *proto.TaskHandle {
	return &proto.TaskHandle{
		Version:     int32(handle.Version),
		Config:      taskConfigToProto(handle.Config),
		State:       taskStateToProtoMap[handle.State],
		DriverState: handle.DriverState,
	}
}

func exitResultToProto(result *ExitResult) *proto.ExitResult {
	if result == nil {
		return &proto.ExitResult{}
	}
	return &proto.ExitResult{
		ExitCode:  int32(result.ExitCode),
		Signal:    int32(result.Signal),
		OomKilled: result.OOMKilled,
	}
}

func exitResultFromProto(pb *proto.ExitResult) *ExitResult {
	return &ExitResult{
		ExitCode:  int(pb.ExitCode),
		Signal:    int(pb.Signal),
		OOMKilled: pb.OomKilled,
	}
}

func taskStatusToProto(status *TaskStatus) (*proto.TaskStatus, error) {
	started, err := ptypes.TimestampProto(status.StartedAt)
	if err != nil {
		return nil, err
	}
	completed, err := ptypes.TimestampProto(status.CompletedAt)
	if err != nil {
		return nil, err
	}
	return &proto.TaskStatus{
		Id:          status.ID,
		Name:        status.Name,
		State:       taskStateToProtoMap[status.State],
		StartedAt:   started,
		CompletedAt: completed,
		Result:      exitResultToProto(status.ExitResult),
	}, nil
}

func taskStatusFromProto(pb *proto.TaskStatus) (*TaskStatus, error) {
	started, err := ptypes.Timestamp(pb.StartedAt)
	if err != nil {
		return nil, err
	}

	completed, err := ptypes.Timestamp(pb.CompletedAt)
	if err != nil {
		return nil, err
	}

	return &TaskStatus{
		ID:          pb.Id,
		Name:        pb.Name,
		State:       taskStateFromProtoMap[pb.State],
		StartedAt:   started,
		CompletedAt: completed,
		ExitResult:  exitResultFromProto(pb.Result),
	}, nil
}

// TaskStatsToProto converts TaskResourceUsage to a proto TaskStats, encoding
// the Unix-nanosecond timestamp as a protobuf Timestamp.
func TaskStatsToProto(stats *TaskResourceUsage) (*proto.TaskStats, error) {
	timestamp, err := ptypes.TimestampProto(time.Unix(0, stats.Timestamp))
	if err != nil {
		return nil, err
	}

	pids := map[string]*proto.TaskResourceUsage{}
	for pid, ru := range stats.Pids {
		pids[pid] = resourceUsageToProto(ru)
	}

	return &proto.TaskStats{
		Timestamp:          timestamp,
		AggResourceUsage:   resourceUsageToProto(stats.ResourceUsage),
		ResourceUsageByPid: pids,
	}, nil
}
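// Illustrative sketch (not part of the upstream file): TaskResourceUsage
// carries its Timestamp as Unix nanoseconds, so the proto round trip goes
// through time.Unix(0, ns) on encode and Timestamp.UnixNano() on decode
// (see TaskStatsFromProto below). The hypothetical check here exercises that
// symmetry; stats is assumed to be fully populated (non-nil ResourceUsage
// with CpuStats and MemoryStats set), as the converters do not guard that.
func exampleStatsTimestampRoundTrip(stats *TaskResourceUsage) (bool, error) {
	pb, err := TaskStatsToProto(stats)
	if err != nil {
		return false, err
	}
	restored, err := TaskStatsFromProto(pb)
	if err != nil {
		return false, err
	}
	// Nanosecond precision survives the protobuf Timestamp encoding.
	return restored.Timestamp == stats.Timestamp, nil
}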
// TaskStatsFromProto converts a proto TaskStats back to TaskResourceUsage,
// restoring the timestamp as Unix nanoseconds.
func TaskStatsFromProto(pb *proto.TaskStats) (*TaskResourceUsage, error) {
	timestamp, err := ptypes.Timestamp(pb.Timestamp)
	if err != nil {
		return nil, err
	}

	pids := map[string]*ResourceUsage{}
	for pid, ru := range pb.ResourceUsageByPid {
		pids[pid] = resourceUsageFromProto(ru)
	}

	stats := &TaskResourceUsage{
		Timestamp:     timestamp.UnixNano(),
		ResourceUsage: resourceUsageFromProto(pb.AggResourceUsage),
		Pids:          pids,
	}

	return stats, nil
}

func resourceUsageToProto(ru *ResourceUsage) *proto.TaskResourceUsage {
	cpu := &proto.CPUUsage{
		MeasuredFields:   cpuUsageMeasuredFieldsToProto(ru.CpuStats.Measured),
		SystemMode:       ru.CpuStats.SystemMode,
		UserMode:         ru.CpuStats.UserMode,
		TotalTicks:       ru.CpuStats.TotalTicks,
		ThrottledPeriods: ru.CpuStats.ThrottledPeriods,
		ThrottledTime:    ru.CpuStats.ThrottledTime,
		Percent:          ru.CpuStats.Percent,
	}

	memory := &proto.MemoryUsage{
		MeasuredFields: memoryUsageMeasuredFieldsToProto(ru.MemoryStats.Measured),
		Rss:            ru.MemoryStats.RSS,
		Cache:          ru.MemoryStats.Cache,
		Swap:           ru.MemoryStats.Swap,
		Usage:          ru.MemoryStats.Usage,
		MaxUsage:       ru.MemoryStats.MaxUsage,
		KernelUsage:    ru.MemoryStats.KernelUsage,
		KernelMaxUsage: ru.MemoryStats.KernelMaxUsage,
	}

	return &proto.TaskResourceUsage{
		Cpu:    cpu,
		Memory: memory,
	}
}

func resourceUsageFromProto(pb *proto.TaskResourceUsage) *ResourceUsage {
	cpu := CpuStats{}
	if pb.Cpu != nil {
		cpu = CpuStats{
			Measured:         cpuUsageMeasuredFieldsFromProto(pb.Cpu.MeasuredFields),
			SystemMode:       pb.Cpu.SystemMode,
			UserMode:         pb.Cpu.UserMode,
			TotalTicks:       pb.Cpu.TotalTicks,
			ThrottledPeriods: pb.Cpu.ThrottledPeriods,
			ThrottledTime:    pb.Cpu.ThrottledTime,
			Percent:          pb.Cpu.Percent,
		}
	}

	memory := MemoryStats{}
	if pb.Memory != nil {
		memory = MemoryStats{
			Measured:       memoryUsageMeasuredFieldsFromProto(pb.Memory.MeasuredFields),
			RSS:            pb.Memory.Rss,
			Cache:          pb.Memory.Cache,
			Swap:           pb.Memory.Swap,
			Usage:          pb.Memory.Usage,
			MaxUsage:       pb.Memory.MaxUsage,
			KernelUsage:    pb.Memory.KernelUsage,
			KernelMaxUsage: pb.Memory.KernelMaxUsage,
		}
	}

	return &ResourceUsage{
		CpuStats:    &cpu,
		MemoryStats: &memory,
	}
}

// BytesToMB converts a byte count to whole megabytes (1 MB = 1024 * 1024
// bytes), truncating any remainder.
func BytesToMB(bytes int64) int64 {
	return bytes / (1024 * 1024)
}

var cpuUsageMeasuredFieldToProtoMap = map[string]proto.CPUUsage_Fields{
	"System Mode":       proto.CPUUsage_SYSTEM_MODE,
	"User Mode":         proto.CPUUsage_USER_MODE,
	"Total Ticks":       proto.CPUUsage_TOTAL_TICKS,
	"Throttled Periods": proto.CPUUsage_THROTTLED_PERIODS,
	"Throttled Time":    proto.CPUUsage_THROTTLED_TIME,
	"Percent":           proto.CPUUsage_PERCENT,
}

var cpuUsageMeasuredFieldFromProtoMap = map[proto.CPUUsage_Fields]string{
	proto.CPUUsage_SYSTEM_MODE:       "System Mode",
	proto.CPUUsage_USER_MODE:         "User Mode",
	proto.CPUUsage_TOTAL_TICKS:       "Total Ticks",
	proto.CPUUsage_THROTTLED_PERIODS: "Throttled Periods",
	proto.CPUUsage_THROTTLED_TIME:    "Throttled Time",
	proto.CPUUsage_PERCENT:           "Percent",
}

func cpuUsageMeasuredFieldsToProto(fields []string) []proto.CPUUsage_Fields {
	r := make([]proto.CPUUsage_Fields, 0, len(fields))

	for _, f := range fields {
		if v, ok := cpuUsageMeasuredFieldToProtoMap[f]; ok {
			r = append(r, v)
		}
	}

	return r
}
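// Illustrative sketch (not part of the upstream file): the measured-field
// converters only translate names present in the maps above; anything else is
// silently dropped rather than causing an error. The hypothetical helper below
// reports which CPU field names would be lost in translation.
func exampleUnmappedCPUFields(fields []string) []string {
	var unmapped []string
	for _, f := range fields {
		if _, ok := cpuUsageMeasuredFieldToProtoMap[f]; !ok {
			unmapped = append(unmapped, f)
		}
	}
	return unmapped
}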
func cpuUsageMeasuredFieldsFromProto(fields []proto.CPUUsage_Fields) []string {
	r := make([]string, 0, len(fields))

	for _, f := range fields {
		if v, ok := cpuUsageMeasuredFieldFromProtoMap[f]; ok {
			r = append(r, v)
		}
	}

	return r
}

var memoryUsageMeasuredFieldToProtoMap = map[string]proto.MemoryUsage_Fields{
	"RSS":              proto.MemoryUsage_RSS,
	"Cache":            proto.MemoryUsage_CACHE,
	"Swap":             proto.MemoryUsage_SWAP,
	"Usage":            proto.MemoryUsage_USAGE,
	"Max Usage":        proto.MemoryUsage_MAX_USAGE,
	"Kernel Usage":     proto.MemoryUsage_KERNEL_USAGE,
	"Kernel Max Usage": proto.MemoryUsage_KERNEL_MAX_USAGE,
}

var memoryUsageMeasuredFieldFromProtoMap = map[proto.MemoryUsage_Fields]string{
	proto.MemoryUsage_RSS:              "RSS",
	proto.MemoryUsage_CACHE:            "Cache",
	proto.MemoryUsage_SWAP:             "Swap",
	proto.MemoryUsage_USAGE:            "Usage",
	proto.MemoryUsage_MAX_USAGE:        "Max Usage",
	proto.MemoryUsage_KERNEL_USAGE:     "Kernel Usage",
	proto.MemoryUsage_KERNEL_MAX_USAGE: "Kernel Max Usage",
}

func memoryUsageMeasuredFieldsToProto(fields []string) []proto.MemoryUsage_Fields {
	r := make([]proto.MemoryUsage_Fields, 0, len(fields))

	for _, f := range fields {
		if v, ok := memoryUsageMeasuredFieldToProtoMap[f]; ok {
			r = append(r, v)
		}
	}

	return r
}

func memoryUsageMeasuredFieldsFromProto(fields []proto.MemoryUsage_Fields) []string {
	r := make([]string, 0, len(fields))

	for _, f := range fields {
		if v, ok := memoryUsageMeasuredFieldFromProtoMap[f]; ok {
			r = append(r, v)
		}
	}

	return r
}
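// Illustrative sketch (not part of the upstream file): a hypothetical helper
// showing how the usage converters and BytesToMB compose — decode a proto
// TaskResourceUsage and report its RSS in whole megabytes. This assumes the
// Rss field is a byte count, matching BytesToMB's 1024*1024 divisor.
func exampleRSSInMB(pb *proto.TaskResourceUsage) int64 {
	if pb == nil {
		// resourceUsageFromProto does not guard a nil message itself.
		return 0
	}
	ru := resourceUsageFromProto(pb)
	// The conversion to int64 assumes RSS values well below the int64
	// maximum, which holds for realistic memory sizes.
	return BytesToMB(int64(ru.MemoryStats.RSS))
}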