github.com/go-asm/go@v1.21.1-0.20240213172139-40c5ead50c48/trace/traceviewer/emitter.go

// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package traceviewer

import (
	"encoding/json"
	"fmt"
	"io"
	"strconv"
	"time"

	"github.com/go-asm/go/trace"
	"github.com/go-asm/go/trace/traceviewer/format"
)

type TraceConsumer struct {
	ConsumeTimeUnit    func(unit string)
	ConsumeViewerEvent func(v *format.Event, required bool)
	ConsumeViewerFrame func(key string, f format.Frame)
	Flush              func()
}

// ViewerDataTraceConsumer returns a TraceConsumer that writes to w. The
// startIdx and endIdx are used for splitting large traces. They refer to
// indexes in the traceEvents output array, not the events in the trace input.
func ViewerDataTraceConsumer(w io.Writer, startIdx, endIdx int64) TraceConsumer {
	allFrames := make(map[string]format.Frame)
	requiredFrames := make(map[string]format.Frame)
	enc := json.NewEncoder(w)
	written := 0
	index := int64(-1)

	io.WriteString(w, "{")
	return TraceConsumer{
		ConsumeTimeUnit: func(unit string) {
			io.WriteString(w, `"displayTimeUnit":`)
			enc.Encode(unit)
			io.WriteString(w, ",")
		},
		ConsumeViewerEvent: func(v *format.Event, required bool) {
			index++
			if !required && (index < startIdx || index > endIdx) {
				// not in the range. Skip!
				return
			}
			WalkStackFrames(allFrames, v.Stack, func(id int) {
				s := strconv.Itoa(id)
				requiredFrames[s] = allFrames[s]
			})
			WalkStackFrames(allFrames, v.EndStack, func(id int) {
				s := strconv.Itoa(id)
				requiredFrames[s] = allFrames[s]
			})
			if written == 0 {
				io.WriteString(w, `"traceEvents": [`)
			}
			if written > 0 {
				io.WriteString(w, ",")
			}
			enc.Encode(v)
			// TODO(mknyszek): get rid of the extra \n inserted by enc.Encode.
			// Same should be applied to splittingTraceConsumer.
			written++
		},
		ConsumeViewerFrame: func(k string, v format.Frame) {
			allFrames[k] = v
		},
		Flush: func() {
			io.WriteString(w, `], "stackFrames":`)
			enc.Encode(requiredFrames)
			io.WriteString(w, `}`)
		},
	}
}
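
// exampleViewerJSON is a hypothetical sketch (not part of the upstream file)
// of how a ViewerDataTraceConsumer is typically driven: announce the time
// unit, register any stack frames before the events that reference them,
// stream the events, then Flush to close the JSON object. The 0 and 1<<62
// bounds are placeholder start/end indexes that keep every optional event.
func exampleViewerJSON(w io.Writer) {
	c := ViewerDataTraceConsumer(w, 0, 1<<62)
	c.ConsumeTimeUnit("ns")
	c.ConsumeViewerFrame("1", format.Frame{Name: "main.main:30"})
	c.ConsumeViewerEvent(&format.Event{Name: "work", Phase: "X", Time: 0, Dur: 100, TID: 1, Stack: 1}, false)
	c.Flush()
}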

func SplittingTraceConsumer(max int) (*splitter, TraceConsumer) {
	type eventSz struct {
		Time   float64
		Sz     int
		Frames []int
	}

	var (
		// data.Frames contains only the frames for required events.
		data = format.Data{Frames: make(map[string]format.Frame)}

		allFrames = make(map[string]format.Frame)

		sizes []eventSz
		cw    countingWriter
	)

	s := new(splitter)

	return s, TraceConsumer{
		ConsumeTimeUnit: func(unit string) {
			data.TimeUnit = unit
		},
		ConsumeViewerEvent: func(v *format.Event, required bool) {
			if required {
				// Store required events inside data so flush
				// can include them in the required part of the
				// trace.
				data.Events = append(data.Events, v)
				WalkStackFrames(allFrames, v.Stack, func(id int) {
					s := strconv.Itoa(id)
					data.Frames[s] = allFrames[s]
				})
				WalkStackFrames(allFrames, v.EndStack, func(id int) {
					s := strconv.Itoa(id)
					data.Frames[s] = allFrames[s]
				})
				return
			}
			enc := json.NewEncoder(&cw)
			enc.Encode(v)
			size := eventSz{Time: v.Time, Sz: cw.size + 1} // +1 for ",".
			// Add referenced stack frames. Their size is computed
			// in flush, where we can dedup across events.
			WalkStackFrames(allFrames, v.Stack, func(id int) {
				size.Frames = append(size.Frames, id)
			})
			WalkStackFrames(allFrames, v.EndStack, func(id int) {
				size.Frames = append(size.Frames, id) // This may add duplicates. We'll dedup later.
			})
			sizes = append(sizes, size)
			cw.size = 0
		},
		ConsumeViewerFrame: func(k string, v format.Frame) {
			allFrames[k] = v
		},
		Flush: func() {
			// Calculate size of the mandatory part of the trace.
			// This includes thread names and stack frames for
			// required events.
			cw.size = 0
			enc := json.NewEncoder(&cw)
			enc.Encode(data)
			requiredSize := cw.size

			// Then calculate size of each individual event and
			// their stack frames, grouping them into ranges. We
			// only include stack frames relevant to the events in
			// the range to reduce overhead.

			var (
				start = 0

				eventsSize = 0

				frames     = make(map[string]format.Frame)
				framesSize = 0
			)
			for i, ev := range sizes {
				eventsSize += ev.Sz

				// Add required stack frames. Note that they
				// may already be in the map.
				for _, id := range ev.Frames {
					s := strconv.Itoa(id)
					_, ok := frames[s]
					if ok {
						continue
					}
					f := allFrames[s]
					frames[s] = f
					framesSize += stackFrameEncodedSize(uint(id), f)
				}

				total := requiredSize + framesSize + eventsSize
				if total < max {
					continue
				}

				// Reached max size, commit this range and
				// start a new range.
				startTime := time.Duration(sizes[start].Time * 1000)
				endTime := time.Duration(ev.Time * 1000)
				s.Ranges = append(s.Ranges, Range{
					Name:      fmt.Sprintf("%v-%v", startTime, endTime),
					Start:     start,
					End:       i + 1,
					StartTime: int64(startTime),
					EndTime:   int64(endTime),
				})
				start = i + 1
				frames = make(map[string]format.Frame)
				framesSize = 0
				eventsSize = 0
			}
			if len(s.Ranges) <= 1 {
				s.Ranges = nil
				return
			}

			if end := len(sizes) - 1; start < end {
				s.Ranges = append(s.Ranges, Range{
					Name:      fmt.Sprintf("%v-%v", time.Duration(sizes[start].Time*1000), time.Duration(sizes[end].Time*1000)),
					Start:     start,
					End:       end,
					StartTime: int64(sizes[start].Time * 1000),
					EndTime:   int64(sizes[end].Time * 1000),
				})
			}
		},
	}
}

type splitter struct {
	Ranges []Range
}

type countingWriter struct {
	size int
}

func (cw *countingWriter) Write(data []byte) (int, error) {
	cw.size += len(data)
	return len(data), nil
}

func stackFrameEncodedSize(id uint, f format.Frame) int {
	// We want to know the marginal size of traceviewer.Data.Frames for
	// each event. Running full JSON encoding of the map for each event is
	// far too slow.
	//
	// Since the format is fixed, we can easily compute the size without
	// encoding.
	//
	// A single entry looks like one of the following:
	//
	//   "1":{"name":"main.main:30"},
	//   "10":{"name":"pkg.NewSession:173","parent":9},
	//
	// The parent is omitted if 0. The trailing comma is omitted from the
	// last entry, but we don't need that much precision.
	const (
		baseSize = len(`"`) + len(`":{"name":"`) + len(`"},`)

		// Don't count the trailing quote on the name, as that is
		// counted in baseSize.
		parentBaseSize = len(`,"parent":`)
	)

	size := baseSize

	size += len(f.Name)

	// Bytes for id (always positive).
	for id > 0 {
		size += 1
		id /= 10
	}

	if f.Parent > 0 {
		size += parentBaseSize
		// Bytes for parent (always positive).
		for f.Parent > 0 {
			size += 1
			f.Parent /= 10
		}
	}

	return size
}
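
// exampleStackFrameSize is a hypothetical sketch (not part of the upstream
// file) working through stackFrameEncodedSize for the entry
//
//   "10":{"name":"pkg.NewSession:173","parent":9},
//
// baseSize (15) + len("pkg.NewSession:173") (18) + 2 digits of the id +
// parentBaseSize (10) + 1 digit of the parent = 46 bytes, which matches the
// length of the literal JSON above.
func exampleStackFrameSize() int {
	return stackFrameEncodedSize(10, format.Frame{Name: "pkg.NewSession:173", Parent: 9}) // 46
}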

// WalkStackFrames calls fn for id and all of its parent frames from allFrames.
func WalkStackFrames(allFrames map[string]format.Frame, id int, fn func(id int)) {
	for id != 0 {
		f, ok := allFrames[strconv.Itoa(id)]
		if !ok {
			break
		}
		fn(id)
		id = f.Parent
	}
}

type Mode int

const (
	ModeGoroutineOriented Mode = 1 << iota
	ModeTaskOriented
	ModeThreadOriented // Mutually exclusive with ModeGoroutineOriented.
)

// NewEmitter returns a new Emitter that writes to c. The rangeStart and
// rangeEnd args are used for splitting large traces.
func NewEmitter(c TraceConsumer, rangeStart, rangeEnd time.Duration) *Emitter {
	c.ConsumeTimeUnit("ns")

	return &Emitter{
		c:          c,
		rangeStart: rangeStart,
		rangeEnd:   rangeEnd,
		frameTree:  frameNode{children: make(map[uint64]frameNode)},
		resources:  make(map[uint64]string),
		tasks:      make(map[uint64]task),
	}
}

type Emitter struct {
	c          TraceConsumer
	rangeStart time.Duration
	rangeEnd   time.Duration

	heapStats, prevHeapStats     heapStats
	gstates, prevGstates         [gStateCount]int64
	threadStats, prevThreadStats [threadStateCount]int64
	gomaxprocs                   uint64
	frameTree                    frameNode
	frameSeq                     int
	arrowSeq                     uint64
	filter                       func(uint64) bool
	resourceType                 string
	resources                    map[uint64]string
	focusResource                uint64
	tasks                        map[uint64]task
	asyncSliceSeq                uint64
}

type task struct {
	name      string
	sortIndex int
}

func (e *Emitter) Gomaxprocs(v uint64) {
	if v > e.gomaxprocs {
		e.gomaxprocs = v
	}
}

func (e *Emitter) Resource(id uint64, name string) {
	if e.filter != nil && !e.filter(id) {
		return
	}
	e.resources[id] = name
}

func (e *Emitter) SetResourceType(name string) {
	e.resourceType = name
}

func (e *Emitter) SetResourceFilter(filter func(uint64) bool) {
	e.filter = filter
}

func (e *Emitter) Task(id uint64, name string, sortIndex int) {
	e.tasks[id] = task{name, sortIndex}
}

func (e *Emitter) Slice(s SliceEvent) {
	if e.filter != nil && !e.filter(s.Resource) {
		return
	}
	e.slice(s, format.ProcsSection, "")
}

func (e *Emitter) TaskSlice(s SliceEvent) {
	e.slice(s, format.TasksSection, pickTaskColor(s.Resource))
}

func (e *Emitter) slice(s SliceEvent, sectionID uint64, cname string) {
	if !e.tsWithinRange(s.Ts) && !e.tsWithinRange(s.Ts+s.Dur) {
		return
	}
	e.OptionalEvent(&format.Event{
		Name:     s.Name,
		Phase:    "X",
		Time:     viewerTime(s.Ts),
		Dur:      viewerTime(s.Dur),
		PID:      sectionID,
		TID:      s.Resource,
		Stack:    s.Stack,
		EndStack: s.EndStack,
		Arg:      s.Arg,
		Cname:    cname,
	})
}

type SliceEvent struct {
	Name     string
	Ts       time.Duration
	Dur      time.Duration
	Resource uint64
	Stack    int
	EndStack int
	Arg      any
}
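
// exampleSlice is a hypothetical sketch (not part of the upstream file) of
// emitting a complete slice: a SliceEvent becomes a single "X" (complete)
// viewer event on the Procs section, subject to the emitter's resource filter
// and time range.
func exampleSlice(e *Emitter) {
	e.Slice(SliceEvent{
		Name:     "compute",
		Ts:       10 * time.Microsecond,
		Dur:      250 * time.Microsecond,
		Resource: 1, // rendered as the viewer "thread" row for this resource
	})
}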

func (e *Emitter) AsyncSlice(s AsyncSliceEvent) {
	if !e.tsWithinRange(s.Ts) && !e.tsWithinRange(s.Ts+s.Dur) {
		return
	}
	if e.filter != nil && !e.filter(s.Resource) {
		return
	}
	cname := ""
	if s.TaskColorIndex != 0 {
		cname = pickTaskColor(s.TaskColorIndex)
	}
	e.asyncSliceSeq++
	e.OptionalEvent(&format.Event{
		Category: s.Category,
		Name:     s.Name,
		Phase:    "b",
		Time:     viewerTime(s.Ts),
		TID:      s.Resource,
		ID:       e.asyncSliceSeq,
		Scope:    s.Scope,
		Stack:    s.Stack,
		Cname:    cname,
	})
	e.OptionalEvent(&format.Event{
		Category: s.Category,
		Name:     s.Name,
		Phase:    "e",
		Time:     viewerTime(s.Ts + s.Dur),
		TID:      s.Resource,
		ID:       e.asyncSliceSeq,
		Scope:    s.Scope,
		Stack:    s.EndStack,
		Arg:      s.Arg,
		Cname:    cname,
	})
}

type AsyncSliceEvent struct {
	SliceEvent
	Category       string
	Scope          string
	TaskColorIndex uint64 // Take on the same color as the task with this ID.
}

func (e *Emitter) Instant(i InstantEvent) {
	if !e.tsWithinRange(i.Ts) {
		return
	}
	if e.filter != nil && !e.filter(i.Resource) {
		return
	}
	cname := ""
	e.OptionalEvent(&format.Event{
		Name:     i.Name,
		Category: i.Category,
		Phase:    "I",
		Scope:    "t",
		Time:     viewerTime(i.Ts),
		PID:      format.ProcsSection,
		TID:      i.Resource,
		Stack:    i.Stack,
		Cname:    cname,
		Arg:      i.Arg,
	})
}

type InstantEvent struct {
	Ts       time.Duration
	Name     string
	Category string
	Resource uint64
	Stack    int
	Arg      any
}

func (e *Emitter) Arrow(a ArrowEvent) {
	if e.filter != nil && (!e.filter(a.FromResource) || !e.filter(a.ToResource)) {
		return
	}
	e.arrow(a, format.ProcsSection)
}

func (e *Emitter) TaskArrow(a ArrowEvent) {
	e.arrow(a, format.TasksSection)
}

func (e *Emitter) arrow(a ArrowEvent, sectionID uint64) {
	if !e.tsWithinRange(a.Start) || !e.tsWithinRange(a.End) {
		return
	}
	e.arrowSeq++
	e.OptionalEvent(&format.Event{
		Name:  a.Name,
		Phase: "s",
		TID:   a.FromResource,
		PID:   sectionID,
		ID:    e.arrowSeq,
		Time:  viewerTime(a.Start),
		Stack: a.FromStack,
	})
	e.OptionalEvent(&format.Event{
		Name:  a.Name,
		Phase: "t",
		TID:   a.ToResource,
		PID:   sectionID,
		ID:    e.arrowSeq,
		Time:  viewerTime(a.End),
	})
}

type ArrowEvent struct {
	Name         string
	Start        time.Duration
	End          time.Duration
	FromResource uint64
	FromStack    int
	ToResource   uint64
}
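
// exampleArrow is a hypothetical sketch (not part of the upstream file) of a
// flow arrow: Arrow emits a matched "s"/"t" pair of viewer events that share
// one arrowSeq ID, drawn from FromResource at Start to ToResource at End.
// Both endpoints must fall within the emitter's time range.
func exampleArrow(e *Emitter) {
	e.Arrow(ArrowEvent{
		Name:         "unblock",
		Start:        5 * time.Microsecond,
		End:          8 * time.Microsecond,
		FromResource: 1,
		ToResource:   2,
	})
}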

func (e *Emitter) Event(ev *format.Event) {
	e.c.ConsumeViewerEvent(ev, true)
}

func (e *Emitter) HeapAlloc(ts time.Duration, v uint64) {
	e.heapStats.heapAlloc = v
	e.emitHeapCounters(ts)
}

func (e *Emitter) Focus(id uint64) {
	e.focusResource = id
}

func (e *Emitter) GoroutineTransition(ts time.Duration, from, to GState) {
	e.gstates[from]--
	e.gstates[to]++
	if e.prevGstates == e.gstates {
		return
	}
	if e.tsWithinRange(ts) {
		e.OptionalEvent(&format.Event{
			Name:  "Goroutines",
			Phase: "C",
			Time:  viewerTime(ts),
			PID:   1,
			Arg: &format.GoroutineCountersArg{
				Running:   uint64(e.gstates[GRunning]),
				Runnable:  uint64(e.gstates[GRunnable]),
				GCWaiting: uint64(e.gstates[GWaitingGC]),
			},
		})
	}
	e.prevGstates = e.gstates
}

func (e *Emitter) IncThreadStateCount(ts time.Duration, state ThreadState, delta int64) {
	e.threadStats[state] += delta
	if e.prevThreadStats == e.threadStats {
		return
	}
	if e.tsWithinRange(ts) {
		e.OptionalEvent(&format.Event{
			Name:  "Threads",
			Phase: "C",
			Time:  viewerTime(ts),
			PID:   1,
			Arg: &format.ThreadCountersArg{
				Running:   int64(e.threadStats[ThreadStateRunning]),
				InSyscall: int64(e.threadStats[ThreadStateInSyscall]),
				// TODO(mknyszek): Why is InSyscallRuntime not included here?
			},
		})
	}
	e.prevThreadStats = e.threadStats
}

func (e *Emitter) HeapGoal(ts time.Duration, v uint64) {
	// This cutoff at 1 PiB is a workaround for https://github.com/golang/go/issues/63864.
	//
	// TODO(mknyszek): Remove this once the problem has been fixed.
	const PB = 1 << 50
	if v > PB {
		v = 0
	}
	e.heapStats.nextGC = v
	e.emitHeapCounters(ts)
}

func (e *Emitter) emitHeapCounters(ts time.Duration) {
	if e.prevHeapStats == e.heapStats {
		return
	}
	diff := uint64(0)
	if e.heapStats.nextGC > e.heapStats.heapAlloc {
		diff = e.heapStats.nextGC - e.heapStats.heapAlloc
	}
	if e.tsWithinRange(ts) {
		e.OptionalEvent(&format.Event{
			Name:  "Heap",
			Phase: "C",
			Time:  viewerTime(ts),
			PID:   1,
			Arg:   &format.HeapCountersArg{Allocated: e.heapStats.heapAlloc, NextGC: diff},
		})
	}
	e.prevHeapStats = e.heapStats
}

// Err returns an error if the emitter is in an invalid state.
func (e *Emitter) Err() error {
	if e.gstates[GRunnable] < 0 || e.gstates[GRunning] < 0 || e.threadStats[ThreadStateInSyscall] < 0 || e.threadStats[ThreadStateInSyscallRuntime] < 0 {
		return fmt.Errorf(
			"runnable=%d running=%d insyscall=%d insyscallRuntime=%d",
			e.gstates[GRunnable],
			e.gstates[GRunning],
			e.threadStats[ThreadStateInSyscall],
			e.threadStats[ThreadStateInSyscallRuntime],
		)
	}
	return nil
}

func (e *Emitter) tsWithinRange(ts time.Duration) bool {
	return e.rangeStart <= ts && ts <= e.rangeEnd
}
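
// exampleCounters is a hypothetical sketch (not part of the upstream file) of
// the counter plumbing: state transitions only adjust the running tallies, and
// a "C" (counter) event is emitted when the tallies actually change and the
// timestamp lies within the emitter's range.
func exampleCounters(e *Emitter, ts time.Duration) {
	e.GoroutineTransition(ts, GRunnable, GRunning) // one goroutine starts running
	e.HeapAlloc(ts, 64<<20)                        // sample the heap counter at 64 MiB
}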

// OptionalEvent emits ev if it's within the time range of the consumer, i.e.
// the selected trace split range.
func (e *Emitter) OptionalEvent(ev *format.Event) {
	e.c.ConsumeViewerEvent(ev, false)
}

func (e *Emitter) Flush() {
	e.processMeta(format.StatsSection, "STATS", 0)

	if len(e.tasks) != 0 {
		e.processMeta(format.TasksSection, "TASKS", 1)
	}
	for id, task := range e.tasks {
		e.threadMeta(format.TasksSection, id, task.name, task.sortIndex)
	}

	e.processMeta(format.ProcsSection, e.resourceType, 2)

	e.threadMeta(format.ProcsSection, trace.GCP, "GC", -6)
	e.threadMeta(format.ProcsSection, trace.NetpollP, "Network", -5)
	e.threadMeta(format.ProcsSection, trace.TimerP, "Timers", -4)
	e.threadMeta(format.ProcsSection, trace.SyscallP, "Syscalls", -3)

	for id, name := range e.resources {
		priority := int(id)
		if e.focusResource != 0 && id == e.focusResource {
			// Put the focus goroutine on top.
			priority = -2
		}
		e.threadMeta(format.ProcsSection, id, name, priority)
	}

	e.c.Flush()
}

func (e *Emitter) threadMeta(sectionID, tid uint64, name string, priority int) {
	e.Event(&format.Event{
		Name:  "thread_name",
		Phase: "M",
		PID:   sectionID,
		TID:   tid,
		Arg:   &format.NameArg{Name: name},
	})
	e.Event(&format.Event{
		Name:  "thread_sort_index",
		Phase: "M",
		PID:   sectionID,
		TID:   tid,
		Arg:   &format.SortIndexArg{Index: priority},
	})
}

func (e *Emitter) processMeta(sectionID uint64, name string, priority int) {
	e.Event(&format.Event{
		Name:  "process_name",
		Phase: "M",
		PID:   sectionID,
		Arg:   &format.NameArg{Name: name},
	})
	e.Event(&format.Event{
		Name:  "process_sort_index",
		Phase: "M",
		PID:   sectionID,
		Arg:   &format.SortIndexArg{Index: priority},
	})
}

// Stack emits the given frames and returns a unique id for the stack. No
// pointers to the given data are being retained beyond the call to Stack.
func (e *Emitter) Stack(stk []*trace.Frame) int {
	return e.buildBranch(e.frameTree, stk)
}

// buildBranch builds one branch in the prefix tree rooted at e.frameTree.
func (e *Emitter) buildBranch(parent frameNode, stk []*trace.Frame) int {
	if len(stk) == 0 {
		return parent.id
	}
	last := len(stk) - 1
	frame := stk[last]
	stk = stk[:last]

	node, ok := parent.children[frame.PC]
	if !ok {
		e.frameSeq++
		node.id = e.frameSeq
		node.children = make(map[uint64]frameNode)
		parent.children[frame.PC] = node
		e.c.ConsumeViewerFrame(strconv.Itoa(node.id), format.Frame{Name: fmt.Sprintf("%v:%v", frame.Fn, frame.Line), Parent: parent.id})
	}
	return e.buildBranch(node, stk)
}

type heapStats struct {
	heapAlloc uint64
	nextGC    uint64
}

func viewerTime(t time.Duration) float64 {
	return float64(t) / float64(time.Microsecond)
}

type GState int

const (
	GDead GState = iota
	GRunnable
	GRunning
	GWaiting
	GWaitingGC

	gStateCount
)

type ThreadState int

const (
	ThreadStateInSyscall ThreadState = iota
	ThreadStateInSyscallRuntime
	ThreadStateRunning

	threadStateCount
)

type frameNode struct {
	id       int
	children map[uint64]frameNode
}
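
// exampleStack is a hypothetical sketch (not part of the upstream file) of
// registering a call stack: frames are ordered leaf-first, buildBranch walks
// them from the outermost end, and the returned ID names the leaf frame, which
// a SliceEvent's Stack or EndStack field can then reference.
func exampleStack(e *Emitter) int {
	stk := []*trace.Frame{
		{PC: 0x402000, Fn: "main.compute", Line: 12}, // leaf frame; its node ID is returned
		{PC: 0x401000, Fn: "main.main", Line: 30},    // outermost frame, parent of the leaf
	}
	return e.Stack(stk)
}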

// Mapping from more reasonable color names to the reserved color names in
// https://github.com/catapult-project/catapult/blob/master/tracing/tracing/base/color_scheme.html#L50
// The chrome trace viewer allows only those as cname values.
const (
	colorLightMauve     = "thread_state_uninterruptible" // 182, 125, 143
	colorOrange         = "thread_state_iowait"          // 255, 140, 0
	colorSeafoamGreen   = "thread_state_running"         // 126, 200, 148
	colorVistaBlue      = "thread_state_runnable"        // 133, 160, 210
	colorTan            = "thread_state_unknown"         // 199, 155, 125
	colorIrisBlue       = "background_memory_dump"       // 0, 180, 180
	colorMidnightBlue   = "light_memory_dump"            // 0, 0, 180
	colorDeepMagenta    = "detailed_memory_dump"         // 180, 0, 180
	colorBlue           = "vsync_highlight_color"        // 0, 0, 255
	colorGrey           = "generic_work"                 // 125, 125, 125
	colorGreen          = "good"                         // 0, 125, 0
	colorDarkGoldenrod  = "bad"                          // 180, 125, 0
	colorPeach          = "terrible"                     // 180, 0, 0
	colorBlack          = "black"                        // 0, 0, 0
	colorLightGrey      = "grey"                         // 221, 221, 221
	colorWhite          = "white"                        // 255, 255, 255
	colorYellow         = "yellow"                       // 255, 255, 0
	colorOlive          = "olive"                        // 100, 100, 0
	colorCornflowerBlue = "rail_response"                // 67, 135, 253
	colorSunsetOrange   = "rail_animation"               // 244, 74, 63
	colorTangerine      = "rail_idle"                    // 238, 142, 0
	colorShamrockGreen  = "rail_load"                    // 13, 168, 97
	colorGreenishYellow = "startup"                      // 230, 230, 0
	colorDarkGrey       = "heap_dump_stack_frame"        // 128, 128, 128
	colorTawny          = "heap_dump_child_node_arrow"   // 204, 102, 0
	colorLemon          = "cq_build_running"             // 255, 255, 119
	colorLime           = "cq_build_passed"              // 153, 238, 102
	colorPink           = "cq_build_failed"              // 238, 136, 136
	colorSilver         = "cq_build_abandoned"           // 187, 187, 187
	colorManzGreen      = "cq_build_attempt_runnig"      // 222, 222, 75
	colorKellyGreen     = "cq_build_attempt_passed"      // 108, 218, 35
	colorAnotherGrey    = "cq_build_attempt_failed"      // 187, 187, 187
)

var colorForTask = []string{
	colorLightMauve,
	colorOrange,
	colorSeafoamGreen,
	colorVistaBlue,
	colorTan,
	colorMidnightBlue,
	colorIrisBlue,
	colorDeepMagenta,
	colorGreen,
	colorDarkGoldenrod,
	colorPeach,
	colorOlive,
	colorCornflowerBlue,
	colorSunsetOrange,
	colorTangerine,
	colorShamrockGreen,
	colorTawny,
	colorLemon,
	colorLime,
	colorPink,
	colorSilver,
	colorManzGreen,
	colorKellyGreen,
}

func pickTaskColor(id uint64) string {
	idx := id % uint64(len(colorForTask))
	return colorForTask[idx]
}
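
// exampleTaskColor is a hypothetical sketch (not part of the upstream file):
// a task ID maps deterministically to a palette entry (ID modulo
// len(colorForTask)), so slices and async slices tied to the same task share
// a color.
func exampleTaskColor() string {
	return pickTaskColor(42) // == colorForTask[42%uint64(len(colorForTask))]
}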