golang.org/x/exp@v0.0.0-20240506185415-9bf2ced13842/trace/reader.go

// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Code generated by "gen.bash" from internal/trace/v2; DO NOT EDIT.

//go:build go1.21

package trace

import (
	"bufio"
	"fmt"
	"io"
	"slices"
	"strings"

	"golang.org/x/exp/trace/internal/event/go122"
	"golang.org/x/exp/trace/internal/oldtrace"
	"golang.org/x/exp/trace/internal/version"
)

// Reader reads a byte stream, validates it, and produces trace events.
type Reader struct {
	r           *bufio.Reader
	lastTs      Time
	gen         *generation
	spill       *spilledBatch
	frontier    []*batchCursor
	cpuSamples  []cpuSample
	order       ordering
	emittedSync bool

	go121Events *oldTraceConverter
}

// NewReader creates a new trace reader.
func NewReader(r io.Reader) (*Reader, error) {
	br := bufio.NewReader(r)
	v, err := version.ReadHeader(br)
	if err != nil {
		return nil, err
	}
	switch v {
	case version.Go111, version.Go119, version.Go121:
		tr, err := oldtrace.Parse(br, v)
		if err != nil {
			return nil, err
		}
		return &Reader{
			go121Events: convertOldFormat(tr),
		}, nil
	case version.Go122, version.Go123:
		return &Reader{
			r: br,
			order: ordering{
				mStates:     make(map[ThreadID]*mState),
				pStates:     make(map[ProcID]*pState),
				gStates:     make(map[GoID]*gState),
				activeTasks: make(map[TaskID]taskState),
			},
			// Don't emit a sync event when we first go to emit events.
			emittedSync: true,
		}, nil
	default:
		return nil, fmt.Errorf("unknown or unsupported version go 1.%d", v)
	}
}

// ReadEvent reads a single event from the stream.
//
// If the stream has been exhausted, it returns an invalid
// event and io.EOF.
func (r *Reader) ReadEvent() (e Event, err error) {
	if r.go121Events != nil {
		ev, err := r.go121Events.next()
		if err != nil {
			// XXX do we have to emit an EventSync when the trace is done?
			return Event{}, err
		}
		return ev, nil
	}

	// Go 1.22+ trace parsing algorithm.
	//
	// (1) Read in all the batches for the next generation from the stream.
	//   (a) Use the size field in the header to quickly find all batches.
	// (2) Parse out the strings, stacks, CPU samples, and timestamp conversion data.
	// (3) Group each event batch by M, sorted by timestamp. (batchCursor contains the groups.)
	// (4) Organize batchCursors in a min-heap, ordered by the timestamp of the next event for each M.
	// (5) Try to advance the next event for the M at the top of the min-heap.
	//   (a) On success, select that M.
	//   (b) On failure, sort the min-heap and try to advance other Ms. Select the first M that advances.
	//   (c) If there's nothing left to advance, goto (1).
	// (6) Select the latest event for the selected M and get it ready to be returned.
	// (7) Read the next event for the selected M and update the min-heap.
	// (8) Return the selected event, goto (5) on the next call.

	// Set us up to track the last timestamp and fix up
	// the timestamp of any event that comes through.
	defer func() {
		if err != nil {
			return
		}
		if err = e.validateTableIDs(); err != nil {
			return
		}
		if e.base.time <= r.lastTs {
			e.base.time = r.lastTs + 1
		}
		r.lastTs = e.base.time
	}()

	// Consume any events in the ordering first.
	if ev, ok := r.order.Next(); ok {
		return ev, nil
	}

	// Check if we need to refresh the generation.
	if len(r.frontier) == 0 && len(r.cpuSamples) == 0 {
		if !r.emittedSync {
			r.emittedSync = true
			return syncEvent(r.gen.evTable, r.lastTs), nil
		}
		if r.gen != nil && r.spill == nil {
			// If we have a generation from the last read,
			// there's nothing left in the frontier, and
			// there's no spilled batch (so there's no further
			// generation), then we're done. Return io.EOF.
			return Event{}, io.EOF
		}
		// Read the next generation.
		r.gen, r.spill, err = readGeneration(r.r, r.spill)
		if err != nil {
			return Event{}, err
		}

		// Reset CPU samples cursor.
		r.cpuSamples = r.gen.cpuSamples

		// Reset frontier.
		for m, batches := range r.gen.batches {
			bc := &batchCursor{m: m}
			ok, err := bc.nextEvent(batches, r.gen.freq)
			if err != nil {
				return Event{}, err
			}
			if !ok {
				// Turns out there aren't actually any events in these batches.
				continue
			}
			r.frontier = heapInsert(r.frontier, bc)
		}

		// Reset emittedSync.
		r.emittedSync = false
	}
	tryAdvance := func(i int) (bool, error) {
		bc := r.frontier[i]

		if ok, err := r.order.Advance(&bc.ev, r.gen.evTable, bc.m, r.gen.gen); !ok || err != nil {
			return ok, err
		}

		// Refresh the cursor's event.
		ok, err := bc.nextEvent(r.gen.batches[bc.m], r.gen.freq)
		if err != nil {
			return false, err
		}
		if ok {
			// If we successfully refreshed, update the heap.
			heapUpdate(r.frontier, i)
		} else {
			// There's nothing else to read. Delete this cursor from the frontier.
			r.frontier = heapRemove(r.frontier, i)
		}
		return true, nil
	}
	// Inject a CPU sample if it comes next.
	if len(r.cpuSamples) != 0 {
		if len(r.frontier) == 0 || r.cpuSamples[0].time < r.frontier[0].ev.time {
			e := r.cpuSamples[0].asEvent(r.gen.evTable)
			r.cpuSamples = r.cpuSamples[1:]
			return e, nil
		}
	}
	// Try to advance the head of the frontier, which should have the minimum
	// timestamp. This should be by far the most common case.
	if len(r.frontier) == 0 {
		return Event{}, fmt.Errorf("broken trace: frontier is empty:\n[gen=%d]\n\n%s\n%s\n", r.gen.gen, dumpFrontier(r.frontier), dumpOrdering(&r.order))
	}
	if ok, err := tryAdvance(0); err != nil {
		return Event{}, err
	} else if !ok {
		// Try to advance the rest of the frontier, in timestamp order.
		//
		// To do this, sort the min-heap. A sorted min-heap is still a
		// min-heap, but now we can iterate over the rest and try to
		// advance in order. This path should be rare.
		slices.SortFunc(r.frontier, (*batchCursor).compare)
		success := false
		for i := 1; i < len(r.frontier); i++ {
			if ok, err = tryAdvance(i); err != nil {
				return Event{}, err
			} else if ok {
				success = true
				break
			}
		}
		if !success {
			return Event{}, fmt.Errorf("broken trace: failed to advance: frontier:\n[gen=%d]\n\n%s\n%s\n", r.gen.gen, dumpFrontier(r.frontier), dumpOrdering(&r.order))
		}
	}

	// Pick off the next event on the queue. At this point, one must exist.
	ev, ok := r.order.Next()
	if !ok {
		panic("invariant violation: advance successful, but queue is empty")
	}
	return ev, nil
}

func dumpFrontier(frontier []*batchCursor) string {
	var sb strings.Builder
	for _, bc := range frontier {
		spec := go122.Specs()[bc.ev.typ]
		fmt.Fprintf(&sb, "M %d [%s time=%d", bc.m, spec.Name, bc.ev.time)
		for i, arg := range spec.Args[1:] {
			fmt.Fprintf(&sb, " %s=%d", arg, bc.ev.args[i])
		}
		fmt.Fprintf(&sb, "]\n")
	}
	return sb.String()
}
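// Usage sketch (illustrative addition, not part of the generated file): a
// minimal program that drains a trace with this package's exported API,
// calling ReadEvent in a loop until it reports io.EOF. The file name
// "trace.out" is a hypothetical example; any io.Reader carrying a trace
// produced by the runtime works. The sketch assumes it lives in its own
// main package, importing golang.org/x/exp/trace.
//
//	package main
//
//	import (
//		"fmt"
//		"io"
//		"log"
//		"os"
//
//		"golang.org/x/exp/trace"
//	)
//
//	func main() {
//		f, err := os.Open("trace.out") // hypothetical path to a runtime trace
//		if err != nil {
//			log.Fatal(err)
//		}
//		defer f.Close()
//
//		r, err := trace.NewReader(f)
//		if err != nil {
//			log.Fatal(err)
//		}
//		for {
//			ev, err := r.ReadEvent()
//			if err == io.EOF {
//				break // stream exhausted
//			}
//			if err != nil {
//				log.Fatal(err)
//			}
//			fmt.Println(ev.Kind()) // e.g. count or filter by event kind
//		}
//	}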