github.com/ice-blockchain/go/src@v0.0.0-20240403114104-1564d284e521/internal/trace/v2/reader.go

// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package trace

import (
	"bufio"
	"fmt"
	"io"
	"slices"
	"strings"

	"internal/trace/v2/event/go122"
	"internal/trace/v2/internal/oldtrace"
	"internal/trace/v2/version"
)

// Reader reads a byte stream, validates it, and produces trace events.
type Reader struct {
	r           *bufio.Reader
	lastTs      Time
	gen         *generation
	spill       *spilledBatch
	frontier    []*batchCursor
	cpuSamples  []cpuSample
	order       ordering
	emittedSync bool

	go121Events *oldTraceConverter
}

// NewReader creates a new trace reader.
func NewReader(r io.Reader) (*Reader, error) {
	br := bufio.NewReader(r)
	v, err := version.ReadHeader(br)
	if err != nil {
		return nil, err
	}
	switch v {
	case version.Go111, version.Go119, version.Go121:
		tr, err := oldtrace.Parse(br, v)
		if err != nil {
			return nil, err
		}
		return &Reader{
			go121Events: convertOldFormat(tr),
		}, nil
	case version.Go122, version.Go123:
		return &Reader{
			r: br,
			order: ordering{
				mStates:     make(map[ThreadID]*mState),
				pStates:     make(map[ProcID]*pState),
				gStates:     make(map[GoID]*gState),
				activeTasks: make(map[TaskID]taskState),
			},
			// Don't emit a sync event when we first go to emit events.
			emittedSync: true,
		}, nil
	default:
		return nil, fmt.Errorf("unknown or unsupported version go 1.%d", v)
	}
}

// ReadEvent reads a single event from the stream.
//
// If the stream has been exhausted, it returns an invalid
// event and io.EOF.
func (r *Reader) ReadEvent() (e Event, err error) {
	if r.go121Events != nil {
		ev, err := r.go121Events.next()
		if err != nil {
			// XXX do we have to emit an EventSync when the trace is done?
			return Event{}, err
		}
		return ev, nil
	}

	// Go 1.22+ trace parsing algorithm.
	//
	// (1) Read in all the batches for the next generation from the stream.
	//   (a) Use the size field in the header to quickly find all batches.
	// (2) Parse out the strings, stacks, CPU samples, and timestamp conversion data.
	// (3) Group each event batch by M, sorted by timestamp. (batchCursor contains the groups.)
	// (4) Organize batchCursors in a min-heap, ordered by the timestamp of the next event for each M.
	// (5) Try to advance the next event for the M at the top of the min-heap.
	//   (a) On success, select that M.
	//   (b) On failure, sort the min-heap and try to advance other Ms. Select the first M that advances.
	//   (c) If there's nothing left to advance, goto (1).
	// (6) Select the latest event for the selected M and get it ready to be returned.
	// (7) Read the next event for the selected M and update the min-heap.
	// (8) Return the selected event, goto (5) on the next call.

	// Set us up to track the last timestamp and fix up
	// the timestamp of any event that comes through.
	defer func() {
		if err != nil {
			return
		}
		if err = e.validateTableIDs(); err != nil {
			return
		}
		if e.base.time <= r.lastTs {
			e.base.time = r.lastTs + 1
		}
		r.lastTs = e.base.time
	}()

	// Consume any events in the ordering first.
	if ev, ok := r.order.Next(); ok {
		return ev, nil
	}

	// Check if we need to refresh the generation.
	if len(r.frontier) == 0 && len(r.cpuSamples) == 0 {
		if !r.emittedSync {
			r.emittedSync = true
			return syncEvent(r.gen.evTable, r.lastTs), nil
		}
		if r.gen != nil && r.spill == nil {
			// If we have a generation from the last read,
			// and there's nothing left in the frontier, and
			// there's no spilled batch, indicating that there's
			// no further generation, it means we're done.
			// Return io.EOF.
			return Event{}, io.EOF
		}
		// Read the next generation.
		r.gen, r.spill, err = readGeneration(r.r, r.spill)
		if err != nil {
			return Event{}, err
		}

		// Reset CPU samples cursor.
		r.cpuSamples = r.gen.cpuSamples

		// Reset frontier.
		for m, batches := range r.gen.batches {
			bc := &batchCursor{m: m}
			ok, err := bc.nextEvent(batches, r.gen.freq)
			if err != nil {
				return Event{}, err
			}
			if !ok {
				// Turns out there aren't actually any events in these batches.
				continue
			}
			r.frontier = heapInsert(r.frontier, bc)
		}

		// Reset emittedSync.
		r.emittedSync = false
	}
	tryAdvance := func(i int) (bool, error) {
		bc := r.frontier[i]

		if ok, err := r.order.Advance(&bc.ev, r.gen.evTable, bc.m, r.gen.gen); !ok || err != nil {
			return ok, err
		}

		// Refresh the cursor's event.
		ok, err := bc.nextEvent(r.gen.batches[bc.m], r.gen.freq)
		if err != nil {
			return false, err
		}
		if ok {
			// If we successfully refreshed, update the heap.
			heapUpdate(r.frontier, i)
		} else {
			// There's nothing else to read. Delete this cursor from the frontier.
			r.frontier = heapRemove(r.frontier, i)
		}
		return true, nil
	}
	// Inject a CPU sample if it comes next.
	if len(r.cpuSamples) != 0 {
		if len(r.frontier) == 0 || r.cpuSamples[0].time < r.frontier[0].ev.time {
			e := r.cpuSamples[0].asEvent(r.gen.evTable)
			r.cpuSamples = r.cpuSamples[1:]
			return e, nil
		}
	}
	// Try to advance the head of the frontier, which should have the minimum timestamp.
	// This should be by far the most common case.
	if len(r.frontier) == 0 {
		return Event{}, fmt.Errorf("broken trace: frontier is empty:\n[gen=%d]\n\n%s\n%s\n", r.gen.gen, dumpFrontier(r.frontier), dumpOrdering(&r.order))
	}
	if ok, err := tryAdvance(0); err != nil {
		return Event{}, err
	} else if !ok {
		// Try to advance the rest of the frontier, in timestamp order.
		//
		// To do this, sort the min-heap. A sorted min-heap is still a
		// min-heap, but now we can iterate over the rest and try to
		// advance in order. This path should be rare.
		slices.SortFunc(r.frontier, (*batchCursor).compare)
		success := false
		for i := 1; i < len(r.frontier); i++ {
			if ok, err = tryAdvance(i); err != nil {
				return Event{}, err
			} else if ok {
				success = true
				break
			}
		}
		if !success {
			return Event{}, fmt.Errorf("broken trace: failed to advance: frontier:\n[gen=%d]\n\n%s\n%s\n", r.gen.gen, dumpFrontier(r.frontier), dumpOrdering(&r.order))
		}
	}

	// Pick off the next event on the queue. At this point, one must exist.
	ev, ok := r.order.Next()
	if !ok {
		panic("invariant violation: advance successful, but queue is empty")
	}
	return ev, nil
}

func dumpFrontier(frontier []*batchCursor) string {
	var sb strings.Builder
	for _, bc := range frontier {
		spec := go122.Specs()[bc.ev.typ]
		fmt.Fprintf(&sb, "M %d [%s time=%d", bc.m, spec.Name, bc.ev.time)
		for i, arg := range spec.Args[1:] {
			fmt.Fprintf(&sb, " %s=%d", arg, bc.ev.args[i])
		}
		fmt.Fprintf(&sb, "]\n")
	}
	return sb.String()
}
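
// The function below is an illustrative sketch and not part of the original
// file: it shows one way a caller might drive Reader, pulling events until the
// stream is exhausted. It relies only on the exported NewReader and ReadEvent
// defined above; the handleEvent callback is a hypothetical placeholder for
// whatever the caller wants to do with each event.
func exampleDrainTrace(r io.Reader, handleEvent func(Event)) error {
	// Wrap the raw trace byte stream in a Reader, which validates the header
	// and selects the appropriate parser for the trace's version.
	tr, err := NewReader(r)
	if err != nil {
		return err
	}
	for {
		// ReadEvent returns an invalid event and io.EOF once the stream is
		// exhausted; any other error indicates a malformed trace.
		ev, err := tr.ReadEvent()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		handleEvent(ev)
	}
}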