github.com/whtcorpsinc/MilevaDB-Prod@v0.0.0-20211104133533-f57f4be3b597/interlock/trace.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package interlock

import (
	"context"
	"encoding/json"
	"fmt"
	"sort"
	"time"

	"github.com/opentracing/basictracer-go"
	"github.com/opentracing/opentracing-go"
	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
	"github.com/whtcorpsinc/BerolinaSQL/ast"
	"github.com/whtcorpsinc/BerolinaSQL/terror"
	"github.com/whtcorpsinc/errors"
	"github.com/whtcorpsinc/milevadb/causet/embedded"
	"github.com/whtcorpsinc/milevadb/soliton/chunk"
	"github.com/whtcorpsinc/milevadb/soliton/logutil"
	"github.com/whtcorpsinc/milevadb/soliton/sqlexec"
	"github.com/whtcorpsinc/milevadb/stochastikctx"
	"github.com/whtcorpsinc/milevadb/types"
	"go.uber.org/zap"
	"sourcegraph.com/sourcegraph/apFIDelash"
	traceImpl "sourcegraph.com/sourcegraph/apFIDelash/opentracing"
)

// TraceInterDirc represents a root interlock of a trace query.
type TraceInterDirc struct {
	baseInterlockingDirectorate
	// DefCauslectedSpans collects all spans recorded during execution. Spans are
	// appended via a callback method passed into the tracer implementation.
	DefCauslectedSpans []basictracer.RawSpan
	// exhausted being true means there is no more result.
	exhausted bool
	// stmtNode is the AST of the real query; it is used for building the real query's plan.
	stmtNode ast.StmtNode
	// rootTrace represents the root span, which is the parent of all other spans.
	rootTrace opentracing.Span

	builder *interlockBuilder
	format  string
}

// Next executes the real query and collects the spans recorded along the way.
func (e *TraceInterDirc) Next(ctx context.Context, req *chunk.Chunk) error {
	req.Reset()
	if e.exhausted {
		return nil
	}
	se, ok := e.ctx.(sqlexec.ALLEGROSQLInterlockingDirectorate)
	if !ok {
		e.exhausted = true
		return nil
	}

	switch e.format {
	case embedded.TraceFormatLog:
		return e.nextTraceLog(ctx, se, req)
	default:
		return e.nextEventJSON(ctx, se, req)
	}
}

// nextTraceLog runs the traced statement under an in-memory basictracer recorder
// and renders the recorded spans as log rows via generateLogResult.
func (e *TraceInterDirc) nextTraceLog(ctx context.Context, se sqlexec.ALLEGROSQLInterlockingDirectorate, req *chunk.Chunk) error {
	recorder := basictracer.NewInMemoryRecorder()
	tracer := basictracer.New(recorder)
	span := tracer.StartSpan("trace")
	ctx = opentracing.ContextWithSpan(ctx, span)

	e.executeChild(ctx, se)
	span.Finish()

	generateLogResult(recorder.GetSpans(), req)
	e.exhausted = true
	return nil
}

// nextEventJSON runs the traced statement under an apFIDelash tracer backed by an
// in-memory store and renders the collected traces either as a tree of rows or as JSON.
func (e *TraceInterDirc) nextEventJSON(ctx context.Context, se sqlexec.ALLEGROSQLInterlockingDirectorate, req *chunk.Chunk) error {
	causetstore := apFIDelash.NewMemoryStore()
	tracer := traceImpl.NewTracer(causetstore)
	span := tracer.StartSpan("trace")
	ctx = opentracing.ContextWithSpan(ctx, span)

	e.executeChild(ctx, se)
	span.Finish()

	traces, err := causetstore.Traces(apFIDelash.TracesOpts{})
	if err != nil {
		return errors.Trace(err)
	}

	// Event format.
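	// The default ("row") format renders only the first collected trace as an
	// indented tree via dfsTree; the JSON format below marshals every collected
	// trace and splits the payload across multiple rows.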
	if e.format != embedded.TraceFormatJSON {
		if len(traces) < 1 {
			e.exhausted = true
			return nil
		}
		trace := traces[0]
		dfsTree(trace, "", false, req)
		e.exhausted = true
		return nil
	}

	// JSON format.
	data, err := json.Marshal(traces)
	if err != nil {
		return errors.Trace(err)
	}

	// Split the JSON data into rows to avoid the max packet size limitation.
	const maxEventLen = 4096
	for len(data) > maxEventLen {
		req.AppendString(0, string(data[:maxEventLen]))
		data = data[maxEventLen:]
	}
	req.AppendString(0, string(data))
	e.exhausted = true
	return nil
}

// executeChild executes the traced statement through the stochastik's ALLEGROSQL
// interface, then drains and closes every record set it returns.
func (e *TraceInterDirc) executeChild(ctx context.Context, se sqlexec.ALLEGROSQLInterlockingDirectorate) {
	recordSets, err := se.InterDircute(ctx, e.stmtNode.Text())
	if len(recordSets) == 0 {
		if err != nil {
			var errCode uint16
			if te, ok := err.(*terror.Error); ok {
				errCode = terror.ToALLEGROSQLError(te).Code
			}
			logutil.Eventf(ctx, "execute with error(%d): %s", errCode, err.Error())
		} else {
			logutil.Eventf(ctx, "execute done, modify event: %d", e.ctx.GetStochastikVars().StmtCtx.AffectedEvents())
		}
	}
	for _, rs := range recordSets {
		drainRecordSet(ctx, e.ctx, rs)
		if err = rs.Close(); err != nil {
			logutil.Logger(ctx).Error("run trace close result with error", zap.Error(err))
		}
	}
}

// drainRecordSet reads every chunk from rs and records the outcome as trace events.
func drainRecordSet(ctx context.Context, sctx stochastikctx.Context, rs sqlexec.RecordSet) {
	req := rs.NewChunk()
	var rowCount int
	for {
		err := rs.Next(ctx, req)
		if err != nil || req.NumEvents() == 0 {
			if err != nil {
				var errCode uint16
				if te, ok := err.(*terror.Error); ok {
					errCode = terror.ToALLEGROSQLError(te).Code
				}
				logutil.Eventf(ctx, "execute with error(%d): %s", errCode, err.Error())
			} else {
				logutil.Eventf(ctx, "execute done, ReturnEvent: %d, ModifyEvent: %d", rowCount, sctx.GetStochastikVars().StmtCtx.AffectedEvents())
			}
			return
		}
		rowCount += req.NumEvents()
		req.Reset()
	}
}

// dfsTree walks the trace tree depth-first and appends one row per span to chk,
// drawing the tree structure with box-drawing prefixes.
func dfsTree(t *apFIDelash.Trace, prefix string, isLast bool, chk *chunk.Chunk) {
	var newPrefix, suffix string
	if len(prefix) == 0 {
		newPrefix = prefix + "  "
	} else {
		if !isLast {
			suffix = "├─"
			newPrefix = prefix + "│ "
		} else {
			suffix = "└─"
			newPrefix = prefix + "  "
		}
	}

	var start time.Time
	var duration time.Duration
	if e, err := t.TimespanEvent(); err == nil {
		start = e.Start()
		end := e.End()
		duration = end.Sub(start)
	}

	chk.AppendString(0, prefix+suffix+t.Span.Name())
	chk.AppendString(1, start.Format("15:04:05.000000"))
	chk.AppendString(2, duration.String())

	// Sort sub-traces by their start time.
	sort.Slice(t.Sub, func(i, j int) bool {
		var istart, jstart time.Time
		if ievent, err := t.Sub[i].TimespanEvent(); err == nil {
			istart = ievent.Start()
		}
		if jevent, err := t.Sub[j].TimespanEvent(); err == nil {
			jstart = jevent.Start()
		}
		return istart.Before(jstart)
	})

	for i, sp := range t.Sub {
		dfsTree(sp, newPrefix, i == len(t.Sub)-1 /*last element of array*/, chk)
	}
}

// generateLogResult turns the raw spans recorded by basictracer into log-style rows.
func generateLogResult(allSpans []basictracer.RawSpan, chk *chunk.Chunk) {
	for rIdx := range allSpans {
		span := &allSpans[rIdx]

		chk.AppendTime(0, types.NewTime(types.FromGoTime(span.Start), allegrosql.TypeTimestamp, 6))
		chk.AppendString(1, "--- start span "+span.Operation+" ----")
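		// Four columns per row: timestamp, event text, tags, and span name.
		// The span-start row above carries no tags, so its tags column stays empty.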
		chk.AppendString(2, "")
		chk.AppendString(3, span.Operation)

		var tags string
		if len(span.Tags) > 0 {
			tags = fmt.Sprintf("%v", span.Tags)
		}
		for _, l := range span.Logs {
			for _, field := range l.Fields {
				if field.Key() == logutil.TraceEventKey {
					chk.AppendTime(0, types.NewTime(types.FromGoTime(l.Timestamp), allegrosql.TypeTimestamp, 6))
					chk.AppendString(1, field.Value().(string))
					chk.AppendString(2, tags)
					chk.AppendString(3, span.Operation)
				}
			}
		}
	}
}
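
// Minimal usage sketch. Assumption (not defined in this file): the fork keeps
// TiDB-style TRACE statement syntax for choosing the output format handled above.
//
//	TRACE FORMAT='log' SELECT * FROM t;   -- handled by nextTraceLog
//	TRACE FORMAT='json' SELECT * FROM t;  -- JSON branch of nextEventJSON
//	TRACE SELECT * FROM t;                -- default row format, rendered by dfsTree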