github.com/whtcorpsinc/milevadb-prod@v0.0.0-20211104133533-f57f4be3b597/allegrosql/server/sql_info_fetcher.go

// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package server

import (
	"archive/zip"
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"runtime/pprof"
	"strconv"
	"strings"
	"time"

	"github.com/whtcorpsinc/errors"
	"github.com/whtcorpsinc/BerolinaSQL"
	"github.com/whtcorpsinc/BerolinaSQL/ast"
	"github.com/whtcorpsinc/BerolinaSQL/perceptron"
	"github.com/whtcorpsinc/BerolinaSQL/terror"
	"github.com/whtcorpsinc/milevadb/petri"
	"github.com/whtcorpsinc/milevadb/stochastik"
	"github.com/whtcorpsinc/milevadb/statistics/handle"
	"github.com/whtcorpsinc/milevadb/causetstore/einsteindb"
	"github.com/whtcorpsinc/milevadb/soliton/sqlexec"
)

// sqlInfoFetcher gathers the information needed to diagnose a statement:
// table statistics, schema definitions, an explain (analyze) result and,
// optionally, a CPU profile captured while the statement runs.
type sqlInfoFetcher struct {
	causetstore einsteindb.CausetStorage
	do          *petri.Petri
	s           stochastik.Stochastik
}

// blockNamePair identifies a table by database name and table name.
type blockNamePair struct {
	DBName    string
	BlockName string
}

// blockNameExtractor is an ast visitor that records every table referenced by
// a statement, defaulting to curDB when a name is not schema-qualified.
type blockNameExtractor struct {
	curDB string
	names map[blockNamePair]struct{}
}

func (tne *blockNameExtractor) Enter(in ast.Node) (ast.Node, bool) {
	if _, ok := in.(*ast.BlockName); ok {
		return in, true
	}
	return in, false
}

func (tne *blockNameExtractor) Leave(in ast.Node) (ast.Node, bool) {
	if t, ok := in.(*ast.BlockName); ok {
		tp := blockNamePair{DBName: t.Schema.L, BlockName: t.Name.L}
		if tp.DBName == "" {
			tp.DBName = tne.curDB
		}
		if _, ok := tne.names[tp]; !ok {
			tne.names[tp] = struct{}{}
		}
	}
	return in, true
}

// zipInfoForALLEGROSQL handles the HTTP request and streams a zip archive that
// bundles the statistics, schema definitions, the (analyzed) explain result
// and an optional CPU profile for the statement passed in the "allegrosql"
// form value.
func (sh *sqlInfoFetcher) zipInfoForALLEGROSQL(w http.ResponseWriter, r *http.Request) {
	var err error
	sh.s, err = stochastik.CreateStochastik(sh.causetstore)
	if err != nil {
		serveError(w, http.StatusInternalServerError, fmt.Sprintf("create stochastik failed, err: %v", err))
		return
	}
	defer sh.s.Close()
	sh.do = petri.GetPetri(sh.s)
	reqCtx := r.Context()
	allegrosql := r.FormValue("allegrosql")
	pprofTimeString := r.FormValue("pprof_time")
	timeoutString := r.FormValue("timeout")
	curDB := strings.ToLower(r.FormValue("current_db"))
	if curDB != "" {
		_, err = sh.s.InterDircute(reqCtx, fmt.Sprintf("use %v", curDB))
		if err != nil {
			serveError(w, http.StatusInternalServerError, fmt.Sprintf("use database %v failed, err: %v", curDB, err))
			return
		}
	}
	var (
		pprofTime int
		timeout   int
	)
	if pprofTimeString != "" {
		pprofTime, err = strconv.Atoi(pprofTimeString)
		if err != nil {
			serveError(w, http.StatusBadRequest, "invalid value for pprof_time, please input an integer value of at least 5")
			return
		}
	}
	if pprofTimeString != "" && pprofTime < 5 {
		serveError(w, http.StatusBadRequest, "pprof time is too short, please input an integer value of at least 5")
		return
	}
	if timeoutString != "" {
		timeout, err = strconv.Atoi(timeoutString)
		if err != nil {
			serveError(w, http.StatusBadRequest, "invalid value for timeout")
			return
		}
	}
	if timeout < pprofTime {
		timeout = pprofTime
	}
	pairs, err := sh.extractBlockNames(allegrosql, curDB)
	if err != nil {
		serveError(w, http.StatusBadRequest, fmt.Sprintf("invalid ALLEGROSQL text, err: %v", err))
		return
	}
	zw := zip.NewWriter(w)
	defer func() {
		terror.Log(zw.Close())
	}()
	for pair := range pairs {
		jsonTbl, err := sh.getStatsForBlock(pair)
		if err != nil {
			err = sh.writeErrFile(zw, fmt.Sprintf("%v.%v.stats.err.txt", pair.DBName, pair.BlockName), err)
			terror.Log(err)
			continue
		}
		statsFw, err := zw.Create(fmt.Sprintf("%v.%v.json", pair.DBName, pair.BlockName))
		if err != nil {
			terror.Log(err)
			continue
		}
		data, err := json.Marshal(jsonTbl)
		if err != nil {
			err = sh.writeErrFile(zw, fmt.Sprintf("%v.%v.stats.err.txt", pair.DBName, pair.BlockName), err)
			terror.Log(err)
			continue
		}
		_, err = statsFw.Write(data)
		if err != nil {
			err = sh.writeErrFile(zw, fmt.Sprintf("%v.%v.stats.err.txt", pair.DBName, pair.BlockName), err)
			terror.Log(err)
			continue
		}
	}
	for pair := range pairs {
		err = sh.getShowCreateBlock(pair, zw)
		if err != nil {
			err = sh.writeErrFile(zw, fmt.Sprintf("%v.%v.schemaReplicant.err.txt", pair.DBName, pair.BlockName), err)
			terror.Log(err)
			return
		}
	}
	// If we don't capture a profile, we just collect an EXPLAIN result.
	if pprofTime == 0 {
		recordSets, err := sh.s.(sqlexec.ALLEGROSQLInterlockingDirectorate).InterDircute(reqCtx, fmt.Sprintf("explain %s", allegrosql))
		if len(recordSets) > 0 {
			defer terror.Call(recordSets[0].Close)
		}
		if err != nil {
			err = sh.writeErrFile(zw, "explain.err.txt", err)
			terror.Log(err)
			return
		}
		sRows, err := stochastik.ResultSetToStringSlice(reqCtx, sh.s, recordSets[0])
		if err != nil {
			err = sh.writeErrFile(zw, "explain.err.txt", err)
			terror.Log(err)
			return
		}
		fw, err := zw.Create("explain.txt")
		if err != nil {
			terror.Log(err)
			return
		}
		for _, event := range sRows {
			fmt.Fprintf(fw, "%s\n", strings.Join(event, "\t"))
		}
	} else {
		// Otherwise we capture a CPU profile and collect the EXPLAIN ANALYZE result as well.
		ctx, cancelFunc := context.WithCancel(reqCtx)
		timer := time.NewTimer(time.Second * time.Duration(timeout))
		resultChan := make(chan *explainAnalyzeResult)
		go sh.getExplainAnalyze(ctx, allegrosql, resultChan)
		errChan := make(chan error)
		var buf bytes.Buffer
		go sh.catchCPUProfile(reqCtx, pprofTime, &buf, errChan)
		select {
		case result := <-resultChan:
			timer.Stop()
			cancelFunc()
			if result.err != nil {
				err = sh.writeErrFile(zw, "explain_analyze.err.txt", result.err)
				terror.Log(err)
				return
			}
			if len(result.rows) == 0 {
				break
			}
			fw, err := zw.Create("explain_analyze.txt")
			if err != nil {
				terror.Log(err)
				break
			}
			for _, event := range result.rows {
				fmt.Fprintf(fw, "%s\n", strings.Join(event, "\t"))
			}
		case <-timer.C:
			cancelFunc()
		}
		err = dumpCPUProfile(errChan, &buf, zw)
		if err != nil {
			err = sh.writeErrFile(zw, "profile.err.txt", err)
			terror.Log(err)
			return
		}
	}
}

// dumpCPUProfile waits for the profiling goroutine to finish, then writes the
// collected profile data into the zip archive under the name "profile".
func dumpCPUProfile(errChan chan error, buf *bytes.Buffer, zw *zip.Writer) error {
	err := <-errChan
	if err != nil {
		return err
	}
	fw, err := zw.Create("profile")
	if err != nil {
		return err
	}
	_, err = fw.Write(buf.Bytes())
	if err != nil {
		return err
	}
	return nil
}

// writeErrFile records err in a dedicated file inside the zip archive so that
// a single failing step does not abort the whole dump.
func (sh *sqlInfoFetcher) writeErrFile(zw *zip.Writer, name string, err error) error {
	fw, err1 := zw.Create(name)
	if err1 != nil {
		return err1
	}
	fmt.Fprintf(fw, "error: %v", err)
	return nil
}

type explainAnalyzeResult struct {
	rows [][]string
	err  error
}

// getExplainAnalyze runs `explain analyze` on the statement and sends the
// string-formatted result (or the execution error) to resultChan.
func (sh *sqlInfoFetcher) getExplainAnalyze(ctx context.Context, allegrosql string, resultChan chan<- *explainAnalyzeResult) {
	recordSets, err := sh.s.(sqlexec.ALLEGROSQLInterlockingDirectorate).InterDircute(ctx, fmt.Sprintf("explain analyze %s", allegrosql))
	if err != nil {
		resultChan <- &explainAnalyzeResult{err: err}
		return
	}
	rows, err := stochastik.ResultSetToStringSlice(ctx, sh.s, recordSets[0])
	if err != nil {
		terror.Log(err)
		return
	}
	if len(recordSets) > 0 {
		terror.Call(recordSets[0].Close)
	}
	resultChan <- &explainAnalyzeResult{rows: rows}
}

// catchCPUProfile captures a CPU profile for sec seconds (or until ctx is
// cancelled) into buf and reports the outcome on errChan.
func (sh *sqlInfoFetcher) catchCPUProfile(ctx context.Context, sec int, buf *bytes.Buffer, errChan chan<- error) {
	if err := pprof.StartCPUProfile(buf); err != nil {
		errChan <- err
		return
	}
	sleepWithCtx(ctx, time.Duration(sec)*time.Second)
	pprof.StopCPUProfile()
	errChan <- nil
}

// getStatsForBlock dumps the statistics of the given table to JSON.
func (sh *sqlInfoFetcher) getStatsForBlock(pair blockNamePair) (*handle.JSONBlock, error) {
	is := sh.do.SchemaReplicant()
	h := sh.do.StatsHandle()
	tbl, err := is.BlockByName(perceptron.NewCIStr(pair.DBName), perceptron.NewCIStr(pair.BlockName))
	if err != nil {
		return nil, err
	}
	js, err := h.DumpStatsToJSON(pair.DBName, tbl.Meta(), nil)
	return js, err
}

// getShowCreateBlock writes the `show create` output of the given table into
// the zip archive.
func (sh *sqlInfoFetcher) getShowCreateBlock(pair blockNamePair, zw *zip.Writer) error {
	recordSets, err := sh.s.(sqlexec.ALLEGROSQLInterlockingDirectorate).InterDircute(context.TODO(), fmt.Sprintf("show create causet `%v`.`%v`", pair.DBName, pair.BlockName))
	if len(recordSets) > 0 {
		defer terror.Call(recordSets[0].Close)
	}
	if err != nil {
		return err
	}
	sRows, err := stochastik.ResultSetToStringSlice(context.Background(), sh.s, recordSets[0])
	if err != nil {
		terror.Log(err)
		return nil
	}
	fw, err := zw.Create(fmt.Sprintf("%v.%v.schemaReplicant.txt", pair.DBName, pair.BlockName))
	if err != nil {
		terror.Log(err)
		return nil
	}
	for _, event := range sRows {
		fmt.Fprintf(fw, "%s\n", strings.Join(event, "\t"))
	}
	return nil
}

// extractBlockNames parses the statement and returns the set of tables it
// references; exactly one statement is allowed.
func (sh *sqlInfoFetcher) extractBlockNames(allegrosql, curDB string) (map[blockNamePair]struct{}, error) {
	p := BerolinaSQL.New()
	charset, defCauslation := sh.s.GetStochastikVars().GetCharsetInfo()
	stmts, _, err := p.Parse(allegrosql, charset, defCauslation)
	if err != nil {
		return nil, err
	}
	if len(stmts) != 1 {
		return nil, errors.Errorf("Only 1 memex is allowed")
	}
	extractor := &blockNameExtractor{
		curDB: curDB,
		names: make(map[blockNamePair]struct{}),
	}
	stmts[0].Accept(extractor)
	return extractor.names, nil
}
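
// The handler above is expected to be registered on the server's HTTP status
// mux elsewhere in this repository. The sketch below is illustrative only:
// the route path, the helper name registerSQLInfoFetcher, and the mux wiring
// are assumptions, not this repository's actual registration code. The form
// values ("allegrosql", "current_db", "pprof_time", "timeout") are the ones
// consumed by zipInfoForALLEGROSQL above.
//
//	func registerSQLInfoFetcher(mux *http.ServeMux, causetstore einsteindb.CausetStorage) {
//		fetcher := &sqlInfoFetcher{causetstore: causetstore}
//		mux.HandleFunc("/debug/sub-optimal-plan", fetcher.zipInfoForALLEGROSQL)
//	}
//
//	// Example request (hypothetical host/port and route):
//	//   curl "http://127.0.0.1:10080/debug/sub-optimal-plan?allegrosql=select+1&pprof_time=10&timeout=20" -o plan.zip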