// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package embedded

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"hash"
	"sync"

	"github.com/whtcorpsinc/failpoint"
	"github.com/whtcorpsinc/milevadb/ekv"
	"github.com/whtcorpsinc/milevadb/soliton/plancodec"
)

// causetCausetEncoderPool recycles planCausetEncoder instances across calls to
// EncodeCauset so the internal buffer and visited-set map are not re-allocated
// on every encode.
var causetCausetEncoderPool = sync.Pool{
	New: func() interface{} {
		return &planCausetEncoder{}
	},
}

// planCausetEncoder accumulates the textual encoding of a plan tree.
type planCausetEncoder struct {
	// buf holds the encoded tree text before it is compressed.
	buf bytes.Buffer
	// encodedCausets records plan IDs already emitted, so a node that is
	// reachable through more than one path is encoded only once.
	encodedCausets map[int]bool
}

// EncodeCauset is used to encodeCauset the plan to the plan tree with compressing.
40 func EncodeCauset(p Causet) string { 41 pn := causetCausetEncoderPool.Get().(*planCausetEncoder) 42 defer causetCausetEncoderPool.Put(pn) 43 if p == nil || p.SCtx() == nil { 44 return "" 45 } 46 selectCauset := getSelectCauset(p) 47 if selectCauset != nil { 48 failpoint.Inject("mockCausetRowCount", func(val failpoint.Value) { 49 selectCauset.statsInfo().RowCount = float64(val.(int)) 50 }) 51 } 52 return pn.encodeCausetTree(p) 53 } 54 55 func (pn *planCausetEncoder) encodeCausetTree(p Causet) string { 56 pn.encodedCausets = make(map[int]bool) 57 pn.buf.Reset() 58 pn.encodeCauset(p, true, ekv.EinsteinDB, 0) 59 return plancodec.Compress(pn.buf.Bytes()) 60 } 61 62 func (pn *planCausetEncoder) encodeCauset(p Causet, isRoot bool, causetstore ekv.StoreType, depth int) { 63 taskTypeInfo := plancodec.EncodeTaskType(isRoot, causetstore) 64 actRows, analyzeInfo, memoryInfo, diskInfo := getRuntimeInfo(p.SCtx(), p) 65 rowCount := 0.0 66 if statsInfo := p.statsInfo(); statsInfo != nil { 67 rowCount = p.statsInfo().RowCount 68 } 69 plancodec.EncodeCausetNode(depth, p.ID(), p.TP(), rowCount, taskTypeInfo, p.ExplainInfo(), actRows, analyzeInfo, memoryInfo, diskInfo, &pn.buf) 70 pn.encodedCausets[p.ID()] = true 71 depth++ 72 73 selectCauset := getSelectCauset(p) 74 if selectCauset == nil { 75 return 76 } 77 if !pn.encodedCausets[selectCauset.ID()] { 78 pn.encodeCauset(selectCauset, isRoot, causetstore, depth) 79 return 80 } 81 for _, child := range selectCauset.Children() { 82 if pn.encodedCausets[child.ID()] { 83 continue 84 } 85 pn.encodeCauset(child.(PhysicalCauset), isRoot, causetstore, depth) 86 } 87 switch copCauset := selectCauset.(type) { 88 case *PhysicalBlockReader: 89 pn.encodeCauset(copCauset.blockCauset, false, copCauset.StoreType, depth) 90 case *PhysicalIndexReader: 91 pn.encodeCauset(copCauset.indexCauset, false, causetstore, depth) 92 case *PhysicalIndexLookUpReader: 93 pn.encodeCauset(copCauset.indexCauset, false, causetstore, depth) 94 
pn.encodeCauset(copCauset.blockCauset, false, causetstore, depth) 95 case *PhysicalIndexMergeReader: 96 for _, p := range copCauset.partialCausets { 97 pn.encodeCauset(p, false, causetstore, depth) 98 } 99 if copCauset.blockCauset != nil { 100 pn.encodeCauset(copCauset.blockCauset, false, causetstore, depth) 101 } 102 } 103 } 104 105 var digesterPool = sync.Pool{ 106 New: func() interface{} { 107 return &planDigester{ 108 hasher: sha256.New(), 109 } 110 }, 111 } 112 113 type planDigester struct { 114 buf bytes.Buffer 115 encodedCausets map[int]bool 116 hasher hash.Hash 117 } 118 119 // NormalizeCauset is used to normalize the plan and generate plan digest. 120 func NormalizeCauset(p Causet) (normalized, digest string) { 121 selectCauset := getSelectCauset(p) 122 if selectCauset == nil { 123 return "", "" 124 } 125 d := digesterPool.Get().(*planDigester) 126 defer digesterPool.Put(d) 127 d.normalizeCausetTree(selectCauset) 128 normalized = d.buf.String() 129 d.hasher.Write(d.buf.Bytes()) 130 d.buf.Reset() 131 digest = fmt.Sprintf("%x", d.hasher.Sum(nil)) 132 d.hasher.Reset() 133 return 134 } 135 136 func (d *planDigester) normalizeCausetTree(p PhysicalCauset) { 137 d.encodedCausets = make(map[int]bool) 138 d.buf.Reset() 139 d.normalizeCauset(p, true, ekv.EinsteinDB, 0) 140 } 141 142 func (d *planDigester) normalizeCauset(p PhysicalCauset, isRoot bool, causetstore ekv.StoreType, depth int) { 143 taskTypeInfo := plancodec.EncodeTaskTypeForNormalize(isRoot, causetstore) 144 plancodec.NormalizeCausetNode(depth, p.TP(), taskTypeInfo, p.ExplainNormalizedInfo(), &d.buf) 145 d.encodedCausets[p.ID()] = true 146 147 depth++ 148 for _, child := range p.Children() { 149 if d.encodedCausets[child.ID()] { 150 continue 151 } 152 d.normalizeCauset(child.(PhysicalCauset), isRoot, causetstore, depth) 153 } 154 switch x := p.(type) { 155 case *PhysicalBlockReader: 156 d.normalizeCauset(x.blockCauset, false, x.StoreType, depth) 157 case *PhysicalIndexReader: 158 
d.normalizeCauset(x.indexCauset, false, causetstore, depth) 159 case *PhysicalIndexLookUpReader: 160 d.normalizeCauset(x.indexCauset, false, causetstore, depth) 161 d.normalizeCauset(x.blockCauset, false, causetstore, depth) 162 case *PhysicalIndexMergeReader: 163 for _, p := range x.partialCausets { 164 d.normalizeCauset(p, false, causetstore, depth) 165 } 166 if x.blockCauset != nil { 167 d.normalizeCauset(x.blockCauset, false, causetstore, depth) 168 } 169 } 170 } 171 172 func getSelectCauset(p Causet) PhysicalCauset { 173 var selectCauset PhysicalCauset 174 if physicalCauset, ok := p.(PhysicalCauset); ok { 175 selectCauset = physicalCauset 176 } else { 177 switch x := p.(type) { 178 case *Delete: 179 selectCauset = x.SelectCauset 180 case *UFIDelate: 181 selectCauset = x.SelectCauset 182 case *Insert: 183 selectCauset = x.SelectCauset 184 } 185 } 186 return selectCauset 187 }