// Copyright 2020 WHTCORPS INC, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package interlock

import (
	"fmt"
	"hash"
	"hash/fnv"

	. "github.com/whtcorpsinc/check"
	"github.com/whtcorpsinc/BerolinaSQL/allegrosql"
	"github.com/whtcorpsinc/milevadb/types"
	"github.com/whtcorpsinc/milevadb/types/json"
	"github.com/whtcorpsinc/milevadb/soliton/chunk"
	"github.com/whtcorpsinc/milevadb/soliton/memory"
	"github.com/whtcorpsinc/milevadb/soliton/mock"
)

// testHashBlocks exercises the baseHashBlock interface against both
// implementations (unsafeHashBlock and ConcurrentMapHashBlock): it stores
// event pointers under many keys — enough per key to force entry-slice
// growth past initialEntrySliceLen — then verifies that Get returns every
// pointer in insertion order and Len reports the total count.
func (s *pkgTestSuite) testHashBlocks(c *C) {
	var ht baseHashBlock
	test := func() {
		// Single put/get round trip on one key.
		ht.Put(1, chunk.EventPtr{ChkIdx: 1, EventIdx: 1})
		c.Check(ht.Get(1), DeepEquals, []chunk.EventPtr{{ChkIdx: 1, EventIdx: 1}})

		// Key i receives initialEntrySliceLen*i pointers, so larger keys
		// exercise progressively more slice growth inside the block.
		rawData := map[uint64][]chunk.EventPtr{}
		for i := uint64(0); i < 10; i++ {
			for j := uint64(0); j < initialEntrySliceLen*i; j++ {
				rawData[i] = append(rawData[i], chunk.EventPtr{ChkIdx: uint32(i), EventIdx: uint32(j)})
			}
		}
		// put all rawData into ht vertically
		for j := uint64(0); j < initialEntrySliceLen*9; j++ {
			for i := 9; i >= 0; i-- {
				i := uint64(i)
				if !(j < initialEntrySliceLen*i) {
					// Per-key length is proportional to i, so every
					// smaller key is also exhausted at this j — stop.
					break
				}
				ht.Put(i, rawData[i][j])
			}
		}
		// check: each key returns exactly what was stored for it, and
		// Len equals the sum over all keys (plus nothing extra beyond
		// the key-1 entry which rawData[1] also covers in its slice).
		totalCount := 0
		for i := uint64(0); i < 10; i++ {
			totalCount += len(rawData[i])
			c.Check(ht.Get(i), DeepEquals, rawData[i])
		}
		c.Check(ht.Len(), Equals, uint64(totalCount))
	}
	// test unsafeHashBlock
	ht = newUnsafeHashBlock(0)
	test()
	// test ConcurrentMapHashBlock
	ht = newConcurrentMapHashBlock()
	test()
}

// initBuildChunk creates a build-side fixture chunk with numEvents rows
// over 6 defCausumns (bigint, bigint, varchar, varchar, decimal, json).
// DefCausumn 0 is always NULL; the others are derived from the row index
// so each row is distinct. Returns the chunk and its field types.
func initBuildChunk(numEvents int) (*chunk.Chunk, []*types.FieldType) {
	numDefCauss := 6
	defCausTypes := make([]*types.FieldType, 0, numDefCauss)
	defCausTypes = append(defCausTypes, &types.FieldType{Tp: allegrosql.TypeLonglong})
	defCausTypes = append(defCausTypes, &types.FieldType{Tp: allegrosql.TypeLonglong})
	defCausTypes = append(defCausTypes, &types.FieldType{Tp: allegrosql.TypeVarchar})
	defCausTypes = append(defCausTypes, &types.FieldType{Tp: allegrosql.TypeVarchar})
	defCausTypes = append(defCausTypes, &types.FieldType{Tp: allegrosql.TypeNewDecimal})
	defCausTypes = append(defCausTypes, &types.FieldType{Tp: allegrosql.TypeJSON})

	oldChk := chunk.NewChunkWithCapacity(defCausTypes, numEvents)
	for i := 0; i < numEvents; i++ {
		str := fmt.Sprintf("%d.12345", i)
		oldChk.AppendNull(0)
		oldChk.AppendInt64(1, int64(i))
		oldChk.AppendString(2, str)
		oldChk.AppendString(3, str)
		oldChk.AppendMyDecimal(4, types.NewDecFromStringForTest(str))
		oldChk.AppendJSON(5, json.CreateBinary(str))
	}
	return oldChk, defCausTypes
}

// initProbeChunk creates a probe-side fixture chunk with numEvents rows
// over 3 defCausumns (bigint, bigint, varchar). DefCausumn 0 is always
// NULL; defCausumns 1 and 2 mirror the build chunk's key defCausumns so
// row i here joins with row i of the build chunks on key defCauss {1, 2}.
func initProbeChunk(numEvents int) (*chunk.Chunk, []*types.FieldType) {
	numDefCauss := 3
	defCausTypes := make([]*types.FieldType, 0, numDefCauss)
	defCausTypes = append(defCausTypes, &types.FieldType{Tp: allegrosql.TypeLonglong})
	defCausTypes = append(defCausTypes, &types.FieldType{Tp: allegrosql.TypeLonglong})
	defCausTypes = append(defCausTypes, &types.FieldType{Tp: allegrosql.TypeVarchar})

	oldChk := chunk.NewChunkWithCapacity(defCausTypes, numEvents)
	for i := 0; i < numEvents; i++ {
		str := fmt.Sprintf("%d.12345", i)
		oldChk.AppendNull(0)
		oldChk.AppendInt64(1, int64(i))
		oldChk.AppendString(2, str)
	}
	return oldChk, defCausTypes
}

// hashDefCauslision is a hash.Hash64 stub whose Sum64 always returns 0,
// forcing every key into the same bucket so the container's collision
// handling path is exercised. count records how many times Sum64 ran.
type hashDefCauslision struct {
	count int
}

// Sum64 reports a constant hash to provoke collisions and counts calls.
// NOTE(review): Sum64 uses a pointer receiver while the methods below use
// value receivers, so only *hashDefCauslision satisfies hash.Hash64 —
// which is exactly how the test constructs it (h := &hashDefCauslision{}).
func (h *hashDefCauslision) Sum64() uint64 {
	h.count++
	return 0
}
func (h hashDefCauslision) Write(p []byte) (n int, err error) { return len(p), nil }
func (h hashDefCauslision) Reset()                            {}
func (h hashDefCauslision) Sum(b []byte) []byte               { panic("not implemented") }
func (h hashDefCauslision) Size() int                         { panic("not implemented") }
func (h hashDefCauslision) BlockSize() int                    { panic("not implemented") }

// TestHashEventContainer runs the row-container scenario three ways:
// with a real FNV hash in memory (expecting zero probe collisions), with
// the same hash but spilling to disk, and with the always-colliding stub
// (expecting probe collisions to be observed and counted).
func (s *pkgTestSerialSuite) TestHashEventContainer(c *C) {
	hashFunc := func() hash.Hash64 {
		return fnv.New64()
	}
	rowContainer := s.testHashEventContainer(c, hashFunc, false)
	c.Assert(rowContainer.stat.probeDefCauslision, Equals, 0)
	// On windows time.Now() is imprecise, the elapse time may equal 0
	c.Assert(rowContainer.stat.buildBlockElapse >= 0, IsTrue)

	rowContainer = s.testHashEventContainer(c, hashFunc, true)
	c.Assert(rowContainer.stat.probeDefCauslision, Equals, 0)
	c.Assert(rowContainer.stat.buildBlockElapse >= 0, IsTrue)

	h := &hashDefCauslision{count: 0}
	hashFuncDefCauslision := func() hash.Hash64 {
		return h
	}
	rowContainer = s.testHashEventContainer(c, hashFuncDefCauslision, false)
	c.Assert(h.count > 0, IsTrue)
	c.Assert(rowContainer.stat.probeDefCauslision > 0, IsTrue)
	c.Assert(rowContainer.stat.buildBlockElapse >= 0, IsTrue)
}

// testHashEventContainer builds a hashEventContainer from two identical
// 10-row chunks keyed on defCausumns {1, 2}, optionally forces a spill to
// disk via a 1-byte memory limit, then probes with row 1 of a probe chunk
// and asserts exactly the two matching build rows come back. It returns
// the container so callers can inspect its stats.
func (s *pkgTestSerialSuite) testHashEventContainer(c *C, hashFunc func() hash.Hash64, spill bool) *hashEventContainer {
	sctx := mock.NewContext()
	var err error
	numEvents := 10

	chk0, defCausTypes := initBuildChunk(numEvents)
	chk1, _ := initBuildChunk(numEvents)

	hCtx := &hashContext{
		allTypes:      defCausTypes,
		keyDefCausIdx: []int{1, 2},
	}
	hCtx.hasNull = make([]bool, numEvents)
	// One hasher per build row.
	for i := 0; i < numEvents; i++ {
		hCtx.hashVals = append(hCtx.hashVals, hashFunc())
	}
	rowContainer := newHashEventContainer(sctx, 0, hCtx)
	tracker := rowContainer.GetMemTracker()
	tracker.SetLabel(memory.LabelForBuildSideResult)
	if spill {
		// A 1-byte limit guarantees the spill action fires on first put.
		tracker.SetBytesLimit(1)
		rowContainer.rowContainer.CausetActionSpillForTest().CausetAction(tracker)
	}
	err = rowContainer.PutChunk(chk0, nil)
	c.Assert(err, IsNil)
	err = rowContainer.PutChunk(chk1, nil)
	c.Assert(err, IsNil)
	rowContainer.CausetActionSpill().(*chunk.SpillDiskCausetAction).WaitForTest()
	// After spilling, all bytes live on disk and none in memory;
	// without spilling, the opposite holds.
	c.Assert(rowContainer.alreadySpilledSafeForTest(), Equals, spill)
	c.Assert(rowContainer.GetMemTracker().BytesConsumed() == 0, Equals, spill)
	c.Assert(rowContainer.GetMemTracker().BytesConsumed() > 0, Equals, !spill)
	if rowContainer.alreadySpilledSafeForTest() {
		c.Assert(rowContainer.GetDiskTracker(), NotNil)
		c.Assert(rowContainer.GetDiskTracker().BytesConsumed() > 0, Equals, true)
	}

	probeChk, probeDefCausType := initProbeChunk(2)
	probeEvent := probeChk.GetEvent(1)
	probeCtx := &hashContext{
		allTypes:      probeDefCausType,
		keyDefCausIdx: []int{1, 2},
	}
	probeCtx.hasNull = make([]bool, 1)
	// NOTE(review): this appends to hCtx.hashVals, not probeCtx.hashVals,
	// so the probe context aliases the build-side hashers plus one fresh
	// one. The assertions below pass with this sharing; confirm it is
	// intentional before changing.
	probeCtx.hashVals = append(hCtx.hashVals, hashFunc())
	matched, _, err := rowContainer.GetMatchedEventsAndPtrs(hCtx.hashVals[1].Sum64(), probeEvent, probeCtx)
	c.Assert(err, IsNil)
	// Row 1 exists once in each of the two build chunks.
	c.Assert(len(matched), Equals, 2)
	c.Assert(matched[0].GetCausetEvent(defCausTypes), DeepEquals, chk0.GetEvent(1).GetCausetEvent(defCausTypes))
	c.Assert(matched[1].GetCausetEvent(defCausTypes), DeepEquals, chk1.GetEvent(1).GetCausetEvent(defCausTypes))
	return rowContainer
}