github.com/pingcap/badger@v1.5.1-0.20230103063557-828f39b09b6d/writer_ingest.go

package badger

import (
	"sync"
	"sync/atomic"

	"github.com/pingcap/badger/epoch"
	"github.com/pingcap/badger/protos"
	"github.com/pingcap/badger/table"
	"github.com/pingcap/badger/table/sstable"
	"github.com/pingcap/badger/y"
)

// ingestTask carries externally built SSTables to be placed into the LSM tree.
// cnt counts the tables ingested so far; err records the first failure.
type ingestTask struct {
	sync.WaitGroup
	tbls []table.Table
	cnt  int
	err  error
}

// ingestTables prepares the ingest (commit ts, pending writes, memtable flush)
// and then places each table into the LSM tree in the background.
func (w *writeWorker) ingestTables(task *ingestTask) {
	ts, wg, err := w.prepareIngestTask(task)
	if err != nil {
		task.err = err
		task.Done()
		return
	}

	// Because there is no concurrent write into the ingested key ranges,
	// we can resume other writes and finish the ingest job in the background.
	go func() {
		defer task.Done()
		defer w.orc.doneCommit(ts)

		ends := make([]y.Key, 0, len(task.tbls))

		for _, t := range task.tbls {
			sst := t.(*sstable.Table)
			if task.err = sst.SetGlobalTs(ts); task.err != nil {
				return
			}
			ends = append(ends, t.Biggest())
		}

		if wg != nil {
			wg.Wait()
		}

		for i, tbl := range task.tbls {
			if task.err = w.ingestTable(tbl.(*sstable.Table), ends[i+1:]); task.err != nil {
				return
			}
			task.cnt++
		}
	}()
}

// prepareIngestTask allocates a commit ts (unless the DB is managed), drains and
// persists the pending writes in the write channel, and triggers a memtable
// flush if the mutable memtable contains keys inside any ingested table's range.
func (w *writeWorker) prepareIngestTask(task *ingestTask) (ts uint64, wg *sync.WaitGroup, err error) {
	w.orc.writeLock.Lock()
	if !w.IsManaged() {
		ts = w.orc.allocTs()
	}
	reqs := w.pollWriteCh(make([]*request, len(w.writeCh)))
	w.orc.writeLock.Unlock()

	if err = w.writeVLog(reqs); err != nil {
		return 0, nil, err
	}

	guard := w.resourceMgr.Acquire()
	defer guard.Done()
	mTbls := w.mtbls.Load().(*memTables)
	y.Assert(mTbls.tables[0] != nil)
	it := mTbls.getMutable().NewIterator(false)
	defer it.Close()
	for _, t := range task.tbls {
		it.Seek(t.Smallest().UserKey)
		if it.Valid() && it.Key().Compare(t.Biggest()) <= 0 {
			wg = w.flushMemTable()
			break
		}
	}
	return
}

// ingestTable reserves the table's key range in the compaction status, picks the
// deepest level that can host it, and either adds the table directly or compacts
// it with the overlapping tables of that level.
func (w *writeWorker) ingestTable(tbl *sstable.Table, splitHints []y.Key) error {
	cs := &w.lc.cstatus
	kr := keyRange{
		left:  tbl.Smallest(),
		right: tbl.Biggest(),
	}
	ref := w.resourceMgr.Acquire()
	defer ref.Done()

	var (
		targetLevel       int
		overlappingTables []table.Table
	)

	cs.Lock()
	for targetLevel = 0; targetLevel < w.opt.TableBuilderOptions.MaxLevels; targetLevel++ {
		tbls, overlap, ok := w.checkRangeInLevel(kr, targetLevel)
		if !ok {
			// Cannot place the table in this level; fall back to the previous one.
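			// If even L0 rejects the range (a compaction already owns it there),
			// targetLevel stays 0 and the table is simply added to L0 below,
			// since overlappingTables is still empty at that point.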
			if targetLevel != 0 {
				targetLevel--
			}
			break
		}

		overlappingTables = tbls
		if overlap {
			break
		}
	}

	if len(overlappingTables) != 0 {
		overlapLeft := overlappingTables[0].Smallest()
		if overlapLeft.Compare(kr.left) < 0 {
			kr.left = overlapLeft
		}
		overRight := overlappingTables[len(overlappingTables)-1].Biggest()
		if overRight.Compare(kr.right) > 0 {
			kr.right = overRight
		}
	}
	l := cs.levels[targetLevel]
	l.ranges = append(l.ranges, kr)
	cs.Unlock()
	defer l.remove(kr)

	if targetLevel != 0 && len(overlappingTables) != 0 {
		return w.runIngestCompact(targetLevel, tbl, overlappingTables, splitHints, ref)
	}

	change := newCreateChange(tbl.ID(), targetLevel)
	if err := w.manifest.addChanges([]*protos.ManifestChange{change}, nil); err != nil {
		return err
	}
	w.lc.levels[targetLevel].addTable(tbl)
	return nil
}

// runIngestCompact compacts the ingested table against the overlapping tables of
// the target level, records the table additions and deletions in the manifest,
// and installs the resulting tables into the level.
func (w *writeWorker) runIngestCompact(level int, tbl *sstable.Table, overlappingTables []table.Table, splitHints []y.Key, guard *epoch.Guard) error {
	cd := &CompactDef{
		Level:      level - 1,
		Top:        []table.Table{tbl},
		nextRange:  getKeyRange(overlappingTables),
		splitHints: splitHints,
	}
	cd.fillBottomTables(overlappingTables)
	newTables, err := w.lc.compactBuildTables(cd)
	if err != nil {
		return err
	}

	var changes []*protos.ManifestChange
	for _, t := range newTables {
		changes = append(changes, newCreateChange(t.ID(), level))
	}
	for _, t := range cd.Bot {
		changes = append(changes, newDeleteChange(t.ID()))
	}

	if err := w.manifest.addChanges(changes, nil); err != nil {
		return err
	}
	w.lc.levels[cd.Level+1].replaceTables(newTables, cd, guard)
	return nil
}

// overlapWithFlushingMemTables conservatively reports whether the immutable
// (flushing) memtables may contain keys inside kr.
func (w *writeWorker) overlapWithFlushingMemTables(kr keyRange) bool {
	tbls := w.mtbls.Load().(*memTables)
	imms := tbls.tables[:atomic.LoadUint32(&tbls.length)]
	for _, mt := range imms {
		it := mt.NewIterator(false)
		defer it.Close()
		it.Seek(kr.left.UserKey)
		if !it.Valid() || it.Key().Compare(kr.right) <= 0 {
			return true
		}
	}
	return false
}

// checkRangeInLevel returns the tables of the given level whose ranges intersect
// kr, whether any of them actually contains keys inside kr, and whether the
// level can host the ingested table at all (no running compaction over kr and,
// for levels below L0, the level is not empty).
func (w *writeWorker) checkRangeInLevel(kr keyRange, level int) (overlappingTables []table.Table, overlap bool, ok bool) {
	cs := &w.lc.cstatus
	handler := w.lc.levels[level]
	handler.RLock()
	defer handler.RUnlock()

	if len(handler.tables) == 0 && level != 0 {
		return nil, false, false
	}

	l := cs.levels[level]
	if l.overlapsWith(kr) {
		return nil, false, false
	}

	var left, right int
	if level == 0 {
		left, right = 0, len(handler.tables)
	} else {
		left, right = handler.overlappingTables(levelHandlerRLocked{}, kr)
	}

	for i := left; i < right; i++ {
		it := handler.tables[i].NewIterator(false)
		defer it.Close()
		it.Seek(kr.left.UserKey)
		if it.Valid() && it.Key().Compare(kr.right) <= 0 {
			overlap = true
			break
		}
	}
	return handler.tables[left:right], overlap, true
}
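
The subtle part of ingestTable above is picking the target level: walk down from L0, stop at the first level whose stored data overlaps the ingested key range, and back up one level as soon as a level cannot host the range (a running compaction owns it, or a deeper level is empty). The standalone sketch below models just that descent with toy types; span, levelState, and pickTargetLevel are illustrative stand-ins, not badger's actual structures.

package main

import "fmt"

// span is a toy closed key range standing in for badger's keyRange.
type span struct{ left, right int }

func overlaps(a, b span) bool { return a.left <= b.right && b.left <= a.right }

// levelState is a toy model of one LSM level: key ranges reserved by running
// compactions plus the key ranges of the tables the level stores.
type levelState struct {
	compacting []span
	data       []span
}

// pickTargetLevel mirrors the descent in ingestTable: walk from L0 downward,
// return the first level whose stored data overlaps kr, and back up one level
// as soon as a level cannot host the range.
func pickTargetLevel(levels []levelState, kr span) int {
	target := 0
	for ; target < len(levels); target++ {
		l := levels[target]
		ok := len(l.data) > 0 || target == 0 // an empty deeper level stops the descent
		for _, c := range l.compacting {
			if overlaps(c, kr) {
				ok = false // a running compaction owns this range
				break
			}
		}
		if !ok {
			if target != 0 {
				target--
			}
			return target
		}
		for _, d := range l.data {
			if overlaps(d, kr) {
				return target // data overlap: ingest here (via compaction when target > 0)
			}
		}
	}
	return len(levels) - 1
}

func main() {
	levels := []levelState{
		{},                                // L0
		{data: []span{{0, 10}, {50, 60}}}, // L1
		{data: []span{{20, 30}}},          // L2
		{},                                // L3, still empty
	}
	fmt.Println(pickTargetLevel(levels, span{12, 18})) // 2: descends until empty L3 stops it
	fmt.Println(pickTargetLevel(levels, span{25, 28})) // 2: overlaps the L2 table
	fmt.Println(pickTargetLevel(levels, span{5, 8}))   // 1: overlaps an L1 table
}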