github.com/theQRL/go-zond@v0.1.1/core/rawdb/freezer_batch.go

// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"fmt"

	"github.com/golang/snappy"
	"github.com/theQRL/go-zond/common/math"
	"github.com/theQRL/go-zond/rlp"
)

// This is the maximum amount of data that will be buffered in memory
// for a single freezer table batch.
const freezerBatchBufferLimit = 2 * 1024 * 1024

// freezerBatch is a write operation of multiple items on a freezer.
type freezerBatch struct {
	tables map[string]*freezerTableBatch
}

func newFreezerBatch(f *Freezer) *freezerBatch {
	batch := &freezerBatch{tables: make(map[string]*freezerTableBatch, len(f.tables))}
	for kind, table := range f.tables {
		batch.tables[kind] = table.newBatch()
	}
	return batch
}

// Append adds an RLP-encoded item of the given kind.
func (batch *freezerBatch) Append(kind string, num uint64, item interface{}) error {
	return batch.tables[kind].Append(num, item)
}

// AppendRaw adds an item of the given kind.
func (batch *freezerBatch) AppendRaw(kind string, num uint64, item []byte) error {
	return batch.tables[kind].AppendRaw(num, item)
}

// reset initializes the batch.
func (batch *freezerBatch) reset() {
	for _, tb := range batch.tables {
		tb.reset()
	}
}

// commit is called at the end of a write operation and
// writes all remaining data to tables.
func (batch *freezerBatch) commit() (item uint64, writeSize int64, err error) {
	// Check that count agrees on all batches.
	item = uint64(math.MaxUint64)
	for name, tb := range batch.tables {
		if item < math.MaxUint64 && tb.curItem != item {
			return 0, 0, fmt.Errorf("table %s is at item %d, want %d", name, tb.curItem, item)
		}
		item = tb.curItem
	}

	// Commit all table batches.
	for _, tb := range batch.tables {
		if err := tb.commit(); err != nil {
			return 0, 0, err
		}
		writeSize += tb.totalBytes
	}
	return item, writeSize, nil
}

// freezerTableBatch is a batch for a freezer table.
type freezerTableBatch struct {
	t *freezerTable

	sb          *snappyBuffer
	encBuffer   writeBuffer
	dataBuffer  []byte
	indexBuffer []byte
	curItem     uint64 // expected index of next append
	totalBytes  int64  // counts written bytes since reset
}

// newBatch creates a new batch for the freezer table.
func (t *freezerTable) newBatch() *freezerTableBatch {
	batch := &freezerTableBatch{t: t}
	if !t.noCompression {
		batch.sb = new(snappyBuffer)
	}
	batch.reset()
	return batch
}

// reset clears the batch for reuse.
func (batch *freezerTableBatch) reset() {
	batch.dataBuffer = batch.dataBuffer[:0]
	batch.indexBuffer = batch.indexBuffer[:0]
	batch.curItem = batch.t.items.Load()
	batch.totalBytes = 0
}

// Append rlp-encodes and adds data at the end of the freezer table. The item number is a
// precautionary parameter to ensure data correctness, but the table will reject already
// existing data.
func (batch *freezerTableBatch) Append(item uint64, data interface{}) error {
	if item != batch.curItem {
		return fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, item, batch.curItem)
	}

	// Encode the item.
	batch.encBuffer.Reset()
	if err := rlp.Encode(&batch.encBuffer, data); err != nil {
		return err
	}
	encItem := batch.encBuffer.data
	if batch.sb != nil {
		encItem = batch.sb.compress(encItem)
	}
	return batch.appendItem(encItem)
}

// AppendRaw injects a binary blob at the end of the freezer table. The item number is a
// precautionary parameter to ensure data correctness, but the table will reject already
// existing data.
func (batch *freezerTableBatch) AppendRaw(item uint64, blob []byte) error {
	if item != batch.curItem {
		return fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, item, batch.curItem)
	}

	encItem := blob
	if batch.sb != nil {
		encItem = batch.sb.compress(blob)
	}
	return batch.appendItem(encItem)
}

func (batch *freezerTableBatch) appendItem(data []byte) error {
	// Check if item fits into current data file.
	itemSize := int64(len(data))
	itemOffset := batch.t.headBytes + int64(len(batch.dataBuffer))
	if itemOffset+itemSize > int64(batch.t.maxFileSize) {
		// It doesn't fit, go to next file first.
		if err := batch.commit(); err != nil {
			return err
		}
		if err := batch.t.advanceHead(); err != nil {
			return err
		}
		itemOffset = 0
	}

	// Put data to buffer.
	batch.dataBuffer = append(batch.dataBuffer, data...)
	batch.totalBytes += itemSize

	// Put index entry to buffer.
	entry := indexEntry{filenum: batch.t.headId, offset: uint32(itemOffset + itemSize)}
	batch.indexBuffer = entry.append(batch.indexBuffer)
	batch.curItem++

	return batch.maybeCommit()
}

// maybeCommit writes the buffered data if the buffer is full enough.
func (batch *freezerTableBatch) maybeCommit() error {
	if len(batch.dataBuffer) > freezerBatchBufferLimit {
		return batch.commit()
	}
	return nil
}

// commit writes the batched items to the backing freezerTable.
func (batch *freezerTableBatch) commit() error {
	// Write data.
	_, err := batch.t.head.Write(batch.dataBuffer)
	if err != nil {
		return err
	}
	dataSize := int64(len(batch.dataBuffer))
	batch.dataBuffer = batch.dataBuffer[:0]

	// Write indices.
	_, err = batch.t.index.Write(batch.indexBuffer)
	if err != nil {
		return err
	}
	indexSize := int64(len(batch.indexBuffer))
	batch.indexBuffer = batch.indexBuffer[:0]

	// Update headBytes of table.
	batch.t.headBytes += dataSize
	batch.t.items.Store(batch.curItem)

	// Update metrics.
	batch.t.sizeGauge.Inc(dataSize + indexSize)
	batch.t.writeMeter.Mark(dataSize + indexSize)
	return nil
}

// snappyBuffer writes snappy in block format, and can be reused. The
// destination buffer is grown as needed and reused across calls to compress.
type snappyBuffer struct {
	dst []byte
}

// compress snappy-compresses the data.
func (s *snappyBuffer) compress(data []byte) []byte {
	// The snappy library does not care what the capacity of the buffer is,
	// but only checks the length. If the length is too small, it will
	// allocate a brand new buffer.
	// To avoid that, we check the required size here, and grow the size of the
	// buffer to utilize the full capacity.
	if n := snappy.MaxEncodedLen(len(data)); len(s.dst) < n {
		if cap(s.dst) < n {
			s.dst = make([]byte, n)
		}
		s.dst = s.dst[:n]
	}

	s.dst = snappy.Encode(s.dst, data)
	return s.dst
}

// writeBuffer implements io.Writer for a byte slice.
type writeBuffer struct {
	data []byte
}

// Write appends data to the buffer.
func (wb *writeBuffer) Write(data []byte) (int, error) {
	wb.data = append(wb.data, data...)
	return len(data), nil
}

// Reset truncates the buffer for reuse.
func (wb *writeBuffer) Reset() {
	wb.data = wb.data[:0]
}
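
// exampleBatchWrite is an illustrative sketch of how the batch API above might
// be driven; it is not part of the original file or of go-zond's normal write
// path, which obtains and commits the batch through the Freezer itself. The
// table kinds, item numbering, and data below are placeholders: each map in
// items is assumed to hold one raw blob for every table kind of the freezer
// (commit requires all table batches to end on the same item count), and
// firstItem is assumed to equal the tables' current item count.
func exampleBatchWrite(f *Freezer, firstItem uint64, items []map[string][]byte) error {
	batch := newFreezerBatch(f)
	for i, byKind := range items {
		num := firstItem + uint64(i)
		for kind, blob := range byKind {
			// Item numbers must continue each table's current sequence;
			// AppendRaw rejects out-of-order insertions.
			if err := batch.AppendRaw(kind, num, blob); err != nil {
				return err
			}
		}
	}
	// Flush whatever is still buffered in memory to the data and index files.
	_, _, err := batch.commit()
	return err
}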