github.com/neatio-net/neatio@v1.7.3-0.20231114194659-f4d7a2226baa/chain/trie/sync.go

package trie

import (
	"errors"
	"fmt"

	"github.com/neatio-net/neatio/neatdb"
	"github.com/neatio-net/neatio/utilities/common"
	"github.com/neatio-net/neatio/utilities/common/prque"
)

// ErrNotRequested is returned by the trie sync when processing is requested
// for a node that was never scheduled for retrieval.
var ErrNotRequested = errors.New("not requested")

// ErrAlreadyProcessed is returned by the trie sync when processing is requested
// for a node that has already been processed.
var ErrAlreadyProcessed = errors.New("already processed")

// request represents a scheduled or already in-flight state retrieval request.
type request struct {
	hash common.Hash // Hash of the node data content to retrieve
	data []byte      // Data content of the node, cached until all subtrees complete
	raw  bool        // Whether this is a raw entry (e.g. code) or a trie node

	parents []*request // Parent requests referencing this entry (all notified upon completion)
	depth   int        // Depth level within the trie, used to prioritise DFS
	deps    int        // Number of dependencies before this node may be committed

	callback LeafCallback // Callback to invoke if a leaf node is reached on this branch
}

// SyncResult couples a retrieved node's data with the hash it was requested by.
type SyncResult struct {
	Hash common.Hash // Hash of the originally unknown trie node
	Data []byte      // Data content of the retrieved node
}

// syncMemBatch is an in-memory buffer of successfully downloaded but not yet
// persisted data items.
type syncMemBatch struct {
	batch map[common.Hash][]byte // In-memory batch of recently completed items
	order []common.Hash          // Order of completion, to prevent out-of-order data loss
}

// newSyncMemBatch allocates a new memory buffer for not-yet persisted trie nodes.
func newSyncMemBatch() *syncMemBatch {
	return &syncMemBatch{
		batch: make(map[common.Hash][]byte),
		order: make([]common.Hash, 0, 256),
	}
}

// Sync is the main state trie synchronisation scheduler: it provides the hashes
// of yet unknown trie nodes to retrieve, accepts the node data associated with
// those hashes and reconstructs the trie step by step until all of it is known.
type Sync struct {
	database neatdb.Reader            // Persistent database to check for existing entries
	membatch *syncMemBatch            // Memory buffer to avoid frequent database writes
	requests map[common.Hash]*request // Pending requests keyed by node hash
	queue    *prque.Prque             // Priority queue of pending requests
}

// NewSync creates a new trie data download scheduler rooted at the given hash.
func NewSync(root common.Hash, database neatdb.Reader, callback LeafCallback) *Sync {
	ts := &Sync{
		database: database,
		membatch: newSyncMemBatch(),
		requests: make(map[common.Hash]*request),
		queue:    prque.New(nil),
	}
	ts.AddSubTrie(root, 0, common.Hash{}, callback)
	return ts
}

// AddSubTrie registers a new sub-trie to the sync code, rooted at the
// designated parent request.
func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callback LeafCallback) {
	// Short circuit if the trie is empty or already known
	if root == emptyRoot {
		return
	}
	if _, ok := s.membatch.batch[root]; ok {
		return
	}
	key := root.Bytes()
	blob, _ := s.database.Get(key)
	if local, err := decodeNode(key, blob); local != nil && err == nil {
		return
	}
	// Assemble the new sub-trie sync request
	req := &request{
		hash:     root,
		depth:    depth,
		callback: callback,
	}
	// If this sub-trie has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.requests[parent]
		if ancestor == nil {
			panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}

// AddRawEntry schedules the direct retrieval of a state entry that should not
// be interpreted as a trie node, but accepted and stored into the database as is.
func (s *Sync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) {
	// Short circuit if the entry is empty or already known
	if hash == emptyState {
		return
	}
	if _, ok := s.membatch.batch[hash]; ok {
		return
	}
	if ok, _ := s.database.Has(hash.Bytes()); ok {
		return
	}
	// Assemble the new raw-entry sync request
	req := &request{
		hash:  hash,
		raw:   true,
		depth: depth,
	}
	// If this entry has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.requests[parent]
		if ancestor == nil {
			panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}

// Missing retrieves the known missing node hashes from the trie for retrieval,
// up to max entries (max == 0 means no limit).
func (s *Sync) Missing(max int) []common.Hash {
	var requests []common.Hash
	for !s.queue.Empty() && (max == 0 || len(requests) < max) {
		requests = append(requests, s.queue.PopItem().(common.Hash))
	}
	return requests
}

// Process injects a batch of retrieved trie node data, returning whether
// anything was committed to the memory batch and, on failure, the index of the
// entry the processing failed on.
func (s *Sync) Process(results []SyncResult) (bool, int, error) {
	committed := false

	for i, item := range results {
		// If the item was not requested or already filled, bail out
		request := s.requests[item.Hash]
		if request == nil {
			return committed, i, ErrNotRequested
		}
		if request.data != nil {
			return committed, i, ErrAlreadyProcessed
		}
		// If the item is a raw entry request, commit it directly
		if request.raw {
			request.data = item.Data
			s.commit(request)
			committed = true
			continue
		}
		// Decode the node data content and update the request
		node, err := decodeNode(item.Hash[:], item.Data)
		if err != nil {
			return committed, i, err
		}
		request.data = item.Data

		// Create and schedule requests for all the unknown children
		requests, err := s.sideren(request, node)
		if err != nil {
			return committed, i, err
		}
		if len(requests) == 0 && request.deps == 0 {
			s.commit(request)
			committed = true
			continue
		}
		request.deps += len(requests)
		for _, side := range requests {
			s.schedule(side)
		}
	}
	return committed, 0, nil
}

// Commit flushes the data stored in the internal membatch out to persistent
// storage, returning the number of items written and any error that occurred.
func (s *Sync) Commit(dbw neatdb.Writer) (int, error) {
	// Dump the membatch into the database writer, in completion order
	for i, key := range s.membatch.order {
		if err := dbw.Put(key[:], s.membatch.batch[key]); err != nil {
			return i, err
		}
	}
	written := len(s.membatch.order)

	// Drop the flushed membatch data and return
	s.membatch = newSyncMemBatch()
	return written, nil
}

// Pending returns the number of state entries currently pending for download.
func (s *Sync) Pending() int {
	return len(s.requests)
}

// schedule inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request is discarded and
// only a parent reference is added to the old one.
func (s *Sync) schedule(req *request) {
	// If we're already requesting this node, add the new references and stop
	if old, ok := s.requests[req.hash]; ok {
		old.parents = append(old.parents, req.parents...)
		return
	}
	// Schedule the request for future retrieval
	s.queue.Push(req.hash, int64(req.depth))
	s.requests[req.hash] = req
}

// sideren retrieves all the missing children of a state trie entry for future
// retrieval scheduling, invoking the leaf callback for any value nodes found.
func (s *Sync) sideren(req *request, object node) ([]*request, error) {
	// Gather all the children of the node, irrelevant whether known or not
	type side struct {
		node  node
		depth int
	}
	var sideren []side

	switch node := (object).(type) {
	case *shortNode:
		sideren = []side{{
			node:  node.Val,
			depth: req.depth + len(node.Key),
		}}
	case *fullNode:
		for i := 0; i < 17; i++ {
			if node.Children[i] != nil {
				sideren = append(sideren, side{
					node:  node.Children[i],
					depth: req.depth + 1,
				})
			}
		}
	default:
		panic(fmt.Sprintf("unknown node: %+v", node))
	}
	// Iterate over the children and request all the unknown ones
	requests := make([]*request, 0, len(sideren))
	for _, side := range sideren {
		// Notify any external watcher of a new key/value node
		if req.callback != nil {
			if node, ok := (side.node).(valueNode); ok {
				if err := req.callback(node, req.hash); err != nil {
					return nil, err
				}
			}
		}
		// If the child references another node, try to resolve it locally or schedule it
		if node, ok := (side.node).(hashNode); ok {
			hash := common.BytesToHash(node)
			if _, ok := s.membatch.batch[hash]; ok {
				continue
			}
			if ok, _ := s.database.Has(node); ok {
				continue
			}
			// Locally unknown node, schedule for retrieval
			requests = append(requests, &request{
				hash:     hash,
				parents:  []*request{req},
				depth:    side.depth,
				callback: req.callback,
			})
		}
	}
	return requests, nil
}

// commit finalizes a retrieval request and stores it into the membatch. If any
// of the referencing parent requests complete due to this commit, they are
// committed recursively as well.
func (s *Sync) commit(req *request) (err error) {
	// Write the node content to the membatch and drop the pending request
	s.membatch.batch[req.hash] = req.data
	s.membatch.order = append(s.membatch.order, req.hash)

	delete(s.requests, req.hash)

	// Check all parents for completion
	for _, parent := range req.parents {
		parent.deps--
		if parent.deps == 0 {
			if err := s.commit(parent); err != nil {
				return err
			}
		}
	}
	return nil
}
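
// driveSync is a minimal illustrative sketch and is NOT part of the original
// file: it only shows how a caller might drive the scheduler above by
// repeatedly asking for missing hashes, fetching their data through a
// caller-supplied retrieval hook, feeding the results back via Process and
// flushing the memory batch with Commit. The name driveSync and the retrieve
// parameter are assumptions for this example; the package does not define them.
func driveSync(s *Sync, dbw neatdb.Writer, retrieve func(common.Hash) ([]byte, error)) error {
	for {
		// Ask for up to 128 missing node hashes; an empty slice means the trie is complete.
		hashes := s.Missing(128)
		if len(hashes) == 0 {
			return nil
		}
		// Fetch the data for each hash (network or database, depending on the caller).
		results := make([]SyncResult, 0, len(hashes))
		for _, hash := range hashes {
			data, err := retrieve(hash) // assumed retrieval hook, not defined in this package
			if err != nil {
				return err
			}
			results = append(results, SyncResult{Hash: hash, Data: data})
		}
		// Inject the responses, then persist everything that became committable.
		if _, _, err := s.Process(results); err != nil {
			return err
		}
		if _, err := s.Commit(dbw); err != nil {
			return err
		}
	}
}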