// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package mem implements a mock store that keeps all chunk data in memory.
// While it can be used for testing on smaller scales, the main purpose of this
// package is to provide the simplest reference implementation of a mock store.
package mem

import (
	"archive/tar"
	"bytes"
	"encoding/json"
	"io"
	"io/ioutil"
	"sort"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage/mock"
)

// GlobalStore stores all chunk data and also keys and node addresses relations.
// It implements mock.GlobalStore interface.
//
// All slice fields are kept sorted (Put inserts at the position returned by
// sort.Search), which lets the *Index helpers use binary search.
type GlobalStore struct {
	// holds a slice of keys per node, sorted bytewise
	nodeKeys map[common.Address][][]byte
	// holds which key is stored on which nodes; addresses sorted bytewise
	keyNodes map[string][]common.Address
	// all node addresses, sorted bytewise
	nodes []common.Address
	// all keys, sorted bytewise
	keys [][]byte
	// all keys data, keyed by chunk key; a chunk's payload is shared by
	// every node that stores the key
	data map[string][]byte
	// mu guards every field above for concurrent use
	mu sync.RWMutex
}

// NewGlobalStore creates a new instance of GlobalStore.
52 func NewGlobalStore() *GlobalStore { 53 return &GlobalStore{ 54 nodeKeys: make(map[common.Address][][]byte), 55 keyNodes: make(map[string][]common.Address), 56 nodes: make([]common.Address, 0), 57 keys: make([][]byte, 0), 58 data: make(map[string][]byte), 59 } 60 } 61 62 // NewNodeStore returns a new instance of NodeStore that retrieves and stores 63 // chunk data only for a node with address addr. 64 func (s *GlobalStore) NewNodeStore(addr common.Address) *mock.NodeStore { 65 return mock.NewNodeStore(addr, s) 66 } 67 68 // Get returns chunk data if the chunk with key exists for node 69 // on address addr. 70 func (s *GlobalStore) Get(addr common.Address, key []byte) (data []byte, err error) { 71 s.mu.RLock() 72 defer s.mu.RUnlock() 73 74 if _, has := s.nodeKeyIndex(addr, key); !has { 75 return nil, mock.ErrNotFound 76 } 77 78 data, ok := s.data[string(key)] 79 if !ok { 80 return nil, mock.ErrNotFound 81 } 82 return data, nil 83 } 84 85 // Put saves the chunk data for node with address addr. 86 func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error { 87 s.mu.Lock() 88 defer s.mu.Unlock() 89 90 if i, found := s.nodeKeyIndex(addr, key); !found { 91 s.nodeKeys[addr] = append(s.nodeKeys[addr], nil) 92 copy(s.nodeKeys[addr][i+1:], s.nodeKeys[addr][i:]) 93 s.nodeKeys[addr][i] = key 94 } 95 96 if i, found := s.keyNodeIndex(key, addr); !found { 97 k := string(key) 98 s.keyNodes[k] = append(s.keyNodes[k], addr) 99 copy(s.keyNodes[k][i+1:], s.keyNodes[k][i:]) 100 s.keyNodes[k][i] = addr 101 } 102 103 if i, found := s.nodeIndex(addr); !found { 104 s.nodes = append(s.nodes, addr) 105 copy(s.nodes[i+1:], s.nodes[i:]) 106 s.nodes[i] = addr 107 } 108 109 if i, found := s.keyIndex(key); !found { 110 s.keys = append(s.keys, nil) 111 copy(s.keys[i+1:], s.keys[i:]) 112 s.keys[i] = key 113 } 114 115 s.data[string(key)] = data 116 117 return nil 118 } 119 120 // Delete removes the chunk data for node with address addr. 
121 func (s *GlobalStore) Delete(addr common.Address, key []byte) error { 122 s.mu.Lock() 123 defer s.mu.Unlock() 124 125 if i, has := s.nodeKeyIndex(addr, key); has { 126 s.nodeKeys[addr] = append(s.nodeKeys[addr][:i], s.nodeKeys[addr][i+1:]...) 127 } 128 129 k := string(key) 130 if i, on := s.keyNodeIndex(key, addr); on { 131 s.keyNodes[k] = append(s.keyNodes[k][:i], s.keyNodes[k][i+1:]...) 132 } 133 134 if len(s.nodeKeys[addr]) == 0 { 135 if i, found := s.nodeIndex(addr); found { 136 s.nodes = append(s.nodes[:i], s.nodes[i+1:]...) 137 } 138 } 139 140 if len(s.keyNodes[k]) == 0 { 141 if i, found := s.keyIndex(key); found { 142 s.keys = append(s.keys[:i], s.keys[i+1:]...) 143 } 144 } 145 return nil 146 } 147 148 // HasKey returns whether a node with addr contains the key. 149 func (s *GlobalStore) HasKey(addr common.Address, key []byte) (yes bool) { 150 s.mu.RLock() 151 defer s.mu.RUnlock() 152 153 _, yes = s.nodeKeyIndex(addr, key) 154 return yes 155 } 156 157 // keyIndex returns the index of a key in keys slice. 158 func (s *GlobalStore) keyIndex(key []byte) (index int, found bool) { 159 l := len(s.keys) 160 index = sort.Search(l, func(i int) bool { 161 return bytes.Compare(s.keys[i], key) >= 0 162 }) 163 found = index < l && bytes.Equal(s.keys[index], key) 164 return index, found 165 } 166 167 // nodeIndex returns the index of a node address in nodes slice. 168 func (s *GlobalStore) nodeIndex(addr common.Address) (index int, found bool) { 169 l := len(s.nodes) 170 index = sort.Search(l, func(i int) bool { 171 return bytes.Compare(s.nodes[i][:], addr[:]) >= 0 172 }) 173 found = index < l && bytes.Equal(s.nodes[index][:], addr[:]) 174 return index, found 175 } 176 177 // nodeKeyIndex returns the index of a key in nodeKeys slice. 
178 func (s *GlobalStore) nodeKeyIndex(addr common.Address, key []byte) (index int, found bool) { 179 l := len(s.nodeKeys[addr]) 180 index = sort.Search(l, func(i int) bool { 181 return bytes.Compare(s.nodeKeys[addr][i], key) >= 0 182 }) 183 found = index < l && bytes.Equal(s.nodeKeys[addr][index], key) 184 return index, found 185 } 186 187 // keyNodeIndex returns the index of a node address in keyNodes slice. 188 func (s *GlobalStore) keyNodeIndex(key []byte, addr common.Address) (index int, found bool) { 189 k := string(key) 190 l := len(s.keyNodes[k]) 191 index = sort.Search(l, func(i int) bool { 192 return bytes.Compare(s.keyNodes[k][i][:], addr[:]) >= 0 193 }) 194 found = index < l && s.keyNodes[k][index] == addr 195 return index, found 196 } 197 198 // Keys returns a paginated list of keys on all nodes. 199 func (s *GlobalStore) Keys(startKey []byte, limit int) (keys mock.Keys, err error) { 200 s.mu.RLock() 201 defer s.mu.RUnlock() 202 203 var i int 204 if startKey != nil { 205 i, _ = s.keyIndex(startKey) 206 } 207 total := len(s.keys) 208 max := maxIndex(i, limit, total) 209 keys.Keys = make([][]byte, 0, max-i) 210 for ; i < max; i++ { 211 keys.Keys = append(keys.Keys, append([]byte(nil), s.keys[i]...)) 212 } 213 if total > max { 214 keys.Next = s.keys[max] 215 } 216 return keys, nil 217 } 218 219 // Nodes returns a paginated list of all known nodes. 220 func (s *GlobalStore) Nodes(startAddr *common.Address, limit int) (nodes mock.Nodes, err error) { 221 s.mu.RLock() 222 defer s.mu.RUnlock() 223 224 var i int 225 if startAddr != nil { 226 i, _ = s.nodeIndex(*startAddr) 227 } 228 total := len(s.nodes) 229 max := maxIndex(i, limit, total) 230 nodes.Addrs = make([]common.Address, 0, max-i) 231 for ; i < max; i++ { 232 nodes.Addrs = append(nodes.Addrs, s.nodes[i]) 233 } 234 if total > max { 235 nodes.Next = &s.nodes[max] 236 } 237 return nodes, nil 238 } 239 240 // NodeKeys returns a paginated list of keys on a node with provided address. 
241 func (s *GlobalStore) NodeKeys(addr common.Address, startKey []byte, limit int) (keys mock.Keys, err error) { 242 s.mu.RLock() 243 defer s.mu.RUnlock() 244 245 var i int 246 if startKey != nil { 247 i, _ = s.nodeKeyIndex(addr, startKey) 248 } 249 total := len(s.nodeKeys[addr]) 250 max := maxIndex(i, limit, total) 251 keys.Keys = make([][]byte, 0, max-i) 252 for ; i < max; i++ { 253 keys.Keys = append(keys.Keys, append([]byte(nil), s.nodeKeys[addr][i]...)) 254 } 255 if total > max { 256 keys.Next = s.nodeKeys[addr][max] 257 } 258 return keys, nil 259 } 260 261 // KeyNodes returns a paginated list of nodes that contain a particular key. 262 func (s *GlobalStore) KeyNodes(key []byte, startAddr *common.Address, limit int) (nodes mock.Nodes, err error) { 263 s.mu.RLock() 264 defer s.mu.RUnlock() 265 266 var i int 267 if startAddr != nil { 268 i, _ = s.keyNodeIndex(key, *startAddr) 269 } 270 total := len(s.keyNodes[string(key)]) 271 max := maxIndex(i, limit, total) 272 nodes.Addrs = make([]common.Address, 0, max-i) 273 for ; i < max; i++ { 274 nodes.Addrs = append(nodes.Addrs, s.keyNodes[string(key)][i]) 275 } 276 if total > max { 277 nodes.Next = &s.keyNodes[string(key)][max] 278 } 279 return nodes, nil 280 } 281 282 // maxIndex returns the end index for one page listing 283 // based on the start index, limit and total number of elements. 284 func maxIndex(start, limit, total int) (max int) { 285 if limit <= 0 { 286 limit = mock.DefaultLimit 287 } 288 if limit > mock.MaxLimit { 289 limit = mock.MaxLimit 290 } 291 max = total 292 if start+limit < max { 293 max = start + limit 294 } 295 return max 296 } 297 298 // Import reads tar archive from a reader that contains exported chunk data. 299 // It returns the number of chunks imported and an error. 
300 func (s *GlobalStore) Import(r io.Reader) (n int, err error) { 301 s.mu.Lock() 302 defer s.mu.Unlock() 303 304 tr := tar.NewReader(r) 305 306 for { 307 hdr, err := tr.Next() 308 if err != nil { 309 if err == io.EOF { 310 break 311 } 312 return n, err 313 } 314 315 data, err := ioutil.ReadAll(tr) 316 if err != nil { 317 return n, err 318 } 319 320 var c mock.ExportedChunk 321 if err = json.Unmarshal(data, &c); err != nil { 322 return n, err 323 } 324 325 key := common.Hex2Bytes(hdr.Name) 326 s.keyNodes[string(key)] = c.Addrs 327 for _, addr := range c.Addrs { 328 if i, has := s.nodeKeyIndex(addr, key); !has { 329 s.nodeKeys[addr] = append(s.nodeKeys[addr], nil) 330 copy(s.nodeKeys[addr][i+1:], s.nodeKeys[addr][i:]) 331 s.nodeKeys[addr][i] = key 332 } 333 if i, found := s.nodeIndex(addr); !found { 334 s.nodes = append(s.nodes, addr) 335 copy(s.nodes[i+1:], s.nodes[i:]) 336 s.nodes[i] = addr 337 } 338 } 339 if i, found := s.keyIndex(key); !found { 340 s.keys = append(s.keys, nil) 341 copy(s.keys[i+1:], s.keys[i:]) 342 s.keys[i] = key 343 } 344 s.data[string(key)] = c.Data 345 n++ 346 } 347 return n, err 348 } 349 350 // Export writes to a writer a tar archive with all chunk data from 351 // the store. It returns the number of chunks exported and an error. 
352 func (s *GlobalStore) Export(w io.Writer) (n int, err error) { 353 s.mu.RLock() 354 defer s.mu.RUnlock() 355 356 tw := tar.NewWriter(w) 357 defer tw.Close() 358 359 buf := bytes.NewBuffer(make([]byte, 0, 1024)) 360 encoder := json.NewEncoder(buf) 361 for key, addrs := range s.keyNodes { 362 buf.Reset() 363 if err = encoder.Encode(mock.ExportedChunk{ 364 Addrs: addrs, 365 Data: s.data[key], 366 }); err != nil { 367 return n, err 368 } 369 370 data := buf.Bytes() 371 hdr := &tar.Header{ 372 Name: common.Bytes2Hex([]byte(key)), 373 Mode: 0644, 374 Size: int64(len(data)), 375 } 376 if err := tw.WriteHeader(hdr); err != nil { 377 return n, err 378 } 379 if _, err := tw.Write(data); err != nil { 380 return n, err 381 } 382 n++ 383 } 384 return n, err 385 }