github.com/xxRanger/go-ethereum@v1.8.23/swarm/storage/types.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"context"
	"crypto"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"io"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/bmt"
	ch "github.com/ethereum/go-ethereum/swarm/chunk"
	"golang.org/x/crypto/sha3"
)

const MaxPO = 16
const AddressLength = 32

type SwarmHasher func() SwarmHash

type Address []byte

// Proximity(x, y) returns the proximity order of the MSB distance between x and y
//
// The distance metric MSB(x, y) of two equal-length byte sequences x and y is the
// value of the binary integer cast of x^y, i.e. x and y bitwise xor-ed.
// The binary cast is big endian: most significant bit first (=MSB).
//
// Proximity(x, y) is a discrete logarithmic scaling of the MSB distance.
// It is defined as the reverse rank of the integer part of the base 2
// logarithm of the distance.
// It is calculated by counting the number of common leading zeros in the (MSB)
// binary representation of x^y.
//
// (0 farthest, 255 closest, 256 self)
func Proximity(one, other []byte) (ret int) {
	b := (MaxPO-1)/8 + 1
	if b > len(one) {
		b = len(one)
	}
	m := 8
	for i := 0; i < b; i++ {
		oxo := one[i] ^ other[i]
		for j := 0; j < m; j++ {
			if (oxo>>uint8(7-j))&0x01 != 0 {
				return i*8 + j
			}
		}
	}
	return MaxPO
}
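// The following is an illustrative sketch added for this edit, not part of the
// original file. It shows how Proximity above behaves on a few hand-picked
// inputs; the function name proximityExample is only for illustration.
func proximityExample() {
	a := make([]byte, AddressLength)
	b := make([]byte, AddressLength)

	b[0] = 0x80
	fmt.Println(Proximity(a, b)) // 0: the very first (most significant) bit differs

	b[0] = 0x01
	fmt.Println(Proximity(a, b)) // 7: the first seven bits agree, the eighth differs

	fmt.Println(Proximity(a, a)) // 16: identical inputs are capped at MaxPO, not 256
}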
var ZeroAddr = Address(common.Hash{}.Bytes())

func MakeHashFunc(hash string) SwarmHasher {
	switch hash {
	case "SHA256":
		return func() SwarmHash { return &HashWithLength{crypto.SHA256.New()} }
	case "SHA3":
		return func() SwarmHash { return &HashWithLength{sha3.NewLegacyKeccak256()} }
	case "BMT":
		return func() SwarmHash {
			hasher := sha3.NewLegacyKeccak256
			hasherSize := hasher().Size()
			segmentCount := ch.DefaultSize / hasherSize
			pool := bmt.NewTreePool(hasher, segmentCount, bmt.PoolSize)
			return bmt.New(pool)
		}
	}
	return nil
}

func (a Address) Hex() string {
	return fmt.Sprintf("%064x", []byte(a[:]))
}

func (a Address) Log() string {
	if len(a[:]) < 8 {
		return fmt.Sprintf("%x", []byte(a[:]))
	}
	return fmt.Sprintf("%016x", []byte(a[:8]))
}

func (a Address) String() string {
	return fmt.Sprintf("%064x", []byte(a))
}

func (a Address) MarshalJSON() (out []byte, err error) {
	return []byte(`"` + a.String() + `"`), nil
}

func (a *Address) UnmarshalJSON(value []byte) error {
	s := string(value)
	*a = make([]byte, 32)
	h := common.Hex2Bytes(s[1 : len(s)-1])
	copy(*a, h)
	return nil
}

type AddressCollection []Address

func NewAddressCollection(l int) AddressCollection {
	return make(AddressCollection, l)
}

func (c AddressCollection) Len() int {
	return len(c)
}

func (c AddressCollection) Less(i, j int) bool {
	return bytes.Compare(c[i], c[j]) == -1
}

func (c AddressCollection) Swap(i, j int) {
	c[i], c[j] = c[j], c[i]
}

// Chunk interface implemented by context.Contexts and data chunks
type Chunk interface {
	Address() Address
	Data() []byte
}

type chunk struct {
	addr  Address
	sdata []byte
	span  int64
}

func NewChunk(addr Address, data []byte) *chunk {
	return &chunk{
		addr:  addr,
		sdata: data,
		span:  -1,
	}
}

func (c *chunk) Address() Address {
	return c.addr
}

func (c *chunk) Data() []byte {
	return c.sdata
}

// String() for pretty printing
func (c *chunk) String() string {
	return fmt.Sprintf("Address: %v TreeSize: %v Chunksize: %v", c.addr.Log(), c.span, len(c.sdata))
}

// GenerateRandomChunk creates a chunk with a random payload of dataSize bytes,
// prefixed with the 8-byte little-endian length and addressed by the default hash.
func GenerateRandomChunk(dataSize int64) Chunk {
	hasher := MakeHashFunc(DefaultHash)()
	sdata := make([]byte, dataSize+8)
	rand.Read(sdata[8:])
	binary.LittleEndian.PutUint64(sdata[:8], uint64(dataSize))
	hasher.ResetWithLength(sdata[:8])
	hasher.Write(sdata[8:])
	return NewChunk(hasher.Sum(nil), sdata)
}

func GenerateRandomChunks(dataSize int64, count int) (chunks []Chunk) {
	for i := 0; i < count; i++ {
		ch := GenerateRandomChunk(dataSize)
		chunks = append(chunks, ch)
	}
	return chunks
}
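// The following is an illustrative sketch added for this edit, not part of the
// original file. It shows the data layout produced by GenerateRandomChunk above:
// an 8-byte little-endian length prefix followed by the random payload. The
// function name chunkLayoutExample is only for illustration.
func chunkLayoutExample() {
	c := GenerateRandomChunk(ch.DefaultSize)

	span := binary.LittleEndian.Uint64(c.Data()[:8]) // the length prefix written by GenerateRandomChunk
	fmt.Println(span, len(c.Data()))                 // span == ch.DefaultSize, len == ch.DefaultSize+8
	fmt.Println(c.Address().Hex())                   // the 32-byte content address, hex encoded
}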
// LazySectionReader provides Size, Seek, Read and ReadAt
type LazySectionReader interface {
	Context() context.Context
	Size(context.Context, chan bool) (int64, error)
	io.Seeker
	io.Reader
	io.ReaderAt
}

type LazyTestSectionReader struct {
	*io.SectionReader
}

func (r *LazyTestSectionReader) Size(context.Context, chan bool) (int64, error) {
	return r.SectionReader.Size(), nil
}

func (r *LazyTestSectionReader) Context() context.Context {
	return context.TODO()
}

type StoreParams struct {
	Hash          SwarmHasher `toml:"-"`
	DbCapacity    uint64
	CacheCapacity uint
	BaseKey       []byte
}

func NewDefaultStoreParams() *StoreParams {
	return NewStoreParams(defaultLDBCapacity, defaultCacheCapacity, nil, nil)
}

func NewStoreParams(ldbCap uint64, cacheCap uint, hash SwarmHasher, basekey []byte) *StoreParams {
	if basekey == nil {
		basekey = make([]byte, 32)
	}
	if hash == nil {
		hash = MakeHashFunc(DefaultHash)
	}
	return &StoreParams{
		Hash:          hash,
		DbCapacity:    ldbCap,
		CacheCapacity: cacheCap,
		BaseKey:       basekey,
	}
}

type ChunkData []byte

type Reference []byte

// Putter is responsible for storing data and creating a reference for it
type Putter interface {
	Put(context.Context, ChunkData) (Reference, error)
	// RefSize returns the length of the Reference created by this Putter
	RefSize() int64
	// Close indicates that no more chunk data will be Put on this Putter
	Close()
	// Wait returns once all data has been stored and Close() has been called
	Wait(context.Context) error
}

// Getter is an interface to retrieve a chunk's data by its reference
type Getter interface {
	Get(context.Context, Reference) (ChunkData, error)
}

// NOTE: this returns invalid data if chunk is encrypted
func (c ChunkData) Size() uint64 {
	return binary.LittleEndian.Uint64(c[:8])
}

type ChunkValidator interface {
	Validate(chunk Chunk) bool
}

// ContentAddressValidator checks that a chunk's address is the content hash of its data.
// It holds the hasher used to create the address.
type ContentAddressValidator struct {
	Hasher SwarmHasher
}

// NewContentAddressValidator constructs a ContentAddressValidator for the given hasher
func NewContentAddressValidator(hasher SwarmHasher) *ContentAddressValidator {
	return &ContentAddressValidator{
		Hasher: hasher,
	}
}

// Validate checks that the chunk's address is a valid content address for its data
func (v *ContentAddressValidator) Validate(chunk Chunk) bool {
	data := chunk.Data()
	if l := len(data); l < 9 || l > ch.DefaultSize+8 {
		// log.Error("invalid chunk size", "chunk", addr.Hex(), "size", l)
		return false
	}

	hasher := v.Hasher()
	hasher.ResetWithLength(data[:8])
	hasher.Write(data[8:])
	hash := hasher.Sum(nil)

	return bytes.Equal(hash, chunk.Address())
}

type ChunkStore interface {
	Put(ctx context.Context, ch Chunk) (err error)
	Get(rctx context.Context, ref Address) (ch Chunk, err error)
	Has(rctx context.Context, ref Address) bool
	Close()
}

// SyncChunkStore is a ChunkStore which supports syncing
type SyncChunkStore interface {
	ChunkStore
	BinIndex(po uint8) uint64
	Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error
	FetchFunc(ctx context.Context, ref Address) func(context.Context) error
}
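// The following is an illustrative sketch added for this edit, not part of the
// original file. It is a minimal in-memory implementation of the ChunkStore
// interface above, only to show the shape of the API; the type mapChunkStore
// is hypothetical and not safe for concurrent use.
type mapChunkStore struct {
	chunks map[string]Chunk
}

func newMapChunkStore() *mapChunkStore {
	return &mapChunkStore{chunks: make(map[string]Chunk)}
}

// Put stores the chunk in memory, keyed by its hex-encoded address.
func (m *mapChunkStore) Put(_ context.Context, ch Chunk) error {
	m.chunks[ch.Address().Hex()] = ch
	return nil
}

// Get returns a previously Put chunk or an error if it is unknown.
func (m *mapChunkStore) Get(_ context.Context, ref Address) (Chunk, error) {
	c, ok := m.chunks[ref.Hex()]
	if !ok {
		return nil, fmt.Errorf("chunk %s not found", ref.Hex())
	}
	return c, nil
}

// Has reports whether a chunk with the given address was Put.
func (m *mapChunkStore) Has(_ context.Context, ref Address) bool {
	_, ok := m.chunks[ref.Hex()]
	return ok
}

// Close is a no-op for the in-memory store.
func (m *mapChunkStore) Close() {}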
// FakeChunkStore doesn't store anything, it just implements the ChunkStore interface.
// It can be injected into a hasherStore if you don't want to actually store data, just do the
// hashing
type FakeChunkStore struct {
}

// Put doesn't store anything; it is just here to implement ChunkStore
func (f *FakeChunkStore) Put(_ context.Context, ch Chunk) error {
	return nil
}

// Has doesn't do anything; it is just here to implement ChunkStore
func (f *FakeChunkStore) Has(_ context.Context, ref Address) bool {
	panic("FakeChunkStore doesn't support HasChunk")
}

// Get doesn't retrieve anything; it is just here to implement ChunkStore
func (f *FakeChunkStore) Get(_ context.Context, ref Address) (Chunk, error) {
	panic("FakeChunkStore doesn't support Get")
}

// Close doesn't do anything; it is just here to implement ChunkStore
func (f *FakeChunkStore) Close() {
}
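// The following is an illustrative sketch added for this edit, not part of the
// original file. It shows ContentAddressValidator above accepting a correctly
// addressed chunk and rejecting a tampered one; the function name
// validatorExample is only for illustration.
func validatorExample() {
	v := NewContentAddressValidator(MakeHashFunc(DefaultHash))

	good := GenerateRandomChunk(ch.DefaultSize)
	fmt.Println(v.Validate(good)) // true: the address matches the content hash of the data

	// Reuse the correct address but flip one payload byte: validation now fails.
	data := append([]byte{}, good.Data()...)
	data[8] ^= 0xff
	tampered := NewChunk(good.Address(), data)
	fmt.Println(v.Validate(tampered)) // false
}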