github.com/etherbanking/go-etherbanking@v1.7.1-0.20181009210156-cf649bca5aba/swarm/storage/types.go (about) 1 // Copyright 2016 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package storage 18 19 import ( 20 "bytes" 21 "crypto" 22 "fmt" 23 "hash" 24 "io" 25 "sync" 26 27 // "github.com/etherbanking/go-etherbanking/bmt" 28 "github.com/etherbanking/go-etherbanking/common" 29 "github.com/etherbanking/go-etherbanking/crypto/sha3" 30 ) 31 32 type Hasher func() hash.Hash 33 34 // Peer is the recorded as Source on the chunk 35 // should probably not be here? 
but network should wrap chunk object 36 type Peer interface{} 37 38 type Key []byte 39 40 func (x Key) Size() uint { 41 return uint(len(x)) 42 } 43 44 func (x Key) isEqual(y Key) bool { 45 return bytes.Equal(x, y) 46 } 47 48 func (h Key) bits(i, j uint) uint { 49 ii := i >> 3 50 jj := i & 7 51 if ii >= h.Size() { 52 return 0 53 } 54 55 if jj+j <= 8 { 56 return uint((h[ii] >> jj) & ((1 << j) - 1)) 57 } 58 59 res := uint(h[ii] >> jj) 60 jj = 8 - jj 61 j -= jj 62 for j != 0 { 63 ii++ 64 if j < 8 { 65 res += uint(h[ii]&((1<<j)-1)) << jj 66 return res 67 } 68 res += uint(h[ii]) << jj 69 jj += 8 70 j -= 8 71 } 72 return res 73 } 74 75 func IsZeroKey(key Key) bool { 76 return len(key) == 0 || bytes.Equal(key, ZeroKey) 77 } 78 79 var ZeroKey = Key(common.Hash{}.Bytes()) 80 81 func MakeHashFunc(hash string) Hasher { 82 switch hash { 83 case "SHA256": 84 return crypto.SHA256.New 85 case "SHA3": 86 return sha3.NewKeccak256 87 } 88 return nil 89 } 90 91 func (key Key) Hex() string { 92 return fmt.Sprintf("%064x", []byte(key[:])) 93 } 94 95 func (key Key) Log() string { 96 if len(key[:]) < 4 { 97 return fmt.Sprintf("%x", []byte(key[:])) 98 } 99 return fmt.Sprintf("%08x", []byte(key[:4])) 100 } 101 102 func (key Key) String() string { 103 return fmt.Sprintf("%064x", []byte(key)[:]) 104 } 105 106 func (key Key) MarshalJSON() (out []byte, err error) { 107 return []byte(`"` + key.String() + `"`), nil 108 } 109 110 func (key *Key) UnmarshalJSON(value []byte) error { 111 s := string(value) 112 *key = make([]byte, 32) 113 h := common.Hex2Bytes(s[1 : len(s)-1]) 114 copy(*key, h) 115 return nil 116 } 117 118 // each chunk when first requested opens a record associated with the request 119 // next time a request for the same chunk arrives, this record is updated 120 // this request status keeps track of the request ID-s as well as the requesting 121 // peers and has a channel that is closed when the chunk is retrieved. 
Multiple 122 // local callers can wait on this channel (or combined with a timeout, block with a 123 // select). 124 type RequestStatus struct { 125 Key Key 126 Source Peer 127 C chan bool 128 Requesters map[uint64][]interface{} 129 } 130 131 func newRequestStatus(key Key) *RequestStatus { 132 return &RequestStatus{ 133 Key: key, 134 Requesters: make(map[uint64][]interface{}), 135 C: make(chan bool), 136 } 137 } 138 139 // Chunk also serves as a request object passed to ChunkStores 140 // in case it is a retrieval request, Data is nil and Size is 0 141 // Note that Size is not the size of the data chunk, which is Data.Size() 142 // but the size of the subtree encoded in the chunk 143 // 0 if request, to be supplied by the dpa 144 type Chunk struct { 145 Key Key // always 146 SData []byte // nil if request, to be supplied by dpa 147 Size int64 // size of the data covered by the subtree encoded in this chunk 148 Source Peer // peer 149 C chan bool // to signal data delivery by the dpa 150 Req *RequestStatus // request Status needed by netStore 151 wg *sync.WaitGroup // wg to synchronize 152 dbStored chan bool // never remove a chunk from memStore before it is written to dbStore 153 } 154 155 func NewChunk(key Key, rs *RequestStatus) *Chunk { 156 return &Chunk{Key: key, Req: rs} 157 } 158 159 /* 160 The ChunkStore interface is implemented by : 161 162 - MemStore: a memory cache 163 - DbStore: local disk/db store 164 - LocalStore: a combination (sequence of) memStore and dbStore 165 - NetStore: cloud storage abstraction layer 166 - DPA: local requests for swarm storage and retrieval 167 */ 168 type ChunkStore interface { 169 Put(*Chunk) // effectively there is no error even if there is an error 170 Get(Key) (*Chunk, error) 171 Close() 172 } 173 174 /* 175 Chunker is the interface to a component that is responsible for disassembling and assembling larger data and indended to be the dependency of a DPA storage system with fixed maximum chunksize. 

It relies on the underlying chunking model.

When calling Split, the caller provides a channel (chan *Chunk) on which it receives chunks to store. The DPA delegates to storage layers (implementing ChunkStore interface).

Split returns an error channel, which the caller can monitor.
After getting notified that all the data has been split (the error channel is closed), the caller can safely read or save the root key. Optionally it times out if not all chunks get stored or not the entire stream of data has been processed. By inspecting the errc channel the caller can check if any explicit errors (typically IO read/write failures) occurred during splitting.

When calling Join with a root key, the caller gets returned a seekable lazy reader. The caller again provides a channel on which the caller receives placeholder chunks with missing data. The DPA is supposed to forward this to the chunk stores and notify the chunker if the data has been delivered (i.e. retrieved from memory cache, disk-persisted db or cloud based swarm delivery). As the seekable reader is used, the chunker then puts these together the relevant parts on demand.
*/
type Splitter interface {
	/*
		When splitting, data is given as a SectionReader, and the key is a hashSize long byte slice (Key), the root hash of the entire content will fill this once processing finishes.
		New chunks to store are coming to caller via the chunk storage channel, which the caller provides.
		wg is a Waitgroup (can be nil) that can be used to block until the local storage finishes
		The caller gets returned an error channel, if an error is encountered during splitting, it is fed to errC error channel.
		A closed error signals process completion at which point the key can be considered final if there were no errors.
	*/
	// NOTE(review): the signature takes two *sync.WaitGroup parameters but
	// only one ("wg") is described above — confirm against implementations
	// which is local-storage sync and which is overall completion.
	Split(io.Reader, int64, chan *Chunk, *sync.WaitGroup, *sync.WaitGroup) (Key, error)
}

type Joiner interface {
	/*
		Join reconstructs original content based on a root key.
		When joining, the caller gets returned a Lazy SectionReader, which is
		seekable and implements on-demand fetching of chunks as and where it is read.
		New chunks to retrieve are coming to caller via the Chunk channel, which the caller provides.
		If an error is encountered during joining, it appears as a reader error.
		The SectionReader.
		As a result, partial reads from a document are possible even if other parts
		are corrupt or lost.
		The chunks are not meant to be validated by the chunker when joining. This
		is because it is left to the DPA to decide which sources are trusted.
	*/
	Join(key Key, chunkC chan *Chunk) LazySectionReader
}

// Chunker bundles both directions of the chunking model: splitting content
// into chunks and joining chunks back into content.
type Chunker interface {
	Joiner
	Splitter
	// returns the key length
	// KeySize() int64
}

// Size, Seek, Read, ReadAt
// LazySectionReader is a seekable reader whose total size may not be known
// up front. NOTE(review): the chan bool argument to Size is presumably a
// quit/abort signal — verify against the NetStore/DPA implementations.
type LazySectionReader interface {
	Size(chan bool) (int64, error)
	io.Seeker
	io.Reader
	io.ReaderAt
}

// LazyTestSectionReader adapts an *io.SectionReader to the LazySectionReader
// interface; since the section's length is fixed, Size can answer
// immediately without using the signalling channel.
type LazyTestSectionReader struct {
	*io.SectionReader
}

// Size returns the length of the underlying SectionReader. The channel
// argument is ignored and the returned error is always nil.
func (self *LazyTestSectionReader) Size(chan bool) (int64, error) {
	return self.SectionReader.Size(), nil
}