github.com/SmartMeshFoundation/Spectrum@v0.0.0-20220621030607-452a266fee1e/swarm/storage/types.go

// Copyright 2016 The Spectrum Authors
// This file is part of the Spectrum library.
//
// The Spectrum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Spectrum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Spectrum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"crypto"
	"fmt"
	"hash"
	"io"
	"sync"

	"github.com/SmartMeshFoundation/Spectrum/bmt"
	"github.com/SmartMeshFoundation/Spectrum/common"
	"github.com/SmartMeshFoundation/Spectrum/crypto/sha3"
)

type Hasher func() hash.Hash
type SwarmHasher func() SwarmHash

// Peer is recorded as the Source on the chunk.
// It should probably not live here; the network layer should wrap the chunk object instead.
type Peer interface{}
    38  
    39  type Key []byte
    40  
    41  func (x Key) Size() uint {
    42  	return uint(len(x))
    43  }
    44  
    45  func (x Key) isEqual(y Key) bool {
    46  	return bytes.Equal(x, y)
    47  }
    48  
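// bits returns, as a uint, the j-bit unsigned integer starting at bit position
// i of the key, reading bits least-significant-first within each byte; it
// returns 0 if i points beyond the end of the key.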
func (h Key) bits(i, j uint) uint {
	ii := i >> 3
	jj := i & 7
	if ii >= h.Size() {
		return 0
	}

	if jj+j <= 8 {
		return uint((h[ii] >> jj) & ((1 << j) - 1))
	}

	res := uint(h[ii] >> jj)
	jj = 8 - jj
	j -= jj
	for j != 0 {
		ii++
		if j < 8 {
			res += uint(h[ii]&((1<<j)-1)) << jj
			return res
		}
		res += uint(h[ii]) << jj
		jj += 8
		j -= 8
	}
	return res
}

func IsZeroKey(key Key) bool {
	return len(key) == 0 || bytes.Equal(key, ZeroKey)
}

var ZeroKey = Key(common.Hash{}.Bytes())

func MakeHashFunc(hash string) SwarmHasher {
	switch hash {
	case "SHA256":
		return func() SwarmHash { return &HashWithLength{crypto.SHA256.New()} }
	case "SHA3":
		return func() SwarmHash { return &HashWithLength{sha3.NewKeccak256()} }
	case "BMT":
		return func() SwarmHash {
			hasher := sha3.NewKeccak256
			pool := bmt.NewTreePool(hasher, bmt.DefaultSegmentCount, bmt.DefaultPoolSize)
			return bmt.New(pool)
		}
	}
	return nil
}

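// Illustrative sketch, not part of the original file: deriving a chunk Key
// from raw data with a hasher produced by MakeHashFunc. The helper name and
// the choice of "SHA3" are arbitrary; SwarmHash is assumed to satisfy
// hash.Hash (Write/Sum), as the HashWithLength wrapper does.
func exampleKeyOf(data []byte) Key {
	hasher := MakeHashFunc("SHA3")() // instantiate a fresh SwarmHash
	hasher.Write(data)               // feed the payload
	return Key(hasher.Sum(nil))      // 32-byte Keccak256 digest used as a Key
}
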
func (key Key) Hex() string {
	return fmt.Sprintf("%064x", []byte(key[:]))
}

func (key Key) Log() string {
	if len(key[:]) < 4 {
		return fmt.Sprintf("%x", []byte(key[:]))
	}
	return fmt.Sprintf("%08x", []byte(key[:4]))
}

func (key Key) String() string {
	return fmt.Sprintf("%064x", []byte(key)[:])
}

func (key Key) MarshalJSON() (out []byte, err error) {
	return []byte(`"` + key.String() + `"`), nil
}

func (key *Key) UnmarshalJSON(value []byte) error {
	s := string(value)
	*key = make([]byte, 32)
	h := common.Hex2Bytes(s[1 : len(s)-1])
	copy(*key, h)
	return nil
}

// RequestStatus is the record opened for a chunk when it is first requested;
// subsequent requests for the same chunk update this record. It keeps track of
// the request IDs and the requesting peers, and carries a channel that is
// closed once the chunk has been retrieved. Multiple local callers can wait on
// this channel, or combine it with a timeout by blocking in a select (see the
// sketch after newRequestStatus below).
type RequestStatus struct {
	Key        Key
	Source     Peer
	C          chan bool
	Requesters map[uint64][]interface{}
}

func newRequestStatus(key Key) *RequestStatus {
	return &RequestStatus{
		Key:        key,
		Requesters: make(map[uint64][]interface{}),
		C:          make(chan bool),
	}
}

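// Illustrative sketch, not part of the original file: waiting on a
// RequestStatus delivery channel as described above. The helper name and the
// quit channel are hypothetical; real callers typically combine rs.C with a
// timeout or shutdown signal in a select.
func exampleAwaitDelivery(rs *RequestStatus, quit chan bool) bool {
	select {
	case <-rs.C: // closed by the store once the chunk has been retrieved
		return true
	case <-quit: // caller-supplied timeout/shutdown signal
		return false
	}
}
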
// Chunk also serves as a request object passed to ChunkStores.
// For a retrieval request, SData is nil and Size is 0.
// Note that Size is not the length of the chunk data (len(SData)) but the size
// of the subtree encoded in the chunk; it is 0 for a request and is supplied
// by the DPA.
type Chunk struct {
	Key      Key             // always
	SData    []byte          // nil if request, to be supplied by dpa
	Size     int64           // size of the data covered by the subtree encoded in this chunk
	Source   Peer            // peer
	C        chan bool       // to signal data delivery by the dpa
	Req      *RequestStatus  // request Status needed by netStore
	wg       *sync.WaitGroup // wg to synchronize
	dbStored chan bool       // never remove a chunk from memStore before it is written to dbStore
}

func NewChunk(key Key, rs *RequestStatus) *Chunk {
	return &Chunk{Key: key, Req: rs}
}

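// Illustrative sketch, not part of the original file: the two roles of Chunk
// described above. The helper name is hypothetical.
func exampleChunkRoles(key Key, data []byte, size int64) (request, stored *Chunk) {
	// Retrieval request: only the Key is known; SData stays nil and Size stays 0.
	request = NewChunk(key, newRequestStatus(key))
	// Data chunk: carries the encoded data and the size of the subtree it encodes.
	stored = &Chunk{Key: key, SData: data, Size: size}
	return request, stored
}
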
/*
The ChunkStore interface is implemented by:

- MemStore: a memory cache
- DbStore: local disk/db store
- LocalStore: a combination (sequence of) memStore and dbStore
- NetStore: cloud storage abstraction layer
- DPA: local requests for swarm storage and retrieval
*/
type ChunkStore interface {
	Put(*Chunk) // effectively fire-and-forget: no error is returned even if the store fails
	Get(Key) (*Chunk, error)
	Close()
}

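// Illustrative sketch, not part of the original file: a minimal in-memory
// ChunkStore showing the contract above. The real implementations (MemStore,
// DbStore, LocalStore, NetStore) live elsewhere in this package.
type exampleMapStore struct {
	mu     sync.Mutex
	chunks map[string]*Chunk
}

func (s *exampleMapStore) Put(chunk *Chunk) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.chunks == nil {
		s.chunks = make(map[string]*Chunk)
	}
	s.chunks[chunk.Key.Hex()] = chunk // errors are swallowed, per the interface
}

func (s *exampleMapStore) Get(key Key) (*Chunk, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	chunk, ok := s.chunks[key.Hex()]
	if !ok {
		return nil, fmt.Errorf("chunk %v not found", key.Log())
	}
	return chunk, nil
}

func (s *exampleMapStore) Close() {}
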
/*
Chunker is the interface to a component that is responsible for disassembling and assembling larger data. It is intended to be the dependency of a DPA storage system with a fixed maximum chunk size.

It relies on the underlying chunking model.

When calling Split, the caller provides a channel (chan *Chunk) on which it receives chunks to store. The DPA delegates to storage layers (implementing the ChunkStore interface); see the illustrative sketch after the Splitter interface below.

Split returns an error channel, which the caller can monitor.
After getting notified that all the data has been split (the error channel is closed), the caller can safely read or save the root key. Optionally it times out if not all chunks get stored or the entire stream of data has not been processed. By inspecting the error channel the caller can check whether any explicit errors (typically IO read/write failures) occurred during splitting.

When calling Join with a root key, the caller gets returned a seekable lazy reader. The caller again provides a channel on which it receives placeholder chunks with missing data. The DPA is supposed to forward these to the chunk stores and notify the chunker when the data has been delivered (i.e. retrieved from the memory cache, the disk-persisted db or cloud-based swarm delivery). As the seekable reader is used, the chunker then puts together the relevant parts on demand.
*/
type Splitter interface {
	/*
	   When splitting, data is given as a SectionReader, and the key is a hashSize-long byte slice (Key); the root hash of the entire content will fill this once processing finishes.
	   New chunks to store are passed to the caller via the chunk storage channel, which the caller provides.
	   wg is a WaitGroup (can be nil) that can be used to block until the local storage finishes.
	   The caller gets returned an error channel; if an error is encountered during splitting, it is fed to the errC error channel.
	   A closed error channel signals process completion, at which point the key can be considered final if there were no errors.
	*/
	Split(io.Reader, int64, chan *Chunk, *sync.WaitGroup, *sync.WaitGroup) (Key, error)

	/* This is the first step in making files mutable (not chunks).
	   Append allows adding more data chunks to the end of the already existing file.
	   The key for the root chunk is supplied to load the respective tree.
	   The rest of the parameters behave like Split.
	*/
	Append(Key, io.Reader, chan *Chunk, *sync.WaitGroup, *sync.WaitGroup) (Key, error)
}

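// Illustrative sketch, not part of the original file: the delegation pattern
// described in the Chunker documentation above, draining the chunk storage
// channel into a ChunkStore. Names and wiring are hypothetical; the real loop
// lives in the DPA.
func exampleStoreLoop(store ChunkStore, chunkC chan *Chunk, quitC chan bool) {
	for {
		select {
		case chunk := <-chunkC:
			store.Put(chunk)
			if chunk.wg != nil {
				chunk.wg.Done() // release the storage WaitGroup so callers know this chunk reached local storage
			}
		case <-quitC:
			return
		}
	}
}
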
type Joiner interface {
	/*
	   Join reconstructs original content based on a root key.
	   When joining, the caller gets returned a lazy SectionReader, which is
	   seekable and implements on-demand fetching of chunks as and where it is read.
	   New chunks to retrieve are passed to the caller via the Chunk channel, which the caller provides.
	   If an error is encountered during joining, it appears as an error on the reader.
	   As a result, partial reads from a document are possible even if other parts
	   are corrupt or lost.
	   The chunks are not meant to be validated by the chunker when joining. This
	   is because it is left to the DPA to decide which sources are trusted.
	*/
	Join(key Key, chunkC chan *Chunk) LazySectionReader
}

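// Illustrative sketch, not part of the original file: reading the head of a
// document through Join, with a goroutine serving the chunk channel from a
// ChunkStore. The helper name and wiring are hypothetical and simplified (the
// chunk channel is never closed here); the real retrieval loop lives in the DPA.
func exampleReadHead(joiner Joiner, store ChunkStore, key Key, n int64) ([]byte, error) {
	chunkC := make(chan *Chunk)
	go func() {
		for chunk := range chunkC {
			if stored, err := store.Get(chunk.Key); err == nil {
				chunk.SData = stored.SData
				chunk.Size = stored.Size
			}
			close(chunk.C) // signal delivery (or failure) back to the chunker
		}
	}()
	reader := joiner.Join(key, chunkC)
	buf := make([]byte, n)
	read, err := reader.ReadAt(buf, 0)
	return buf[:read], err
}
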
type Chunker interface {
	Joiner
	Splitter
	// returns the key length
	// KeySize() int64
}

// Size, Seek, Read, ReadAt
type LazySectionReader interface {
	Size(chan bool) (int64, error)
	io.Seeker
	io.Reader
	io.ReaderAt
}

type LazyTestSectionReader struct {
	*io.SectionReader
}

func (self *LazyTestSectionReader) Size(chan bool) (int64, error) {
	return self.SectionReader.Size(), nil
}