github.com/gobitfly/go-ethereum@v1.8.12/swarm/storage/filestore.go (about)

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package storage
    18  
    19  import (
    20  	"io"
    21  )
    22  
    23  /*
     24  FileStore provides the client API entrypoints Store and Retrieve to store and retrieve data.
     25  It can store anything that has a byte slice representation, so files or serialised objects etc.
    26  
    27  Storage: FileStore calls the Chunker to segment the input datastream of any size to a merkle hashed tree of chunks. The key of the root block is returned to the client.
    28  
    29  Retrieval: given the key of the root block, the FileStore retrieves the block chunks and reconstructs the original data and passes it back as a lazy reader. A lazy reader is a reader with on-demand delayed processing, i.e. the chunks needed to reconstruct a large file are only fetched and processed if that particular part of the document is actually read.
    30  
    31  As the chunker produces chunks, FileStore dispatches them to its own chunk store
    32  implementation for storage or retrieval.
    33  */
    34  
const (
	// defaultLDBCapacity is the chunk capacity for LevelDB; with 4096-byte
	// chunks, 5*10^6 chunks == roughly 20GB of storage.
	defaultLDBCapacity = 5000000
	// defaultCacheCapacity is the capacity of the in-memory chunk cache.
	defaultCacheCapacity = 10000
	// defaultChunkRequestsCacheCapacity is the capacity of the container
	// holding outgoing requests for chunks; it should be kept equal to the
	// LevelDB capacity above.
	defaultChunkRequestsCacheCapacity = 5000000
)
    40  
// FileStore is the client-facing entry point for storing and retrieving
// documents as content-addressed chunk trees. It embeds the ChunkStore used
// for chunk persistence/retrieval and keeps the hasher factory used to derive
// chunk addresses.
type FileStore struct {
	ChunkStore             // backing chunk store (delegated Get/Put and lifecycle methods)
	hashFunc   SwarmHasher // factory producing the hash used for chunk addressing
}
    45  
// FileStoreParams carries the configuration for constructing a FileStore.
type FileStoreParams struct {
	// Hash names the hash function used for chunk addressing
	// (resolved via MakeHashFunc).
	Hash string
}
    49  
    50  func NewFileStoreParams() *FileStoreParams {
    51  	return &FileStoreParams{
    52  		Hash: DefaultHash,
    53  	}
    54  }
    55  
    56  // for testing locally
    57  func NewLocalFileStore(datadir string, basekey []byte) (*FileStore, error) {
    58  	params := NewDefaultLocalStoreParams()
    59  	params.Init(datadir)
    60  	localStore, err := NewLocalStore(params, nil)
    61  	if err != nil {
    62  		return nil, err
    63  	}
    64  	localStore.Validators = append(localStore.Validators, NewContentAddressValidator(MakeHashFunc(DefaultHash)))
    65  	return NewFileStore(localStore, NewFileStoreParams()), nil
    66  }
    67  
    68  func NewFileStore(store ChunkStore, params *FileStoreParams) *FileStore {
    69  	hashFunc := MakeHashFunc(params.Hash)
    70  	return &FileStore{
    71  		ChunkStore: store,
    72  		hashFunc:   hashFunc,
    73  	}
    74  }
    75  
    76  // Public API. Main entry point for document retrieval directly. Used by the
    77  // FS-aware API and httpaccess
    78  // Chunk retrieval blocks on netStore requests with a timeout so reader will
    79  // report error if retrieval of chunks within requested range time out.
    80  // It returns a reader with the chunk data and whether the content was encrypted
    81  func (f *FileStore) Retrieve(addr Address) (reader *LazyChunkReader, isEncrypted bool) {
    82  	isEncrypted = len(addr) > f.hashFunc().Size()
    83  	getter := NewHasherStore(f.ChunkStore, f.hashFunc, isEncrypted)
    84  	reader = TreeJoin(addr, getter, 0)
    85  	return
    86  }
    87  
    88  // Public API. Main entry point for document storage directly. Used by the
    89  // FS-aware API and httpaccess
    90  func (f *FileStore) Store(data io.Reader, size int64, toEncrypt bool) (addr Address, wait func(), err error) {
    91  	putter := NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt)
    92  	return PyramidSplit(data, putter, putter)
    93  }
    94  
    95  func (f *FileStore) HashSize() int {
    96  	return f.hashFunc().Size()
    97  }