github.com/ethersphere/bee/v2@v2.2.0/pkg/storer/storer.go

     1  // Copyright 2023 The Swarm Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package storer
     6  
     7  import (
     8  	"context"
     9  	"errors"
    10  	"fmt"
    11  	"io"
    12  	"io/fs"
    13  	"math/big"
    14  	"os"
    15  	"path"
    16  	"path/filepath"
    17  	"sync"
    18  	"sync/atomic"
    19  	"time"
    20  
    21  	"github.com/ethersphere/bee/v2/pkg/log"
    22  	"github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
    23  
    24  	m "github.com/ethersphere/bee/v2/pkg/metrics"
    25  	"github.com/ethersphere/bee/v2/pkg/postage"
    26  	"github.com/ethersphere/bee/v2/pkg/pusher"
    27  	"github.com/ethersphere/bee/v2/pkg/retrieval"
    28  	"github.com/ethersphere/bee/v2/pkg/sharky"
    29  	"github.com/ethersphere/bee/v2/pkg/storage"
    30  	"github.com/ethersphere/bee/v2/pkg/storage/leveldbstore"
    31  	"github.com/ethersphere/bee/v2/pkg/storage/migration"
    32  	"github.com/ethersphere/bee/v2/pkg/storer/internal/cache"
    33  	"github.com/ethersphere/bee/v2/pkg/storer/internal/events"
    34  	pinstore "github.com/ethersphere/bee/v2/pkg/storer/internal/pinning"
    35  	"github.com/ethersphere/bee/v2/pkg/storer/internal/reserve"
    36  	"github.com/ethersphere/bee/v2/pkg/storer/internal/upload"
    37  	localmigration "github.com/ethersphere/bee/v2/pkg/storer/migration"
    38  	"github.com/ethersphere/bee/v2/pkg/swarm"
    39  	"github.com/ethersphere/bee/v2/pkg/topology"
    40  	"github.com/ethersphere/bee/v2/pkg/tracing"
    41  	"github.com/ethersphere/bee/v2/pkg/util/syncutil"
    42  	"github.com/prometheus/client_golang/prometheus"
    43  	"github.com/spf13/afero"
    44  	"github.com/syndtr/goleveldb/leveldb"
    45  	"github.com/syndtr/goleveldb/leveldb/filter"
    46  	"github.com/syndtr/goleveldb/leveldb/opt"
    47  	"resenje.org/multex"
    48  )
    49  
     50  // PutterSession provides a session around the storage.Putter. On successful
     51  // completion the session commits all the operations; in case of an error, it
     52  // rolls back the state.
    53  type PutterSession interface {
    54  	storage.Putter
    55  	// Done is used to close the session and optionally assign a swarm.Address to
    56  	// this session.
    57  	Done(swarm.Address) error
     58  	// Cleanup is used to clean up any state related to this session in case of
     59  	// an error.
    60  	Cleanup() error
    61  }
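// A minimal sketch of how a PutterSession is typically driven, assuming
// "session" was obtained from one of the stores below, "ctx" is a
// context.Context, "ch" is a swarm.Chunk and "rootAddr" is whatever reference
// the caller considers the result of the session:
//
//	if err := session.Put(ctx, ch); err != nil {
//		_ = session.Cleanup() // roll back any state written so far
//		return err
//	}
//	if err := session.Done(rootAddr); err != nil {
//		return err
//	}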
    62  
    63  // SessionInfo is a type which exports the storer tag object. This object
    64  // stores all the relevant information about a particular session.
    65  type SessionInfo = upload.TagItem
    66  
    67  // UploadStore is a logical component of the storer which deals with the upload
    68  // of data to swarm.
    69  type UploadStore interface {
     70  	// Upload provides a PutterSession which is tied to the tagID. Optionally, if
     71  	// the user requests to pin the data, a new pinning collection is created.
    72  	Upload(ctx context.Context, pin bool, tagID uint64) (PutterSession, error)
    73  	// NewSession can be used to obtain a tag ID to use for a new Upload session.
    74  	NewSession() (SessionInfo, error)
     75  	// Session returns the information about the session associated with the tag ID.
    76  	Session(tagID uint64) (SessionInfo, error)
    77  	// DeleteSession will delete the session info associated with the tag id.
    78  	DeleteSession(tagID uint64) error
    79  	// ListSessions will list all the Sessions currently being tracked.
    80  	ListSessions(offset, limit int) ([]SessionInfo, error)
    81  }
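// A minimal sketch of the upload flow, assuming "lstore" implements
// UploadStore, "ch" is a swarm.Chunk and the tag item exposes its identifier
// as TagID:
//
//	tag, err := lstore.NewSession()
//	if err != nil {
//		return err
//	}
//	session, err := lstore.Upload(ctx, false, tag.TagID)
//	if err != nil {
//		return err
//	}
//	if err := session.Put(ctx, ch); err != nil {
//		_ = session.Cleanup()
//		return err
//	}
//	if err := session.Done(ch.Address()); err != nil {
//		return err
//	}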
    82  
    83  // PinStore is a logical component of the storer which deals with pinning
    84  // functionality.
    85  type PinStore interface {
    86  	// NewCollection can be used to create a new PutterSession which writes a new
    87  	// pinning collection. The address passed in during the Done of the session is
     88  	// used as the root reference.
    89  	NewCollection(context.Context) (PutterSession, error)
    90  	// DeletePin deletes all the chunks associated with the collection pointed to
    91  	// by the swarm.Address passed in.
    92  	DeletePin(context.Context, swarm.Address) error
    93  	// Pins returns all the root references of pinning collections.
    94  	Pins() ([]swarm.Address, error)
    95  	// HasPin is a helper which checks if a collection exists with the root
    96  	// reference passed in.
    97  	HasPin(swarm.Address) (bool, error)
    98  }
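// A minimal sketch of creating a pinning collection, assuming "lstore"
// implements PinStore, "ch" is a chunk belonging to the collection and "root"
// is the root reference of the pinned content:
//
//	session, err := lstore.NewCollection(ctx)
//	if err != nil {
//		return err
//	}
//	if err := session.Put(ctx, ch); err != nil {
//		_ = session.Cleanup()
//		return err
//	}
//	if err := session.Done(root); err != nil {
//		return err
//	}
//	if ok, err := lstore.HasPin(root); err == nil && ok {
//		// the collection is now tracked under root
//	}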
    99  
   100  // PinIterator is a helper interface which can be used to iterate over all the
   101  // chunks in a pinning collection.
   102  type PinIterator interface {
   103  	IteratePinCollection(root swarm.Address, iterateFn func(swarm.Address) (bool, error)) error
   104  }
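// A minimal sketch of walking a pinning collection, assuming "lstore"
// implements PinIterator and that returning true from the callback stops the
// iteration early:
//
//	err := lstore.IteratePinCollection(root, func(addr swarm.Address) (bool, error) {
//		fmt.Println("pinned chunk", addr)
//		return false, nil // keep iterating
//	})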
   105  
   106  // CacheStore is a logical component of the storer that deals with cache
   107  // content.
   108  type CacheStore interface {
    109  	// Lookup method provides a storage.Getter wrapped around the underlying
    110  	// ChunkStore which will update cache-related indexes, if required, on
    111  	// successful lookups.
   112  	Lookup() storage.Getter
    113  	// Cache method provides a storage.Putter which will add the chunks to the
    114  	// cache. This will add the chunk to the underlying store as well as new
    115  	// indexes which will keep track of the chunk in the cache.
   116  	Cache() storage.Putter
   117  }
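// A minimal sketch of the two sides of the cache, assuming "lstore" implements
// CacheStore:
//
//	// write path: store the chunk and track it in the cache indexes
//	if err := lstore.Cache().Put(ctx, ch); err != nil {
//		return err
//	}
//	// read path: a successful lookup refreshes the cache-related indexes
//	cached, err := lstore.Lookup().Get(ctx, addr)
//	if err != nil {
//		return err
//	}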
   118  
    119  // NetStore is a logical component of the storer that deals with the network. It
    120  // will push/retrieve chunks to/from the network.
   121  type NetStore interface {
   122  	// DirectUpload provides a session which can be used to push chunks directly
   123  	// to the network.
   124  	DirectUpload() PutterSession
   125  	// Download provides a getter which can be used to download data. If the data
    126  	// is found locally, it is returned immediately; otherwise it is retrieved from
   127  	// the network.
   128  	Download(cache bool) storage.Getter
    129  	// PusherFeed is the feed of directly uploaded chunks. It can be used by the
    130  	// pusher component to push out the chunks.
   131  	PusherFeed() <-chan *pusher.Op
   132  }
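// A minimal sketch of the network-facing paths, assuming "lstore" implements
// NetStore, that Download(true) also caches retrieved chunks locally, and that
// the pusher component is draining PusherFeed():
//
//	// retrieve a chunk, falling back to the network on a local miss
//	ch, err := lstore.Download(true).Get(ctx, addr)
//	if err != nil {
//		return err
//	}
//
//	// push a chunk directly to the network
//	session := lstore.DirectUpload()
//	if err := session.Put(ctx, ch); err != nil {
//		_ = session.Cleanup()
//		return err
//	}
//	if err := session.Done(ch.Address()); err != nil {
//		return err
//	}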
   133  
   134  var _ Reserve = (*DB)(nil)
   135  
   136  // Reserve is a logical component of the storer that deals with reserve
   137  // content. It will implement all the core functionality required for the protocols.
   138  type Reserve interface {
   139  	ReserveStore
   140  	EvictBatch(ctx context.Context, batchID []byte) error
   141  	ReserveSample(context.Context, []byte, uint8, uint64, *big.Int) (Sample, error)
   142  	ReserveSize() int
   143  }
   144  
   145  // ReserveIterator is a helper interface which can be used to iterate over all
   146  // the chunks in the reserve.
   147  type ReserveIterator interface {
   148  	ReserveIterateChunks(cb func(swarm.Chunk) (bool, error)) error
   149  }
   150  
   151  // ReserveStore is a logical component of the storer that deals with reserve
   152  // content. It will implement all the core functionality required for the protocols.
   153  type ReserveStore interface {
   154  	ReserveGet(ctx context.Context, addr swarm.Address, batchID []byte, stampHash []byte) (swarm.Chunk, error)
   155  	ReserveHas(addr swarm.Address, batchID []byte, stampHash []byte) (bool, error)
   156  	ReservePutter() storage.Putter
   157  	SubscribeBin(ctx context.Context, bin uint8, start uint64) (<-chan *BinC, func(), <-chan error)
   158  	ReserveLastBinIDs() ([]uint64, uint64, error)
   159  	RadiusChecker
   160  }
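// A minimal sketch of how the reserve is typically used by the protocols,
// assuming "lstore" implements ReserveStore and that BinC carries the chunk
// address and bin ID as Address and BinID:
//
//	// ingest a chunk into the reserve
//	if err := lstore.ReservePutter().Put(ctx, ch); err != nil {
//		return err
//	}
//
//	// stream entries of proximity-order bin 3, starting from bin ID 0
//	binC, unsub, errC := lstore.SubscribeBin(ctx, 3, 0)
//	defer unsub()
//	select {
//	case c := <-binC:
//		fmt.Println("bin entry", c.Address, c.BinID)
//	case err := <-errC:
//		return err
//	}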
   161  
    162  // RadiusChecker provides the radius-related functionality.
   163  type RadiusChecker interface {
   164  	IsWithinStorageRadius(addr swarm.Address) bool
   165  	StorageRadius() uint8
   166  }
   167  
    168  // LocalStore is a read-only ChunkStore. It can be used to check if a chunk is
    169  // known locally, but it cannot tell what the context of the chunk is (whether
    170  // it is pinned, uploaded, etc.).
   171  type LocalStore interface {
   172  	ChunkStore() storage.ReadOnlyChunkStore
   173  }
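// A minimal sketch of a read-only lookup, assuming "lstore" implements
// LocalStore and the read-only chunk store exposes Has and Get:
//
//	if ok, err := lstore.ChunkStore().Has(ctx, addr); err == nil && ok {
//		ch, err := lstore.ChunkStore().Get(ctx, addr)
//		if err != nil {
//			return err
//		}
//		_ = ch // the chunk is known locally, but its context (pin, upload, ...) is not
//	}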
   174  
   175  // Debugger is a helper interface which can be used to debug the storer.
   176  type Debugger interface {
   177  	DebugInfo(context.Context) (Info, error)
   178  }
   179  
   180  type memFS struct {
   181  	afero.Fs
   182  }
   183  
   184  func (m *memFS) Open(path string) (fs.File, error) {
   185  	return m.Fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
   186  }
   187  
   188  type dirFS struct {
   189  	basedir string
   190  }
   191  
   192  func (d *dirFS) Open(path string) (fs.File, error) {
   193  	return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0644)
   194  }
   195  
   196  var sharkyNoOfShards = 32
   197  var ErrDBQuit = errors.New("db quit")
   198  
   199  type closerFn func() error
   200  
   201  func (c closerFn) Close() error { return c() }
   202  
   203  func closer(closers ...io.Closer) io.Closer {
   204  	return closerFn(func() error {
   205  		var err error
   206  		for _, closer := range closers {
   207  			err = errors.Join(err, closer.Close())
   208  		}
   209  		return err
   210  	})
   211  }
   212  
   213  func initInmemRepository() (transaction.Storage, io.Closer, error) {
   214  	store, err := leveldbstore.New("", nil)
   215  	if err != nil {
   216  		return nil, nil, fmt.Errorf("failed creating inmem levelDB index store: %w", err)
   217  	}
   218  
   219  	sharky, err := sharky.New(
   220  		&memFS{Fs: afero.NewMemMapFs()},
   221  		sharkyNoOfShards,
   222  		swarm.SocMaxChunkSize,
   223  	)
   224  	if err != nil {
   225  		return nil, nil, fmt.Errorf("failed creating inmem sharky instance: %w", err)
   226  	}
   227  
   228  	return transaction.NewStorage(sharky, store), closer(store, sharky), nil
   229  }
   230  
   231  // loggerName is the tree path name of the logger for this package.
   232  const loggerName = "storer"
   233  
   234  // Default options for levelDB.
   235  const (
   236  	defaultOpenFilesLimit         = uint64(256)
   237  	defaultBlockCacheCapacity     = uint64(32 * 1024 * 1024)
   238  	defaultWriteBufferSize        = uint64(32 * 1024 * 1024)
   239  	defaultDisableSeeksCompaction = false
   240  	defaultCacheCapacity          = uint64(1_000_000)
   241  	defaultBgCacheWorkers         = 16
   242  
   243  	indexPath  = "indexstore"
   244  	sharkyPath = "sharky"
   245  )
   246  
   247  func initStore(basePath string, opts *Options) (*leveldbstore.Store, error) {
   248  	ldbBasePath := path.Join(basePath, indexPath)
   249  
   250  	if _, err := os.Stat(ldbBasePath); os.IsNotExist(err) {
   251  		err := os.MkdirAll(ldbBasePath, 0777)
   252  		if err != nil {
   253  			return nil, err
   254  		}
   255  	}
    256  	store, err := leveldbstore.New(ldbBasePath, &opt.Options{
   257  		OpenFilesCacheCapacity: int(opts.LdbOpenFilesLimit),
   258  		BlockCacheCapacity:     int(opts.LdbBlockCacheCapacity),
   259  		WriteBuffer:            int(opts.LdbWriteBufferSize),
   260  		DisableSeeksCompaction: opts.LdbDisableSeeksCompaction,
   261  		CompactionL0Trigger:    8,
   262  		Filter:                 filter.NewBloomFilter(64),
   263  	})
   264  	if err != nil {
   265  		return nil, fmt.Errorf("failed creating levelDB index store: %w", err)
   266  	}
   267  
   268  	return store, nil
   269  }
   270  
   271  func initDiskRepository(
   272  	ctx context.Context,
   273  	basePath string,
   274  	opts *Options,
   275  ) (transaction.Storage, *PinIntegrity, io.Closer, error) {
   276  	store, err := initStore(basePath, opts)
   277  	if err != nil {
   278  		return nil, nil, nil, fmt.Errorf("failed creating levelDB index store: %w", err)
   279  	}
   280  
   281  	err = migration.Migrate(store, "core-migration", localmigration.BeforeInitSteps(store))
   282  	if err != nil {
   283  		return nil, nil, nil, fmt.Errorf("failed core migration: %w", err)
   284  	}
   285  
   286  	if opts.LdbStats.Load() != nil {
   287  		go func() {
   288  			ldbStats := opts.LdbStats.Load()
   289  			logger := log.NewLogger(loggerName).Register()
   290  			ticker := time.NewTicker(15 * time.Second)
   291  			defer ticker.Stop()
   292  
   293  			for {
   294  				select {
   295  				case <-ctx.Done():
   296  					return
   297  				case <-ticker.C:
   298  					stats := new(leveldb.DBStats)
   299  					switch err := store.DB().Stats(stats); {
   300  					case errors.Is(err, leveldb.ErrClosed):
   301  						return
   302  					case err != nil:
   303  						logger.Error(err, "snapshot levelDB stats")
   304  					default:
   305  						ldbStats.WithLabelValues("write_delay_count").Observe(float64(stats.WriteDelayCount))
   306  						ldbStats.WithLabelValues("write_delay_duration").Observe(stats.WriteDelayDuration.Seconds())
   307  						ldbStats.WithLabelValues("alive_snapshots").Observe(float64(stats.AliveSnapshots))
   308  						ldbStats.WithLabelValues("alive_iterators").Observe(float64(stats.AliveIterators))
   309  						ldbStats.WithLabelValues("io_write").Observe(float64(stats.IOWrite))
   310  						ldbStats.WithLabelValues("io_read").Observe(float64(stats.IORead))
   311  						ldbStats.WithLabelValues("block_cache_size").Observe(float64(stats.BlockCacheSize))
   312  						ldbStats.WithLabelValues("opened_tables_count").Observe(float64(stats.OpenedTablesCount))
   313  						ldbStats.WithLabelValues("mem_comp").Observe(float64(stats.MemComp))
   314  						ldbStats.WithLabelValues("level_0_comp").Observe(float64(stats.Level0Comp))
   315  						ldbStats.WithLabelValues("non_level_0_comp").Observe(float64(stats.NonLevel0Comp))
   316  						ldbStats.WithLabelValues("seek_comp").Observe(float64(stats.SeekComp))
   317  						for i := 0; i < len(stats.LevelSizes); i++ {
   318  							ldbStats.WithLabelValues(fmt.Sprintf("level_%d_size", i)).Observe(float64(stats.LevelSizes[i]))
   319  							ldbStats.WithLabelValues(fmt.Sprintf("level_%d_tables_count", i)).Observe(float64(stats.LevelTablesCounts[i]))
   320  							ldbStats.WithLabelValues(fmt.Sprintf("level_%d_read", i)).Observe(float64(stats.LevelRead[i]))
   321  							ldbStats.WithLabelValues(fmt.Sprintf("level_%d_write", i)).Observe(float64(stats.LevelWrite[i]))
   322  							ldbStats.WithLabelValues(fmt.Sprintf("level_%d_duration", i)).Observe(stats.LevelDurations[i].Seconds())
   323  						}
   324  					}
   325  				}
   326  			}
   327  		}()
   328  	}
   329  
   330  	sharkyBasePath := path.Join(basePath, sharkyPath)
   331  
   332  	if _, err := os.Stat(sharkyBasePath); os.IsNotExist(err) {
   333  		err := os.Mkdir(sharkyBasePath, 0777)
   334  		if err != nil {
   335  			return nil, nil, nil, err
   336  		}
   337  	}
   338  
   339  	recoveryCloser, err := sharkyRecovery(ctx, sharkyBasePath, store, opts)
   340  	if err != nil {
   341  		return nil, nil, nil, fmt.Errorf("failed to recover sharky: %w", err)
   342  	}
   343  
   344  	sharky, err := sharky.New(
   345  		&dirFS{basedir: sharkyBasePath},
   346  		sharkyNoOfShards,
   347  		swarm.SocMaxChunkSize,
   348  	)
   349  	if err != nil {
   350  		return nil, nil, nil, fmt.Errorf("failed creating sharky instance: %w", err)
   351  	}
   352  
   353  	pinIntegrity := &PinIntegrity{
   354  		Store:  store,
   355  		Sharky: sharky,
   356  	}
   357  
   358  	return transaction.NewStorage(sharky, store), pinIntegrity, closer(store, sharky, recoveryCloser), nil
   359  }
   360  
   361  const lockKeyNewSession string = "new_session"
   362  
   363  // Options provides a container to configure different things in the storer.
   364  type Options struct {
   365  	// These are options related to levelDB. Currently, the underlying storage used is levelDB.
   366  	LdbStats                  atomic.Pointer[prometheus.HistogramVec]
   367  	LdbOpenFilesLimit         uint64
   368  	LdbBlockCacheCapacity     uint64
   369  	LdbWriteBufferSize        uint64
   370  	LdbDisableSeeksCompaction bool
   371  	Logger                    log.Logger
   372  	Tracer                    *tracing.Tracer
   373  
   374  	Address        swarm.Address
   375  	WarmupDuration time.Duration
   376  	Batchstore     postage.Storer
   377  	ValidStamp     postage.ValidStampFn
   378  	RadiusSetter   topology.SetStorageRadiuser
   379  	StateStore     storage.StateStorer
   380  
   381  	ReserveCapacity       int
   382  	ReserveWakeUpDuration time.Duration
   383  	ReserveMinEvictCount  uint64
   384  
   385  	CacheCapacity      uint64
   386  	CacheMinEvictCount uint64
   387  
   388  	MinimumStorageRadius uint
   389  }
   390  
   391  func defaultOptions() *Options {
   392  	return &Options{
   393  		LdbOpenFilesLimit:         defaultOpenFilesLimit,
   394  		LdbBlockCacheCapacity:     defaultBlockCacheCapacity,
   395  		LdbWriteBufferSize:        defaultWriteBufferSize,
   396  		LdbDisableSeeksCompaction: defaultDisableSeeksCompaction,
   397  		CacheCapacity:             defaultCacheCapacity,
   398  		Logger:                    log.Noop,
   399  		ReserveCapacity:           4_194_304, // 2^22 chunks
   400  		ReserveWakeUpDuration:     time.Minute * 30,
   401  	}
   402  }
   403  
   404  // cacheLimiter is used to limit the number
   405  // of concurrent cache background workers.
   406  type cacheLimiter struct {
   407  	wg     sync.WaitGroup
   408  	sem    chan struct{}
   409  	ctx    context.Context
   410  	cancel context.CancelFunc
   411  }
   412  
   413  // DB implements all the component stores described above.
   414  type DB struct {
   415  	logger log.Logger
   416  	tracer *tracing.Tracer
   417  
   418  	metrics             metrics
   419  	storage             transaction.Storage
   420  	multex              *multex.Multex
   421  	cacheObj            *cache.Cache
   422  	retrieval           retrieval.Interface
   423  	pusherFeed          chan *pusher.Op
   424  	quit                chan struct{}
   425  	cacheLimiter        cacheLimiter
   426  	dbCloser            io.Closer
   427  	subscriptionsWG     sync.WaitGroup
   428  	events              *events.Subscriber
   429  	directUploadLimiter chan struct{}
   430  
   431  	reserve          *reserve.Reserve
   432  	inFlight         sync.WaitGroup
   433  	reserveBinEvents *events.Subscriber
   434  	baseAddr         swarm.Address
   435  	batchstore       postage.Storer
   436  	validStamp       postage.ValidStampFn
   437  	setSyncerOnce    sync.Once
   438  	syncer           Syncer
   439  	opts             workerOpts
   440  
   441  	pinIntegrity *PinIntegrity
   442  }
   443  
   444  type workerOpts struct {
   445  	reserveWarmupDuration time.Duration
   446  	reserveWakeupDuration time.Duration
   447  	reserveMinEvictCount  uint64
   448  	cacheMinEvictCount    uint64
   449  	minimumRadius         uint8
   450  }
   451  
   452  // New returns a newly constructed DB object which implements all the above
   453  // component stores.
   454  func New(ctx context.Context, dirPath string, opts *Options) (*DB, error) {
   455  	var (
   456  		err          error
   457  		pinIntegrity *PinIntegrity
   458  		st           transaction.Storage
   459  		dbCloser     io.Closer
   460  	)
   461  	if opts == nil {
   462  		opts = defaultOptions()
   463  	}
   464  
   465  	if opts.Logger == nil {
   466  		opts.Logger = log.Noop
   467  	}
   468  
   469  	lock := multex.New()
   470  	metrics := newMetrics()
   471  	opts.LdbStats.CompareAndSwap(nil, &metrics.LevelDBStats)
   472  
   473  	if dirPath == "" {
   474  		st, dbCloser, err = initInmemRepository()
   475  		if err != nil {
   476  			return nil, err
   477  		}
   478  	} else {
   479  		st, pinIntegrity, dbCloser, err = initDiskRepository(ctx, dirPath, opts)
   480  		if err != nil {
   481  			return nil, err
   482  		}
   483  	}
   484  
   485  	sharkyBasePath := ""
   486  	if dirPath != "" {
   487  		sharkyBasePath = path.Join(dirPath, sharkyPath)
   488  	}
   489  
   490  	err = st.Run(ctx, func(s transaction.Store) error {
   491  		return migration.Migrate(
   492  			s.IndexStore(),
   493  			"migration",
   494  			localmigration.AfterInitSteps(sharkyBasePath, sharkyNoOfShards, st, opts.Logger),
   495  		)
   496  	})
   497  	if err != nil {
   498  		return nil, err
   499  	}
   500  
   501  	cacheObj, err := cache.New(ctx, st.IndexStore(), opts.CacheCapacity)
   502  	if err != nil {
   503  		return nil, err
   504  	}
   505  
   506  	logger := opts.Logger.WithName(loggerName).Register()
   507  
   508  	clCtx, clCancel := context.WithCancel(ctx)
   509  	db := &DB{
   510  		metrics:    metrics,
   511  		storage:    st,
   512  		logger:     logger,
   513  		tracer:     opts.Tracer,
   514  		baseAddr:   opts.Address,
   515  		multex:     lock,
   516  		cacheObj:   cacheObj,
   517  		retrieval:  noopRetrieval{},
   518  		pusherFeed: make(chan *pusher.Op),
   519  		quit:       make(chan struct{}),
   520  		cacheLimiter: cacheLimiter{
   521  			sem:    make(chan struct{}, defaultBgCacheWorkers),
   522  			ctx:    clCtx,
   523  			cancel: clCancel,
   524  		},
   525  		dbCloser:         dbCloser,
   526  		batchstore:       opts.Batchstore,
   527  		validStamp:       opts.ValidStamp,
   528  		events:           events.NewSubscriber(),
   529  		reserveBinEvents: events.NewSubscriber(),
   530  		opts: workerOpts{
   531  			reserveWarmupDuration: opts.WarmupDuration,
   532  			reserveWakeupDuration: opts.ReserveWakeUpDuration,
   533  			reserveMinEvictCount:  opts.ReserveMinEvictCount,
   534  			cacheMinEvictCount:    opts.CacheMinEvictCount,
   535  			minimumRadius:         uint8(opts.MinimumStorageRadius),
   536  		},
   537  		directUploadLimiter: make(chan struct{}, pusher.ConcurrentPushes),
   538  		pinIntegrity:        pinIntegrity,
   539  	}
   540  
   541  	if db.validStamp == nil {
   542  		db.validStamp = postage.ValidStamp(db.batchstore)
   543  	}
   544  
   545  	if opts.ReserveCapacity > 0 {
   546  		rs, err := reserve.New(
   547  			opts.Address,
   548  			st,
   549  			opts.ReserveCapacity,
   550  			opts.RadiusSetter,
   551  			logger,
   552  		)
   553  		if err != nil {
   554  			return nil, err
   555  		}
   556  		db.reserve = rs
   557  
   558  		db.metrics.StorageRadius.Set(float64(rs.Radius()))
   559  		db.metrics.ReserveSize.Set(float64(rs.Size()))
   560  	}
   561  	db.metrics.CacheSize.Set(float64(db.cacheObj.Size()))
   562  
    563  	// Clean up any dirty state in the upload and pinning stores; this could
    564  	// happen in the case of a dirty shutdown.
   565  	err = errors.Join(
   566  		upload.CleanupDirty(db.storage),
   567  		pinstore.CleanupDirty(db.storage),
   568  	)
   569  	if err != nil {
   570  		return nil, err
   571  	}
   572  
   573  	db.inFlight.Add(1)
   574  	go db.cacheWorker(ctx)
   575  
   576  	return db, nil
   577  }
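// A minimal sketch of constructing and shutting down a DB; passing nil options
// falls back to defaultOptions(), and dirPath == "" selects the in-memory
// repository. The on-disk variant below assumes "logger" and the data
// directory are provided by the caller, and that retrieval, syncing and
// postage services are wired separately:
//
//	db, err := New(ctx, "", nil) // in-memory instance
//	if err != nil {
//		return err
//	}
//	defer db.Close()
//
//	opts := &Options{
//		LdbOpenFilesLimit:     defaultOpenFilesLimit,
//		LdbBlockCacheCapacity: defaultBlockCacheCapacity,
//		LdbWriteBufferSize:    defaultWriteBufferSize,
//		CacheCapacity:         defaultCacheCapacity,
//		ReserveCapacity:       4_194_304, // 2^22 chunks
//		ReserveWakeUpDuration: 30 * time.Minute,
//		Logger:                logger,
//	}
//	diskDB, err := New(ctx, "/path/to/data", opts)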
   578  
    579  // ResetReserve removes all entries from the reserve.
   580  func (db *DB) ResetReserve(ctx context.Context) error {
   581  	return db.reserve.Reset(ctx)
   582  }
   583  
    584  // Metrics returns the set of prometheus collectors.
   585  func (db *DB) Metrics() []prometheus.Collector {
   586  	collectors := m.PrometheusCollectorsFromFields(db.metrics)
   587  	if v, ok := db.storage.(m.Collector); ok {
   588  		collectors = append(collectors, v.Metrics()...)
   589  	}
   590  	return collectors
   591  }
   592  
   593  func (db *DB) Close() error {
   594  	close(db.quit)
   595  
   596  	bgReserveWorkersClosed := make(chan struct{})
   597  	go func() {
   598  		defer close(bgReserveWorkersClosed)
   599  		if !syncutil.WaitWithTimeout(&db.inFlight, 5*time.Second) {
   600  			db.logger.Warning("db shutting down with running goroutines")
   601  		}
   602  	}()
   603  
   604  	bgCacheWorkersClosed := make(chan struct{})
   605  	go func() {
   606  		defer close(bgCacheWorkersClosed)
   607  		if !syncutil.WaitWithTimeout(&db.cacheLimiter.wg, 5*time.Second) {
   608  			db.logger.Warning("cache goroutines still running after the wait timeout; force closing")
   609  			db.cacheLimiter.cancel()
   610  		}
   611  	}()
   612  
   613  	var err error
   614  	closerDone := make(chan struct{})
   615  	go func() {
   616  		defer close(closerDone)
   617  		err = db.dbCloser.Close()
   618  	}()
   619  
   620  	done := make(chan struct{})
   621  	go func() {
   622  		defer close(done)
   623  		<-closerDone
   624  		<-bgCacheWorkersClosed
   625  		<-bgReserveWorkersClosed
   626  	}()
   627  
   628  	select {
   629  	case <-done:
   630  	case <-time.After(3 * time.Second):
   631  		return errors.New("storer closed with bg goroutines running")
   632  	}
   633  
   634  	return err
   635  }
   636  
   637  func (db *DB) SetRetrievalService(r retrieval.Interface) {
   638  	db.retrieval = r
   639  }
   640  
   641  func (db *DB) StartReserveWorker(ctx context.Context, s Syncer, radius func() (uint8, error)) {
   642  	db.setSyncerOnce.Do(func() {
   643  		db.syncer = s
   644  		go db.startReserveWorkers(ctx, radius)
   645  	})
   646  }
   647  
   648  type noopRetrieval struct{}
   649  
   650  func (noopRetrieval) RetrieveChunk(_ context.Context, _ swarm.Address, _ swarm.Address) (swarm.Chunk, error) {
   651  	return nil, storage.ErrNotFound
   652  }
   653  
   654  func (db *DB) ChunkStore() storage.ReadOnlyChunkStore {
   655  	return db.storage.ChunkStore()
   656  }
   657  
   658  func (db *DB) PinIntegrity() *PinIntegrity {
   659  	return db.pinIntegrity
   660  }
   661  
   662  func (db *DB) Lock(strs ...string) func() {
   663  	for _, s := range strs {
   664  		db.multex.Lock(s)
   665  	}
   666  	return func() {
   667  		for _, s := range strs {
   668  			db.multex.Unlock(s)
   669  		}
   670  	}
   671  }
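// A minimal sketch of serializing work on a shared key with Lock, assuming all
// writers that must be mutually exclusive agree on the same key string:
//
//	unlock := db.Lock(lockKeyNewSession)
//	defer unlock()
//	// state guarded by "new_session" can be mutated safely here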
   672  
   673  func (db *DB) Storage() transaction.Storage {
   674  	return db.storage
   675  }
   676  
   677  type putterSession struct {
   678  	storage.Putter
   679  	done    func(swarm.Address) error
   680  	cleanup func() error
   681  }
   682  
   683  func (p *putterSession) Done(addr swarm.Address) error { return p.done(addr) }
   684  
   685  func (p *putterSession) Cleanup() error { return p.cleanup() }