github.com/celestiaorg/celestia-node@v0.15.0-beta.1/share/availability/light/availability.go

package light

import (
	"context"
	"errors"
	"sync"

	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/autobatch"
	"github.com/ipfs/go-datastore/namespace"
	ipldFormat "github.com/ipfs/go-ipld-format"
	logging "github.com/ipfs/go-log/v2"

	"github.com/celestiaorg/celestia-node/header"
	"github.com/celestiaorg/celestia-node/share"
	"github.com/celestiaorg/celestia-node/share/getters"
)

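// cacheAvailabilityPrefix namespaces the keys under which successfully sampled roots are
// persisted, while writeBatchSize bounds how many of those results autobatch buffers in memory
// before flushing them to the underlying datastore.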
var (
	log                     = logging.Logger("share/light")
	cacheAvailabilityPrefix = datastore.NewKey("sampling_result")
	writeBatchSize          = 2048
)

// ShareAvailability implements share.Availability using the Data Availability Sampling technique.
// It is light because it does not require downloading all the data to verify
// its availability. It is assumed that there are many ShareAvailability instances
// on the network sampling over the same Root to collectively verify its availability.
type ShareAvailability struct {
	getter share.Getter
	params Parameters

	// TODO(@Wondertan): Once we come to parallelized DASer, this lock becomes a contention point
	//  Related to #483
	// TODO: Striped locks? :D
	dsLk sync.RWMutex
	ds   *autobatch.Datastore
}

// NewShareAvailability creates a new light Availability.
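//
// A minimal usage sketch (the getter and datastore here are assumed to be constructed elsewhere):
//
//	avail := light.NewShareAvailability(getter, ds)
//	defer avail.Close(ctx)
//	err := avail.SharesAvailable(ctx, header)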
func NewShareAvailability(
	getter share.Getter,
	ds datastore.Batching,
	opts ...Option,
) *ShareAvailability {
	params := DefaultParameters()
	// isolate sampling results under their own prefix and batch writes to reduce disk churn
	ds = namespace.Wrap(ds, cacheAvailabilityPrefix)
	autoDS := autobatch.NewAutoBatching(ds, writeBatchSize)

	for _, opt := range opts {
		opt(&params)
	}

	return &ShareAvailability{
		getter: getter,
		params: params,
		ds:     autoDS,
	}
}

// SharesAvailable randomly samples `params.SampleAmount` Shares committed to the given
// ExtendedHeader. This way SharesAvailable subjectively verifies that Shares are available.
func (la *ShareAvailability) SharesAvailable(ctx context.Context, header *header.ExtendedHeader) error {
	dah := header.DAH
	// short-circuit if the given root is the minimum DAH of an empty data square
	if share.DataHash(dah.Hash()).IsEmptyRoot() {
		return nil
	}

	// do not sample over a Root that has already been sampled
	key := rootKey(dah)

	la.dsLk.RLock()
	exists, err := la.ds.Has(ctx, key)
	la.dsLk.RUnlock()
	if err != nil || exists {
		return err
	}

	log.Debugw("validate availability", "root", dah.String())
	// We assume the caller of this method has already performed basic validation on the
	// given dah/root. If for some reason this has not happened, the node should panic.
	if err := dah.ValidateBasic(); err != nil {
		log.Errorw("availability validation cannot be performed on a malformed DataAvailabilityHeader",
			"err", err)
		panic(err)
	}
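	// randomly select la.params.SampleAmount (row, col) coordinates within the square to fetch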
	samples, err := SampleSquare(len(dah.RowRoots), int(la.params.SampleAmount))
	if err != nil {
		return err
	}

	// indicate to the share.Getter that a blockservice session should be created. This
	// functionality is optional and only takes effect if the share.Getter in use supports it.
	ctx = getters.WithSession(ctx)

	log.Debugw("starting sampling session", "root", dah.String())
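	// fetch every sampled Share concurrently; each goroutine reports its result on errs,
	// which is buffered so that slow receivers never block the sampling goroutines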
	errs := make(chan error, len(samples))
	for _, s := range samples {
		go func(s Sample) {
			log.Debugw("fetching share", "root", dah.String(), "row", s.Row, "col", s.Col)
			_, err := la.getter.GetShare(ctx, header, s.Row, s.Col)
			if err != nil {
				log.Debugw("error fetching share", "root", dah.String(), "row", s.Row, "col", s.Col, "err", err)
			}
			// the Share body itself is not needed here; successfully fetching it is enough,
			// and the fetched Share is also persisted in local storage as a side effect
			select {
			case errs <- err:
			case <-ctx.Done():
			}
		}(s)
	}

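	// wait for one result per sample and classify failures: a canceled context is propagated
	// as-is, while missing shares or an exceeded deadline mean the data cannot be proven available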
	for range samples {
		var err error
		select {
		case err = <-errs:
		case <-ctx.Done():
			err = ctx.Err()
		}

		if err != nil {
			if errors.Is(err, context.Canceled) {
				return err
			}
			log.Errorw("availability validation failed", "root", dah.String(), "err", err.Error())
			if ipldFormat.IsNotFound(err) || errors.Is(err, context.DeadlineExceeded) {
				return share.ErrNotAvailable
			}
			return err
		}
	}

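	// record the successfully sampled Root so that future calls for it short-circuit;
	// a write failure is only logged, since availability itself has been verified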
	la.dsLk.Lock()
	err = la.ds.Put(ctx, key, []byte{})
	la.dsLk.Unlock()
	if err != nil {
		log.Errorw("failed to store root of successful SharesAvailable request to disk", "err", err)
	}
	return nil
}

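// rootKey derives the datastore key under which a sampled Root is recorded.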
func rootKey(root *share.Root) datastore.Key {
	return datastore.NewKey(root.String())
}

// Close flushes all queued writes to disk.
func (la *ShareAvailability) Close(ctx context.Context) error {
	return la.ds.Flush(ctx)
}