github.com/lazyledger/lazyledger-core@v0.35.0-dev.0.20210613111200-4c651f053571/p2p/ipld/read.go

package ipld

import (
	"context"
	"fmt"
	"math/rand"

	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
	"github.com/lazyledger/rsmt2d"

	"github.com/lazyledger/lazyledger-core/ipfs/plugin"
	"github.com/lazyledger/lazyledger-core/p2p/ipld/wrapper"
	"github.com/lazyledger/lazyledger-core/types"
	"github.com/lazyledger/lazyledger-core/types/consts"
)

const baseErrorMsg = "failure to retrieve block data:"

var ErrEncounteredTooManyErrors = fmt.Errorf("%s %s", baseErrorMsg, "encountered too many errors")
var ErrTimeout = fmt.Errorf("%s %s", baseErrorMsg, "timeout")

// RetrieveBlockData asynchronously fetches block data using the minimum number
// of requests to IPFS. It fails if any of the randomly sampled shares is
// unavailable.
func RetrieveBlockData(
	ctx context.Context,
	dah *types.DataAvailabilityHeader,
	dag ipld.NodeGetter,
	codec rsmt2d.Codec,
) (types.Data, error) {
	edsWidth := len(dah.RowsRoots)
	sc := newshareCounter(ctx, uint32(edsWidth))

	// convert the row and col roots into Cids
	rowRoots := dah.RowsRoots.Bytes()
	colRoots := dah.ColumnRoots.Bytes()

	// sample 1/4 of the total extended square by sampling half of the leaves in
	// half of the rows
	for _, row := range uniqueRandNumbers(edsWidth/2, edsWidth) {
		for _, col := range uniqueRandNumbers(edsWidth/2, edsWidth) {
			rootCid, err := plugin.CidFromNamespacedSha256(rowRoots[row])
			if err != nil {
				return types.Data{}, err
			}

			go sc.retrieveShare(rootCid, true, row, col, dag)
		}
	}

	// wait until enough data has been collected, too many errors encountered,
	// or the timeout is reached
	err := sc.wait()
	if err != nil {
		return types.Data{}, err
	}

	// flatten the square
	flattened := sc.flatten()

	tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(edsWidth) / 2)

	// repair the square
	eds, err := rsmt2d.RepairExtendedDataSquare(rowRoots, colRoots, flattened, codec, tree.Constructor)
	if err != nil {
		return types.Data{}, err
	}

	blockData, err := types.DataFromSquare(eds)
	if err != nil {
		return types.Data{}, err
	}

	return blockData, nil
}
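
// A minimal usage sketch (not part of the original file): a caller is expected
// to pass a cancellable, typically deadline-bound, context, since shareCounter
// reports context cancellation as ErrTimeout. The dah, dag, and codec values
// are assumed to be supplied by the caller.
func retrieveBlockDataExample(
	ctx context.Context,
	dah *types.DataAvailabilityHeader,
	dag ipld.NodeGetter,
	codec rsmt2d.Codec,
) (types.Data, error) {
	// a real caller would typically derive this context with a deadline
	// (e.g. context.WithTimeout); WithCancel stands in for that here to avoid
	// extra imports
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	data, err := RetrieveBlockData(ctx, dah, dag, codec)
	if err == ErrTimeout {
		// the deadline expired or the context was cancelled before enough
		// shares were collected
		return types.Data{}, fmt.Errorf("sampling incomplete: %w", err)
	}
	return data, err
}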

// uniqueRandNumbers generates count unique random numbers in the range [0, max)
func uniqueRandNumbers(count, max int) []uint32 {
	if count > max {
		panic(fmt.Sprintf("cannot create %d unique samples from a max of %d", count, max))
	}
	samples := make(map[uint32]struct{}, count)
	for i := 0; i < count; {
		// nolint:gosec // G404: Use of weak random number generator
		sample := uint32(rand.Intn(max))
		if _, has := samples[sample]; has {
			continue
		}
		samples[sample] = struct{}{}
		i++
	}
	out := make([]uint32, count)
	counter := 0
	for s := range samples {
		out[counter] = s
		counter++
	}
	return out
}
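
// A small sketch (not used by the package) of how RetrieveBlockData combines
// two calls to uniqueRandNumbers: edsWidth/2 random rows, each paired with
// edsWidth/2 random columns, yields edsWidth*edsWidth/4 coordinates, i.e. a
// quarter of the extended square.
func sampleQuarterSquare(edsWidth int) []index {
	coords := make([]index, 0, (edsWidth/2)*(edsWidth/2))
	for _, row := range uniqueRandNumbers(edsWidth/2, edsWidth) {
		for _, col := range uniqueRandNumbers(edsWidth/2, edsWidth) {
			coords = append(coords, index{row: row, col: col})
		}
	}
	return coords
}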

type index struct {
	row uint32
	col uint32
}

type indexedShare struct {
	data []byte
	index
}

// shareCounter is a thread-safe tallying mechanism for share retrieval
type shareCounter struct {
	// all shares
	shares map[index][]byte
	// number of shares successfully collected
	counter uint32
	// the width of the extended data square
	edsWidth uint32
	// the minimum shares needed to repair the extended data square
	minSharesNeeded uint32

	shareChan chan indexedShare
	ctx       context.Context
	cancel    context.CancelFunc
	// any errors encountered when attempting to retrieve shares
	errc chan error
}

func newshareCounter(parentCtx context.Context, edsWidth uint32) *shareCounter {
	ctx, cancel := context.WithCancel(parentCtx)

	// calculate the min number of shares needed to repair the square
	minSharesNeeded := edsWidth * edsWidth / 4

	return &shareCounter{
		shares:          make(map[index][]byte),
		edsWidth:        edsWidth,
		minSharesNeeded: minSharesNeeded,
		shareChan:       make(chan indexedShare, 1),
		errc:            make(chan error, 1),
		ctx:             ctx,
		cancel:          cancel,
	}
}
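
// A standalone sketch (not used by the package) of the minSharesNeeded
// arithmetic: for an original square of width k the extended width is 2k, so
// edsWidth*edsWidth/4 = k*k, i.e. the number of shares in the original square.
func minSharesForOriginalWidth(originalWidth uint32) uint32 {
	edsWidth := 2 * originalWidth
	return edsWidth * edsWidth / 4
}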

// retrieveShare uses GetLeafData to fetch a single share and hands it to the
// counter
func (sc *shareCounter) retrieveShare(
	rootCid cid.Cid,
	isRow bool,
	axisIdx uint32,
	idx uint32,
	dag ipld.NodeGetter,
) {
	data, err := GetLeafData(sc.ctx, rootCid, idx, sc.edsWidth, dag)
	if err != nil {
		select {
		case <-sc.ctx.Done():
		case sc.errc <- err:
		}
	}

	if len(data) < consts.ShareSize {
		return
	}

	// switch the row and col indexes if needed
	rowIdx := idx
	colIdx := axisIdx
	if isRow {
		rowIdx = axisIdx
		colIdx = idx
	}

	select {
	case <-sc.ctx.Done():
	default:
		sc.shareChan <- indexedShare{data: data[consts.NamespaceSize:], index: index{row: rowIdx, col: colIdx}}
	}
}
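
// A sketch (not used by the package) of the coordinate bookkeeping above:
// axisIdx names the row (or column) whose root the share was requested under,
// and idx is the share's position along that axis.
func shareIndexFor(isRow bool, axisIdx, idx uint32) index {
	if isRow {
		return index{row: axisIdx, col: idx}
	}
	return index{row: idx, col: axisIdx}
}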

// wait until enough data has been collected, the timeout has been reached, or
// too many errors are encountered
func (sc *shareCounter) wait() error {
	defer sc.cancel()

	for {
		select {
		case <-sc.ctx.Done():
			return ErrTimeout

		case share := <-sc.shareChan:
			_, has := sc.shares[share.index]
			// add iff it does not already exist
			if !has {
				sc.shares[share.index] = share.data
				sc.counter++
				// check finishing condition
				if sc.counter >= sc.minSharesNeeded {
					return nil
				}
			}

		case err := <-sc.errc:
			return fmt.Errorf("failure to retrieve data square: %w", err)
		}
	}
}

func (sc *shareCounter) flatten() [][]byte {
	flattened := make([][]byte, sc.edsWidth*sc.edsWidth)
	for index, data := range sc.shares {
		flattened[(index.row*sc.edsWidth)+index.col] = data
	}
	return flattened
}
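
// A companion sketch (not used by the package) of flatten's row-major layout:
// the share at (row, col) occupies slot row*edsWidth + col, so coordinates can
// be recovered from a flat position with division and modulus.
func coordinatesFromFlatIndex(pos, edsWidth uint32) index {
	return index{row: pos / edsWidth, col: pos % edsWidth}
}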

// GetLeafData fetches and returns the data for leaf leafIndex of root rootCid.
// It stops and returns an error if the provided context is cancelled before
// finishing.
func GetLeafData(
	ctx context.Context,
	rootCid cid.Cid,
	leafIndex uint32,
	totalLeafs uint32, // this corresponds to the extended square width
	dag ipld.NodeGetter,
) ([]byte, error) {
	nd, err := GetLeaf(ctx, dag, rootCid, leafIndex, totalLeafs)
	if err != nil {
		return nil, err
	}

	return nd.RawData()[1:], nil
}
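
// A small sketch (not used by the package) of how callers interpret the bytes
// returned by GetLeafData: the first consts.NamespaceSize bytes are the
// namespace ID and the remainder is the share payload, mirroring the slicing
// done in retrieveShare. The length check reuses the same bound retrieveShare
// applies.
func splitNamespacedLeaf(leafData []byte) (namespace, share []byte, err error) {
	if len(leafData) < consts.ShareSize {
		return nil, nil, fmt.Errorf("leaf data too short: %d bytes", len(leafData))
	}
	return leafData[:consts.NamespaceSize], leafData[consts.NamespaceSize:], nil
}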

// GetLeaf fetches and returns the raw leaf.
// It walks down the IPLD NMT tree until it finds the requested leaf.
func GetLeaf(ctx context.Context, dag ipld.NodeGetter, root cid.Cid, leaf, total uint32) (ipld.Node, error) {
	// request the node
	nd, err := dag.Get(ctx, root)
	if err != nil {
		return nil, err
	}

	// look for links
	lnks := nd.Links()
	if len(lnks) == 1 {
		// a single link means we have reached the bottom of the tree, so request the leaf itself
		return dag.Get(ctx, lnks[0].Cid)
	}

	// route the walk to the appropriate child
	total /= 2 // the tree is binary, so each step halves the remaining leaves
	if leaf < total {
		root = lnks[0].Cid // the target leaf is in the left subtree, so walk down the first child
	} else {
		root, leaf = lnks[1].Cid, leaf-total // otherwise walk down the second child and shift the index
	}

	// recursively walk down the selected child
	return GetLeaf(ctx, dag, root, leaf, total)
}
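
// A standalone sketch (not used by the package) of the index arithmetic GetLeaf
// performs while descending the binary tree: each level halves the remaining
// leaf count, and stepping right subtracts the size of the left subtree from
// the leaf index. The returned slice records the left/right choices.
func leafPath(leaf, total uint32) []bool {
	var goRight []bool
	for total > 1 {
		total /= 2
		if leaf < total {
			goRight = append(goRight, false)
		} else {
			goRight = append(goRight, true)
			leaf -= total
		}
	}
	return goRight
}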