github.com/lazyledger/lazyledger-core@v0.35.0-dev.0.20210613111200-4c651f053571/p2p/ipld/write.go

package ipld

import (
	"context"
	"fmt"
	"math"
	"sync/atomic"
	"time"

	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
	"github.com/lazyledger/nmt"
	"github.com/lazyledger/rsmt2d"
	"github.com/libp2p/go-libp2p-core/routing"
	kbucket "github.com/libp2p/go-libp2p-kbucket"

	"github.com/lazyledger/lazyledger-core/ipfs/plugin"
	"github.com/lazyledger/lazyledger-core/libs/log"
	"github.com/lazyledger/lazyledger-core/libs/sync"
	"github.com/lazyledger/lazyledger-core/p2p/ipld/wrapper"
	"github.com/lazyledger/lazyledger-core/types"
)
// PutBlock posts and pins erasured block data to IPFS using the provided
// ipld.NodeAdder. Note: the erasured data is currently recomputed from the
// block's data rather than reused.
// TODO: this needs a refactor.
func PutBlock(
	ctx context.Context,
	adder ipld.NodeAdder,
	block *types.Block,
	croute routing.ContentRouting,
	logger log.Logger,
) error {
	// recompute the shares
	namespacedShares, _ := block.Data.ComputeShares()
	shares := namespacedShares.RawShares()

	// don't do anything if there is no data to put on IPFS
	if len(shares) == 0 {
		return nil
	}

	// create nmt adder wrapping batch adder
	batchAdder := NewNmtNodeAdder(ctx, ipld.NewBatch(ctx, adder))

	// create the nmt wrapper to generate row and col commitments;
	// the original square is squareSize x squareSize, so squareSize = sqrt(len(shares))
	squareSize := uint32(math.Sqrt(float64(len(shares))))
	tree := wrapper.NewErasuredNamespacedMerkleTree(uint64(squareSize), nmt.NodeVisitor(batchAdder.Visit))

	// recompute the eds
	eds, err := rsmt2d.ComputeExtendedDataSquare(shares, rsmt2d.NewRSGF8Codec(), tree.Constructor)
	if err != nil {
		return fmt.Errorf("failure to recompute the extended data square: %w", err)
	}
	// get row and col roots to be provided;
	// this also triggers adding data to the DAG.
	// The extended square is 2*squareSize per side, so there are
	// 2*squareSize row roots plus 2*squareSize column roots to provide.
	prov := newProvider(ctx, croute, int32(squareSize*4), logger.With("height", block.Height))
	for _, root := range eds.RowRoots() {
		prov.Provide(plugin.MustCidFromNamespacedSha256(root))
	}
	for _, root := range eds.ColumnRoots() {
		prov.Provide(plugin.MustCidFromNamespacedSha256(root))
	}
	// commit the batch to ipfs
	err = batchAdder.Commit()
	if err != nil {
		return err
	}
	// wait until all the roots have been provided
	<-prov.Done()
	return prov.Err()
}
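
// What follows is a minimal, hypothetical sketch of how PutBlock is meant to
// be called; it is not part of the original API. The dag and croute values are
// assumptions: in practice they would come from the node's IPFS DAG service
// and DHT content router.
func putBlockExample(
	ctx context.Context,
	dag ipld.NodeAdder,
	croute routing.ContentRouting,
	blk *types.Block,
	logger log.Logger,
) error {
	// Bound the add-and-provide phase so a slow or unreachable DHT cannot
	// block the caller indefinitely.
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	return PutBlock(ctx, dag, blk, croute, logger)
}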

var provideWorkers = 32

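// provider announces CIDs to the DHT through the given ContentRouting using a
// small pool of workers. Provide enqueues a CID, Done is closed once the
// expected number of CIDs has been handled, and Err reports the first provide
// failure, or the context error if there was none.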
type provider struct {
	ctx  context.Context
	done chan struct{}

	err   error
	errLk sync.RWMutex

	jobs  chan cid.Cid
	total int32

	croute    routing.ContentRouting
	log       log.Logger
	startTime time.Time
}

func newProvider(ctx context.Context, croute routing.ContentRouting, toProvide int32, logger log.Logger) *provider {
	p := &provider{
		ctx:    ctx,
		done:   make(chan struct{}),
		jobs:   make(chan cid.Cid, provideWorkers),
		total:  toProvide,
		croute: croute,
		log:    logger,
	}
	for i := 0; i < provideWorkers; i++ {
		go p.worker()
	}
	logger.Info("Started Providing to DHT")
	p.startTime = time.Now()
	return p
}

func (p *provider) Provide(id cid.Cid) {
	select {
	case p.jobs <- id:
	case <-p.ctx.Done():
	}
}

func (p *provider) Done() <-chan struct{} {
	return p.done
}

func (p *provider) Err() error {
	p.errLk.RLock()
	defer p.errLk.RUnlock()
	if p.err != nil {
		return p.err
	}
	return p.ctx.Err()
}

func (p *provider) worker() {
	for {
		select {
		case id := <-p.jobs:
			err := p.croute.Provide(p.ctx, id, true)
			// Ignore ErrLookupFailure to reduce test log spam, as it simply
			// indicates we haven't connected to other DHT nodes yet.
			if err != nil && err != kbucket.ErrLookupFailure {
				// keep only the first error; check and set under the same
				// lock to avoid racing with other workers
				p.errLk.Lock()
				if p.err == nil {
					p.err = err
				}
				p.errLk.Unlock()

				p.log.Error("failed to provide to DHT", "err", err.Error())
			}

			p.provided()
		case <-p.ctx.Done():
			for {
				select {
				case <-p.jobs: // drain the channel
					p.provided() // ensure done is closed
				default:
					return
				}
			}
		case <-p.done:
			return
		}
	}
}

// provided records that one more CID has been handled; once the expected
// total reaches zero it closes the done channel.
func (p *provider) provided() {
	if atomic.AddInt32(&p.total, -1) == 0 {
		p.log.Info("Finished providing to DHT", "took", time.Since(p.startTime).String())
		close(p.done)
	}
}