github.com/filecoin-project/specs-actors/v4@v4.0.2/support/agent/miner_agent.go (about)

     1  package agent
     2  
     3  import (
     4  	"container/heap"
     5  	"crypto/sha256"
     6  	"fmt"
     7  	"math/rand"
     8  
     9  	"github.com/filecoin-project/go-address"
    10  	"github.com/filecoin-project/go-bitfield"
    11  	"github.com/filecoin-project/go-state-types/abi"
    12  	"github.com/filecoin-project/go-state-types/big"
    13  	"github.com/filecoin-project/go-state-types/cbor"
    14  	"github.com/filecoin-project/go-state-types/dline"
    15  	"github.com/ipfs/go-cid"
    16  	mh "github.com/multiformats/go-multihash"
    17  	"github.com/pkg/errors"
    18  
    19  	"github.com/filecoin-project/specs-actors/v4/actors/builtin"
    20  	"github.com/filecoin-project/specs-actors/v4/actors/builtin/market"
    21  	"github.com/filecoin-project/specs-actors/v4/actors/builtin/miner"
    22  	"github.com/filecoin-project/specs-actors/v4/actors/runtime/proof"
    23  )
    24  
// MinerAgentConfig holds the tunable parameters that define a simulated miner's
// behavior before the miner actor is created.
type MinerAgentConfig struct {
	PrecommitRate    float64                 // average number of PreCommits per epoch
	ProofType        abi.RegisteredSealProof // seal proof type for this miner
	StartingBalance  abi.TokenAmount         // initial actor balance for miner actor
	FaultRate        float64                 // rate at which committed sectors go faulty (faults per committed sector per epoch)
	RecoveryRate     float64                 // rate at which faults are recovered (recoveries per fault per epoch)
	MinMarketBalance abi.TokenAmount         // balance below which miner will top up funds in market actor
	MaxMarketBalance abi.TokenAmount         // balance to which miner will top up funds in market actor
	UpgradeSectors   bool                    // if true, miner will replace sectors without deals with sectors that do
}
    35  
// MinerAgent simulates a storage miner's off-chain behavior — precommitting,
// proving, faulting, recovering, and dealing — driven once per epoch by Tick.
type MinerAgent struct {
	Config        MinerAgentConfig // parameters used to define miner prior to creation
	Owner         address.Address
	Worker        address.Address
	IDAddress     address.Address
	RobustAddress address.Address

	// Stats
	UpgradedSectors uint64

	// These slices are used to track counts and for random selections
	// all committed sectors (including sectors pending proof validation) that are not faulty and have not expired
	liveSectors []uint64
	// all sectors expected to be faulty
	faultySectors []uint64
	// all sectors that contain no deals (committed capacity sectors)
	ccSectors []uint64

	// deals made by this agent that need to be published
	pendingDeals []market.ClientDealProposal
	// deals already published by this agent that still need to be included in a sector
	dealsPendingInclusion []pendingDeal

	// priority queue used to trigger actions at future epochs
	operationSchedule *opQueue
	// which sector belongs to which deadline/partition
	deadlines [miner.WPoStPeriodDeadlines][]partition
	// iterator to time PreCommit events according to rate
	preCommitEvents *RateIterator
	// iterator to time faults events according to rate
	faultEvents *RateIterator
	// iterator to time recoveries according to rate
	recoveryEvents *RateIterator
	// tracks which sector number to use next
	nextSectorNumber abi.SectorNumber
	// tracks funds expected to be locked for miner deal collateral
	expectedMarketBalance abi.TokenAmount
	// random number generator provided by sim
	rnd *rand.Rand
}
    76  
    77  func NewMinerAgent(owner address.Address, worker address.Address, idAddress address.Address, robustAddress address.Address,
    78  	rndSeed int64, config MinerAgentConfig,
    79  ) *MinerAgent {
    80  	rnd := rand.New(rand.NewSource(rndSeed))
    81  	return &MinerAgent{
    82  		Config:        config,
    83  		Owner:         owner,
    84  		Worker:        worker,
    85  		IDAddress:     idAddress,
    86  		RobustAddress: robustAddress,
    87  
    88  		operationSchedule:     &opQueue{},
    89  		preCommitEvents:       NewRateIterator(config.PrecommitRate, rnd.Int63()),
    90  		expectedMarketBalance: big.Zero(),
    91  
    92  		// fault rate is the configured fault rate times the number of live sectors or zero.
    93  		faultEvents: NewRateIterator(0.0, rnd.Int63()),
    94  		// recovery rate is the configured recovery rate times the number of faults or zero.
    95  		recoveryEvents: NewRateIterator(0.0, rnd.Int63()),
    96  		rnd:            rnd, // rng for this miner isolated from original source
    97  	}
    98  }
    99  
// Tick advances the miner agent by one epoch. It first executes operations
// scheduled for this epoch, then probabilistically triggers PreCommits, faults,
// and recoveries (Poisson processes at the configured rates), publishes pending
// deals, and tops up market balance if needed.
// Returns the messages this miner wants included in the current epoch.
func (ma *MinerAgent) Tick(s SimState) ([]message, error) {
	var messages []message

	// act on scheduled operations
	for _, op := range ma.operationSchedule.PopOpsUntil(s.GetEpoch()) {
		switch o := op.action.(type) {
		case proveCommitAction:
			messages = append(messages, ma.createProveCommit(s.GetEpoch(), o.sectorNumber, o.committedCapacity, o.upgrade))
		case registerSectorAction:
			err := ma.registerSector(s, o.sectorNumber, o.committedCapacity, o.upgrade)
			if err != nil {
				return nil, err
			}
		case proveDeadlineAction:
			msgs, err := ma.submitPoStForDeadline(s, o.dlIdx)
			if err != nil {
				return nil, err
			}
			messages = append(messages, msgs...)
		case recoverSectorAction:
			msgs, err := ma.delayedRecoveryMessage(o.dlIdx, o.pIdx, o.sectorNumber)
			if err != nil {
				return nil, err
			}
			messages = append(messages, msgs...)
		case syncDeadlineStateAction:
			// state sync produces no messages, only updates agent bookkeeping
			if err := ma.syncMinerState(s, o.dlIdx); err != nil {
				return nil, err
			}
		}
	}

	// Start PreCommits. PreCommits are triggered with a Poisson distribution at the PreCommit rate.
	// This permits multiple PreCommits per epoch while also allowing multiple epochs to pass
	// between PreCommits. For now always assume we have enough funds for the PreCommit deposit.
	if err := ma.preCommitEvents.Tick(func() error {
		// can't create precommit if in fee debt
		mSt, err := s.MinerState(ma.IDAddress)
		if err != nil {
			return err
		}
		feeDebt, err := mSt.FeeDebt(s.Store())
		if err != nil {
			return err
		}
		if feeDebt.GreaterThan(big.Zero()) {
			// silently skip this precommit opportunity; not an error
			return nil
		}

		msg, err := ma.createPreCommit(s, s.GetEpoch())
		if err != nil {
			return err
		}
		messages = append(messages, msg)
		return nil
	}); err != nil {
		return nil, err
	}

	// Fault sectors.
	// Rate must be multiplied by the number of live sectors
	faultRate := ma.Config.FaultRate * float64(len(ma.liveSectors))
	if err := ma.faultEvents.TickWithRate(faultRate, func() error {
		msgs, err := ma.createFault(s)
		if err != nil {
			return err
		}
		messages = append(messages, msgs...)
		return nil
	}); err != nil {
		return nil, err
	}

	// Recover sectors.
	// Rate must be multiplied by the number of faulty sectors
	recoveryRate := ma.Config.RecoveryRate * float64(len(ma.faultySectors))
	if err := ma.recoveryEvents.TickWithRate(recoveryRate, func() error {
		msgs, err := ma.createRecovery(s)
		if err != nil {
			return err
		}
		messages = append(messages, msgs...)
		return nil
	}); err != nil {
		return nil, err
	}

	// publish pending deals
	messages = append(messages, ma.publishStorageDeals()...)

	// add market balance if needed
	messages = append(messages, ma.updateMarketBalance()...)

	return messages, nil
}
   195  
   196  ///////////////////////////////////
   197  //
   198  //  DealProvider methods
   199  //
   200  ///////////////////////////////////
   201  
// MinerAgent acts as a deal provider for client agents.
var _ DealProvider = (*MinerAgent)(nil)

// Address returns the miner's ID address, used as the deal provider address.
func (ma *MinerAgent) Address() address.Address {
	return ma.IDAddress
}
   207  
   208  func (ma *MinerAgent) DealRange(currentEpoch abi.ChainEpoch) (abi.ChainEpoch, abi.ChainEpoch) {
   209  	// maximum sector start and maximum expiration
   210  	return currentEpoch + miner.MaxProveCommitDuration[ma.Config.ProofType] + miner.MinSectorExpiration,
   211  		currentEpoch + miner.MaxSectorExpirationExtension
   212  }
   213  
// CreateDeal accepts a deal proposal from a client agent, reserving the
// provider collateral from the expected market balance and queueing the
// proposal for publication on the next Tick.
func (ma *MinerAgent) CreateDeal(proposal market.ClientDealProposal) {
	ma.expectedMarketBalance = big.Sub(ma.expectedMarketBalance, proposal.Proposal.ProviderCollateral)
	ma.pendingDeals = append(ma.pendingDeals, proposal)
}
   218  
// AvailableCollateral returns the market balance the miner expects to have
// available for additional deal collateral.
func (ma *MinerAgent) AvailableCollateral() abi.TokenAmount {
	return ma.expectedMarketBalance
}
   222  
   223  ///////////////////////////////////
   224  //
   225  //  Message Generation
   226  //
   227  ///////////////////////////////////
   228  
// createPreCommit builds a PreCommitSector message for the next sector number,
// packing in any published-but-unincluded deals, optionally marking the sector
// as a replacement (upgrade) for a random committed-capacity sector, and
// scheduling the subsequent prove-commit at a randomly chosen activation epoch.
func (ma *MinerAgent) createPreCommit(s SimState, currentEpoch abi.ChainEpoch) (message, error) {
	// go ahead and choose when we're going to activate this sector
	sectorActivation := ma.sectorActivation(currentEpoch)
	sectorNumber := ma.nextSectorNumber
	ma.nextSectorNumber++

	// expiration may be pushed later to cover the longest-running included deal
	expiration := ma.sectorExpiration(currentEpoch)
	dealIds, expiration := ma.fillSectorWithPendingDeals(expiration)
	// NOTE(review): this clears proposals that have not yet been published
	// (pendingDeals feeds publishStorageDeals, not this sector's deal list) —
	// confirm dropping them here is intentional.
	ma.pendingDeals = nil

	// create sector with all deals the miner has made but not yet included
	params := miner.PreCommitSectorParams{
		DealIDs:       dealIds,
		SealProof:     ma.Config.ProofType,
		SectorNumber:  sectorNumber,
		SealedCID:     sectorSealCID(ma.rnd),
		SealRandEpoch: currentEpoch - 1,
		Expiration:    expiration,
	}

	// upgrade sector if upgrades are on, this sector has deals, and we have a cc sector
	isUpgrade := ma.Config.UpgradeSectors && len(dealIds) > 0 && len(ma.ccSectors) > 0
	if isUpgrade {
		var upgradeNumber uint64
		upgradeNumber, ma.ccSectors = PopRandom(ma.ccSectors, ma.rnd)

		// prevent sim from attempting to upgrade to sector with shorter duration
		sinfo, err := ma.sectorInfo(s, upgradeNumber)
		if err != nil {
			return message{}, err
		}
		if sinfo.Expiration() > expiration {
			params.Expiration = sinfo.Expiration()
		}

		dlInfo, pIdx, err := ma.dlInfoForSector(s, upgradeNumber)
		if err != nil {
			return message{}, err
		}

		params.ReplaceCapacity = true
		params.ReplaceSectorNumber = abi.SectorNumber(upgradeNumber)
		params.ReplaceSectorDeadline = dlInfo.Index
		params.ReplaceSectorPartition = pIdx
		// NOTE(review): registerSector also increments UpgradedSectors when the
		// upgrade flag is set, which appears to double-count upgrades — confirm.
		ma.UpgradedSectors++
	}

	// assume PreCommit succeeds and schedule prove commit
	ma.operationSchedule.ScheduleOp(sectorActivation, proveCommitAction{
		sectorNumber:      sectorNumber,
		committedCapacity: ma.Config.UpgradeSectors && len(dealIds) == 0,
		upgrade:           isUpgrade,
	})

	return message{
		From:   ma.Worker,
		To:     ma.IDAddress,
		Value:  big.Zero(),
		Method: builtin.MethodsMiner.PreCommitSector,
		Params: &params,
	}, nil
}
   292  
   293  // create prove commit message
   294  func (ma *MinerAgent) createProveCommit(epoch abi.ChainEpoch, sectorNumber abi.SectorNumber, committedCapacity bool, upgrade bool) message {
   295  	params := miner.ProveCommitSectorParams{
   296  		SectorNumber: sectorNumber,
   297  	}
   298  
   299  	// register an op for next epoch (after batch prove) to schedule a post for the sector
   300  	ma.operationSchedule.ScheduleOp(epoch+1, registerSectorAction{
   301  		sectorNumber:      sectorNumber,
   302  		committedCapacity: committedCapacity,
   303  		upgrade:           upgrade,
   304  	})
   305  
   306  	return message{
   307  		From:   ma.Worker,
   308  		To:     ma.IDAddress,
   309  		Value:  big.Zero(),
   310  		Method: builtin.MethodsMiner.ProveCommitSector,
   311  		Params: &params,
   312  	}
   313  }
   314  
// Fault a sector.
// This chooses a sector from live sectors and then either declares the fault
// (if before the fault cutoff) or marks it to be skipped in the next PoSt.
func (ma *MinerAgent) createFault(v SimState) ([]message, error) {
	// opt out if no live sectors
	if len(ma.liveSectors) == 0 {
		return nil, nil
	}

	// choose a live sector to go faulty
	var faultNumber uint64
	faultNumber, ma.liveSectors = PopRandom(ma.liveSectors, ma.rnd)
	ma.faultySectors = append(ma.faultySectors, faultNumber)

	// avoid trying to upgrade a faulty sector
	ma.ccSectors = filterSlice(ma.ccSectors, map[uint64]bool{faultNumber: true})

	faultDlInfo, pIdx, err := ma.dlInfoForSector(v, faultNumber)
	if err != nil {
		return nil, err
	}

	parts := ma.deadlines[faultDlInfo.Index]
	if pIdx >= uint64(len(parts)) {
		return nil, errors.Errorf("sector %d in deadline %d has unregistered partition %d",
			faultNumber, faultDlInfo.Index, pIdx)
	}
	// record the fault in the agent's view of the partition
	parts[pIdx].faults.Set(faultNumber)

	// If it's too late, skip fault rather than declaring it
	if faultDlInfo.FaultCutoffPassed() {
		parts[pIdx].toBeSkipped.Set(faultNumber)
		return nil, nil
	}

	// for now, just send a message per fault rather than trying to batch them
	faultParams := miner.DeclareFaultsParams{
		Faults: []miner.FaultDeclaration{{
			Deadline:  faultDlInfo.Index,
			Partition: pIdx,
			Sectors:   bitfield.NewFromSet([]uint64{faultNumber}),
		}},
	}

	return []message{{
		From:   ma.Worker,
		To:     ma.IDAddress,
		Value:  big.Zero(),
		Method: builtin.MethodsMiner.DeclareFaults,
		Params: &faultParams,
	}}, nil
}
   367  
// Recover a sector.
// This chooses a sector from faulty sectors and then either declares the recovery
// immediately or, if the fault cutoff has passed, schedules the declaration for
// when the deadline closes.
func (ma *MinerAgent) createRecovery(v SimState) ([]message, error) {
	// opt out if no faulty sectors
	if len(ma.faultySectors) == 0 {
		return nil, nil
	}

	// choose a faulty sector to recover
	var recoveryNumber uint64
	recoveryNumber, ma.faultySectors = PopRandom(ma.faultySectors, ma.rnd)

	recoveryDlInfo, pIdx, err := ma.dlInfoForSector(v, recoveryNumber)
	if err != nil {
		return nil, err
	}

	// sanity-check the agent's bookkeeping before declaring the recovery
	parts := ma.deadlines[recoveryDlInfo.Index]
	if pIdx >= uint64(len(parts)) {
		return nil, errors.Errorf("recovered sector %d in deadline %d has unregistered partition %d",
			recoveryNumber, recoveryDlInfo.Index, pIdx)
	}
	if set, err := parts[pIdx].faults.IsSet(recoveryNumber); err != nil {
		return nil, errors.Errorf("could not check if %d in deadline %d partition %d is faulty",
			recoveryNumber, recoveryDlInfo.Index, pIdx)
	} else if !set {
		return nil, errors.Errorf("recovery %d in deadline %d partition %d was not a fault",
			recoveryNumber, recoveryDlInfo.Index, pIdx)
	}

	// If it's too late, schedule recovery rather than declaring it
	if recoveryDlInfo.FaultCutoffPassed() {
		ma.operationSchedule.ScheduleOp(recoveryDlInfo.Close, recoverSectorAction{
			dlIdx:        recoveryDlInfo.Index,
			pIdx:         pIdx,
			sectorNumber: abi.SectorNumber(recoveryNumber),
		})
		return nil, nil
	}

	return ma.recoveryMessage(recoveryDlInfo.Index, pIdx, abi.SectorNumber(recoveryNumber))
}
   410  
   411  // prove sectors in deadline
   412  func (ma *MinerAgent) submitPoStForDeadline(v SimState, dlIdx uint64) ([]message, error) {
   413  	var partitions []miner.PoStPartition
   414  	for pIdx, part := range ma.deadlines[dlIdx] {
   415  		if live, err := bitfield.SubtractBitField(part.sectors, part.faults); err != nil {
   416  			return nil, err
   417  		} else if empty, err := live.IsEmpty(); err != nil {
   418  			return nil, err
   419  		} else if !empty {
   420  			partitions = append(partitions, miner.PoStPartition{
   421  				Index:   uint64(pIdx),
   422  				Skipped: part.toBeSkipped,
   423  			})
   424  
   425  			part.toBeSkipped = bitfield.New()
   426  		}
   427  	}
   428  
   429  	// schedule post-deadline state synchronization and next PoSt
   430  	if err := ma.scheduleSyncAndNextProof(v, dlIdx); err != nil {
   431  		return nil, err
   432  	}
   433  
   434  	// submitPoSt only if we have something to prove
   435  	if len(partitions) == 0 {
   436  		return nil, nil
   437  	}
   438  
   439  	postProofType, err := ma.Config.ProofType.RegisteredWindowPoStProof()
   440  	if err != nil {
   441  		return nil, err
   442  	}
   443  
   444  	params := miner.SubmitWindowedPoStParams{
   445  		Deadline:   dlIdx,
   446  		Partitions: partitions,
   447  		Proofs: []proof.PoStProof{{
   448  			PoStProof:  postProofType,
   449  			ProofBytes: []byte{},
   450  		}},
   451  		ChainCommitEpoch: v.GetEpoch() - 1,
   452  		ChainCommitRand:  []byte("not really random"),
   453  	}
   454  
   455  	return []message{{
   456  		From:   ma.Worker,
   457  		To:     ma.IDAddress,
   458  		Value:  big.Zero(),
   459  		Method: builtin.MethodsMiner.SubmitWindowedPoSt,
   460  		Params: &params,
   461  	}}, nil
   462  }
   463  
// publishStorageDeals creates a PublishStorageDeals message for all pending
// deal proposals and clears the pending queue. The return handler records the
// on-chain deal IDs so the deals can later be packed into sectors.
func (ma *MinerAgent) publishStorageDeals() []message {
	if len(ma.pendingDeals) == 0 {
		return []message{}
	}

	params := market.PublishStorageDealsParams{
		Deals: ma.pendingDeals,
	}
	// params retains the slice; safe to reset the queue before the message runs
	ma.pendingDeals = nil

	return []message{{
		From:   ma.Worker,
		To:     builtin.StorageMarketActorAddr,
		Value:  big.Zero(),
		Method: builtin.MethodsMarket.PublishStorageDeals,
		Params: &params,
		ReturnHandler: func(_ SimState, _ message, ret cbor.Marshaler) error {
			// add returned deal ids to be included within sectors
			publishReturn, ok := ret.(*market.PublishStorageDealsReturn)
			if !ok {
				return errors.Errorf("create miner return has wrong type: %v", ret)
			}

			// returned IDs are positionally matched with the submitted proposals
			for idx, dealId := range publishReturn.IDs {
				ma.dealsPendingInclusion = append(ma.dealsPendingInclusion, pendingDeal{
					id:   dealId,
					size: params.Deals[idx].Proposal.PieceSize,
					ends: params.Deals[idx].Proposal.EndEpoch,
				})
			}
			return nil
		},
	}}
}
   499  
   500  func (ma *MinerAgent) updateMarketBalance() []message {
   501  	if ma.expectedMarketBalance.GreaterThanEqual(ma.Config.MinMarketBalance) {
   502  		return []message{}
   503  	}
   504  
   505  	balanceToAdd := big.Sub(ma.Config.MaxMarketBalance, ma.expectedMarketBalance)
   506  
   507  	return []message{{
   508  		From:   ma.Worker,
   509  		To:     builtin.StorageMarketActorAddr,
   510  		Value:  balanceToAdd,
   511  		Method: builtin.MethodsMarket.AddBalance,
   512  		Params: &ma.IDAddress,
   513  
   514  		// update in return handler to prevent deals before the miner has balance
   515  		ReturnHandler: func(_ SimState, _ message, _ cbor.Marshaler) error {
   516  			ma.expectedMarketBalance = ma.Config.MaxMarketBalance
   517  			return nil
   518  		},
   519  	}}
   520  }
   521  
   522  ////////////////////////////////////////////////
   523  //
   524  //  Misc methods
   525  //
   526  ////////////////////////////////////////////////
   527  
// looks up sector deadline and partition so we can start adding it to PoSts.
// If proof verification failed the sector will be absent on chain; this is
// logged and tolerated rather than treated as an error.
func (ma *MinerAgent) registerSector(v SimState, sectorNumber abi.SectorNumber, committedCapacity bool, upgrade bool) error {
	mSt, err := v.MinerState(ma.IDAddress)
	if err != nil {
		return err
	}

	// first check for sector
	if found, err := mSt.HasSectorNo(v.Store(), sectorNumber); err != nil {
		return err
	} else if !found {
		fmt.Printf("failed to register sector %d, did proof verification fail?\n", sectorNumber)
		return nil
	}

	dlIdx, pIdx, err := mSt.FindSector(v.Store(), sectorNumber)
	if err != nil {
		return err
	}

	// first sector in this deadline: start the sync/prove cycle for it
	if len(ma.deadlines[dlIdx]) == 0 {
		err := ma.scheduleSyncAndNextProof(v, dlIdx)
		if err != nil {
			return err
		}
	}

	if upgrade {
		ma.UpgradedSectors++
	}

	ma.liveSectors = append(ma.liveSectors, uint64(sectorNumber))
	if committedCapacity {
		ma.ccSectors = append(ma.ccSectors, uint64(sectorNumber))
	}

	// pIdx should be sequential, but add empty partitions just in case
	for pIdx >= uint64(len(ma.deadlines[dlIdx])) {
		ma.deadlines[dlIdx] = append(ma.deadlines[dlIdx], partition{
			sectors:     bitfield.New(),
			toBeSkipped: bitfield.New(),
			faults:      bitfield.New(),
		})
	}
	ma.deadlines[dlIdx][pIdx].sectors.Set(uint64(sectorNumber))
	return nil
}
   575  
// schedule a proof within the deadline's bounds, plus a state sync at the
// deadline's close. The proof epoch is chosen uniformly within the window.
func (ma *MinerAgent) scheduleSyncAndNextProof(v SimState, dlIdx uint64) error {
	mSt, err := v.MinerState(ma.IDAddress)
	if err != nil {
		return err
	}

	// find next proving window for this deadline
	provingPeriodStart, err := mSt.ProvingPeriodStart(v.Store())
	if err != nil {
		return err
	}
	deadlineStart := provingPeriodStart + abi.ChainEpoch(dlIdx)*miner.WPoStChallengeWindow
	// if this period's window is already too close, target the next proving period
	// NOTE(review): the cutoff is one challenge window before the window opens —
	// presumably to leave room for the challenge lookback; confirm intent.
	if deadlineStart-miner.WPoStChallengeWindow < v.GetEpoch() {
		deadlineStart += miner.WPoStProvingPeriod
	}
	deadlineClose := deadlineStart + miner.WPoStChallengeWindow

	// sync agent state with chain state once the deadline has been processed
	ma.operationSchedule.ScheduleOp(deadlineClose, syncDeadlineStateAction{dlIdx: dlIdx})

	// prove at a uniformly random epoch inside the open window
	proveAt := deadlineStart + abi.ChainEpoch(ma.rnd.Int63n(int64(deadlineClose-deadlineStart)))
	ma.operationSchedule.ScheduleOp(proveAt, proveDeadlineAction{dlIdx: dlIdx})

	return nil
}
   601  
// Fill sector with deals.
// This is a naive packing algorithm that adds pieces in order received.
// Returns the deal IDs packed into the sector and the sector expiration,
// extended if necessary to outlast the longest-running included deal.
func (ma *MinerAgent) fillSectorWithPendingDeals(expiration abi.ChainEpoch) ([]abi.DealID, abi.ChainEpoch) {
	var dealIDs []abi.DealID

	sectorSize, err := ma.Config.ProofType.SectorSize()
	if err != nil {
		panic(err)
	}

	// pieces are aligned so that each starts at the first multiple of its piece size >= the next empty slot.
	// just stop when we find one that doesn't fit in the sector. Assume pieces can't have zero size
	loc := uint64(0)
	for _, piece := range ma.dealsPendingInclusion {
		size := uint64(piece.size)
		loc = ((loc + size - 1) / size) * size // round loc up to the next multiple of size
		if loc+size > uint64(sectorSize) {
			break
		}

		dealIDs = append(dealIDs, piece.id)
		// sector must outlive every deal it contains
		if piece.ends > expiration {
			expiration = piece.ends
		}

		loc += size
	}

	// remove ids we've added from pending; valid because the loop stops at the
	// first non-fitting piece, so the included deals are exactly a prefix
	ma.dealsPendingInclusion = ma.dealsPendingInclusion[len(dealIDs):]

	return dealIDs, expiration
}
   635  
   636  // ensure recovery hasn't expired since it was scheduled
   637  func (ma *MinerAgent) delayedRecoveryMessage(dlIdx uint64, pIdx uint64, recoveryNumber abi.SectorNumber) ([]message, error) {
   638  	part := ma.deadlines[dlIdx][pIdx]
   639  	if expired, err := part.expired.IsSet(uint64(recoveryNumber)); err != nil {
   640  		return nil, err
   641  	} else if expired {
   642  		// just ignore this recovery if expired
   643  		return nil, nil
   644  	}
   645  
   646  	return ma.recoveryMessage(dlIdx, pIdx, recoveryNumber)
   647  }
   648  
// recoveryMessage builds a DeclareFaultsRecovered message for a single sector
// and optimistically updates agent state as though the message succeeds.
func (ma *MinerAgent) recoveryMessage(dlIdx uint64, pIdx uint64, recoveryNumber abi.SectorNumber) ([]message, error) {
	// assume this message succeeds
	ma.liveSectors = append(ma.liveSectors, uint64(recoveryNumber))
	// NOTE(review): part is a value copy of the partition; Unset appears to
	// mutate shared internal state of the BitField — confirm this actually
	// clears the fault on the stored partition.
	part := ma.deadlines[dlIdx][pIdx]
	part.faults.Unset(uint64(recoveryNumber))

	recoverParams := miner.DeclareFaultsRecoveredParams{
		Recoveries: []miner.RecoveryDeclaration{{
			Deadline:  dlIdx,
			Partition: pIdx,
			Sectors:   bitfield.NewFromSet([]uint64{uint64(recoveryNumber)}),
		}},
	}

	return []message{{
		From:   ma.Worker,
		To:     ma.IDAddress,
		Value:  big.Zero(),
		Method: builtin.MethodsMiner.DeclareFaultsRecovered,
		Params: &recoverParams,
	}}, nil
}
   671  
// This function updates all sectors in deadline that have newly expired,
// reconciling the agent's bookkeeping with on-chain terminated sectors so
// expired sectors are never chosen for future faults, recoveries, or upgrades.
func (ma *MinerAgent) syncMinerState(s SimState, dlIdx uint64) error {
	mSt, err := s.MinerState(ma.IDAddress)
	if err != nil {
		return err
	}

	dl, err := mSt.LoadDeadlineState(s.Store(), dlIdx)
	if err != nil {
		return err
	}

	// update sector state for all partitions in deadline
	var allNewExpired []bitfield.BitField
	for pIdx, part := range ma.deadlines[dlIdx] {
		partState, err := dl.LoadPartition(s.Store(), uint64(pIdx))
		if err != nil {
			return err
		}
		// newly expired = sectors we track that the chain has terminated
		newExpired, err := bitfield.IntersectBitField(part.sectors, partState.Terminated())
		if err != nil {
			return err
		}

		if empty, err := newExpired.IsEmpty(); err != nil {
			return err
		} else if !empty {
			err := part.expireSectors(newExpired)
			if err != nil {
				return err
			}
			allNewExpired = append(allNewExpired, newExpired)
		}
	}

	// remove newly expired sectors from miner agent state to prevent choosing them in the future.
	toRemoveBF, err := bitfield.MultiMerge(allNewExpired...)
	if err != nil {
		return err
	}

	// nextSectorNumber is a strict upper bound on every sector number ever assigned
	toRemove, err := toRemoveBF.AllMap(uint64(ma.nextSectorNumber))
	if err != nil {
		return err
	}

	if len(toRemove) > 0 {
		ma.liveSectors = filterSlice(ma.liveSectors, toRemove)
		ma.faultySectors = filterSlice(ma.faultySectors, toRemove)
		ma.ccSectors = filterSlice(ma.ccSectors, toRemove)
	}
	return nil
}
   725  
   726  func filterSlice(ns []uint64, toRemove map[uint64]bool) []uint64 {
   727  	var nextLive []uint64
   728  	for _, sn := range ns {
   729  		_, expired := toRemove[sn]
   730  		if !expired {
   731  			nextLive = append(nextLive, sn)
   732  		}
   733  	}
   734  	return nextLive
   735  }
   736  
   737  func (ma *MinerAgent) sectorInfo(v SimState, sectorNumber uint64) (SimSectorInfo, error) {
   738  	mSt, err := v.MinerState(ma.IDAddress)
   739  	if err != nil {
   740  		return nil, err
   741  	}
   742  
   743  	sector, err := mSt.LoadSectorInfo(v.Store(), sectorNumber)
   744  	if err != nil {
   745  		return nil, err
   746  	}
   747  	return sector, nil
   748  }
   749  
// dlInfoForSector locates the sector on chain and returns the next unelapsed
// deadline info for its deadline, along with its partition index.
func (ma *MinerAgent) dlInfoForSector(v SimState, sectorNumber uint64) (*dline.Info, uint64, error) {
	mSt, err := v.MinerState(ma.IDAddress)
	if err != nil {
		return nil, 0, err
	}

	dlIdx, pIdx, err := mSt.FindSector(v.Store(), abi.SectorNumber(sectorNumber))
	if err != nil {
		return nil, 0, err
	}

	// only PeriodStart is used from the miner's current deadline info
	dlInfo, err := mSt.DeadlineInfo(v.Store(), v.GetEpoch())
	if err != nil {
		return nil, 0, err
	}
	// deadline info for the sector's own deadline index, rolled forward if elapsed
	sectorDLInfo := miner.NewDeadlineInfo(dlInfo.PeriodStart, dlIdx, v.GetEpoch()).NextNotElapsed()
	return sectorDLInfo, pIdx, nil
}
   768  
// create a random valid sector expiration
func (ma *MinerAgent) sectorExpiration(currentEpoch abi.ChainEpoch) abi.ChainEpoch {
	// Require sector lifetime meets minimum by assuming activation happens at last epoch permitted for seal proof
	// to meet the constraints imposed in PreCommit.
	minExp := currentEpoch + miner.MaxProveCommitDuration[ma.Config.ProofType] + miner.MinSectorExpiration
	// Require duration of sector from now does not exceed the maximum sector extension. This constraint
	// is also imposed by PreCommit, and along with the first constraint define the bounds for a valid
	// expiration of a new sector.
	maxExp := currentEpoch + miner.MaxSectorExpirationExtension

	// generate a uniformly distributed expiration in the valid range (inclusive of minExp, exclusive of maxExp).
	return minExp + abi.ChainEpoch(ma.rnd.Int63n(int64(maxExp-minExp)))
}
   782  
// Generate a sector activation over the range of acceptable values.
// The range varies widely from 150 - 3030 epochs after precommit.
// Assume differences in hardware and contention in the miner's sealing queue create a uniform distribution
// over the acceptable range
func (ma *MinerAgent) sectorActivation(preCommitAt abi.ChainEpoch) abi.ChainEpoch {
	// earliest permitted activation is one epoch after the precommit challenge delay
	minActivation := preCommitAt + miner.PreCommitChallengeDelay + 1
	maxActivation := preCommitAt + miner.MaxProveCommitDuration[ma.Config.ProofType]
	return minActivation + abi.ChainEpoch(ma.rnd.Int63n(int64(maxActivation-minActivation)))
}
   792  
   793  // create a random seal CID
   794  func sectorSealCID(rnd *rand.Rand) cid.Cid {
   795  	data := make([]byte, 10)
   796  	_, err := rnd.Read(data)
   797  	if err != nil {
   798  		panic(err)
   799  	}
   800  
   801  	sum := sha256.Sum256(data)
   802  	hash, err := mh.Encode(sum[:], miner.SealedCIDPrefix.MhType)
   803  	if err != nil {
   804  		panic(err)
   805  	}
   806  	return cid.NewCidV1(miner.SealedCIDPrefix.Codec, hash)
   807  }
   808  
   809  /////////////////////////////////////////////
   810  //
   811  //  Internal data structures
   812  //
   813  /////////////////////////////////////////////
   814  
// tracks the agent's view of state relevant to each deadline partition
type partition struct {
	sectors     bitfield.BitField // sector numbers of all sectors that have not expired
	toBeSkipped bitfield.BitField // sector numbers of sectors to be skipped in the next PoSt
	faults      bitfield.BitField // sector numbers of sectors believed to be faulty
	expired     bitfield.BitField // sector numbers of sectors believed to have expired
}
   822  
   823  func (part *partition) expireSectors(newExpired bitfield.BitField) error {
   824  	var err error
   825  	part.sectors, err = bitfield.SubtractBitField(part.sectors, newExpired)
   826  	if err != nil {
   827  		return err
   828  	}
   829  	part.faults, err = bitfield.SubtractBitField(part.faults, newExpired)
   830  	if err != nil {
   831  		return err
   832  	}
   833  	part.toBeSkipped, err = bitfield.SubtractBitField(part.toBeSkipped, newExpired)
   834  	if err != nil {
   835  		return err
   836  	}
   837  	part.expired, err = bitfield.MergeBitFields(part.expired, newExpired)
   838  	if err != nil {
   839  		return err
   840  	}
   841  	return nil
   842  }
   843  
// minerOp is an action scheduled to run at a given epoch.
type minerOp struct {
	epoch  abi.ChainEpoch
	action interface{}
}

// proveCommitAction triggers creation of a ProveCommitSector message.
type proveCommitAction struct {
	sectorNumber      abi.SectorNumber
	committedCapacity bool // the sector contains no deals
	upgrade           bool // the sector replaces a committed-capacity sector
}

// registerSectorAction records a committed sector's deadline/partition so it
// can be included in future PoSts.
type registerSectorAction struct {
	sectorNumber      abi.SectorNumber
	committedCapacity bool
	upgrade           bool
}

// recoverSectorAction declares a deferred fault recovery once its window allows.
type recoverSectorAction struct {
	dlIdx        uint64
	pIdx         uint64
	sectorNumber abi.SectorNumber
}

// proveDeadlineAction triggers a WindowPoSt submission for a deadline.
type proveDeadlineAction struct {
	dlIdx uint64
}

// syncDeadlineStateAction reconciles agent state with chain state for a deadline.
type syncDeadlineStateAction struct {
	dlIdx uint64
}

// pendingDeal is a published deal awaiting inclusion in a sector.
type pendingDeal struct {
	id   abi.DealID
	size abi.PaddedPieceSize
	ends abi.ChainEpoch
}
   880  
   881  /////////////////////////////////////////////
   882  //
   883  //  opQueue priority queue for scheduling
   884  //
   885  /////////////////////////////////////////////
   886  
// opQueue is a min-heap of miner operations keyed by their scheduled epoch.
type opQueue struct {
	ops []minerOp
}

var _ heap.Interface = (*opQueue)(nil)
   892  
// ScheduleOp adds an action to the schedule to run at the given epoch.
func (o *opQueue) ScheduleOp(epoch abi.ChainEpoch, action interface{}) {
	heap.Push(o, minerOp{
		epoch:  epoch,
		action: action,
	})
}
   900  
   901  // get operations for up to and including current epoch
   902  func (o *opQueue) PopOpsUntil(epoch abi.ChainEpoch) []minerOp {
   903  	var ops []minerOp
   904  
   905  	for !o.IsEmpty() && o.NextEpoch() <= epoch {
   906  		next := heap.Pop(o).(minerOp)
   907  		ops = append(ops, next)
   908  	}
   909  	return ops
   910  }
   911  
// NextEpoch returns the epoch of the earliest scheduled op.
// The queue must not be empty (panics otherwise).
func (o *opQueue) NextEpoch() abi.ChainEpoch {
	return o.ops[0].epoch
}

// IsEmpty reports whether no ops are scheduled.
func (o *opQueue) IsEmpty() bool {
	return len(o.ops) == 0
}
   919  
// heap.Interface implementation. Ops are ordered by epoch (min-heap); ordering
// is not stable for ops scheduled at the same epoch.

func (o *opQueue) Len() int {
	return len(o.ops)
}

func (o *opQueue) Less(i, j int) bool {
	return o.ops[i].epoch < o.ops[j].epoch
}

func (o *opQueue) Swap(i, j int) {
	o.ops[i], o.ops[j] = o.ops[j], o.ops[i]
}

// Push appends x; intended for use by container/heap only, not called directly.
func (o *opQueue) Push(x interface{}) {
	o.ops = append(o.ops, x.(minerOp))
}

// Pop removes and returns the last element; for use by container/heap only.
func (o *opQueue) Pop() interface{} {
	op := o.ops[len(o.ops)-1]
	o.ops = o.ops[:len(o.ops)-1]
	return op
}