github.com/filecoin-project/specs-actors/v4@v4.0.2/actors/migration/nv10/miner.go (about)

     1  package nv10
     2  
     3  import (
     4  	"context"
     5  
     6  	"github.com/filecoin-project/go-bitfield"
     7  	cid "github.com/ipfs/go-cid"
     8  	cbor "github.com/ipfs/go-ipld-cbor"
     9  	"golang.org/x/xerrors"
    10  
    11  	miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
    12  	adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
    13  
    14  	builtin3 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
    15  	miner3 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner"
    16  	adt3 "github.com/filecoin-project/specs-actors/v4/actors/util/adt"
    17  )
    18  
    19  type minerMigrator struct{}
    20  
    21  func (m minerMigrator) migrateState(ctx context.Context, store cbor.IpldStore, in actorMigrationInput) (*actorMigrationResult, error) {
    22  	var inState miner2.State
    23  	if err := store.Get(ctx, in.head, &inState); err != nil {
    24  		return nil, err
    25  	}
    26  
    27  	infoOut, err := m.migrateInfo(ctx, store, inState.Info)
    28  	if err != nil {
    29  		return nil, err
    30  	}
    31  	preCommittedSectorsOut, err := migrateHAMTRaw(ctx, store, inState.PreCommittedSectors, builtin3.DefaultHamtBitwidth)
    32  	if err != nil {
    33  		return nil, err
    34  	}
    35  	preCommittedSectorsExpiryOut, err := migrateAMTRaw(ctx, store, inState.PreCommittedSectorsExpiry, miner3.PrecommitExpiryAmtBitwidth)
    36  	if err != nil {
    37  		return nil, err
    38  	}
    39  
    40  	sectorsOut, err := in.cache.Load(SectorsRootKey(inState.Sectors), func() (cid.Cid, error) {
    41  		return migrateAMTRaw(ctx, store, inState.Sectors, miner3.SectorsAmtBitwidth)
    42  	})
    43  	if err != nil {
    44  		return nil, err
    45  	}
    46  
    47  	deadlinesOut, err := m.migrateDeadlines(ctx, store, in.cache, inState.Deadlines)
    48  	if err != nil {
    49  		return nil, err
    50  	}
    51  
    52  	outState := miner3.State{
    53  		Info:                      infoOut,
    54  		PreCommitDeposits:         inState.PreCommitDeposits,
    55  		LockedFunds:               inState.LockedFunds,
    56  		VestingFunds:              inState.VestingFunds,
    57  		FeeDebt:                   inState.FeeDebt,
    58  		InitialPledge:             inState.InitialPledge,
    59  		PreCommittedSectors:       preCommittedSectorsOut,
    60  		PreCommittedSectorsExpiry: preCommittedSectorsExpiryOut,
    61  		AllocatedSectors:          inState.AllocatedSectors,
    62  		Sectors:                   sectorsOut,
    63  		ProvingPeriodStart:        inState.ProvingPeriodStart,
    64  		CurrentDeadline:           inState.CurrentDeadline,
    65  		Deadlines:                 deadlinesOut,
    66  		EarlyTerminations:         inState.EarlyTerminations,
    67  	}
    68  	newHead, err := store.Put(ctx, &outState)
    69  	return &actorMigrationResult{
    70  		newCodeCID: m.migratedCodeCID(),
    71  		newHead:    newHead,
    72  	}, err
    73  }
    74  
// migratedCodeCID returns the code CID of the target (v3) storage miner
// actor that migrated state heads are paired with.
func (m minerMigrator) migratedCodeCID() cid.Cid {
	return builtin3.StorageMinerActorCodeID
}
    78  
    79  func (m *minerMigrator) migrateInfo(ctx context.Context, store cbor.IpldStore, c cid.Cid) (cid.Cid, error) {
    80  	var oldInfo miner2.MinerInfo
    81  	err := store.Get(ctx, c, &oldInfo)
    82  	if err != nil {
    83  		return cid.Undef, err
    84  	}
    85  
    86  	var newWorkerKeyChange *miner3.WorkerKeyChange
    87  	if oldInfo.PendingWorkerKey != nil {
    88  		newWorkerKeyChange = &miner3.WorkerKeyChange{
    89  			NewWorker:   oldInfo.PendingWorkerKey.NewWorker,
    90  			EffectiveAt: oldInfo.PendingWorkerKey.EffectiveAt,
    91  		}
    92  	}
    93  
    94  	windowPoStProof, err := oldInfo.SealProofType.RegisteredWindowPoStProof()
    95  	if err != nil {
    96  		return cid.Undef, err
    97  	}
    98  
    99  	newInfo := miner3.MinerInfo{
   100  		Owner:                      oldInfo.Owner,
   101  		Worker:                     oldInfo.Worker,
   102  		ControlAddresses:           oldInfo.ControlAddresses,
   103  		PendingWorkerKey:           newWorkerKeyChange,
   104  		PeerId:                     oldInfo.PeerId,
   105  		Multiaddrs:                 oldInfo.Multiaddrs,
   106  		WindowPoStProofType:        windowPoStProof,
   107  		SectorSize:                 oldInfo.SectorSize,
   108  		WindowPoStPartitionSectors: oldInfo.WindowPoStPartitionSectors,
   109  		ConsensusFaultElapsed:      oldInfo.ConsensusFaultElapsed,
   110  		PendingOwnerAddress:        oldInfo.PendingOwnerAddress,
   111  	}
   112  	return store.Put(ctx, &newInfo)
   113  }
   114  
   115  func (m *minerMigrator) migrateDeadlines(ctx context.Context, store cbor.IpldStore, cache MigrationCache, deadlines cid.Cid) (cid.Cid, error) {
   116  	var inDeadlines miner2.Deadlines
   117  	err := store.Get(ctx, deadlines, &inDeadlines)
   118  	if err != nil {
   119  		return cid.Undef, err
   120  	}
   121  
   122  	if miner2.WPoStPeriodDeadlines != miner3.WPoStPeriodDeadlines {
   123  		return cid.Undef, xerrors.Errorf("unexpected WPoStPeriodDeadlines changed from %d to %d",
   124  			miner2.WPoStPeriodDeadlines, miner3.WPoStPeriodDeadlines)
   125  	}
   126  
   127  	outDeadlines := miner3.Deadlines{Due: [miner3.WPoStPeriodDeadlines]cid.Cid{}}
   128  
   129  	// Start from an empty template to zero-initialize new fields.
   130  	deadlineTemplate, err := miner3.ConstructDeadline(adt3.WrapStore(ctx, store))
   131  	if err != nil {
   132  		return cid.Undef, xerrors.Errorf("failed to construct new deadline template")
   133  	}
   134  
   135  	for i, c := range inDeadlines.Due {
   136  		outDlCid, err := cache.Load(DeadlineKey(c), func() (cid.Cid, error) {
   137  			var inDeadline miner2.Deadline
   138  			if err = store.Get(ctx, c, &inDeadline); err != nil {
   139  				return cid.Undef, err
   140  			}
   141  
   142  			partitions, err := m.migratePartitions(ctx, store, inDeadline.Partitions)
   143  			if err != nil {
   144  				return cid.Undef, xerrors.Errorf("partitions: %w", err)
   145  			}
   146  
   147  			expirationEpochs, err := migrateAMTRaw(ctx, store, inDeadline.ExpirationsEpochs, miner3.DeadlineExpirationAmtBitwidth)
   148  			if err != nil {
   149  				return cid.Undef, xerrors.Errorf("bitfield queue: %w", err)
   150  			}
   151  
   152  			outDeadline := *deadlineTemplate
   153  			outDeadline.Partitions = partitions
   154  			outDeadline.ExpirationsEpochs = expirationEpochs
   155  			outDeadline.PartitionsPoSted = inDeadline.PostSubmissions
   156  			outDeadline.EarlyTerminations = inDeadline.EarlyTerminations
   157  			outDeadline.LiveSectors = inDeadline.LiveSectors
   158  			outDeadline.TotalSectors = inDeadline.TotalSectors
   159  			outDeadline.FaultyPower = miner3.PowerPair(inDeadline.FaultyPower)
   160  
   161  			// If there are no live sectors in this partition, zero out the "partitions
   162  			// posted" bitfield. This corrects a state issue where:
   163  			// 1. A proof is submitted and a partition is marked as proven.
   164  			// 2. All sectors in a deadline are terminated during the challenge window.
   165  			// 3. The end of deadline logic is skipped because there are no live sectors.
   166  			// This bug has been fixed in actors v3 (no terminations allowed during the
   167  			// challenge window) but the state still needs to be fixed.
   168  			// See: https://github.com/filecoin-project/specs-actors/issues/1348
   169  			if outDeadline.LiveSectors == 0 {
   170  				outDeadline.PartitionsPoSted = bitfield.New()
   171  			}
   172  
   173  			return store.Put(ctx, &outDeadline)
   174  		})
   175  		if err != nil {
   176  			return cid.Undef, err
   177  		}
   178  
   179  		outDeadlines.Due[i] = outDlCid
   180  	}
   181  
   182  	return store.Put(ctx, &outDeadlines)
   183  }
   184  
   185  func (m *minerMigrator) migratePartitions(ctx context.Context, store cbor.IpldStore, root cid.Cid) (cid.Cid, error) {
   186  	// AMT[PartitionNumber]Partition
   187  	inArray, err := adt2.AsArray(adt2.WrapStore(ctx, store), root)
   188  	if err != nil {
   189  		return cid.Undef, err
   190  	}
   191  	outArray, err := adt3.MakeEmptyArray(adt2.WrapStore(ctx, store), miner3.DeadlinePartitionsAmtBitwidth)
   192  	if err != nil {
   193  		return cid.Undef, err
   194  	}
   195  
   196  	var inPartition miner2.Partition
   197  	if err = inArray.ForEach(&inPartition, func(i int64) error {
   198  		expirationEpochs, err := migrateAMTRaw(ctx, store, inPartition.ExpirationsEpochs, miner3.PartitionExpirationAmtBitwidth)
   199  		if err != nil {
   200  			return xerrors.Errorf("expiration queue: %w", err)
   201  		}
   202  
   203  		earlyTerminated, err := migrateAMTRaw(ctx, store, inPartition.EarlyTerminated, miner3.PartitionEarlyTerminationArrayAmtBitwidth)
   204  		if err != nil {
   205  			return xerrors.Errorf("early termination queue: %w", err)
   206  		}
   207  
   208  		outPartition := miner3.Partition{
   209  			Sectors:           inPartition.Sectors,
   210  			Unproven:          inPartition.Unproven,
   211  			Faults:            inPartition.Faults,
   212  			Recoveries:        inPartition.Recoveries,
   213  			Terminated:        inPartition.Terminated,
   214  			ExpirationsEpochs: expirationEpochs,
   215  			EarlyTerminated:   earlyTerminated,
   216  			LivePower:         miner3.PowerPair(inPartition.LivePower),
   217  			UnprovenPower:     miner3.PowerPair(inPartition.UnprovenPower),
   218  			FaultyPower:       miner3.PowerPair(inPartition.FaultyPower),
   219  			RecoveringPower:   miner3.PowerPair(inPartition.RecoveringPower),
   220  		}
   221  
   222  		return outArray.Set(uint64(i), &outPartition)
   223  	}); err != nil {
   224  		return cid.Undef, err
   225  	}
   226  
   227  	return outArray.Root()
   228  }