github.com/prysmaticlabs/prysm@v1.4.4/validator/db/kv/migration_optimal_attester_protection.go (about)

     1  package kv
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  
     7  	types "github.com/prysmaticlabs/eth2-types"
     8  	"github.com/prysmaticlabs/prysm/shared/bytesutil"
     9  	"github.com/prysmaticlabs/prysm/shared/params"
    10  	"github.com/prysmaticlabs/prysm/shared/progressutil"
    11  	bolt "go.etcd.io/bbolt"
    12  )
    13  
// migrationOptimalAttesterProtectionKey is the key stored in migrationsBucket
// (with value migrationCompleted) once the optimal attester protection
// migration below has finished, so it is never re-run.
var migrationOptimalAttesterProtectionKey = []byte("optimal_attester_protection_0")
    15  
// Migrate attester protection to a more optimal format in the DB. Given we
// stored attesting history as large, 2Mb arrays per validator, we need to perform
// this migration differently than the rest, ensuring we perform each expensive bolt
// update in its own transaction to prevent having everything on the heap.
func (s *Store) migrateOptimalAttesterProtectionUp(ctx context.Context) error {
	// Phase 1: in a single transaction, snapshot every (public key,
	// attesting history) pair from the deprecated bucket and pre-create the
	// destination buckets. The snapshot lets phase 2 run one small
	// transaction per validator instead of one giant one.
	publicKeyBytes := make([][]byte, 0)
	attestingHistoryBytes := make([][]byte, 0)
	numKeys := 0
	err := s.db.Update(func(tx *bolt.Tx) error {
		mb := tx.Bucket(migrationsBucket)
		if b := mb.Get(migrationOptimalAttesterProtectionKey); bytes.Equal(b, migrationCompleted) {
			return nil // Migration already completed.
		}

		bkt := tx.Bucket(deprecatedAttestationHistoryBucket)
		// KeyN is used only to size the progress bar below.
		numKeys = bkt.Stats().KeyN
		return bkt.ForEach(func(k, v []byte) error {
			if v == nil {
				return nil
			}
			// Create the per-validator bucket and its two sub-buckets
			// (source epochs, signing roots) up front so phase 2 can
			// assume they exist.
			bucket := tx.Bucket(pubKeysBucket)
			pkBucket, err := bucket.CreateBucketIfNotExists(k)
			if err != nil {
				return err
			}
			_, err = pkBucket.CreateBucketIfNotExists(attestationSourceEpochsBucket)
			if err != nil {
				return err
			}
			_, err = pkBucket.CreateBucketIfNotExists(attestationSigningRootsBucket)
			if err != nil {
				return err
			}
			// Bolt keys/values are only valid for the lifetime of the
			// transaction, so copy them before retaining references.
			nk := make([]byte, len(k))
			copy(nk, k)
			nv := make([]byte, len(v))
			copy(nv, v)
			publicKeyBytes = append(publicKeyBytes, nk)
			attestingHistoryBytes = append(attestingHistoryBytes, nv)
			return nil
		})
	})
	if err != nil {
		return err
	}

	// Phase 2: one bolt transaction per validator, decoding its deprecated
	// history and writing each (source, target, signing root) entry into
	// the new schema.
	bar := progressutil.InitializeProgressBar(numKeys, "Migrating attesting history to more efficient format")
	for i, publicKey := range publicKeyBytes {
		attestingHistory := deprecatedEncodedAttestingHistory(attestingHistoryBytes[i])
		err = s.db.Update(func(tx *bolt.Tx) error {
			if attestingHistory == nil {
				return nil
			}
			// These buckets were created in phase 1 above.
			bucket := tx.Bucket(pubKeysBucket)
			pkBucket := bucket.Bucket(publicKey)
			sourceEpochsBucket := pkBucket.Bucket(attestationSourceEpochsBucket)

			signingRootsBucket := pkBucket.Bucket(attestationSigningRootsBucket)

			// Extract every single source, target, signing root
			// from the attesting history then insert them into the
			// respective buckets under the new db schema.
			latestEpochWritten, err := attestingHistory.getLatestEpochWritten(ctx)
			if err != nil {
				return err
			}
			// For every epoch since genesis up to the highest epoch written, we then
			// extract historical data and insert it into the new schema.
			for targetEpoch := types.Epoch(0); targetEpoch <= latestEpochWritten; targetEpoch++ {
				historicalAtt, err := attestingHistory.getTargetData(ctx, targetEpoch)
				if err != nil {
					return err
				}
				if historicalAtt.isEmpty() {
					continue
				}
				// Big-endian encoding keeps epochs sorted under bolt's
				// byte-ordered iteration.
				targetEpochBytes := bytesutil.EpochToBytesBigEndian(targetEpoch)
				sourceEpochBytes := bytesutil.EpochToBytesBigEndian(historicalAtt.Source)
				// NOTE(review): bolt Put overwrites, so if two target epochs
				// share a source epoch only the last target survives here —
				// presumably handled by a follow-up migration; confirm.
				if err := sourceEpochsBucket.Put(sourceEpochBytes, targetEpochBytes); err != nil {
					return err
				}
				if err := signingRootsBucket.Put(targetEpochBytes, historicalAtt.SigningRoot); err != nil {
					return err
				}
			}
			return bar.Add(1)
		})
		if err != nil {
			return err
		}
	}

	// Finally, mark the migration as complete.
	return s.db.Update(func(tx *bolt.Tx) error {
		mb := tx.Bucket(migrationsBucket)
		return mb.Put(migrationOptimalAttesterProtectionKey, migrationCompleted)
	})
}
   113  
   114  // Migrate attester protection from the more optimal format to the old format in the DB.
   115  func (s *Store) migrateOptimalAttesterProtectionDown(ctx context.Context) error {
   116  	// First we extract the public keys we are migrating down for.
   117  	pubKeys := make([][48]byte, 0)
   118  	err := s.view(func(tx *bolt.Tx) error {
   119  		mb := tx.Bucket(migrationsBucket)
   120  		if b := mb.Get(migrationOptimalAttesterProtectionKey); b == nil {
   121  			// Migration has not occurred, meaning data is already in old format
   122  			// so no need to perform a down migration.
   123  			return nil
   124  		}
   125  		bkt := tx.Bucket(pubKeysBucket)
   126  		if bkt == nil {
   127  			return nil
   128  		}
   129  		return bkt.ForEach(func(pubKey, v []byte) error {
   130  			if pubKey == nil {
   131  				return nil
   132  			}
   133  			pkBucket := bkt.Bucket(pubKey)
   134  			if pkBucket == nil {
   135  				return nil
   136  			}
   137  			pubKeys = append(pubKeys, bytesutil.ToBytes48(pubKey))
   138  			return nil
   139  		})
   140  	})
   141  	if err != nil {
   142  		return err
   143  	}
   144  
   145  	// Next up, we extract the data for attested epochs and signing roots
   146  	// from the optimized db schema into maps we can use later.
   147  	signingRootsByTarget := make(map[types.Epoch][]byte)
   148  	targetEpochsBySource := make(map[types.Epoch][]types.Epoch)
   149  	err = s.view(func(tx *bolt.Tx) error {
   150  		bkt := tx.Bucket(pubKeysBucket)
   151  		if bkt == nil {
   152  			return nil
   153  		}
   154  		for _, pubKey := range pubKeys {
   155  			pubKeyBkt := bkt.Bucket(pubKey[:])
   156  			if pubKeyBkt == nil {
   157  				continue
   158  			}
   159  			sourceEpochsBucket := pubKeyBkt.Bucket(attestationSourceEpochsBucket)
   160  			signingRootsBucket := pubKeyBkt.Bucket(attestationSigningRootsBucket)
   161  			// Extract signing roots.
   162  			if err := signingRootsBucket.ForEach(func(targetBytes, signingRoot []byte) error {
   163  				var sr [32]byte
   164  				copy(sr[:], signingRoot)
   165  				signingRootsByTarget[bytesutil.BytesToEpochBigEndian(targetBytes)] = sr[:]
   166  				return nil
   167  			}); err != nil {
   168  				return err
   169  			}
   170  			// Next up, extract the target epochs by source.
   171  			if err := sourceEpochsBucket.ForEach(func(sourceBytes, targetEpochsBytes []byte) error {
   172  				targetEpochs := make([]types.Epoch, 0)
   173  				for i := 0; i < len(targetEpochsBytes); i += 8 {
   174  					targetEpochs = append(targetEpochs, bytesutil.BytesToEpochBigEndian(targetEpochsBytes[i:i+8]))
   175  				}
   176  				targetEpochsBySource[bytesutil.BytesToEpochBigEndian(sourceBytes)] = targetEpochs
   177  				return nil
   178  			}); err != nil {
   179  				return err
   180  			}
   181  		}
   182  		return nil
   183  	})
   184  	if err != nil {
   185  		return err
   186  	}
   187  
   188  	// Then, we use the data we extracted to recreate the old
   189  	// attesting history format and for each public key, we save it
   190  	// to the appropriate bucket.
   191  	err = s.update(func(tx *bolt.Tx) error {
   192  		bkt := tx.Bucket(pubKeysBucket)
   193  		if bkt == nil {
   194  			return nil
   195  		}
   196  		bar := progressutil.InitializeProgressBar(len(pubKeys), "Migrating attesting history to old format")
   197  		for _, pubKey := range pubKeys {
   198  			// Now we write the attesting history using the data we extracted
   199  			// from the buckets accordingly.
   200  			history := newDeprecatedAttestingHistory(0)
   201  			var maxTargetWritten types.Epoch
   202  			for source, targetEpochs := range targetEpochsBySource {
   203  				for _, target := range targetEpochs {
   204  					signingRoot := params.BeaconConfig().ZeroHash[:]
   205  					if sr, ok := signingRootsByTarget[target]; ok {
   206  						signingRoot = sr
   207  					}
   208  					newHist, err := history.setTargetData(ctx, target, &deprecatedHistoryData{
   209  						Source:      source,
   210  						SigningRoot: signingRoot,
   211  					})
   212  					if err != nil {
   213  						return err
   214  					}
   215  					history = newHist
   216  					if target > maxTargetWritten {
   217  						maxTargetWritten = target
   218  					}
   219  				}
   220  			}
   221  			newHist, err := history.setLatestEpochWritten(ctx, maxTargetWritten)
   222  			if err != nil {
   223  				return err
   224  			}
   225  			history = newHist
   226  			deprecatedBkt, err := tx.CreateBucketIfNotExists(deprecatedAttestationHistoryBucket)
   227  			if err != nil {
   228  				return err
   229  			}
   230  			if err := deprecatedBkt.Put(pubKey[:], history); err != nil {
   231  				return err
   232  			}
   233  			if err := bar.Add(1); err != nil {
   234  				return err
   235  			}
   236  		}
   237  		return nil
   238  	})
   239  	if err != nil {
   240  		return err
   241  	}
   242  
   243  	// Finally, we clear the migration key.
   244  	return s.update(func(tx *bolt.Tx) error {
   245  		migrationsBkt := tx.Bucket(migrationsBucket)
   246  		return migrationsBkt.Delete(migrationOptimalAttesterProtectionKey)
   247  	})
   248  }