github.com/onflow/flow-go@v0.33.17/state/protocol/badger/validity.go (about) 1 package badger 2 3 import ( 4 "fmt" 5 6 "github.com/onflow/flow-go/consensus/hotstuff/committees" 7 "github.com/onflow/flow-go/consensus/hotstuff/signature" 8 "github.com/onflow/flow-go/consensus/hotstuff/validator" 9 "github.com/onflow/flow-go/consensus/hotstuff/verification" 10 "github.com/onflow/flow-go/model/flow" 11 "github.com/onflow/flow-go/model/flow/factory" 12 "github.com/onflow/flow-go/model/flow/filter" 13 "github.com/onflow/flow-go/state/protocol" 14 ) 15 16 // isValidExtendingEpochSetup checks whether an epoch setup service being 17 // added to the state is valid. In addition to intrinsic validity, we also 18 // check that it is valid w.r.t. the previous epoch setup event, and the 19 // current epoch status. 20 // Assumes all inputs besides extendingSetup are already validated. 21 // Expected errors during normal operations: 22 // * protocol.InvalidServiceEventError if the input service event is invalid to extend the currently active epoch status 23 func isValidExtendingEpochSetup(extendingSetup *flow.EpochSetup, activeSetup *flow.EpochSetup, status *flow.EpochStatus) error { 24 // We should only have a single epoch setup event per epoch. 25 if status.NextEpoch.SetupID != flow.ZeroID { 26 // true iff EpochSetup event for NEXT epoch was already included before 27 return protocol.NewInvalidServiceEventErrorf("duplicate epoch setup service event: %x", status.NextEpoch.SetupID) 28 } 29 30 // The setup event should have the counter increased by one. 31 if extendingSetup.Counter != activeSetup.Counter+1 { 32 return protocol.NewInvalidServiceEventErrorf("next epoch setup has invalid counter (%d => %d)", activeSetup.Counter, extendingSetup.Counter) 33 } 34 35 // The first view needs to be exactly one greater than the current epoch final view 36 if extendingSetup.FirstView != activeSetup.FinalView+1 { 37 return protocol.NewInvalidServiceEventErrorf( 38 "next epoch first view must be exactly 1 more than current epoch final view (%d != %d+1)", 39 extendingSetup.FirstView, 40 activeSetup.FinalView, 41 ) 42 } 43 44 // Finally, the epoch setup event must contain all necessary information. 45 err := verifyEpochSetup(extendingSetup, true) 46 if err != nil { 47 return protocol.NewInvalidServiceEventErrorf("invalid epoch setup: %w", err) 48 } 49 50 return nil 51 } 52 53 // verifyEpochSetup checks whether an `EpochSetup` event is syntactically correct. 54 // The boolean parameter `verifyNetworkAddress` controls, whether we want to permit 55 // nodes to share a networking address. 56 // This is a side-effect-free function. Any error return indicates that the 57 // EpochSetup event is not compliant with protocol rules. 58 func verifyEpochSetup(setup *flow.EpochSetup, verifyNetworkAddress bool) error { 59 // STEP 1: general sanity checks 60 // the seed needs to be at least minimum length 61 if len(setup.RandomSource) != flow.EpochSetupRandomSourceLength { 62 return fmt.Errorf("seed has incorrect length (%d != %d)", len(setup.RandomSource), flow.EpochSetupRandomSourceLength) 63 } 64 65 // STEP 2: sanity checks of all nodes listed as participants 66 // there should be no duplicate node IDs 67 identLookup := make(map[flow.Identifier]struct{}) 68 for _, participant := range setup.Participants { 69 _, ok := identLookup[participant.NodeID] 70 if ok { 71 return fmt.Errorf("duplicate node identifier (%x)", participant.NodeID) 72 } 73 identLookup[participant.NodeID] = struct{}{} 74 } 75 76 if verifyNetworkAddress { 77 // there should be no duplicate node addresses 78 addrLookup := make(map[string]struct{}) 79 for _, participant := range setup.Participants { 80 _, ok := addrLookup[participant.Address] 81 if ok { 82 return fmt.Errorf("duplicate node address (%x)", participant.Address) 83 } 84 addrLookup[participant.Address] = struct{}{} 85 } 86 } 87 88 // the participants must be listed in canonical order 89 if !flow.IsIdentityListCanonical(setup.Participants) { 90 return fmt.Errorf("participants are not canonically ordered") 91 } 92 93 // STEP 3: sanity checks for individual roles 94 // IMPORTANT: here we remove all nodes with zero weight, as they are allowed to partake 95 // in communication but not in respective node functions 96 activeParticipants := setup.Participants.Filter(filter.HasWeight(true)) 97 98 // we need at least one node of each role 99 roles := make(map[flow.Role]uint) 100 for _, participant := range activeParticipants { 101 roles[participant.Role]++ 102 } 103 if roles[flow.RoleConsensus] < 1 { 104 return fmt.Errorf("need at least one consensus node") 105 } 106 if roles[flow.RoleCollection] < 1 { 107 return fmt.Errorf("need at least one collection node") 108 } 109 if roles[flow.RoleExecution] < 1 { 110 return fmt.Errorf("need at least one execution node") 111 } 112 if roles[flow.RoleVerification] < 1 { 113 return fmt.Errorf("need at least one verification node") 114 } 115 116 // first view must be before final view 117 if setup.FirstView >= setup.FinalView { 118 return fmt.Errorf("first view (%d) must be before final view (%d)", setup.FirstView, setup.FinalView) 119 } 120 121 // we need at least one collection cluster 122 if len(setup.Assignments) == 0 { 123 return fmt.Errorf("need at least one collection cluster") 124 } 125 126 // the collection cluster assignments need to be valid 127 _, err := factory.NewClusterList(setup.Assignments, activeParticipants.Filter(filter.HasRole(flow.RoleCollection))) 128 if err != nil { 129 return fmt.Errorf("invalid cluster assignments: %w", err) 130 } 131 132 return nil 133 } 134 135 // isValidExtendingEpochCommit checks whether an epoch commit service being 136 // added to the state is valid. In addition to intrinsic validity, we also 137 // check that it is valid w.r.t. the previous epoch setup event, and the 138 // current epoch status. 139 // Assumes all inputs besides extendingCommit are already validated. 140 // Expected errors during normal operations: 141 // * protocol.InvalidServiceEventError if the input service event is invalid to extend the currently active epoch status 142 func isValidExtendingEpochCommit(extendingCommit *flow.EpochCommit, extendingSetup *flow.EpochSetup, activeSetup *flow.EpochSetup, status *flow.EpochStatus) error { 143 144 // We should only have a single epoch commit event per epoch. 145 if status.NextEpoch.CommitID != flow.ZeroID { 146 // true iff EpochCommit event for NEXT epoch was already included before 147 return protocol.NewInvalidServiceEventErrorf("duplicate epoch commit service event: %x", status.NextEpoch.CommitID) 148 } 149 150 // The epoch setup event needs to happen before the commit. 151 if status.NextEpoch.SetupID == flow.ZeroID { 152 return protocol.NewInvalidServiceEventErrorf("missing epoch setup for epoch commit") 153 } 154 155 // The commit event should have the counter increased by one. 156 if extendingCommit.Counter != activeSetup.Counter+1 { 157 return protocol.NewInvalidServiceEventErrorf("next epoch commit has invalid counter (%d => %d)", activeSetup.Counter, extendingCommit.Counter) 158 } 159 160 err := isValidEpochCommit(extendingCommit, extendingSetup) 161 if err != nil { 162 return protocol.NewInvalidServiceEventErrorf("invalid epoch commit: %s", err) 163 } 164 165 return nil 166 } 167 168 // isValidEpochCommit checks whether an epoch commit service event is intrinsically valid. 169 // Assumes the input flow.EpochSetup event has already been validated. 170 // Expected errors during normal operations: 171 // * protocol.InvalidServiceEventError if the EpochCommit is invalid 172 func isValidEpochCommit(commit *flow.EpochCommit, setup *flow.EpochSetup) error { 173 174 if len(setup.Assignments) != len(commit.ClusterQCs) { 175 return protocol.NewInvalidServiceEventErrorf("number of clusters (%d) does not number of QCs (%d)", len(setup.Assignments), len(commit.ClusterQCs)) 176 } 177 178 if commit.Counter != setup.Counter { 179 return protocol.NewInvalidServiceEventErrorf("inconsistent epoch counter between commit (%d) and setup (%d) events in same epoch", commit.Counter, setup.Counter) 180 } 181 182 // make sure we have a valid DKG public key 183 if commit.DKGGroupKey == nil { 184 return protocol.NewInvalidServiceEventErrorf("missing DKG public group key") 185 } 186 187 participants := setup.Participants.Filter(filter.IsValidDKGParticipant) 188 if len(participants) != len(commit.DKGParticipantKeys) { 189 return protocol.NewInvalidServiceEventErrorf("participant list (len=%d) does not match dkg key list (len=%d)", len(participants), len(commit.DKGParticipantKeys)) 190 } 191 192 return nil 193 } 194 195 // IsValidRootSnapshot checks internal consistency of root state snapshot 196 // if verifyResultID allows/disallows Result ID verification 197 func IsValidRootSnapshot(snap protocol.Snapshot, verifyResultID bool) error { 198 199 segment, err := snap.SealingSegment() 200 if err != nil { 201 return fmt.Errorf("could not get sealing segment: %w", err) 202 } 203 result, seal, err := snap.SealedResult() 204 if err != nil { 205 return fmt.Errorf("could not latest sealed result: %w", err) 206 } 207 208 err = segment.Validate() 209 if err != nil { 210 return fmt.Errorf("invalid root sealing segment: %w", err) 211 } 212 213 highest := segment.Highest() // reference block of the snapshot 214 lowest := segment.Sealed() // last sealed block 215 highestID := highest.ID() 216 lowestID := lowest.ID() 217 218 if result.BlockID != lowestID { 219 return fmt.Errorf("root execution result for wrong block (%x != %x)", result.BlockID, lowest.ID()) 220 } 221 222 if seal.BlockID != lowestID { 223 return fmt.Errorf("root block seal for wrong block (%x != %x)", seal.BlockID, lowest.ID()) 224 } 225 226 if verifyResultID { 227 if seal.ResultID != result.ID() { 228 return fmt.Errorf("root block seal for wrong execution result (%x != %x)", seal.ResultID, result.ID()) 229 } 230 } 231 232 // identities must be canonically ordered 233 identities, err := snap.Identities(filter.Any) 234 if err != nil { 235 return fmt.Errorf("could not get identities for root snapshot: %w", err) 236 } 237 if !flow.IsIdentityListCanonical(identities) { 238 return fmt.Errorf("identities are not canonically ordered") 239 } 240 241 // root qc must be for reference block of snapshot 242 qc, err := snap.QuorumCertificate() 243 if err != nil { 244 return fmt.Errorf("could not get qc for root snapshot: %w", err) 245 } 246 if qc.BlockID != highestID { 247 return fmt.Errorf("qc is for wrong block (got: %x, expected: %x)", qc.BlockID, highestID) 248 } 249 250 firstView, err := snap.Epochs().Current().FirstView() 251 if err != nil { 252 return fmt.Errorf("could not get first view: %w", err) 253 } 254 finalView, err := snap.Epochs().Current().FinalView() 255 if err != nil { 256 return fmt.Errorf("could not get final view: %w", err) 257 } 258 259 // the segment must be fully within the current epoch 260 if firstView > lowest.Header.View { 261 return fmt.Errorf("lowest block of sealing segment has lower view than first view of epoch") 262 } 263 if highest.Header.View >= finalView { 264 return fmt.Errorf("final view of epoch less than first block view") 265 } 266 267 err = validateVersionBeacon(snap) 268 if err != nil { 269 return err 270 } 271 272 return nil 273 } 274 275 // IsValidRootSnapshotQCs checks internal consistency of QCs that are included in the root state snapshot 276 // It verifies QCs for main consensus and for each collection cluster. 277 func IsValidRootSnapshotQCs(snap protocol.Snapshot) error { 278 // validate main consensus QC 279 err := validateRootQC(snap) 280 if err != nil { 281 return fmt.Errorf("invalid root QC: %w", err) 282 } 283 284 // validate each collection cluster separately 285 curEpoch := snap.Epochs().Current() 286 clusters, err := curEpoch.Clustering() 287 if err != nil { 288 return fmt.Errorf("could not get clustering for root snapshot: %w", err) 289 } 290 for clusterIndex := range clusters { 291 cluster, err := curEpoch.Cluster(uint(clusterIndex)) 292 if err != nil { 293 return fmt.Errorf("could not get cluster %d for root snapshot: %w", clusterIndex, err) 294 } 295 err = validateClusterQC(cluster) 296 if err != nil { 297 return fmt.Errorf("invalid cluster qc %d: %w", clusterIndex, err) 298 } 299 } 300 return nil 301 } 302 303 // validateRootQC performs validation of root QC 304 // Returns nil on success 305 func validateRootQC(snap protocol.Snapshot) error { 306 identities, err := snap.Identities(filter.IsVotingConsensusCommitteeMember) 307 if err != nil { 308 return fmt.Errorf("could not get root snapshot identities: %w", err) 309 } 310 311 rootQC, err := snap.QuorumCertificate() 312 if err != nil { 313 return fmt.Errorf("could not get root QC: %w", err) 314 } 315 316 dkg, err := snap.Epochs().Current().DKG() 317 if err != nil { 318 return fmt.Errorf("could not get DKG for root snapshot: %w", err) 319 } 320 321 committee, err := committees.NewStaticCommitteeWithDKG(identities, flow.Identifier{}, dkg) 322 if err != nil { 323 return fmt.Errorf("could not create static committee: %w", err) 324 } 325 verifier := verification.NewCombinedVerifier(committee, signature.NewConsensusSigDataPacker(committee)) 326 hotstuffValidator := validator.New(committee, verifier) 327 err = hotstuffValidator.ValidateQC(rootQC) 328 if err != nil { 329 return fmt.Errorf("could not validate root qc: %w", err) 330 } 331 return nil 332 } 333 334 // validateClusterQC performs QC validation of single collection cluster 335 // Returns nil on success 336 func validateClusterQC(cluster protocol.Cluster) error { 337 committee, err := committees.NewStaticCommittee(cluster.Members(), flow.Identifier{}, nil, nil) 338 if err != nil { 339 return fmt.Errorf("could not create static committee: %w", err) 340 } 341 verifier := verification.NewStakingVerifier() 342 hotstuffValidator := validator.New(committee, verifier) 343 err = hotstuffValidator.ValidateQC(cluster.RootQC()) 344 if err != nil { 345 return fmt.Errorf("could not validate root qc: %w", err) 346 } 347 return nil 348 } 349 350 // validateVersionBeacon returns an InvalidServiceEventError if the snapshot 351 // version beacon is invalid 352 func validateVersionBeacon(snap protocol.Snapshot) error { 353 errf := func(msg string, args ...any) error { 354 return protocol.NewInvalidServiceEventErrorf(msg, args) 355 } 356 357 versionBeacon, err := snap.VersionBeacon() 358 if err != nil { 359 return errf("could not get version beacon: %w", err) 360 } 361 362 if versionBeacon == nil { 363 return nil 364 } 365 366 head, err := snap.Head() 367 if err != nil { 368 return errf("could not get snapshot head: %w", err) 369 } 370 371 // version beacon must be included in a past block to be effective 372 if versionBeacon.SealHeight > head.Height { 373 return errf("version table height higher than highest height") 374 } 375 376 err = versionBeacon.Validate() 377 if err != nil { 378 return errf("version beacon is invalid: %w", err) 379 } 380 381 return nil 382 } 383 384 // ValidRootSnapshotContainsEntityExpiryRange performs a sanity check to make sure the 385 // root snapshot has enough history to encompass at least one full entity expiry window. 386 // Entities (in particular transactions and collections) may reference a block within 387 // the past `flow.DefaultTransactionExpiry` blocks, so a new node must begin with at least 388 // this many blocks worth of history leading up to the snapshot's root block. 389 // 390 // Currently, Access Nodes and Consensus Nodes require root snapshots passing this validator function. 391 // 392 // - Consensus Nodes because they process guarantees referencing past blocks 393 // - Access Nodes because they index transactions referencing past blocks 394 // 395 // One of the following conditions must be satisfied to pass this validation: 396 // 1. This is a snapshot build from a first block of spork 397 // -> there is no earlier history which transactions/collections could reference 398 // 2. This snapshot sealing segment contains at least one expiry window of blocks 399 // -> all possible reference blocks in future transactions/collections will be within the initial history. 400 // 3. This snapshot sealing segment includes the spork root block 401 // -> there is no earlier history which transactions/collections could reference 402 func ValidRootSnapshotContainsEntityExpiryRange(snapshot protocol.Snapshot) error { 403 isSporkRootSnapshot, err := protocol.IsSporkRootSnapshot(snapshot) 404 if err != nil { 405 return fmt.Errorf("could not check if root snapshot is a spork root snapshot: %w", err) 406 } 407 // Condition 1 satisfied 408 if isSporkRootSnapshot { 409 return nil 410 } 411 412 head, err := snapshot.Head() 413 if err != nil { 414 return fmt.Errorf("could not query root snapshot head: %w", err) 415 } 416 417 sporkRootBlockHeight, err := snapshot.Params().SporkRootBlockHeight() 418 if err != nil { 419 return fmt.Errorf("could not query spork root block height: %w", err) 420 } 421 422 sealingSegment, err := snapshot.SealingSegment() 423 if err != nil { 424 return fmt.Errorf("could not query sealing segment: %w", err) 425 } 426 427 sealingSegmentLength := uint64(len(sealingSegment.AllBlocks())) 428 transactionExpiry := uint64(flow.DefaultTransactionExpiry) 429 blocksInSpork := head.Height - sporkRootBlockHeight + 1 // range is inclusive on both ends 430 431 // Condition 3: 432 // check if head.Height - sporkRootBlockHeight < flow.DefaultTransactionExpiry 433 // this is the case where we bootstrap early into the spork and there is simply not enough blocks 434 if blocksInSpork < transactionExpiry { 435 // the distance to spork root is less than transaction expiry, we need all blocks back to the spork root. 436 if sealingSegmentLength != blocksInSpork { 437 return fmt.Errorf("invalid root snapshot length, expecting exactly (%d), got (%d)", blocksInSpork, sealingSegmentLength) 438 } 439 } else { 440 // Condition 2: 441 // the distance to spork root is more than transaction expiry, we need at least `transactionExpiry` many blocks 442 if sealingSegmentLength < transactionExpiry { 443 return fmt.Errorf("invalid root snapshot length, expecting at least (%d), got (%d)", 444 transactionExpiry, sealingSegmentLength) 445 } 446 } 447 return nil 448 }