github.com/koko1123/flow-go-1@v0.29.6/engine/execution/ingestion/uploader/retryable_uploader_wrapper.go

package uploader

import (
	"errors"
	"sync"

	"github.com/rs/zerolog/log"

	"github.com/koko1123/flow-go-1/engine"
	"github.com/koko1123/flow-go-1/engine/execution"
	"github.com/koko1123/flow-go-1/ledger"
	"github.com/koko1123/flow-go-1/model/flow"
	"github.com/koko1123/flow-go-1/module"
	"github.com/koko1123/flow-go-1/module/executiondatasync/execution_data"
	"github.com/koko1123/flow-go-1/module/mempool/entity"
	"github.com/koko1123/flow-go-1/storage"
)

// RetryableUploaderWrapper defines the interface for an uploader that can retry previously failed uploads.
type RetryableUploaderWrapper interface {
	Uploader
	RetryUpload() error
}

// BadgerRetryableUploaderWrapper is the BadgerDB-based implementation of RetryableUploaderWrapper.
type BadgerRetryableUploaderWrapper struct {
	uploader           *AsyncUploader
	execDataDownloader execution_data.Downloader
	unit               *engine.Unit
	metrics            module.ExecutionMetrics
	blocks             storage.Blocks
	commits            storage.Commits
	collections        storage.Collections
	events             storage.Events
	results            storage.ExecutionResults
	transactionResults storage.TransactionResults
	uploadStatusStore  storage.ComputationResultUploadStatus
}

func NewBadgerRetryableUploaderWrapper(
	uploader *AsyncUploader,
	blocks storage.Blocks,
	commits storage.Commits,
	collections storage.Collections,
	events storage.Events,
	results storage.ExecutionResults,
	transactionResults storage.TransactionResults,
	uploadStatusStore storage.ComputationResultUploadStatus,
	execDataDownloader execution_data.Downloader,
	metrics module.ExecutionMetrics) *BadgerRetryableUploaderWrapper {

	// check params
	if uploader == nil {
		log.Error().Msg("nil uploader passed in")
		return nil
	}
	if blocks == nil || commits == nil || collections == nil ||
		events == nil || results == nil || transactionResults == nil ||
		uploadStatusStore == nil || execDataDownloader == nil {
		log.Error().Msg("not all storage parameters are valid")
		return nil
	}

	// When Upload() succeeds, the ComputationResult upload status in BadgerDB is updated to true.
	onCompleteCB := func(computationResult *execution.ComputationResult, err error) {
		if computationResult == nil || computationResult.ExecutableBlock == nil ||
			computationResult.ExecutableBlock.Block == nil {
			log.Warn().Msg("nil ComputationResult or nil ComputationResult.ExecutableBlock or " +
				"nil ComputationResult.ExecutableBlock.Block")
			return
		}

		blockID := computationResult.ExecutableBlock.Block.ID()
		if err != nil {
			log.Warn().Msgf("ComputationResults upload failed with BlockID %s", blockID.String())
			return
		}

		// Update upload status as Done (true)
		if err := uploadStatusStore.Upsert(blockID, true /*upload complete*/); err != nil {
			log.Warn().Msgf(
				"ComputationResults with BlockID %s failed to be updated on local disk. ERR: %s",
				blockID.String(), err.Error())
		}

		metrics.ExecutionComputationResultUploaded()
	}

	uploader.SetOnCompleteCallback(onCompleteCB)

	return &BadgerRetryableUploaderWrapper{
		uploader:           uploader,
		execDataDownloader: execDataDownloader,
		unit:               engine.NewUnit(),
		metrics:            metrics,
		blocks:             blocks,
		commits:            commits,
		collections:        collections,
		events:             events,
		results:            results,
		transactionResults: transactionResults,
		uploadStatusStore:  uploadStatusStore,
	}
}

func (b *BadgerRetryableUploaderWrapper) Ready() <-chan struct{} {
	return b.uploader.Ready()
}

func (b *BadgerRetryableUploaderWrapper) Done() <-chan struct{} {
	return b.uploader.Done()
}

func (b *BadgerRetryableUploaderWrapper) Upload(computationResult *execution.ComputationResult) error {
	if computationResult == nil || computationResult.ExecutableBlock == nil ||
		computationResult.ExecutableBlock.Block == nil {
		return errors.New("ComputationResult or its ExecutableBlock (or its Block) is nil when Upload() is called")
	}

	// Store the ComputationResult upload status in BadgerDB as false (incomplete) before the upload starts.
	// It will be marked as true when the upload completes.
	blockID := computationResult.ExecutableBlock.Block.ID()
	if err := b.uploadStatusStore.Upsert(blockID, false /*not completed*/); err != nil {
		log.Warn().Msgf("failed to store ComputationResult into local DB with BlockID %s", blockID)
	}

	return b.uploader.Upload(computationResult)
}

func (b *BadgerRetryableUploaderWrapper) RetryUpload() error {
	blockIDs, retErr := b.uploadStatusStore.GetIDsByUploadStatus(false /* not uploaded */)
	if retErr != nil {
		log.Error().Err(retErr).Msg("failed to load the BlockID list of un-uploaded ComputationResult from local DB")
		return retErr
	}

	var (
		wg         sync.WaitGroup
		retErrLock sync.Mutex // guards retErr, which is written from multiple goroutines
	)
	for _, blockID := range blockIDs {
		wg.Add(1)
		go func(blockID flow.Identifier) {
			defer wg.Done()

			log.Debug().Msgf("retrying upload for computation result of block %s", blockID.String())

			retComputationResult, err := b.reconstructComputationResult(blockID)
			if err != nil {
				log.Error().Err(err).Msgf(
					"failed to reconstruct ComputationResult with BlockID %s", blockID)
				return
			}

			// Do Upload
			if err := b.uploader.Upload(retComputationResult); err != nil {
				log.Error().Err(err).Msgf(
					"failed to re-upload ComputationResult with BlockID %s", blockID)
				retErrLock.Lock()
				retErr = err
				retErrLock.Unlock()
			} else {
				log.Debug().Msgf("computation result of block %s was successfully re-uploaded", blockID.String())
			}

			b.metrics.ExecutionComputationResultUploadRetried()
		}(blockID)
	}
	wg.Wait()

	// return the latest error that occurred, if any
	return retErr
}

func (b *BadgerRetryableUploaderWrapper) reconstructComputationResult(
	blockID flow.Identifier) (*execution.ComputationResult, error) {

	// Get the ExecutionDataID from the ExecutionResult in BadgerDB
	executionResult, err := b.results.ByBlockID(blockID)
	if err != nil {
		log.Error().Err(err).Msgf(
			"failed to retrieve ExecutionResult from Badger with BlockID %s", blockID.String())
		return nil, err
	}
	executionDataID := executionResult.ExecutionDataID

	// retrieving BlockExecutionData from EDS
	executionData, err := b.execDataDownloader.Download(b.unit.Ctx(), executionDataID)
	if executionData == nil || err != nil {
		log.Error().Err(err).Msgf(
			"failed to retrieve BlockExecutionData from EDS with ID %s", executionDataID.String())
		return nil, err
	}

	// grabbing TrieUpdates from BlockExecutionData
	var trieUpdates []*ledger.TrieUpdate
	for _, chunkExecutionData := range executionData.ChunkExecutionDatas {
		if chunkExecutionData.TrieUpdate != nil {
			trieUpdates = append(trieUpdates, chunkExecutionData.TrieUpdate)
		}
	}

	// retrieving events from local BadgerDB
	events, err := b.events.ByBlockID(blockID)
	if err != nil {
		log.Warn().Msgf(
			"failed to retrieve events for BlockID %s. Error: %s", blockID.String(), err.Error())
	}

	// retrieving Block from local BadgerDB
	block, err := b.blocks.ByID(blockID)
	if err != nil {
		log.Warn().Msgf(
			"failed to retrieve Block with BlockID %s. Error: %s", blockID.String(), err.Error())
	}

	// grabbing collections and guarantees from BadgerDB
	guarantees := make([]*flow.CollectionGuarantee, 0)
	if block != nil && block.Payload != nil {
		guarantees = block.Payload.Guarantees
	}

	completeCollections := make(map[flow.Identifier]*entity.CompleteCollection)
	for _, guarantee := range guarantees {
		collectionID := guarantee.CollectionID
		collection, err := b.collections.ByID(collectionID)
		if err != nil {
			log.Warn().Msgf(
				"failed to retrieve collections with CollectionID %s. Error: %s", collectionID, err.Error())
			continue
		}

		completeCollections[collectionID] = &entity.CompleteCollection{
			Guarantee:    guarantee,
			Transactions: collection.Transactions,
		}
	}

	// retrieving TransactionResults from BadgerDB
	transactionResults, err := b.transactionResults.ByBlockID(blockID)
	if err != nil {
		log.Warn().Msgf(
			"failed to retrieve TransactionResults with BlockID %s. Error: %s", blockID.String(), err.Error())
	}

	// retrieving StateCommitment from BadgerDB
	endState, err := b.commits.ByBlockID(blockID)
	if err != nil {
		log.Warn().Msgf("failed to retrieve StateCommitment with BlockID %s. Error: %s", blockID.String(), err.Error())
	}

	// for now we only care about fields in BlockData
	return &execution.ComputationResult{
		ExecutableBlock: &entity.ExecutableBlock{
			Block:               block,
			CompleteCollections: completeCollections,
		},
		Events:             []flow.EventsList{events},
		TransactionResults: transactionResults,
		StateCommitments:   []flow.StateCommitment{endState},
		TrieUpdates:        trieUpdates,
	}, nil
}
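
// Usage sketch (illustrative, not part of the original file): one way the wrapper
// might be wired up and pending uploads retried. The dependency values
// (asyncUploader, the storage interfaces, execDataDownloader, and metrics) are
// assumed to be provided elsewhere, e.g. by the execution node builder; their
// names below are hypothetical.
//
//	wrapper := uploader.NewBadgerRetryableUploaderWrapper(
//		asyncUploader,
//		blocks, commits, collections, events, results, transactionResults,
//		uploadStatusStore, execDataDownloader, metrics,
//	)
//	if wrapper == nil {
//		// the constructor logs and returns nil on invalid parameters
//		return
//	}
//	<-wrapper.Ready()
//	if err := wrapper.RetryUpload(); err != nil {
//		// at least one previously failed ComputationResult could not be re-uploaded
//		log.Warn().Err(err).Msg("retry of pending ComputationResult uploads did not fully succeed")
//	}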