# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------

import logging
from threading import RLock
from collections import deque

from sawtooth_validator.journal.block_cache import BlockCache
from sawtooth_validator.journal.block_wrapper import BlockWrapper
from sawtooth_validator.journal.block_wrapper import NULL_BLOCK_IDENTIFIER
from sawtooth_validator.journal.timed_cache import TimedCache
from sawtooth_validator.protobuf.transaction_pb2 import TransactionHeader
from sawtooth_validator.protobuf import network_pb2
from sawtooth_validator.networking.dispatch import Handler
from sawtooth_validator.networking.dispatch import HandlerResult
from sawtooth_validator.networking.dispatch import HandlerStatus
from sawtooth_validator import metrics

LOGGER = logging.getLogger(__name__)
COLLECTOR = metrics.get_collector(__name__)


class Completer(object):
    """
    The Completer is responsible for making sure blocks are formally
    complete before they are delivered to the chain controller. A formally
    complete block is a block whose predecessor is in the block cache and all
    the batches are present in the batch list and in the order specified by the
    block header. If the predecessor or a batch is missing, a request message
    is sent out over the gossip network. It also checks that all batches
    have their dependencies satisfied, otherwise it will request the batch that
    has the missing transaction.
    """

    def __init__(self,
                 block_store,
                 gossip,
                 cache_keep_time=1200,
                 cache_purge_frequency=30,
                 requested_keep_time=300):
        """
        :param block_store (dictionary) The block store shared with the journal
        :param gossip (gossip.Gossip) Broadcasts block and batch request to
                peers
        :param cache_keep_time (float) Time in seconds to keep values in
                TimedCaches.
        :param cache_purge_frequency (float) Time between purging the
                TimedCaches.
        :param requested_keep_time (float) Time in seconds to keep the ids
                of requested objects. WARNING this time should always be less
                than cache_keep_time or the validator can get into a state
                where it fails to make progress because it thinks it has
                already requested something that it is missing.
        """
        self.gossip = gossip
        self.batch_cache = TimedCache(cache_keep_time, cache_purge_frequency)
        self.block_cache = BlockCache(block_store,
                                      cache_keep_time,
                                      cache_purge_frequency)
        self._block_store = block_store
        # avoid throwing away the genesis block
        self.block_cache[NULL_BLOCK_IDENTIFIER] = None
        # Maps txn_id -> header_signature of the batch that contained it, so
        # dependency checks can be answered without walking the chain.
        self._seen_txns = TimedCache(cache_keep_time, cache_purge_frequency)
        # Maps a missing txn_id -> [batches] waiting on that transaction.
        self._incomplete_batches = TimedCache(cache_keep_time,
                                              cache_purge_frequency)
        # Maps a missing block_id or batch_id -> [blocks] waiting on it.
        self._incomplete_blocks = TimedCache(cache_keep_time,
                                             cache_purge_frequency)
        # Ids of objects already requested over gossip; prevents re-requests.
        self._requested = TimedCache(requested_keep_time,
                                     cache_purge_frequency)
        self._on_block_received = None
        self._on_batch_received = None
        self._has_block = None
        self.lock = RLock()

        # Tracks how many times an unsatisfied dependency is found
        self._unsatisfied_dependency_count = COLLECTOR.counter(
            'unsatisfied_dependency_count', instance=self)
        # Tracks the length of the completer's _seen_txns
        self._seen_txns_length = COLLECTOR.gauge(
            'seen_txns_length', instance=self)
        # Tracks the length of the completer's _incomplete_blocks
        self._incomplete_blocks_length = COLLECTOR.gauge(
            'incomplete_blocks_length', instance=self)
        # Tracks the length of the completer's _incomplete_batches
        self._incomplete_batches_length = COLLECTOR.gauge(
            'incomplete_batches_length', instance=self)

    def _complete_block(self, block):
        """ Check the block to see if it is complete and if it can be passed
        to the journal. If the block's predecessor is not in the block_cache
        the predecessor is requested and the current block is added to the
        incomplete_block cache. If the block.batches and
        block.header.batch_ids are not the same length, the batch_id list
        is checked against the batch_cache to see if the batch_list can be
        built. If any batches are missing from the block and we do not have
        the batches in the batch_cache, they are requested. The block is
        then added to the incomplete_block cache. If we can complete the
        block, a new batch list is created in the correct order and added
        to the block. The block is now considered complete and is returned.
        If block.batches and block.header.batch_ids are the same length,
        the block's batch list needs to be in the same order as the
        block.header.batch_ids list. If the block has all of its expected
        batches but are not in the correct order, the batch list is rebuilt
        and added to the block. Once a block has the correct batch list it
        is added to the block_cache and is returned.

        """

        if block.header_signature in self.block_cache:
            LOGGER.debug("Drop duplicate block: %s", block)
            return None

        if block.previous_block_id not in self.block_cache:
            if not self._has_block(block.previous_block_id):
                # Park this block until its predecessor arrives.
                if block.previous_block_id not in self._incomplete_blocks:
                    self._incomplete_blocks[block.previous_block_id] = [block]
                elif block not in \
                        self._incomplete_blocks[block.previous_block_id]:
                    self._incomplete_blocks[block.previous_block_id] += [block]

                # We have already requested the block, do not do so again
                if block.previous_block_id in self._requested:
                    return None

                LOGGER.debug("Request missing predecessor: %s",
                             block.previous_block_id)
                self._requested[block.previous_block_id] = None
                self.gossip.broadcast_block_request(block.previous_block_id)
                return None

        # Check for same number of batch_ids and batches.
        # If the count differs, try to build the batch list below; a block
        # carrying more batches than its header declares can never be valid
        # and is dropped.
        if len(block.batches) > len(block.header.batch_ids):
            LOGGER.debug("Block has extra batches. Dropping %s", block)
            return None

        # used to supplement batch_cache, contains batches already in block
        temp_batches = {}
        for batch in block.batches:
            temp_batches[batch.header_signature] = batch

        # The block is missing batches. Check to see if we can complete it.
        if len(block.batches) != len(block.header.batch_ids):
            building = True
            for batch_id in block.header.batch_ids:
                if batch_id not in self.batch_cache and \
                        batch_id not in temp_batches:
                    # Request all missing batches
                    if batch_id not in self._incomplete_blocks:
                        self._incomplete_blocks[batch_id] = [block]
                    elif block not in self._incomplete_blocks[batch_id]:
                        self._incomplete_blocks[batch_id] += [block]

                    # We have already requested the batch, do not do so again
                    if batch_id in self._requested:
                        return None
                    self._requested[batch_id] = None
                    self.gossip.broadcast_batch_by_batch_id_request(batch_id)
                    building = False

            if not building:
                # The block cannot be completed.
                return None

            batches = self._finalize_batch_list(block, temp_batches)
            del block.batches[:]
            # reset batches with full list batches
            block.batches.extend(batches)
            if block.header_signature in self._requested:
                del self._requested[block.header_signature]
            return block

        else:
            batch_id_list = [x.header_signature for x in block.batches]
            # Check to see if batches are in the correct order.
            if batch_id_list == list(block.header.batch_ids):
                if block.header_signature in self._requested:
                    del self._requested[block.header_signature]
                return block
            # Check to see if the block has all batch_ids and they can be put
            # in the correct order
            elif sorted(batch_id_list) == sorted(list(
                    block.header.batch_ids)):
                batches = self._finalize_batch_list(block, temp_batches)
                # Clear batches from block
                del block.batches[:]
                # reset batches with full list batches
                if batches is not None:
                    block.batches.extend(batches)
                else:
                    return None

                if block.header_signature in self._requested:
                    del self._requested[block.header_signature]

                return block
            else:
                LOGGER.debug("Block.header.batch_ids does not match set of "
                             "batches in block.batches Dropping %s", block)
                return None

    def _finalize_batch_list(self, block, temp_batches):
        """Return the block's batches in header order, drawing from the
        batch_cache and the batches already carried by the block; None if
        any batch is unavailable."""
        batches = []
        for batch_id in block.header.batch_ids:
            if batch_id in self.batch_cache:
                batches.append(self.batch_cache[batch_id])
            elif batch_id in temp_batches:
                batches.append(temp_batches[batch_id])
            else:
                return None

        return batches

    def _complete_batch(self, batch):
        """Return True if every transaction dependency in the batch has been
        seen or is already committed; otherwise request the batches holding
        the missing transactions and return False."""
        valid = True
        dependencies = []
        for txn in batch.transactions:
            txn_header = TransactionHeader()
            txn_header.ParseFromString(txn.header)
            for dependency in txn_header.dependencies:
                # Check to see if the dependency has been seen or is in the
                # current chain (block_store)
                if dependency not in self._seen_txns and not \
                        self.block_cache.block_store.has_transaction(
                            dependency):
                    self._unsatisfied_dependency_count.inc()

                    # Check to see if the dependency has already been requested
                    if dependency not in self._requested:
                        dependencies.append(dependency)
                        self._requested[dependency] = None
                    if dependency not in self._incomplete_batches:
                        self._incomplete_batches[dependency] = [batch]
                    elif batch not in self._incomplete_batches[dependency]:
                        self._incomplete_batches[dependency] += [batch]
                    valid = False
        if not valid:
            self.gossip.broadcast_batch_by_transaction_id_request(
                dependencies)

        return valid

    def _add_seen_txns(self, batch):
        """Record every transaction in the batch as seen, keyed by txn id."""
        for txn in batch.transactions:
            self._seen_txns[txn.header_signature] = batch.header_signature
        self._seen_txns_length.set_value(
            len(self._seen_txns))

    def _process_incomplete_batches(self, key):
        # Keys are transaction_id
        if key in self._incomplete_batches:
            batches = self._incomplete_batches[key]
            for batch in batches:
                self.add_batch(batch)
            del self._incomplete_batches[key]

    def _process_incomplete_blocks(self, key):
        # Keys are either a block_id or batch_id
        if key in self._incomplete_blocks:
            to_complete = deque()
            to_complete.append(key)

            # Completing one block may in turn complete blocks that were
            # waiting on it, so walk the dependents breadth-first.
            while to_complete:
                my_key = to_complete.popleft()
                if my_key in self._incomplete_blocks:
                    inc_blocks = self._incomplete_blocks[my_key]
                    for inc_block in inc_blocks:
                        if self._complete_block(inc_block):
                            self.block_cache[inc_block.header_signature] = \
                                inc_block
                            self._on_block_received(inc_block)
                            to_complete.append(inc_block.header_signature)
                    del self._incomplete_blocks[my_key]

    def set_on_block_received(self, on_block_received_func):
        self._on_block_received = on_block_received_func

    def set_on_batch_received(self, on_batch_received_func):
        self._on_batch_received = on_batch_received_func

    def set_chain_has_block(self, set_chain_has_block):
        self._has_block = set_chain_has_block

    def add_block(self, block):
        """Wrap and attempt to complete the block; on success deliver it and
        retry any blocks that were waiting on it."""
        with self.lock:
            blkw = BlockWrapper(block)
            block = self._complete_block(blkw)
            if block is not None:
                self.block_cache[block.header_signature] = blkw
                self._on_block_received(blkw)
                self._process_incomplete_blocks(block.header_signature)
            self._incomplete_blocks_length.set_value(
                len(self._incomplete_blocks))

    def add_batch(self, batch):
        """Attempt to complete the batch; on success deliver it and retry
        any blocks or batches that were waiting on it."""
        with self.lock:
            if batch.header_signature in self.batch_cache:
                return
            if self._complete_batch(batch):
                self.batch_cache[batch.header_signature] = batch
                self._add_seen_txns(batch)
                self._on_batch_received(batch)
                self._process_incomplete_blocks(batch.header_signature)
                if batch.header_signature in self._requested:
                    del self._requested[batch.header_signature]
                # If there was a batch waiting on this transaction, process
                # that batch
                for txn in batch.transactions:
                    if txn.header_signature in self._incomplete_batches:
                        if txn.header_signature in self._requested:
                            del self._requested[txn.header_signature]
                        self._process_incomplete_batches(txn.header_signature)
            self._incomplete_batches_length.set_value(
                len(self._incomplete_batches))

    def get_chain_head(self):
        """Returns the block which is the current head of the chain.

        Returns:
            BlockWrapper: The head of the chain.
        """
        with self.lock:
            return self._block_store.chain_head

    def get_block(self, block_id):
        with self.lock:
            if block_id in self.block_cache:
                return self.block_cache[block_id]
            return None

    def get_batch(self, batch_id):
        with self.lock:
            if batch_id in self.batch_cache:
                return self.batch_cache[batch_id]

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch(batch_id)
                except ValueError:
                    return None

    def get_batch_by_transaction(self, transaction_id):
        with self.lock:
            if transaction_id in self._seen_txns:
                batch_id = self._seen_txns[transaction_id]
                return self.get_batch(batch_id)

            else:
                block_store = self.block_cache.block_store
                try:
                    return block_store.get_batch_by_transaction(
                        transaction_id)
                except ValueError:
                    return None


class CompleterBatchListBroadcastHandler(Handler):
    """Feeds client-submitted batch lists into the completer and rebroadcasts
    each batch to peers."""

    def __init__(self, completer, gossip):
        self._completer = completer
        self._gossip = gossip

    def handle(self, connection_id, message_content):
        for batch in message_content.batches:
            if batch.trace:
                LOGGER.debug("TRACE %s: %s", batch.header_signature,
                             self.__class__.__name__)
            self._completer.add_batch(batch)
            self._gossip.broadcast_batch(batch)
        return HandlerResult(status=HandlerStatus.PASS)


class CompleterGossipHandler(Handler):
    """Routes gossiped blocks and batches into the completer."""

    def __init__(self, completer):
        self._completer = completer

    def handle(self, connection_id, message_content):
        obj, tag, _ = message_content

        if tag == network_pb2.GossipMessage.BLOCK:
            self._completer.add_block(obj)
        elif tag == network_pb2.GossipMessage.BATCH:
            self._completer.add_batch(obj)
        return HandlerResult(status=HandlerStatus.PASS)


class CompleterGossipBlockResponseHandler(Handler):
    """Feeds blocks received in response to our block requests into the
    completer."""

    def __init__(self, completer):
        self._completer = completer

    def handle(self, connection_id, message_content):
        block, _ = message_content
        self._completer.add_block(block)

        return HandlerResult(status=HandlerStatus.PASS)


class CompleterGossipBatchResponseHandler(Handler):
    """Feeds batches received in response to our batch requests into the
    completer."""

    def __init__(self, completer):
        self._completer = completer

    def handle(self, connection_id, message_content):
        batch, _ = message_content
        self._completer.add_batch(batch)

        return HandlerResult(status=HandlerStatus.PASS)