github.com/muhammedhassanm/blockchain@v0.0.0-20200120143007-697261defd4d/sawtooth-core-master/integration/sawtooth_integration/tests/test_poet_liveness.py (about)

     1  # Copyright 2017 Intel Corporation
     2  #
     3  # Licensed under the Apache License, Version 2.0 (the "License");
     4  # you may not use this file except in compliance with the License.
     5  # You may obtain a copy of the License at
     6  #
     7  #     http://www.apache.org/licenses/LICENSE-2.0
     8  #
     9  # Unless required by applicable law or agreed to in writing, software
    10  # distributed under the License is distributed on an "AS IS" BASIS,
    11  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  # See the License for the specific language governing permissions and
    13  # limitations under the License.
    14  # ------------------------------------------------------------------------------
    15  
    16  # pylint: disable=bare-except
    17  
    18  import time
    19  import unittest
    20  import logging
    21  
    22  import requests
    23  
    24  
LOGGER = logging.getLogger(__name__)

# REST API endpoint template; '%d' is filled with the node index (rest-api-0 .. rest-api-N-1)
URL = 'http://rest-api-%d:8008'

# The number of nodes in the test (this needs to match the test's compose file)
NODES = 5

# Every block must have a batch count within this inclusive (min, max) range
BATCHES_PER_BLOCK_RANGE = (1, 100)

# At the end of the test, at least this many batches must be in the chain
MIN_TOTAL_BATCHES = 100

# All nodes must reach this block for the test to pass.
BLOCK_TO_REACH = 55

# Once all nodes reach the BLOCK_TO_REACH, the test will check for consensus
# at this block. These are different because PoET occasionally forks.
BLOCK_TO_CHECK_CONSENSUS = 52
    44  
    45  
    46  class TestPoetLive(unittest.TestCase):
    47      def test_poet_liveness(self):
    48          """Test that a PoET network publishes blocks and stays in consensus."""
    49  
    50          # Wait until all nodes have reached the minimum block number
    51          nodes_reached = set()
    52          while len(nodes_reached) < NODES:
    53              for i in range(0, NODES):
    54                  block = get_block(i)
    55                  if block is not None:
    56  
    57                      # Ensure all blocks have an acceptable number of batches
    58                      self.assertTrue(check_block_batch_count(
    59                          block, BATCHES_PER_BLOCK_RANGE))
    60                      if int(block["header"]["block_num"]) >= BLOCK_TO_REACH:
    61                          nodes_reached.add(i)
    62  
    63                      log_block(i, block)
    64  
    65              time.sleep(15)
    66  
    67          chains = [get_chain(node) for node in range(0, NODES)]
    68  
    69          # Ensure all nodes are in consensus on the target block
    70          self.assertTrue(check_consensus(chains, BLOCK_TO_CHECK_CONSENSUS))
    71  
    72          # Assert an acceptable number of batches were committed
    73          self.assertTrue(check_min_batches(chains[0], MIN_TOTAL_BATCHES))
    74  
    75  
    76  def get_block(node):
    77      try:
    78          result = requests.get((URL % node) + "/blocks?count=1")
    79          result = result.json()
    80          try:
    81              return result["data"][0]
    82          except:
    83              LOGGER.warning(result)
    84      except:
    85          LOGGER.warning("Couldn't connect to REST API %s", node)
    86  
    87  
    88  def get_chain(node):
    89      try:
    90          result = requests.get((URL % node) + "/blocks")
    91          result = result.json()
    92          try:
    93              return result["data"]
    94          except:
    95              LOGGER.warning(result)
    96      except:
    97          LOGGER.warning("Couldn't connect to REST API %s", node)
    98  
    99  
   100  def log_block(node, block):
   101      batches = block["header"]["batch_ids"]
   102      batches = [b[:6] + '..' for b in batches]
   103      LOGGER.warning(
   104          "Validator-%s has block %s: %s, batches (%s): %s",
   105          node,
   106          block["header"]["block_num"],
   107          block["header_signature"][:6] + '..',
   108          len(batches),
   109          batches)
   110  
   111  
   112  def check_block_batch_count(block, batch_range):
   113      batch_count = len(block["header"]["batch_ids"])
   114  
   115      valid = batch_range[0] <= batch_count <= batch_range[1]
   116  
   117      if not valid:
   118          LOGGER.error(
   119              "Block (%s, %s) had %s batches in it",
   120              block["header"]["block_num"],
   121              block["header_signature"],
   122              batch_count)
   123  
   124      return valid
   125  
   126  
   127  def check_min_batches(chain, min_batches):
   128      n = sum([len(block["header"]["batch_ids"]) for block in chain])
   129      return n >= min_batches
   130  
   131  
   132  def check_consensus(chains, block_num):
   133      blocks = []
   134      for chain in chains:
   135          if chain is not None:
   136              block = chain[-(block_num + 1)]
   137              blocks.append(block)
   138          else:
   139              return False
   140      b0 = blocks[0]
   141      for b in blocks[1:]:
   142          if b0["header_signature"] != b["header_signature"]:
   143              LOGGER.error("Validators not in consensus on block %s", block_num)
   144              LOGGER.error("BLOCK DUMP: %s", blocks)
   145              return False
   146      return True