github.com/platonnetwork/platon-go@v0.7.6/cases/environment/env.py

import json
import os
import time
import random
import shutil
import tarfile
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
import copy
from ruamel import yaml
from environment.node import Node
from environment.server import Server
from common.abspath import abspath
from common.key import generate_key, generate_blskey
from common.load_file import LoadFile, calc_hash, get_f
from common.log import log
from environment.account import Account
from environment.config import TestConfig
from conf.settings import DEFAULT_CONF_TMP_DIR, ConfTmpDir
from typing import List


def check_file_exists(*args):
    """
    Check that the given local files exist
    :param args: file paths to check
    """
    for arg in args:
        if not os.path.exists(os.path.abspath(arg)):
            raise Exception("file:{} does not exist".format(arg))


class TestEnvironment:
    def __init__(self, cfg: TestConfig):
        # env config
        self.cfg = cfg

        # these files must exist
        check_file_exists(self.cfg.platon_bin_file, self.cfg.genesis_file, self.cfg.supervisor_file,
                          self.cfg.node_file, self.cfg.address_file)
        if not os.path.exists(self.cfg.root_tmp):
            os.mkdir(self.cfg.root_tmp)

        # node config
        self.__is_update_node_file = False
        self.node_config = LoadFile(self.cfg.node_file).get_data()
        self.consensus_node_config_list = self.node_config.get("consensus", [])
        self.noconsensus_node_config_list = self.node_config.get("noconsensus", [])
        self.node_config_list = self.consensus_node_config_list + self.noconsensus_node_config_list
        self.__rewrite_node_file()

        # node object lists
        self.__consensus_node_list = []
        self.__normal_node_list = []

        # env info
        self.cfg.env_id = self.__reset_env()

        # genesis
        self.genesis_config = LoadFile(self.cfg.genesis_file).get_data()

        # servers
        self.server_list = self.__parse_servers()

        # nodes
        self.__parse_node()

        # accounts
        self.account = Account(self.cfg.account_file, self.genesis_config["config"]["chainId"])

        self.rewrite_genesis_file()

    @property
    def consensus_node_list(self) -> List[Node]:
        return self.__consensus_node_list

    @property
    def normal_node_list(self) -> List[Node]:
        return self.__normal_node_list

    @property
    def chain_id(self):
        return self.genesis_config["config"]["chainId"]

    @property
    def amount(self):
        return self.genesis_config["config"]["cbft"]["amount"]

    @property
    def period(self):
        return self.genesis_config["config"]["cbft"]["period"]

    @property
    def validatorMode(self):
        return self.genesis_config["config"]["cbft"]["validatorMode"]

    @property
    def version(self):
        return ""

    @property
    def running(self) -> bool:
        """
        Determine whether all nodes are running
        :return: bool
        """
        for node in self.get_all_nodes():
            if not node.running:
                return False
        return True

    @property
    def max_byzantium(self) -> int:
        """
        Maximum number of Byzantine nodes the consensus set can tolerate
        """
        return get_f(self.consensus_node_config_list)

    @property
    def block_interval(self) -> int:
        """
        Expected block interval in seconds
        """
        period = self.genesis_config["config"]["cbft"].get("period")
        amount = self.genesis_config["config"]["cbft"].get("amount")
        return int(period / 1000 / amount)

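    # Illustrative sketch (the values are assumptions, not taken from the repo config):
    # with period=10000 (milliseconds per CBFT view) and amount=10 (blocks per view),
    # block_interval = int(10000 / 1000 / 10) = 1 second per block.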
    def copy_env(self):
        """
        Make a shallow copy of this environment
        """
        return copy.copy(self)

    def set_cfg(self, cfg: TestConfig):
        """
        Replace the test configuration and propagate it to every node
        :param cfg:
        """
        self.cfg = cfg
        genesis_config = LoadFile(self.cfg.genesis_file).get_data()
        self.rewrite_genesis_file()
        self.set_genesis(genesis_config)
        for node in self.get_all_nodes():
            node.cfg = cfg

    def set_genesis(self, genesis_config: dict):
        """
        Replace the genesis configuration and propagate the chain id to every node
        :param genesis_config:
        """
        self.genesis_config = genesis_config
        self.account.chain_id = self.chain_id
        for node in self.get_all_nodes():
            node.chain_id = self.chain_id

    def __reset_env(self) -> str:
        """
        Decide whether a new environment needs to be created, based on the hashes
        of the platon binary and the node configuration file.
        :return: env_id
        """
        env_tmp_file = os.path.join(self.cfg.env_tmp, "env.yml")
        if os.path.exists(self.cfg.env_tmp):
            if os.path.exists(env_tmp_file):
                env_data = LoadFile(env_tmp_file).get_data()
                if env_data["bin_hash"] == calc_hash(self.cfg.platon_bin_file) \
                        and env_data["node_hash"] == calc_hash(self.cfg.node_file):
                    return env_data["env_id"]

            shutil.rmtree(self.cfg.env_tmp)
        os.makedirs(self.cfg.env_tmp)
        new_env_data = {"bin_hash": calc_hash(self.cfg.platon_bin_file), "node_hash": calc_hash(self.cfg.node_file)}
        env_id = new_env_data["bin_hash"] + new_env_data["node_hash"]
        new_env_data["env_id"] = env_id
        with open(env_tmp_file, "w", encoding="utf-8") as f:
            yaml.dump(new_env_data, f, Dumper=yaml.RoundTripDumper)
        return env_id

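    # Sketch of the env.yml written above (hash values are made up for illustration):
    #
    #   bin_hash: "3f7a..."          # calc_hash(platon_bin_file)
    #   node_hash: "9c1d..."         # calc_hash(node_file)
    #   env_id: "3f7a...9c1d..."     # concatenation of the two hashes
    #
    # If both hashes still match on a later run, the existing env_id is reused
    # instead of rebuilding the environment package.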
    def get_init_nodes(self) -> List[dict]:
        """
        Get the list of initial consensus nodes
        :return: list
        """
        init_node_list = []
        for node in self.consensus_node_list:
            init_node_list.append({"node": node.enode, "blsPubKey": node.blspubkey})
        return init_node_list

    def get_static_nodes(self) -> list:
        """
        Get the enode list of all static nodes
        :return: list
        """
        static_node_list = []
        for node in self.get_all_nodes():
            static_node_list.append(node.enode)
        return static_node_list

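    # For illustration only (node id and host values are assumptions): an entry returned
    # by get_init_nodes() looks roughly like
    #   {"node": "enode://<node_id>@10.10.8.1:16789", "blsPubKey": "<bls public key>"}
    # while get_static_nodes() returns just the list of those enode URLs, which is what
    # rewrite_static_nodes() dumps into static-nodes.json.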
    def get_all_nodes(self) -> List[Node]:
        """
        Get all node objects
        :return: list of Node objects
        """
        return self.__consensus_node_list + self.__normal_node_list

    def get_rand_node(self) -> Node:
        """
        Randomly pick a consensus node
        :return: Node object
        """
        return random.choice(self.consensus_node_list)

    def get_consensus_node_by_index(self, index) -> Node:
        """
        Get a consensus node by index
        :param index:
        :return: Node object
        """
        return self.__consensus_node_list[index]

    def get_normal_node_by_index(self, index) -> Node:
        """
        Get a normal (non-consensus) node by index
        :param index:
        :return: Node object
        """
        return self.__normal_node_list[index]

    def get_a_normal_node(self) -> Node:
        """
        Get the first normal node
        :return: Node object
        """
        return self.__normal_node_list[0]

    def executor(self, func, data_list, *args) -> bool:
        """
        Run func concurrently over data_list; every call is expected to return an
        (is_success, msg) tuple, and all failure messages are collected and raised.
        """
        with ThreadPoolExecutor(max_workers=self.cfg.max_worker) as exe:
            futures = [exe.submit(func, pair, *args) for pair in data_list]
            done, unfinished = wait(futures, timeout=30, return_when=ALL_COMPLETED)
        result = []
        for d in done:
            is_success, msg = d.result()
            if not is_success:
                result.append(msg)
        if len(result) > 0:
            raise Exception("executor {} failed:{}".format(func.__name__, result))
        return True

    def deploy_all(self, genesis_file=None):
        """
        Deploy and start all nodes
        :param genesis_file: path to a genesis file; if omitted, the generated genesis_tmp is used
        """
        self.account.reset()
        self.prepare_all()
        if genesis_file is None:
            genesis_file = self.cfg.genesis_tmp
        log.info("deploy all nodes")
        self.deploy_nodes(self.get_all_nodes(), genesis_file)
        log.info("deploy success")

    def prepare_all(self):
        """
        Prepare the environment data
        """
        self.rewrite_genesis_file()
        self.rewrite_static_nodes()
        self.rewrite_config_json()
        self.__compression()
        if self.cfg.install_supervisor:
            self.install_all_supervisor()
            self.cfg.install_supervisor = False
        if self.cfg.install_dependency:
            self.install_all_dependency()
            self.cfg.install_dependency = False
        self.put_all_compression()

    def start_all(self):
        """
        Start all nodes; whether to initialize the chain first is decided by cfg.init_chain
        """
        log.info("start all nodes")
        self.start_nodes(self.get_all_nodes(), self.cfg.init_chain)

    def stop_all(self):
        """
        Stop all nodes
        """
        log.info("stop all nodes")
        self.stop_nodes(self.get_all_nodes())

    def reset_all(self):
        """
        Restart all nodes
        """
        log.info("restart all nodes")
        self.reset_nodes(self.get_all_nodes())

    def clean_all(self):
        """
        Stop all nodes and delete their deployment directories
        """
        log.info("clean all nodes")
        self.clean_nodes(self.get_all_nodes())

    def clean_db_all(self):
        """
        Stop all nodes and delete their databases
        """
        log.info("clean db of all nodes")
        self.clean_db_nodes(self.get_all_nodes())

    def shutdown(self):
        """
        Stop all nodes and delete the node deployment directories and supervisor configuration
        """
        log.info("shutdown and clean all nodes")

        def close(node: Node):
            return node.close()

        return self.executor(close, self.get_all_nodes())

    def start_nodes(self, node_list: List[Node], init_chain=True):
        """
        Start the given nodes
        :param node_list:
        :param init_chain:
        """
        def start(node: Node, need_init_chain):
            return node.start(need_init_chain)

        return self.executor(start, node_list, init_chain)

    def deploy_nodes(self, node_list: List[Node], genesis_file):
        """
        Deploy the given nodes: clean the old environment first if the chain needs
        to be initialized, then upload all node files and start the nodes.
        :param node_list:
        :param genesis_file:
        """
        log.info("deploy nodes")
        if self.cfg.init_chain:
            self.clean_nodes(node_list)

        self.put_file_nodes(node_list, genesis_file)
        return self.start_nodes(node_list, self.cfg.init_chain)

    def put_file_nodes(self, node_list: List[Node], genesis_file):
        """
        Upload all files to the given nodes
        :param node_list:
        :param genesis_file:
        """
        def prepare(node: Node):
            return node.put_all_file(genesis_file)

        return self.executor(prepare, node_list)

    def stop_nodes(self, node_list: List[Node]):
        """
        Stop the given nodes
        :param node_list:
        """
        def stop(node: Node):
            return node.stop()

        return self.executor(stop, node_list)

    def reset_nodes(self, node_list: List[Node]):
        """
        Restart the given nodes
        :param node_list:
        """
        def restart(node: Node):
            return node.restart()

        return self.executor(restart, node_list)

    def clean_nodes(self, node_list: List[Node]):
        """
        Stop the given nodes and delete their data
        :param node_list:
        :return:
        """
        def clean(node: Node):
            return node.clean()

        return self.executor(clean, node_list)

    def clean_db_nodes(self, node_list: List[Node]):
        """
        Stop the given nodes and clear their databases
        :param node_list:
        """
        def clean_db(node: Node):
            return node.clean_db()

        return self.executor(clean_db, node_list)

    def __parse_node(self):
        """
        Instantiate all node objects
        """
        def init(node_config):
            return Node(node_config, self.cfg, self.chain_id)

        log.info("parse node configs into node objects")
        with ThreadPoolExecutor(max_workers=self.cfg.max_worker) as executor:
            futures = [executor.submit(init, pair) for pair in self.consensus_node_config_list]
            done, unfinished = wait(futures, timeout=30, return_when=ALL_COMPLETED)
        for do in done:
            self.__consensus_node_list.append(do.result())

        if self.noconsensus_node_config_list:
            with ThreadPoolExecutor(max_workers=self.cfg.max_worker) as executor:
                futures = [executor.submit(init, pair) for pair in self.noconsensus_node_config_list]
                done, unfinished = wait(futures, timeout=30, return_when=ALL_COMPLETED)
            for do in done:
                self.__normal_node_list.append(do.result())

    def put_all_compression(self):
        """
        Upload the compressed environment package to all servers
        """
        log.info("upload compressed package")

        def uploads(server: Server):
            return server.put_compression()

        return self.executor(uploads, self.server_list)

    def install_all_dependency(self):
        """
        Install dependencies on all servers
        """
        log.info("install dependencies")

        def install(server: Server):
            return server.install_dependency()

        return self.executor(install, self.server_list)

    def install_all_supervisor(self):
        """
        Install supervisor on all servers
        """
        log.info("install supervisor")

        def install(server: Server):
            return server.install_supervisor()

        return self.executor(install, self.server_list)

    def __parse_servers(self) -> List[Server]:
        """
        Instantiate all server objects (one per distinct host)
        """
        server_config_list, server_list = [], []

        def check_in(_ip, nodes):
            for n in nodes:
                if _ip == n["host"]:
                    return True
            return False

        for node_config in self.node_config_list:
            ip = node_config["host"]
            if check_in(ip, server_config_list):
                continue
            server_config_list.append(node_config)

        def init(config):
            return Server(config, self.cfg)

        with ThreadPoolExecutor(max_workers=self.cfg.max_worker) as executor:
            futures = [executor.submit(init, pair) for pair in server_config_list]
            done, unfinished = wait(futures, timeout=30, return_when=ALL_COMPLETED)
        for do in done:
            server_list.append(do.result())
        return server_list

    def block_numbers(self, node_list: List[Node] = None) -> dict:
        """
        Get the current block height of the given nodes (all nodes by default)
        :param node_list:
        """
        if node_list is None:
            node_list = self.get_all_nodes()
        result = {}
        for node in node_list:
            result[node.node_mark] = node.block_number
        return result

    def check_block(self, need_number=10, multiple=3, node_list: List[Node] = None):
        """
        Wait until at least one of the given nodes reaches the required block height
        :param need_number: block height to wait for
        :param multiple: timeout multiplier applied to the expected block time
        :param node_list: nodes to check (all nodes by default)
        """
        if node_list is None:
            node_list = self.get_all_nodes()
        use_time = int(need_number * self.block_interval * multiple)
        while use_time:
            if max(self.block_numbers(node_list).values()) < need_number:
                time.sleep(1)
                use_time -= 1
                continue
            return
        raise Exception("The environment is not working properly")

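    # Usage sketch (the timing numbers are assumptions): with block_interval = 1 second,
    # env.check_block(need_number=10, multiple=3) polls roughly once per second for at
    # most 10 * 1 * 3 = 30 seconds and raises if no node has reached block 10 by then.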
    def backup_all_logs(self, case_name: str):
        """
        Download and archive the logs of all nodes
        """
        return self.backup_logs(self.get_all_nodes(), case_name)

    def backup_logs(self, node_list: List[Node], case_name):
        """
        Back up the logs of the given nodes
        :param node_list:
        :param case_name:
        """
        self.__check_log_path()

        def backup(node: Node):
            return node.backup_log()

        self.executor(backup, node_list)
        return self.__zip_all_log(case_name)

    def __check_log_path(self):
        if not os.path.exists(self.cfg.tmp_log):
            os.mkdir(self.cfg.tmp_log)
        else:
            shutil.rmtree(self.cfg.tmp_log)
            os.mkdir(self.cfg.tmp_log)
        if not os.path.exists(self.cfg.bug_log):
            os.mkdir(self.cfg.bug_log)

    def __zip_all_log(self, case_name):
        log.info("Start compressing.....")
        t = time.strftime("%Y%m%d%H%M%S", time.localtime())
        tar_name = "{}/{}_{}.tar.gz".format(self.cfg.bug_log, case_name, t)
        tar = tarfile.open(tar_name, "w:gz")
        tar.add(self.cfg.tmp_log, arcname=os.path.basename(self.cfg.tmp_log))
        tar.close()
        log.info("Compression completed")
        log.info("Start deleting the cache.....")
        shutil.rmtree(self.cfg.tmp_log)
        log.info("Delete cache complete")
        return os.path.basename(tar_name)

    def rewrite_genesis_file(self):
        """
        Rewrite genesis.json with the current initial nodes and account balances
        """
        log.info("rewrite genesis.json")
        self.genesis_config['config']['cbft']["initialNodes"] = self.get_init_nodes()
        # with open(self.cfg.address_file, "r", encoding="UTF-8") as f:
        #     key_dict = json.load(f)
        # account = key_dict["address"]
        # self.genesis_config['alloc'][account] = {"balance": str(99999999999999999999999999)}
        accounts = self.account.get_all_accounts()
        for account in accounts:
            self.genesis_config['alloc'][account['address']] = {"balance": str(account['balance'])}
        with open(self.cfg.genesis_tmp, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.genesis_config, indent=4))

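    # Rough shape of the alloc section written above (the address and balance are made up
    # for illustration):
    #   "alloc": {
    #       "0x1000000000000000000000000000000000000001": {"balance": "100000000000000000000"}
    #   }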
    def rewrite_static_nodes(self):
        """
        Rewrite static-nodes.json with the enodes of all nodes
        """
        log.info("rewrite static-nodes.json")
        static_nodes = self.get_static_nodes()
        with open(self.cfg.static_node_tmp, 'w', encoding='utf-8') as f:
            f.write(json.dumps(static_nodes, indent=4))

    def rewrite_config_json(self):
        """
        Rewrite config.json
        :return:
        """
        log.info("rewrite config.json")
        config_data = LoadFile(self.cfg.config_json_file).get_data()
        # config_data['node']['P2P']["BootstrapNodes"] = self.get_static_nodes()
        with open(self.cfg.config_json_tmp, 'w', encoding='utf-8') as f:
            f.write(json.dumps(config_data, indent=4))

    def __fill_node_config(self, node_config: dict):
        """
        Fill in the missing required fields of a node configuration entry
        :param node_config:
        """
        if not node_config.get("id") or not node_config.get("nodekey"):
            self.__is_update_node_file = True
            node_config["nodekey"], node_config["id"] = generate_key()
        if not node_config.get("blsprikey") or not node_config.get("blspubkey"):
            self.__is_update_node_file = True
            node_config["blsprikey"], node_config["blspubkey"] = generate_blskey()
        if not node_config.get("port"):
            self.__is_update_node_file = True
            node_config["port"] = 16789
        if not node_config.get("rpcport"):
            self.__is_update_node_file = True
            node_config["rpcport"] = 6789
        if not node_config.get("url"):
            self.__is_update_node_file = True
            node_config["url"] = "http://{}:{}".format(node_config["host"], node_config["rpcport"])
        if node_config.get("wsport"):
            self.__is_update_node_file = True
            node_config["wsurl"] = "ws://{}:{}".format(node_config["host"], node_config["wsport"])
        return node_config

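    # Minimal node entry this method can complete (the host value is an assumption; the
    # other fields are generated or defaulted as shown above):
    #
    #   consensus:
    #     - host: 10.10.8.1
    #       # after filling: nodekey/id, blsprikey/blspubkey, port=16789, rpcport=6789
    #       # and url are added, plus wsurl if a wsport was supplied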
    def __rewrite_node_file(self):
        log.info("rewrite node file")
        result, result_consensus_list, result_noconsensus_list = {}, [], []
        if len(self.consensus_node_config_list) >= 1:
            for node_config in self.consensus_node_config_list:
                result_consensus_list.append(self.__fill_node_config(node_config))
            result["consensus"] = result_consensus_list
        if self.noconsensus_node_config_list and len(self.noconsensus_node_config_list) >= 1:
            for node_config in self.noconsensus_node_config_list:
                result_noconsensus_list.append(self.__fill_node_config(node_config))
            result["noconsensus"] = result_noconsensus_list
        if self.__is_update_node_file:
            self.consensus_node_config_list = result_consensus_list
            self.noconsensus_node_config_list = result_noconsensus_list
            with open(self.cfg.node_file, encoding="utf-8", mode="w") as f:
                yaml.dump(result, f, Dumper=yaml.RoundTripDumper)

    def __compression(self):
        """
        Pack the environment files (platon binary, config.json, keystore) into a tar.gz archive
        """
        log.info("compress environment data")
        env_gz = os.path.join(self.cfg.env_tmp, self.cfg.env_id)
        if os.path.exists(env_gz):
            return
        os.makedirs(env_gz)
        data_dir = os.path.join(env_gz, "data")
        os.makedirs(data_dir)
        keystore_dir = os.path.join(data_dir, "keystore")
        os.makedirs(keystore_dir)
        keystore = os.path.join(keystore_dir, os.path.basename(self.cfg.address_file))
        shutil.copyfile(self.cfg.address_file, keystore)
        shutil.copyfile(self.cfg.platon_bin_file, os.path.join(env_gz, "platon"))
        shutil.copyfile(self.cfg.config_json_tmp, os.path.join(env_gz, "config.json"))
        t = tarfile.open(env_gz + ".tar.gz", "w:gz")
        t.add(env_gz, arcname=os.path.basename(env_gz))
        t.close()
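    # Layout of the package built above, as produced by the copy calls
    # (<env_id> stands for cfg.env_id):
    #
    #   <env_id>/platon
    #   <env_id>/config.json
    #   <env_id>/data/keystore/<address_file>
    #
    # The whole directory is then archived as <env_id>.tar.gz next to it.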


def create_env(conf_tmp=None, node_file=None, account_file=None, init_chain=True,
               install_dependency=False, install_supervisor=False, can_deploy=True) -> TestEnvironment:
    if not conf_tmp:
        conf_tmp = DEFAULT_CONF_TMP_DIR
    else:
        conf_tmp = ConfTmpDir(conf_tmp)
    cfg = TestConfig(conf_tmp=conf_tmp, install_supervisor=install_supervisor,
                     install_dependency=install_dependency, init_chain=init_chain, can_deploy=can_deploy)
    if node_file:
        cfg.node_file = node_file
    if account_file:
        cfg.account_file = account_file
    return TestEnvironment(cfg)


if __name__ == "__main__":
    from tests.lib import get_no_pledge_node, get_no_pledge_node_list, get_pledge_list, check_node_in_list
    node_filename = abspath("deploy/node/debug_4_4.yml")
    env = create_env(node_file=node_filename)
    # print(os.path.getctime(env.cfg.platon_bin_file))
    # new_cfg = copy.copy(env.cfg)
    # new_cfg.syncmode = "fast"
    # print(env.cfg.syncmode)
    log.info("deployment test")
    env.deploy_all()
    # node = env.get_consensus_node_by_index(0)
    # print(node.debug.economicConfig())
    # print(type(node.debug.economicConfig()))
    # print(node.node_mark)
    # address, prikey = env.account.generate_account(node.web3, 10**18*100000000000)
    # transaction_cfg = {"gasPrice": 3000000000000000, "gas": 1000000}
    # # print(node.pip.submitParam(node.node_id, "ddd", "Slashing", "SlashBlockReward", "1000", prikey, transaction_cfg))
    # print(node.pip.getGovernParamValue("Slashing", "SlashBlockReward", address))
    # print(node.pip.listGovernParam("Staking"))
    # from tests.lib.genesis import Genesis
    # from dacite import from_dict
    # genesis = from_dict(data_class=Genesis, data=env.genesis_config)
    # print(genesis.EconomicModel.Slashing.MaxEvidenceAge)
    # env.account.generate_account(env.get_a_normal_node().web3, 0)
    # log.info("account:{}".format(env.account.accounts))
    # env.deploy_all()
    # log.info("account:{}".format(env.account.accounts))
    # node = env.get_rand_node()
    # print(node.node_id)
    # print(env.normal_node_list[1].node_id)
    # print(env.get_normal_node_by_index(1).node_id)
    # print(get_no_pledge_node(env.get_all_nodes()))
    # print(get_no_pledge_node_list(env.get_all_nodes()))
    # print(get_pledge_list(node.ppos.getVerifierList))
    # print(check_node_in_list(node.node_id, node.ppos.getVerifierList))
    # print(env.block_numbers(env.normal_node_list))
    # print(env.block_numbers())
    # for node in env.consensus_node_list:
    #     print(node.node_id)
    # time.sleep(3000)
    # env.deploy_all()
    # d = env.block_numbers()
    # print(d)
    # node = env.get_rand_node()
    # node.create_keystore()
    # print(node.node_mark)
    # time.sleep(80)
    # log.info("shutdown test")
    # env.stop_all()
    # time.sleep(30)
    # log.info("test start without re-initializing the chain")
    # env.cfg.init_chain = False
    # env.start_all()
    # time.sleep(60)
    # d = env.block_numbers()
    # print(d)
    # log.info("restart test")
    # env.reset_all()
    # time.sleep(60)
    # d = env.block_numbers()
    # print(d)
    # log.info("clean database test")
    # env.clean_db_all()
    # log.info("database deleted successfully")
    # time.sleep(60)
    # env.cfg.init_chain = True
    # env.start_all()
    # time.sleep(30)
    # d = env.block_numbers()
    # print(d)
    # log.info("delete all data test")
    # env.clean_all()
    # log.info("data deleted successfully")
    # log.info("redeploy")
    # env.deploy_all()
    # d = env.block_numbers()
    # print(d)
    # time.sleep(60)
    # d = env.block_numbers()
    # print(d)
    # env.shutdown()