github.com/pingcap/ticdc@v0.0.0-20220526033649-485a10ef2652/tests/_utils/start_tidb_cluster_impl (about)

#!/bin/bash

# Starts the upstream + downstream TiDB test clusters used by integration tests.
#
# --workdir: work directory (all data dirs, logs and generated configs go here)
# --tidb-config: path to tidb config file (optional; a default is generated)
# --pd-config: path to pd config file (optional; a default is generated)
# --multiple-upstream-pd: whether to deploy multiple pd severs in upstream
#                         ("true" deploys 3 PD members, anything else deploys 1)

set -e

OUT_DIR=
tidb_config=
pd_config=
multiple_upstream_pd=

# Two-token flag parsing: each case arm consumes the flag's VALUE with its own
# `shift`; the `shift` at the bottom of the loop consumes the flag itself.
# If a flag is given without a value, the bottom `shift` fails and we abort.
while [[ ${1} ]]; do
    case "${1}" in
        --workdir)
            OUT_DIR=${2}
            shift
            ;;
        --tidb-config)
            tidb_config=${2}
            shift
            ;;
        --pd-config)
            pd_config=${2}
            shift
            ;;
        --multiple-upstream-pd)
            multiple_upstream_pd=${2}
            shift
            ;;
        *)
            echo "Unknown parameter: ${1}" >&2
            exit 1
    esac

    if ! shift; then
        echo 'Missing parameter argument.' >&2
        exit 1
    fi
done
    42  
    43  CUR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
    44  source $CUR/../_utils/test_prepare
    45  
    46  stop_tidb_cluster
    47  
    48  cd $OUT_DIR && echo "start tidb cluster in $OUT_DIR"
    49  
    50  
    51  # pd server config file
    52  if [[ "$pd_config" != "" ]]; then
    53      cat $pd_config > $OUT_DIR/pd-config.toml
    54  else
    55      cat - >"$OUT_DIR/pd-config.toml" <<EOF
    56  [replication]
    57  # Set it to 1 to make sure we have enough replicas to run placement-rules.
    58  max-replicas = 1
    59  enable-placement-rules = true
    60  EOF
    61  fi
    62  
echo "Starting Upstream PD..."
# With --multiple-upstream-pd=true a 3-member PD cluster is deployed,
# otherwise a single member. initial_cluster lists every member's peer URL.
if [[ "$multiple_upstream_pd" == "true" ]]; then
    pd_count=3
    initial_cluster="pd1=http://${UP_PD_HOST_1}:${UP_PD_PEER_PORT_1},pd2=http://${UP_PD_HOST_2}:${UP_PD_PEER_PORT_2},pd3=http://${UP_PD_HOST_3}:${UP_PD_PEER_PORT_3}"
else
    pd_count=1
    initial_cluster="pd1=http://${UP_PD_HOST_1}:${UP_PD_PEER_PORT_1}"
fi
for idx in $(seq 1 $pd_count); do
    # ${!host}, ${!port}, ${!peer_port} below are bash indirect expansions:
    # they read the per-index UP_PD_HOST_<idx>/UP_PD_PORT_<idx>/
    # UP_PD_PEER_PORT_<idx> variables provided by test_prepare.
    host="UP_PD_HOST_$idx"
    port="UP_PD_PORT_$idx"
    peer_port="UP_PD_PEER_PORT_$idx"
    # Each pd-server runs in the background; readiness is polled below.
    pd-server \
        --advertise-client-urls http://${!host}:${!port} \
        --client-urls http://0.0.0.0:${!port} \
        --advertise-peer-urls http://${!host}:${!peer_port} \
        --peer-urls http://0.0.0.0:${!peer_port} \
        --config "$OUT_DIR/pd-config.toml" \
        --log-file "$OUT_DIR/pd$idx.log" \
        --data-dir "$OUT_DIR/pd$idx" \
        --name="pd$idx" \
        --initial-cluster=${initial_cluster} &
done

echo "Starting Downstream PD..."
# Single downstream PD; started without --initial-cluster.
pd-server \
    --advertise-client-urls http://${DOWN_PD_HOST}:${DOWN_PD_PORT} \
    --client-urls http://0.0.0.0:${DOWN_PD_PORT} \
    --advertise-peer-urls http://${DOWN_PD_HOST}:${DOWN_PD_PEER_PORT} \
    --peer-urls http://0.0.0.0:${DOWN_PD_PEER_PORT} \
    --config "$OUT_DIR/pd-config.toml" \
    --log-file "$OUT_DIR/down_pd.log" \
    --data-dir "$OUT_DIR/down_pd" &

# wait until PD is online...
# NOTE(review): these wait loops have no upper bound — the script hangs
# forever if a PD never becomes healthy; the CI job timeout is the only limit.
for idx in $(seq 1 $pd_count); do
    host="UP_PD_HOST_$idx"
    port="UP_PD_PORT_$idx"

    # First wait until the PD HTTP API answers at all...
    while ! curl -o /dev/null -sf http://${!host}:${!port}/pd/api/v1/version; do
        sleep 1
    done

    # ...then wait until /pd/health reports "health": true for the member.
    while [ -z "$(curl http://${!host}:${!port}/pd/health 2> /dev/null | grep 'health' | grep 'true')" ]; do
        sleep 1
    done
done

while ! curl -o /dev/null -sf http://${DOWN_PD_HOST}:${DOWN_PD_PORT}/pd/api/v1/version; do
    sleep 1
done

while [ -z "$(curl http://${DOWN_PD_HOST}:${DOWN_PD_PORT}/pd/health 2> /dev/null | grep 'health' | grep 'true')" ]; do
    sleep 1
done
   118  
   119  # Tries to limit the max number of open files under the system limit
   120  cat - >"$OUT_DIR/tikv-config.toml" <<EOF
   121  [storage]
   122  # Disable creating a large temp file.
   123  reserve-space = "0MB"
   124  [rocksdb]
   125  max-open-files = 4096
   126  [raftdb]
   127  max-open-files = 4096
   128  [raftstore]
   129  # true (default value) for high reliability, this can prevent data loss when power failure.
   130  sync-log = false
   131  EOF
   132  
   133  # tidb server config file
   134  if [[ "$tidb_config" != "" ]]; then
   135      cat $tidb_config > $OUT_DIR/tidb-config.toml
   136  else
   137      cat - >"$OUT_DIR/tidb-config.toml" <<EOF
   138  split-table = true
   139  alter-primary-key = true
   140  new_collations_enabled_on_first_bootstrap = true
   141  EOF
   142  fi
   143  
echo "Starting Upstream TiKV..."
# Three upstream TiKV instances, all registering with the first upstream PD.
# ${!host} etc. are indirect expansions of UP_TIKV_HOST_<idx>/UP_TIKV_PORT_<idx>/
# UP_TIKV_STATUS_PORT_<idx> from test_prepare.
for idx in $(seq 1 3); do
    host="UP_TIKV_HOST_$idx"
    port="UP_TIKV_PORT_$idx"
    status_port="UP_TIKV_STATUS_PORT_$idx"
    tikv-server \
        --pd ${UP_PD_HOST_1}:${UP_PD_PORT_1} \
        -A ${!host}:${!port} \
        --status-addr ${!host}:${!status_port} \
        --log-file "$OUT_DIR/tikv$idx.log" \
        --log-level debug \
        -C "$OUT_DIR/tikv-config.toml" \
        -s "$OUT_DIR/tikv$idx" &
done

echo "Starting Downstream TiKV..."
# Single downstream TiKV, registering with the downstream PD.
tikv-server \
    --pd ${DOWN_PD_HOST}:${DOWN_PD_PORT} \
    -A ${DOWN_TIKV_HOST}:${DOWN_TIKV_PORT} \
    --status-addr ${DOWN_TIKV_HOST}:${DOWN_TIKV_STATUS_PORT} \
    --log-file "$OUT_DIR/tikv_down.log" \
    --log-level debug \
    -C "$OUT_DIR/tikv-config.toml" \
    -s "$OUT_DIR/tikv_down" &

# Brief pause before starting TiDB — presumably to give the TiKV processes
# time to register with PD; TiDB readiness is verified separately below.
sleep 2

echo "Starting Upstream TiDB..."
# Two upstream TiDB instances share the same cluster: the main one on
# UP_TIDB_PORT and an "other" one on UP_TIDB_OTHER_PORT.
tidb-server \
    -P ${UP_TIDB_PORT} \
    -config "$OUT_DIR/tidb-config.toml" \
    --store tikv \
    --path ${UP_PD_HOST_1}:${UP_PD_PORT_1} \
    --status=${UP_TIDB_STATUS} \
    --log-file "$OUT_DIR/tidb.log" &

tidb-server \
    -P ${UP_TIDB_OTHER_PORT} \
    -config "$OUT_DIR/tidb-config.toml" \
    --store tikv \
    --path ${UP_PD_HOST_1}:${UP_PD_PORT_1} \
    --status=${UP_TIDB_OTHER_STATUS} \
    --log-file "$OUT_DIR/tidb_other.log" &

echo "Starting Downstream TiDB..."
tidb-server \
    -P ${DOWN_TIDB_PORT} \
    -config "$OUT_DIR/tidb-config.toml" \
    --store tikv \
    --path ${DOWN_PD_HOST}:${DOWN_PD_PORT} \
    --status=${DOWN_TIDB_STATUS} \
    --log-file "$OUT_DIR/tidb_down.log" &
   196  
   197  echo "Verifying Upstream TiDB is started..."
   198  i=0
   199  while ! mysql -uroot -h${UP_TIDB_HOST} -P${UP_TIDB_PORT} --default-character-set utf8mb4 -e 'select * from mysql.tidb;'; do
   200      i=$((i + 1))
   201      if [ "$i" -gt 60 ]; then
   202          echo 'Failed to start upstream TiDB'
   203          exit 2
   204      fi
   205      sleep 2
   206  done
   207  
   208  i=0
   209  while ! mysql -uroot -h${UP_TIDB_HOST} -P${UP_TIDB_OTHER_PORT} --default-character-set utf8mb4 -e 'select * from mysql.tidb;'; do
   210      i=$((i + 1))
   211      if [ "$i" -gt 60 ]; then
   212          echo 'Failed to start upstream TiDB'
   213          exit 2
   214      fi
   215      sleep 2
   216  done
   217  
   218  echo "Verifying Downstream TiDB is started..."
   219  i=0
   220  while ! mysql -uroot -h${DOWN_TIDB_HOST} -P${DOWN_TIDB_PORT} --default-character-set utf8mb4 -e 'select * from mysql.tidb;'; do
   221      i=$((i + 1))
   222      if [ "$i" -gt 60 ]; then
   223          echo 'Failed to start downstream TiDB'
   224          exit 1
   225      fi
   226      sleep 2
   227  done
   228  
   229  run_sql "update mysql.tidb set variable_value='60m' where variable_name='tikv_gc_life_time';" ${UP_TIDB_HOST} ${UP_TIDB_PORT}
   230  run_sql "update mysql.tidb set variable_value='60m' where variable_name='tikv_gc_life_time';" ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}
   231  run_sql "CREATE user 'normal'@'%' identified by '123456';" ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}
   232  run_sql "GRANT select,insert,update,delete,index,create,drop,alter,create view ON *.* TO 'normal'@'%';" ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}
   233  run_sql "FLUSH privileges" ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}
   234  
   235  cat - >"$OUT_DIR/tiflash-config.toml" <<EOF
   236  tmp_path = "${OUT_DIR}/tiflash/tmp"
   237  display_name = "TiFlash"
   238  default_profile = "default"
   239  users_config = "${OUT_DIR}/tiflash/users.toml"
   240  path = "${OUT_DIR}/tiflash/db"
   241  mark_cache_size = 5368709120
   242  listen_host = "127.0.0.1"
   243  tcp_port = 5000
   244  http_port = 4500
   245  interserver_http_port = 5500
   246  
   247  [flash]
   248  tidb_status_addr = "127.0.0.1:8500"
   249  service_addr = "127.0.0.1:9500"
   250  overlap_threshold = 0.6
   251  
   252  [flash.flash_cluster]
   253  master_ttl = 60
   254  refresh_interval = 20
   255  update_rule_interval = 5
   256  cluster_manager_path = "${CUR}/../../bin/flash_cluster_manager"
   257  
   258  [flash.proxy]
   259  addr = "127.0.0.1:9000"
   260  advertise-addr = "127.0.0.1:9000"
   261  data-dir = "${OUT_DIR}/tiflash/db/proxy"
   262  config = "${OUT_DIR}/tiflash-proxy.toml"
   263  log-file = "${OUT_DIR}/tiflash/log/proxy.log"
   264  
   265  [logger]
   266  level = "trace"
   267  log = "${OUT_DIR}/tiflash/log/server.log"
   268  errorlog = "${OUT_DIR}/tiflash/log/error.log"
   269  size = "4000M"
   270  count = 10
   271  
   272  [application]
   273  runAsDaemon = true
   274  
   275  [raft]
   276  kvstore_path = "${OUT_DIR}/tiflash/kvstore"
   277  pd_addr = "${UP_PD_HOST_1}:${UP_PD_PORT_1}"
   278  ignore_databases = "system,default"
   279  storage_engine = "dt"
   280  EOF
   281  
   282  cat - >"$OUT_DIR/tiflash-proxy.toml" <<EOF
   283  log-level = "info"
   284  
   285  [server]
   286  engine-addr = "127.0.0.1:9500"
   287  status-addr = "127.0.0.1:17000"
   288  
   289  [raftstore]
   290  sync-log = true
   291  capacity = "100GB"
   292  hibernate-regions = false
   293  
   294  [rocksdb]
   295  wal-dir = ""
   296  max-open-files = 1000
   297  
   298  [rocksdb.defaultcf]
   299  block-cache-size = "10GB"
   300  
   301  [rocksdb.lockcf]
   302  block-cache-size = "4GB"
   303  
   304  [rocksdb.writecf]
   305  block-cache-size = "4GB"
   306  
   307  [raftdb]
   308  max-open-files = 1000
   309  
   310  [raftdb.defaultcf]
   311  block-cache-size = "1GB"
   312  EOF
   313  
   314  echo "Starting Upstream TiFlash..."
   315  mkdir -p ${OUT_DIR}/tiflash/ && cp $CUR/tiflash-users.toml ${OUT_DIR}/tiflash/users.toml
   316  tiflash server --config-file "$OUT_DIR/tiflash-config.toml" &
   317  
   318  echo "Verifying Upstream TiFlash is started..."
   319  # Make sure TiFlash is started.
   320  while ! curl -o /dev/null -sf http://127.0.0.1:17000/metrics 1>/dev/null 2>&1; do
   321      i=$((i + 1))
   322      if [ "$i" -gt 10 ]; then
   323          cat ${OUT_DIR}/tiflash/log/proxy.log
   324          cat ${OUT_DIR}/tiflash/log/server.log
   325          cat ${OUT_DIR}/tiflash/log/error.log
   326          echo 'Failed to start TiFlash'
   327          exit 1
   328      fi
   329      sleep 2
   330  done
   331  
   332  echo "Starting CDC state checker..."
   333  cd $CUR/../../testing_utils/cdc_state_checker
   334  if [ ! -f ./cdc_state_checker ]; then
   335    GO111MODULE=on go build
   336  fi
   337  ./cdc_state_checker -pd ${UP_PD_HOST_1}:${UP_PD_PEER_PORT_1} > $OUT_DIR/cdc_etcd_check.log &
   338  cd $OUT_DIR