github.com/kubernetes-incubator/kube-aws@v0.16.4/e2e/run

#!/bin/bash

KUBE_AWS_CMD=${KUBE_AWS_CMD:-./bin/kube-aws}
E2E_DIR=$(cd $(dirname $0); pwd)
WORK_DIR=${E2E_DIR}/assets/${KUBE_AWS_CLUSTER_NAME}
TESTINFRA_DIR=${E2E_DIR}/testinfra
KUBE_AWS_TEST_INFRA_STACK_NAME=${KUBE_AWS_TEST_INFRA_STACK_NAME:-${KUBE_AWS_CLUSTER_NAME}-testinfra}
SRC_DIR=$(cd $(dirname $0); cd ..; pwd)
KUBECONFIG=${WORK_DIR}/kubeconfig
ETCD_COUNT=${ETCD_COUNT:-3}
CONTROLLER_COUNT=${CONTROLLER_COUNT:-2}
ETCD_VERSION=${ETCD_VERSION:-}

export KUBECONFIG

USAGE_EXAMPLE="KUBE_AWS_CLUSTER_NAME=kubeawstest1 KUBE_AWS_KEY_NAME=name/of/ec2/key KUBE_AWS_KMS_KEY_ARN=arn:aws:kms:us-west-1:<account id>:key/your-key KUBE_AWS_REGION=us-west-1 KUBE_AWS_AVAILABILITY_ZONE=us-west-1b $0 [init|configure|up|all]"

if [ "${KUBE_AWS_CLUSTER_NAME}" == "" ]; then
  echo "KUBE_AWS_CLUSTER_NAME is not set. Run this command like: $USAGE_EXAMPLE" 1>&2
  exit 1
fi

if [ "${KUBE_AWS_KEY_NAME}" == "" ]; then
  echo "KUBE_AWS_KEY_NAME is not set. Run this command like: $USAGE_EXAMPLE" 1>&2
  exit 1
fi

if [ "${KUBE_AWS_REGION}" == "" ]; then
  echo "KUBE_AWS_REGION is not set. Run this command like: $USAGE_EXAMPLE" 1>&2
  exit 1
fi
# set the AWS CLI default region to the region we are deploying to, otherwise a mismatch with the user env will cause all `aws` commands to fail
export AWS_DEFAULT_REGION=${KUBE_AWS_REGION}

if [ "${KUBE_AWS_AVAILABILITY_ZONE}" == "" ]; then
  echo "KUBE_AWS_AVAILABILITY_ZONE is not set. Run this command like: $USAGE_EXAMPLE" 1>&2
  exit 1
fi

if [ ! -e "${KUBE_AWS_CMD}" ]; then
  echo "${KUBE_AWS_CMD} does not exist." 1>&2
  exit 1
fi

KUBE_AWS_VERSION=$($KUBE_AWS_CMD version)
echo "Using the kube-aws command at ${KUBE_AWS_CMD} (${KUBE_AWS_VERSION}). Set KUBE_AWS_CMD=path/to/kube-aws to override."

EXTERNAL_DNS_NAME=${KUBE_AWS_CLUSTER_NAME}.${KUBE_AWS_DOMAIN}
echo "The Kubernetes API will be accessible via ${EXTERNAL_DNS_NAME}"

KUBE_AWS_S3_URI=${KUBE_AWS_S3_DIR_URI}/${KUBE_AWS_CLUSTER_NAME}
echo "CloudFormation stack templates will be uploaded to ${KUBE_AWS_S3_URI}"

build() {
  echo Building kube-aws
  cd ${SRC_DIR}
  ./build
}

main_stack_name() {
  echo $KUBE_AWS_CLUSTER_NAME
}

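# Prints the CloudFormation status of the main stack (e.g. CREATE_COMPLETE).
# If the stack does not exist, describe-stacks fails and nothing is printed;
# the callers below treat an empty result as "stack does not exist".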
main_status() {
  aws cloudformation describe-stacks --stack-name $(main_stack_name) --output json | jq -rc '.Stacks[0].StackStatus'
}

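# Brings the cluster up end to end, but only when the main stack does not exist yet:
# init -> configure -> up, followed by an optional scale_out when KUBE_AWS_UPDATE is set.
# Each step is skipped if its output (cluster.yaml, rendered assets, the stack) is already present.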
main_all() {
  status=$(aws cloudformation describe-stacks --stack-name $(main_stack_name) --output json | jq -rc '.Stacks[0].StackStatus')
  if [ "$status" = "" ]; then
    if [ ! -e "${WORK_DIR}/cluster.yaml" ]; then
      init
    fi
    if ! [ -d "${WORK_DIR}/credentials" -a -d "${WORK_DIR}/userdata" -a -e "${WORK_DIR}/kubeconfig" -a -d "${WORK_DIR}/stack-templates" ]; then
      configure
    fi
    if [ "$(main_status)" = "" ]; then
      up
    fi
    if [ "$KUBE_AWS_UPDATE" != "" ]; then
      scale_out
    fi
  fi
}

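# Generates a fresh cluster.yaml in ${WORK_DIR} via `kube-aws init` and then appends
# the E2E-specific settings through customize_cluster_yaml.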
init() {
  echo "Ensuring the kube-aws assets directory ${WORK_DIR} exists"
  mkdir -p ${WORK_DIR}

  cd ${WORK_DIR}

  ${KUBE_AWS_CMD} init \
    --cluster-name ${KUBE_AWS_CLUSTER_NAME} \
    --external-dns-name ${EXTERNAL_DNS_NAME} \
    --region ${KUBE_AWS_REGION} \
    --availability-zone ${KUBE_AWS_AVAILABILITY_ZONE} \
    --key-name ${KUBE_AWS_KEY_NAME} \
    --kms-key-arn ${KUBE_AWS_KMS_KEY_ARN} \
    --hosted-zone-id ${KUBE_AWS_HOSTED_ZONE_ID}

  if [ "${KUBE_AWS_USE_CALICO}" != "" ]; then
    echo 'useCalico: true' >> cluster.yaml
  fi

  customize_cluster_yaml
}

regenerate_stack() {
  cd ${WORK_DIR}

  ${KUBE_AWS_CMD} render stack
}


regenerate_credentials() {
  cd ${WORK_DIR}

  rm -rf ./credentials

  ${KUBE_AWS_CMD} render credentials --generate-ca
}

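# Renders everything needed to create the cluster from cluster.yaml: stack templates and
# TLS credentials (re-rendered against a pre-existing CA when EXISTING_CA is set), then
# validates the result and exports the CloudFormation templates (`up --export`) without
# actually creating the stack.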
configure() {
  cd ${WORK_DIR}

  rm -rf ./kubeconfig ./credentials ./userdata ./stack-templates ./exported

  ${KUBE_AWS_CMD} render stack
  ${KUBE_AWS_CMD} render credentials --generate-ca

  if [ "${EXISTING_CA}" != "" ]; then
    mv ./credentials/ca-key.pem my-ca-key.pem
    mv ./credentials/ca.pem my-ca.pem
    rm -rf ./credentials
    ${KUBE_AWS_CMD} render credentials --ca-key-path=./my-ca-key.pem --ca-cert-path=./my-ca.pem
  fi

  validate

  ${KUBE_AWS_CMD} up --export --s3-uri ${KUBE_AWS_S3_URI} --pretty-print

  echo Generated configuration files in ${WORK_DIR}:
  find .
}

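# Thin wrapper so that arbitrary kube-aws subcommands can be run inside the work directory,
# e.g. by passing `kube-aws render stack` as this script's arguments (see the dispatch at
# the bottom of this file).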
kube-aws() {
  mkdir -p ${WORK_DIR}
  cd ${WORK_DIR}
  ${KUBE_AWS_CMD} "$@"
}

validate() {
  cd ${WORK_DIR}
  ${KUBE_AWS_CMD} validate --s3-uri ${KUBE_AWS_S3_URI}
}

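# Appends the E2E-specific configuration to the cluster.yaml generated by `kube-aws init`:
# a set of worker node pools (ASG- and spot-fleet-backed), controller/etcd settings, and,
# when KUBE_AWS_DEPLOY_TO_EXISTING_VPC is set, the VPC/ELB/ALB wiring provided by the
# testinfra stack.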
customize_cluster_yaml() {
  echo Writing to $(pwd)/cluster.yaml

  if [ "${KUBE_AWS_DEPLOY_TO_EXISTING_VPC}" != "" ]; then
    echo -e "vpc:\n  id: $(testinfra_vpc)" >> cluster.yaml
    echo -e "routeTableId: $(testinfra_public_routetable)" >> cluster.yaml
    echo -e "workerSecurityGroupIds:\n- $(testinfra_glue_sg)" >> cluster.yaml
  fi

  if [ "${LIMIT_SSH_ACCESS}" != "" ]; then
    ip=$(curl -s https://api.ipify.org)
    echo -e "sshAccessAllowedSourceCIDRs:\n- $ip/32" >> cluster.yaml
  fi

  echo -e "
worker:
  nodePools:
  - name: asg1
    autoscaling:
      # this pool is an autoscaling-target of CA
      clusterAutoscaler:
        enabled: true
    # give this pool permissions to run CA
    clusterAutoscalerSupport:
      enabled: true
  - name: asg2
    autoScalingGroup:
      minSize: 0
      maxSize: 2
  - name: asg3
    count: 1
    waitSignal:
      enabled: true
    awsNodeLabels:
      enabled: true
    awsEnvironment:
      enabled: true
      environment:
        CFNSTACK: '{\"Ref\":\"AWS::StackId\"}'
    nodeDrainer:
      enabled: true
    nodeLabels:
      kube-aws.coreos.com/role: worker
      kube-aws.coreos.com/reservation-type: on-demand" >> cluster.yaml

  if [ "${KUBE_AWS_DEPLOY_TO_EXISTING_VPC}" != "" ]; then
    echo -e "
    securityGroupIds:
    - $(testinfra_glue_sg)
    loadBalancer:
      enabled: true
      names:
      - $(testinfra_public_elb)
      securityGroupIds:
      - $(testinfra_public_elb_backend_sg)
    targetGroup:
      enabled: true
      arns:
      - $(testinfra_target_group)
      securityGroupIds:
      - $(testinfra_public_alb_backend_sg)" >> cluster.yaml
  fi

  echo -e "
  - name: fleet1
    spotFleet:
      targetCapacity: 1
    clusterAutoscalerSupport:
      enabled: true
  - name: fleet2
    spotFleet:
      targetCapacity: 2
    awsNodeLabels:
      enabled: true
    awsEnvironment:
      enabled: true
      environment:
        CFNSTACK: '{\"Ref\":\"AWS::StackId\"}'
    autoscaling:
      clusterAutoscaler:
        enabled: true
    clusterAutoscalerSupport:
      enabled: true
    nodeDrainer:
      enabled: true
    nodeLabels:
      kube-aws.coreos.com/role: worker
      kube-aws.coreos.com/reservation-type: spot" >> cluster.yaml

  if [ "${KUBE_AWS_DEPLOY_TO_EXISTING_VPC}" != "" ]; then
    echo -e "
    securityGroupIds:
    - $(testinfra_glue_sg)
    loadBalancer:
      enabled: true
      names:
      - $(testinfra_public_elb)
      securityGroupIds:
      - $(testinfra_public_elb_backend_sg)
    targetGroup:
      enabled: true
      arns:
      - $(testinfra_target_group)
      securityGroupIds:
      - $(testinfra_public_alb_backend_sg)" >> cluster.yaml
  fi

  echo -e "
# controller configuration
controller:
  count: $CONTROLLER_COUNT
  nodeLabels:
    kube-aws.coreos.com/role: controller
waitSignal:
  enabled: true
kubeResourcesAutosave:
  enabled: true
experimental:
  awsNodeLabels:
    enabled: true
  auditLog:
    enabled: true" >> cluster.yaml

  if [ "${ENCRYPTION_AT_REST}" != "" ]; then
    echo -e "
  encryptionAtRest:
    enabled: true" >> cluster.yaml
  fi

  echo -e "
addons:
  clusterAutoscaler:
    enabled: true" >> cluster.yaml

  echo -e "
cloudWatchLogging:
  enabled: true
amazonSsmAgent:
  enabled: true
# etcd configuration
etcd:
  count: $ETCD_COUNT" >> cluster.yaml

  if [ "${ETCD_VERSION}" != "" ]; then
    echo -e "  version: ${ETCD_VERSION}" >> cluster.yaml
  fi

  if [ "${ETCD_DISASTER_RECOVERY_AUTOMATED}" != "" ]; then
    echo -e "  disasterRecovery:
    automated: true" >> cluster.yaml
  fi


  if [ "${ETCD_SNAPSHOT_AUTOMATED}" != "" ]; then
    echo -e "  snapshot:
    automated: true" >> cluster.yaml
  fi

  if [ "${ETCD_MEMBER_IDENTITY_PROVIDER}" != "" ]; then
    echo -e "  memberIdentityProvider: ${ETCD_MEMBER_IDENTITY_PROVIDER}" >> cluster.yaml
  fi

  if [ "${ETCD_INTERNAL_DOMAIN_NAME}" != "" ]; then
    echo -e "  internalDomainName: ${ETCD_INTERNAL_DOMAIN_NAME}" >> cluster.yaml
  fi
}

clean() {
  cd ${WORK_DIR}/..
  if [ -d "${KUBE_AWS_CLUSTER_NAME}" ]; then
    echo Removing the directory "${WORK_DIR}"
    rm -Rf ./${KUBE_AWS_CLUSTER_NAME}/*
  fi
}

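# Creates the CloudFormation stack(s) with `kube-aws up` and then polls the Kubernetes API
# via `kubectl get no` until it responds, reporting how long the API took to come up.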
up() {
  cd ${WORK_DIR}

  starttime=$(date +%s)

  ${KUBE_AWS_CMD} up --s3-uri ${KUBE_AWS_S3_URI} --pretty-print

  set +vx

  printf 'Waiting for the Kubernetes API to be accessible'

  while ! kubectl get no 2>/dev/null; do
    sleep 10
    printf '.'
  done

  endtime=$(date +%s)

  echo "Done. It took $((endtime - starttime)) seconds for the apiserver to come into service."

  ${KUBE_AWS_CMD} status

  set -vx
}

status() {
  cd ${WORK_DIR}
  ${KUBE_AWS_CMD} status
}

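# Deletes the main stack (or just waits if a deletion is already in progress) and then
# removes the generated assets from the work directory.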
main_destroy() {
  status=$(main_status)

  if [ "$status" != "" ]; then
    if [ "$status" != "DELETE_IN_PROGRESS" ]; then
        aws cloudformation delete-stack --stack-name $(main_stack_name)
        aws cloudformation wait stack-delete-complete --stack-name $(main_stack_name)
    else
        aws cloudformation wait stack-delete-complete --stack-name $(main_stack_name)
    fi
  else
    echo "$(main_stack_name) does not exist. Skipping."
  fi

  cd ${WORK_DIR}
  rm -Rf cluster.yaml userdata/ credentials/ stack-templates/
}

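# Applies the current cluster.yaml to the existing stack. Errors from `kube-aws update`
# (e.g. when there is nothing to update) are tolerated so that `set -e` does not abort the
# run; the wait that follows blocks until the stack settles.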
update() {
  cd ${WORK_DIR}

  ${KUBE_AWS_CMD} update --s3-uri ${KUBE_AWS_S3_URI} --pretty-print || true
  aws cloudformation wait stack-update-complete --stack-name ${KUBE_AWS_CLUSTER_NAME}
}

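# Edits cluster.yaml in place with sed to bump the node counts, shows the resulting diff,
# applies the change with `kube-aws update`, and waits for the API to come back.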
scale_out() {
  cd ${WORK_DIR}

  SED_CMD="sed -e 's/count: 1/count: 2/' -e 's/controllerCount: 2/controllerCount: 3/'"
  diff --unified cluster.yaml <(cat cluster.yaml | sh -c "${SED_CMD}") || true
  # -i.bak edits in place and works with both GNU and BSD sed (a cluster.yaml.bak backup is kept)
  sh -c "${SED_CMD} -i.bak cluster.yaml"
  ${KUBE_AWS_CMD} update --s3-uri ${KUBE_AWS_S3_URI}
  aws cloudformation wait stack-update-complete --stack-name ${KUBE_AWS_CLUSTER_NAME}

  printf 'Waiting for the Kubernetes API to be accessible'
  while ! kubectl get no 2>/dev/null; do
    sleep 10
    printf '.'
  done
  echo done
}

test-destruction() {
  aws cloudformation wait stack-delete-complete --stack-name ${KUBE_AWS_CLUSTER_NAME}
}

# Usage: DOCKER_REPO=quay.io/mumoshu/ SSH_PRIVATE_KEY=path/to/private/key ./e2e run conformance
conformance() {
  cd ${E2E_DIR}/kubernetes

  if [ "$DOCKER_REPO" == "" ]; then
    echo DOCKER_REPO is not set.
    exit 1
  fi

  if [ "$SSH_PRIVATE_KEY" == "" ]; then
    echo SSH_PRIVATE_KEY is not set.
    exit 1
  fi

  if [ ! -f "$SSH_PRIVATE_KEY" ]; then
    echo ${SSH_PRIVATE_KEY} does not exist.
    exit 1
  fi

  echo Opening ingress on 4194 and 10250...

  # Authorize these ingresses for E2E testing or it'll end up failing like:
  #
  # Summarizing 2 Failures:
  #
  # [Fail] [k8s.io] Proxy version v1 [It] should proxy logs on node using proxy subresource [Conformance]
  # /go/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/proxy.go:325
  #
  # [Fail] [k8s.io] Proxy version v1 [It] should proxy logs on node with explicit kubelet port [Conformance]
  # /go/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/proxy.go:325
  #
  # Ran 117 of 473 Specs in 3529.785 seconds
  # FAIL! -- 115 Passed | 2 Failed | 0 Pending | 356 Skipped --- FAIL: TestE2E (3529.90s)
  #
  group_id=$(aws cloudformation --output json describe-stack-resources --stack-name $(controlplane_stack_name) | jq -r '.StackResources[] | select(.LogicalResourceId == "SecurityGroupController").PhysicalResourceId')
  aws ec2 authorize-security-group-ingress --group-id ${group_id} --protocol tcp --port 4194 --source-group ${group_id} || echo 'skipping authorization for 4194'
  aws ec2 authorize-security-group-ingress --group-id ${group_id} --protocol tcp --port 10250 --source-group ${group_id} || echo 'skipping authorization for 10250'

  master_host=$(controller_host)

  echo Connecting to $master_host via SSH

  KUBE_AWS_ASSETS=${WORK_DIR} MASTER_HOST=$master_host make run-remotely
}

conformance_result() {
  cd ${E2E_DIR}/kubernetes

  master_host=$(controller_host)

  echo Connecting to $master_host via SSH

  KUBE_AWS_ASSETS=${WORK_DIR} MASTER_HOST=$master_host make show-log
}

controlplane_stack_name() {
  aws cloudformation --output json describe-stacks --stack-name ${KUBE_AWS_CLUSTER_NAME} | jq -r '.Stacks[].Outputs[] | select (.OutputKey == "ControlPlaneStackName").OutputValue'
}

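# Prints the public IP of the first running EC2 instance that belongs to the control-plane
# stack and to the auto-scaling group whose CloudFormation logical ID is passed as $1
# (e.g. Controllers or Workers), matched via the aws:cloudformation:* tags.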
first_host_for_asg() {
  controlplane_stack_name=$(controlplane_stack_name)
  aws ec2 describe-instances --output json --query "Reservations[].Instances[]
      | [?Tags[?Key==\`aws:cloudformation:stack-name\`].Value|[0]==\`${controlplane_stack_name}\`]
      | [?Tags[?Key==\`aws:cloudformation:logical-id\`].Value|[0]==\`${1}\`][]
      | [?State.Name==\`running\`][]
      | []" | jq -r 'map({InstanceId: .InstanceId, PublicIpAddress: .PublicIpAddress}) | first | .PublicIpAddress'
}

controller_host() {
  first_host_for_asg Controllers
}

worker_host() {
  first_host_for_asg Workers
}

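# SSH into the first controller/worker node as the core user. KUBE_AWS_SSH_KEY should point
# at the private key corresponding to KUBE_AWS_KEY_NAME; host key checking is disabled since
# test instances come and go.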
ssh_controller() {
  ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${KUBE_AWS_SSH_KEY} core@$(controller_host) "$@"
}

ssh_worker() {
  ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${KUBE_AWS_SSH_KEY} core@$(worker_host) "$@"
}

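# Creates the shared test-infrastructure stack from testinfra/stack-template.yaml and waits
# for it to complete. It provides the VPC, route table, load balancers, and security groups
# consumed via the testinfra_* helpers when KUBE_AWS_DEPLOY_TO_EXISTING_VPC is set.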
testinfra_up() {
  cd ${TESTINFRA_DIR}

  aws cloudformation create-stack \
    --template-body file://$(pwd)/stack-template.yaml \
    --stack-name ${KUBE_AWS_TEST_INFRA_STACK_NAME} \
    --parameter ParameterKey=AZ1,ParameterValue=${KUBE_AWS_AZ_1} ParameterKey=Id,ParameterValue=${KUBE_AWS_CLUSTER_NAME}-infra
  aws cloudformation wait stack-create-complete \
    --stack-name ${KUBE_AWS_TEST_INFRA_STACK_NAME}
}

testinfra_update() {
  cd ${TESTINFRA_DIR}

  aws cloudformation update-stack \
    --template-body file://$(pwd)/stack-template.yaml \
    --stack-name ${KUBE_AWS_TEST_INFRA_STACK_NAME} \
    --parameter ParameterKey=AZ1,ParameterValue=${KUBE_AWS_AZ_1}
  aws cloudformation wait stack-update-complete \
    --stack-name ${KUBE_AWS_TEST_INFRA_STACK_NAME}
}

testinfra_destroy() {
  aws cloudformation delete-stack --stack-name ${KUBE_AWS_TEST_INFRA_STACK_NAME}
  aws cloudformation wait stack-delete-complete \
    --stack-name ${KUBE_AWS_TEST_INFRA_STACK_NAME}
}

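# Looks up a single output value of the testinfra stack by its OutputKey; the testinfra_*
# getters below wrap it for each output the cluster configuration needs.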
testinfra_output() {
  aws cloudformation describe-stacks --stack-name ${KUBE_AWS_TEST_INFRA_STACK_NAME} | jq -r '.Stacks[0].Outputs[] | select(.OutputKey == "'$1'").OutputValue'
}

testinfra_vpc() {
  testinfra_output VPC
}

testinfra_public_routetable() {
  testinfra_output PublicRouteTable
}

testinfra_public_elb_backend_sg() {
  testinfra_output PublicELBBackendSG
}

testinfra_public_alb_backend_sg() {
  testinfra_output PublicALBBackendSG
}

testinfra_public_elb() {
  testinfra_output PublicELB
}

testinfra_target_group() {
  testinfra_output TargetGroup
}

testinfra_glue_sg() {
  testinfra_output GlueSG
}

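# The full E2E flow: build kube-aws from source, optionally create the test infrastructure,
# create the cluster (main_all), and run the conformance suite against it.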
all() {
  build

  if [ "${KUBE_AWS_DEPLOY_TO_EXISTING_VPC}" != "" ]; then
    testinfra_up
  fi

  main_all
  conformance
}

rerun() {
  all_destroy
  all
}

all_destroy() {
  main_destroy
  if [ "${KUBE_AWS_DEPLOY_TO_EXISTING_VPC}" != "" ]; then
    testinfra_destroy
  fi
}

kubesys() {
  kubectl --namespace kube-system "$@"
}

if [ "$1" == "" ]; then
  echo "Usage: $USAGE_EXAMPLE" 1>&2
  exit 1
fi

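# Trace every command (-v -x) and abort on errors (-e), then dispatch to the function named
# by the arguments, e.g. passing `all`, `conformance`, or `ssh_controller` as the first argument.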
set -vxe

"$@"