k8s.io/kubernetes@v1.29.3/test/cmd/core.sh

#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

run_configmap_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing configmaps"
  kubectl create -f test/fixtures/doc-yaml/user-guide/configmap/configmap.yaml
  kube::test::get_object_assert 'configmap/test-configmap' "{{${id_field:?}}}" 'test-configmap'
  kubectl delete configmap test-configmap "${kube_flags[@]:?}"
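  # Note: kube::test::get_object_assert renders the object(s) through a Go
  # template and compares the result to the expected string. ${id_field} is a
  # harness variable (typically ".metadata.name"), so the assertion above is
  # roughly equivalent to:
  #   kubectl get configmap/test-configmap -o go-template='{{.metadata.name}}'
  # with 'test-configmap' as the expected output.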

  ### Create a new namespace
  # Pre-condition: the test-configmaps namespace does not exist
  kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \"test-configmaps\" }}found{{end}}{{end}}:" ':'
  # Command
  kubectl create namespace test-configmaps
  # Post-condition: namespace 'test-configmaps' is created.
  kube::test::get_object_assert 'namespaces/test-configmaps' "{{$id_field}}" 'test-configmaps'

  ### Create a generic configmap in a specific namespace
  # Pre-condition: configmaps test-configmap and test-binary-configmap do not exist
  kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \"test-configmap\" }}found{{end}}{{end}}:" ':'
  kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \"test-binary-configmap\" }}found{{end}}{{end}}:" ':'
  # Dry-run command
  kubectl create configmap test-configmap --dry-run=client --from-literal=key1=value1 --namespace=test-configmaps
  kubectl create configmap test-configmap --dry-run=server --from-literal=key1=value1 --namespace=test-configmaps
  kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \"test-configmap\" }}found{{end}}{{end}}:" ':'
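  # Note: --dry-run=client validates and renders the object locally without
  # contacting the API server, while --dry-run=server submits the request with
  # the dryRun option so admission plugins run but nothing is persisted. The
  # re-assert above confirms that neither variant actually created the object.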
  # Command
  kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps
  kubectl create configmap test-binary-configmap --from-file <( head -c 256 /dev/urandom ) --namespace=test-configmaps
  # Post-condition: configmap exists and has expected values
  kube::test::get_object_assert 'configmap/test-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-configmap'
  kube::test::get_object_assert 'configmap/test-binary-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-binary-configmap'
  grep -q "key1: value1" <<< "$(kubectl get configmap/test-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}")"
  grep -q "binaryData" <<< "$(kubectl get configmap/test-binary-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}")"
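  # Note: 256 bytes from /dev/urandom are almost never valid UTF-8, so the API
  # server stores the file under binaryData rather than data, which is what the
  # grep above checks for.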
  # Describe command should respect the chunk size parameter
  kube::test::describe_resource_chunk_size_assert configmaps events "--namespace=test-configmaps"
  # Clean-up
  kubectl delete configmap test-configmap --namespace=test-configmaps
  kubectl delete configmap test-binary-configmap --namespace=test-configmaps
  kubectl delete namespace test-configmaps

  set +o nounset
  set +o errexit
}

# Runs all pod-related tests.
run_pod_tests() {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl(v1:pods)"

  ### Create POD valid-pod from JSON
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  # Post-condition: valid-pod POD is created
  kubectl get "${kube_flags[@]}" pods -o json
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod'
  kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod'
  kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod'
  # pod has field manager for kubectl create
  output_message=$(kubectl get --show-managed-fields -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'kubectl-create'
  # Repeat above test using jsonpath template
  kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod'
  kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod'
  kube::test::get_object_jsonpath_assert 'pod/valid-pod' "{$id_field}" 'valid-pod'
  kube::test::get_object_jsonpath_assert 'pods/valid-pod' "{$id_field}" 'valid-pod'
  # Describe command should print detailed information
  kube::test::describe_object_assert pods 'valid-pod' "Name:" "Image:" "Node:" "Labels:" "Status:"
  # Describe command should print events information by default
  kube::test::describe_object_events_assert pods 'valid-pod'
  # Describe command should not print events information when show-events=false
  kube::test::describe_object_events_assert pods 'valid-pod' false
  # Describe command should print events information when show-events=true
  kube::test::describe_object_events_assert pods 'valid-pod' true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:"

  # Describe command should print events information by default
  kube::test::describe_resource_events_assert pods
  # Describe command should not print events information when show-events=false
  kube::test::describe_resource_events_assert pods false
  # Describe command should print events information when show-events=true
  kube::test::describe_resource_events_assert pods true
  # Describe command should respect the chunk size parameter
  kube::test::describe_resource_chunk_size_assert pods events
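  # Note: kubectl describe pages its LIST requests (the --chunk-size flag maps
  # to the limit/continue query parameters); the helper above is expected to
  # assert that those paginated requests are issued for pods and their events.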

  ### Dump current valid-pod POD
  output_pod=$(kubectl get pod valid-pod -o yaml "${kube_flags[@]}")

  ### Delete POD valid-pod by id
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0 --force
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Delete POD valid-pod by id with --now
  # Pre-condition: valid-pod POD exists
  kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pod valid-pod "${kube_flags[@]}" --now
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Delete POD valid-pod by id with --grace-period=0
  # Pre-condition: valid-pod POD exists
  kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command succeeds without --force by waiting
  kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
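  # Note on deletion flags: --grace-period=0 --force deletes the object
  # immediately without waiting for the kubelet to confirm shutdown; --now is
  # shorthand for --grace-period=1; and --grace-period=0 without --force is
  # coerced to a one-second grace period, so kubectl simply waits for the pod
  # to terminate, which is what the test above relies on.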

  ### Create POD valid-pod from dumped YAML
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  echo "${output_pod}" | ${SED} '/namespace:/d' | kubectl create -f - "${kube_flags[@]}"
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Delete POD valid-pod from JSON
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" --grace-period=0 --force
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create POD valid-pod from JSON
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" "valid-pod:"

  ### Delete POD valid-pod with label
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert "pods -lname=valid-pod" "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pods -lname=valid-pod "${kube_flags[@]}" --grace-period=0 --force
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert "pods -lname=valid-pod" "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create POD valid-pod from YAML
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  output_message=$(kubectl get pods --field-selector metadata.name=valid-pod "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" "valid-pod"
  # Command
  phase=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .status.phase }}')
  output_message=$(kubectl get pods --field-selector status.phase="${phase}" "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" "valid-pod"
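  # Note: pods only support a small set of field selectors server-side
  # (e.g. metadata.name, metadata.namespace, status.phase, spec.nodeName),
  # which is why the test sticks to metadata.name and status.phase above.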

  ### Deleting PODs with no parameter mustn't kill everything
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  ! kubectl delete pods "${kube_flags[@]}" || exit 1
  # Post-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Deleting PODs with both --all and a label selector is not permitted
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  ! kubectl delete --all pods -lname=valid-pod "${kube_flags[@]}" || exit 1
  # Post-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Delete all PODs
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 --force # --all removes all the pods
  # Post-condition: no POD exists
  kube::test::get_object_assert "pods -lname=valid-pod" "{{range.items}}{{$id_field}}:{{end}}" ''

  # Detailed tests for describe pod output
  ### Create a new namespace
  # Pre-condition: the test-kubectl-describe-pod namespace does not exist
  kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:" ':'
  # Command
  kubectl create namespace test-kubectl-describe-pod
  # Post-condition: namespace 'test-kubectl-describe-pod' is created.
  kube::test::get_object_assert 'namespaces/test-kubectl-describe-pod' "{{$id_field}}" 'test-kubectl-describe-pod'

  ### Create a generic secret
  # Pre-condition: no SECRET exists
  kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Dry-run command
  kubectl create secret generic test-secret --dry-run=client --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
  kubectl create secret generic test-secret --dry-run=server --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
  kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create secret generic test-secret --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
  # Post-condition: secret exists and has expected values
  kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-secret'
  kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{${secret_type:?}}}" 'test-type'

  ### Create a generic configmap
  # Pre-condition: CONFIGMAP test-configmap does not exist
  #kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" ''
  kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \"test-configmap\" }}found{{end}}{{end}}:" ':'

  #kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create configmap test-configmap --from-literal=key-2=value2 --namespace=test-kubectl-describe-pod
  # Post-condition: configmap exists and has expected values
  kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-configmap'

  ### Create a pod disruption budget with minAvailable
  # Pre-condition: pdb does not exist
  kube::test::get_object_assert 'pdb --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \"test-pdb-1\" }}found{{end}}{{end}}:" ':'
  # Dry-run command
  kubectl create pdb test-pdb-1 --dry-run=client --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
  kubectl create pdb test-pdb-1 --dry-run=server --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
  kube::test::get_object_assert 'pdb --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \"test-pdb-1\" }}found{{end}}{{end}}:" ':'
  # Command
  kubectl create pdb test-pdb-1 --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
  # Post-condition: pdb exists and has expected values
  kube::test::get_object_assert 'pdb/test-pdb-1 --namespace=test-kubectl-describe-pod' "{{${pdb_min_available:?}}}" '2'
  # Command
  kubectl create pdb test-pdb-2 --selector=app=rails --min-available=50% --namespace=test-kubectl-describe-pod
  # Post-condition: pdb exists and has expected values
  kube::test::get_object_assert 'pdb/test-pdb-2 --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '50%'
  # Describe command should respect the chunk size parameter
  kube::test::describe_resource_chunk_size_assert poddisruptionbudgets events "--namespace=test-kubectl-describe-pod"

  ### Create a pod disruption budget with maxUnavailable
  # Command
  kubectl create pdb test-pdb-3 --selector=app=rails --max-unavailable=2 --namespace=test-kubectl-describe-pod
  # Post-condition: pdb exists and has expected values
  kube::test::get_object_assert 'pdb/test-pdb-3 --namespace=test-kubectl-describe-pod' "{{${pdb_max_unavailable:?}}}" '2'
  # Command
  kubectl create pdb test-pdb-4 --selector=app=rails --max-unavailable=50% --namespace=test-kubectl-describe-pod
  # Post-condition: pdb exists and has expected values
  kube::test::get_object_assert 'pdb/test-pdb-4 --namespace=test-kubectl-describe-pod' "{{$pdb_max_unavailable}}" '50%'
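  # Note: minAvailable and maxUnavailable are IntOrString fields, which is why
  # both absolute counts (2) and percentages (50%) round-trip unchanged above;
  # the two fields are also mutually exclusive, as the next command verifies.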

  ### Fail to create a pod disruption budget if both maxUnavailable and minAvailable are specified
  ! kubectl create pdb test-pdb --selector=app=rails --min-available=2 --max-unavailable=3 --namespace=test-kubectl-describe-pod || exit 1

  # Create a pod that consumes secret, configmap, and downward API keys as envs
  kube::test::get_object_assert 'pods --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
  kubectl create -f hack/testdata/pod-with-api-env.yaml --namespace=test-kubectl-describe-pod

  kube::test::describe_object_assert 'pods --namespace=test-kubectl-describe-pod' 'env-test-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
  # Describe command (resource only) should print detailed information about environment variables
  kube::test::describe_resource_assert 'pods --namespace=test-kubectl-describe-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
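  # For reference, the envs exercised above come from the three valueFrom
  # sources the fixture is assumed to use, shaped roughly like:
  #   env:
  #   - name: TEST_CMD_1
  #     valueFrom: { secretKeyRef: { name: test-secret, key: key-1 } }
  #   - name: TEST_CMD_2
  #     valueFrom: { configMapKeyRef: { name: test-configmap, key: key-2 } }
  #   - name: TEST_CMD_3
  #     valueFrom: { fieldRef: { fieldPath: metadata.name } }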

  # Clean-up
  kubectl delete pod env-test-pod --namespace=test-kubectl-describe-pod
  kubectl delete secret test-secret --namespace=test-kubectl-describe-pod
  kubectl delete configmap test-configmap --namespace=test-kubectl-describe-pod
  kubectl delete pdb/test-pdb-1 pdb/test-pdb-2 pdb/test-pdb-3 pdb/test-pdb-4 --namespace=test-kubectl-describe-pod
  kubectl delete namespace test-kubectl-describe-pod

  ### Priority Class
  kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \"test-priorityclass\" }}found{{end}}{{end}}:" ':'
  # Dry-run command
  kubectl create priorityclass test-priorityclass --dry-run=client
  kubectl create priorityclass test-priorityclass --dry-run=server
  kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \"test-priorityclass\" }}found{{end}}{{end}}:" ':'
  # Command
  kubectl create priorityclass test-priorityclass
  kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \"test-priorityclass\" }}found{{end}}{{end}}:" 'found:'
  # Describe command should respect the chunk size parameter
  kube::test::describe_resource_chunk_size_assert priorityclasses events
  kubectl delete priorityclass test-priorityclass

  ### Create two PODs
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  kubectl create -f test/e2e/testing-manifests/kubectl/agnhost-primary-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod and agnhost-primary PODs are created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'agnhost-primary:valid-pod:'

  ### Delete multiple PODs at once
  # Pre-condition: valid-pod and agnhost-primary PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'agnhost-primary:valid-pod:'
  # Command
  kubectl delete pods valid-pod agnhost-primary "${kube_flags[@]}" --grace-period=0 --force # delete multiple pods at once
  # Post-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create valid-pod POD
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Dry-run label the valid-pod POD
  # Pre-condition: valid-pod is not labelled
  kube::test::get_object_assert 'pod valid-pod' "{{range${labels_field:?}}}{{.}}:{{end}}" 'valid-pod:'
  # Command
  kubectl label pods valid-pod new-name=new-valid-pod --dry-run=client "${kube_flags[@]}"
  kubectl label pods valid-pod new-name=new-valid-pod --dry-run=server "${kube_flags[@]}"
  # Post-condition: valid-pod is not labelled
  kube::test::get_object_assert 'pod valid-pod' "{{range${labels_field:?}}}{{.}}:{{end}}" 'valid-pod:'

  ### Label the valid-pod POD
  # Pre-condition: valid-pod is not labelled
  kube::test::get_object_assert 'pod valid-pod' "{{range${labels_field:?}}}{{.}}:{{end}}" 'valid-pod:'
  # Command
  kubectl label pods valid-pod new-name=new-valid-pod "${kube_flags[@]}"
  # Post-condition: valid-pod is labelled
  kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'

  ### Label the valid-pod POD with an empty label value
  # Pre-condition: valid-pod does not have label "emptylabel"
  kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'
  # Command
  kubectl label pods valid-pod emptylabel="" "${kube_flags[@]}"
  # Post-condition: valid-pod contains "emptylabel" with no value
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.emptylabel}}" ''

  ### Dry-run annotate the valid-pod POD with an empty annotation value
  # Pre-condition: valid-pod does not have annotation "emptyannotation"
  kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field:?}.emptyannotation}}" '<no value>'
  # Command
  kubectl annotate pods valid-pod emptyannotation="" --dry-run=client "${kube_flags[@]}"
  kubectl annotate pods valid-pod emptyannotation="" --dry-run=server "${kube_flags[@]}"
  # Post-condition: valid-pod does not have annotation "emptyannotation"
  kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field:?}.emptyannotation}}" '<no value>'

  ### Annotate the valid-pod POD with an empty annotation value
  # Pre-condition: valid-pod does not have annotation "emptyannotation"
  kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field:?}.emptyannotation}}" '<no value>'
  # Command
  kubectl annotate pods valid-pod emptyannotation="" "${kube_flags[@]}"
  # Post-condition: valid-pod contains "emptyannotation" with no value
  kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" ''
  # pod has field manager for kubectl annotate
  output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'kubectl-annotate'

  ### Record label change
  # Pre-condition: valid-pod does not have record annotation
  kube::test::get_object_assert 'pod valid-pod' "{{range.items}}{{$annotations_field}}:{{end}}" ''
  # Command
  kubectl label pods valid-pod record-change=true --record=true "${kube_flags[@]}"
  # Post-condition: valid-pod has record annotation
  kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"
  # pod has field manager for kubectl label
  output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'kubectl-label'

  ### Do not record label change
  # Command
  kubectl label pods valid-pod no-record-change=true --record=false "${kube_flags[@]}"
  # Post-condition: valid-pod's record annotation still contains the command with --record=true
  kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"
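  # Note: --record=true stores the invoking command line in the
  # kubernetes.io/change-cause annotation; --record=false leaves any previously
  # recorded value untouched, which is what the assertion above verifies.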

  ### Record label change with specified flag and previous change already recorded
  ### we are no longer tricked by data from another user into revealing more information about our client
  # Command
  kubectl label pods valid-pod new-record-change=true --record=true "${kube_flags[@]}"
  # Post-condition: valid-pod's record annotation contains the new change
  kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*new-record-change=true.*"


  ### Delete POD by label
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 --force "${kube_flags[@]}"
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create pod-with-precision POD
  # Pre-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/pod-with-precision.json "${kube_flags[@]}"
  # Post-condition: pod-with-precision POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'pod-with-precision:'

  ## Patch preserves precision
  # Command
  kubectl patch "${kube_flags[@]}" pod pod-with-precision -p='{"metadata":{"annotations":{"patchkey": "patchvalue"}}}'
  # Post-condition: pod-with-precision POD has patched annotation
  kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.patchkey}}" 'patchvalue'
  # Command
  kubectl label pods pod-with-precision labelkey=labelvalue "${kube_flags[@]}"
  # Post-condition: pod-with-precision POD has label
  kube::test::get_object_assert 'pod pod-with-precision' "{{${labels_field}.labelkey}}" 'labelvalue'
  # Command
  kubectl annotate pods pod-with-precision annotatekey=annotatevalue "${kube_flags[@]}"
  # Post-condition: pod-with-precision POD has annotation
  kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
  # Cleanup
  kubectl delete pod pod-with-precision "${kube_flags[@]}"

  ### Annotate POD YAML file locally without affecting the live pod.
  kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
  # Command
  kubectl annotate -f hack/testdata/pod.yaml annotatekey=annotatevalue "${kube_flags[@]}"

  # Pre-condition: annotatekey is annotatevalue
  kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'

  # Command
  output_message=$(kubectl annotate --local -f hack/testdata/pod.yaml annotatekey=localvalue -o yaml "${kube_flags[@]}")
  echo "$output_message"

  # Post-condition: annotatekey is still annotatevalue in the live pod, but the command output shows the new value
  kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
  kube::test::if_has_string "${output_message}" "localvalue"
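  # Note: --local mutates and prints the object entirely client-side from the
  # provided file; no request reaches the API server, so the live pod keeps its
  # original annotation value.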

  # Cleanup
  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"

  ### Create a service and an rc with kubectl create --edit
  # Pre-condition: no services and no rcs exist
  kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  ## kubectl create --edit can update the label field of multiple resources. tmp-editor.sh is a fake editor
  TEMP=$(mktemp /tmp/tmp-editor-XXXXXXXX.sh)
  echo -e "#!/usr/bin/env bash\n${SED} -i \"s/mock/modified/g\" \$1" > "${TEMP}"
  chmod +x "${TEMP}"
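  # The fake-editor trick: kubectl create --edit (like kubectl edit) invokes
  # ${EDITOR} on a temp file holding the manifest, so pointing EDITOR at a
  # script that runs sed gives a deterministic, non-interactive "edit".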
  # Command
  EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-json.json "${kube_flags[@]}"
  # Post-condition: service named modified and rc named modified are created
  kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
  # resources have field manager for kubectl create
  output_message=$(kubectl get service/modified -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'kubectl-create'
  output_message=$(kubectl get rc/modified -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'kubectl-create'
  # Clean up
  kubectl delete service/modified "${kube_flags[@]}"
  kubectl delete rc/modified "${kube_flags[@]}"

  # Pre-condition: no services and no rcs exist
  kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-list.json "${kube_flags[@]}"
  # Post-condition: service named modified and rc named modified are created
  kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
  # Clean up
  rm "${TEMP}"
  kubectl delete service/modified "${kube_flags[@]}"
  kubectl delete rc/modified "${kube_flags[@]}"

  ## kubectl create --edit won't create anything if the user makes no changes
  grep -q 'Edit cancelled' <<< "$(EDITOR="cat" kubectl create --edit -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o json 2>&1)"

  ## Create valid-pod POD
  # Pre-condition: no POD exists
  kube::test::wait_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ## Patch can modify a local object
  kubectl patch --local -f test/fixtures/pkg/kubectl/cmd/patch/validPod.yaml --patch='{"spec": {"restartPolicy":"Never"}}' -o yaml | grep -q "Never"

  ## Patch fails with a type restore error and exit code 1
  output_message=$(! kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"metadata":{"labels":"invalid"}}' 2>&1)
  kube::test::if_has_string "${output_message}" 'cannot restore map from string'

  ## Patch prints the message "patched (no change)" and exits 0 when the patch is a no-op
  output_message=$(kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"metadata":{"labels":{"name":"valid-pod"}}}' 2>&1)
  kube::test::if_has_string "${output_message}" 'patched (no change)'

  ## Patch pod can change image
  # Command
  kubectl patch "${kube_flags[@]}" pod valid-pod --record -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]}}'
  # Post-condition: valid-pod POD has image nginx
  kube::test::get_object_assert pods "{{range.items}}{{${image_field:?}}}:{{end}}" 'nginx:'
  # Post-condition: valid-pod has the record annotation
  kube::test::get_object_assert pods "{{range.items}}{{$annotations_field}}:{{end}}" "${change_cause_annotation:?}"
  # prove that patch can use different types
  kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx2"}]'
  # Post-condition: valid-pod POD has image nginx2
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx2:'
  # prove that patch can use different types
  kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx"}]'
  # Post-condition: valid-pod POD has image nginx
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
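  # Note: without --type, kubectl patch sends a strategic merge patch;
  # --type="json" switches to an RFC 6902 JSON Patch, i.e. an ordered list of
  # operations such as the "replace" ops above (--type="merge" would select an
  # RFC 7386 merge patch instead).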
  # Dry-run change image
  kubectl patch "${kube_flags[@]}" pod valid-pod --record --dry-run=client -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "not-nginx"}]}}'
  kubectl patch "${kube_flags[@]}" pod valid-pod --record --dry-run=server -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "not-nginx"}]}}'
  # Post-condition: valid-pod POD still has image nginx
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
  # prove that yaml input works too
  YAML_PATCH=$'spec:\n  containers:\n  - name: kubernetes-serve-hostname\n    image: changed-with-yaml\n'
  kubectl patch "${kube_flags[@]}" pod valid-pod -p="${YAML_PATCH}"
  # Post-condition: valid-pod POD has image changed-with-yaml
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:'
  ## Patch pod from JSON can change image
  # Command
  kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "registry.k8s.io/pause:3.9"}]}}'
  # Post-condition: valid-pod POD has expected image
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'registry.k8s.io/pause:3.9:'

  # pod has field manager for kubectl patch
  output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'kubectl-patch'

  ## If resourceVersion is specified in the patch, it is treated as a precondition, i.e. if the
  ## resourceVersion differs from the one stored in the server, the patch should be rejected.
  ERROR_FILE="${KUBE_TEMP}/conflict-error"
  ## If the resourceVersion is the same as the one stored in the server, the patch will be applied.
  # Command
  # Needs to retry because another party may change the resource.
  for count in {0..3}; do
    resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
    kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'"${resourceVersion}"'"}}' 2> "${ERROR_FILE}" || true
    if grep -q "the object has been modified" "${ERROR_FILE}"; then
      kube::log::status "retry ${count}, error: $(cat "${ERROR_FILE}")"
      rm "${ERROR_FILE}"
      sleep $((2**count))
    else
      rm "${ERROR_FILE}"
      kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
      break
    fi
  done
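  # Note: a resourceVersion embedded in the patch acts as an optimistic
  # concurrency precondition, so a concurrent writer can legitimately make the
  # patch fail; the loop above retries with exponential backoff (1s, 2s, 4s,
  # 8s) until the patch lands on a fresh resourceVersion.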

  ## If the resourceVersion is different from the one stored in the server, the patch will be rejected.
  resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
  ((resourceVersion+=100))
  # Command
  kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'"$resourceVersion"'"}}' 2> "${ERROR_FILE}" || true
  # Post-condition: should get an error reporting the conflict
  if grep -q "please apply your changes to the latest version and try again" "${ERROR_FILE}"; then
    kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns error as expected: $(cat "${ERROR_FILE}")"
  else
    kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns unexpected error or non-error: $(cat "${ERROR_FILE}")"
    exit 1
  fi
  rm "${ERROR_FILE}"

  ## --force replace pod can change other fields, e.g. spec.containers[0].name
  # Command
  kubectl get "${kube_flags[@]}" pod valid-pod -o json | ${SED} 's/"kubernetes-serve-hostname"/"replaced-k8s-serve-hostname"/g' > /tmp/tmp-valid-pod.json
  kubectl replace "${kube_flags[@]}" --force -f /tmp/tmp-valid-pod.json
  # Post-condition: spec.containers[0].name = "replaced-k8s-serve-hostname"
  kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname'
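  # Note: replace --force deletes the object and recreates it from the file,
  # which is why an otherwise-immutable pod field such as the container name
  # can change here.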

  # Pod has field manager for kubectl replace
  output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'kubectl-replace'

  ## check that replace --grace-period requires --force
  output_message=$(! kubectl replace "${kube_flags[@]}" --grace-period=1 -f /tmp/tmp-valid-pod.json 2>&1)
  kube::test::if_has_string "${output_message}" '\-\-grace-period must have \-\-force specified'

  ## check that replace --timeout requires --force
  output_message=$(! kubectl replace "${kube_flags[@]}" --timeout=1s -f /tmp/tmp-valid-pod.json 2>&1)
  kube::test::if_has_string "${output_message}" '\-\-timeout must have \-\-force specified'

  # Clean up
  rm /tmp/tmp-valid-pod.json

  ## replace of a cluster-scoped resource can succeed
  # Pre-condition: a node exists
  kubectl create -f - "${kube_flags[@]}" << __EOF__
{
  "kind": "Node",
  "apiVersion": "v1",
  "metadata": {
    "name": "node-v1-test"
  }
}
__EOF__
  kube::test::get_object_assert "node node-v1-test" "{{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:" ':'

  # Dry-run command
  kubectl replace --dry-run=server -f - "${kube_flags[@]}" << __EOF__
{
  "kind": "Node",
  "apiVersion": "v1",
  "metadata": {
    "name": "node-v1-test",
    "annotations": {"a":"b"},
    "resourceVersion": "0"
  }
}
__EOF__
  kubectl replace --dry-run=client -f - "${kube_flags[@]}" << __EOF__
{
  "kind": "Node",
  "apiVersion": "v1",
  "metadata": {
    "name": "node-v1-test",
    "annotations": {"a":"b"},
    "resourceVersion": "0"
  }
}
__EOF__
  kube::test::get_object_assert "node node-v1-test" "{{range.items}}{{if .metadata.annotations.a}}found{{end}}{{end}}:" ':'

  # Command
  kubectl replace -f - "${kube_flags[@]}" << __EOF__
{
  "kind": "Node",
  "apiVersion": "v1",
  "metadata": {
    "name": "node-v1-test",
    "annotations": {"a":"b"},
    "resourceVersion": "0"
  }
}
__EOF__

  # Post-condition: the replace succeeds and the node has the annotation
  kube::test::get_object_assert "node node-v1-test" "{{.metadata.annotations.a}}" 'b'
  kubectl delete node node-v1-test "${kube_flags[@]}"

  ## kubectl edit can update the image field of a POD. tmp-editor.sh is a fake editor
  echo -e "#!/usr/bin/env bash\n${SED} -i \"s/nginx/registry.k8s.io\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh
  chmod +x /tmp/tmp-editor.sh
  # Pre-condition: valid-pod POD has image nginx
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
  grep -q 'Patch:' <<< "$(EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod --output-patch=true)"
  # Post-condition: valid-pod POD has image registry.k8s.io/serve_hostname
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'registry.k8s.io/serve_hostname:'
  # pod has field manager for kubectl edit
  output_message=$(kubectl get pod valid-pod -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'kubectl-edit'
  # Clean up
  rm /tmp/tmp-editor.sh

  ## kubectl edit should work on Windows
  grep -q 'Edit cancelled' <<< "$(EDITOR="cat" kubectl edit pod/valid-pod 2>&1)"
  grep -q 'name: valid-pod' <<< "$(EDITOR="cat" kubectl edit pod/valid-pod)"
  grep -q CRLF <<< "$(EDITOR="cat" kubectl edit --windows-line-endings pod/valid-pod | file - )"
  ! grep -q CRLF <<< "$(EDITOR="cat" kubectl edit --windows-line-endings=false pod/valid-pod | file - )" || exit 1
  grep -q 'kind: List' <<< "$(EDITOR="cat" kubectl edit ns)"
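  # Note: --windows-line-endings controls whether the temp file handed to the
  # editor uses CRLF line endings; piping the "edited" output through file(1)
  # and grepping for CRLF is a portable way to detect which variant was used.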

  ### Label POD YAML file locally without affecting the live pod.
  # Pre-condition: name is valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
  # Command
  output_message=$(kubectl label --local --overwrite -f hack/testdata/pod.yaml name=localonlyvalue -o yaml "${kube_flags[@]}")
  echo "$output_message"
  # Post-condition: name is still valid-pod in the live pod, but the command output shows the new value
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
  kube::test::if_has_string "${output_message}" "localonlyvalue"

  ### Overwriting an existing label is not permitted
  # Pre-condition: name is valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
  # Command
  ! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}" || exit 1
  # Post-condition: name is still valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'

  ### --overwrite must be used to overwrite an existing label; it can be applied to all resources
  # Pre-condition: name is valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
  # Command
  kubectl label --overwrite pods --all name=valid-pod-super-sayan "${kube_flags[@]}"
  # Post-condition: name is valid-pod-super-sayan
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod-super-sayan'

  ### Delete POD by label
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pods -lname=valid-pod-super-sayan --grace-period=0 --force "${kube_flags[@]}"
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create two PODs from 1 yaml file
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
  # Post-condition: redis-master and valid-pod PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'

  ### Delete two PODs from 1 yaml file
  # Pre-condition: redis-master and valid-pod PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
  # Command
  kubectl delete -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
  # Post-condition: no PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ## kubectl apply should update configuration annotations only if apply was already called
  ## 1. kubectl create doesn't set the annotation
  # Pre-Condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: create a pod "test-pod"
  kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
  # Post-Condition: pod "test-pod" is created
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
  # Post-Condition: pod "test-pod" doesn't have configuration annotation
  ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" )" || exit 1
  ## 2. kubectl replace doesn't set the annotation
  kubectl get pods test-pod -o yaml "${kube_flags[@]}" | ${SED} 's/test-pod-label/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
  # Command: replace the pod "test-pod"
  kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
  # Post-Condition: pod "test-pod" is replaced
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
  # Post-Condition: pod "test-pod" doesn't have configuration annotation
  ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")" || exit 1
  ## 3. kubectl apply does set the annotation
  # Command: apply the pod "test-pod"
  kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
  # Post-Condition: pod "test-pod" is applied
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-applied'
  # Post-Condition: pod "test-pod" has configuration annotation
  grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
  kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration
  ## 4. kubectl replace updates an existing annotation
  kubectl get pods test-pod -o yaml "${kube_flags[@]}" | ${SED} 's/test-pod-applied/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
  # Command: replace the pod "test-pod"
  kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
  # Post-Condition: pod "test-pod" is replaced
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
  # Post-Condition: pod "test-pod" has configuration annotation, and it's updated (different from the annotation when it was applied)
  grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" )"
  kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration-replaced
  ! diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null || exit 1
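  # Note: only kubectl apply (or commands run with --save-config) writes the
  # kubectl.kubernetes.io/last-applied-configuration annotation; create and
  # replace don't add it on their own, though replace keeps an annotation
  # already present in the input object, so the sed edit above changes it
  # along with the label, which the diff confirms.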
  # Clean up
  rm "${KUBE_TEMP}"/test-pod-replace.yaml "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced
  kubectl delete pods test-pod "${kube_flags[@]}"

  set +o nounset
  set +o errexit
}

# Runs specific kubectl create tests.
run_create_secret_tests() {
    set -o nounset
    set -o errexit

    ### Create generic secret with explicit namespace
    # Pre-condition: secret 'mysecret' does not exist
    output_message=$(! kubectl get secrets mysecret 2>&1 "${kube_flags[@]}")
    kube::test::if_has_string "${output_message}" 'secrets "mysecret" not found'
    # Command
    output_message=$(kubectl create "${kube_flags[@]}" secret generic mysecret --dry-run=client --from-literal=foo=bar -o jsonpath='{.metadata.namespace}' --namespace=user-specified)
    kube::test::if_has_string "${output_message}" 'user-specified'
    # Post-condition: output from the 'create' command contains the specified --namespace value,
    # but mysecret is still not created since --dry-run was used
    failure_message=$(! kubectl get secrets mysecret 2>&1 "${kube_flags[@]}")
    kube::test::if_has_string "${failure_message}" 'secrets "mysecret" not found'
    # Command
    output_message=$(kubectl create "${kube_flags[@]}" secret generic mysecret --dry-run=client --from-literal=foo=bar -o jsonpath='{.metadata.namespace}')
    # Post-condition: jsonpath for .metadata.namespace should be empty for the object since --namespace was not explicitly specified
    kube::test::if_empty_string "${output_message}"


    # check to make sure that replace correctly PUTs to a URL
    kubectl create configmap tester-update-cm -o json --dry-run=client | kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps -f -
    output_message=$(kubectl create configmap tester-update-cm --from-literal=key1=config1 -o json --dry-run=client | kubectl replace "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps/tester-update-cm -f -)
    # the message should show the body returned, which will include a UID not present in the input
    kube::test::if_has_string "${output_message}" 'uid'
    # if the PUT was well-formed, the server will now have a key and value we can retrieve on GET
    output_message=$(kubectl get "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps/tester-update-cm 2>&1)
    kube::test::if_has_string "${output_message}" 'config1'

    # if DELETE raw works correctly, this will delete the configmap
    kubectl delete "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps/tester-update-cm
    output_message=$(! kubectl get "${kube_flags[@]}" configmap tester-update-cm 2>&1)
    kube::test::if_has_string "${output_message}" 'configmaps "tester-update-cm" not found'
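    # Note: --raw sends the request straight to the given API path: create
    # issues a POST, replace a PUT, get a GET, and delete a DELETE, with any
    # -f content as the request body, bypassing kubectl's usual object
    # handling.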

    set +o nounset
    set +o errexit
}

run_secrets_test() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing secrets"

  # Ensure dry run succeeds and includes kind, apiVersion and data, and doesn't require a server connection
  output_message=$(kubectl create secret generic test --from-literal=key1=value1 --dry-run=client -o yaml --server=example.com --v=6)
  kube::test::if_has_string "${output_message}" 'kind: Secret'
  kube::test::if_has_string "${output_message}" 'apiVersion: v1'
  kube::test::if_has_string "${output_message}" 'key1: dmFsdWUx'
  kube::test::if_has_not_string "${output_message}" 'example.com'
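  # Note: secret data is base64-encoded, so 'key1: dmFsdWUx' above is just
  # base64("value1"); e.g.:
  #   echo -n value1 | base64   # -> dmFsdWUx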
   828  
   829    ### Create a new namespace
   830    # Pre-condition: the test-secrets namespace does not exist
   831    kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \"test-secrets\" }}found{{end}}{{end}}:" ':'
   832    # Command
   833    kubectl create namespace test-secrets
   834    # Post-condition: namespace 'test-secrets' is created.
   835    kube::test::get_object_assert 'namespaces/test-secrets' "{{$id_field}}" 'test-secrets'
   836  
   837    ### Create a generic secret in a specific namespace
   838    # Pre-condition: no SECRET exists
   839    kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
   840    # Command
   841    kubectl create secret generic test-secret --from-literal=key1=value1 --type=test-type --namespace=test-secrets
   842    # Post-condition: secret exists and has expected values
   843    kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
   844    kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'test-type'
   845    grep -q 'key1: dmFsdWUx' <<< "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}")"
   846    # Describe command should respect the chunk size parameter
   847    kube::test::describe_resource_chunk_size_assert secrets ""  "--namespace=test-secrets"
   848    # Clean-up
   849    kubectl delete secret test-secret --namespace=test-secrets
   850  
   851    ### Create a docker-registry secret in a specific namespace
   852    if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
   853      kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
   854    fi
   855    # Pre-condition: no SECRET exists
   856    kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
   857    # Command
   858    kubectl create secret docker-registry test-secret --docker-username=test-user --docker-password=test-password --docker-email='test-user@test.com' --namespace=test-secrets
   859    # Post-condition: secret exists and has expected values
   860    kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
   861    kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockerconfigjson'
   862    grep -q '.dockerconfigjson: eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJ0ZXN0LXVzZXIiLCJwYXNzd29yZCI6InRlc3QtcGFzc3dvcmQiLCJlbWFpbCI6InRlc3QtdXNlckB0ZXN0LmNvbSIsImF1dGgiOiJkR1Z6ZEMxMWMyVnlPblJsYzNRdGNHRnpjM2R2Y21RPSJ9fX0=' <<< "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}")"
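          # The expected payload decodes to the .dockerconfigjson kubectl generates: an
          # "auths" entry for https://index.docker.io/v1/ whose "auth" field is
          # base64("test-user:test-password").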
   863    # Clean-up
   864    kubectl delete secret test-secret --namespace=test-secrets
   865  
   866    ### Create a docker-registry secret in a specific namespace with docker config file
   867    if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
   868      kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
   869    fi
   870    # Pre-condition: no SECRET exists
   871    kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
   872    # Command
   873    kubectl create secret docker-registry test-secret --from-file=.dockerconfigjson=hack/testdata/dockerconfig.json --namespace=test-secrets
   874    # Post-condition: secret exists and has expected values
   875    kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
   876    kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockerconfigjson'
   877    grep -q '.dockerconfigjson: ewogICAgImF1dGhzIjp7CiAgICAgICAgImh0dHA6Ly9mb28uZXhhbXBsZS5jb20iOnsKICAgICAgICAgICAgInVzZXJuYW1lIjoiZm9vIiwKICAgICAgICAgICAgInBhc3N3b3JkIjoiYmFyIiwKICAgICAgICAgICAgImVtYWlsIjoiZm9vQGV4YW1wbGUuY29tIgogICAgICAgIH0sCiAgICAgICAgImh0dHA6Ly9iYXIuZXhhbXBsZS5jb20iOnsKICAgICAgICAgICAgInVzZXJuYW1lIjoiYmFyIiwKICAgICAgICAgICAgInBhc3N3b3JkIjoiYmF6IiwKICAgICAgICAgICAgImVtYWlsIjoiYmFyQGV4YW1wbGUuY29tIgogICAgICAgIH0KICAgIH0KfQo=' <<< "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}")"
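          # Here the payload is simply the base64 encoding of the contents of
          # hack/testdata/dockerconfig.json (auth entries for foo.example.com and
          # bar.example.com).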
   878    # Clean-up
   879    kubectl delete secret test-secret --namespace=test-secrets
   880  
   881    ### Create a tls secret
   882    if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
   883      kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
   884    fi
   885    # Pre-condition: no SECRET exists
   886    kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
   887    # Command
   888    kubectl create secret tls test-secret --namespace=test-secrets --key=hack/testdata/tls.key --cert=hack/testdata/tls.crt
   889    kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
   890    kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/tls'
   891    # Clean-up
   892    kubectl delete secret test-secret --namespace=test-secrets
   893  
   894    # Command with process substitution
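          # <(cat ...) feeds the key and cert through FIFOs rather than regular files,
          # verifying kubectl can read credential material from non-seekable input.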
   895    kubectl create secret tls test-secret --namespace=test-secrets --key <(cat hack/testdata/tls.key) --cert <(cat hack/testdata/tls.crt)
   896    kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
   897    kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/tls'
   898    # Clean-up
   899    kubectl delete secret test-secret --namespace=test-secrets
   900  
   901    # Create a secret using stringData
   902    kubectl create --namespace=test-secrets -f - "${kube_flags[@]}" << __EOF__
   903  {
   904    "kind": "Secret",
   905    "apiVersion": "v1",
   906    "metadata": {
   907      "name": "secret-string-data"
   908    },
   909    "data": {
   910      "k1":"djE=",
   911      "k2":""
   912    },
   913    "stringData": {
   914      "k2":"v2"
   915    }
   916  }
   917  __EOF__
   918    # Post-condition: secret-string-data secret is created with expected data, merged/overridden data from stringData, and a cleared stringData field
   919    kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k1:djE=.*'
   920    kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k2:djI=.*'
   921    kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.stringData}}' '<no value>'
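          # djE= is base64("v1"), taken from data as-is; djI= is base64("v2"), produced
          # when the API server encoded the stringData value and overrode the empty k2.
          # Example (illustrative only):
          #   kubectl get secret secret-string-data -n test-secrets -o jsonpath='{.data.k2}' | base64 --decode   # -> v2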
   922    # Clean up
   923    kubectl delete secret secret-string-data --namespace=test-secrets
   924  
   925    ### Create a secret using output flags
   926    if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
   927      kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
   928    fi
   929    # Pre-condition: no secret exists
   930    kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
   931    # Command
   932    grep -q 'test-secret:' <<< "$(kubectl create secret generic test-secret --namespace=test-secrets --from-literal=key1=value1 --output=go-template --template="{{.metadata.name}}:")"
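          # --output=go-template renders the object returned by the create call, so the
          # new Secret's name prints without a separate get.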
   933    ## Clean-up
   934    kubectl delete secret test-secret --namespace=test-secrets
   935    # Clean up
   936    kubectl delete namespace test-secrets
   937  
   938    set +o nounset
   939    set +o errexit
   940  }
   941  
   942  run_service_accounts_tests() {
   943    set -o nounset
   944    set -o errexit
   945  
   946    create_and_use_new_namespace
   947    kube::log::status "Testing service accounts"
   948  
   949    ### Create a new namespace
   950    # Pre-condition: the test-service-accounts namespace does not exist
   951    kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \"test-service-accounts\" }}found{{end}}{{end}}:" ':'
   952    # Command
   953    kubectl create namespace test-service-accounts
   954    # Post-condition: namespace 'test-service-accounts' is created.
   955    kube::test::get_object_assert 'namespaces/test-service-accounts' "{{$id_field}}" 'test-service-accounts'
   956  
   957    ### Create a service account in a specific namespace
   958    # Pre-condition: service account does not exist
   959    kube::test::get_object_assert 'serviceaccount --namespace=test-service-accounts' "{{range.items}}{{ if eq $id_field \"test-service-account\" }}found{{end}}{{end}}:" ':'
   960    # Dry-run command
   961    kubectl create serviceaccount test-service-account --dry-run=client --namespace=test-service-accounts
   962    kubectl create serviceaccount test-service-account --dry-run=server --namespace=test-service-accounts
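          # --dry-run=server submits the request with the dryRun option: admission and
          # validation run server-side but nothing is persisted, as the next assert confirms.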
   963    kube::test::get_object_assert 'serviceaccount --namespace=test-service-accounts' "{{range.items}}{{ if eq $id_field \"test-service-account\" }}found{{end}}{{end}}:" ':'
   964    # Command
   965    kubectl create serviceaccount test-service-account --namespace=test-service-accounts
   966    # Post-condition: service account exists and has expected values
   967    kube::test::get_object_assert 'serviceaccount/test-service-account --namespace=test-service-accounts' "{{$id_field}}" 'test-service-account'
   968    # Describe command should respect the chunk size parameter
   969    kube::test::describe_resource_chunk_size_assert serviceaccounts secrets,events "--namespace=test-service-accounts"
   970    # Clean-up
   971    kubectl delete serviceaccount test-service-account --namespace=test-service-accounts
   972    # Clean up
   973    kubectl delete namespace test-service-accounts
   974  
   975    set +o nounset
   976    set +o errexit
   977  }
   978  
   979  run_service_tests() {
   980    set -o nounset
   981    set -o errexit
   982  
   983    # switch back to the default namespace
   984    kubectl config set-context "${CONTEXT}" --namespace=""
   985    kube::log::status "Testing kubectl(v1:services)"
   986  
   987    ### Create redis-master service from JSON
   988    # Pre-condition: Only the default kubernetes services exist
   989    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
   990    # Command
   991    kubectl create -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml "${kube_flags[@]}"
   992    # Post-condition: redis-master service exists
   993    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
   994    # Describe command should print detailed information
   995    kube::test::describe_object_assert services 'redis-master' "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
   996    # Describe command should print events information by default
   997    kube::test::describe_object_events_assert services 'redis-master'
   998    # Describe command should not print events information when show-events=false
   999    kube::test::describe_object_events_assert services 'redis-master' false
  1000    # Describe command should print events information when show-events=true
  1001    kube::test::describe_object_events_assert services 'redis-master' true
  1002    # Describe command (resource only) should print detailed information
  1003    kube::test::describe_resource_assert services "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
  1004    # Describe command should print events information by default
  1005    kube::test::describe_resource_events_assert services
  1006    # Describe command should not print events information when show-events=false
  1007    kube::test::describe_resource_events_assert services false
  1008    # Describe command should print events information when show-events=true
  1009    kube::test::describe_resource_events_assert services true
  1010    # Describe command should respect the chunk size parameter
  1011    kube::test::describe_resource_chunk_size_assert services events
  1012  
  1013    ### set selector
  1014    # prove role=master
  1015    kube::test::get_object_assert 'services redis-master' "{{range${service_selector_field:?}}}{{.}}:{{end}}" "redis:master:backend:"
  1016  
  1017    # Set selector of a local file without talking to the server
  1018    kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --local -o yaml "${kube_flags[@]}"
  1019    kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --dry-run=client -o yaml "${kube_flags[@]}"
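          # --local mutates only the object parsed from the file; --dry-run=client takes
          # the same client-side path. Neither variant persists a change.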
  1020    # Set command to change the selector.
  1021    kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan
  1022    # prove role=padawan
  1023    kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "padawan:"
  1024    # Set command to reset the selector back to the original one.
  1025    kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml app=redis,role=master,tier=backend
  1026    # prove role=master
  1027    kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
  1028    # Show that dry-run works when setting the selector of a live service
  1029    kubectl set selector services redis-master role=padawan --dry-run=client -o yaml "${kube_flags[@]}"
  1030    kubectl set selector services redis-master role=padawan --dry-run=server -o yaml "${kube_flags[@]}"
  1031    output_message=$(kubectl get services redis-master --show-managed-fields -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  1032    kube::test::if_has_string "${output_message}" 'kubectl-set'
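          # Expected to fail: --local cannot be combined with a live resource lookup
          # (it requires -f input), and the server-side selector stays untouched.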
  1033    ! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}" || exit 1
  1034    kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
  1035    # --resource-version=<current-resource-version> succeeds
  1036    rv=$(kubectl get services redis-master -o jsonpath='{.metadata.resourceVersion}' "${kube_flags[@]}")
  1037    kubectl set selector services redis-master rvtest1=true "--resource-version=${rv}" "${kube_flags[@]}"
  1038    # --resource-version=<non-current-resource-version> fails
  1039    output_message=$(! kubectl set selector services redis-master rvtest1=true --resource-version=1 2>&1 "${kube_flags[@]}")
  1040    kube::test::if_has_string "${output_message}" 'Conflict'
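          # resourceVersion acts as an optimistic-concurrency token: a write carrying a
          # stale version is rejected by the API server with a Conflict.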
  1041  
  1042    ### Dump current redis-master service
  1043    output_service=$(kubectl get service redis-master -o json "${kube_flags[@]}")
  1044  
  1045    ### Delete redis-master-service by id
  1046    # Pre-condition: redis-master service exists
  1047    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
  1048    # Command
  1049    kubectl delete service redis-master "${kube_flags[@]}"
  1050    if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
  1051      kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1052    fi
  1053    # Post-condition: Only the default kubernetes services exist
  1054    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1055  
  1056    ### Create redis-master-service from dumped JSON
  1057    # Pre-condition: Only the default kubernetes services exist
  1058    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1059    # Command
  1060    echo "${output_service}" | kubectl create -f - "${kube_flags[@]}"
  1061    # Post-condition: redis-master service is created
  1062    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
  1063  
  1064    ### Create redis-master-v1-test service
  1065    # Pre-condition: redis-master-service service exists
  1066    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
  1067    # Command
  1068    kubectl create -f - "${kube_flags[@]}" << __EOF__
  1069  {
  1070    "kind": "Service",
  1071    "apiVersion": "v1",
  1072    "metadata": {
  1073      "name": "service-v1-test"
  1074    },
  1075    "spec": {
  1076      "ports": [
  1077        {
  1078          "protocol": "TCP",
  1079          "port": 80,
  1080          "targetPort": 80
  1081        }
  1082      ]
  1083    }
  1084  }
  1085  __EOF__
  1086    # Post-condition: service-v1-test service is created
  1087    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
  1088  
  1089    ### Identity
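          # A get piped straight into replace must round-trip: the unmodified object
          # re-applies cleanly.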
  1090    kubectl get service "${kube_flags[@]}" service-v1-test -o json | kubectl replace "${kube_flags[@]}" -f -
  1091  
  1092    ### Delete services by id
  1093    # Pre-condition: service-v1-test exists
  1094    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
  1095    # Command
  1096    kubectl delete service redis-master "${kube_flags[@]}"
  1097    kubectl delete service "service-v1-test" "${kube_flags[@]}"
  1098    if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
  1099      kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1100    fi
  1101    # Post-condition: Only the default kubernetes services exist
  1102    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1103  
  1104    ### Create two services
  1105    # Pre-condition: Only the default kubernetes services exist
  1106    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1107    # Command
  1108    kubectl create -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml "${kube_flags[@]}"
  1109    kubectl create -f test/e2e/testing-manifests/guestbook/redis-slave-service.yaml "${kube_flags[@]}"
  1110    # Post-condition: redis-master and redis-slave services are created
  1111    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
  1112  
  1113    ### Custom columns can be specified
  1114    # Command: generate output using custom columns
  1115    output_message=$(kubectl get services -o=custom-columns=NAME:.metadata.name,RSRC:.metadata.resourceVersion 2>&1 "${kube_flags[@]}")
  1116    # Post-condition: should contain name column
  1117    kube::test::if_has_string "${output_message}" 'redis-master'
  1118  
  1119    ### Delete multiple services at once
  1120    # Pre-condition: redis-master and redis-slave services exist
  1121    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
  1122    # Command
  1123    kubectl delete services redis-master redis-slave "${kube_flags[@]}" # delete multiple services at once
  1124    if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
  1125      kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1126    fi
  1127    # Post-condition: Only the default kubernetes services exist
  1128    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1129  
  1130    ### Create an ExternalName service
  1131    # Pre-condition: Only the default kubernetes service exist
  1132    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1133    # Dry-run command
  1134    kubectl create service externalname beep-boop --dry-run=client --external-name bar.com
  1135    kubectl create service externalname beep-boop --dry-run=server --external-name bar.com
  1136    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1137    # Command
  1138    kubectl create service externalname beep-boop --external-name bar.com
  1139    # Post-condition: beep-boop service is created
  1140    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
  1141  
  1142    ### Delete beep-boop service by id
  1143    # Pre-condition: beep-boop service exists
  1144    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
  1145    # Command
  1146    kubectl delete service beep-boop "${kube_flags[@]}"
  1147    if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
  1148      kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1149    fi
  1150    # Post-condition: Only the default kubernetes services exist
  1151    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1152  
  1153    ### Create pod and service
  1154    # Pre-condition: no pod exists
  1155    kube::test::wait_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  1156    # Pre-condition: Only the default kubernetes services exist
  1157    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1158    # Dry-run command
  1159    kubectl run testmetadata --image=nginx --port=80 --expose --dry-run=client
  1160    kubectl run testmetadata --image=nginx --port=80 --expose --dry-run=server
  1161    # Check only the default kubernetes services exist
  1162    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1163    # Command
  1164    kubectl run testmetadata --image=nginx --port=80 --expose
  1165    # Check result
  1166    kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'testmetadata:'
  1167    kube::test::get_object_assert 'service testmetadata' "{{${port_field:?}}}" '80'
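          # --expose created a ClusterIP service named after the pod, targeting --port=80.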
  1168    # Pod has a managedFields entry recording kubectl-run as the field manager
  1169    output_message=$(kubectl get pod testmetadata -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  1170    kube::test::if_has_string "${output_message}" 'kubectl-run'
  1171  
  1172    ### Expose pod as a new service
  1173    # Command
  1174    kubectl expose pod testmetadata  --port=1000 --target-port=80 --type=NodePort --name=exposemetadata --overrides='{ "metadata": { "annotations": { "zone-context": "work" } } } '
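          # --overrides is an inline JSON override merged over the generated service;
          # here it injects the zone-context annotation checked below.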
  1175    # Check result
  1176    kube::test::get_object_assert 'service exposemetadata' "{{.metadata.annotations}}" "map\[zone-context:work\]"
  1177    # Service has a managedFields entry recording kubectl-expose as the field manager
  1178    output_message=$(kubectl get service exposemetadata -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  1179    kube::test::if_has_string "${output_message}" 'kubectl-expose'
  1180  
  1181    # Clean-Up
  1182    # Command
  1183    kubectl delete service exposemetadata testmetadata "${kube_flags[@]}"
  1184    if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
  1185      kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  1186    fi
  1187    kubectl delete pod testmetadata "${kube_flags[@]}"
  1188    if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
  1189      kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  1190    fi
  1191  
  1192    set +o nounset
  1193    set +o errexit
  1194  }
  1195  
  1196  run_rc_tests() {
  1197    set -o nounset
  1198    set -o errexit
  1199  
  1200    create_and_use_new_namespace
  1201    kube::log::status "Testing kubectl(v1:replicationcontrollers)"
  1202  
  1203    ### Create and stop controller, make sure it doesn't leak pods
  1204    # Pre-condition: no replication controller exists
  1205    kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  1206    # Command
  1207    kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
  1208    kubectl delete rc frontend "${kube_flags[@]}"
  1209    # Post-condition: no pods from frontend controller
  1210    kube::test::wait_object_assert "pods -l name=frontend" "{{range.items}}{{$id_field}}:{{end}}" ''
  1211  
  1212    ### Create replication controller frontend from JSON
  1213    # Pre-condition: no replication controller exists
  1214    kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  1215    # Command
  1216    kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
  1217    # Post-condition: frontend replication controller is created
  1218    kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  1219    # Describe command should print detailed information
  1220    kube::test::describe_object_assert rc 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" "GET_HOSTS_FROM:"
  1221    # Describe command should print events information by default
  1222    kube::test::describe_object_events_assert rc 'frontend'
  1223    # Describe command should not print events information when show-events=false
  1224    kube::test::describe_object_events_assert rc 'frontend' false
  1225    # Describe command should print events information when show-events=true
  1226    kube::test::describe_object_events_assert rc 'frontend' true
  1227    # Describe command (resource only) should print detailed information
  1228    kube::test::describe_resource_assert rc "Name:" "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" "GET_HOSTS_FROM:"
  1229    # Describe command should print events information by default
  1230    kube::test::describe_resource_events_assert rc
  1231    # Describe command should not print events information when show-events=false
  1232    kube::test::describe_resource_events_assert rc false
  1233    # Describe command should print events information when show-events=true
  1234    kube::test::describe_resource_events_assert rc true
  1235    # Describe command should respect the chunk size parameter
  1236    kube::test::describe_resource_chunk_size_assert replicationcontrollers events
  1237  
  1238    ### Scale replication controller frontend with current-replicas and replicas
  1239    # Pre-condition: 3 replicas
  1240    kube::test::get_object_assert 'rc frontend' "{{${rc_replicas_field:?}}}" '3'
  1241    # Command
  1242    kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
  1243    # Post-condition: 2 replicas
  1244    kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
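          # --current-replicas is a guard: the scale is applied only when the live count
          # matches. The next stanza exercises the mismatch failure.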
  1245  
  1246    ### Scale replication controller frontend with (wrong) current-replicas and replicas
  1247    # Pre-condition: 2 replicas
  1248    kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
  1249    # Command
  1250    ! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}" || exit 1
  1251    # Post-condition: nothing changed
  1252    kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
  1253  
  1254    ### Scale replication controller frontend with replicas only
  1255    # Pre-condition: 2 replicas
  1256    kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
  1257    # Command
  1258    kubectl scale  --replicas=3 replicationcontrollers frontend "${kube_flags[@]}"
  1259    # Post-condition: 3 replicas
  1260    kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
  1261  
  1262    ### Scale replication controller from JSON with replicas only
  1263    # Pre-condition: 3 replicas
  1264    kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
  1265    # Command
  1266    kubectl scale  --replicas=2 -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
  1267    # Post-condition: 2 replicas
  1268    kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
  1269    # Clean-up
  1270    kubectl delete rc frontend "${kube_flags[@]}"
  1271  
  1272    ### Scale multiple replication controllers
  1273    kubectl create -f test/e2e/testing-manifests/guestbook/legacy/redis-master-controller.yaml "${kube_flags[@]}"
  1274    kubectl create -f test/e2e/testing-manifests/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
  1275    # Command dry-run client
  1276    output_message=$(kubectl scale rc/redis-master rc/redis-slave --replicas=4 --dry-run=client "${kube_flags[@]}")
  1277    # Post-condition dry-run client: replica counts unchanged (1 for redis-master, 2 for redis-slave)
  1278    kube::test::if_has_string "${output_message}" 'replicationcontroller/redis-master scaled (dry run)'
  1279    kube::test::if_has_string "${output_message}" 'replicationcontroller/redis-slave scaled (dry run)'
  1280    kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '1'
  1281    kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '2'
  1282    # Command dry-run server
  1283    output_message=$(kubectl scale rc/redis-master rc/redis-slave --replicas=4 --dry-run=server "${kube_flags[@]}")
  1284    # Post-condition dry-run server: replica counts unchanged (1 for redis-master, 2 for redis-slave)
  1285    kube::test::if_has_string "${output_message}" 'replicationcontroller/redis-master scaled (server dry run)'
  1286    kube::test::if_has_string "${output_message}" 'replicationcontroller/redis-slave scaled (server dry run)'
  1287    kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '1'
  1288    kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '2'
  1289    # Command
  1290    kubectl scale rc/redis-master rc/redis-slave --replicas=4 "${kube_flags[@]}"
  1291    # Post-condition: 4 replicas each
  1292    kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4'
  1293    kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4'
  1294    # Clean-up
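          # redis-{master,slave} is shell brace expansion: both controllers are deleted
          # with a single kubectl invocation.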
  1295    kubectl delete rc redis-{master,slave} "${kube_flags[@]}"
  1296  
  1297    ### Scale a deployment
  1298    kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
  1299    # Command dry-run client
  1300    output_message=$(kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment --dry-run=client)
  1301    # Post-condition: still 3 replicas for nginx-deployment after client dry run
  1302    kube::test::if_has_string "${output_message}" 'nginx-deployment scaled (dry run)'
  1303    kube::test::get_object_assert 'deployment nginx-deployment' "{{${deployment_replicas:?}}}" '3'
  1304    # Command dry-run server
  1305    output_message=$(kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment --dry-run=server)
  1306    # Post-condition: still 3 replicas for nginx-deployment after server dry run
  1307    kube::test::if_has_string "${output_message}" 'nginx-deployment scaled (server dry run)'
  1308    kube::test::get_object_assert 'deployment nginx-deployment' "{{${deployment_replicas:?}}}" '3'
  1309    # Command
  1310    kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
  1311    # Post-condition: 1 replica for nginx-deployment
  1312    kube::test::get_object_assert 'deployment nginx-deployment' "{{${deployment_replicas:?}}}" '1'
  1313    # Clean-up
  1314    kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
  1315  
  1316    ### Scale a deployment with piped input
  1317    kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
  1318    # Command
  1319    kubectl get deployment/nginx-deployment -o json | kubectl scale --replicas=2 -f -
  1320    # Post-condition: 2 replicas for nginx-deployment
  1321    kube::test::get_object_assert 'deployment nginx-deployment' "{{${deployment_replicas:?}}}" '2'
  1322    # Clean-up
  1323    kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
  1324  
  1325    ### Expose deployments by creating a service
  1326    # Uses deployment selectors for created service
  1327    output_message=$(kubectl expose -f test/fixtures/pkg/kubectl/cmd/expose/appsv1deployment.yaml --port 80 2>&1 "${kube_flags[@]}")
  1328    # Post-condition: service created for deployment.
  1329    kube::test::if_has_string "${output_message}" 'service/expose-test-deployment exposed'
  1330    # Clean-up
  1331    kubectl delete service/expose-test-deployment "${kube_flags[@]}"
  1332    # Contains no selectors, should fail.
  1333    output_message=$(! kubectl expose -f test/fixtures/pkg/kubectl/cmd/expose/appsv1deployment-no-selectors.yaml --port 80 2>&1 "${kube_flags[@]}")
  1334    # Post-condition: expose fails because the deployment has no selectors.
  1335    kube::test::if_has_string "${output_message}" 'invalid deployment: no selectors'
  1336  
  1337    ### Expose a deployment as a service
  1338    kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
  1339    # Pre-condition: 3 replicas
  1340    kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '3'
  1341    # Command
  1342    kubectl expose deployment/nginx-deployment
  1343    # Post-condition: service exists and exposes deployment port (80)
  1344    kube::test::get_object_assert 'service nginx-deployment' "{{${port_field:?}}}" '80'
  1345    # Clean-up
  1346    kubectl delete deployment/nginx-deployment service/nginx-deployment "${kube_flags[@]}"
  1347  
  1348    ### Expose replication controller as service
  1349    kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
  1350    # Pre-condition: 3 replicas
  1351    kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
  1352    # Command
  1353    kubectl expose rc frontend --port=80 "${kube_flags[@]}"
  1354    # Post-condition: service exists and the port is unnamed
  1355    kube::test::get_object_assert 'service frontend' "{{${port_name:?}}} {{$port_field}}" '<no value> 80'
  1356    # Command
  1357    kubectl expose service frontend --port=443 --name=frontend-2 "${kube_flags[@]}"
  1358    # Post-condition: service exists and the port is unnamed
  1359    kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" '<no value> 443'
  1360    # Command
  1361    kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  1362    kubectl expose pod valid-pod --port=444 --name=frontend-3 "${kube_flags[@]}"
  1363    # Post-condition: service exists and the port is unnamed
  1364    kube::test::get_object_assert 'service frontend-3' "{{$port_name}} {{$port_field}}" '<no value> 444'
  1365    # Verify that expose service works without specifying a port.
  1366    kubectl expose service frontend --name=frontend-4 "${kube_flags[@]}"
  1367    # Post-condition: service exists with the same port as the original service.
  1368    kube::test::get_object_assert 'service frontend-4' "{{$port_field}}" '80'
  1369    # Cleanup services
  1370    kubectl delete pod valid-pod "${kube_flags[@]}"
  1371    kubectl delete service frontend{,-2,-3,-4} "${kube_flags[@]}"
  1372  
  1373    ### Expose negative invalid resource test
  1374    # Pre-condition: none needed
  1375    # Command
  1376    output_message=$(! kubectl expose nodes 127.0.0.1 2>&1 "${kube_flags[@]}")
  1377    # Post-condition: the error message has "cannot expose" string
  1378    kube::test::if_has_string "${output_message}" 'cannot expose'
  1379  
  1380    ### Try to generate a service with invalid name (exceeding maximum valid size)
  1381    # Command: use an explicit --name longer than 63 characters
  1382    output_message=$(! kubectl expose -f hack/testdata/pod-with-large-name.yaml --name=invalid-large-service-name-that-has-more-than-sixty-three-characters --port=8081 2>&1 "${kube_flags[@]}")
  1383    # Post-condition: should fail due to invalid name
  1384    kube::test::if_has_string "${output_message}" 'metadata.name: Invalid value'
  1385    # Command: default run without --name flag; should succeed by truncating the inherited name
  1386    output_message=$(kubectl expose -f hack/testdata/pod-with-large-name.yaml --port=8081 2>&1 "${kube_flags[@]}")
  1387    # Post-condition: inherited name from pod has been truncated
  1388    kube::test::if_has_string "${output_message}" 'kubernetes-serve-hostname-testing-sixty-three-characters-in-len exposed'
  1389    # Clean-up
  1390    kubectl delete svc kubernetes-serve-hostname-testing-sixty-three-characters-in-len "${kube_flags[@]}"
  1391  
  1392    ### Expose multiport object as a new service
  1393    # Command runs without the --port flag; all ports from the exposed object should be carried over
  1394    output_message=$(kubectl expose -f test/fixtures/doc-yaml/admin/high-availability/etcd.yaml --selector=test=etcd 2>&1 "${kube_flags[@]}")
  1395    # Post-condition: expose succeeded
  1396    kube::test::if_has_string "${output_message}" 'etcd-server exposed'
  1397    # Post-condition: generated service has both ports from the exposed pod
  1398    kube::test::get_object_assert 'service etcd-server' "{{$port_name}} {{$port_field}}" 'port-1 2380'
  1399    kube::test::get_object_assert 'service etcd-server' "{{${second_port_name:?}}} {{${second_port_field:?}}}" 'port-2 2379'
  1400    # Clean-up
  1401    kubectl delete svc etcd-server "${kube_flags[@]}"
  1402  
  1403    ### Delete replication controller with id
  1404    # Pre-condition: frontend replication controller exists
  1405    kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  1406    # Command
  1407    kubectl delete rc frontend "${kube_flags[@]}"
  1408    # Post-condition: no replication controller exists
  1409    kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  1410  
  1411    ### Create two replication controllers
  1412    # Pre-condition: no replication controller exists
  1413    kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  1414    # Command
  1415    kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
  1416    kubectl create -f test/e2e/testing-manifests/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
  1417    # Post-condition: frontend and redis-slave
  1418    kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
  1419  
  1420    ### Delete multiple controllers at once
  1421    # Pre-condition: frontend and redis-slave
  1422    kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
  1423    # Command
  1424    kubectl delete rc frontend redis-slave "${kube_flags[@]}" # delete multiple controllers at once
  1425    # Post-condition: no replication controller exists
  1426    kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  1427  
  1428    ### Auto scale replication controller
  1429    # Pre-condition: no replication controller exists
  1430    kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  1431    # Command
  1432    kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
  1433    kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  1434    # autoscale 1~2 pods, CPU utilization 70%, rc specified by file
  1435    kubectl autoscale -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
  1436    kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '1 2 70'
  1437    kubectl delete hpa frontend "${kube_flags[@]}"
  1438    # autoscale 2~3 pods, no CPU utilization specified, rc specified by name
  1439    kubectl autoscale rc frontend "${kube_flags[@]}" --min=2 --max=3
  1440    kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
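          # The 80 above is the default target CPU utilization used when --cpu-percent
          # is omitted.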
  1441    kubectl delete hpa frontend "${kube_flags[@]}"
  1442    # autoscale without specifying --max should fail
  1443    ! kubectl autoscale rc frontend "${kube_flags[@]}" || exit 1
  1444    # Clean up
  1445    kubectl delete rc frontend "${kube_flags[@]}"
  1446  
  1447    ## Set resource limits/requests of a deployment
  1448    # Pre-condition: no deployment exists
  1449    kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  1450    # Set resources of a local file without talking to the server
  1451    kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --local -o yaml "${kube_flags[@]}"
  1452    ! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run=client -o yaml "${kube_flags[@]}" || exit 1
  1453    # Create a deployment
  1454    kubectl create -f hack/testdata/deployment-multicontainer-resources.yaml "${kube_flags[@]}"
  1455    kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment-resources:'
  1456    kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  1457    kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  1458    # Set the deployment's cpu limits
  1459    kubectl set resources deployment nginx-deployment-resources --limits=cpu=100m "${kube_flags[@]}"
  1460    kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "100m:"
  1461    kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
  1462    # Setting resources of a non-existing container should fail
  1463    ! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m || exit 1
  1464    # Set the limit of a specific container in deployment
  1465    kubectl set resources deployment nginx-deployment-resources -c=nginx --limits=cpu=200m "${kube_flags[@]}"
  1466    kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
  1467    kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
  1468    # Set limits/requests of a deployment specified by a file
  1469    kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m "${kube_flags[@]}"
  1470    kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
  1471    kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
  1472    kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
  1473    # Show that dry-run works on running deployments
  1474    kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --dry-run=client -o yaml "${kube_flags[@]}"
  1475    kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --dry-run=server -o yaml "${kube_flags[@]}"
  1476    ! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml "${kube_flags[@]}" || exit 1
  1477    kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
  1478    kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
  1479    kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
  1480    # Clean up
  1481    kubectl delete deployment nginx-deployment-resources "${kube_flags[@]}"
  1482  
  1483    set +o nounset
  1484    set +o errexit
  1485  }
  1486  
  1487  run_namespace_tests() {
  1488    set -o nounset
  1489    set -o errexit
  1490  
  1491    kube::log::status "Testing kubectl(v1:namespaces)"
  1492    ### Create a new namespace
  1493    # Pre-condition: test namespace does not exist
  1494    output_message=$(! kubectl get ns/my-namespace 2>&1 "${kube_flags[@]}")
  1495    kube::test::if_has_string "${output_message}" ' not found'
  1496    # Dry-run command
  1497    kubectl create namespace my-namespace --dry-run=client
  1498    kubectl create namespace my-namespace --dry-run=server
  1499    output_message=$(! kubectl get ns/my-namespace 2>&1 "${kube_flags[@]}")
  1500    kube::test::if_has_string "${output_message}" ' not found'
  1501    # Command
  1502    kubectl create namespace my-namespace
  1503    # Post-condition: namespace 'my-namespace' is created.
  1504    kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace'
  1505    # Describe command should respect the chunk size parameter
  1506    kube::test::describe_resource_chunk_size_assert namespaces resourcequotas,limitranges
  1507    # Clean up
  1508    kubectl delete namespace my-namespace --wait=false
  1509    # make sure that wait properly waits for finalization
  1510    kubectl wait --for=delete ns/my-namespace
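          # Namespace deletion is asynchronous (finalizers must run first): --wait=false
          # returns immediately, and 'kubectl wait' blocks until the object is gone.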
  1511    output_message=$(! kubectl get ns/my-namespace 2>&1 "${kube_flags[@]}")
  1512    kube::test::if_has_string "${output_message}" ' not found'
  1513  
  1514    kubectl create namespace my-namespace
  1515    kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace'
  1516    output_message=$(! kubectl delete namespace -n my-namespace --all 2>&1 "${kube_flags[@]}")
  1517    kube::test::if_has_string "${output_message}" 'Warning: deleting cluster-scoped resources'
  1518    kube::test::if_has_string "${output_message}" 'namespace "my-namespace" deleted'
  1519  
  1520    ### Quota
  1521    kubectl create namespace quotas
  1522    kube::test::get_object_assert 'namespaces/quotas' "{{$id_field}}" 'quotas'
  1523    kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \"test-quota\" }}found{{end}}{{end}}:" ':'
  1524    # Dry-run command
  1525    kubectl create quota test-quota --dry-run=client --namespace=quotas
  1526    kubectl create quota test-quota --dry-run=server --namespace=quotas
  1527    kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \"test-quota\" }}found{{end}}{{end}}:" ':'
  1528    # Command
  1529    kubectl create quota test-quota --namespace=quotas
  1530    kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \"test-quota\" }}found{{end}}{{end}}:" 'found:'
  1531    # Describe command should respect the chunk size parameter
  1532    kube::test::describe_resource_chunk_size_assert resourcequotas "" "--namespace=quotas"
  1533    # Clean up
  1534    kubectl delete quota test-quota --namespace=quotas
  1535    kubectl delete namespace quotas
  1536  
  1537    ######################
  1538    # Pods in Namespaces #
  1539    ######################
  1540  
  1541    if kube::test::if_supports_resource "${pods:?}" ; then
  1542      ### Create a new namespace
  1543      # Pre-condition: the other namespace does not exist
  1544      kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \"other\" }}found{{end}}{{end}}:" ':'
  1545      # Command
  1546      kubectl create namespace other
  1547      # Post-condition: namespace 'other' is created.
  1548      kube::test::get_object_assert 'namespaces/other' "{{$id_field}}" 'other'
  1549  
  1550      ### Create POD valid-pod in specific namespace
  1551      # Pre-condition: no POD exists
  1552      kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
  1553      # Command
  1554      kubectl create "${kube_flags[@]}" --namespace=other -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  1555      # Post-condition: valid-pod POD is created
  1556      kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  1557      # Post-condition: verify shorthand `-n other` has the same results as `--namespace=other`
  1558      kube::test::get_object_assert 'pods -n other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  1559      # Post-condition: a resource cannot be retrieved by name across all namespaces
  1560      output_message=$(! kubectl get "${kube_flags[@]}" pod valid-pod --all-namespaces 2>&1)
  1561      kube::test::if_has_string "${output_message}" "a resource cannot be retrieved by name across all namespaces"
  1562  
  1563      ### Delete POD valid-pod in specific namespace
  1564      # Pre-condition: valid-pod POD exists
  1565      kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  1566      # Command
  1567      kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0 --force
  1568      # Post-condition: valid-pod POD doesn't exist
  1569      kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
  1570      # Clean up
  1571      kubectl delete namespace other
  1572    fi
  1573  
  1574    set +o nounset
  1575    set +o errexit
  1576  }
  1577  
  1578  run_nodes_tests() {
  1579    set -o nounset
  1580    set -o errexit
  1581  
  1582    kube::log::status "Testing kubectl(v1:nodes)"
  1583  
  1584    kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
  1585  
  1586    kube::test::describe_object_assert nodes "127.0.0.1" "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
  1587    # Describe command should print events information by default
  1588    kube::test::describe_object_events_assert nodes "127.0.0.1"
  1589    # Describe command should not print events information when show-events=false
  1590    kube::test::describe_object_events_assert nodes "127.0.0.1" false
  1591    # Describe command should print events information when show-events=true
  1592    kube::test::describe_object_events_assert nodes "127.0.0.1" true
  1593    # Describe command (resource only) should print detailed information
  1594    kube::test::describe_resource_assert nodes "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
  1595    # Describe command should print events information by default
  1596    kube::test::describe_resource_events_assert nodes
  1597    # Describe command should not print events information when show-events=false
  1598    kube::test::describe_resource_events_assert nodes false
  1599    # Describe command should print events information when show-events=true
  1600    kube::test::describe_resource_events_assert nodes true
  1601    # Describe command should respect the chunk size parameter
  1602    kube::test::describe_resource_chunk_size_assert nodes pods,events
  1603  
  1604    ### kubectl patch update can mark node unschedulable
  1605    # Pre-condition: node is schedulable
  1606    kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  1607    kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":true}}'
  1608    # Post-condition: node is unschedulable
  1609    kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
  1610    kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":null}}'
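          # In the merge patch, setting unschedulable to null deletes the field,
          # restoring the default schedulable state asserted below.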
  1611    # Post-condition: node is schedulable
  1612    kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  1613  
  1614    # Check the webhook token authentication endpoint. kubectl doesn't display the
  1615    # returned object, so this isn't very useful, but it proves the endpoint works.
  1616    kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1.json --validate=false
  1617  
  1618    set +o nounset
  1619    set +o errexit
  1620  }
  1621  
  1622  run_pod_templates_tests() {
  1623    set -o nounset
  1624    set -o errexit
  1625  
  1626    create_and_use_new_namespace
  1627    kube::log::status "Testing pod templates"
  1628  
  1629    ### Create PODTEMPLATE
  1630    # Pre-condition: no PODTEMPLATE
  1631    kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" ''
  1632    # Command
  1633    kubectl create -f test/fixtures/doc-yaml/user-guide/walkthrough/podtemplate.json "${kube_flags[@]}"
  1634    # Post-condition: nginx PODTEMPLATE is available
  1635    kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
  1636  
  1637    ### Printing pod templates works
  1638    kubectl get podtemplates "${kube_flags[@]}"
  1639    grep -q nginx <<< "$(kubectl get podtemplates -o yaml "${kube_flags[@]}")"
  1640  
  1641    ### Delete nginx pod template by name
  1642    # Pre-condition: nginx pod template is available
  1643    kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
  1644    # Describe command should respect the chunk size parameter
  1645    kube::test::describe_resource_chunk_size_assert podtemplates events
  1646    # Command
  1647    kubectl delete podtemplate nginx "${kube_flags[@]}"
  1648    # Post-condition: No templates exist
  1649    kube::test::get_object_assert podtemplate "{{range.items}}{{.metadata.name}}:{{end}}" ''
  1650  
  1651    set +o nounset
  1652    set +o errexit
  1653  }