github.com/containers/podman/v5@v5.1.0-rc1/test/system/700-play.bats

#!/usr/bin/env bats   -*- bats -*-
#
# Test podman play
#

load helpers
load helpers.network
load helpers.registry

# This is a long ugly way to clean up pods and remove the pause image
function teardown() {
    run_podman pod rm -t 0 -f -a
    run_podman rm -t 0 -f -a
    run_podman image list --format '{{.ID}} {{.Repository}}'
    while read id name; do
        if [[ "$name" =~ /podman-pause ]]; then
            run_podman rmi $id
        fi
    done <<<"$output"

    basic_teardown
}

testYaml="
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: test
  name: test_pod
spec:
  containers:
  - command:
    - sleep
    - \"100\"
    env:
    - name: PATH
      value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
    - name: TERM
      value: xterm
    - name: container
      value: podman
    image: $IMAGE
    name: test
    resources: {}
    securityContext:
      runAsUser: 1000
      runAsGroup: 3000
      fsGroup: 2000
      allowPrivilegeEscalation: true
      capabilities: {}
      privileged: false
      seLinuxOptions:
         level: \"s0:c1,c2\"
      readOnlyRootFilesystem: false
    volumeMounts:
    - mountPath: /testdir:z
      name: home-podman-testdir
    workingDir: /
  volumes:
  - hostPath:
      path: TESTDIR
      type: Directory
    name: home-podman-testdir
status: {}
"

RELABEL="system_u:object_r:container_file_t:s0"
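# (Expected SELinux context on the host dir after "kube play" relabels the
#  hostPath volume; the ":z" suffix on mountPath above requests the relabel.)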

@test "podman kube with stdin" {
    TESTDIR=$PODMAN_TMPDIR/testdir
    mkdir -p $TESTDIR
    echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml

    run_podman kube play - < $PODMAN_TMPDIR/test.yaml
    if selinux_enabled; then
       run ls -Zd $TESTDIR
       is "$output" "${RELABEL} $TESTDIR" "selinux relabel should have happened"
    fi

    # Make sure that the K8s pause image isn't pulled but the local podman-pause is built.
    run_podman images
    run_podman 1 image exists k8s.gcr.io/pause
    run_podman 1 image exists registry.k8s.io/pause
    run_podman image exists $(pause_image)

    run_podman stop -a -t 0
    run_podman pod rm -t 0 -f test_pod
}

@test "podman play" {
    # Testing that the "podman play" cmd still works now that
    # "podman kube" is an option.
    TESTDIR=$PODMAN_TMPDIR/testdir
    mkdir -p $TESTDIR
    echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
    run_podman play kube $PODMAN_TMPDIR/test.yaml
    if selinux_enabled; then
       run ls -Zd $TESTDIR
       is "$output" "${RELABEL} $TESTDIR" "selinux relabel should have happened"
    fi

    # Now rerun twice to make sure nothing gets removed
    run_podman 125 play kube $PODMAN_TMPDIR/test.yaml
    is "$output" ".* is in use: pod already exists"
    run_podman 125 play kube $PODMAN_TMPDIR/test.yaml
    is "$output" ".* is in use: pod already exists"

    run_podman stop -a -t 0
    run_podman pod rm -t 0 -f test_pod
}

# helper function: writes a yaml file with customizable values
function _write_test_yaml() {
    local outfile=$PODMAN_TMPDIR/test.yaml

    # Function args must all be of the form 'keyword=value' (value may be null)
    local annotations=
    local labels="app: test"
    local name="test_pod"
    local command=""
    local image="$IMAGE"
    local ctrname="test"
    for i;do
        # This will error on 'foo=' (no value). That's totally OK.
        local value=$(expr "$i" : '[^=]*=\(.*\)')
        case "$i" in
            annotations=*)   annotations="$value" ;;
            labels=*)        labels="$value"      ;;
            name=*)          name="$value"        ;;
            command=*)       command="$value"     ;;
            image=*)         image="$value"       ;;
            ctrname=*)       ctrname="$value"     ;;
            *)               die "_write_test_yaml: cannot grok '$i'" ;;
        esac
    done

    # These three header lines are common to all yamls.
    # Note: use >> (append), not > (overwrite), for multi-pod test
    cat >>$outfile <<EOF
apiVersion: v1
kind: Pod
metadata:
EOF

    if [[ -n "$annotations" ]]; then
        echo "  annotations:"   >>$outfile
        echo "    $annotations" >>$outfile
    fi
    if [[ -n "$labels" ]]; then
        echo "  labels:"        >>$outfile
        echo "    $labels"      >>$outfile
    fi
    if [[ -n "$name" ]]; then
        echo "  name: $name"    >>$outfile
    fi

    # We always have spec and container lines...
    echo "spec:"                >>$outfile
    echo "  containers:"        >>$outfile
    # ...but command is optional. If absent, assume our caller will fill it in.
    if [[ -n "$command" ]]; then
        cat <<EOF               >>$outfile
  - command:
    - $command
    image: $image
    name: $ctrname
    resources: {}
status: {}
EOF
    fi
}
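
# Example (illustrative): "_write_test_yaml name=pod1 ctrname=ctr1 command=top"
# appends a pod "pod1" with a single container "ctr1" running "top".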

@test "podman play --service-container" {
    skip_if_remote "service containers only work locally"

    # Create the YAML file
    yaml_source="$PODMAN_TMPDIR/test.yaml"
    _write_test_yaml command=top

    # Run `play kube` in the background as it will wait for the service
    # container to exit.
    timeout --foreground -v --kill=10 60 \
        $PODMAN play kube --service-container=true --log-driver journald $yaml_source &>/dev/null &

    # Wait for the container to be running
    container_a=test_pod-test
    container_running=
    for i in $(seq 1 20); do
        run_podman "?" container wait $container_a --condition="running"
        if [[ $status == 0 ]]; then
            container_running=1
            break
        fi
        sleep 0.5
        # Just for debugging
        run_podman ps -a
    done
    if [[ -z "$container_running" ]]; then
        die "container $container_a did not start"
    fi

    # The name of the service container is predictable: the first 12 characters
    # of the hash of the YAML file followed by the "-service" suffix
    yaml_sha=$(sha256sum $yaml_source)
    service_container="${yaml_sha:0:12}-service"
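    # (Illustration: if the yaml hashed to "3fc9b689459d...", the service
    #  container would be named "3fc9b689459d-service".)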

    # Make sure that the service container exists and runs.
    run_podman container inspect $service_container --format "{{.State.Running}}"
    is "$output" "true"

    run_podman container inspect $service_container --format '{{.Config.StopTimeout}}'
    is "$output" "10" "StopTimeout should be initialized to 10"

    # Stop the *main* container and make sure that
    #  1) The pod transitions to Exited
    #  2) The service container is stopped
    #  3) The service container is marked as a service container
    run_podman stop test_pod-test
    _ensure_pod_state test_pod Exited
    _ensure_container_running $service_container false
    run_podman container inspect $service_container --format "{{.IsService}}"
    is "$output" "true"

    # Restart the pod, make sure the service is running again
    run_podman pod restart test_pod
    run_podman container inspect $service_container --format "{{.State.Running}}"
    is "$output" "true"

    # Check for an error when trying to remove the service container
    run_podman 125 container rm $service_container
    is "$output" "Error: container .* is the service container of pod(s) .* and cannot be removed without removing the pod(s)"
    run_podman 125 container rm --force $service_container
    is "$output" "Error: container .* is the service container of pod(s) .* and cannot be removed without removing the pod(s)"

    # Kill the pod and make sure the service is not running
    run_podman pod kill test_pod
    _ensure_container_running $service_container false

    # Remove the pod and make sure the service is removed along with it
    run_podman pod rm test_pod
    run_podman 1 container exists $service_container
}

@test "podman kube --network" {
    TESTDIR=$PODMAN_TMPDIR/testdir
    mkdir -p $TESTDIR
    echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml

    run_podman kube play --network host $PODMAN_TMPDIR/test.yaml
    is "$output" "Pod:.*" "podman kube play should work with --network host"

    run_podman pod inspect --format "{{.InfraConfig.HostNetwork}}" test_pod
    is "$output" "true" ".InfraConfig.HostNetwork"
    run_podman stop -a -t 0
    run_podman pod rm -t 0 -f test_pod

    if has_slirp4netns; then
        run_podman kube play --network slirp4netns:port_handler=slirp4netns $PODMAN_TMPDIR/test.yaml
        run_podman pod inspect --format {{.InfraContainerID}} "${lines[1]}"
        infraID="$output"
        run_podman container inspect --format "{{.HostConfig.NetworkMode}}" $infraID
        is "$output" "slirp4netns" "network mode slirp4netns is set for the container"
    fi

    run_podman stop -a -t 0
    run_podman pod rm -t 0 -f test_pod

    run_podman kube play --network none $PODMAN_TMPDIR/test.yaml
    run_podman pod inspect --format {{.InfraContainerID}} "${lines[1]}"
    infraID="$output"
    run_podman container inspect --format "{{.HostConfig.NetworkMode}}" $infraID
    is "$output" "none" "network mode none is set for the container"

    run_podman kube down $PODMAN_TMPDIR/test.yaml
    run_podman 125 inspect test_pod-test
    is "$output" ".*Error: no such object: \"test_pod-test\""
    run_podman pod rm -a
    run_podman rm -a
}

@test "podman kube play read-only" {
    YAML=$PODMAN_TMPDIR/test.yml

    # --restart=no is crucial: without that, the "podman wait" below
    # will spin for indeterminate time.
    run_podman create --pod new:pod1         --restart=no --name test1 $IMAGE touch /testrw
    run_podman create --pod pod1 --read-only --restart=no --name test2 $IMAGE touch /testro
    run_podman create --pod pod1 --read-only --restart=no --name test3 $IMAGE sh -c "echo '#!echo hi' > /tmp/testtmp; chmod +x /tmp/testtmp; /tmp/testtmp"
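    # (test3 works because podman mounts a writable tmpfs on /tmp by default,
    #  even when the root filesystem is read-only.)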

    # Generate and run from yaml. (The "cat" is for debugging failures)
    run_podman kube generate pod1 -f $YAML
    cat $YAML
    run_podman kube play --replace $YAML

    # Wait for all containers and check their exit statuses
    run_podman wait pod1-test1 pod1-test2 pod1-test3
    is "${lines[0]}" 0 "exit status: touch /file on read/write container"
    is "${lines[1]}" 1 "exit status: touch /file on read-only container"
    is "${lines[2]}" 0 "exit status: write and execute under /tmp is always ok, even on read-only container"

    # Confirm config settings
    run_podman container inspect --format '{{.HostConfig.ReadonlyRootfs}}' pod1-test1 pod1-test2 pod1-test3
    is "${lines[0]}" "false"  "ReadonlyRootfs - container 1"
    is "${lines[1]}" "true"   "ReadonlyRootfs - container 2"
    is "${lines[2]}" "true"   "ReadonlyRootfs - container 3"

    # Clean up
    run_podman kube down - < $YAML
    run_podman 1 container exists pod1-test1
    run_podman 1 container exists pod1-test2
    run_podman 1 container exists pod1-test3
}

@test "podman kube play read-only from containers.conf" {
    containersconf=$PODMAN_TMPDIR/containers.conf
    cat >$containersconf <<EOF
[containers]
read_only=true
EOF

    YAML=$PODMAN_TMPDIR/test.yml

    # --restart=no is crucial: without that, the "podman wait" below
    # will spin for indeterminate time.
    CONTAINERS_CONF_OVERRIDE="$containersconf" run_podman create --pod new:pod1 --read-only=false --restart=no --name test1 $IMAGE touch /testrw
    CONTAINERS_CONF_OVERRIDE="$containersconf" run_podman create --pod pod1                       --restart=no --name test2 $IMAGE touch /testro
    CONTAINERS_CONF_OVERRIDE="$containersconf" run_podman create --pod pod1                       --restart=no --name test3 $IMAGE touch /tmp/testtmp

    # Inspect settings in created containers
    CONTAINERS_CONF_OVERRIDE="$containersconf" run_podman container inspect --format '{{.HostConfig.ReadonlyRootfs}}' test1 test2 test3
    is "${lines[0]}" "false"  "ReadonlyRootfs - container 1, created"
    is "${lines[1]}" "true"   "ReadonlyRootfs - container 2, created"
    is "${lines[2]}" "true"   "ReadonlyRootfs - container 3, created"

    # Now generate and run kube.yaml on a machine without the defaults set
    CONTAINERS_CONF_OVERRIDE="$containersconf" run_podman kube generate pod1 -f $YAML
    cat $YAML

    run_podman kube play --replace $YAML

    # Wait for all containers and check their exit statuses
    run_podman wait pod1-test1 pod1-test2 pod1-test3
    is "${lines[0]}" 0 "exit status: touch /file on read/write container"
    is "${lines[1]}" 1 "exit status: touch /file on read-only container"
    is "${lines[2]}" 0 "exit status: touch on /tmp is always ok, even on read-only container"

    # Confirm settings again
    run_podman container inspect --format '{{.HostConfig.ReadonlyRootfs}}' pod1-test1 pod1-test2 pod1-test3
    is "${lines[0]}" "false"  "ReadonlyRootfs - container 1, post-run"
    is "${lines[1]}" "true"   "ReadonlyRootfs - container 2, post-run"
    is "${lines[2]}" "true"   "ReadonlyRootfs - container 3, post-run"

    # Clean up
    run_podman kube down - < $YAML
    run_podman 1 container exists pod1-test1
    run_podman 1 container exists pod1-test2
    run_podman 1 container exists pod1-test3
}

@test "podman play with user from image" {
    TESTDIR=$PODMAN_TMPDIR/testdir
    mkdir -p $TESTDIR

    _write_test_yaml command=id image=userimage

cat > $PODMAN_TMPDIR/Containerfile << _EOF
from $IMAGE
USER bin
_EOF

    # Unset the PATH during build and make sure that all default env variables
    # are correctly set for the created container.
    run_podman build --unsetenv PATH -t userimage $PODMAN_TMPDIR
    run_podman image inspect userimage --format "{{.Config.Env}}"
    is "$output" "\[\]" "image does not set PATH - env is empty"

    run_podman play kube --start=false $PODMAN_TMPDIR/test.yaml
    run_podman inspect --format "{{ .Config.User }}" test_pod-test
    is "$output" bin "expect container within pod to run as the bin user"
    run_podman inspect --format "{{ .Config.Env }}" test_pod-test
    is "$output" ".*PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin.*" "expect PATH to be set"
    is "$output" ".*container=podman.*" "expect container to be set"

    run_podman stop -a -t 0
    run_podman pod rm -t 0 -f test_pod
    run_podman rmi -f userimage:latest
}

@test "podman play --build --context-dir" {
    skip_if_remote "--build is not supported with podman-remote"

    mkdir -p $PODMAN_TMPDIR/userimage
    cat > $PODMAN_TMPDIR/userimage/Containerfile << _EOF
from $IMAGE
USER bin
_EOF

    _write_test_yaml command=id image=quay.io/libpod/userimage
    run_podman 125 play kube --build --start=false $PODMAN_TMPDIR/test.yaml
    run_podman play kube --replace --context-dir=$PODMAN_TMPDIR --build --start=false $PODMAN_TMPDIR/test.yaml
    run_podman inspect --format "{{ .Config.User }}" test_pod-test
    is "$output" bin "expect container within pod to run as the bin user"

    run_podman stop -a -t 0
    run_podman pod rm -t 0 -f test_pod
    run_podman rmi -f userimage:latest

    cd $PODMAN_TMPDIR
    run_podman play kube --replace --build --start=false $PODMAN_TMPDIR/test.yaml
    run_podman inspect --format "{{ .Config.User }}" test_pod-test
    is "$output" bin "expect container within pod to run as the bin user"

    run_podman stop -a -t 0
    run_podman pod rm -t 0 -f test_pod
    run_podman rmi -f userimage:latest
}

# Occasionally a remnant storage container is left behind which causes
# podman play kube --replace to fail. This test creates a conflicting
# storage container name using buildah to make sure --replace still
# functions properly by removing the storage container.
@test "podman kube play --replace external storage" {
    TESTDIR=$PODMAN_TMPDIR/testdir
    mkdir -p $TESTDIR
    echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
    run_podman play kube $PODMAN_TMPDIR/test.yaml
    # Force removal of container
    run_podman rm --force -t0 test_pod-test
    # Create external container using buildah with same name
    buildah from --name test_pod-test $IMAGE
    # --replace deletes the buildah container and replaces it with a new one
    run_podman play kube --replace $PODMAN_TMPDIR/test.yaml

    run_podman stop -a -t 0
    run_podman pod rm -t 0 -f test_pod
    run_podman rmi -f userimage:latest
}

@test "podman kube --annotation" {
    TESTDIR=$PODMAN_TMPDIR/testdir
    RANDOMSTRING=$(random_string 15)
    ANNOTATION_WITH_COMMA="comma,$(random_string 5)"
    mkdir -p $TESTDIR
    echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
    run_podman kube play --annotation "name=$RANDOMSTRING"  \
        --annotation "anno=$ANNOTATION_WITH_COMMA" $PODMAN_TMPDIR/test.yaml
    run_podman inspect --format "{{ .Config.Annotations }}" test_pod-test
    is "$output" ".*name:$RANDOMSTRING" "Annotation should be added to pod"
    is "$output" ".*anno:$ANNOTATION_WITH_COMMA" "Annotation with comma should be added to pod"

    # invalid annotation
    run_podman 125 kube play --annotation "val" $PODMAN_TMPDIR/test.yaml
    assert "$output" == "Error: annotation \"val\" must include an '=' sign" "invalid annotation error"

    run_podman stop -a -t 0
    run_podman pod rm -t 0 -f test_pod
}

@test "podman play Yaml deprecated --no-trunc annotation" {
   RANDOMSTRING=$(random_string 65)
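   # (65 characters exceeds the 63-character annotation limit that the
   #  deprecated --no-trunc flag bypasses.)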

   _write_test_yaml "annotations=test: ${RANDOMSTRING}" command=id
   run_podman play kube --no-trunc - < $PODMAN_TMPDIR/test.yaml
}

@test "podman kube play - default log driver" {
    TESTDIR=$PODMAN_TMPDIR/testdir
    mkdir -p $TESTDIR
    echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
    # Get the default log driver
    run_podman info --format "{{.Host.LogDriver}}"
    default_driver=$output

    # Make sure that the default log driver is used
    run_podman kube play $PODMAN_TMPDIR/test.yaml
    run_podman inspect --format "{{.HostConfig.LogConfig.Type}}" test_pod-test
    is "$output" "$default_driver" "play kube uses default log driver"

    run_podman kube down $PODMAN_TMPDIR/test.yaml
    run_podman 125 inspect test_pod-test
    is "$output" ".*Error: no such object: \"test_pod-test\""
    run_podman pod rm -a
    run_podman rm -a
}

@test "podman kube play - URL" {
    TESTDIR=$PODMAN_TMPDIR/testdir
    mkdir -p $TESTDIR
    echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
    echo READY                                      > $PODMAN_TMPDIR/ready

    HOST_PORT=$(random_free_port)
    SERVER=http://127.0.0.1:$HOST_PORT

    run_podman run -d --name myyaml -p "$HOST_PORT:80" \
               -v $PODMAN_TMPDIR/test.yaml:/var/www/testpod.yaml:Z \
               -v $PODMAN_TMPDIR/ready:/var/www/ready:Z \
               -w /var/www \
               $IMAGE /bin/busybox-extras httpd -f -p 80

    wait_for_port 127.0.0.1 $HOST_PORT
    wait_for_command_output "curl -s -S $SERVER/ready" "READY"

    run_podman kube play $SERVER/testpod.yaml
    run_podman inspect test_pod-test --format "{{.State.Running}}"
    is "$output" "true"
    run_podman kube down $SERVER/testpod.yaml
    run_podman 125 inspect test_pod-test
    is "$output" ".*Error: no such object: \"test_pod-test\""

    run_podman pod rm -a -f
    run_podman rm -a -f -t0
}

@test "podman play with init container" {
    _write_test_yaml command=
    cat >>$PODMAN_TMPDIR/test.yaml <<EOF
  - command:
    - ls
    - /dev/shm/test1
    image: $IMAGE
    name: testCtr
  initContainers:
  - command:
    - touch
    - /dev/shm/test1
    image: $IMAGE
    name: initCtr
EOF
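    # initCtr must run to completion (creating /dev/shm/test1) before testCtr
    # starts, so testCtr's "ls" is expected to succeed.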

    run_podman kube play $PODMAN_TMPDIR/test.yaml
    assert "$output" !~ "level=" "init containers should not generate logrus.Error"
    run_podman inspect --format "{{.State.ExitCode}}" test_pod-testCtr
    is "$output" "0" "init container should have created /dev/shm/test1"

    run_podman kube down $PODMAN_TMPDIR/test.yaml
}

@test "podman kube play - hostport" {
    HOST_PORT=$(random_free_port)
    _write_test_yaml
    cat >>$PODMAN_TMPDIR/test.yaml <<EOF
    - name: server
      image: $IMAGE
      ports:
        - name: hostp
          hostPort: $HOST_PORT
EOF

    run_podman kube play $PODMAN_TMPDIR/test.yaml
    run_podman pod inspect test_pod --format "{{.InfraConfig.PortBindings}}"
    assert "$output" = "map[$HOST_PORT/tcp:[{0.0.0.0 $HOST_PORT}]]"
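    # (With no containerPort given, kube play binds the hostPort to the same
    #  port inside the container, hence the symmetric mapping above.)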
    run_podman kube down $PODMAN_TMPDIR/test.yaml

    run_podman pod rm -a -f
    run_podman rm -a -f
}

@test "podman kube play - multi-pod YAML" {
    skip_if_remote "service containers only work locally"
    skip_if_journald_unavailable

    # Create the YAML file, with two pods, each with one container
    yaml_source="$PODMAN_TMPDIR/test.yaml"
    for n in 1 2;do
        _write_test_yaml labels="app: pod$n" name="pod$n" ctrname="ctr$n" command=top

        # Separator between two yaml halves
        if [[ $n = 1 ]]; then
            echo "---" >>$yaml_source
        fi
    done

    # Run `play kube` in the background as it will wait for the service
    # container to exit.
    timeout --foreground -v --kill=10 60 \
        $PODMAN play kube --service-container=true --log-driver journald $yaml_source &>/dev/null &

    # The name of the service container is predictable: the first 12 characters
    # of the hash of the YAML file followed by the "-service" suffix
    yaml_sha=$(sha256sum $yaml_source)
    service_container="${yaml_sha:0:12}-service"
    # Wait for the containers to be running
    container_1=pod1-ctr1
    container_2=pod2-ctr2
    containers_running=
    for i in $(seq 1 20); do
        run_podman "?" container wait $container_1 $container_2 $service_container --condition="running"
        if [[ $status == 0 ]]; then
            containers_running=1
            break
        fi
        sleep 0.5
        # Just for debugging
        run_podman ps -a
    done
    if [[ -z "$containers_running" ]]; then
        die "container $container_1, $container_2 and/or $service_container did not start"
    fi

    # Stop the pods, make sure that no ugly error logs show up and that the
    # service container will implicitly get stopped as well
    run_podman pod stop pod1 pod2
    assert "$output" !~ "Stopping"
    _ensure_container_running $service_container false

    run_podman kube down $yaml_source
}

@test "podman kube generate filetype" {
    YAML=$PODMAN_TMPDIR/test.yml
    run_podman create --pod new:pod1 --security-opt label=level:s0:c1,c2 --security-opt label=filetype:usr_t -v myvol:/myvol --name test1 $IMAGE true
    run_podman kube generate pod1 -f $YAML
    run cat $YAML
    is "$output" ".*filetype: usr_t" "Generated YAML file should contain filetype usr_t"
    run_podman pod rm --force pod1
    run_podman volume rm -t -1 myvol --force

    run_podman kube play $YAML
    if selinux_enabled; then
        run_podman inspect pod1-test1 --format "{{ .MountLabel }}"
        is "$output" "system_u:object_r:usr_t:s0:c1,c2" "Generated container should use filetype usr_t"
        run_podman volume inspect myvol --format '{{ .Mountpoint }}'
        path=${output}
        run ls -Zd $path
        is "$output" "system_u:object_r:usr_t:s0 $path" "volume should be labeled with usr_t type"
    fi
    run_podman kube down $YAML
    run_podman volume rm myvol --force
}

# kube play --wait=true, where we clean up the created containers, pods, and volumes when a kill or sigterm is triggered
@test "podman kube play --wait with siginterrupt" {
    cname=c$(random_string 15)
    fname="/tmp/play_kube_wait_$(random_string 6).yaml"
    run_podman container create --name $cname $IMAGE top
    run_podman kube generate -f $fname $cname

    # delete the container we generated from
    run_podman rm -f $cname

    # force a timeout to happen so that the kube play command is killed
    # and expect the timeout code 124 to happen so that we can clean up
    local t0=$SECONDS
    PODMAN_TIMEOUT=15 run_podman 124 kube play --wait $fname
    local t1=$SECONDS
    local delta_t=$((t1 - t0))
    assert $delta_t -le 20 \
           "podman kube play did not get killed within 20 seconds"

    # there should be no containers running or created
    run_podman ps -aq
    is "$output" "" "There should be no containers"
    run_podman rmi $(pause_image)
}

@test "podman kube play --wait - wait for pod to exit" {
    fname="/tmp/play_kube_wait_$(random_string 6).yaml"
    echo "
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: test
  name: test_pod
spec:
  restartPolicy: Never
  containers:
    - name: server
      image: $IMAGE
      command:
      - echo
      - "hello"
" > $fname

    run_podman kube play --wait $fname

    # debug to see what container is being left behind after the cleanup
    # there should be no containers running or created
    run_podman ps -a --noheading
    is "$output" "" "There should be no containers"
    run_podman pod ps
    run_podman rmi $(pause_image)
}

@test "podman kube play with configmaps" {
    configmap_file=${PODMAN_TMPDIR}/play_kube_configmap_configmaps$(random_string 6),withcomma.yaml
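    # (The comma in the file name is deliberate: --configmap must not split
    #  its argument on commas.)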
    echo "
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: foo
data:
  value: foo
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: bar
data:
  value: bar
" > $configmap_file

    pod_file=${PODMAN_TMPDIR}/play_kube_configmap_pod$(random_string 6).yaml
    echo "
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: test
  name: test_pod
spec:
  restartPolicy: Never
  containers:
  - name: server
    image: $IMAGE
    env:
    - name: FOO
      valueFrom:
        configMapKeyRef:
          name: foo
          key: value
    - name: BAR
      valueFrom:
        configMapKeyRef:
          name: bar
          key: value
    command:
    - /bin/sh
    args:
    - -c
    - "echo \$FOO:\$BAR"
" > $pod_file

    run_podman kube play --configmap=$configmap_file $pod_file
    run_podman wait test_pod-server

    # systemd logs are unreliable; we may need to retry a few times
    # https://github.com/systemd/systemd/issues/28650
    local retries=10
    while [[ $retries -gt 0 ]]; do
        run_podman logs test_pod-server
        test -n "$output" && break
        sleep 0.1
        retries=$((retries - 1))
    done
    assert "$retries" -gt 0 "Timed out waiting for podman logs"
    assert "$output" = "foo:bar" "output from podman logs"

    run_podman kube down $pod_file
}

@test "podman kube with --authfile=/tmp/bogus" {
    TESTDIR=$PODMAN_TMPDIR/testdir
    mkdir -p $TESTDIR
    echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
    bogus=$PODMAN_TMPDIR/bogus-authfile

    run_podman 125 kube play --authfile=$bogus - < $PODMAN_TMPDIR/test.yaml
    is "$output" "Error: credential file is not accessible: faccessat $bogus: no such file or directory" \
           "kube play should fail with no such file"
}

@test "podman kube play with umask from containers.conf" {
    skip_if_remote "remote does not support CONTAINERS_CONF*"
    YAML=$PODMAN_TMPDIR/test.yaml

    containersConf=$PODMAN_TMPDIR/containers.conf
    touch $containersConf
    cat >$containersConf <<EOF
[containers]
umask = "0472"
EOF

    ctr="ctr"
    ctrInPod="ctr-pod-ctr"

    run_podman create --restart never --name $ctr $IMAGE sh -c "touch /umask-test;stat -c '%a' /umask-test"
    run_podman kube generate -f $YAML $ctr
    CONTAINERS_CONF_OVERRIDE="$containersConf" run_podman kube play $YAML
    run_podman container inspect --format '{{ .Config.Umask }}' $ctrInPod
    is "${output}" "0472"
    # Confirm that umask actually takes effect. Might take a second or so.
    local retries=10
    while [[ $retries -gt 0 ]]; do
        run_podman logs $ctrInPod
        test -n "$output" && break
        sleep 0.1
        retries=$((retries - 1))
    done
    assert "$retries" -gt 0 "Timed out waiting for container output"
    assert "$output" = "204" "stat() on created file"
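    # (umask 0472 masks the default 0666 file mode: 0666 & ~0472 = 0204,
    #  which stat %a prints as "204".)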

    run_podman kube down $YAML
    run_podman pod rm -a
    run_podman rm -a
}

@test "podman kube generate tmpfs on /tmp" {
    KUBE=$PODMAN_TMPDIR/kube.yaml
    run_podman create --name test $IMAGE sleep 100
    run_podman kube generate test -f $KUBE
    run_podman kube play $KUBE
    run_podman exec test-pod-test sh -c "mount | grep /tmp"
    assert "$output" !~ "noexec" "mounts on /tmp should not be noexec"
    run_podman kube down $KUBE
    run_podman pod rm -a -f -t 0
    run_podman rm -a -f -t 0
}

@test "podman kube play - pull policy" {
    skip_if_remote "pull debug logs only work locally"

    yaml_source="$PODMAN_TMPDIR/test.yaml"
    _write_test_yaml command=true

    # Exploit a debug message to make sure the expected pull policy is used
    run_podman --debug kube play $yaml_source
    assert "$output" =~ "Pulling image $IMAGE \(policy\: missing\)" "default pull policy is missing"
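    # ("missing" pulls only if the image is absent locally; "newer", asserted
    #  below for the :latest tag, also checks the registry for updates.)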
    run_podman kube down $yaml_source

    local_image="localhost/name:latest"
    run_podman tag $IMAGE $local_image
    rm $yaml_source
    _write_test_yaml command=true image=$local_image

    run_podman --debug kube play $yaml_source
    assert "$output" =~ "Pulling image $local_image \(policy\: newer\)" "pull policy is set to newer when pulling latest tag"
    run_podman kube down $yaml_source

    run_podman rmi $local_image
}

@test "podman kube play healthcheck should wait initialDelaySeconds before updating status (healthy)" {
    fname="$PODMAN_TMPDIR/play_kube_healthy_$(random_string 6).yaml"
    echo "
apiVersion: v1
kind: Pod
metadata:
  labels:
  name: liveness-exec
spec:
  containers:
  - name: liveness
    image: $IMAGE
    args:
    - /bin/sh
    - -c
    - touch /tmp/healthy && sleep 100
    livenessProbe:
      exec:
        command:
        - cat
        - /tmp/healthy
      initialDelaySeconds: 3
      failureThreshold: 1
      periodSeconds: 1
" > $fname

    run_podman kube play $fname
    ctrName="liveness-exec-liveness"

    # Keep checking status. For the first 2 seconds it must be 'starting'
    t0=$SECONDS
    while [[ $SECONDS -le $((t0 + 2)) ]]; do
        run_podman inspect $ctrName --format "1-{{.State.Health.Status}}"
        assert "$output" == "1-starting" "Health.Status at $((SECONDS - t0))"
        sleep 0.5
    done

    # After 3 seconds it may take another second to go healthy. Wait.
    t0=$SECONDS
    while [[ $SECONDS -le $((t0 + 3)) ]]; do
        run_podman inspect $ctrName --format "2-{{.State.Health.Status}}"
        if [[ "$output" = "2-healthy" ]]; then
            break;
        fi
        sleep 0.5
    done
    assert "$output" == "2-healthy" "After 3 seconds"

    run_podman kube down $fname
    run_podman pod rm -a
    run_podman rm -a
}

@test "podman kube play healthcheck should wait initialDelaySeconds before updating status (unhealthy)" {
    fname="$PODMAN_TMPDIR/play_kube_unhealthy_$(random_string 6).yaml"
    echo "
apiVersion: v1
kind: Pod
metadata:
  labels:
  name: liveness-exec
spec:
  containers:
  - name: liveness
    image: $IMAGE
    args:
    - /bin/sh
    - -c
    - touch /tmp/healthy && sleep 100
    livenessProbe:
      exec:
        command:
        - cat
        - /tmp/randomfile
      initialDelaySeconds: 3
      failureThreshold: 1
      periodSeconds: 1
" > $fname

    run_podman kube play $fname
    ctrName="liveness-exec-liveness"

    # Keep checking status. For the first 2 seconds it must be 'starting'
    t0=$SECONDS
    while [[ $SECONDS -le $((t0 + 2)) ]]; do
        run_podman inspect $ctrName --format "1-{{.State.Health.Status}}"
        assert "$output" == "1-starting" "Health.Status at $((SECONDS - t0))"
        sleep 0.5
    done

    # After 3 seconds it may take another second to go unhealthy. Wait.
    t0=$SECONDS
    while [[ $SECONDS -le $((t0 + 3)) ]]; do
        run_podman inspect $ctrName --format "2-{{.State.Health.Status}}"
        if [[ "$output" = "2-unhealthy" ]]; then
            break;
        fi
        sleep 0.5
    done
    assert "$output" == "2-unhealthy" "After 3 seconds"

    run_podman kube down $fname
    run_podman pod rm -a
    run_podman rm -a
}

@test "podman play --build private registry" {
    skip_if_remote "--build is not supported with podman-remote"

    local registry=localhost:${PODMAN_LOGIN_REGISTRY_PORT}
    local from_image=$registry/quadlet_image_test:$(random_string)
    local authfile=$PODMAN_TMPDIR/authfile.json

    mkdir -p $PODMAN_TMPDIR/userimage
    cat > $PODMAN_TMPDIR/userimage/Containerfile << _EOF
from $from_image
USER bin
_EOF

    # Start the registry and populate the authfile that we can use for the test.
    start_registry
    run_podman login --authfile=$authfile \
        --tls-verify=false \
        --username ${PODMAN_LOGIN_USER} \
        --password ${PODMAN_LOGIN_PASS} \
        $registry

    # Push the test image to the registry
    run_podman image tag $IMAGE $from_image
    run_podman image push --tls-verify=false --authfile=$authfile $from_image

    # Remove the local image to make sure it will be pulled again
    run_podman image rm --ignore $from_image

    _write_test_yaml command=id image=userimage
    run_podman 125 play kube --build --start=false $PODMAN_TMPDIR/test.yaml
    assert "$output" "=~" \
        "Error: short-name resolution enforced but cannot prompt without a TTY|Resolving \"userimage\" using unqualified-search registries" \
        "The error message should match one of the expected ones"

    run_podman play kube --replace --context-dir=$PODMAN_TMPDIR --tls-verify=false --authfile=$authfile --build --start=false $PODMAN_TMPDIR/test.yaml
    run_podman inspect --format "{{ .Config.User }}" test_pod-test
    is "$output" bin "expect container within pod to run as the bin user"

    run_podman stop -a -t 0
    run_podman pod rm -t 0 -f test_pod
    run_podman rmi -f userimage:latest $from_image
}

@test "podman play with automount volume" {
    cat >$PODMAN_TMPDIR/Containerfile <<EOF
FROM $IMAGE
RUN mkdir /test1 /test2
RUN touch /test1/a /test1/b /test1/c
RUN touch /test2/asdf /test2/ejgre /test2/lteghe
VOLUME /test1
VOLUME /test2
EOF

    run_podman build -t automount_test -f $PODMAN_TMPDIR/Containerfile

    fname="/tmp/play_kube_wait_$(random_string 6).yaml"
    echo "
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: test
  name: test_pod
spec:
  restartPolicy: Never
  containers:
    - name: testctr
      image: $IMAGE
      command:
      - top
" > $fname

    run_podman kube play --annotation "io.podman.annotations.kube.image.volumes.mount/testctr=automount_test" $fname
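    # (The annotation asks kube play to mount the VOLUME paths defined in the
    #  automount_test image into "testctr", so image and container should show
    #  identical listings below.)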

    run_podman run --rm automount_test ls /test1
    run_out_test1="$output"
    run_podman exec test_pod-testctr ls /test1
    assert "$output" = "$run_out_test1" "matching ls run/exec volume path test1"

    run_podman run --rm automount_test ls /test2
    run_out_test2="$output"
    run_podman exec test_pod-testctr ls /test2
    assert "$output" = "$run_out_test2" "matching ls run/exec volume path test2"

    run_podman rm -f -t 0 -a
    run_podman rmi automount_test
}