github.com/containers/podman/v5@v5.1.0-rc1/test/system/250-systemd.bats

#!/usr/bin/env bats   -*- bats -*-
#
# Tests generated configurations for systemd.
#

load helpers
load helpers.systemd
load helpers.network

SERVICE_NAME="podman_test_$(random_string)"

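# $UNIT_DIR comes from the helpers loaded above (helpers.systemd) and points at
# the systemd unit directory used for these tests. The trailing "@" marks the
# template unit, which systemd instantiates as "$SERVICE_NAME@<instance>.service".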
UNIT_FILE="$UNIT_DIR/$SERVICE_NAME.service"
TEMPLATE_FILE="$UNIT_DIR/$SERVICE_NAME@.service"

function setup() {
    skip_if_remote "systemd tests are meaningless over remote"

    basic_setup
}

function teardown() {
    if [[ -e "$UNIT_FILE" ]]; then
        run systemctl stop "$SERVICE_NAME"
        if [ $status -ne 0 ]; then
            echo "# WARNING: systemctl stop failed in teardown: $output" >&3
        fi

        rm -f "$UNIT_FILE"
        systemctl daemon-reload
    fi

    basic_teardown
}

# Helper to start a systemd service running a container
function service_setup() {
    # January 2024: we can no longer do "run_podman generate systemd" followed
    # by "echo $output >file", because generate-systemd is deprecated and now
    # says so loudly, to stderr, with no way to silence it. Since BATS gloms
    # stdout + stderr, that warning goes to the unit file. (Today's systemd
    # is forgiving about that, but RHEL8 systemd chokes with EINVAL)
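    #
    # Illustration only (not executed) -- the old, now-broken pattern was roughly:
    #     run_podman generate systemd --new --name $cname
    #     echo "$output" > $UNIT_FILE    # deprecation warning lands in the unit file
    # Using --files below lets podman write the unit itself, keeping stderr out of it.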
    (
        cd $UNIT_DIR
        run_podman generate systemd --files --name \
               -e http_proxy -e https_proxy -e no_proxy \
               -e HTTP_PROXY -e HTTPS_PROXY -e NO_PROXY \
               --new $cname
        mv "container-$cname.service" $UNIT_FILE
    )
    run_podman rm $cname

    systemctl daemon-reload

    # Also test enabling services (see #12438).
    run systemctl enable "$SERVICE_NAME"
    assert $status -eq 0 "Error enabling systemd unit $SERVICE_NAME: $output"

    systemctl_start "$SERVICE_NAME"

    run systemctl status "$SERVICE_NAME"
    assert $status -eq 0 "systemctl status $SERVICE_NAME: $output"
}

# Helper to stop a systemd service running a container
function service_cleanup() {
    run systemctl stop "$SERVICE_NAME"
    assert $status -eq 0 "Error stopping systemd unit $SERVICE_NAME: $output"

    # Regression test for #11304: confirm that unit stops into correct state
    local expected_state="$1"
    if [[ -n "$expected_state" ]]; then
        run systemctl show --property=ActiveState "$SERVICE_NAME"
        assert "$output" = "ActiveState=$expected_state" \
               "state of service after systemctl stop"
    fi

    run systemctl disable "$SERVICE_NAME"
    assert $status -eq 0 "Error disabling systemd unit $SERVICE_NAME: $output"

    rm -f "$UNIT_FILE"
    systemctl daemon-reload
}

# These tests can fail in dev. environment because of SELinux.
# quick fix: chcon -t container_runtime_exec_t ./bin/podman
@test "podman generate - systemd - basic" {
    # Warn when a custom restart policy is used without --new (see #15284)
    run_podman create --restart=always $IMAGE
    cid="$output"
    run_podman 0+w generate systemd $cid
    require_warning "Container $cid has restart policy .*always.* which can lead to issues on shutdown" \
                    "generate systemd emits warning"
    run_podman rm -f $cid

    cname=$(random_string)
    # See #7407 for --pull=always.
    run_podman create --pull=always --name $cname --label "io.containers.autoupdate=registry" $IMAGE \
        sh -c "trap 'echo Received SIGTERM, finishing; exit' SIGTERM; echo WAITING; while :; do sleep 0.1; done"

    # Start systemd service to run this container
    service_setup

    # Give container time to start; make sure it has printed its WAITING marker
    sleep 2
    run_podman logs $cname
    is "$output" ".*WAITING.*" "container is waiting for signal"

    # All good. Stop service, clean up.
    # Also make sure the service is in the `inactive` state (see #11304).
    service_cleanup inactive
}

@test "podman autoupdate local" {
    # Note that the entrypoint may be a JSON string which requires preserving the quotes (see #12477)
    cname=$(random_string)

    # Create a scratch image (copy of our regular one)
    image_copy=base$(random_string | tr A-Z a-z)
    run_podman tag $IMAGE $image_copy

    # Create a container based on that
    run_podman create --name $cname --label "io.containers.autoupdate=local" --entrypoint '["top"]' $image_copy

    # Start systemd service to run this container
    service_setup

    # Give container time to start; make sure output looks top-like
    wait_for_output 'Load average' $cname

    # Run auto-update and check that it restarted the container
    run_podman commit --change "CMD=/bin/bash" $cname $image_copy
    run_podman auto-update
    is "$output" ".*$SERVICE_NAME.*" "autoupdate local restarted container"

    # All good. Stop service, clean up.
    service_cleanup
    run_podman rmi $image_copy
}

# These tests can fail in dev. environment because of SELinux.
# quick fix: chcon -t container_runtime_exec_t ./bin/podman
@test "podman generate systemd - envar" {
    cname=$(random_string)
    FOO=value BAR=%s run_podman create --name $cname --env FOO -e BAR --env MYVAR=myval \
        $IMAGE sh -c 'printenv && sleep 100'

    # Start systemd service to run this container
    service_setup

    # Give container time to start; make sure the environment variables show up
    sleep 2
    run_podman logs $cname
    is "$output" ".*FOO=value.*" "FOO environment variable set"
    is "$output" ".*BAR=%s.*" "BAR environment variable set"
    is "$output" ".*MYVAR=myval.*" "MYVAR environment variable set"

    # All good. Stop service, clean up.
    service_cleanup
}

# Regression test for #11438
@test "podman generate systemd - restart policy & timeouts" {
    cname=$(random_string)
    run_podman create --restart=always --name $cname $IMAGE
    run_podman generate systemd --new $cname
    is "$output" ".*Restart=always.*" "Use container's restart policy if set"
    run_podman generate systemd --new --restart-policy=on-failure $cname
    is "$output" ".*Restart=on-failure.*" "Override container's restart policy"

    cname2=$(random_string)
    run_podman create --restart=unless-stopped --name $cname2 $IMAGE
    run_podman generate systemd --new $cname2
    is "$output" ".*Restart=always.*" "unless-stopped translated to always"

    cname3=$(random_string)
    run_podman create --restart=on-failure:42 --name $cname3 $IMAGE
    run_podman generate systemd --new $cname3
    is "$output" ".*Restart=on-failure.*" "on-failure:xx is parsed correctly"
    is "$output" ".*StartLimitBurst=42.*" "on-failure:xx is parsed correctly"

    run_podman rm -t 0 -f $cname $cname2 $cname3
}

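# Helpers for the LISTEN_* tests below. With socket activation, systemd hands a
# service the LISTEN_PID, LISTEN_FDS and LISTEN_FDNAMES variables (see
# sd_listen_fds(3)). set_listen_env fakes that environment; check_listen_env
# expects the variables to be forwarded into a local container with LISTEN_PID
# rewritten to 1 (the container process is PID 1 in its own PID namespace),
# and to be absent when running against podman-remote.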
function set_listen_env() {
    export LISTEN_PID="100" LISTEN_FDS="1" LISTEN_FDNAMES="listen_fdnames"
}

function unset_listen_env() {
    unset LISTEN_PID LISTEN_FDS LISTEN_FDNAMES
}

function check_listen_env() {
    local stdenv="$1"
    local context="$2"
    if is_remote; then
        is "$output" "$stdenv" "LISTEN Environment did not pass: $context"
    else
        out=$(for o in $output; do echo $o; done | sort)
        std=$(echo "$stdenv
LISTEN_PID=1
LISTEN_FDS=1
LISTEN_FDNAMES=listen_fdnames" | sort)
        echo "<$out>"
        echo "<$std>"
        is "$out" "$std" "LISTEN Environment passed: $context"
    fi
}

@test "podman pass LISTEN environment" {
    # Note that `--hostname=host1` makes sure that all containers have the same
    # environment.
    run_podman run --hostname=host1 --rm $IMAGE printenv
    stdenv=$output

    # podman run
    set_listen_env
    run_podman run --hostname=host1 --rm $IMAGE printenv
    unset_listen_env
    check_listen_env "$stdenv" "podman run"

    # podman start
    run_podman create --hostname=host1 --rm $IMAGE printenv
    cid="$output"
    set_listen_env
    run_podman start --attach $cid
    unset_listen_env
    check_listen_env "$stdenv" "podman start"
}

@test "podman generate - systemd template" {
    cname=$(random_string)
    run_podman create --name $cname $IMAGE top

    # See note in service_setup() above re: using --files
    (
        cd $UNIT_DIR
        run_podman generate systemd --template --files -n $cname
        mv "container-$cname.service" $TEMPLATE_FILE
    )
    run_podman rm -f $cname

    systemctl daemon-reload

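    # Instantiate the template: the text between "@" and ".service" ("1" here)
    # is the systemd instance name, available inside the unit as %i / %I.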
    INSTANCE="$SERVICE_NAME@1.service"
    systemctl_start "$INSTANCE"

    run systemctl status "$INSTANCE"
    assert $status -eq 0 "systemctl status $INSTANCE: $output"

    run systemctl stop "$INSTANCE"
    assert $status -eq 0 "Error stopping systemd unit $INSTANCE: $output"

    rm -f $TEMPLATE_FILE
    systemctl daemon-reload
}

@test "podman generate - systemd template no support for pod" {
    cname=$(random_string)
    podname=$(random_string)
    run_podman pod create --name $podname
    run_podman run --pod $podname -dt --name $cname $IMAGE top

    run_podman 125 generate systemd --new --template -n $podname
    is "$output" ".*--template is not supported for pods.*" "Error message contains 'not supported'"

    run_podman rm -f $cname
    run_podman pod rm -f $podname
    run_podman rmi $(pause_image)
}

@test "podman generate - systemd template only used on --new" {
    cname=$(random_string)
    run_podman create --name $cname $IMAGE top
    run_podman 125 generate systemd --new=false --template -n $cname
    is "$output" ".*--template cannot be set" "Error message should be '--template requires --new'"
}

@test "podman --cgroup=cgroupfs doesn't show systemd warning" {
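    # Clearing DBUS_SESSION_BUS_ADDRESS simulates an environment without a
    # session bus; with the cgroupfs manager podman should not complain about
    # systemd/DBus being unavailable, so warning-level output must be empty.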
    DBUS_SESSION_BUS_ADDRESS= run_podman --log-level warning --cgroup-manager=cgroupfs info -f ''
    is "$output" "" "output should be empty"
}

@test "podman --systemd sets container_uuid" {
    run_podman run --systemd=always --name test $IMAGE printenv container_uuid
    container_uuid=$output
    run_podman inspect test --format '{{ .ID }}'
    is "${container_uuid}" "${output:0:32}" "UUID should be first 32 chars of Container id"
}

@test "podman --systemd fails on cgroup v1 with a private cgroupns" {
    skip_if_cgroupsv2

    run_podman 126 run --systemd=always --cgroupns=private $IMAGE true
    assert "$output" =~ ".*cgroup namespace is not supported with cgroup v1 and systemd mode"
}

# https://github.com/containers/podman/issues/13153
@test "podman rootless-netns pasta processes should be in different cgroup" {
    is_rootless || skip "only meaningful for rootless"

    cname=$(random_string)
    local netname=testnet-$(random_string 10)

    # create network and container with network
    run_podman network create $netname
    run_podman create --name $cname --network $netname $IMAGE top

    # run container in systemd unit
    service_setup

    # run second container with network
    cname2=$(random_string)
    run_podman run -d --name $cname2 --network $netname $IMAGE top

    # stop systemd container
    service_cleanup

    pasta_iface=$(default_ifname)

    # now check that the rootless netns pasta process is still alive and working
    run_podman unshare --rootless-netns ip addr
    is "$output" ".*$pasta_iface.*" "pasta interface exists in the netns"
    run_podman exec $cname2 nslookup google.com

    run_podman rm -f -t0 $cname2
    run_podman network rm -f $netname
}

@test "podman create --health-on-failure=kill" {
    cname=c_$(random_string)
    run_podman create --name $cname                  \
               --health-cmd /home/podman/healthcheck \
               --health-on-failure=kill              \
               --health-retries=1                    \
               --restart=on-failure                  \
               $IMAGE /home/podman/pause

    # run container in systemd unit
    service_setup

    run_podman container inspect $cname --format "{{.ID}}"
    oldID="$output"

    run_podman healthcheck run $cname

    # Now cause the healthcheck to fail
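    # (the testimage's /home/podman/healthcheck script is expected to report
    # failure once /uh-oh exists)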
    run_podman exec $cname touch /uh-oh

    # healthcheck should now fail, with exit status 1 and 'unhealthy' output
    run_podman 1 healthcheck run $cname
    is "$output" "unhealthy" "output from 'podman healthcheck run'"

    # What is expected to happen now:
    #  1) The container gets killed as the health check has failed
    #  2) Systemd restarts the service as the restart policy is set to "on-failure"
    #  3) The /uh-oh file is gone and $cname has another ID

    # Wait at most 10 seconds for the service to be restarted
    local timeout=10
    while [[ $timeout -gt 1 ]]; do
        # Possible outcomes:
        #  - status 0, old container is still terminating: sleep and retry
        #  - status 0, new CID: yay, break
        #  - status 1, container not found: sleep and retry
        run_podman '?' container inspect $cname --format '{{.ID}}'
        if [[ $status == 0 ]]; then
            if [[ "$output" != "$oldID" ]]; then
                break
            fi
        fi
        sleep 1
        let timeout=$timeout-1
    done

    run_podman healthcheck run $cname

    # stop systemd container
    service_cleanup
}

@test "podman-kube@.service template" {
    install_kube_template
    # Create the YAML file
    yaml_source="$PODMAN_TMPDIR/test.yaml"
    cat >$yaml_source <<EOF
apiVersion: v1
kind: Pod
metadata:
  annotations:
      io.containers.autoupdate: "local"
      io.containers.autoupdate/b: "registry"
  labels:
    app: test
  name: test_pod
spec:
  containers:
  - command:
    - sh
    - -c
    - echo a stdout; echo a stderr 1>&2; sleep inf
    image: $IMAGE
    name: a
  - command:
    - sh
    - -c
    - echo b stdout; echo b stderr 1>&2; sleep inf
    image: $IMAGE
    name: b
EOF

    # Dispatch the YAML file
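    # (systemd-escape encodes the YAML path so it can serve as the template
    # instance name; the podman-kube@.service unit resolves it back to the
    # original path through its instance specifier)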
    service_name="podman-kube@$(systemd-escape $yaml_source).service"
    systemctl_start $service_name
    systemctl is-active $service_name

    # Make sure that the service's MainPID is a conmon process (podman's
    # container monitor)
    run systemctl show --property=MainPID --value $service_name
    is "$(</proc/$output/comm)" "conmon" "conmon is the service's MainPID"

    # The name of the service container is predictable: the first 12 characters
    # of the hash of the YAML file followed by the "-service" suffix
    yaml_sha=$(sha256sum $yaml_source)
    service_container="${yaml_sha:0:12}-service"

    # Make sure that the service container exists and runs.
    run_podman container inspect $service_container --format "{{.State.Running}}"
    is "$output" "true"

    # Check for an error when trying to remove the service container
    run_podman 125 container rm $service_container
    is "$output" "Error: container .* is the service container of pod(s) .* and cannot be removed without removing the pod(s)"

    # containers/podman/issues/17482: verify that the log-driver for the Pod's containers is NOT passthrough
    for name in "a" "b"; do
        run_podman container inspect test_pod-${name} --format "{{.HostConfig.LogConfig.Type}}"
        assert $output != "passthrough"
        # check that we can get the logs with passthrough when we run in a systemd unit
        run_podman logs test_pod-$name
        assert "$output" == "$name stdout
$name stderr" "logs work with passthrough"
    done

    # we cannot assume the ordering between a and b; it depends on timing and would flake in CI
    # use --names so we do not have to get the ID
    run_podman pod logs --names test_pod
    assert "$output" =~ ".*^test_pod-a a stdout.*" "logs from container a shown"
    assert "$output" =~ ".*^test_pod-b b stdout.*" "logs from container b shown"

    # Add a simple `auto-update --dry-run` test here to avoid too much redundancy
    # with 255-auto-update.bats
    run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
    is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,false,local.*" "global auto-update policy gets applied"
    is "$output" ".*$service_name,.* (test_pod-b),$IMAGE,false,registry.*" "container-specified auto-update policy gets applied"

    # Kill the pod and make sure the service is not running.
    run_podman pod kill test_pod
    for i in {0..20}; do
        # echos are for debugging test flakes
        echo "$_LOG_PROMPT systemctl is-active $service_name"
        run systemctl is-active $service_name
        echo "$output"
        if [[ "$output" == "inactive" ]]; then
            break
        fi
        sleep 0.5
    done
    is "$output" "inactive" "systemd service transitioned to 'inactive' state: $service_name"

    # Now stop and start the service again.
    systemctl stop $service_name
    systemctl_start $service_name
    systemctl is-active $service_name
    run_podman container inspect $service_container --format "{{.State.Running}}"
    is "$output" "true"

    # Clean up
    systemctl stop $service_name
    run_podman 1 container exists $service_container
    run_podman 1 pod exists test_pod
    run_podman rmi $(pause_image)
    rm -f $UNIT_DIR/$unit_name
}

@test "podman generate - systemd - DEPRECATED" {
    run_podman generate systemd --help
    is "$output" ".*[DEPRECATED] command:"
    is "$output" ".*\[DEPRECATED\] Generate systemd units.*"
    run_podman create --name test $IMAGE
    run_podman generate systemd test >/dev/null
    is "$output" ".*[DEPRECATED] command:"
    run_podman generate --help
    is "$output" ".*\[DEPRECATED\] Generate systemd units"
}
# vim: filetype=sh