github.com/containers/podman/v5@v5.1.0-rc1/test/upgrade/test-upgrade.bats

# -*- bats -*-

# This lets us do "run -0", which does an implicit exit-status check
bats_require_minimum_version 1.8.0

load helpers

# Create a var-lib-containers dir for this podman. We need to bind-mount
# this into the container, and use --root and --runroot and --tmpdir
# options both in the container podman and out here: that's the only
# way to share image and container storage.
if [ -z "${PODMAN_UPGRADE_WORKDIR}" ]; then
    # Much as I'd love a descriptive name like "podman-upgrade-tests.XXXXX",
    # keep it short ("pu") because of the 100-character path length limit
    # for UNIX sockets (needed by conmon)
    export PODMAN_UPGRADE_WORKDIR=$(mktemp -d --tmpdir=${BATS_TMPDIR:-${TMPDIR:-/tmp}} pu.XXXXXX)

    touch $PODMAN_UPGRADE_WORKDIR/status
fi

# Generate a set of random strings used for content verification
if [ -z "${RANDOM_STRING_1}" ]; then
    export RANDOM_STRING_1=$(random_string 15)
    export LABEL_CREATED=$(random_string 16)
    export LABEL_FAILED=$(random_string 17)
    export LABEL_RUNNING=$(random_string 18)
    export HOST_PORT=$(random_free_port)
    export MYTESTNETWORK=mytestnetwork$(random_string 8)
fi

# Version string of the podman we're actually testing, e.g. '3.0.0-dev-d1a26013'
PODMAN_VERSION=$($PODMAN version | awk '/^Version:/ { V=$2 } /^Git Commit:/ { G=$3 } END { print V "-" substr(G,1,8) }')

setup() {
    skip_if_rootless

    # The podman-in-podman image (old podman)
    if [[ -z "$PODMAN_UPGRADE_FROM" ]]; then
        echo "# \$PODMAN_UPGRADE_FROM is undefined (should be e.g. v4.1.0)" >&3
        false
    fi

    if [ "$(< $PODMAN_UPGRADE_WORKDIR/status)" = "failed" ]; then
        skip "*** setup failed - no point in running tests"
    fi

    # cgroup-manager=systemd does not work inside a container
    # skip_mount_home=true is required so we can share the storage mounts between host and container;
    # the default c/storage behavior is to make the mount propagation private.
    export _PODMAN_TEST_OPTS="--storage-opt=skip_mount_home=true --cgroup-manager=cgroupfs --root=$PODMAN_UPGRADE_WORKDIR/root --runroot=$PODMAN_UPGRADE_WORKDIR/runroot --tmpdir=$PODMAN_UPGRADE_WORKDIR/tmp"
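    # _PODMAN_TEST_OPTS is picked up by run_podman (see helpers), so every test
    # below operates on the shared storage that old podman populated during setup.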
}

###############################################################################
# BEGIN setup

@test "initial setup: start $PODMAN_UPGRADE_FROM containers" {
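    # Mark setup as failed up front ('>|' writes even under noclobber); this is
    # flipped to OK at the very end, so if anything below dies, setup() will
    # skip all remaining tests.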
    echo failed >| $PODMAN_UPGRADE_WORKDIR/status

    OLD_PODMAN=quay.io/podman/stable:$PODMAN_UPGRADE_FROM
    $PODMAN pull $OLD_PODMAN

    # Can't mix-and-match iptables.
    # This can only fail when we bring in new CI VMs. If/when it does fail,
    # we'll need to figure out how to solve it. Until then, punt.
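    # The expr calls below pull out the parenthesized backend name from
    # "iptables -V", e.g. "iptables v1.8.9 (nf_tables)" -> "nf_tables";
    # host and container must agree on it.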
    iptables_old_version=$($PODMAN run --rm $OLD_PODMAN iptables -V)
    run -0 expr "$iptables_old_version" : ".*(\(.*\))"
    iptables_old_which="$output"

    iptables_new_version=$(iptables -V)
    run -0 expr "$iptables_new_version" : ".*(\(.*\))"
    iptables_new_which="$output"

    if [[ "$iptables_new_which" != "$iptables_old_which" ]]; then
        die "Cannot mix iptables; $PODMAN_UPGRADE_FROM container uses $iptables_old_which, host uses $iptables_new_which"
    fi

    # Shortcut name, because we're referencing it a lot
    pmroot=$PODMAN_UPGRADE_WORKDIR

    # WWW content to share
    mkdir -p $pmroot/var/www
    echo $RANDOM_STRING_1 >$pmroot/var/www/index.txt

    # podman tmpdir
    mkdir -p $pmroot/tmp

    #
    # Script to run >>OLD<< podman commands.
    #
    # These commands will be run inside a podman container. The "podman"
    # command in this script will be the desired old-podman version.
    #
    pmscript=$pmroot/setup
    cat >| $pmscript <<EOF
#!/bin/bash

#
# Argh! podman >= 3.4 something something namespace something, fails with
#   Error: invalid config provided: cannot set hostname when running in the host UTS namespace: invalid configuration
#
# https://github.com/containers/podman/issues/11969#issuecomment-943386484
#
if grep -q utsns /etc/containers/containers.conf; then
    sed -i -e '/^utsns=/d' /etc/containers/containers.conf
fi

# events-backend=journald does not work inside a container
opts="--events-backend=file $_PODMAN_TEST_OPTS"

set -ex

# Try try again, because network flakiness makes this a point of failure
podman \$opts pull $IMAGE \
  || (sleep 10; podman \$opts pull $IMAGE) \
  || (sleep 30; podman \$opts pull $IMAGE)


podman \$opts create --name mycreatedcontainer --label mylabel=$LABEL_CREATED \
                                               $IMAGE false

podman \$opts run    --name mydonecontainer    $IMAGE echo ++$RANDOM_STRING_1++

podman \$opts run    --name myfailedcontainer  --label mylabel=$LABEL_FAILED \
                                               $IMAGE sh -c 'exit 17' || true

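# Long-running httpd container, left running on purpose: later tests curl its
# index.txt through the published host port and exec into it.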
podman \$opts run -d --name myrunningcontainer --label mylabel=$LABEL_RUNNING \
                                               --network bridge \
                                               -p $HOST_PORT:80 \
                                               -p 127.0.0.1:9090-9092:8080-8082 \
                                               -v $pmroot/var/www:/var/www \
                                               -w /var/www \
                                               --mac-address aa:bb:cc:dd:ee:ff \
                                               $IMAGE /bin/busybox-extras httpd -f -p 80

podman \$opts pod create --name mypod

podman \$opts network create --disable-dns $MYTESTNETWORK

echo READY
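# Keep the container alive until the host side asks us to stop, then tear down
# the running container and the test network before exiting.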
while :; do
    if [ -e /stop ]; then
        echo STOPPING
        podman \$opts stop -t 0 myrunningcontainer || true
        podman \$opts rm -f     myrunningcontainer || true
        podman \$opts network rm -f $MYTESTNETWORK
        exit 0
    fi
    sleep 0.5
done
EOF
    chmod 555 $pmscript

    # Clean up vestiges of previous run
    $PODMAN rm -f podman_parent

    # Not entirely a NOP! This is just so we get the /run/... mount points created on a CI VM
    $PODMAN run --rm $OLD_PODMAN true

    # Containers-common around release 1-55 no longer supplies this file
    sconf=/etc/containers/storage.conf
    v_sconf=
    if [[ -e "$sconf" ]]; then
        v_sconf="-v $sconf:$sconf"
    fi

    #
    # Use new-podman to run the above script under old-podman.
    #
    # DO NOT USE run_podman HERE! That would use $_PODMAN_TEST_OPTS
    # and would write into our shared test dir, which would then
    # pollute it for use by old-podman. We must keep that pristine
    # so old-podman is the first to write to it.
    #
    # mount /etc/containers/storage.conf to use the same storage settings as on the host
    # mount /dev/shm because the container locks are stored there
    # mount /run/containers for the dnsname plugin
    #
    $PODMAN run -d --name podman_parent \
            --privileged \
            --net=host \
            --cgroupns=host \
            --pid=host \
            $v_sconf \
            -v /dev/fuse:/dev/fuse \
            -v /run/crun:/run/crun \
            -v /run/netns:/run/netns:rshared \
            -v /run/containers:/run/containers \
            -v /dev/shm:/dev/shm \
            -v /etc/containers/networks:/etc/containers/networks \
            -v $pmroot:$pmroot:rshared \
            $OLD_PODMAN $pmroot/setup

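    # wait_for_ready (from helpers) watches the container's logs for the READY
    # line above. The one-shot empty assignment keeps its podman invocation out
    # of the shared test storage, for the same reason as the comment above.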
    _PODMAN_TEST_OPTS= wait_for_ready podman_parent

    echo OK >| $PODMAN_UPGRADE_WORKDIR/status
}

# END   setup
###############################################################################
# BEGIN actual tests

# This is a NOP; used only so the version string will show up in logs
@test "upgrade: $PODMAN_UPGRADE_FROM -> $PODMAN_VERSION" {
    :
}

@test "info - network" {
    run_podman info --format '{{.Host.NetworkBackend}}'
    assert "$output" = "netavark" "As of Feb 2024, CNI will never be default"
}

# Whichever DB was picked by old-podman, make sure we honor it
@test "info - database" {
    run_podman info --format '{{.Host.DatabaseBackend}}'
    if version_is_older_than 4.8; then
        assert "$output" = "boltdb" "DatabaseBackend for podman < 4.8"
    else
        assert "$output" = "sqlite" "DatabaseBackend for podman >= 4.8"
    fi
}

@test "images" {
    run_podman images -a --format '{{.Names}}'
    assert "${lines[0]}" =~ "\[localhost/podman-pause:${PODMAN_UPGRADE_FROM##v}-.*\]" "podman images, line 0"
    assert "${lines[1]}" = "[$IMAGE]" "podman images, line 1"
}

@test "ps : one container running" {
    run_podman ps --format '{{.Image}}--{{.Names}}'
    is "$output" "$IMAGE--myrunningcontainer" "ps: one container running"
}

@test "ps -a : shows all containers" {
    run_podman ps -a \
               --format '{{.Names}}--{{.Status}}--{{.Ports}}--{{.Labels.mylabel}}' \
               --sort=created
    assert "${lines[0]}" == "mycreatedcontainer--Created----$LABEL_CREATED" "line 0, created"
    assert "${lines[1]}" =~ "mydonecontainer--Exited \(0\).*----<no value>"   "line 1, done"
    assert "${lines[2]}" =~ "myfailedcontainer--Exited \(17\) .*----$LABEL_FAILED" "line 2, fail"

    # Port order is not guaranteed
    assert "${lines[3]}" =~ "myrunningcontainer--Up .*--$LABEL_RUNNING" "line 3, running"
    assert "${lines[3]}" =~ ".*--.*0\.0\.0\.0:$HOST_PORT->80\/tcp.*--.*"  "line 3, first port forward"
    assert "${lines[3]}" =~ ".*--.*127\.0\.0\.1\:9090-9092->8080-8082\/tcp.*--.*" "line 3, second port forward"

    assert "${lines[4]}" =~ ".*-infra--Created----<no value>" "line 4, infra container"

    # For debugging: dump containers and IDs
    if [[ -n "$PODMAN_UPGRADE_TEST_DEBUG" ]]; then
        run_podman ps -a
        for l in "${lines[@]}"; do
            echo "# $l" >&3
        done
    fi
}


@test "inspect - all container status" {
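    # Table columns: container-name suffix | expected .State.Status | expected exit code.
    # parse_table (from helpers) splits the |-separated rows for the loop below.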
    tests="
running   | running    |  0
created   | created    |  0
done      | exited     |  0
failed    | exited     | 17
"
    while read cname state exitstatus; do
        run_podman inspect --format '{{.State.Status}}--{{.State.ExitCode}}' my${cname}container
        is "$output" "$state--$exitstatus" "status of my${cname}container"
    done < <(parse_table "$tests")
}

@test "network - curl" {
    run -0 curl --max-time 3 -s 127.0.0.1:$HOST_PORT/index.txt
    is "$output" "$RANDOM_STRING_1" "curl on running container"
}

# IMPORTANT: connect should happen before restart; we want to check
# that we can connect a network to an existing, running container
@test "network - connect" {
    run_podman network connect $MYTESTNETWORK myrunningcontainer
    run_podman network disconnect podman myrunningcontainer
    run -0 curl --max-time 3 -s 127.0.0.1:$HOST_PORT/index.txt
    is "$output" "$RANDOM_STRING_1" "curl on container with second network connected"
}

@test "network - restart" {
    # restart the container and check if we can still use the port
    run_podman stop -t0 myrunningcontainer
    run_podman start myrunningcontainer

    run -0 curl --max-time 3 -s 127.0.0.1:$HOST_PORT/index.txt
    is "$output" "$RANDOM_STRING_1" "curl on restarted container"
}


@test "logs" {
    run_podman logs mydonecontainer
    is "$output" "++$RANDOM_STRING_1++" "podman logs on stopped container"
}

@test "exec" {
    run_podman exec myrunningcontainer cat /var/www/index.txt
    is "$output" "$RANDOM_STRING_1" "exec into myrunningcontainer"
}

@test "load" {
    # FIXME, is this really necessary?
    skip "TBI. Not sure if there's any point to this."
}

@test "mount" {
    skip "TBI"
}

@test "pods" {
    run_podman pod inspect mypod
    is "$output" ".*mypod.*"

    run_podman pod start mypod
    is "$output" "[0-9a-f]\\{64\\}" "podman pod start"

    # run a container in an existing pod
    # FIXME: 2024-02-07 fails: pod X cgroup is not set: internal libpod error
    #run_podman run --pod=mypod --ipc=host --rm $IMAGE echo it works
    #is "$output" ".*it works.*" "podman run --pod"

    run_podman pod ps
    is "$output" ".*mypod.*" "podman pod ps shows name"
    is "$output" ".*Running.*" "podman pod ps shows running state"

    run_podman pod stop mypod
    is "$output" "[0-9a-f]\\{64\\}" "podman pod stop"

    run_podman pod rm mypod
    is "$output" "[0-9a-f]\\{64\\}" "podman pod rm"
}

# FIXME: commit? kill? network? pause? restart? top? volumes? What else?


@test "start" {
    run_podman start -a mydonecontainer
    is "$output" "++$RANDOM_STRING_1++" "start on already-run container"
}

@test "rm a stopped container" {
    run_podman rm myfailedcontainer
    is "$output" "myfailedcontainer" "podman rm myfailedcontainer"

    run_podman rm mydonecontainer
    is "$output" "mydonecontainer" "podman rm mydonecontainer"
}


@test "stop and rm" {
    run_podman stop -t0 myrunningcontainer
    run_podman rm       myrunningcontainer
}

@test "clean up parent" {
    if [[ -n "$PODMAN_UPGRADE_TEST_DEBUG" ]]; then
        skip "workdir is $PODMAN_UPGRADE_WORKDIR"
    fi

    # We're done with the shared environment. By clearing this, we can now
    # use run_podman for actions on the podman_parent container
    unset _PODMAN_TEST_OPTS

    # (Useful for debugging the 'rm -f' step below, which, when it fails, only
    # gives a container ID. This 'ps' confirms that the CID is podman_parent)
    run_podman ps -a

    # Stop the container gracefully
    run_podman exec podman_parent touch /stop
    run_podman wait podman_parent

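    # '0+we' tells run_podman (see helpers) to expect exit status 0 while
    # tolerating warning/error messages on stderr from the old podman.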
    run_podman 0+we logs podman_parent
    run_podman 0+we rm -f podman_parent

    # Maybe some day I'll understand why podman leaves stray overlay mounts
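    # (sort -r unmounts the deepest paths first, so nested overlay mounts are
    # released before their parents)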
    while read overlaydir; do
        umount $overlaydir || true
    done < <(mount | grep $PODMAN_UPGRADE_WORKDIR | awk '{print $3}' | sort -r)

    rm -rf $PODMAN_UPGRADE_WORKDIR
}

# FIXME: now clean up