github.com/rawahars/moby@v24.0.4+incompatible/integration/container/daemon_linux_test.go

package container // import "github.com/docker/docker/integration/container"

import (
	"context"
	"fmt"
	"os"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/docker/docker/api/types"
	containerapi "github.com/docker/docker/api/types/container"
	realcontainer "github.com/docker/docker/container"
	"github.com/docker/docker/integration/internal/container"
	"github.com/docker/docker/testutil/daemon"
	"golang.org/x/sys/unix"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/assert/opt"
	"gotest.tools/v3/skip"
)

// This is a regression test for #36145.
// It ensures that a container can be started after the daemon was improperly
// shut down, once the daemon has been brought back up.
//
// The regression was caused by improper error handling, which prevented a
// container from being restored and, as a result, from having its resources
// cleaned up.
//
// To test this, we need to kill dockerd, then kill both the containerd-shim
// and the container process, then start dockerd back up and attempt to start
// the container again.
func TestContainerStartOnDaemonRestart(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon, "cannot start daemon on remote test run")
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	skip.If(t, testEnv.IsRootless)
	t.Parallel()

	d := daemon.New(t)
	d.StartWithBusybox(t, "--iptables=false")
	defer d.Stop(t)

	c := d.NewClientT(t)

	ctx := context.Background()

	cID := container.Create(ctx, t, c)
	defer c.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true})

	err := c.ContainerStart(ctx, cID, types.ContainerStartOptions{})
	assert.Check(t, err, "error starting test container")

	inspect, err := c.ContainerInspect(ctx, cID)
	assert.Check(t, err, "error getting inspect data")

	ppid := getContainerdShimPid(t, inspect)

	err = d.Kill()
	assert.Check(t, err, "failed to kill test daemon")

	err = unix.Kill(inspect.State.Pid, unix.SIGKILL)
	assert.Check(t, err, "failed to kill container process")

	err = unix.Kill(ppid, unix.SIGKILL)
	assert.Check(t, err, "failed to kill containerd-shim")

	d.Start(t, "--iptables=false")

	err = c.ContainerStart(ctx, cID, types.ContainerStartOptions{})
	assert.Check(t, err, "failed to start test container")
}

func getContainerdShimPid(t *testing.T, c types.ContainerJSON) int {
	statB, err := os.ReadFile(fmt.Sprintf("/proc/%d/stat", c.State.Pid))
	assert.Check(t, err, "error looking up containerd-shim pid")
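	// For reference, a /proc/<pid>/stat line looks roughly like this
	// (layout per proc(5)):
	//
	//	1234 (top) S 1200 1234 1200 ...
	//
	// so the parent PID is the 4th whitespace-separated field. A command name
	// containing spaces would break this naive split, but the test
	// container's command does not contain any.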
	// ppid is the 4th entry in `/proc/pid/stat`
	ppid, err := strconv.Atoi(strings.Fields(string(statB))[3])
	assert.Check(t, err, "error converting ppid field to int")

	assert.Check(t, ppid != 1, "got unexpected ppid")
	return ppid
}

// TestDaemonRestartIpcMode makes sure a container keeps its ipc mode
// (derived from the daemon default) even after the daemon is restarted
// with a different default ipc mode.
func TestDaemonRestartIpcMode(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon, "cannot start daemon on remote test run")
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	t.Parallel()

	d := daemon.New(t)
	d.StartWithBusybox(t, "--iptables=false", "--default-ipc-mode=private")
	defer d.Stop(t)

	c := d.NewClientT(t)
	ctx := context.Background()

	// check that the container is created with private ipc mode, as per the daemon default
	cID := container.Run(ctx, t, c,
		container.WithCmd("top"),
		container.WithRestartPolicy("always"),
	)
	defer c.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true})

	inspect, err := c.ContainerInspect(ctx, cID)
	assert.NilError(t, err)
	assert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), "private"))

	// restart the daemon with shareable default ipc mode
	d.Restart(t, "--iptables=false", "--default-ipc-mode=shareable")

	// check that the container still has private ipc mode
	inspect, err = c.ContainerInspect(ctx, cID)
	assert.NilError(t, err)
	assert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), "private"))

	// check that a new container is created with shareable ipc mode, as per the new daemon default
	cID = container.Run(ctx, t, c)
	defer c.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true})

	inspect, err = c.ContainerInspect(ctx, cID)
	assert.NilError(t, err)
	assert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), "shareable"))
}
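
// For reference, the equivalent manual check from the CLI would be something
// like the following (assumed invocation, not part of this test):
//
//	docker run -d busybox top
//	docker inspect --format '{{ .HostConfig.IpcMode }}' <container-id>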

// TestDaemonHostGatewayIP verifies that, when the magic string "host-gateway"
// is passed to ExtraHosts (--add-host) instead of an IP address, the address
// added to the container's /etc/hosts file is:
//
//  1. the value of the host-gateway-ip daemon config flag, if specified, or
//  2. the IP of the default bridge network otherwise.
func TestDaemonHostGatewayIP(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon)
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	skip.If(t, testEnv.IsRootless, "rootless mode has different view of network")
	t.Parallel()

	// Verify the IP in /etc/hosts is the same as the default bridge's IP
	d := daemon.New(t)
	d.StartWithBusybox(t)
	c := d.NewClientT(t)
	ctx := context.Background()
	cID := container.Run(ctx, t, c,
		container.WithExtraHost("host.docker.internal:host-gateway"),
	)
	res, err := container.Exec(ctx, c, cID, []string{"cat", "/etc/hosts"})
	assert.NilError(t, err)
	assert.Assert(t, is.Len(res.Stderr(), 0))
	assert.Equal(t, 0, res.ExitCode)
	inspect, err := c.NetworkInspect(ctx, "bridge", types.NetworkInspectOptions{})
	assert.NilError(t, err)
	assert.Check(t, is.Contains(res.Stdout(), inspect.IPAM.Config[0].Gateway))
	c.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true})
	d.Stop(t)

	// Verify the IP in /etc/hosts is the same as host-gateway-ip
	d.StartWithBusybox(t, "--host-gateway-ip=6.7.8.9")
	cID = container.Run(ctx, t, c,
		container.WithExtraHost("host.docker.internal:host-gateway"),
	)
	res, err = container.Exec(ctx, c, cID, []string{"cat", "/etc/hosts"})
	assert.NilError(t, err)
	assert.Assert(t, is.Len(res.Stderr(), 0))
	assert.Equal(t, 0, res.ExitCode)
	assert.Check(t, is.Contains(res.Stdout(), "6.7.8.9"))
	c.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true})
	d.Stop(t)
}

// TestRestartDaemonWithRestartingContainer simulates a case where a container
// is in "restarting" state when dockerd is killed (due to a machine reset or
// something else).
//
// Related to moby/moby#41817
//
// In this test we'll change the container state to "restarting".
// This means that the container will not be 'alive' when we attempt to
// restore it on daemon startup.
//
// We could do the same with `docker run -d --restart=always busybox:latest exit 1`,
// and then `kill -9` dockerd while the container is in "restarting" state. This
// is difficult to reproduce reliably in an automated test, so we manipulate the
// on-disk state instead.
func TestRestartDaemonWithRestartingContainer(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon, "cannot start daemon on remote test run")
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")

	t.Parallel()

	d := daemon.New(t)
	defer d.Cleanup(t)

	d.StartWithBusybox(t, "--iptables=false")
	defer d.Stop(t)

	ctx := context.Background()
	client := d.NewClientT(t)

	// Just create the container; no need to start it.
	// We really want to make sure there is no process running when docker starts back up.
	// We will manipulate the on-disk state later.
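	// TamperWithContainerConfig (used below) mutates the container state that
	// the daemon persisted to disk, while the daemon is stopped, so that on
	// the next start the daemon restores a container that claims to be
	// "restarting" even though no process for it exists.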
	id := container.Create(ctx, t, client, container.WithRestartPolicy("always"), container.WithCmd("/bin/sh", "-c", "exit 1"))

	d.Stop(t)

	d.TamperWithContainerConfig(t, id, func(c *realcontainer.Container) {
		c.SetRestarting(&realcontainer.ExitStatus{ExitCode: 1})
		c.HasBeenStartedBefore = true
	})

	d.Start(t)

	ctxTimeout, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	chOk, chErr := client.ContainerWait(ctxTimeout, id, containerapi.WaitConditionNextExit)
	select {
	case <-chOk:
	case err := <-chErr:
		assert.NilError(t, err)
	}
}

// TestHardRestartWhenContainerIsRunning simulates a case where dockerd is
// killed while a container is running, and the container's task no longer
// exists when dockerd starts back up. This can happen if the system is
// hard-rebooted, for example.
//
// Regression test for moby/moby#45788
func TestHardRestartWhenContainerIsRunning(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon, "cannot start daemon on remote test run")
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")

	t.Parallel()

	d := daemon.New(t)
	defer d.Cleanup(t)

	d.StartWithBusybox(t, "--iptables=false")
	defer d.Stop(t)

	ctx := context.Background()
	client := d.NewClientT(t)

	// Just create the containers; no need to start them.
	// We really want to make sure there is no process running when docker starts back up.
	// We will manipulate the on-disk state later.
	nopolicy := container.Create(ctx, t, client, container.WithCmd("/bin/sh", "-c", "exit 1"))
	onfailure := container.Create(ctx, t, client, container.WithRestartPolicy("on-failure"), container.WithCmd("/bin/sh", "-c", "sleep 60"))

	d.Stop(t)

	for _, id := range []string{nopolicy, onfailure} {
		d.TamperWithContainerConfig(t, id, func(c *realcontainer.Container) {
			c.SetRunning(nil, nil, true)
			c.HasBeenStartedBefore = true
		})
	}

	d.Start(t)

	t.Run("RestartPolicy=none", func(t *testing.T) {
		ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
		defer cancel()
		inspect, err := client.ContainerInspect(ctx, nopolicy)
		assert.NilError(t, err)
		assert.Check(t, is.Equal(inspect.State.Status, "exited"))
		assert.Check(t, is.Equal(inspect.State.ExitCode, 255))
		finishedAt, err := time.Parse(time.RFC3339Nano, inspect.State.FinishedAt)
		if assert.Check(t, err) {
			assert.Check(t, is.DeepEqual(finishedAt, time.Now(), opt.TimeWithThreshold(time.Minute)))
		}
	})

	t.Run("RestartPolicy=on-failure", func(t *testing.T) {
		ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
		defer cancel()
		inspect, err := client.ContainerInspect(ctx, onfailure)
		assert.NilError(t, err)
		assert.Check(t, is.Equal(inspect.State.Status, "running"))
		assert.Check(t, is.Equal(inspect.State.ExitCode, 0))
		finishedAt, err := time.Parse(time.RFC3339Nano, inspect.State.FinishedAt)
		if assert.Check(t, err) {
			assert.Check(t, is.DeepEqual(finishedAt, time.Now(), opt.TimeWithThreshold(time.Minute)))
		}

		stopTimeout := 0
		assert.Assert(t, client.ContainerStop(ctx, onfailure, containerapi.StopOptions{Timeout: &stopTimeout}))
	})
}
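
// These tests start real daemons, so they must run as root on a Linux host
// with dockerd and containerd builds available. An invocation along these
// lines should work (assumed command; see TESTING.md in the moby repository
// for the supported workflow):
//
//	go test -v -run TestHardRestartWhenContainerIsRunning ./integration/container/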