github.com/Prakhar-Agarwal-byte/moby@v0.0.0-20231027092010-a14e3e8ab87e/integration/container/daemon_linux_test.go (about) 1 package container // import "github.com/Prakhar-Agarwal-byte/moby/integration/container" 2 3 import ( 4 "context" 5 "fmt" 6 "os" 7 "strconv" 8 "strings" 9 "testing" 10 "time" 11 12 "github.com/Prakhar-Agarwal-byte/moby/api/types" 13 containertypes "github.com/Prakhar-Agarwal-byte/moby/api/types/container" 14 realcontainer "github.com/Prakhar-Agarwal-byte/moby/container" 15 "github.com/Prakhar-Agarwal-byte/moby/integration/internal/container" 16 "github.com/Prakhar-Agarwal-byte/moby/testutil" 17 "github.com/Prakhar-Agarwal-byte/moby/testutil/daemon" 18 "golang.org/x/sys/unix" 19 "gotest.tools/v3/assert" 20 is "gotest.tools/v3/assert/cmp" 21 "gotest.tools/v3/assert/opt" 22 "gotest.tools/v3/skip" 23 ) 24 25 // This is a regression test for #36145 26 // It ensures that a container can be started when the daemon was improperly 27 // shutdown when the daemon is brought back up. 28 // 29 // The regression is due to improper error handling preventing a container from 30 // being restored and as such have the resources cleaned up. 31 // 32 // To test this, we need to kill dockerd, then kill both the containerd-shim and 33 // the container process, then start dockerd back up and attempt to start the 34 // container again. 
35 func TestContainerStartOnDaemonRestart(t *testing.T) { 36 skip.If(t, testEnv.IsRemoteDaemon, "cannot start daemon on remote test run") 37 skip.If(t, testEnv.DaemonInfo.OSType == "windows") 38 skip.If(t, testEnv.IsRootless) 39 t.Parallel() 40 41 ctx := testutil.StartSpan(baseContext, t) 42 43 d := daemon.New(t) 44 d.StartWithBusybox(ctx, t, "--iptables=false") 45 defer d.Stop(t) 46 47 c := d.NewClientT(t) 48 49 cID := container.Create(ctx, t, c) 50 defer c.ContainerRemove(ctx, cID, containertypes.RemoveOptions{Force: true}) 51 52 err := c.ContainerStart(ctx, cID, containertypes.StartOptions{}) 53 assert.Check(t, err, "error starting test container") 54 55 inspect, err := c.ContainerInspect(ctx, cID) 56 assert.Check(t, err, "error getting inspect data") 57 58 ppid := getContainerdShimPid(t, inspect) 59 60 err = d.Kill() 61 assert.Check(t, err, "failed to kill test daemon") 62 63 err = unix.Kill(inspect.State.Pid, unix.SIGKILL) 64 assert.Check(t, err, "failed to kill container process") 65 66 err = unix.Kill(ppid, unix.SIGKILL) 67 assert.Check(t, err, "failed to kill containerd-shim") 68 69 d.Start(t, "--iptables=false") 70 71 err = c.ContainerStart(ctx, cID, containertypes.StartOptions{}) 72 assert.Check(t, err, "failed to start test container") 73 } 74 75 func getContainerdShimPid(t *testing.T, c types.ContainerJSON) int { 76 statB, err := os.ReadFile(fmt.Sprintf("/proc/%d/stat", c.State.Pid)) 77 assert.Check(t, err, "error looking up containerd-shim pid") 78 79 // ppid is the 4th entry in `/proc/pid/stat` 80 ppid, err := strconv.Atoi(strings.Fields(string(statB))[3]) 81 assert.Check(t, err, "error converting ppid field to int") 82 83 assert.Check(t, ppid != 1, "got unexpected ppid") 84 return ppid 85 } 86 87 // TestDaemonRestartIpcMode makes sure a container keeps its ipc mode 88 // (derived from daemon default) even after the daemon is restarted 89 // with a different default ipc mode. 
90 func TestDaemonRestartIpcMode(t *testing.T) { 91 skip.If(t, testEnv.IsRemoteDaemon, "cannot start daemon on remote test run") 92 skip.If(t, testEnv.DaemonInfo.OSType == "windows") 93 t.Parallel() 94 95 ctx := testutil.StartSpan(baseContext, t) 96 97 d := daemon.New(t) 98 d.StartWithBusybox(ctx, t, "--iptables=false", "--default-ipc-mode=private") 99 defer d.Stop(t) 100 101 c := d.NewClientT(t) 102 103 // check the container is created with private ipc mode as per daemon default 104 cID := container.Run(ctx, t, c, 105 container.WithCmd("top"), 106 container.WithRestartPolicy(containertypes.RestartPolicyAlways), 107 ) 108 defer c.ContainerRemove(ctx, cID, containertypes.RemoveOptions{Force: true}) 109 110 inspect, err := c.ContainerInspect(ctx, cID) 111 assert.NilError(t, err) 112 assert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), "private")) 113 114 // restart the daemon with shareable default ipc mode 115 d.Restart(t, "--iptables=false", "--default-ipc-mode=shareable") 116 117 // check the container is still having private ipc mode 118 inspect, err = c.ContainerInspect(ctx, cID) 119 assert.NilError(t, err) 120 assert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), "private")) 121 122 // check a new container is created with shareable ipc mode as per new daemon default 123 cID = container.Run(ctx, t, c) 124 defer c.ContainerRemove(ctx, cID, containertypes.RemoveOptions{Force: true}) 125 126 inspect, err = c.ContainerInspect(ctx, cID) 127 assert.NilError(t, err) 128 assert.Check(t, is.Equal(string(inspect.HostConfig.IpcMode), "shareable")) 129 } 130 131 // TestDaemonHostGatewayIP verifies that when a magic string "host-gateway" is passed 132 // to ExtraHosts (--add-host) instead of an IP address, its value is set to 133 // 1. Daemon config flag value specified by host-gateway-ip or 134 // 2. 
IP of the default bridge network 135 // and is added to the /etc/hosts file 136 func TestDaemonHostGatewayIP(t *testing.T) { 137 skip.If(t, testEnv.IsRemoteDaemon) 138 skip.If(t, testEnv.DaemonInfo.OSType == "windows") 139 skip.If(t, testEnv.IsRootless, "rootless mode has different view of network") 140 t.Parallel() 141 142 ctx := testutil.StartSpan(baseContext, t) 143 144 // Verify the IP in /etc/hosts is same as host-gateway-ip 145 d := daemon.New(t) 146 // Verify the IP in /etc/hosts is same as the default bridge's IP 147 d.StartWithBusybox(ctx, t, "--iptables=false") 148 c := d.NewClientT(t) 149 cID := container.Run(ctx, t, c, 150 container.WithExtraHost("host.docker.internal:host-gateway"), 151 ) 152 res, err := container.Exec(ctx, c, cID, []string{"cat", "/etc/hosts"}) 153 assert.NilError(t, err) 154 assert.Assert(t, is.Len(res.Stderr(), 0)) 155 assert.Equal(t, 0, res.ExitCode) 156 inspect, err := c.NetworkInspect(ctx, "bridge", types.NetworkInspectOptions{}) 157 assert.NilError(t, err) 158 assert.Check(t, is.Contains(res.Stdout(), inspect.IPAM.Config[0].Gateway)) 159 c.ContainerRemove(ctx, cID, containertypes.RemoveOptions{Force: true}) 160 d.Stop(t) 161 162 // Verify the IP in /etc/hosts is same as host-gateway-ip 163 d.StartWithBusybox(ctx, t, "--iptables=false", "--host-gateway-ip=6.7.8.9") 164 cID = container.Run(ctx, t, c, 165 container.WithExtraHost("host.docker.internal:host-gateway"), 166 ) 167 res, err = container.Exec(ctx, c, cID, []string{"cat", "/etc/hosts"}) 168 assert.NilError(t, err) 169 assert.Assert(t, is.Len(res.Stderr(), 0)) 170 assert.Equal(t, 0, res.ExitCode) 171 assert.Check(t, is.Contains(res.Stdout(), "6.7.8.9")) 172 c.ContainerRemove(ctx, cID, containertypes.RemoveOptions{Force: true}) 173 d.Stop(t) 174 } 175 176 // TestRestartDaemonWithRestartingContainer simulates a case where a container is in "restarting" state when 177 // dockerd is killed (due to machine reset or something else). 
//
// Related to moby/moby#41817
//
// In this test we'll change the container state to "restarting".
// This means that the container will not be 'alive' when we attempt to restore it on daemon startup.
//
// We could do the same with `docker run -d --restart=always busybox:latest exit 1`, and then
// `kill -9` dockerd while the container is in "restarting" state. This is difficult to reproduce reliably
// in an automated test, so we manipulate the on-disk state instead.
func TestRestartDaemonWithRestartingContainer(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon, "cannot start daemon on remote test run")
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")

	t.Parallel()

	ctx := testutil.StartSpan(baseContext, t)

	d := daemon.New(t)
	defer d.Cleanup(t)

	d.StartWithBusybox(ctx, t, "--iptables=false")
	defer d.Stop(t)

	apiClient := d.NewClientT(t)

	// Just create the container; no need to start it.
	// We really want to make sure there is no process running when docker starts back up.
	// We will manipulate the on-disk state later.
	id := container.Create(ctx, t, apiClient, container.WithRestartPolicy(containertypes.RestartPolicyAlways), container.WithCmd("/bin/sh", "-c", "exit 1"))

	d.Stop(t)

	// Rewrite the persisted container state so the daemon restores a
	// "restarting" container that has no live process backing it.
	d.TamperWithContainerConfig(t, id, func(c *realcontainer.Container) {
		c.SetRestarting(&realcontainer.ExitStatus{ExitCode: 1})
		c.HasBeenStartedBefore = true
	})

	d.Start(t, "--iptables=false")

	// The restart policy should still drive the container through another
	// start/exit cycle; wait for the next exit (bounded at 30s).
	ctxTimeout, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	chOk, chErr := apiClient.ContainerWait(ctxTimeout, id, containertypes.WaitConditionNextExit)
	select {
	case <-chOk:
	case err := <-chErr:
		assert.NilError(t, err)
	}
}

// TestHardRestartWhenContainerIsRunning simulates a case where dockerd is
// killed while a container is running, and the container's task no longer
// exists when dockerd starts back up. This can happen if the system is
// hard-rebooted, for example.
//
// Regression test for moby/moby#45788
func TestHardRestartWhenContainerIsRunning(t *testing.T) {
	skip.If(t, testEnv.IsRemoteDaemon, "cannot start daemon on remote test run")
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")

	t.Parallel()

	ctx := testutil.StartSpan(baseContext, t)

	d := daemon.New(t)
	defer d.Cleanup(t)

	d.StartWithBusybox(ctx, t, "--iptables=false")
	defer d.Stop(t)

	apiClient := d.NewClientT(t)

	// Just create the containers, no need to start them.
	// We really want to make sure there is no process running when docker starts back up.
	// We will manipulate the on-disk state later.
	noPolicy := container.Create(ctx, t, apiClient, container.WithCmd("/bin/sh", "-c", "exit 1"))
	onFailure := container.Create(ctx, t, apiClient, container.WithRestartPolicy("on-failure"), container.WithCmd("/bin/sh", "-c", "sleep 60"))

	d.Stop(t)

	// Mark both containers as "running" on disk so the daemon finds
	// running-state containers whose tasks no longer exist on startup.
	for _, id := range []string{noPolicy, onFailure} {
		d.TamperWithContainerConfig(t, id, func(c *realcontainer.Container) {
			c.SetRunning(nil, nil, true)
			c.HasBeenStartedBefore = true
		})
	}

	d.Start(t, "--iptables=false")

	t.Run("RestartPolicy=none", func(t *testing.T) {
		ctx := testutil.StartSpan(ctx, t)
		ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
		defer cancel()
		inspect, err := apiClient.ContainerInspect(ctx, noPolicy)
		assert.NilError(t, err)
		// No restart policy: the daemon should mark the vanished task as exited.
		assert.Check(t, is.Equal(inspect.State.Status, "exited"))
		assert.Check(t, is.Equal(inspect.State.ExitCode, 255))
		finishedAt, err := time.Parse(time.RFC3339Nano, inspect.State.FinishedAt)
		if assert.Check(t, err) {
			assert.Check(t, is.DeepEqual(finishedAt, time.Now(), opt.TimeWithThreshold(time.Minute)))
		}
	})

	t.Run("RestartPolicy=on-failure", func(t *testing.T) {
		ctx := testutil.StartSpan(ctx, t)
		ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
		defer cancel()
		inspect, err := apiClient.ContainerInspect(ctx, onFailure)
		assert.NilError(t, err)
		// on-failure policy: the daemon should have restarted the container.
		assert.Check(t, is.Equal(inspect.State.Status, "running"))
		assert.Check(t, is.Equal(inspect.State.ExitCode, 0))
		finishedAt, err := time.Parse(time.RFC3339Nano, inspect.State.FinishedAt)
		if assert.Check(t, err) {
			assert.Check(t, is.DeepEqual(finishedAt, time.Now(), opt.TimeWithThreshold(time.Minute)))
		}

		stopTimeout := 0
		assert.Assert(t, apiClient.ContainerStop(ctx, onFailure, containertypes.StopOptions{Timeout: &stopTimeout}))
	})
}