github.com/gunjan5/docker@v1.8.2/daemon/monitor.go

package daemon

import (
	"io"
	"os/exec"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/execdriver"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/runconfig"
)

const (
	defaultTimeIncrement = 100
	loggerCloseTimeout   = 10 * time.Second
)

// containerMonitor monitors the execution of a container's main process.
// If a restart policy is specified for the container, the monitor will ensure that the
// process is restarted based on the rules of the policy. When the container is finally stopped,
// the monitor will reset and clean up any of the container's resources, such as networking
// allocations and the rootfs.
type containerMonitor struct {
	mux sync.Mutex

	// container is the container being monitored
	container *Container

	// restartPolicy is the current policy being applied to the container monitor
	restartPolicy runconfig.RestartPolicy

	// failureCount is the number of times the container has failed to
	// start in a row
	failureCount int

	// shouldStop signals the monitor that the next time the container exits it is
	// either because docker or the user asked for the container to be stopped
	shouldStop bool

	// startSignal is a channel that is closed after the container initially starts
	startSignal chan struct{}

	// stopChan is used to signal to the monitor whenever there is a wait for the
	// next restart so that the timeIncrement is not honored and the user is not
	// left waiting for nothing to happen during this time
	stopChan chan struct{}

	// timeIncrement is the amount of time to wait between restarts,
	// in milliseconds
	timeIncrement int

	// lastStartTime is the time at which the monitor last exec'd the container's process
	lastStartTime time.Time
}

// newContainerMonitor returns an initialized containerMonitor for the provided container,
// honoring the provided restart policy
func newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *containerMonitor {
	return &containerMonitor{
		container:     container,
		restartPolicy: policy,
		timeIncrement: defaultTimeIncrement,
		stopChan:      make(chan struct{}),
		startSignal:   make(chan struct{}),
	}
}

// ExitOnNext signals to the container monitor that it should stop monitoring the container
// for exits the next time the process dies
func (m *containerMonitor) ExitOnNext() {
	m.mux.Lock()

	// we need to protect against a double close of the channel when stop is called
	// twice, or else we will get a panic
	if !m.shouldStop {
		m.shouldStop = true
		close(m.stopChan)
	}

	m.mux.Unlock()
}
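// Typical wiring, shown as a minimal sketch rather than the daemon's exact code:
// the daemon is assumed to build one monitor per container from the container's
// restart policy, run Start in its own goroutine, and call ExitOnNext when a
// stop is requested (hostConfig.RestartPolicy is an assumption about the caller,
// not something defined in this file):
//
//	monitor := newContainerMonitor(container, container.hostConfig.RestartPolicy)
//	go monitor.Start()   // blocks until the container stops for good
//	...
//	monitor.ExitOnNext() // make the next process exit the final one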
// Close closes the container's resources such as networking allocations and
// unmounts the container's root filesystem
func (m *containerMonitor) Close() error {
	// Cleanup networking and mounts
	m.container.cleanup()

	// FIXME: here is a race condition between two RUN instructions in a Dockerfile
	// because they share the same runconfig and change the image. Must be fixed
	// in builder/builder.go
	if err := m.container.toDisk(); err != nil {
		logrus.Errorf("Error dumping container %s state to disk: %s", m.container.ID, err)

		return err
	}

	return nil
}

// Start starts the container's process and monitors it according to the restart policy
func (m *containerMonitor) Start() error {
	var (
		err        error
		exitStatus execdriver.ExitStatus
		// this variable indicates where we are in the execution flow:
		// before Run or after
		afterRun bool
	)

	// ensure that when the monitor finally exits we release the networking and unmount the rootfs
	defer func() {
		if afterRun {
			m.container.Lock()
			m.container.setStopped(&exitStatus)
			defer m.container.Unlock()
		}
		m.Close()
	}()

	// reset the restart count
	m.container.RestartCount = -1

	for {
		m.container.RestartCount++

		if err := m.container.startLogging(); err != nil {
			m.resetContainer(false)

			return err
		}

		pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)

		m.container.LogEvent("start")

		m.lastStartTime = time.Now()

		if exitStatus, err = m.container.daemon.Run(m.container, pipes, m.callback); err != nil {
			// if we receive an internal error from the initial start of a container then let's
			// return it instead of entering the restart loop
			if m.container.RestartCount == 0 {
				m.container.ExitCode = -1
				m.resetContainer(false)

				return err
			}

			logrus.Errorf("Error running container: %s", err)
		}

		// here container.Lock is already lost
		afterRun = true

		m.resetMonitor(err == nil && exitStatus.ExitCode == 0)

		if m.shouldRestart(exitStatus.ExitCode) {
			m.container.SetRestarting(&exitStatus)
			if exitStatus.OOMKilled {
				m.container.LogEvent("oom")
			}
			m.container.LogEvent("die")
			m.resetContainer(true)

			// sleep with a small time increment between each restart to help avoid issues caused by quickly
			// restarting the container because of some types of errors ( networking cut out, etc... )
			m.waitForNextRestart()

			// we need to check this before reentering the loop because waitForNextRestart could have
			// been terminated by a request from a user
			if m.shouldStop {
				return err
			}
			continue
		}
		if exitStatus.OOMKilled {
			m.container.LogEvent("oom")
		}
		m.container.LogEvent("die")
		m.resetContainer(true)
		return err
	}
}
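// A worked example of the backoff produced by resetMonitor below and consumed by
// waitForNextRestart (numbers follow from defaultTimeIncrement = 100ms; only the
// duration of each run matters here, not its exit code):
//
//	run #1 lasts 2s         -> timeIncrement doubles: the next wait is 200ms
//	run #2 lasts 1s         -> the next wait is 400ms
//	run #3 lasts 30s (>10s) -> timeIncrement resets to the default 100ms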
// resetMonitor resets the stateful fields on the containerMonitor based on the
// previous run's success or failure. Regardless of success, if the container had
// an execution time of more than 10s then reset the timer back to the default
func (m *containerMonitor) resetMonitor(successful bool) {
	executionTime := time.Now().Sub(m.lastStartTime).Seconds()

	if executionTime > 10 {
		m.timeIncrement = defaultTimeIncrement
	} else {
		// otherwise we need to increment the amount of time we wait before restarting
		// the process. We will build up by multiplying the increment by 2
		m.timeIncrement *= 2
	}

	// the container exited successfully so we need to reset the failure counter
	if successful {
		m.failureCount = 0
	} else {
		m.failureCount++
	}
}

// waitForNextRestart waits with the default time increment to restart the container unless
// a user or docker asks for the container to be stopped
func (m *containerMonitor) waitForNextRestart() {
	select {
	case <-time.After(time.Duration(m.timeIncrement) * time.Millisecond):
	case <-m.stopChan:
	}
}

// shouldRestart checks the restart policy and applies the rules to determine if
// the container's process should be restarted
func (m *containerMonitor) shouldRestart(exitCode int) bool {
	m.mux.Lock()
	defer m.mux.Unlock()

	// do not restart if the user or docker has requested that this container be stopped
	if m.shouldStop {
		return false
	}

	switch {
	case m.restartPolicy.IsAlways():
		return true
	case m.restartPolicy.IsOnFailure():
		// the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count
		if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount > max {
			logrus.Debugf("stopping restart of container %s because maximum failure count of %d has been reached",
				stringid.TruncateID(m.container.ID), max)
			return false
		}

		return exitCode != 0
	}

	return false
}
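// Put together with failureCount, shouldRestart above yields the usual policy
// behaviour; the quoted spellings are just the conventional names for the
// runconfig policies referenced above, not strings parsed in this file:
//
//	"no"           -> neither IsAlways nor IsOnFailure matches; never restart
//	"always"       -> restart after every exit until ExitOnNext is called
//	"on-failure:3" -> restart while the exit code is non-zero and failureCount <= 3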
// callback ensures that the container's state is properly updated after we
// received ack from the execution drivers
func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid int) {
	if processConfig.Tty {
		// The callback is called after the process Start()
		// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
		// which we close here.
		if c, ok := processConfig.Stdout.(io.Closer); ok {
			c.Close()
		}
	}

	m.container.setRunning(pid)

	// signal that the process has started
	// close channel only if not closed
	select {
	case <-m.startSignal:
	default:
		close(m.startSignal)
	}

	if err := m.container.ToDisk(); err != nil {
		logrus.Errorf("Error saving container to disk: %v", err)
	}
}

// resetContainer resets the container's IO and ensures that the command is able to be executed again
// by copying the data into a new struct
// if lock is true, the container is locked during the reset
func (m *containerMonitor) resetContainer(lock bool) {
	container := m.container
	if lock {
		container.Lock()
		defer container.Unlock()
	}

	if container.Config.OpenStdin {
		if err := container.stdin.Close(); err != nil {
			logrus.Errorf("%s: Error closing stdin: %s", container.ID, err)
		}
	}

	if err := container.stdout.Clean(); err != nil {
		logrus.Errorf("%s: Error closing stdout: %s", container.ID, err)
	}

	if err := container.stderr.Clean(); err != nil {
		logrus.Errorf("%s: Error closing stderr: %s", container.ID, err)
	}

	if container.command != nil && container.command.ProcessConfig.Terminal != nil {
		if err := container.command.ProcessConfig.Terminal.Close(); err != nil {
			logrus.Errorf("%s: Error closing terminal: %s", container.ID, err)
		}
	}

	// Re-create a brand new stdin pipe once the container exited
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	}

	if container.logDriver != nil {
		if container.logCopier != nil {
			exit := make(chan struct{})
			go func() {
				container.logCopier.Wait()
				close(exit)
			}()
			select {
			case <-time.After(loggerCloseTimeout):
				logrus.Warnf("Logger didn't exit in time: logs may be truncated")
			case <-exit:
			}
		}
		container.logDriver.Close()
		container.logCopier = nil
		container.logDriver = nil
	}

	c := container.command.ProcessConfig.Cmd

	container.command.ProcessConfig.Cmd = exec.Cmd{
		Stdin:       c.Stdin,
		Stdout:      c.Stdout,
		Stderr:      c.Stderr,
		Path:        c.Path,
		Env:         c.Env,
		ExtraFiles:  c.ExtraFiles,
		Args:        c.Args,
		Dir:         c.Dir,
		SysProcAttr: c.SysProcAttr,
	}
}
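// resetContainer builds a brand new exec.Cmd above because the os/exec package
// does not allow a Cmd to be run twice. A minimal standalone illustration of
// that constraint (not daemon code):
//
//	cmd := exec.Command("true")
//	_ = cmd.Run()
//	err := cmd.Run() // fails with "exec: already started"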