github.com/containers/libpod@v1.9.4-0.20220419124438-4284fd425507/libpod/container_api.go

package libpod

import (
	"bufio"
	"context"
	"io/ioutil"
	"net"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/containers/libpod/libpod/define"
	"github.com/containers/libpod/libpod/events"
	"github.com/containers/libpod/libpod/logs"
	"github.com/opentracing/opentracing-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"k8s.io/client-go/tools/remotecommand"
)

// Init creates a container in the OCI runtime
func (c *Container) Init(ctx context.Context) (err error) {
	span, _ := opentracing.StartSpanFromContext(ctx, "containerInit")
	span.SetTag("struct", "container")
	defer span.Finish()

	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateStopped, define.ContainerStateExited) {
		return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID())
	}

	// don't recursively start
	if err := c.checkDependenciesAndHandleError(ctx); err != nil {
		return err
	}

	if err := c.prepare(); err != nil {
		if err2 := c.cleanup(ctx); err2 != nil {
			logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
		}
		return err
	}

	if c.state.State == define.ContainerStateStopped {
		// Reinitialize the container
		return c.reinit(ctx, false)
	}

	// Initialize the container for the first time
	return c.init(ctx, false)
}

// Start starts a container.
// Start can start configured, created or stopped containers.
// For configured containers, the container will be initialized first, then
// started.
// Stopped containers will be deleted and re-created in runc, undergoing a fresh
// Init().
// If recursive is set, Start will also start all containers this container depends on.
func (c *Container) Start(ctx context.Context, recursive bool) (err error) {
	span, _ := opentracing.StartSpanFromContext(ctx, "containerStart")
	span.SetTag("struct", "container")
	defer span.Finish()

	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}
	if err := c.prepareToStart(ctx, recursive); err != nil {
		return err
	}

	// Start the container
	return c.start()
}
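// exampleCreateThenStart is an illustrative sketch added for this listing; it
// is not part of the original file. It shows the split documented above: Init
// creates the container in the OCI runtime without starting it, while Start
// will initialize a configured container on its own. The explicit Init call is
// only useful when creation and startup should happen at different times.
func exampleCreateThenStart(ctx context.Context, c *Container) error {
	if err := c.Init(ctx); err != nil {
		// A container that has already been created reports a wrapped
		// ErrCtrStateInvalid; treat that as "nothing left to create".
		if errors.Cause(err) != define.ErrCtrStateInvalid {
			return err
		}
	}
	// Start without recursively starting dependencies.
	return c.Start(ctx, false)
}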
// StartAndAttach starts a container and attaches to it.
// StartAndAttach can start configured, created or stopped containers.
// For configured containers, the container will be initialized first, then
// started.
// Stopped containers will be deleted and re-created in runc, undergoing a fresh
// Init().
// If successful, an error channel will be returned containing the result of the
// attach call.
// The channel will be closed automatically after the result of attach has been
// sent.
// If recursive is set, StartAndAttach will also start all containers this container depends on.
func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, recursive bool) (attachResChan <-chan error, err error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return nil, err
		}
	}

	if err := c.prepareToStart(ctx, recursive); err != nil {
		return nil, err
	}
	attachChan := make(chan error)

	// We need to ensure that we don't return until start() fired in attach.
	// Use a channel to sync
	startedChan := make(chan bool)

	// Attach to the container before starting it
	go func() {
		if err := c.attach(streams, keys, resize, true, startedChan); err != nil {
			attachChan <- err
		}
		close(attachChan)
	}()

	select {
	case err := <-attachChan:
		return nil, err
	case <-startedChan:
		c.newContainerEvent(events.Attach)
	}

	return attachChan, nil
}

// RestartWithTimeout restarts a running container, using the given timeout (in
// seconds) when stopping it.
func (c *Container) RestartWithTimeout(ctx context.Context, timeout uint) (err error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if err = c.checkDependenciesAndHandleError(ctx); err != nil {
		return err
	}

	return c.restartWithTimeout(ctx, timeout)
}

// Stop uses the container's stop signal (or SIGTERM if no signal was specified)
// to stop the container, and if it has not stopped after the container's stop
// timeout, SIGKILL is used to attempt to forcibly stop the container.
// The default stop timeout is 10 seconds, but it can be overridden when the
// container is created.
func (c *Container) Stop() error {
	// Stop with the container's given timeout
	return c.StopWithTimeout(c.config.StopTimeout)
}

// StopWithTimeout is a version of Stop that allows a timeout to be specified
// manually. If timeout is 0, SIGKILL will be used immediately to kill the
// container.
func (c *Container) StopWithTimeout(timeout uint) error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if c.ensureState(define.ContainerStateStopped, define.ContainerStateExited) {
		return define.ErrCtrStopped
	}

	if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
		return errors.Wrapf(define.ErrCtrStateInvalid, "can only stop created or running containers. %s is in state %s", c.ID(), c.state.State.String())
	}

	return c.stop(timeout)
}

// Kill sends a signal to a container
func (c *Container) Kill(signal uint) error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	// TODO: Is killing a paused container OK?
	if c.state.State != define.ContainerStateRunning {
		return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
	}

	// Hardcode all = false, we only use all when removing.
	if err := c.ociRuntime.KillContainer(c, signal, false); err != nil {
		return err
	}

	c.state.StoppedByUser = true

	c.newContainerEvent(events.Kill)

	return c.save()
}
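// exampleStartAttachWait is an illustrative sketch added for this listing; it
// is not part of the original file. It shows how the channel contract of
// StartAndAttach above is typically consumed: the returned channel carries the
// result of the attach once the session ends and is closed afterwards, so a
// single blocking receive is enough to wait for it. The detach-key sequence is
// left empty here, deferring to the library's defaults.
func exampleStartAttachWait(ctx context.Context, c *Container, streams *define.AttachStreams, resize <-chan remotecommand.TerminalSize) (int32, error) {
	attachChan, err := c.StartAndAttach(ctx, streams, "", resize, false)
	if err != nil {
		return -1, err
	}
	// Block until the attach session finishes; the channel is closed right
	// after its result is sent.
	if err := <-attachChan; err != nil {
		return -1, err
	}
	// Wait blocks until the container actually exits, then returns its
	// exit code.
	return c.Wait()
}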
// Attach attaches to a container.
// This function returns when the attach finishes. It does not hold the lock for
// the duration of its runtime, only using it at the beginning to verify state.
func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-chan remotecommand.TerminalSize) error {
	if !c.batched {
		c.lock.Lock()
		if err := c.syncContainer(); err != nil {
			c.lock.Unlock()
			return err
		}
		// We are NOT holding the lock for the duration of the function.
		c.lock.Unlock()
	}

	if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
		return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
	}

	c.newContainerEvent(events.Attach)
	return c.attach(streams, keys, resize, false, nil)
}

// HTTPAttach forwards an attach session over a hijacked HTTP session.
// HTTPAttach will consume and close the included httpCon, which is expected to
// be sourced from a hijacked HTTP connection.
// The cancel channel is optional, and can be used to asynchronously cancel the
// attach session.
// The streams variable is only supported if the container did not allocate a
// terminal, and allows specifying which of the container's standard streams
// will be forwarded to the client.
// This function returns when the attach finishes. It does not hold the lock for
// the duration of its runtime, only using it at the beginning to verify state.
// The streamLogs parameter indicates that all of the container's logs up to the
// present will be streamed at the beginning of the attach.
// The streamAttach parameter indicates that the attach itself will be streamed
// over the socket; if this is not set, but streamLogs is, only the logs will be
// sent.
// At least one of streamAttach and streamLogs must be set.
func (c *Container) HTTPAttach(httpCon net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, streamAttach, streamLogs bool) (deferredErr error) {
	isTerminal := false
	if c.config.Spec.Process != nil {
		isTerminal = c.config.Spec.Process.Terminal
	}
	// Ensure our contract of writing errors to and closing the HTTP conn is
	// honored.
	defer func() {
		hijackWriteErrorAndClose(deferredErr, c.ID(), isTerminal, httpCon, httpBuf)
	}()

	if !c.batched {
		c.lock.Lock()
		if err := c.syncContainer(); err != nil {
			c.lock.Unlock()

			return err
		}
		// We are NOT holding the lock for the duration of the function.
		c.lock.Unlock()
	}

	if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
		return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
	}

	if !streamAttach && !streamLogs {
		return errors.Wrapf(define.ErrInvalidArg, "must specify at least one of stream or logs")
	}

	logrus.Infof("Performing HTTP Hijack attach to container %s", c.ID())

	if streamLogs {
		// Get all logs for the container
		logChan := make(chan *logs.LogLine)
		logOpts := new(logs.LogOptions)
		logOpts.Tail = -1
		logOpts.WaitGroup = new(sync.WaitGroup)
		errChan := make(chan error)
		go func() {
			var err error
			// In non-terminal mode we need to prepend each log line
			// with the stream header.
			logrus.Debugf("Writing logs for container %s to HTTP attach", c.ID())
			for logLine := range logChan {
				if !isTerminal {
					device := logLine.Device
					var header []byte
					headerLen := uint32(len(logLine.Msg))

					switch strings.ToLower(device) {
					case "stdin":
						header = makeHTTPAttachHeader(0, headerLen)
					case "stdout":
						header = makeHTTPAttachHeader(1, headerLen)
					case "stderr":
						header = makeHTTPAttachHeader(2, headerLen)
					default:
						logrus.Errorf("Unknown device for log line: %s", device)
						header = makeHTTPAttachHeader(1, headerLen)
					}
					_, err = httpBuf.Write(header)
					if err != nil {
						break
					}
				}
				_, err = httpBuf.Write([]byte(logLine.Msg))
				if err != nil {
					break
				}
				_, err = httpBuf.Write([]byte("\n"))
				if err != nil {
					break
				}
				err = httpBuf.Flush()
				if err != nil {
					break
				}
			}
			errChan <- err
		}()
		go func() {
			logOpts.WaitGroup.Wait()
			close(logChan)
		}()
		if err := c.ReadLog(logOpts, logChan); err != nil {
			return err
		}
		logrus.Debugf("Done reading logs for container %s", c.ID())
		if err := <-errChan; err != nil {
			return err
		}
	}
	if !streamAttach {
		return nil
	}

	c.newContainerEvent(events.Attach)
	return c.ociRuntime.HTTPAttach(c, httpCon, httpBuf, streams, detachKeys, cancel)
}
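// exampleAttachHeader is an illustrative sketch added for this listing; it is
// not part of the original file, and the real helper used above,
// makeHTTPAttachHeader, is defined elsewhere in this package. It shows the
// Docker-style multiplexing frame that the log loop writes when the container
// has no terminal, assuming the usual stdcopy layout: byte 0 is the stream
// (0 stdin, 1 stdout, 2 stderr), bytes 1-3 are zero padding, and bytes 4-7 are
// the big-endian payload length.
func exampleAttachHeader(stream byte, length uint32) []byte {
	return []byte{
		stream, 0, 0, 0,
		byte(length >> 24), byte(length >> 16), byte(length >> 8), byte(length),
	}
}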
// AttachResize resizes the container's terminal, which is displayed by Attach
// and HTTPAttach.
func (c *Container) AttachResize(newSize remotecommand.TerminalSize) error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
		return errors.Wrapf(define.ErrCtrStateInvalid, "can only resize created or running containers")
	}

	logrus.Infof("Resizing TTY of container %s", c.ID())

	return c.ociRuntime.AttachResize(c, newSize)
}

// Mount mounts a container's filesystem on the host
// The path where the container has been mounted is returned
func (c *Container) Mount() (string, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return "", err
		}
	}

	if c.state.State == define.ContainerStateRemoving {
		return "", errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID())
	}

	defer c.newContainerEvent(events.Mount)
	return c.mount()
}

// Unmount unmounts a container's filesystem on the host
func (c *Container) Unmount(force bool) error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if c.state.Mounted {
		mounted, err := c.runtime.storageService.MountedContainerImage(c.ID())
		if err != nil {
			return errors.Wrapf(err, "can't determine how many times %s is mounted, refusing to unmount", c.ID())
		}
		if mounted == 1 {
			if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
				return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
			}
			execSessions, err := c.getActiveExecSessions()
			if err != nil {
				return err
			}
			if len(execSessions) != 0 {
				return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID())
			}
			return errors.Wrapf(define.ErrInternal, "can't unmount %s last mount, it is still in use", c.ID())
		}
	}
	defer c.newContainerEvent(events.Unmount)
	return c.unmount(force)
}

// Pause pauses a container
func (c *Container) Pause() error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if c.state.State == define.ContainerStatePaused {
		return errors.Wrapf(define.ErrCtrStateInvalid, "%q is already paused", c.ID())
	}
	if c.state.State != define.ContainerStateRunning {
		return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, can't pause", c.state.State)
	}
	defer c.newContainerEvent(events.Pause)
	return c.pause()
}

// Unpause unpauses a container
func (c *Container) Unpause() error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if c.state.State != define.ContainerStatePaused {
		return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not paused, can't unpause", c.ID())
	}
	defer c.newContainerEvent(events.Unpause)
	return c.unpause()
}

// Export exports a container's root filesystem as a tar archive
// The archive will be saved as a file at the given path
func (c *Container) Export(path string) error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if c.state.State == define.ContainerStateRemoving {
		return errors.Wrapf(define.ErrCtrStateInvalid, "cannot export container %s as it is being removed", c.ID())
	}

	defer c.newContainerEvent(events.Mount)
	return c.export(path)
}

// AddArtifact creates and writes to an artifact file for the container
func (c *Container) AddArtifact(name string, data []byte) error {
	if !c.valid {
		return define.ErrCtrRemoved
	}

	return ioutil.WriteFile(c.getArtifactPath(name), data, 0740)
}

// GetArtifact reads the specified artifact file from the container
func (c *Container) GetArtifact(name string) ([]byte, error) {
	if !c.valid {
		return nil, define.ErrCtrRemoved
	}

	return ioutil.ReadFile(c.getArtifactPath(name))
}

// RemoveArtifact deletes the specified artifact file
func (c *Container) RemoveArtifact(name string) error {
	if !c.valid {
		return define.ErrCtrRemoved
	}

	return os.Remove(c.getArtifactPath(name))
}
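// exampleArtifactRoundTrip is an illustrative sketch added for this listing;
// it is not part of the original file. It shows the artifact helpers above
// used together: a small piece of caller-defined metadata is written alongside
// the container's other on-disk data and read back later. The artifact name
// "com.example.note" is made up for the example.
func exampleArtifactRoundTrip(c *Container) ([]byte, error) {
	if err := c.AddArtifact("com.example.note", []byte("created by example")); err != nil {
		return nil, err
	}
	return c.GetArtifact("com.example.note")
}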
// Wait blocks until the container exits and returns its exit code.
func (c *Container) Wait() (int32, error) {
	return c.WaitWithInterval(DefaultWaitInterval)
}

// WaitWithInterval blocks until the container exits and returns its exit code.
// The argument is the interval at which the container's status is checked.
func (c *Container) WaitWithInterval(waitTimeout time.Duration) (int32, error) {
	if !c.valid {
		return -1, define.ErrCtrRemoved
	}

	exitFile, err := c.exitFilePath()
	if err != nil {
		return -1, err
	}
	chWait := make(chan error, 1)

	defer close(chWait)

	for {
		// Ignore errors here; waiting on the exit file is only used to
		// avoid waiting too long.
		_, _ = WaitForFile(exitFile, chWait, waitTimeout)

		stopped, err := c.isStopped()
		if err != nil {
			return -1, err
		}
		if stopped {
			return c.state.ExitCode, nil
		}
	}
}

// WaitForConditionWithInterval blocks until the container reaches the given
// condition, polling its state at the given interval. For the stopped and
// exited conditions it defers to WaitWithInterval and returns the exit code;
// for all other conditions the returned exit code is -1.
func (c *Container) WaitForConditionWithInterval(waitTimeout time.Duration, condition define.ContainerStatus) (int32, error) {
	if !c.valid {
		return -1, define.ErrCtrRemoved
	}
	if condition == define.ContainerStateStopped || condition == define.ContainerStateExited {
		return c.WaitWithInterval(waitTimeout)
	}
	for {
		state, err := c.State()
		if err != nil {
			return -1, err
		}
		if state == condition {
			break
		}
		time.Sleep(waitTimeout)
	}
	return -1, nil
}

// Cleanup unmounts all mount points in the container and cleans up container
// storage. It also cleans up the network stack.
func (c *Container) Cleanup(ctx context.Context) error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	// Check if state is good
	if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateExited) {
		return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
	}

	// Handle restart policy.
	// Returns a bool indicating whether we actually restarted.
	// If we did, don't proceed to cleanup - just exit.
	didRestart, err := c.handleRestartPolicy(ctx)
	if err != nil {
		return err
	}
	if didRestart {
		return nil
	}

	// If we didn't restart, we perform a normal cleanup

	// Check for running exec sessions
	sessions, err := c.getActiveExecSessions()
	if err != nil {
		return err
	}
	if len(sessions) > 0 {
		return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID())
	}

	defer c.newContainerEvent(events.Cleanup)
	return c.cleanup(ctx)
}

// Batch starts a batch operation on the given container.
// All commands in the passed function will execute under the same lock and
// without synchronizing state after each operation.
// This will result in substantial performance benefits when running numerous
// commands on the same container.
// Note that the container passed into the Batch function cannot be removed
// during batched operations. runtime.RemoveContainer can only be called outside
// of Batch.
// Any error returned by the given batch function will be returned unmodified by
// Batch.
// As Batch normally disables updating the current state of the container, the
// Sync() function is provided to enable container state to be updated and
// checked within Batch.
func (c *Container) Batch(batchFunc func(*Container) error) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	if err := c.syncContainer(); err != nil {
		return err
	}

	newCtr := new(Container)
	newCtr.config = c.config
	newCtr.state = c.state
	newCtr.runtime = c.runtime
	newCtr.ociRuntime = c.ociRuntime
	newCtr.lock = c.lock
	newCtr.valid = true

	newCtr.batched = true
	err := batchFunc(newCtr)
	newCtr.batched = false

	return err
}
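// exampleBatchedState is an illustrative sketch added for this listing; it is
// not part of the original file. It shows the Batch pattern documented above:
// several calls against the same container run under a single lock
// acquisition, with Sync() invoked explicitly where fresh state from the OCI
// runtime is wanted.
func exampleBatchedState(c *Container) (define.ContainerStatus, error) {
	var state define.ContainerStatus
	err := c.Batch(func(batched *Container) error {
		// Refresh state once for the whole batch.
		if err := batched.Sync(); err != nil {
			return err
		}
		st, err := batched.State()
		if err != nil {
			return err
		}
		state = st
		return nil
	})
	return state, err
}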
// Sync updates the status of a container by querying the OCI runtime.
// If the container has not been created inside the OCI runtime, nothing will be
// done.
// Most of the time, Podman does not explicitly query the OCI runtime for
// container status, and instead relies upon exit files created by conmon.
// This can cause a disconnect between the running state and what Podman sees in
// cases where conmon was killed unexpectedly, or runc was upgraded.
// Running a manual Sync() ensures that container state will be correct in
// such situations.
func (c *Container) Sync() error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()
	}

	// If the runtime knows about the container, update its status in the
	// runtime and then save back to disk.
	if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopped) {
		oldState := c.state.State
		if err := c.ociRuntime.UpdateContainerStatus(c); err != nil {
			return err
		}
		// Only save back to DB if state changed
		if c.state.State != oldState {
			if err := c.save(); err != nil {
				return err
			}
		}
	}

	defer c.newContainerEvent(events.Sync)
	return nil
}
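// exampleRefreshState is an illustrative sketch added for this listing; it is
// not part of the original file. It shows the situation described in the Sync
// documentation above: when the cached state may be stale (for example because
// conmon died unexpectedly), an explicit Sync followed by State returns the
// corrected status.
func exampleRefreshState(c *Container) (define.ContainerStatus, error) {
	if err := c.Sync(); err != nil {
		return define.ContainerStateUnknown, err
	}
	return c.State()
}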
// Refresh is DEPRECATED and REMOVED.
func (c *Container) Refresh(ctx context.Context) error {
	// This has been deprecated for a long while, and is in the process of
	// being removed.
	return define.ErrNotImplemented
}

// ContainerCheckpointOptions is a struct used to pass the parameters
// for checkpointing (and restoring) to the corresponding functions
type ContainerCheckpointOptions struct {
	// Keep tells the API to not delete checkpoint artifacts
	Keep bool
	// KeepRunning tells the API to keep the container running
	// after writing the checkpoint to disk
	KeepRunning bool
	// TCPEstablished tells the API to checkpoint a container
	// even if it contains established TCP connections
	TCPEstablished bool
	// TargetFile tells the API to read (or write) the checkpoint image
	// from (or to) the filename set in TargetFile
	TargetFile string
	// Name tells the API that during restore from an exported
	// checkpoint archive a new name should be used for the
	// restored container
	Name string
	// IgnoreRootfs tells the API to not export changes to
	// the container's root file-system (or, on restore, to not
	// import them)
	IgnoreRootfs bool
	// IgnoreStaticIP tells the API to ignore the IP set
	// during 'podman run' with '--ip'. This is especially
	// important to be able to restore a container multiple
	// times with '--import --name'.
	IgnoreStaticIP bool
	// IgnoreStaticMAC tells the API to ignore the MAC set
	// during 'podman run' with '--mac-address'. This is especially
	// important to be able to restore a container multiple
	// times with '--import --name'.
	IgnoreStaticMAC bool
}

// Checkpoint checkpoints a container
func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
	logrus.Debugf("Trying to checkpoint container %s", c.ID())

	if options.TargetFile != "" {
		if err := c.prepareCheckpointExport(); err != nil {
			return err
		}
	}

	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}
	return c.checkpoint(ctx, options)
}

// Restore restores a container
func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOptions) (err error) {
	logrus.Debugf("Trying to restore container %s", c.ID())
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}
	defer c.newContainerEvent(events.Restore)
	return c.restore(ctx, options)
}
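// exampleCheckpointToArchive is an illustrative sketch added for this listing;
// it is not part of the original file. It shows how ContainerCheckpointOptions
// is typically filled in for an exported checkpoint: the image is written to a
// caller-supplied archive path via TargetFile, and KeepRunning leaves the
// container running after the checkpoint is taken.
func exampleCheckpointToArchive(ctx context.Context, c *Container, targetFile string) error {
	opts := ContainerCheckpointOptions{
		TargetFile:  targetFile,
		KeepRunning: true,
		// Per the field documentation above, checkpointing a container
		// with established TCP connections requires this to be set.
		TCPEstablished: false,
	}
	return c.Checkpoint(ctx, opts)
}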