github.com/hamo/docker@v1.11.1/api/client/stats.go

package client

import (
	"fmt"
	"io"
	"strings"
	"sync"
	"text/tabwriter"
	"time"

	"golang.org/x/net/context"

	Cli "github.com/docker/docker/cli"
	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/events"
	"github.com/docker/engine-api/types/filters"
)

// CmdStats displays a live stream of resource usage statistics for one or more containers.
//
// This shows real-time information on CPU usage, memory usage, and network I/O.
//
// Usage: docker stats [OPTIONS] [CONTAINER...]
func (cli *DockerCli) CmdStats(args ...string) error {
	cmd := Cli.Subcmd("stats", []string{"[CONTAINER...]"}, Cli.DockerCommands["stats"].Description, true)
	all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)")
	noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result")

	cmd.ParseFlags(args, true)

	names := cmd.Args()
	showAll := len(names) == 0
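	// closeChan carries asynchronous errors (event stream or container list
	// failures) back to the display loop below.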
	closeChan := make(chan error)

	// monitorContainerEvents watches for container creation and removal (only
	// used when calling `docker stats` without arguments).
	monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) {
		f := filters.NewArgs()
		f.Add("type", "container")
		options := types.EventsOptions{
			Filters: f,
		}
		resBody, err := cli.client.Events(context.Background(), options)
		// Whether we successfully subscribed to events or not, we can now
		// unblock the main goroutine.
		close(started)
		if err != nil {
			closeChan <- err
			return
		}
		defer resBody.Close()

		decodeEvents(resBody, func(event events.Message, err error) error {
			if err != nil {
				closeChan <- err
				return nil
			}
			c <- event
			return nil
		})
	}

	// waitFirst is a WaitGroup used to wait until the first stats sample has
	// arrived for each container.
	waitFirst := &sync.WaitGroup{}

	cStats := stats{}
	// getContainerList simulates a creation event for each previously existing
	// container (only used when calling `docker stats` without arguments).
	getContainerList := func() {
		options := types.ContainerListOptions{
			All: *all,
		}
		cs, err := cli.client.ContainerList(context.Background(), options)
		if err != nil {
			closeChan <- err
		}
		for _, container := range cs {
			s := &containerStats{Name: container.ID[:12]}
			if cStats.add(s) {
				waitFirst.Add(1)
				go s.Collect(cli.client, !*noStream, waitFirst)
			}
		}
	}

	if showAll {
		// If no names were specified, start a long-running goroutine which
		// monitors container events. We make sure we're subscribed before
		// retrieving the list of running containers to avoid a race where we
		// would "miss" a creation.
		started := make(chan struct{})
		eh := eventHandler{handlers: make(map[string]func(events.Message))}
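		// A "create" event starts collection only when -a/--all was given,
		// since the newly created container is not running yet.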
		eh.Handle("create", func(e events.Message) {
			if *all {
				s := &containerStats{Name: e.ID[:12]}
				if cStats.add(s) {
					waitFirst.Add(1)
					go s.Collect(cli.client, !*noStream, waitFirst)
				}
			}
		})

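		// A "start" event always triggers collection: the container is running
		// again, regardless of the --all flag.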
		eh.Handle("start", func(e events.Message) {
			s := &containerStats{Name: e.ID[:12]}
			if cStats.add(s) {
				waitFirst.Add(1)
				go s.Collect(cli.client, !*noStream, waitFirst)
			}
		})

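		// A "die" event drops the container from the display unless --all was
		// given, in which case stopped containers stay listed.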
		eh.Handle("die", func(e events.Message) {
			if !*all {
				cStats.remove(e.ID[:12])
			}
		})

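		// Wire the event stream into the handlers above, and wait until the
		// subscription is in place before listing the running containers.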
		eventChan := make(chan events.Message)
		go eh.Watch(eventChan)
		go monitorContainerEvents(started, eventChan)
		defer close(eventChan)
		<-started

		// Retrieve the initial list of containers; per-container collection
		// runs in its own goroutine.
		getContainerList()
	} else {
		// Artificially send creation events for the containers we were asked to
		// monitor (same code path as the one used when monitoring all containers).
		for _, name := range names {
			s := &containerStats{Name: name}
			if cStats.add(s) {
				waitFirst.Add(1)
				go s.Collect(cli.client, !*noStream, waitFirst)
			}
		}

		// We don't expect any asynchronous errors: closeChan can be closed.
		close(closeChan)

		// Do a quick pause to detect any error with the provided list of
		// container names.
		time.Sleep(1500 * time.Millisecond)
		var errs []string
		cStats.mu.Lock()
		for _, c := range cStats.cs {
			c.mu.Lock()
			if c.err != nil {
				errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err))
			}
			c.mu.Unlock()
		}
		cStats.mu.Unlock()
		if len(errs) > 0 {
			return fmt.Errorf("%s", strings.Join(errs, ", "))
		}
	}

	// Before printing to the screen, make sure each container has delivered at
	// least one valid stats sample.
	waitFirst.Wait()

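	// tabwriter aligns the columns: minwidth 20, tabwidth 1, padding 3, padded
	// with spaces.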
	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
	printHeader := func() {
		if !*noStream {
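			// In streaming mode, clear the screen and move the cursor to the
			// top-left corner so each refresh overwrites the previous frame.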
			fmt.Fprint(cli.out, "\033[2J")
			fmt.Fprint(cli.out, "\033[H")
		}
		io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\tPIDS\n")
	}

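	// Refresh the display every 500ms; containers whose stats can no longer be
	// displayed are pruned on each pass.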
	for range time.Tick(500 * time.Millisecond) {
		printHeader()
		toRemove := []int{}
		cStats.mu.Lock()
		for i, s := range cStats.cs {
			if err := s.Display(w); err != nil && !*noStream {
				toRemove = append(toRemove, i)
			}
		}
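		// Remove defunct entries in reverse index order so the remaining
		// indices stay valid while the slice shrinks.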
		for j := len(toRemove) - 1; j >= 0; j-- {
			i := toRemove[j]
			cStats.cs = append(cStats.cs[:i], cStats.cs[i+1:]...)
		}
		if len(cStats.cs) == 0 && !showAll {
			return nil
		}
		cStats.mu.Unlock()
		w.Flush()
		if *noStream {
			break
		}
		select {
		case err, ok := <-closeChan:
			if ok {
				if err != nil {
					// This suppresses "unexpected EOF" in the CLI when the
					// daemon restarts, so the command shuts down cleanly.
					if err == io.ErrUnexpectedEOF {
						return nil
					}
					return err
				}
			}
		default:
			// just skip
		}
	}
	return nil
}