github.com/ncdc/docker@v0.10.1-0.20160129113957-6c6729ef5b74/api/client/stats.go

package client

import (
	"encoding/json"
	"fmt"
	"io"
	"sort"
	"strings"
	"sync"
	"text/tabwriter"
	"time"

	Cli "github.com/docker/docker/cli"
	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/events"
	"github.com/docker/engine-api/types/filters"
	"github.com/docker/go-units"
)

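// containerStats holds the most recently collected resource-usage sample for
// a single container; mu guards the measured fields and err against
// concurrent access from the collecting goroutine and the display loop.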
type containerStats struct {
	Name             string
	CPUPercentage    float64
	Memory           float64
	MemoryLimit      float64
	MemoryPercentage float64
	NetworkRx        float64
	NetworkTx        float64
	BlockRead        float64
	BlockWrite       float64
	mu               sync.RWMutex
	err              error
}

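// stats is the list of containers currently being monitored; mu guards cs
// because entries are added and removed concurrently while container events
// are being watched.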
type stats struct {
	mu sync.Mutex
	cs []*containerStats
}

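// Collect requests stats for the container from the daemon and caches the
// decoded values on s, zeroing them if no update arrives within two seconds.
// With streamStats set it keeps updating s until the stream ends or fails;
// it is normally started in its own goroutine, for example (the container
// name is illustrative):
//
//	s := &containerStats{Name: "abc123def456"}
//	go s.Collect(cli, true)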
func (s *containerStats) Collect(cli *DockerCli, streamStats bool) {
	responseBody, err := cli.client.ContainerStats(s.Name, streamStats)
	if err != nil {
		s.mu.Lock()
		s.err = err
		s.mu.Unlock()
		return
	}
	defer responseBody.Close()

	var (
		previousCPU    uint64
		previousSystem uint64
		dec            = json.NewDecoder(responseBody)
		u              = make(chan error, 1)
	)
	go func() {
		for {
			var v *types.StatsJSON
			if err := dec.Decode(&v); err != nil {
				u <- err
				return
			}

			var memPercent = 0.0
			var cpuPercent = 0.0

			// MemoryStats.Limit is only 0 when the container is not running and
			// we have not received any data from the cgroup yet.
			if v.MemoryStats.Limit != 0 {
				memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0
			}

			previousCPU = v.PreCPUStats.CPUUsage.TotalUsage
			previousSystem = v.PreCPUStats.SystemUsage
			cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v)
			blkRead, blkWrite := calculateBlockIO(v.BlkioStats)
			s.mu.Lock()
			s.CPUPercentage = cpuPercent
			s.Memory = float64(v.MemoryStats.Usage)
			s.MemoryLimit = float64(v.MemoryStats.Limit)
			s.MemoryPercentage = memPercent
			s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks)
			s.BlockRead = float64(blkRead)
			s.BlockWrite = float64(blkWrite)
			s.mu.Unlock()
			u <- nil
			if !streamStats {
				return
			}
		}
	}()
	for {
		select {
		case <-time.After(2 * time.Second):
			// zero out the values if we have not received an update within
			// the specified duration.
			s.mu.Lock()
			s.CPUPercentage = 0
			s.Memory = 0
			s.MemoryPercentage = 0
			s.MemoryLimit = 0
			s.NetworkRx = 0
			s.NetworkTx = 0
			s.BlockRead = 0
			s.BlockWrite = 0
			s.mu.Unlock()
		case err := <-u:
			if err != nil {
				s.mu.Lock()
				s.err = err
				s.mu.Unlock()
				return
			}
		}
		if !streamStats {
			return
		}
	}
}

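// Display writes a single tab-separated row for s to w, matching the header
// printed by CmdStats, or returns the collection error if one was recorded.
// A rendered row looks roughly like this (values and formatting are
// illustrative):
//
//	redis1  0.07%  796 kB / 64 MB  1.21%  788 B / 648 B  3.57 MB / 512 kB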
func (s *containerStats) Display(w io.Writer) error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.err != nil {
		return s.err
	}
	fmt.Fprintf(w, "%s\t%.2f%%\t%s / %s\t%.2f%%\t%s / %s\t%s / %s\n",
		s.Name,
		s.CPUPercentage,
		units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit),
		s.MemoryPercentage,
		units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx),
		units.HumanSize(s.BlockRead), units.HumanSize(s.BlockWrite))
	return nil
}

// CmdStats displays a live stream of resource usage statistics for one or more containers.
//
// This shows real-time information on CPU usage, memory usage, network I/O, and block I/O.
//
// Usage: docker stats [OPTIONS] [CONTAINER...]
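//
// For example (container names are illustrative):
//
//	docker stats                      # stream stats for every running container
//	docker stats --no-stream web db   # print a single sample for two containers
//	docker stats -a                   # also list containers that are not running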
func (cli *DockerCli) CmdStats(args ...string) error {
	cmd := Cli.Subcmd("stats", []string{"[CONTAINER...]"}, Cli.DockerCommands["stats"].Description, true)
	all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)")
	noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result")

	cmd.ParseFlags(args, true)

	names := cmd.Args()
	showAll := len(names) == 0

	if showAll {
		options := types.ContainerListOptions{
			All: *all,
		}
		cs, err := cli.client.ContainerList(options)
		if err != nil {
			return err
		}
		for _, c := range cs {
			names = append(names, c.ID[:12])
		}
	}
	if len(names) == 0 && !showAll {
		return fmt.Errorf("No containers found")
	}
	sort.Strings(names)

	var (
		cStats = stats{}
		w      = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
	)
	printHeader := func() {
		if !*noStream {
			// clear the terminal and move the cursor to the top-left before
			// reprinting the table
			fmt.Fprint(cli.out, "\033[2J")
			fmt.Fprint(cli.out, "\033[H")
		}
		io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\n")
	}
	for _, n := range names {
		s := &containerStats{Name: n}
		// no need to lock here since only the main goroutine is running at this point
		cStats.cs = append(cStats.cs, s)
		go s.Collect(cli, !*noStream)
	}
	closeChan := make(chan error)
	if showAll {
		type watch struct {
			cid   string
			event string
			err   error
		}
		getNewContainers := func(c chan<- watch) {
			f := filters.NewArgs()
			f.Add("type", "container")
			options := types.EventsOptions{
				Filters: f,
			}
			resBody, err := cli.client.Events(options)
			if err != nil {
				c <- watch{err: err}
				return
			}
			defer resBody.Close()

			decodeEvents(resBody, func(event events.Message, err error) error {
				if err != nil {
					c <- watch{err: err}
					return nil
				}

				c <- watch{event.ID[:12], event.Action, nil}
				return nil
			})
		}
		go func(stopChan chan<- error) {
			cChan := make(chan watch)
			go getNewContainers(cChan)
			for {
				c := <-cChan
				if c.err != nil {
					stopChan <- c.err
					return
				}
				switch c.event {
				case "create":
					s := &containerStats{Name: c.cid}
					cStats.mu.Lock()
					cStats.cs = append(cStats.cs, s)
					cStats.mu.Unlock()
					go s.Collect(cli, !*noStream)
				case "stop":
				case "die":
					if !*all {
						// cStats cannot be a map keyed by container ID (which would make
						// removal O(1)) because ranging over a map is unordered, so the
						// rows would jump around in the displayed list.
						// The container may not be in the list at all, so only remove
						// it when a match is found.
						remove := -1
						cStats.mu.Lock()
						for i, s := range cStats.cs {
							if s.Name == c.cid {
								remove = i
								break
							}
						}
						if remove != -1 {
							cStats.cs = append(cStats.cs[:remove], cStats.cs[remove+1:]...)
						}
						cStats.mu.Unlock()
					}
				}
			}
		}(closeChan)
	} else {
		close(closeChan)
	}
	// pause briefly so that failed connections for containers that do not
	// exist can be evicted before we display the initial or default values.
	time.Sleep(1500 * time.Millisecond)
	var errs []string
	cStats.mu.Lock()
	for _, c := range cStats.cs {
		c.mu.Lock()
		if c.err != nil {
			errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err))
		}
		c.mu.Unlock()
	}
	cStats.mu.Unlock()
	if len(errs) > 0 {
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	for range time.Tick(500 * time.Millisecond) {
		printHeader()
		toRemove := []int{}
		cStats.mu.Lock()
		for i, s := range cStats.cs {
			if err := s.Display(w); err != nil && !*noStream {
				toRemove = append(toRemove, i)
			}
		}
		for j := len(toRemove) - 1; j >= 0; j-- {
			i := toRemove[j]
			cStats.cs = append(cStats.cs[:i], cStats.cs[i+1:]...)
		}
		if len(cStats.cs) == 0 && !showAll {
			// unlock before returning so the mutex is not left held
			cStats.mu.Unlock()
			return nil
		}
		cStats.mu.Unlock()
		w.Flush()
		if *noStream {
			break
		}
		select {
		case err, ok := <-closeChan:
			if ok {
				if err != nil {
					// suppress "unexpected EOF" in the CLI when the daemon
					// restarts so that it shuts down cleanly
					if err == io.ErrUnexpectedEOF {
						return nil
					}
					return err
				}
			}
		default:
			// no new event; keep displaying
		}
	}
	return nil
}

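// calculateCPUPercent turns the change in the container's CPU time relative
// to the change in total system CPU time into a percentage, scaled by the
// number of CPUs reported in PercpuUsage.
//
// Worked example (numbers are illustrative): if the container consumed 100
// more units of CPU time while the whole system consumed 1000 more, and
// PercpuUsage has 4 entries, the result is (100/1000) * 4 * 100 = 40%.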
func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
	var (
		cpuPercent = 0.0
		// calculate the change in the container's CPU usage between readings
		cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU)
		// calculate the change for the entire system between readings
		systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem)
	)

	if systemDelta > 0.0 && cpuDelta > 0.0 {
		cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0
	}
	return cpuPercent
}

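// calculateBlockIO sums the "Read" and "Write" byte counters from
// blkio.IoServiceBytesRecursive; the Op comparison is case-insensitive.
// For example (entries are illustrative), {Op: "Read", Value: 1024} and
// {Op: "Write", Value: 2048} yield blkRead=1024 and blkWrite=2048.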
func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) {
	for _, bioEntry := range blkio.IoServiceBytesRecursive {
		switch strings.ToLower(bioEntry.Op) {
		case "read":
			blkRead += bioEntry.Value
		case "write":
			blkWrite += bioEntry.Value
		}
	}
	return
}

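// calculateNetwork totals the received and transmitted byte counts across
// all of the container's networks. For example (values are illustrative),
// two networks reporting RxBytes of 100 and 200 and TxBytes of 10 and 20
// yield rx=300 and tx=30.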
func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) {
	var rx, tx float64

	for _, v := range network {
		rx += float64(v.RxBytes)
		tx += float64(v.TxBytes)
	}
	return rx, tx
}