github.com/brandon-bethke-neudesic/moby@v1.13.1/daemon/stats_collector.go (about)

     1  // +build !solaris
     2  
     3  package daemon
     4  
     5  import (
     6  	"bufio"
     7  	"sync"
     8  	"time"
     9  
    10  	"github.com/Sirupsen/logrus"
    11  	"github.com/docker/docker/api/types"
    12  	"github.com/docker/docker/container"
    13  	"github.com/docker/docker/pkg/pubsub"
    14  )
    15  
// statsSupervisor abstracts the component capable of producing a stats
// sample for a single container; the Daemon satisfies it (see
// newStatsCollector, which wires the daemon in as the supervisor).
type statsSupervisor interface {
	// GetContainerStats collects all the stats related to a container
	GetContainerStats(container *container.Container) (*types.StatsJSON, error)
}
    20  
    21  // newStatsCollector returns a new statsCollector that collections
    22  // stats for a registered container at the specified interval.
    23  // The collector allows non-running containers to be added
    24  // and will start processing stats when they are started.
    25  func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector {
    26  	s := &statsCollector{
    27  		interval:   interval,
    28  		supervisor: daemon,
    29  		publishers: make(map[*container.Container]*pubsub.Publisher),
    30  		bufReader:  bufio.NewReaderSize(nil, 128),
    31  	}
    32  	platformNewStatsCollector(s)
    33  	go s.run()
    34  	return s
    35  }
    36  
// statsCollector manages and provides container resource stats
type statsCollector struct {
	m          sync.Mutex // guards publishers
	supervisor statsSupervisor
	interval   time.Duration // how often run() samples all containers
	// publishers maps each registered container to the pubsub fan-out
	// used to deliver its stats samples to subscribers.
	publishers map[*container.Container]*pubsub.Publisher
	// bufReader is a reusable read buffer; shared scratch space for
	// platform-specific collection (see getSystemCPUUsage).
	bufReader *bufio.Reader

	// The following fields are not set on Windows currently.
	clockTicksPerSecond uint64
	machineMemory       uint64
}
    49  
    50  // collect registers the container with the collector and adds it to
    51  // the event loop for collection on the specified interval returning
    52  // a channel for the subscriber to receive on.
    53  func (s *statsCollector) collect(c *container.Container) chan interface{} {
    54  	s.m.Lock()
    55  	defer s.m.Unlock()
    56  	publisher, exists := s.publishers[c]
    57  	if !exists {
    58  		publisher = pubsub.NewPublisher(100*time.Millisecond, 1024)
    59  		s.publishers[c] = publisher
    60  	}
    61  	return publisher.Subscribe()
    62  }
    63  
    64  // stopCollection closes the channels for all subscribers and removes
    65  // the container from metrics collection.
    66  func (s *statsCollector) stopCollection(c *container.Container) {
    67  	s.m.Lock()
    68  	if publisher, exists := s.publishers[c]; exists {
    69  		publisher.Close()
    70  		delete(s.publishers, c)
    71  	}
    72  	s.m.Unlock()
    73  }
    74  
    75  // unsubscribe removes a specific subscriber from receiving updates for a container's stats.
    76  func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) {
    77  	s.m.Lock()
    78  	publisher := s.publishers[c]
    79  	if publisher != nil {
    80  		publisher.Evict(ch)
    81  		if publisher.Len() == 0 {
    82  			delete(s.publishers, c)
    83  		}
    84  	}
    85  	s.m.Unlock()
    86  }
    87  
    88  func (s *statsCollector) run() {
    89  	type publishersPair struct {
    90  		container *container.Container
    91  		publisher *pubsub.Publisher
    92  	}
    93  	// we cannot determine the capacity here.
    94  	// it will grow enough in first iteration
    95  	var pairs []publishersPair
    96  
    97  	for range time.Tick(s.interval) {
    98  		// it does not make sense in the first iteration,
    99  		// but saves allocations in further iterations
   100  		pairs = pairs[:0]
   101  
   102  		s.m.Lock()
   103  		for container, publisher := range s.publishers {
   104  			// copy pointers here to release the lock ASAP
   105  			pairs = append(pairs, publishersPair{container, publisher})
   106  		}
   107  		s.m.Unlock()
   108  		if len(pairs) == 0 {
   109  			continue
   110  		}
   111  
   112  		systemUsage, err := s.getSystemCPUUsage()
   113  		if err != nil {
   114  			logrus.Errorf("collecting system cpu usage: %v", err)
   115  			continue
   116  		}
   117  
   118  		for _, pair := range pairs {
   119  			stats, err := s.supervisor.GetContainerStats(pair.container)
   120  			if err != nil {
   121  				if _, ok := err.(errNotRunning); !ok {
   122  					logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err)
   123  				}
   124  				continue
   125  			}
   126  			// FIXME: move to containerd on Linux (not Windows)
   127  			stats.CPUStats.SystemUsage = systemUsage
   128  
   129  			pair.publisher.Publish(*stats)
   130  		}
   131  	}
   132  }