bosun.org@v0.0.0-20210513094433-e25bc3e69a1f/cmd/scollector/collectors/cadvisor.go

package collectors

import (
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"

	"github.com/google/cadvisor/client"
	"github.com/google/cadvisor/info/v1"

	"bosun.org/cmd/scollector/conf"
	"bosun.org/metadata"
	"bosun.org/opentsdb"
	"bosun.org/slog"
)

func init() {
	registerInit(startCadvisorCollector)
}

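// cadvisorMeta holds the rate type, unit, and description metadata for every
// metric this collector emits.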
var cadvisorMeta = map[string]MetricMeta{
	"container.cpu": {
		RateType: metadata.Counter,
		Unit:     metadata.Nanosecond,
		Desc:     "Cumulative cpu time consumed in user/system in nanoseconds.",
	},
	"container.cpu.usage": {
		RateType: metadata.Counter,
		Unit:     metadata.Nanosecond,
		Desc:     "Cumulative cpu time consumed in nanoseconds.",
	},
	"container.cpu.usage.percpu": {
		RateType: metadata.Counter,
		Unit:     metadata.Nanosecond,
		Desc:     "Cumulative cpu time consumed per cpu in nanoseconds.",
	},
    39  	"container.cpu.loadavg": {
    40  		RateType: metadata.Gauge,
    41  		Unit:     metadata.Second,
    42  		Desc:     "Smoothed 10s average of number of runnable threads x 1000",
    43  	},
    44  	"container.blkio.io_service_bytes.async": {
    45  		RateType: metadata.Counter,
    46  		Unit:     metadata.Bytes,
    47  		Desc:     "Number of bytes transferred to/from the disk by the cgroup asynchronously",
    48  	},
    49  	"container.blkio.io_service_bytes.read": {
    50  		RateType: metadata.Counter,
    51  		Unit:     metadata.Bytes,
    52  		Desc:     "Number of bytes read from the disk by the cgroup",
    53  	},
    54  	"container.blkio.io_service_bytes.sync": {
    55  		RateType: metadata.Counter,
    56  		Unit:     metadata.Bytes,
    57  		Desc:     "Number of bytes transferred to/from the disk by the cgroup synchronously",
    58  	},
    59  	"container.blkio.io_service_bytes.write": {
    60  		RateType: metadata.Counter,
    61  		Unit:     metadata.Bytes,
    62  		Desc:     "Number of bytes written to the disk by the cgroup",
    63  	},
    64  	"container.blkio.io_serviced.async": {
    65  		RateType: metadata.Counter,
    66  		Unit:     metadata.Operation,
    67  		Desc:     "Number of async IOs issued to the disk by the cgroup",
    68  	},
    69  	"container.blkio.io_serviced.read": {
    70  		RateType: metadata.Counter,
    71  		Unit:     metadata.Operation,
    72  		Desc:     "Number of read issued to the disk by the group",
    73  	},
    74  	"container.blkio.io_serviced.sync": {
    75  		RateType: metadata.Counter,
    76  		Unit:     metadata.Operation,
    77  		Desc:     "Number of sync IOs issued to the disk by the cgroup",
    78  	},
    79  	"container.blkio.io_serviced.write": {
    80  		RateType: metadata.Counter,
    81  		Unit:     metadata.Operation,
    82  		Desc:     "Number of write issued to the disk by the group",
    83  	},
    84  	"container.blkio.io_queued.async": {
    85  		RateType: metadata.Gauge,
    86  		Unit:     metadata.Operation,
    87  		Desc:     "Total number of async requests queued up at any given instant for this cgroup",
    88  	},
    89  	"container.blkio.io_queued.read": {
    90  		RateType: metadata.Gauge,
    91  		Unit:     metadata.Operation,
    92  		Desc:     "Total number of read requests queued up at any given instant for this cgroup",
    93  	},
    94  	"container.blkio.io_queued.sync": {
    95  		RateType: metadata.Gauge,
    96  		Unit:     metadata.Operation,
    97  		Desc:     "Total number of sync requests queued up at any given instant for this cgroup",
    98  	},
    99  	"container.blkio.io_queued.write": {
   100  		RateType: metadata.Gauge,
   101  		Unit:     metadata.Operation,
   102  		Desc:     "Total number of write requests queued up at any given instant for this cgroup",
   103  	},
   104  	"container.blkio.sectors.count": {
   105  		RateType: metadata.Counter,
   106  		Unit:     metadata.Sector,
   107  		Desc:     "Number of sectors transferred to/from disk by the group",
   108  	},
   109  	"container.blkio.io_service_time.async": {
   110  		RateType: metadata.Counter,
   111  		Unit:     metadata.Nanosecond,
   112  		Desc:     "Total amount of time between async request dispatch and request completion for the IOs done by this cgroup",
   113  	},
   114  	"container.blkio.io_service_time.read": {
   115  		RateType: metadata.Counter,
   116  		Unit:     metadata.Nanosecond,
   117  		Desc:     "Total amount of time between read request dispatch and request completion for the IOs done by this cgroup",
   118  	},
   119  	"container.blkio.io_service_time.sync": {
   120  		RateType: metadata.Counter,
   121  		Unit:     metadata.Nanosecond,
   122  		Desc:     "Total amount of time between sync request dispatch and request completion for the IOs done by this cgroup",
   123  	},
   124  	"container.blkio.io_service_time.write": {
   125  		RateType: metadata.Counter,
   126  		Unit:     metadata.Nanosecond,
   127  		Desc:     "Total amount of time between write request dispatch and request completion for the IOs done by this cgroup",
   128  	},
   129  	"container.blkio.io_wait_time.async": {
   130  		RateType: metadata.Counter,
   131  		Unit:     metadata.Nanosecond,
   132  		Desc:     "Total amount of time the async IOs for this cgroup spent waiting in the scheduler queues for service",
   133  	},
   134  	"container.blkio.io_wait_time.read": {
   135  		RateType: metadata.Counter,
   136  		Unit:     metadata.Nanosecond,
   137  		Desc:     "Total amount of time the read request for this cgroup spent waiting in the scheduler queues for service",
   138  	},
   139  	"container.blkio.io_wait_time.sync": {
   140  		RateType: metadata.Counter,
   141  		Unit:     metadata.Nanosecond,
   142  		Desc:     "Total amount of time the sync IOs for this cgroup spent waiting in the scheduler queues for service",
   143  	},
   144  	"container.blkio.io_wait_time.write": {
   145  		RateType: metadata.Counter,
   146  		Unit:     metadata.Nanosecond,
   147  		Desc:     "Total amount of time the write request for this cgroup spent waiting in the scheduler queues for service",
   148  	},
   149  	"container.blkio.io_merged.async": {
   150  		RateType: metadata.Counter,
   151  		Unit:     metadata.Operation,
   152  		Desc:     "Total number of async requests merged into requests belonging to this cgroup.",
   153  	},
   154  	"container.blkio.io_merged.read": {
   155  		RateType: metadata.Counter,
   156  		Unit:     metadata.Operation,
   157  		Desc:     "Total number of read requests merged into requests belonging to this cgroup.",
   158  	},
   159  	"container.blkio.io_merged.sync": {
   160  		RateType: metadata.Counter,
   161  		Unit:     metadata.Operation,
   162  		Desc:     "Total number of sync requests merged into requests belonging to this cgroup.",
   163  	},
   164  	"container.blkio.io_merged.write": {
   165  		RateType: metadata.Counter,
   166  		Unit:     metadata.Operation,
   167  		Desc:     "Total number of write requests merged into requests belonging to this cgroup.",
   168  	},
   169  	"container.blkio.io_time.count": {
   170  		RateType: metadata.Counter,
   171  		Unit:     metadata.MilliSecond,
   172  		Desc:     "Disk time allocated to cgroup per device",
   173  	},
   174  	"container.fs.available": {
   175  		RateType: metadata.Gauge,
   176  		Unit:     metadata.Bytes,
   177  		Desc:     "Number of bytes available for non-root user.",
   178  	},
   179  	"container.fs.limit": {
   180  		RateType: metadata.Gauge,
   181  		Unit:     metadata.Bytes,
   182  		Desc:     "Number of bytes that can be consumed by the container on this filesystem.",
   183  	},
   184  	"container.fs.usage": {
   185  		RateType: metadata.Gauge,
   186  		Unit:     metadata.Operation,
   187  		Desc:     "Number of bytes that is consumed by the container on this filesystem.",
   188  	},
   189  	"container.fs.reads.time": {
   190  		RateType: metadata.Counter,
   191  		Unit:     metadata.MilliSecond,
   192  		Desc:     "Number of milliseconds spent reading",
   193  	},
   194  	"container.fs.reads.merged": {
   195  		RateType: metadata.Counter,
   196  		Unit:     metadata.Operation,
   197  		Desc:     "Number of reads merged",
   198  	},
   199  	"container.fs.reads.sectors": {
   200  		RateType: metadata.Counter,
   201  		Unit:     metadata.Sector,
   202  		Desc:     "Number of sectors read",
   203  	},
   204  	"container.fs.reads": {
   205  		RateType: metadata.Counter,
   206  		Unit:     metadata.Operation,
   207  		Desc:     "Number of reads completed",
   208  	},
   209  	"container.fs.writes.sectors": {
   210  		RateType: metadata.Counter,
   211  		Unit:     metadata.Sector,
   212  		Desc:     "Number of sectors written",
   213  	},
   214  	"container.fs.writes.time": {
   215  		RateType: metadata.Counter,
   216  		Unit:     metadata.MilliSecond,
   217  		Desc:     "Number of milliseconds spent writing",
   218  	},
   219  	"container.fs.writes.merged": {
   220  		RateType: metadata.Counter,
   221  		Unit:     metadata.Operation,
   222  		Desc:     "Number of writes merged",
   223  	},
   224  	"container.fs.writes": {
   225  		RateType: metadata.Counter,
   226  		Unit:     metadata.Operation,
   227  		Desc:     "Number of writes completed",
   228  	},
   229  	"container.fs.io.current": {
   230  		RateType: metadata.Gauge,
   231  		Unit:     metadata.Operation,
   232  		Desc:     "Number of I/Os currently in progress",
   233  	},
   234  	"container.fs.io.time": {
   235  		RateType: metadata.Counter,
   236  		Unit:     metadata.MilliSecond,
   237  		Desc:     "Number of milliseconds spent doing I/Os",
   238  	},
   239  	"container.fs.io.time.weighted": {
   240  		RateType: metadata.Counter,
   241  		Unit:     metadata.MilliSecond,
   242  		Desc:     "Cumulative weighted I/O time",
   243  	},
   244  	"container.last.seen": {
   245  		RateType: metadata.Gauge,
   246  		Unit:     metadata.None,
   247  	},
   248  	"container.memory.failures": {
   249  		RateType: metadata.Counter,
   250  		Unit:     metadata.Fault,
   251  		Desc:     "Count of memory allocation failure.",
   252  	},
   253  	"container.memory.usage": {
   254  		RateType: metadata.Gauge,
   255  		Unit:     metadata.Bytes,
   256  		Desc:     "Current memory usage.",
   257  	},
   258  	"container.memory.working_set": {
   259  		RateType: metadata.Gauge,
   260  		Unit:     metadata.Bytes,
   261  		Desc:     "Current working set.",
   262  	},
   263  	"container.net.bytes": {
   264  		RateType: metadata.Counter,
   265  		Unit:     metadata.Bytes,
   266  	},
   267  	"container.net.errors": {
   268  		RateType: metadata.Counter,
   269  		Unit:     metadata.Error,
   270  	},
   271  	"container.net.dropped": {
   272  		RateType: metadata.Counter,
   273  		Unit:     metadata.Packet,
   274  	},
   275  	"container.net.packets": {
   276  		RateType: metadata.Counter,
   277  		Unit:     metadata.Packet,
   278  	},
   279  	"container.net.tcp": {
   280  		RateType: metadata.Counter,
   281  		Unit:     metadata.Connection,
   282  		Desc:     "Count of tcp connection states.",
   283  	},
   284  	"container.net.tcp6": {
   285  		RateType: metadata.Counter,
   286  		Unit:     metadata.Connection,
   287  		Desc:     "Count of tcp6 connection states.",
   288  	},
   289  }
   290  
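// blkioStatsWhitelist lists the per-disk blkio stat labels that are reported;
// stats with any other label are skipped.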
var blkioStatsWhitelist = []string{"Async", "Sync", "Read", "Write", "Count"}

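// knownDiskMapErrors records device lookups that have already failed so that
// each failure is only logged once.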
var knownDiskMapErrors = make(map[string]bool)

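// cadvisorAdd appends a data point to md, looking up the metric's rate type,
// unit, and description in cadvisorMeta.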
func cadvisorAdd(md *opentsdb.MultiDataPoint, name string, value interface{}, ts opentsdb.TagSet) {
	Add(md, name, value, ts, cadvisorMeta[name].RateType, cadvisorMeta[name].Unit, cadvisorMeta[name].Desc)
}

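// containerTagSet builds the tag set for a container, merging in any tags from
// ts. Docker containers also get docker_name and docker_id tags taken from the
// container's first two aliases.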
func containerTagSet(ts opentsdb.TagSet, container *v1.ContainerInfo) opentsdb.TagSet {
	var tags opentsdb.TagSet
	// Guard on len(Aliases) so a docker container reported without its
	// name/id aliases cannot cause an index-out-of-range panic.
	if container.Namespace == "docker" && len(container.Aliases) >= 2 {
		tags = opentsdb.TagSet{
			"name":        container.Name,
			"docker_name": container.Aliases[0],
			"docker_id":   container.Aliases[1],
		}
	} else {
		tags = opentsdb.TagSet{
			"name": container.Name,
		}
	}
	for k, v := range ts {
		tags[k] = v
	}
	return tags
}

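// inBlkioWhitelist reports whether name is one of the whitelisted blkio stat
// labels.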
func inBlkioWhitelist(name string) bool {
	for _, n := range blkioStatsWhitelist {
		if n == name {
			return true
		}
	}
	return false
}

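// addBlkioStat emits one data point per whitelisted label in diskStats, tagged
// with the device name. For remote cadvisor instances the device is reported
// as major/minor numbers, since the local /sys tree cannot be consulted.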
func addBlkioStat(md *opentsdb.MultiDataPoint, name string, diskStats v1.PerDiskStats, container *v1.ContainerInfo, config *conf.Cadvisor) {
	var device string
	if config.IsRemote {
		device = fmt.Sprintf("major%d_minor%d", diskStats.Major, diskStats.Minor)
	} else {
		device = blockDeviceLookup(diskStats.Major, diskStats.Minor)
	}
	for label, val := range diskStats.Stats {
		if inBlkioWhitelist(label) {
			cadvisorAdd(md, name+strings.ToLower(label), val, containerTagSet(opentsdb.TagSet{"dev": device}, container))
		}
	}
}

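// blockDeviceLookup resolves a block device's major/minor numbers to a device
// name via /sys/dev/block/<major>:<minor>/uevent, falling back to a
// "majorX_minorY" placeholder if the lookup fails.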
func blockDeviceLookup(major, minor uint64) string {
	blockDeviceLookupFallback := func(major, minor uint64) string {
		name := fmt.Sprintf("major%d_minor%d", major, minor)
		if _, ok := knownDiskMapErrors[name]; !ok {
			slog.Errorf("Unable to perform lookup under /sys/dev/ for block device major(%d) minor(%d). Use IsRemote = true to disable lookups. This error will only be displayed once.", major, minor)
			knownDiskMapErrors[name] = true
		}
		return name
	}

	path := fmt.Sprintf("/sys/dev/block/%d:%d/uevent", major, minor)
	file, err := os.Open(path)
	if err != nil {
		return blockDeviceLookupFallback(major, minor)
	}
	defer file.Close()

	content, err := ioutil.ReadAll(file)
	if err != nil {
		return blockDeviceLookupFallback(major, minor)
	}

	startIdx := strings.Index(string(content), "DEVNAME=")
	if startIdx == -1 {
		return blockDeviceLookupFallback(major, minor)
	}

	// Skip past "DEVNAME=" so startIdx points at the first character of the
	// device name.
	startIdx += len("DEVNAME=")

	endIdx := strings.Index(string(content[startIdx:]), "\n")
	if endIdx == -1 {
		return blockDeviceLookupFallback(major, minor)
	}

	return string(content[startIdx : startIdx+endIdx])
}

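// statsForContainer converts the most recent stats sample for a container into
// data points, covering cpu, filesystem, memory, network, and disk IO.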
func statsForContainer(md *opentsdb.MultiDataPoint, container *v1.ContainerInfo, config *conf.Cadvisor) {
	// Containers can be reported without any stats samples; skip them rather
	// than indexing into an empty slice.
	if len(container.Stats) == 0 {
		return
	}
	stats := container.Stats[0]
	var ts opentsdb.TagSet
	if container.Spec.HasCpu {
		cadvisorAdd(md, "container.cpu", stats.Cpu.Usage.System, containerTagSet(opentsdb.TagSet{"type": "system"}, container))
		cadvisorAdd(md, "container.cpu", stats.Cpu.Usage.User, containerTagSet(opentsdb.TagSet{"type": "user"}, container))

		ts = containerTagSet(ts, container)
		cadvisorAdd(md, "container.cpu.loadavg", stats.Cpu.LoadAverage, ts)
		cadvisorAdd(md, "container.cpu.usage", stats.Cpu.Usage.Total, ts)

		if config.PerCpuUsage {
			for idx := range stats.Cpu.Usage.PerCpu {
				ts = containerTagSet(opentsdb.TagSet{"cpu": strconv.Itoa(idx)}, container)
				cadvisorAdd(md, "container.cpu.usage.percpu", stats.Cpu.Usage.PerCpu[idx], ts)
			}
		}
	}

	if container.Spec.HasFilesystem {
		for idx := range stats.Filesystem {
			ts = containerTagSet(opentsdb.TagSet{"device": stats.Filesystem[idx].Device}, container)
			cadvisorAdd(md, "container.fs.available", stats.Filesystem[idx].Available, ts)
			cadvisorAdd(md, "container.fs.limit", stats.Filesystem[idx].Limit, ts)
			cadvisorAdd(md, "container.fs.usage", stats.Filesystem[idx].Usage, ts)
			cadvisorAdd(md, "container.fs.reads.time", stats.Filesystem[idx].ReadTime, ts)
			cadvisorAdd(md, "container.fs.reads.merged", stats.Filesystem[idx].ReadsMerged, ts)
			cadvisorAdd(md, "container.fs.reads.sectors", stats.Filesystem[idx].SectorsRead, ts)
			cadvisorAdd(md, "container.fs.reads", stats.Filesystem[idx].ReadsCompleted, ts)
			cadvisorAdd(md, "container.fs.writes.sectors", stats.Filesystem[idx].SectorsWritten, ts)
			cadvisorAdd(md, "container.fs.writes.time", stats.Filesystem[idx].WriteTime, ts)
			cadvisorAdd(md, "container.fs.writes.merged", stats.Filesystem[idx].WritesMerged, ts)
			cadvisorAdd(md, "container.fs.writes", stats.Filesystem[idx].WritesCompleted, ts)
			cadvisorAdd(md, "container.fs.io.current", stats.Filesystem[idx].IoInProgress, ts)
			cadvisorAdd(md, "container.fs.io.time", stats.Filesystem[idx].IoTime, ts)
			cadvisorAdd(md, "container.fs.io.time.weighted", stats.Filesystem[idx].WeightedIoTime, ts)
		}
	}

	if container.Spec.HasMemory {
		cadvisorAdd(md, "container.memory.failures", stats.Memory.ContainerData.Pgfault,
			containerTagSet(opentsdb.TagSet{"scope": "container", "type": "pgfault"}, container))
		cadvisorAdd(md, "container.memory.failures", stats.Memory.ContainerData.Pgmajfault,
			containerTagSet(opentsdb.TagSet{"scope": "container", "type": "pgmajfault"}, container))
		cadvisorAdd(md, "container.memory.failures", stats.Memory.HierarchicalData.Pgfault,
			containerTagSet(opentsdb.TagSet{"scope": "hierarchy", "type": "pgfault"}, container))
		cadvisorAdd(md, "container.memory.failures", stats.Memory.HierarchicalData.Pgmajfault,
			containerTagSet(opentsdb.TagSet{"scope": "hierarchy", "type": "pgmajfault"}, container))
		cadvisorAdd(md, "container.memory.working_set", stats.Memory.WorkingSet, containerTagSet(nil, container))
		cadvisorAdd(md, "container.memory.usage", stats.Memory.Usage, containerTagSet(nil, container))
	}

	if container.Spec.HasNetwork {
		for _, iface := range stats.Network.Interfaces {
			ts = containerTagSet(opentsdb.TagSet{"ifName": iface.Name, "direction": "in"}, container)
			cadvisorAdd(md, "container.net.bytes", iface.RxBytes, ts)
			cadvisorAdd(md, "container.net.errors", iface.RxErrors, ts)
			cadvisorAdd(md, "container.net.dropped", iface.RxDropped, ts)
			cadvisorAdd(md, "container.net.packets", iface.RxPackets, ts)
			ts = containerTagSet(opentsdb.TagSet{"ifName": iface.Name, "direction": "out"}, container)
			cadvisorAdd(md, "container.net.bytes", iface.TxBytes, ts)
			cadvisorAdd(md, "container.net.errors", iface.TxErrors, ts)
			cadvisorAdd(md, "container.net.dropped", iface.TxDropped, ts)
			cadvisorAdd(md, "container.net.packets", iface.TxPackets, ts)
		}
		cadvisorAdd(md, "container.net.tcp", stats.Network.Tcp.Close, containerTagSet(opentsdb.TagSet{"state": "close"}, container))
		cadvisorAdd(md, "container.net.tcp", stats.Network.Tcp.CloseWait, containerTagSet(opentsdb.TagSet{"state": "closewait"}, container))
		cadvisorAdd(md, "container.net.tcp", stats.Network.Tcp.Closing, containerTagSet(opentsdb.TagSet{"state": "closing"}, container))
		cadvisorAdd(md, "container.net.tcp", stats.Network.Tcp.Established, containerTagSet(opentsdb.TagSet{"state": "established"}, container))
		cadvisorAdd(md, "container.net.tcp", stats.Network.Tcp.FinWait1, containerTagSet(opentsdb.TagSet{"state": "finwait1"}, container))
		cadvisorAdd(md, "container.net.tcp", stats.Network.Tcp.FinWait2, containerTagSet(opentsdb.TagSet{"state": "finwait2"}, container))
		cadvisorAdd(md, "container.net.tcp", stats.Network.Tcp.LastAck, containerTagSet(opentsdb.TagSet{"state": "lastack"}, container))
		cadvisorAdd(md, "container.net.tcp", stats.Network.Tcp.Listen, containerTagSet(opentsdb.TagSet{"state": "listen"}, container))
		cadvisorAdd(md, "container.net.tcp", stats.Network.Tcp.SynRecv, containerTagSet(opentsdb.TagSet{"state": "synrecv"}, container))
		cadvisorAdd(md, "container.net.tcp", stats.Network.Tcp.SynSent, containerTagSet(opentsdb.TagSet{"state": "synsent"}, container))
		cadvisorAdd(md, "container.net.tcp", stats.Network.Tcp.TimeWait, containerTagSet(opentsdb.TagSet{"state": "timewait"}, container))

		cadvisorAdd(md, "container.net.tcp6", stats.Network.Tcp6.Close, containerTagSet(opentsdb.TagSet{"state": "close"}, container))
		cadvisorAdd(md, "container.net.tcp6", stats.Network.Tcp6.CloseWait, containerTagSet(opentsdb.TagSet{"state": "closewait"}, container))
		cadvisorAdd(md, "container.net.tcp6", stats.Network.Tcp6.Closing, containerTagSet(opentsdb.TagSet{"state": "closing"}, container))
		cadvisorAdd(md, "container.net.tcp6", stats.Network.Tcp6.Established, containerTagSet(opentsdb.TagSet{"state": "established"}, container))
		cadvisorAdd(md, "container.net.tcp6", stats.Network.Tcp6.FinWait1, containerTagSet(opentsdb.TagSet{"state": "finwait1"}, container))
		cadvisorAdd(md, "container.net.tcp6", stats.Network.Tcp6.FinWait2, containerTagSet(opentsdb.TagSet{"state": "finwait2"}, container))
		cadvisorAdd(md, "container.net.tcp6", stats.Network.Tcp6.LastAck, containerTagSet(opentsdb.TagSet{"state": "lastack"}, container))
		cadvisorAdd(md, "container.net.tcp6", stats.Network.Tcp6.Listen, containerTagSet(opentsdb.TagSet{"state": "listen"}, container))
		cadvisorAdd(md, "container.net.tcp6", stats.Network.Tcp6.SynRecv, containerTagSet(opentsdb.TagSet{"state": "synrecv"}, container))
		cadvisorAdd(md, "container.net.tcp6", stats.Network.Tcp6.SynSent, containerTagSet(opentsdb.TagSet{"state": "synsent"}, container))
		cadvisorAdd(md, "container.net.tcp6", stats.Network.Tcp6.TimeWait, containerTagSet(opentsdb.TagSet{"state": "timewait"}, container))
	}

	if container.Spec.HasDiskIo {
		for _, d := range stats.DiskIo.IoServiceBytes {
			addBlkioStat(md, "container.blkio.io_service_bytes.", d, container, config)
		}

		for _, d := range stats.DiskIo.IoServiced {
			addBlkioStat(md, "container.blkio.io_serviced.", d, container, config)
		}

		for _, d := range stats.DiskIo.IoQueued {
			// Use the io_queued prefix so the emitted metric names match the
			// container.blkio.io_queued.* entries defined in cadvisorMeta.
			addBlkioStat(md, "container.blkio.io_queued.", d, container, config)
		}

		for _, d := range stats.DiskIo.Sectors {
			addBlkioStat(md, "container.blkio.sectors.", d, container, config)
		}

		for _, d := range stats.DiskIo.IoServiceTime {
			addBlkioStat(md, "container.blkio.io_service_time.", d, container, config)
		}

		for _, d := range stats.DiskIo.IoWaitTime {
			addBlkioStat(md, "container.blkio.io_wait_time.", d, container, config)
		}

		for _, d := range stats.DiskIo.IoMerged {
			addBlkioStat(md, "container.blkio.io_merged.", d, container, config)
		}

		for _, d := range stats.DiskIo.IoTime {
			addBlkioStat(md, "container.blkio.io_time.", d, container, config)
		}
	}
}

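// c_cadvisor fetches the latest stats sample for all Docker containers from a
// cadvisor instance and converts them to data points.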
func c_cadvisor(c *client.Client, config *conf.Cadvisor) (opentsdb.MultiDataPoint, error) {
	var md opentsdb.MultiDataPoint

	containers, err := c.AllDockerContainers(&v1.ContainerInfoRequest{NumStats: 1})
	if err != nil {
		slog.Errorf("Error fetching containers from cadvisor: %v", err)
		return md, err
	}

	for _, container := range containers {
		statsForContainer(&md, &container, config)
	}

	return md, nil
}

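// startCadvisorCollector registers one interval collector per configured
// cadvisor instance.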
func startCadvisorCollector(c *conf.Conf) {
	for _, config := range c.Cadvisor {
		// Copy the loop variable: the closure below would otherwise capture
		// the single shared variable, leaving every collector pointed at the
		// last config in the slice.
		config := config
		cClient, err := client.NewClient(config.URL)
		if err != nil {
			slog.Warningf("Could not start collector for URL [%s] due to err: %v", config.URL, err)
			continue
		}
		collectors = append(collectors, &IntervalCollector{
			F: func() (opentsdb.MultiDataPoint, error) {
				return c_cadvisor(cClient, &config)
			},
			name: "cadvisor",
		})
	}
}