github.com/cspotcode/docker-cli@v20.10.0-rc1.0.20201201121459-3faad7acc5b8+incompatible/cli/command/container/stats_helpers.go

package container

import (
	"context"
	"encoding/json"
	"io"
	"sync"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

type stats struct {
	mu sync.Mutex
	cs []*Stats
}

// daemonOSType is set once we have at least one stat for a container
// from the daemon. It is used to ensure we print the right header based
// on the daemon platform.
var daemonOSType string

func (s *stats) add(cs *Stats) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, exists := s.isKnownContainer(cs.Container); !exists {
		s.cs = append(s.cs, cs)
		return true
	}
	return false
}

func (s *stats) remove(id string) {
	s.mu.Lock()
	if i, exists := s.isKnownContainer(id); exists {
		s.cs = append(s.cs[:i], s.cs[i+1:]...)
	}
	s.mu.Unlock()
}

func (s *stats) isKnownContainer(cid string) (int, bool) {
	for i, c := range s.cs {
		if c.Container == cid {
			return i, true
		}
	}
	return -1, false
}

func collect(ctx context.Context, s *Stats, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) {
	logrus.Debugf("collecting stats for %s", s.Container)
	var (
		getFirst       bool
		previousCPU    uint64
		previousSystem uint64
		u              = make(chan error, 1)
	)

	defer func() {
		// if an error occurred and we never received any stats, release the wait group regardless
		if !getFirst {
			getFirst = true
			waitFirst.Done()
		}
	}()

	response, err := cli.ContainerStats(ctx, s.Container, streamStats)
	if err != nil {
		s.SetError(err)
		return
	}
	defer response.Body.Close()

	dec := json.NewDecoder(response.Body)
	go func() {
		for {
			var (
				v                      *types.StatsJSON
				memPercent, cpuPercent float64
				blkRead, blkWrite      uint64 // Only used on Linux
				mem, memLimit          float64
				pidsStatsCurrent       uint64
			)

			if err := dec.Decode(&v); err != nil {
				// rebuild the decoder so any bytes it has already buffered are not lost on retry
				dec = json.NewDecoder(io.MultiReader(dec.Buffered(), response.Body))
				u <- err
				if err == io.EOF {
					break
				}
				time.Sleep(100 * time.Millisecond)
				continue
			}

			daemonOSType = response.OSType

			if daemonOSType != "windows" {
				previousCPU = v.PreCPUStats.CPUUsage.TotalUsage
				previousSystem = v.PreCPUStats.SystemUsage
				cpuPercent = calculateCPUPercentUnix(previousCPU, previousSystem, v)
				blkRead, blkWrite = calculateBlockIO(v.BlkioStats)
				mem = calculateMemUsageUnixNoCache(v.MemoryStats)
				memLimit = float64(v.MemoryStats.Limit)
				memPercent = calculateMemPercentUnixNoCache(memLimit, mem)
				pidsStatsCurrent = v.PidsStats.Current
			} else {
				cpuPercent = calculateCPUPercentWindows(v)
				blkRead = v.StorageStats.ReadSizeBytes
				blkWrite = v.StorageStats.WriteSizeBytes
				mem = float64(v.MemoryStats.PrivateWorkingSet)
			}
			netRx, netTx := calculateNetwork(v.Networks)
			s.SetStatistics(StatsEntry{
				Name:             v.Name,
				ID:               v.ID,
				CPUPercentage:    cpuPercent,
				Memory:           mem,
				MemoryPercentage: memPercent,
				MemoryLimit:      memLimit,
				NetworkRx:        netRx,
				NetworkTx:        netTx,
				BlockRead:        float64(blkRead),
				BlockWrite:       float64(blkWrite),
				PidsCurrent:      pidsStatsCurrent,
			})
			u <- nil
			if !streamStats {
				return
			}
		}
	}()
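	// Consume results from the decoder goroutine. A two-second timeout zeroes
	// the displayed values so a stalled stream does not show stale numbers,
	// and the WaitGroup is released once the first sample (or timeout) has
	// been seen so the caller can start rendering.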
	for {
		select {
		case <-time.After(2 * time.Second):
			// zero out the values if we have not received an update within
			// the specified duration.
			s.SetErrorAndReset(errors.New("timeout waiting for stats"))
			// if this is the first stat we get, release the WaitGroup
			if !getFirst {
				getFirst = true
				waitFirst.Done()
			}
		case err := <-u:
			s.SetError(err)
			if err == io.EOF {
				break
			}
			if err != nil {
				continue
			}
			// if this is the first stat we get, release the WaitGroup
			if !getFirst {
				getFirst = true
				waitFirst.Done()
			}
		}
		if !streamStats {
			return
		}
	}
}

func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
	var (
		cpuPercent = 0.0
		// calculate the change for the cpu usage of the container in between readings
		cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU)
		// calculate the change for the entire system between readings
		systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem)
		onlineCPUs  = float64(v.CPUStats.OnlineCPUs)
	)

	if onlineCPUs == 0.0 {
		onlineCPUs = float64(len(v.CPUStats.CPUUsage.PercpuUsage))
	}
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		cpuPercent = (cpuDelta / systemDelta) * onlineCPUs * 100.0
	}
	return cpuPercent
}

func calculateCPUPercentWindows(v *types.StatsJSON) float64 {
	// Max number of 100ns intervals between the previous time read and now
	possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals
	possIntervals /= 100                                         // Convert to number of 100ns intervals
	possIntervals *= uint64(v.NumProcs)                          // Multiply by the number of processors

	// Intervals used
	intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage

	// Percentage avoiding divide-by-zero
	if possIntervals > 0 {
		return float64(intervalsUsed) / float64(possIntervals) * 100.0
	}
	return 0.00
}

func calculateBlockIO(blkio types.BlkioStats) (uint64, uint64) {
	var blkRead, blkWrite uint64
	for _, bioEntry := range blkio.IoServiceBytesRecursive {
		if len(bioEntry.Op) == 0 {
			continue
		}
		switch bioEntry.Op[0] {
		case 'r', 'R':
			blkRead = blkRead + bioEntry.Value
		case 'w', 'W':
			blkWrite = blkWrite + bioEntry.Value
		}
	}
	return blkRead, blkWrite
}

func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) {
	var rx, tx float64

	for _, v := range network {
		rx += float64(v.RxBytes)
		tx += float64(v.TxBytes)
	}
	return rx, tx
}

// calculateMemUsageUnixNoCache calculates memory usage of the container.
// Cache is intentionally excluded to avoid misinterpretation of the output.
//
// On a cgroup v1 host, the result is `mem.Usage - mem.Stats["total_inactive_file"]`.
// On a cgroup v2 host, the result is `mem.Usage - mem.Stats["inactive_file"]`.
//
// This definition is consistent with cadvisor and containerd/CRI.
// * https://github.com/google/cadvisor/commit/307d1b1cb320fef66fab02db749f07a459245451
// * https://github.com/containerd/cri/commit/6b8846cdf8b8c98c1d965313d66bc8489166059a
//
// On Docker 19.03 and older, the result was `mem.Usage - mem.Stats["cache"]`.
// See https://github.com/moby/moby/issues/40727 for the background.
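//
// For illustration (hypothetical numbers): with mem.Usage = 500 MiB and
// mem.Stats["inactive_file"] = 120 MiB on a cgroup v2 host, the reported
// value is 500 MiB - 120 MiB = 380 MiB; if the inactive-file counter is
// missing or is not smaller than Usage, the raw mem.Usage is reported.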
func calculateMemUsageUnixNoCache(mem types.MemoryStats) float64 {
	// cgroup v1
	if v, isCgroup1 := mem.Stats["total_inactive_file"]; isCgroup1 && v < mem.Usage {
		return float64(mem.Usage - v)
	}
	// cgroup v2
	if v := mem.Stats["inactive_file"]; v < mem.Usage {
		return float64(mem.Usage - v)
	}
	return float64(mem.Usage)
}

func calculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 {
	// MemoryStats.Limit will never be 0 unless the container is not running and we haven't
	// got any data from cgroup
	if limit != 0 {
		return usedNoCache / limit * 100.0
	}
	return 0
}
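// Illustrative caller (a sketch, not part of the original file): `docker stats`
// typically registers one *Stats per container in the shared stats set, launches
// one collect goroutine each, and blocks on waitFirst until every goroutine has
// delivered its first sample or given up (via the deferred waitFirst.Done in
// collect). NewStats is assumed here as a constructor for *Stats keyed by
// container ID; the actual constructor in this package may differ.
//
//	var (
//		cStats    stats
//		waitFirst = &sync.WaitGroup{}
//	)
//	for _, id := range containerIDs {
//		s := NewStats(id) // hypothetical constructor, see note above
//		if cStats.add(s) {
//			waitFirst.Add(1)
//			go collect(ctx, s, apiClient, true /* streamStats */, waitFirst)
//		}
//	}
//	waitFirst.Wait() // every container has produced a first sample or errored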