github.com/zhuohuang-hust/src-cbuild@v0.0.0-20230105071821-c7aab3e7c840/mergeCode/runc/events.go

// +build linux

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/opencontainers/runc/libcontainer"
	"github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/urfave/cli"
)

// event struct for encoding the event data to json.
type event struct {
	Type string      `json:"type"`
	ID   string      `json:"id"`
	Data interface{} `json:"data,omitempty"`
}

// stats is the runc specific stats structure for stability when encoding and decoding stats.
type stats struct {
	Cpu     cpu                `json:"cpu"`
	Memory  memory             `json:"memory"`
	Pids    pids               `json:"pids"`
	Blkio   blkio              `json:"blkio"`
	Hugetlb map[string]hugetlb `json:"hugetlb"`
}

type hugetlb struct {
	Usage   uint64 `json:"usage,omitempty"`
	Max     uint64 `json:"max,omitempty"`
	Failcnt uint64 `json:"failcnt"`
}

type blkioEntry struct {
	Major uint64 `json:"major,omitempty"`
	Minor uint64 `json:"minor,omitempty"`
	Op    string `json:"op,omitempty"`
	Value uint64 `json:"value,omitempty"`
}

type blkio struct {
	IoServiceBytesRecursive []blkioEntry `json:"ioServiceBytesRecursive,omitempty"`
	IoServicedRecursive     []blkioEntry `json:"ioServicedRecursive,omitempty"`
	IoQueuedRecursive       []blkioEntry `json:"ioQueueRecursive,omitempty"`
	IoServiceTimeRecursive  []blkioEntry `json:"ioServiceTimeRecursive,omitempty"`
	IoWaitTimeRecursive     []blkioEntry `json:"ioWaitTimeRecursive,omitempty"`
	IoMergedRecursive       []blkioEntry `json:"ioMergedRecursive,omitempty"`
	IoTimeRecursive         []blkioEntry `json:"ioTimeRecursive,omitempty"`
	SectorsRecursive        []blkioEntry `json:"sectorsRecursive,omitempty"`
}

type pids struct {
	Current uint64 `json:"current,omitempty"`
	Limit   uint64 `json:"limit,omitempty"`
}

type throttling struct {
	Periods          uint64 `json:"periods,omitempty"`
	ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"`
	ThrottledTime    uint64 `json:"throttledTime,omitempty"`
}

type cpuUsage struct {
	// Units: nanoseconds.
	Total  uint64   `json:"total,omitempty"`
	Percpu []uint64 `json:"percpu,omitempty"`
	Kernel uint64   `json:"kernel"`
	User   uint64   `json:"user"`
}

type cpu struct {
	Usage      cpuUsage   `json:"usage,omitempty"`
	Throttling throttling `json:"throttling,omitempty"`
}

type memoryEntry struct {
	Limit   uint64 `json:"limit"`
	Usage   uint64 `json:"usage,omitempty"`
	Max     uint64 `json:"max,omitempty"`
	Failcnt uint64 `json:"failcnt"`
}

type memory struct {
	Cache     uint64            `json:"cache,omitempty"`
	Usage     memoryEntry       `json:"usage,omitempty"`
	Swap      memoryEntry       `json:"swap,omitempty"`
	Kernel    memoryEntry       `json:"kernel,omitempty"`
	KernelTCP memoryEntry       `json:"kernelTCP,omitempty"`
	Raw       map[string]uint64 `json:"raw,omitempty"`
}

var eventsCommand = cli.Command{
	Name:      "events",
	Usage:     "display container events such as OOM notifications, cpu, memory, and IO usage statistics",
	ArgsUsage: `<container-id>

Where "<container-id>" is the name for the instance of the container.`,
	Description: `The events command displays information about the container. By default the
information is displayed once every 5 seconds.`,
	Flags: []cli.Flag{
		cli.DurationFlag{Name: "interval", Value: 5 * time.Second, Usage: "set the stats collection interval"},
		cli.BoolFlag{Name: "stats", Usage: "display the container's stats then exit"},
	},
	Action: func(context *cli.Context) error {
		container, err := getContainer(context)
		if err != nil {
			return err
		}
		duration := context.Duration("interval")
		if duration <= 0 {
			return fmt.Errorf("duration interval must be greater than 0")
		}
		status, err := container.Status()
		if err != nil {
			return err
		}
		if status == libcontainer.Stopped {
			return fmt.Errorf("container with id %s is not running", container.ID())
		}
		var (
			stats  = make(chan *libcontainer.Stats, 1)
			events = make(chan *event, 1024)
			group  = &sync.WaitGroup{}
		)
		// Encode events to stdout as JSON from a single goroutine.
		group.Add(1)
		go func() {
			defer group.Done()
			enc := json.NewEncoder(os.Stdout)
			for e := range events {
				if err := enc.Encode(e); err != nil {
					logrus.Error(err)
				}
			}
		}()
		// With --stats, emit a single stats event and exit.
		if context.Bool("stats") {
			s, err := container.Stats()
			if err != nil {
				return err
			}
			events <- &event{Type: "stats", ID: container.ID(), Data: convertLibcontainerStats(s)}
			close(events)
			group.Wait()
			return nil
		}
		// Collect container stats at the configured interval.
		go func() {
			for range time.Tick(context.Duration("interval")) {
				s, err := container.Stats()
				if err != nil {
					logrus.Error(err)
					continue
				}
				stats <- s
			}
		}()
		n, err := container.NotifyOOM()
		if err != nil {
			return err
		}
		for {
			select {
			case _, ok := <-n:
				if ok {
					// An OOM event was received. If ok is false, the channel was
					// closed because the container stopped and its cgroups no
					// longer exist.
					events <- &event{Type: "oom", ID: container.ID()}
				} else {
					n = nil
				}
			case s := <-stats:
				events <- &event{Type: "stats", ID: container.ID(), Data: convertLibcontainerStats(s)}
			}
			if n == nil {
				close(events)
				break
			}
		}
		group.Wait()
		return nil
	},
}

// convertLibcontainerStats maps libcontainer's cgroup statistics onto the
// runc-stable stats structure used for JSON encoding.
func convertLibcontainerStats(ls *libcontainer.Stats) *stats {
	cg := ls.CgroupStats
	if cg == nil {
		return nil
	}
	var s stats
	s.Pids.Current = cg.PidsStats.Current
	s.Pids.Limit = cg.PidsStats.Limit

	s.Cpu.Usage.Kernel = cg.CpuStats.CpuUsage.UsageInKernelmode
	s.Cpu.Usage.User = cg.CpuStats.CpuUsage.UsageInUsermode
	s.Cpu.Usage.Total = cg.CpuStats.CpuUsage.TotalUsage
	s.Cpu.Usage.Percpu = cg.CpuStats.CpuUsage.PercpuUsage
	s.Cpu.Throttling.Periods = cg.CpuStats.ThrottlingData.Periods
	s.Cpu.Throttling.ThrottledPeriods = cg.CpuStats.ThrottlingData.ThrottledPeriods
	s.Cpu.Throttling.ThrottledTime = cg.CpuStats.ThrottlingData.ThrottledTime

	s.Memory.Cache = cg.MemoryStats.Cache
	s.Memory.Kernel = convertMemoryEntry(cg.MemoryStats.KernelUsage)
	s.Memory.KernelTCP = convertMemoryEntry(cg.MemoryStats.KernelTCPUsage)
	s.Memory.Swap = convertMemoryEntry(cg.MemoryStats.SwapUsage)
	s.Memory.Usage = convertMemoryEntry(cg.MemoryStats.Usage)
	s.Memory.Raw = cg.MemoryStats.Stats

	s.Blkio.IoServiceBytesRecursive = convertBlkioEntry(cg.BlkioStats.IoServiceBytesRecursive)
	s.Blkio.IoServicedRecursive = convertBlkioEntry(cg.BlkioStats.IoServicedRecursive)
	s.Blkio.IoQueuedRecursive = convertBlkioEntry(cg.BlkioStats.IoQueuedRecursive)
	s.Blkio.IoServiceTimeRecursive = convertBlkioEntry(cg.BlkioStats.IoServiceTimeRecursive)
	s.Blkio.IoWaitTimeRecursive = convertBlkioEntry(cg.BlkioStats.IoWaitTimeRecursive)
	s.Blkio.IoMergedRecursive = convertBlkioEntry(cg.BlkioStats.IoMergedRecursive)
	s.Blkio.IoTimeRecursive = convertBlkioEntry(cg.BlkioStats.IoTimeRecursive)
	s.Blkio.SectorsRecursive = convertBlkioEntry(cg.BlkioStats.SectorsRecursive)

	s.Hugetlb = make(map[string]hugetlb)
	for k, v := range cg.HugetlbStats {
		s.Hugetlb[k] = convertHugtlb(v)
	}
	return &s
}

func convertHugtlb(c cgroups.HugetlbStats) hugetlb {
	return hugetlb{
		Usage:   c.Usage,
		Max:     c.MaxUsage,
		Failcnt: c.Failcnt,
	}
}

func convertMemoryEntry(c cgroups.MemoryData) memoryEntry {
	return memoryEntry{
		Limit:   c.Limit,
		Usage:   c.Usage,
		Max:     c.MaxUsage,
		Failcnt: c.Failcnt,
	}
}

func convertBlkioEntry(c []cgroups.BlkioStatEntry) []blkioEntry {
	var out []blkioEntry
	for _, e := range c {
		out = append(out, blkioEntry{
			Major: e.Major,
			Minor: e.Minor,
			Op:    e.Op,
			Value: e.Value,
		})
	}
	return out
}