github.com/minio/minio@v0.0.0-20240328213742-3f72439b8a27/cmd/metrics-realtime.go

// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"fmt"
	"time"

	"github.com/minio/madmin-go/v3"
	"github.com/minio/minio/internal/disk"
	"github.com/minio/minio/internal/net"
	c "github.com/shirou/gopsutil/v3/cpu"
	"github.com/shirou/gopsutil/v3/load"
)

// collectMetricsOpts narrows realtime metrics collection to specific hosts,
// specific drive endpoints, a batch job ID, or a deployment ID (site resync).
type collectMetricsOpts struct {
	hosts map[string]struct{}
	disks map[string]struct{}
	jobID string
	depID string
}

// collectLocalMetrics gathers the requested realtime metric types from this
// node only and returns them keyed under the local server address.
func collectLocalMetrics(types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) {
	if types == madmin.MetricsNone {
		return
	}

	if types.Contains(madmin.MetricsDisk) {
		m.ByDisk = make(map[string]madmin.DiskMetric)
		aggr := madmin.DiskMetric{
			CollectedAt: time.Now(),
		}
		for name, disk := range collectLocalDisksMetrics(opts.disks) {
			m.ByDisk[name] = disk
			aggr.Merge(&disk)
		}
		m.Aggregated.Disk = &aggr
	}

	if types.Contains(madmin.MetricsScanner) {
		metrics := globalScannerMetrics.report()
		m.Aggregated.Scanner = &metrics
	}
	if types.Contains(madmin.MetricsOS) {
		metrics := globalOSMetrics.report()
		m.Aggregated.OS = &metrics
	}
	if types.Contains(madmin.MetricsBatchJobs) {
		m.Aggregated.BatchJobs = globalBatchJobsMetrics.report(opts.jobID)
	}
	if types.Contains(madmin.MetricsSiteResync) {
		m.Aggregated.SiteResync = globalSiteResyncMetrics.report(opts.depID)
	}
	if types.Contains(madmin.MetricNet) {
		m.Aggregated.Net = &madmin.NetMetrics{
			CollectedAt:   UTCNow(),
			InterfaceName: globalInternodeInterface,
		}
		netStats, err := net.GetInterfaceNetStats(globalInternodeInterface)
		if err != nil {
			m.Errors = append(m.Errors, fmt.Sprintf("%s: %v (nicstats)", globalMinioAddr, err.Error()))
		} else {
			m.Aggregated.Net.NetStats = netStats
		}
	}
	if types.Contains(madmin.MetricsMem) {
		m.Aggregated.Mem = &madmin.MemMetrics{
			CollectedAt: UTCNow(),
		}
		m.Aggregated.Mem.Info = madmin.GetMemInfo(GlobalContext, globalMinioAddr)
	}
	if types.Contains(madmin.MetricsCPU) {
		m.Aggregated.CPU = &madmin.CPUMetrics{
			CollectedAt: UTCNow(),
		}
		cm, err := c.Times(false)
		if err != nil {
			m.Errors = append(m.Errors, fmt.Sprintf("%s: %v (cpuTimes)", globalMinioAddr, err.Error()))
		} else {
			// not collecting per-cpu stats, so there will be only one element
			if len(cm) == 1 {
				m.Aggregated.CPU.TimesStat = &cm[0]
			} else {
				m.Errors = append(m.Errors, fmt.Sprintf("%s: Expected one CPU stat, got %d", globalMinioAddr, len(cm)))
			}
		}
		cpuCount, err := c.Counts(true)
		if err != nil {
			m.Errors = append(m.Errors, fmt.Sprintf("%s: %v (cpuCount)", globalMinioAddr, err.Error()))
		} else {
			m.Aggregated.CPU.CPUCount = cpuCount
		}

		loadStat, err := load.Avg()
		if err != nil {
			m.Errors = append(m.Errors, fmt.Sprintf("%s: %v (loadStat)", globalMinioAddr, err.Error()))
		} else {
			m.Aggregated.CPU.LoadStat = loadStat
		}
	}
	// Add types...

	// ByHost is a shallow reference, so be careful about sharing.
	m.ByHost = map[string]madmin.Metrics{globalMinioAddr: m.Aggregated}
	m.Hosts = append(m.Hosts, globalMinioAddr)

	return m
}

// collectLocalDisksMetrics returns per-endpoint metrics for the local drives,
// optionally filtered by the given set of drive endpoints.
func collectLocalDisksMetrics(disks map[string]struct{}) map[string]madmin.DiskMetric {
	objLayer := newObjectLayerFn()
	if objLayer == nil {
		return nil
	}

	metrics := make(map[string]madmin.DiskMetric)
	storageInfo := objLayer.LocalStorageInfo(GlobalContext, true)
	for _, d := range storageInfo.Disks {
		if len(disks) != 0 {
			_, ok := disks[d.Endpoint]
			if !ok {
				continue
			}
		}

		if d.State != madmin.DriveStateOk && d.State != madmin.DriveStateUnformatted {
			metrics[d.Endpoint] = madmin.DiskMetric{NDisks: 1, Offline: 1}
			continue
		}

		var dm madmin.DiskMetric
		dm.NDisks = 1
		if d.Healing {
			dm.Healing++
		}
		if d.Metrics != nil {
			dm.LifeTimeOps = make(map[string]uint64, len(d.Metrics.APICalls))
			for k, v := range d.Metrics.APICalls {
				if v != 0 {
					dm.LifeTimeOps[k] = v
				}
			}
			dm.LastMinute.Operations = make(map[string]madmin.TimedAction, len(d.Metrics.APICalls))
			for k, v := range d.Metrics.LastMinute {
				if v.Count != 0 {
					dm.LastMinute.Operations[k] = v
				}
			}
		}

		st, err := disk.GetDriveStats(d.Major, d.Minor)
		if err == nil {
			dm.IOStats = madmin.DiskIOStats{
				ReadIOs:        st.ReadIOs,
				ReadMerges:     st.ReadMerges,
				ReadSectors:    st.ReadSectors,
				ReadTicks:      st.ReadTicks,
				WriteIOs:       st.WriteIOs,
				WriteMerges:    st.WriteMerges,
				WriteSectors:   st.WriteSectors,
				WriteTicks:     st.WriteTicks,
				CurrentIOs:     st.CurrentIOs,
				TotalTicks:     st.TotalTicks,
				ReqTicks:       st.ReqTicks,
				DiscardIOs:     st.DiscardIOs,
				DiscardMerges:  st.DiscardMerges,
				DiscardSectors: st.DiscardSectors,
				DiscardTicks:   st.DiscardTicks,
				FlushIOs:       st.FlushIOs,
				FlushTicks:     st.FlushTicks,
			}
		}

		metrics[d.Endpoint] = dm
	}
	return metrics
}

// collectRemoteMetrics gathers the requested metric types from all peer nodes
// in a distributed erasure setup and merges the responses into one result.
func collectRemoteMetrics(ctx context.Context, types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) {
	if !globalIsDistErasure {
		return
	}
	all := globalNotificationSys.GetMetrics(ctx, types, opts)
	for _, remote := range all {
		m.Merge(&remote)
	}
	return m
}
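
// The sketch below is not part of the original file. It is a hypothetical
// convenience wrapper (the name collectMetrics is made up) illustrating how
// the two collectors above are typically combined: gather this node's
// metrics first, then merge in whatever the peer nodes report.
func collectMetrics(ctx context.Context, types madmin.MetricType, opts collectMetricsOpts) madmin.RealtimeMetrics {
	m := collectLocalMetrics(types, opts)
	remote := collectRemoteMetrics(ctx, types, opts)
	// RealtimeMetrics.Merge combines hosts, errors and aggregated values,
	// the same call collectRemoteMetrics uses for each peer response.
	m.Merge(&remote)
	return m
}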