github.com/boomhut/fiber/v2@v2.0.0-20230603160335-b65c856e57d3/internal/gopsutil/cpu/cpu_darwin_cgo.go

//go:build darwin && cgo
// +build darwin,cgo

package cpu

/*
#include <stdlib.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <mach/mach_init.h>
#include <mach/mach_host.h>
#include <mach/host_info.h>
#include <TargetConditionals.h>
#if TARGET_OS_MAC
#include <libproc.h>
#endif
#include <mach/processor_info.h>
#include <mach/vm_map.h>
*/
import "C"

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"unsafe"
)

// These CPU times for darwin are borrowed from influxdb/telegraf.

// perCPUTimes returns one TimesStat per logical CPU, read via
// host_processor_info(PROCESSOR_CPU_LOAD_INFO).
func perCPUTimes() ([]TimesStat, error) {
	var (
		count   C.mach_msg_type_number_t
		cpuload *C.processor_cpu_load_info_data_t
		ncpu    C.natural_t
	)

	status := C.host_processor_info(C.host_t(C.mach_host_self()),
		C.PROCESSOR_CPU_LOAD_INFO,
		&ncpu,
		(*C.processor_info_array_t)(unsafe.Pointer(&cpuload)),
		&count)

	if status != C.KERN_SUCCESS {
		return nil, fmt.Errorf("host_processor_info error=%d", status)
	}

	// jump through some cgo casting hoops and ensure we properly free
	// the memory that cpuload points to
	target := C.vm_map_t(C.mach_task_self_)
	address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload)))
	defer C.vm_deallocate(target, address, C.vm_size_t(ncpu))

	// the body of struct processor_cpu_load_info
	// aka processor_cpu_load_info_data_t
	var cpu_ticks [C.CPU_STATE_MAX]uint32

	// copy the cpuload array to a []byte buffer
	// where we can binary.Read the data
	size := int(ncpu) * binary.Size(cpu_ticks)
	buf := (*[1 << 30]byte)(unsafe.Pointer(cpuload))[:size:size]

	bbuf := bytes.NewBuffer(buf)

	var ret []TimesStat

	for i := 0; i < int(ncpu); i++ {
		err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks)
		if err != nil {
			return nil, err
		}

		c := TimesStat{
			CPU:    fmt.Sprintf("cpu%d", i),
			User:   float64(cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec,
			System: float64(cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec,
			Nice:   float64(cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec,
			Idle:   float64(cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec,
		}

		ret = append(ret, c)
	}

	return ret, nil
}

// allCPUTimes returns a single aggregate "cpu-total" TimesStat, read via
// host_statistics(HOST_CPU_LOAD_INFO).
func allCPUTimes() ([]TimesStat, error) {
	var count C.mach_msg_type_number_t
	var cpuload C.host_cpu_load_info_data_t

	count = C.HOST_CPU_LOAD_INFO_COUNT

	status := C.host_statistics(C.host_t(C.mach_host_self()),
		C.HOST_CPU_LOAD_INFO,
		C.host_info_t(unsafe.Pointer(&cpuload)),
		&count)

	if status != C.KERN_SUCCESS {
		return nil, fmt.Errorf("host_statistics error=%d", status)
	}

	c := TimesStat{
		CPU:    "cpu-total",
		User:   float64(cpuload.cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec,
		System: float64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec,
		Nice:   float64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec,
		Idle:   float64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec,
	}

	return []TimesStat{c}, nil
}
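
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file): a minimal
// darwin+cgo test in a hypothetical cpu_darwin_cgo_test.go within the same
// package, exercising the two helpers above. The test name and the specific
// checks are assumptions for illustration only.
// ---------------------------------------------------------------------------

package cpu

import "testing"

func TestDarwinCgoCPUTimes(t *testing.T) {
	// Per-CPU counters: expect one entry per logical CPU.
	perCPU, err := perCPUTimes()
	if err != nil {
		t.Fatalf("perCPUTimes: %v", err)
	}
	if len(perCPU) == 0 {
		t.Fatal("expected at least one per-CPU entry")
	}

	// Aggregate counters: expect exactly one "cpu-total" entry.
	total, err := allCPUTimes()
	if err != nil {
		t.Fatalf("allCPUTimes: %v", err)
	}
	if len(total) != 1 || total[0].CPU != "cpu-total" {
		t.Fatalf("unexpected aggregate result: %+v", total)
	}
}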