github.com/isyscore/isc-gobase@v1.5.3-0.20231218061332-cbc7451899e9/system/cpu/cpu_darwin_cgo.go

//go:build darwin && cgo

package cpu

/*
#include <stdlib.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <mach/mach_init.h>
#include <mach/mach_host.h>
#include <mach/host_info.h>
#include <TargetConditionals.h>
#if TARGET_OS_MAC
#include <libproc.h>
#endif
#include <mach/processor_info.h>
#include <mach/vm_map.h>
*/
import "C"

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"unsafe"
)

// These CPU times for darwin are borrowed from influxdb/telegraf.

func perCPUTimes() ([]TimesStat, error) {
	var (
		count   C.mach_msg_type_number_t
		cpuload *C.processor_cpu_load_info_data_t
		ncpu    C.natural_t
	)

	status := C.host_processor_info(C.host_t(C.mach_host_self()),
		C.PROCESSOR_CPU_LOAD_INFO,
		&ncpu,
		(*C.processor_info_array_t)(unsafe.Pointer(&cpuload)),
		&count)

	if status != C.KERN_SUCCESS {
		return nil, fmt.Errorf("host_processor_info error=%d", int(status))
	}

	// jump through some cgo casting hoops and ensure we properly free
	// the memory that cpuload points to
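	// (note: the vm_deallocate size below mirrors the upstream
	// telegraf/gopsutil code; Mach deallocation is page-granular, so the
	// page backing the load-info array is released for typical core counts)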
	target := C.vm_map_t(C.mach_task_self_)
	address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload)))
	defer C.vm_deallocate(target, address, C.vm_size_t(ncpu))

	// the body of struct processor_cpu_load_info
	// aka processor_cpu_load_info_data_t
	var cpu_ticks [C.CPU_STATE_MAX]uint32

	// copy the cpuload array to a []byte buffer
	// where we can binary.Read the data
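	// (the large-array cast below reinterprets the C memory as a Go byte
	// slice of exactly `size` bytes without copying; binary.LittleEndian is
	// appropriate since darwin's supported architectures, amd64 and arm64,
	// are both little-endian)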
	size := int(ncpu) * binary.Size(cpu_ticks)
	buf := (*[1 << 30]byte)(unsafe.Pointer(cpuload))[:size:size]

	bbuf := bytes.NewBuffer(buf)

	var ret []TimesStat

	for i := 0; i < int(ncpu); i++ {
		err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks)
		if err != nil {
			return nil, err
		}

		c := TimesStat{
			CPU:    fmt.Sprintf("cpu%d", i),
			User:   float64(cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec,
			System: float64(cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec,
			Nice:   float64(cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec,
			Idle:   float64(cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec,
		}

		ret = append(ret, c)
	}

	return ret, nil
}

func allCPUTimes() ([]TimesStat, error) {
	var count C.mach_msg_type_number_t
	var cpuload C.host_cpu_load_info_data_t

	count = C.HOST_CPU_LOAD_INFO_COUNT

	status := C.host_statistics(C.host_t(C.mach_host_self()),
		C.HOST_CPU_LOAD_INFO,
		C.host_info_t(unsafe.Pointer(&cpuload)),
		&count)

	if status != C.KERN_SUCCESS {
		return nil, fmt.Errorf("host_statistics error=%d", int(status))
	}

	c := TimesStat{
		CPU:    "cpu-total",
		User:   float64(cpuload.cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec,
		System: float64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec,
		Nice:   float64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec,
		Idle:   float64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec,
	}

	return []TimesStat{c}, nil
}
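
// exampleCPUTimesUsage is a hypothetical sketch (not part of the upstream
// code) showing how the helpers above are typically consumed. It assumes
// TimesStat and ClocksPerSec are defined elsewhere in this package, as the
// functions above already do.
func exampleCPUTimesUsage() {
	// Aggregate ticks across all cores, reported as a single "cpu-total" entry.
	if totals, err := allCPUTimes(); err == nil {
		for _, t := range totals {
			fmt.Printf("%s user=%.2fs system=%.2fs idle=%.2fs\n", t.CPU, t.User, t.System, t.Idle)
		}
	}

	// One entry per logical CPU, named "cpu0", "cpu1", ...
	if perCPU, err := perCPUTimes(); err == nil {
		fmt.Printf("logical CPUs reported: %d\n", len(perCPU))
	}
}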