github.com/userpro/linearpool@v0.5.3-0.20231115092206-0ca073169b71/utils.go

/*
 * Linear Allocator
 *
 * Improve the memory allocation and garbage collection performance.
 *
 * Copyright (C) 2020-2023 crazybie@github.com.
 * https://github.com/crazybie/linear_ac
 */

package memorypool

import (
	"fmt"
	"reflect"
	"runtime"
	"sync/atomic"
	"unsafe"
)

const (
	flagIndir uintptr = 1 << 7
	ptrSize           = int64(unsafe.Sizeof(uintptr(0)))
)

// init verifies that the mirror types below still match the reflect and
// runtime ABI this package relies on.
func init() {
	if ptrSize != 8 {
		panic("expect 64bit platform")
	}
	if unsafe.Sizeof(sliceHeader{}) != unsafe.Sizeof(reflect.SliceHeader{}) {
		panic("ABI not match")
	}
	if unsafe.Sizeof(stringHeader{}) != unsafe.Sizeof(reflect.StringHeader{}) {
		panic("ABI not match")
	}
	if unsafe.Sizeof((any)(nil)) != unsafe.Sizeof(emptyInterface{}) {
		panic("ABI not match")
	}
	if unsafe.Sizeof(reflectedValue{}) != unsafe.Sizeof(reflect.Value{}) {
		panic("ABI not match")
	}
}

type sliceHeader struct {
	Data unsafe.Pointer
	Len  int64
	Cap  int64
}

type stringHeader struct {
	Data unsafe.Pointer
	Len  int
}

type emptyInterface struct {
	Type unsafe.Pointer
	Data unsafe.Pointer
}

type reflectedValue struct {
	Type unsafe.Pointer
	Ptr  unsafe.Pointer
	flag uintptr
}

//go:linkname roundupsize runtime.roundupsize
//go:noescape
func roundupsize(size uintptr) uintptr

//go:linkname memclrNoHeapPointers reflect.memclrNoHeapPointers
//go:noescape
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)

//go:linkname memmoveNoHeapPointers reflect.memmove
//go:noescape
func memmoveNoHeapPointers(to, from unsafe.Pointer, n uintptr)

// data returns the data pointer of an interface value.
func data(i interface{}) unsafe.Pointer {
	return (*emptyInterface)(unsafe.Pointer(&i)).Data
}

// interfaceOfUnexported builds an interface value from a reflect.Value,
// including values obtained from unexported fields.
func interfaceOfUnexported(v reflect.Value) (ret interface{}) {
	v2 := (*reflectedValue)(unsafe.Pointer(&v))
	r := (*emptyInterface)(unsafe.Pointer(&ret))
	r.Type = v2.Type
	switch {
	case v2.flag&flagIndir != 0:
		r.Data = *(*unsafe.Pointer)(v2.Ptr)
	default:
		r.Data = v2.Ptr
	}
	return
}

// interfaceEqual reports whether two interface values share the same type
// and data pointer, without invoking the dynamic == operator.
func interfaceEqual(a, b any) bool {
	return *(*emptyInterface)(unsafe.Pointer(&a)) == *(*emptyInterface)(unsafe.Pointer(&b))
}

// resetSlice zeroes the full capacity of s and returns it with length 0.
func resetSlice[T any](s []T) []T {
	c := cap(s)
	s = s[:c]
	var zero T
	for i := 0; i < c; i++ {
		s[i] = zero
	}
	return s[:0]
}

type number interface {
	~int8 | ~int16 | ~int | ~int32 | ~int64 |
		~uint8 | ~uint16 | ~uint | ~uint32 | ~uint64 |
		~float32 | ~float64
}

func max[T number](a, b T) T {
	if a > b {
		return a
	}
	return b
}

func min[T number](a, b T) T {
	if a > b {
		return b
	}
	return a
}

type Logger interface {
	Errorf(format string, args ...interface{})
}

// errorf reports the error via the logger if one is provided, otherwise it
// panics with the formatted error.
func errorf(logger Logger, format string, args ...any) {
	if logger != nil {
		logger.Errorf(format, args...)
	} else {
		panic(fmt.Errorf(format, args...))
	}
}

// mayContainsPtr reports whether a value of kind k may contain pointers.
func mayContainsPtr(k reflect.Kind) bool {
	switch k {
	case reflect.Bool,
		reflect.Int,
		reflect.Int8,
		reflect.Int16,
		reflect.Int32,
		reflect.Int64,
		reflect.Uint,
		reflect.Uint8,
		reflect.Uint16,
		reflect.Uint32,
		reflect.Uint64,
		reflect.Float32,
		reflect.Float64,
		reflect.Complex64,
		reflect.Complex128:
		return false
	}
	return true
}

// noMalloc panics if f performs any heap allocation.
func noMalloc(f func()) {
	var s, e runtime.MemStats
	runtime.ReadMemStats(&s)
	f()
	runtime.ReadMemStats(&e)
	if n := e.Mallocs - s.Mallocs; n > 0 {
		panic(fmt.Errorf("has %v malloc, bytes: %v", n, e.Alloc-s.Alloc))
	}
}

//============================================================================
// Spin lock
//============================================================================

type spinLock int32

func (s *spinLock) Lock() {
	for !atomic.CompareAndSwapInt32((*int32)(s), 0, 1) {
		runtime.Gosched()
	}
}

func (s *spinLock) Unlock() {
	atomic.StoreInt32((*int32)(s), 0)
}

//============================================================================
// weakUniqQueue
//============================================================================

// weakUniqQueue reduces duplication of elements in the queue;
// its main purpose is to reduce memory usage.
type weakUniqQueue[T any] struct {
	spinLock
	slice     []T
	uniqRange int
	equal     func(a, b T) bool
}

func newWeakUniqQueue[T any](uniqRange int, eq func(a, b T) bool) weakUniqQueue[T] {
	return weakUniqQueue[T]{equal: eq, uniqRange: uniqRange}
}

// Clear drops all queued elements.
func (e *weakUniqQueue[T]) Clear() {
	e.Lock()
	defer e.Unlock()
	e.slice = nil
}

// Put appends a to the queue unless it is a duplicate: while the queue is
// shorter than uniqRange the whole queue is scanned, otherwise only the
// last element is compared.
func (e *weakUniqQueue[T]) Put(a T) {
	e.Lock()
	defer e.Unlock()
	if l := len(e.slice); l > 0 {
		if l < e.uniqRange {
			for _, k := range e.slice {
				if e.equal(k, a) {
					return
				}
			}
		}
		last := e.slice[l-1]
		if e.equal(a, last) {
			return
		}
	}
	e.slice = append(e.slice, a)
}

func anyEq(a, b any) bool {
	return a == b
}

func eq[T comparable](a, b T) bool {
	return a == b
}
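//============================================================================
// Illustrative sketch (editor's addition, not part of the original file):
// how weakUniqQueue drops duplicates. The function name exampleWeakUniqQueue
// is hypothetical and only exercises the types defined above.
//============================================================================

func exampleWeakUniqQueue() []int {
	q := newWeakUniqQueue[int](8, eq[int])
	q.Put(1)
	q.Put(1) // dropped: full scan finds a duplicate while len < uniqRange
	q.Put(2)
	q.Put(2) // dropped: equal to the most recently appended element
	return q.slice // [1, 2]
}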