github.com/primecitizens/pcz/std@v0.2.1/runtime/builtin_slice.go

// SPDX-License-Identifier: Apache-2.0
// Copyright 2023 The Prime Citizens

//go:build pcz

package runtime

import (
	"unsafe"

	stdslice "github.com/primecitizens/pcz/std/builtin/slice"
	"github.com/primecitizens/pcz/std/core/abi"
	"github.com/primecitizens/pcz/std/core/alloc"
	"github.com/primecitizens/pcz/std/core/arch"
	"github.com/primecitizens/pcz/std/core/asan"
	"github.com/primecitizens/pcz/std/core/assert"
	"github.com/primecitizens/pcz/std/core/bits"
	"github.com/primecitizens/pcz/std/core/math"
	"github.com/primecitizens/pcz/std/core/mem"
	"github.com/primecitizens/pcz/std/core/msan"
	"github.com/primecitizens/pcz/std/core/num"
	"github.com/primecitizens/pcz/std/core/os"
	"github.com/primecitizens/pcz/std/core/race"
)

// see $GOROOT/src/runtime/slice.go

func makeslice(et *abi.Type, len, cap int) unsafe.Pointer {
	return alloc.MakeTyped(getg().G().DefaultAlloc(), et, len, cap)
}

func makeslice64(et *abi.Type, len64, cap64 int64) unsafe.Pointer {
	len := int(len64)
	if int64(len) != len64 {
		panicmakeslicelen()
	}

	cap := int(cap64)
	if int64(cap) != cap64 {
		panicmakeslicecap()
	}

	return makeslice(et, len, cap)
}

func makeslicecopy(et *abi.Type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
	var tomem, copymem uintptr
	if uintptr(tolen) > uintptr(fromlen) {
		var overflow bool
		tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen))
		if overflow || tomem > os.MaxAlloc || tolen < 0 {
			panicmakeslicelen()
		}
		copymem = et.Size_ * uintptr(fromlen)
	} else {
		// fromlen is a known good length that is equal to or greater than tolen,
		// thereby making tolen a good slice length too, as the from and to slices
		// have the same element width.
		tomem = et.Size_ * uintptr(tolen)
		copymem = tomem
	}

	var to unsafe.Pointer
	if et.PtrBytes == 0 {
		to = explicit_mallocgc(nil, tomem, false)
		if copymem < tomem {
			mem.Clear(unsafe.Add(to, copymem), tomem-copymem)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		to = explicit_mallocgc(et, uintptr(tolen), true)
		if copymem > 0 && writeBarrier.enabled {
			// Only shade the pointers in old.array since we know the destination slice
			// only contains nil pointers because it has been cleared during alloc.
			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem)
		}
	}

	if race.Enabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(makeslicecopy)
		race.ReadRangePC(from, copymem, callerpc, pc)
	}
	if msan.Enabled {
		msan.Read(from, copymem)
	}
	if asan.Enabled {
		asan.Read(from, copymem)
	}

	mem.Move(to, from, copymem)
	return to
}

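// Illustrative only: the compiler lowers ordinary slice construction to the
// helpers above. A plain make call becomes a makeslice call (or makeslice64
// when the length/capacity expressions need 64-bit handling), and the
// make-then-copy pattern may be fused into a single makeslicecopy call,
// roughly like:
//
//	s := make([]int, 4, 8)      // -> makeslice(et, 4, 8), et describing int
//	d := make([]byte, len(src))
//	copy(d, src)                // may fuse -> makeslicecopy(et, len(src), len(src), from)
//
// The variable names and the exact lowering here are illustrative and depend
// on the compiler.
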
// growslice allocates new backing store for a slice.
//
// arguments:
//
//	oldPtr = pointer to the slice's backing array
//	newLen = new length (= oldLen + n)
//	oldCap = original slice's capacity
//	n = number of elements being added
//	et = element type
//
// return values:
//
//	newPtr = pointer to the new backing store
//	newLen = same value as the argument
//	newCap = capacity of the new backing store
//
// Requires that uint(newLen) > uint(oldCap).
// Assumes the original slice length is newLen - n.
//
// A new backing store is allocated with space for at least newLen elements.
// Existing entries [0, oldLen) are copied over to the new backing store.
// Added entries [oldLen, newLen) are not initialized by growslice
// (although for pointer-containing element types, they are zeroed). They
// must be initialized by the caller.
// Trailing entries [newLen, newCap) are zeroed.
//
// growslice's odd calling convention makes the generated code that calls
// this function simpler. In particular, it accepts and returns the
// new length so that the old length is not live (does not need to be
// spilled/restored) and the new length is returned (also does not need
// to be spilled/restored).
func growslice(oldPtr unsafe.Pointer, newLen, oldCap, n int, et *abi.Type) stdslice.Header {
	oldLen := newLen - n
	if race.Enabled {
		callerpc := getcallerpc()
		race.ReadRangePC(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice))
	}
	if msan.Enabled {
		msan.Read(oldPtr, uintptr(oldLen*int(et.Size_)))
	}
	if asan.Enabled {
		asan.Read(oldPtr, uintptr(oldLen*int(et.Size_)))
	}

	if newLen < 0 {
		assert.Throw("growslice:", "len", "out", "of", "range")
	}

	if et.Size_ == 0 {
		// append should not create a slice with nil pointer but non-zero len.
		// We assume that append doesn't need to preserve oldPtr in this case.
		return stdslice.Header{Array: alloc.ZeroSized(), Len: newLen, Cap: newLen}
	}

	newcap := oldCap
	doublecap := newcap + newcap
	if newLen > doublecap {
		newcap = newLen
	} else {
		const threshold = 256
		if oldCap < threshold {
			newcap = doublecap
		} else {
			// Check 0 < newcap to detect overflow
			// and prevent an infinite loop.
			for 0 < newcap && newcap < newLen {
				// Transition from growing 2x for small slices
				// to growing 1.25x for large slices. This formula
				// gives a smooth-ish transition between the two.
				newcap += (newcap + 3*threshold) / 4
			}
			// Set newcap to the requested cap when
			// the newcap calculation overflowed.
			if newcap <= 0 {
				newcap = newLen
			}
		}
	}

	var overflow bool
	var lenmem, newlenmem, capmem uintptr
	// Specialize for common values of et.Size.
	// For 1 we don't need any division/multiplication.
	// For arch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	switch {
	case et.Size_ == 1:
		lenmem = uintptr(oldLen)
		newlenmem = uintptr(newLen)
		capmem = roundupsize(uintptr(newcap))
		overflow = uintptr(newcap) > os.MaxAlloc
		newcap = int(capmem)
	case et.Size_ == arch.PtrSize:
		lenmem = uintptr(oldLen) * arch.PtrSize
		newlenmem = uintptr(newLen) * arch.PtrSize
		capmem = roundupsize(uintptr(newcap) * arch.PtrSize)
		overflow = uintptr(newcap) > os.MaxAlloc/arch.PtrSize
		newcap = int(capmem / arch.PtrSize)
	case num.IsPowerOfTwo(et.Size_):
		var shift uintptr
		if arch.PtrSize == 8 {
			// Mask shift for better code generation.
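			// Masking with &63 (or &31 on 32-bit targets) lets the compiler
			// see that the shift amount is always in range, so it can emit a
			// plain shift instruction without extra range handling.
			// For example, et.Size_ == 16 yields shift == 4, making the
			// computations below oldLen<<4, newLen<<4 and newcap<<4.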
			shift = uintptr(bits.TrailingZeros64(uint64(et.Size_))) & 63
		} else {
			shift = uintptr(bits.TrailingZeros32(uint32(et.Size_))) & 31
		}
		lenmem = uintptr(oldLen) << shift
		newlenmem = uintptr(newLen) << shift
		capmem = roundupsize(uintptr(newcap) << shift)
		overflow = uintptr(newcap) > (os.MaxAlloc >> shift)
		newcap = int(capmem >> shift)
		capmem = uintptr(newcap) << shift
	default:
		lenmem = uintptr(oldLen) * et.Size_
		newlenmem = uintptr(newLen) * et.Size_
		capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
		capmem = roundupsize(capmem)
		newcap = int(capmem / et.Size_)
		capmem = uintptr(newcap) * et.Size_
	}

	// The check of overflow in addition to capmem > os.MaxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32bit architectures with this example program:
	//
	//	type T [1<<27 + 1]int64
	//
	//	var d T
	//	var s []T
	//
	//	func main() {
	//		s = append(s, d, d, d, d)
	//		print(len(s), "\n")
	//	}
	if overflow || capmem > os.MaxAlloc {
		assert.Throw("growslice:", "len", "out", "of", "range")
	}

	var p unsafe.Pointer
	if et.PtrBytes == 0 {
		p = mallocgc(capmem, nil, false)
		// The append() that calls growslice is going to overwrite from oldLen to newLen.
		// Only clear the part that will not be overwritten.
		// The reflect_growslice() that calls growslice will manually clear
		// the region not cleared here.
		mem.Clear(unsafe.Add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if lenmem > 0 && writeBarrier.enabled {
			// Only shade the pointers in oldPtr since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes)
		}
	}
	mem.Move(p, oldPtr, lenmem)

	return stdslice.Header{Array: p, Len: newLen, Cap: newcap}
}

// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int {
	if fromLen == 0 || toLen == 0 {
		return 0
	}

	n := fromLen
	if toLen < n {
		n = toLen
	}

	if width == 0 {
		return n
	}

	size := uintptr(n) * width
	if race.Enabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(slicecopy)
		race.ReadRangePC(fromPtr, size, callerpc, pc)
		race.WriteRangePC(toPtr, size, callerpc, pc)
	}
	if msan.Enabled {
		msan.Read(fromPtr, size)
		msan.Write(toPtr, size)
	}
	if asan.Enabled {
		asan.Read(fromPtr, size)
		asan.Write(toPtr, size)
	}

	if size == 1 { // common case worth about 2x to do here
		// TODO: is this still worth it with new memmove impl?
		*(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer
	} else {
		mem.Move(toPtr, fromPtr, size)
	}
	return n
}

func panicmakeslicelen() { assert.Throw("makeslice:", "len", "out", "of", "range") }
func panicmakeslicecap() { assert.Throw("makeslice:", "cap", "out", "of", "range") }

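// Illustrative only: with threshold == 256, the capacity growth policy in
// growslice above doubles the capacity while oldCap < 256 and then grows it
// by newcap += (newcap + 3*256)/4, i.e. roughly 1.25x plus a constant, before
// roundupsize rounds the byte size up to an allocation size class. For
// single-element appends the pre-rounding newcap evolves like:
//
//	oldCap 128 -> 256
//	oldCap 256 -> 512
//	oldCap 512 -> 832
//	oldCap 832 -> 1232
//
// The capacities actually observed by user code may be larger, since
// roundupsize and the allocator's size classes apply on top of these values.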