// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

// slice is the runtime representation of a Go slice header: a pointer to
// the backing array plus the current length and capacity.
// NOTE(review): the field order presumably must match the compiler's layout
// for slice headers — confirm against cmd/compile before reordering.
type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}

// A notInHeapSlice is a slice backed by runtime/internal/sys.NotInHeap memory.
type notInHeapSlice struct {
	array *notInHeap
	len   int
	cap   int
}

// panicmakeslicelen panics with the error reported when a slice length
// passed to make is out of range.
func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}

// panicmakeslicecap panics with the error reported when a slice capacity
// passed to make is out of range.
func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}

// makeslicecopy allocates a slice of "tolen" elements of type "et",
// then copies "fromlen" elements of type "et" into that new allocation from "from".
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
	var tomem, copymem uintptr
	if uintptr(tolen) > uintptr(fromlen) {
		// tolen exceeds the known-good fromlen, so its byte size must be
		// checked for multiplication overflow and against the allocation
		// limit. The unsigned comparison above also catches negative tolen.
		var overflow bool
		tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen))
		if overflow || tomem > maxAlloc || tolen < 0 {
			panicmakeslicelen()
		}
		copymem = et.Size_ * uintptr(fromlen)
	} else {
		// fromlen is a known good length providing and equal or greater than tolen,
		// thereby making tolen a good slice length too as from and to slices have the
		// same element width.
		tomem = et.Size_ * uintptr(tolen)
		copymem = tomem
	}

	var to unsafe.Pointer
	if et.PtrBytes == 0 {
		// Pointer-free elements: allocate without zeroing, then clear only
		// the tail that the memmove below will not overwrite.
		to = mallocgc(tomem, nil, false)
		if copymem < tomem {
			memclrNoHeapPointers(add(to, copymem), tomem-copymem)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		to = mallocgc(tomem, et, true)
		if copymem > 0 && writeBarrier.enabled {
			// Only shade the pointers in old.array since we know the destination slice to
			// only contains nil pointers because it has been cleared during alloc.
			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem)
		}
	}

	// Instrument the read of the source region for the race, msan, and asan
	// detectors before performing the actual copy.
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(makeslicecopy)
		racereadrangepc(from, copymem, callerpc, pc)
	}
	if msanenabled {
		msanread(from, copymem)
	}
	if asanenabled {
		asanread(from, copymem)
	}

	memmove(to, from, copymem)

	return to
}

// makeslice implements make([]T, len, cap): it validates len and cap against
// the element size and the allocation limit, then allocates the (zeroed)
// backing array.
func makeslice(et *_type, len, cap int) unsafe.Pointer {
	mem, overflow := math.MulUintptr(et.Size_, uintptr(cap))
	if overflow || mem > maxAlloc || len < 0 || len > cap {
		// NOTE: Produce a 'len out of range' error instead of a
		// 'cap out of range' error when someone does make([]T, bignumber).
		// 'cap out of range' is true too, but since the cap is only being
		// supplied implicitly, saying len is clearer.
		// See golang.org/issue/4085.
		mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
		if overflow || mem > maxAlloc || len < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}

	return mallocgc(mem, et, true)
}

// makeslice64 is makeslice for 64-bit len/cap arguments; it panics if either
// value does not fit in an int on this platform.
func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
	len := int(len64)
	if int64(len) != len64 {
		panicmakeslicelen()
	}

	cap := int(cap64)
	if int64(cap) != cap64 {
		panicmakeslicecap()
	}

	return makeslice(et, len, cap)
}

// This is a wrapper over runtime/internal/math.MulUintptr,
// so the compiler can recognize and treat it as an intrinsic.
func mulUintptr(a, b uintptr) (uintptr, bool) {
	return math.MulUintptr(a, b)
}

// growslice allocates new backing store for a slice.
//
// arguments:
//
//	oldPtr = pointer to the slice's backing array
//	newLen = new length (= oldLen + num)
//	oldCap = original slice's capacity.
//	num = number of elements being added
//	et = element type
//
// return values:
//
//	newPtr = pointer to the new backing store
//	newLen = same value as the argument
//	newCap = capacity of the new backing store
//
// Requires that uint(newLen) > uint(oldCap).
// Assumes the original slice length is newLen - num
//
// A new backing store is allocated with space for at least newLen elements.
// Existing entries [0, oldLen) are copied over to the new backing store.
// Added entries [oldLen, newLen) are not initialized by growslice
// (although for pointer-containing element types, they are zeroed). They
// must be initialized by the caller.
// Trailing entries [newLen, newCap) are zeroed.
//
// growslice's odd calling convention makes the generated code that calls
// this function simpler. In particular, it accepts and returns the
// new length so that the old length is not live (does not need to be
// spilled/restored) and the new length is returned (also does not need
// to be spilled/restored).
func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice {
	oldLen := newLen - num
	// Instrument the read of the old backing array for the race, msan,
	// and asan detectors before any copying happens.
	if raceenabled {
		callerpc := getcallerpc()
		racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice))
	}
	if msanenabled {
		msanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}
	if asanenabled {
		asanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}

	if newLen < 0 {
		panic(errorString("growslice: len out of range"))
	}

	if et.Size_ == 0 {
		// append should not create a slice with nil pointer but non-zero len.
		// We assume that append doesn't need to preserve oldPtr in this case.
		return slice{unsafe.Pointer(&zerobase), newLen, newLen}
	}

	// Compute the new capacity in elements.
	newcap := oldCap
	doublecap := newcap + newcap
	if newLen > doublecap {
		newcap = newLen
	} else {
		const threshold = 256
		if oldCap < threshold {
			newcap = doublecap
		} else {
			// Check 0 < newcap to detect overflow
			// and prevent an infinite loop.
			for 0 < newcap && newcap < newLen {
				// Transition from growing 2x for small slices
				// to growing 1.25x for large slices. This formula
				// gives a smooth-ish transition between the two.
				newcap += (newcap + 3*threshold) / 4
			}
			// Set newcap to the requested cap when
			// the newcap calculation overflowed.
			if newcap <= 0 {
				newcap = newLen
			}
		}
	}

	// Convert element counts to byte sizes, then round the capacity up to
	// the allocator's size class and recompute newcap from the rounded
	// size so no allocated space is wasted.
	var overflow bool
	var lenmem, newlenmem, capmem uintptr
	// Specialize for common values of et.Size.
	// For 1 we don't need any division/multiplication.
	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	switch {
	case et.Size_ == 1:
		lenmem = uintptr(oldLen)
		newlenmem = uintptr(newLen)
		capmem = roundupsize(uintptr(newcap))
		overflow = uintptr(newcap) > maxAlloc
		newcap = int(capmem)
	case et.Size_ == goarch.PtrSize:
		lenmem = uintptr(oldLen) * goarch.PtrSize
		newlenmem = uintptr(newLen) * goarch.PtrSize
		capmem = roundupsize(uintptr(newcap) * goarch.PtrSize)
		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
		newcap = int(capmem / goarch.PtrSize)
	case isPowerOfTwo(et.Size_):
		var shift uintptr
		if goarch.PtrSize == 8 {
			// Mask shift for better code generation.
			shift = uintptr(sys.TrailingZeros64(uint64(et.Size_))) & 63
		} else {
			shift = uintptr(sys.TrailingZeros32(uint32(et.Size_))) & 31
		}
		lenmem = uintptr(oldLen) << shift
		newlenmem = uintptr(newLen) << shift
		capmem = roundupsize(uintptr(newcap) << shift)
		overflow = uintptr(newcap) > (maxAlloc >> shift)
		newcap = int(capmem >> shift)
		capmem = uintptr(newcap) << shift
	default:
		lenmem = uintptr(oldLen) * et.Size_
		newlenmem = uintptr(newLen) * et.Size_
		capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
		capmem = roundupsize(capmem)
		newcap = int(capmem / et.Size_)
		capmem = uintptr(newcap) * et.Size_
	}

	// The check of overflow in addition to capmem > maxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32bit architectures with this example program:
	//
	//	type T [1<<27 + 1]int64
	//
	//	var d T
	//	var s []T
	//
	//	func main() {
	//		s = append(s, d, d, d, d)
	//		print(len(s), "\n")
	//	}
	if overflow || capmem > maxAlloc {
		panic(errorString("growslice: len out of range"))
	}

	var p unsafe.Pointer
	if et.PtrBytes == 0 {
		p = mallocgc(capmem, nil, false)
		// The append() that calls growslice is going to overwrite from oldLen to newLen.
		// Only clear the part that will not be overwritten.
		// The reflect_growslice() that calls growslice will manually clear
		// the region not cleared here.
		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if lenmem > 0 && writeBarrier.enabled {
			// Only shade the pointers in oldPtr since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			// The barrier range stops after the last pointer word of the
			// final element (lenmem - et.Size_ + et.PtrBytes).
			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes)
		}
	}
	memmove(p, oldPtr, lenmem)

	return slice{p, newLen, newcap}
}

//go:linkname reflect_growslice reflect.growslice
func reflect_growslice(et *_type, old slice, num int) slice {
	// Semantically equivalent to slices.Grow, except that the caller
	// is responsible for ensuring that old.len+num > old.cap.
	num -= old.cap - old.len // preserve memory of old[old.len:old.cap]
	new := growslice(old.array, old.cap+num, old.cap, num, et)
	// growslice does not zero out new[old.cap:new.len] since it assumes that
	// the memory will be overwritten by an append() that called growslice.
	// Since the caller of reflect_growslice is not append(),
	// zero out this region before returning the slice to the reflect package.
	// (For pointer-containing element types growslice already allocated
	// zeroed memory, so only the PtrBytes == 0 case needs clearing here.)
	if et.PtrBytes == 0 {
		oldcapmem := uintptr(old.cap) * et.Size_
		newlenmem := uintptr(new.len) * et.Size_
		memclrNoHeapPointers(add(new.array, oldcapmem), newlenmem-oldcapmem)
	}
	new.len = old.len // preserve the old length
	return new
}

// isPowerOfTwo reports whether x is a power of two.
// Note that it returns true for x == 0; callers must exclude that case
// (growslice only reaches it after the et.Size_ == 0 early return).
func isPowerOfTwo(x uintptr) bool {
	return x&(x-1) == 0
}

// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
310 func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int { 311 if fromLen == 0 || toLen == 0 { 312 return 0 313 } 314 315 n := fromLen 316 if toLen < n { 317 n = toLen 318 } 319 320 if width == 0 { 321 return n 322 } 323 324 size := uintptr(n) * width 325 if raceenabled { 326 callerpc := getcallerpc() 327 pc := abi.FuncPCABIInternal(slicecopy) 328 racereadrangepc(fromPtr, size, callerpc, pc) 329 racewriterangepc(toPtr, size, callerpc, pc) 330 } 331 if msanenabled { 332 msanread(fromPtr, size) 333 msanwrite(toPtr, size) 334 } 335 if asanenabled { 336 asanread(fromPtr, size) 337 asanwrite(toPtr, size) 338 } 339 340 if size == 1 { // common case worth about 2x to do here 341 // TODO: is this still worth it with new memmove impl? 342 *(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer 343 } else { 344 memmove(toPtr, fromPtr, size) 345 } 346 return n 347 } 348 349 //go:linkname bytealg_MakeNoZero internal/bytealg.MakeNoZero 350 func bytealg_MakeNoZero(len int) []byte { 351 if uintptr(len) > maxAlloc { 352 panicmakeslicelen() 353 } 354 return unsafe.Slice((*byte)(mallocgc(uintptr(len), nil, false)), len) 355 }