// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

// slice is the runtime representation of a Go slice header:
// a pointer to the backing array plus its length and capacity.
type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}

// A notInHeapSlice is a slice backed by runtime/internal/sys.NotInHeap memory.
type notInHeapSlice struct {
	array *notInHeap
	len   int
	cap   int
}

// panicmakeslicelen reports a make([]T, len) whose length is out of range.
func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}

// panicmakeslicecap reports a make([]T, len, cap) whose capacity is out of range.
func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}

// makeslicecopy allocates a slice of "tolen" elements of type "et",
// then copies "fromlen" elements of type "et" into that new allocation from "from".
// Any tail beyond the copied prefix is zeroed. Panics if the requested
// length overflows or exceeds maxAlloc.
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
	var tomem, copymem uintptr
	if uintptr(tolen) > uintptr(fromlen) {
		// tolen exceeds the known-good fromlen, so its byte size must be
		// validated for overflow and against the allocation limit.
		var overflow bool
		tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen))
		if overflow || tomem > maxAlloc || tolen < 0 {
			panicmakeslicelen()
		}
		copymem = et.Size_ * uintptr(fromlen)
	} else {
		// fromlen is a known good length that is equal to or greater than
		// tolen, thereby making tolen a good slice length too as from and
		// to slices have the same element width.
		tomem = et.Size_ * uintptr(tolen)
		copymem = tomem
	}

	var to unsafe.Pointer
	if !et.Pointers() {
		// Pointer-free elements: allocate without zeroing, then clear only
		// the tail that the memmove below will not overwrite.
		to = mallocgc(tomem, nil, false)
		if copymem < tomem {
			memclrNoHeapPointers(add(to, copymem), tomem-copymem)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		to = mallocgc(tomem, et, true)
		if copymem > 0 && writeBarrier.enabled {
			// Only shade the pointers in old.array since we know the destination slice to
			// only contains nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem, et)
		}
	}

	// Sanitizer instrumentation: the source range is about to be read.
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(makeslicecopy)
		racereadrangepc(from, copymem, callerpc, pc)
	}
	if msanenabled {
		msanread(from, copymem)
	}
	if asanenabled {
		asanread(from, copymem)
	}

	memmove(to, from, copymem)

	return to
}

// makeslice implements make([]T, len, cap): it validates len and cap
// against overflow and maxAlloc, then allocates the backing array.
func makeslice(et *_type, len, cap int) unsafe.Pointer {
	mem, overflow := math.MulUintptr(et.Size_, uintptr(cap))
	if overflow || mem > maxAlloc || len < 0 || len > cap {
		// NOTE: Produce a 'len out of range' error instead of a
		// 'cap out of range' error when someone does make([]T, bignumber).
		// 'cap out of range' is true too, but since the cap is only being
		// supplied implicitly, saying len is clearer.
		// See golang.org/issue/4085.
		mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
		if overflow || mem > maxAlloc || len < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}

	return mallocgc(mem, et, true)
}

// makeslice64 is the 64-bit-argument entry point for make([]T, len, cap);
// it rejects values that do not fit in int before delegating to makeslice.
func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
	len := int(len64)
	if int64(len) != len64 {
		panicmakeslicelen()
	}

	cap := int(cap64)
	if int64(cap) != cap64 {
		panicmakeslicecap()
	}

	return makeslice(et, len, cap)
}

// growslice allocates new backing store for a slice.
//
// arguments:
//
//	oldPtr = pointer to the slice's backing array
//	newLen = new length (= oldLen + num)
//	oldCap = original slice's capacity.
//	   num = number of elements being added
//	    et = element type
//
// return values:
//
//	newPtr = pointer to the new backing store
//	newLen = same value as the argument
//	newCap = capacity of the new backing store
//
// Requires that uint(newLen) > uint(oldCap).
// Assumes the original slice length is newLen - num
//
// A new backing store is allocated with space for at least newLen elements.
// Existing entries [0, oldLen) are copied over to the new backing store.
// Added entries [oldLen, newLen) are not initialized by growslice
// (although for pointer-containing element types, they are zeroed). They
// must be initialized by the caller.
// Trailing entries [newLen, newCap) are zeroed.
//
// growslice's odd calling convention makes the generated code that calls
// this function simpler. In particular, it accepts and returns the
// new length so that the old length is not live (does not need to be
// spilled/restored) and the new length is returned (also does not need
// to be spilled/restored).
func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice {
	oldLen := newLen - num
	// Sanitizer instrumentation: the old backing array is about to be read.
	if raceenabled {
		callerpc := getcallerpc()
		racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice))
	}
	if msanenabled {
		msanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}
	if asanenabled {
		asanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}

	if newLen < 0 {
		panic(errorString("growslice: len out of range"))
	}

	if et.Size_ == 0 {
		// append should not create a slice with nil pointer but non-zero len.
		// We assume that append doesn't need to preserve oldPtr in this case.
		return slice{unsafe.Pointer(&zerobase), newLen, newLen}
	}

	newcap := nextslicecap(newLen, oldCap)

	var overflow bool
	var lenmem, newlenmem, capmem uintptr
	// Specialize for common values of et.Size.
	// For 1 we don't need any division/multiplication.
	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	noscan := !et.Pointers()
	switch {
	case et.Size_ == 1:
		lenmem = uintptr(oldLen)
		newlenmem = uintptr(newLen)
		capmem = roundupsize(uintptr(newcap), noscan)
		overflow = uintptr(newcap) > maxAlloc
		newcap = int(capmem)
	case et.Size_ == goarch.PtrSize:
		lenmem = uintptr(oldLen) * goarch.PtrSize
		newlenmem = uintptr(newLen) * goarch.PtrSize
		capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan)
		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
		newcap = int(capmem / goarch.PtrSize)
	case isPowerOfTwo(et.Size_):
		var shift uintptr
		if goarch.PtrSize == 8 {
			// Mask shift for better code generation.
			shift = uintptr(sys.TrailingZeros64(uint64(et.Size_))) & 63
		} else {
			shift = uintptr(sys.TrailingZeros32(uint32(et.Size_))) & 31
		}
		lenmem = uintptr(oldLen) << shift
		newlenmem = uintptr(newLen) << shift
		capmem = roundupsize(uintptr(newcap)<<shift, noscan)
		overflow = uintptr(newcap) > (maxAlloc >> shift)
		newcap = int(capmem >> shift)
		// Recompute capmem so it is an exact multiple of the element size
		// after the size-class rounding above.
		capmem = uintptr(newcap) << shift
	default:
		lenmem = uintptr(oldLen) * et.Size_
		newlenmem = uintptr(newLen) * et.Size_
		capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
		capmem = roundupsize(capmem, noscan)
		newcap = int(capmem / et.Size_)
		// Recompute capmem so it is an exact multiple of the element size
		// after the size-class rounding above.
		capmem = uintptr(newcap) * et.Size_
	}

	// The check of overflow in addition to capmem > maxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32bit architectures with this example program:
	//
	//	type T [1<<27 + 1]int64
	//
	//	var d T
	//	var s []T
	//
	//	func main() {
	//		s = append(s, d, d, d, d)
	//		print(len(s), "\n")
	//	}
	if overflow || capmem > maxAlloc {
		panic(errorString("growslice: len out of range"))
	}

	var p unsafe.Pointer
	if !et.Pointers() {
		p = mallocgc(capmem, nil, false)
		// The append() that calls growslice is going to overwrite from oldLen to newLen.
		// Only clear the part that will not be overwritten.
		// The reflect_growslice() that calls growslice will manually clear
		// the region not cleared here.
		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if lenmem > 0 && writeBarrier.enabled {
			// Only shade the pointers in oldPtr since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes, et)
		}
	}
	memmove(p, oldPtr, lenmem)

	return slice{p, newLen, newcap}
}

// nextslicecap computes the next appropriate slice length.
// For small slices it doubles the capacity; past the threshold it grows
// by roughly 1.25x per step until the requested length is reached.
func nextslicecap(newLen, oldCap int) int {
	newcap := oldCap
	doublecap := newcap + newcap
	if newLen > doublecap {
		return newLen
	}

	const threshold = 256
	if oldCap < threshold {
		return doublecap
	}
	for {
		// Transition from growing 2x for small slices
		// to growing 1.25x for large slices. This formula
		// gives a smooth-ish transition between the two.
		newcap += (newcap + 3*threshold) >> 2

		// We need to check `newcap >= newLen` and whether `newcap` overflowed.
		// newLen is guaranteed to be larger than zero, hence
		// when newcap overflows then `uint(newcap) > uint(newLen)`.
		// This allows to check for both with the same comparison.
		if uint(newcap) >= uint(newLen) {
			break
		}
	}

	// Set newcap to the requested cap when
	// the newcap calculation overflowed.
	if newcap <= 0 {
		return newLen
	}
	return newcap
}

//go:linkname reflect_growslice reflect.growslice
func reflect_growslice(et *_type, old slice, num int) slice {
	// Semantically equivalent to slices.Grow, except that the caller
	// is responsible for ensuring that old.len+num > old.cap.
	num -= old.cap - old.len // preserve memory of old[old.len:old.cap]
	new := growslice(old.array, old.cap+num, old.cap, num, et)
	// growslice does not zero out new[old.cap:new.len] since it assumes that
	// the memory will be overwritten by an append() that called growslice.
	// Since the caller of reflect_growslice is not append(),
	// zero out this region before returning the slice to the reflect package.
	// (For pointer-containing elements growslice already returned cleared
	// memory, so only the pointer-free case needs a manual clear.)
	if !et.Pointers() {
		oldcapmem := uintptr(old.cap) * et.Size_
		newlenmem := uintptr(new.len) * et.Size_
		memclrNoHeapPointers(add(new.array, oldcapmem), newlenmem-oldcapmem)
	}
	new.len = old.len // preserve the old length
	return new
}

// isPowerOfTwo reports whether x is a power of two.
// Note: it also returns true for x == 0; the caller in growslice has
// already handled et.Size_ == 0 before reaching this check.
func isPowerOfTwo(x uintptr) bool {
	return x&(x-1) == 0
}

// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
// It copies min(toLen, fromLen) elements of the given width and returns that count.
func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int {
	if fromLen == 0 || toLen == 0 {
		return 0
	}

	n := fromLen
	if toLen < n {
		n = toLen
	}

	// Zero-width elements: nothing to move, but the element count still matters.
	if width == 0 {
		return n
	}

	size := uintptr(n) * width
	// Sanitizer instrumentation: source is read, destination is written.
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(slicecopy)
		racereadrangepc(fromPtr, size, callerpc, pc)
		racewriterangepc(toPtr, size, callerpc, pc)
	}
	if msanenabled {
		msanread(fromPtr, size)
		msanwrite(toPtr, size)
	}
	if asanenabled {
		asanread(fromPtr, size)
		asanwrite(toPtr, size)
	}

	if size == 1 { // common case worth about 2x to do here
		// TODO: is this still worth it with new memmove impl?
		*(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer
	} else {
		memmove(toPtr, fromPtr, size)
	}
	return n
}

// bytealg_MakeNoZero allocates a []byte without zeroing its contents;
// linkname'd for internal/bytealg. The capacity is rounded up to the
// allocator's size class, so cap may exceed the requested len.
//
//go:linkname bytealg_MakeNoZero internal/bytealg.MakeNoZero
func bytealg_MakeNoZero(len int) []byte {
	if uintptr(len) > maxAlloc {
		panicmakeslicelen()
	}
	cap := roundupsize(uintptr(len), true)
	return unsafe.Slice((*byte)(mallocgc(uintptr(cap), nil, false)), cap)[:len]
}