// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

// slice is the runtime's representation of a Go slice header:
// a pointer to the backing array plus the length and capacity.
type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}

// A notInHeapSlice is a slice backed by go:notinheap memory.
type notInHeapSlice struct {
	array *notInHeap
	len   int
	cap   int
}

// panicmakeslicelen reports a make-slice length that is negative or too large.
func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}

// panicmakeslicecap reports a make-slice capacity that is out of range.
func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}

// makeslicecopy allocates a slice of "tolen" elements of type "et",
// then copies "fromlen" elements of type "et" into that new allocation from "from".
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
	var tomem, copymem uintptr
	if uintptr(tolen) > uintptr(fromlen) {
		// tolen exceeds the known-good fromlen, so its size must be
		// validated for overflow and the allocator's limit.
		var overflow bool
		tomem, overflow = math.MulUintptr(et.size, uintptr(tolen))
		if overflow || tomem > maxAlloc || tolen < 0 {
			panicmakeslicelen()
		}
		copymem = et.size * uintptr(fromlen)
	} else {
		// fromlen is a known good length providing an equal or greater than tolen,
		// thereby making tolen a good slice length too as from and to slices have the
		// same element width.
		tomem = et.size * uintptr(tolen)
		copymem = tomem
	}

	var to unsafe.Pointer
	if et.ptrdata == 0 {
		// Pointer-free elements: skip zeroing in mallocgc and clear only
		// the tail that won't be overwritten by the copy below.
		to = mallocgc(tomem, nil, false)
		if copymem < tomem {
			memclrNoHeapPointers(add(to, copymem), tomem-copymem)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		to = mallocgc(tomem, et, true)
		if copymem > 0 && writeBarrier.enabled {
			// Only shade the pointers in old.array since we know the destination slice to
			// only contains nil pointers because it has been cleared during alloc.
			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem)
		}
	}

	// Instrument the read of the source memory before performing it.
	if raceenabled {
		callerpc := getcallerpc()
		pc := funcPC(makeslicecopy)
		racereadrangepc(from, copymem, callerpc, pc)
	}
	if msanenabled {
		msanread(from, copymem)
	}

	// The write barrier (when needed) has already run above, so the raw
	// copy is safe here.
	memmove(to, from, copymem)

	return to
}

// makeslice allocates the backing store for a slice of "cap" elements of
// type "et", after validating that both len and cap are in range.
// The memory is returned zeroed by mallocgc.
func makeslice(et *_type, len, cap int) unsafe.Pointer {
	mem, overflow := math.MulUintptr(et.size, uintptr(cap))
	if overflow || mem > maxAlloc || len < 0 || len > cap {
		// NOTE: Produce a 'len out of range' error instead of a
		// 'cap out of range' error when someone does make([]T, bignumber).
		// 'cap out of range' is true too, but since the cap is only being
		// supplied implicitly, saying len is clearer.
		// See golang.org/issue/4085.
		mem, overflow := math.MulUintptr(et.size, uintptr(len))
		if overflow || mem > maxAlloc || len < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}

	return mallocgc(mem, et, true)
}

// makeslice64 is makeslice for 64-bit len/cap operands; it panics if
// either value does not fit in an int on this platform.
func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
	len := int(len64)
	if int64(len) != len64 {
		panicmakeslicelen()
	}

	cap := int(cap64)
	if int64(cap) != cap64 {
		panicmakeslicecap()
	}

	return makeslice(et, len, cap)
}

// growslice handles slice growth during append.
// It is passed the slice element type, the old slice, and the desired new minimum capacity,
// and it returns a new slice with at least that capacity, with the old data
// copied into it.
// The new slice's length is set to the old slice's length,
// NOT to the new requested capacity.
// This is for codegen convenience.
// The old slice's length is used immediately
// to calculate where to write new values during an append.
// TODO: When the old backend is gone, reconsider this decision.
// The SSA backend might prefer the new length or to return only ptr/cap and save stack space.
func growslice(et *_type, old slice, cap int) slice {
	// Instrument the read of the old backing array for the race detector
	// and msan before any allocation happens.
	if raceenabled {
		callerpc := getcallerpc()
		racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, funcPC(growslice))
	}
	if msanenabled {
		msanread(old.array, uintptr(old.len*int(et.size)))
	}

	if cap < old.cap {
		panic(errorString("growslice: cap out of range"))
	}

	if et.size == 0 {
		// append should not create a slice with nil pointer but non-zero len.
		// We assume that append doesn't need to preserve old.array in this case.
		return slice{unsafe.Pointer(&zerobase), old.len, cap}
	}

	// Growth policy: if the requested cap is more than double the old cap,
	// jump straight to it; otherwise double small slices (< 1024) and grow
	// larger ones by 25% per step until the request is covered.
	newcap := old.cap
	doublecap := newcap + newcap
	if cap > doublecap {
		newcap = cap
	} else {
		if old.cap < 1024 {
			newcap = doublecap
		} else {
			// Check 0 < newcap to detect overflow
			// and prevent an infinite loop.
			for 0 < newcap && newcap < cap {
				newcap += newcap / 4
			}
			// Set newcap to the requested cap when
			// the newcap calculation overflowed.
			if newcap <= 0 {
				newcap = cap
			}
		}
	}

	var overflow bool
	var lenmem, newlenmem, capmem uintptr
	// Specialize for common values of et.size.
	// For 1 we don't need any division/multiplication.
	// For sys.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	switch {
	case et.size == 1:
		lenmem = uintptr(old.len)
		newlenmem = uintptr(cap)
		capmem = roundupsize(uintptr(newcap))
		overflow = uintptr(newcap) > maxAlloc
		newcap = int(capmem)
	case et.size == sys.PtrSize:
		lenmem = uintptr(old.len) * sys.PtrSize
		newlenmem = uintptr(cap) * sys.PtrSize
		capmem = roundupsize(uintptr(newcap) * sys.PtrSize)
		overflow = uintptr(newcap) > maxAlloc/sys.PtrSize
		newcap = int(capmem / sys.PtrSize)
	case isPowerOfTwo(et.size):
		var shift uintptr
		if sys.PtrSize == 8 {
			// Mask shift for better code generation.
			shift = uintptr(sys.Ctz64(uint64(et.size))) & 63
		} else {
			shift = uintptr(sys.Ctz32(uint32(et.size))) & 31
		}
		lenmem = uintptr(old.len) << shift
		newlenmem = uintptr(cap) << shift
		capmem = roundupsize(uintptr(newcap) << shift)
		overflow = uintptr(newcap) > (maxAlloc >> shift)
		newcap = int(capmem >> shift)
	default:
		lenmem = uintptr(old.len) * et.size
		newlenmem = uintptr(cap) * et.size
		capmem, overflow = math.MulUintptr(et.size, uintptr(newcap))
		capmem = roundupsize(capmem)
		newcap = int(capmem / et.size)
	}

	// The check of overflow in addition to capmem > maxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32bit architectures with this example program:
	//
	// type T [1<<27 + 1]int64
	//
	// var d T
	// var s []T
	//
	// func main() {
	//   s = append(s, d, d, d, d)
	//   print(len(s), "\n")
	// }
	if overflow || capmem > maxAlloc {
		panic(errorString("growslice: cap out of range"))
	}

	var p unsafe.Pointer
	if et.ptrdata == 0 {
		p = mallocgc(capmem, nil, false)
		// The append() that calls growslice is going to overwrite from old.len to cap (which will be the new length).
		// Only clear the part that will not be overwritten.
		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if lenmem > 0 && writeBarrier.enabled {
			// Only shade the pointers in old.array since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			// The span ends at the last element's pointer-bearing prefix
			// (et.ptrdata bytes in), hence lenmem-et.size+et.ptrdata.
			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(old.array), lenmem-et.size+et.ptrdata)
		}
	}
	memmove(p, old.array, lenmem)

	return slice{p, old.len, newcap}
}

// isPowerOfTwo reports whether x is a power of two.
// Note it also yields true for x == 0; growslice never passes 0 because
// the et.size == 0 case returns before the switch that calls this.
func isPowerOfTwo(x uintptr) bool {
	return x&(x-1) == 0
}

// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
// It copies min(toLen, fromLen) elements of the given width from fromPtr to
// toPtr and returns that count.
func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int {
	if fromLen == 0 || toLen == 0 {
		return 0
	}

	n := fromLen
	if toLen < n {
		n = toLen
	}

	// Zero-width elements: nothing to move, but the count is still meaningful.
	if width == 0 {
		return n
	}

	size := uintptr(n) * width
	if raceenabled {
		callerpc := getcallerpc()
		pc := funcPC(slicecopy)
		racereadrangepc(fromPtr, size, callerpc, pc)
		racewriterangepc(toPtr, size, callerpc, pc)
	}
	if msanenabled {
		msanread(fromPtr, size)
		msanwrite(toPtr, size)
	}

	if size == 1 { // common case worth about 2x to do here
		// TODO: is this still worth it with new memmove impl?
		*(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer
	} else {
		memmove(toPtr, fromPtr, size)
	}
	return n
}