github.com/primecitizens/pcz/std@v0.2.1/runtime/builtin_defer.go

// SPDX-License-Identifier: Apache-2.0
// Copyright 2023 The Prime Citizens

//go:build pcz

package runtime

import (
	"unsafe"

	"github.com/primecitizens/pcz/std/core/arch"
	"github.com/primecitizens/pcz/std/core/assert"
)

// See ${GOROOT}/src/runtime/panic.go

// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
func deferreturn() {
	gp := getg()
	for {
		d := gp.Defer
		if d == nil {
			return
		}
		sp := getcallersp()
		if d.SP != sp {
			return
		}
		if d.OpenDefer {
			done := runOpenDeferFrame(d)
			if !done {
				assert.Throw("unfinished", "open-coded", "defers", "in", "deferreturn")
			}
			gp.Defer = d.Link
			freedefer(d)
			// If this frame uses open defers, then this
			// must be the only defer record for the
			// frame, so we can just return.
			return
		}

		fn := d.Fn
		d.Fn = nil
		gp.Defer = d.Link
		freedefer(d)
		fn()
	}
}

// runOpenDeferFrame runs the active open-coded defers in the frame specified by
// d. It normally processes all active defers in the frame, but stops immediately
// if a defer does a successful recover. It returns true if there are no
// remaining defers to run in the frame.
func runOpenDeferFrame(d *_defer) bool {
	done := true
	fd := d.FuncData

	deferBitsOffset, fd := readvarintUnsafe(fd)
	nDefers, fd := readvarintUnsafe(fd)
	deferBits := *(*uint8)(unsafe.Pointer(d.Varp - uintptr(deferBitsOffset)))

	for i := int(nDefers) - 1; i >= 0; i-- {
		// read the funcdata info for this defer
		var closureOffset uint32
		closureOffset, fd = readvarintUnsafe(fd)
		if deferBits&(1<<i) == 0 {
			continue
		}
		closure := *(*func())(unsafe.Pointer(d.Varp - uintptr(closureOffset)))
		d.Fn = closure
		deferBits = deferBits &^ (1 << i)
		*(*uint8)(unsafe.Pointer(d.Varp - uintptr(deferBitsOffset))) = deferBits
		p := d.Panic
		// Call the defer. Note that this can change d.Varp if
		// the stack moves.
		deferCallSave(p, d.Fn)
		if p != nil && p.Aborted {
			break
		}
		d.Fn = nil
		if d.Panic != nil && d.Panic.Recovered {
			done = deferBits == 0
			break
		}
	}

	return done
}

// deferCallSave calls fn() after saving the caller's pc and sp in the
// panic record. This allows the runtime to return to the Goexit defer
// processing loop, in the unusual case where the Goexit may be
// bypassed by a successful recover.
//
// This is marked as a wrapper by the compiler so it doesn't appear in
// tracebacks.
func deferCallSave(p *_panic, fn func()) {
	if p != nil {
		p.Argp = unsafe.Pointer(getargp())
		p.PC = getcallerpc()
		p.SP = unsafe.Pointer(getcallersp())
	}
	fn()
	if p != nil {
		p.PC = 0
		p.SP = unsafe.Pointer(nil)
	}
}

// getargp returns the location where the caller
// writes outgoing function call arguments.
//
//go:nosplit
//go:noinline
func getargp() uintptr {
	return getcallersp() + arch.MinFrameSize
}

// Free the given defer.
// The defer cannot be used after this call.
//
// This is nosplit because the incoming defer is in a perilous state.
// It's not on any defer list, so stack copying won't adjust stack
// pointers in it (namely, d.link). Hence, if we were to copy the
// stack, d could then contain a stale pointer.
//
//go:nosplit
func freedefer(d *_defer) {
	d.Link = nil
	// After this point we can copy the stack.

	if d.Panic != nil {
		freedeferpanic()
	}
	if d.Fn != nil {
		freedeferfn()
	}
	if !d.Heap {
		return
	}

	// mp := acquirem()
	// pp := mp.p.ptr()
	// if len(pp.deferpool) == cap(pp.deferpool) {
	// 	// Transfer half of local cache to the central cache.
	// 	var first, last *_defer
	// 	for len(pp.deferpool) > cap(pp.deferpool)/2 {
	// 		n := len(pp.deferpool)
	// 		d := pp.deferpool[n-1]
	// 		pp.deferpool[n-1] = nil
	// 		pp.deferpool = pp.deferpool[:n-1]
	// 		if first == nil {
	// 			first = d
	// 		} else {
	// 			last.link = d
	// 		}
	// 		last = d
	// 	}
	// 	lock(&sched.deferlock)
	// 	last.link = sched.deferpool
	// 	sched.deferpool = first
	// 	unlock(&sched.deferlock)
	// }

	*d = _defer{}

	// pp.deferpool = append(pp.deferpool, d)
	//
	// releasem(mp)
	// mp, pp = nil, nil
}

// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	assert.Throw("freedefer", "with", "d.Panic", "!=", "nil")
}

func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	assert.Throw("freedefer", "with", "d.Fn", "!=", "nil")
}

// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// There is a similar function runtime.readvarint, which takes a slice of bytes,
// rather than an unsafe pointer. These functions are duplicated, because one of
// the two use cases for the functions would get slower if the functions were
// combined.
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
	var r uint32
	var shift int
	for {
		b := *(*uint8)(fd)
		fd = unsafe.Add(fd, unsafe.Sizeof(b))
		if b < 128 {
			return r + uint32(b)<<shift, fd
		}
		r += (uint32(b) &^ 128) << shift
		shift += 7
		if shift > 28 {
			assert.Throw("bad", "varint")
		}
	}
}
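
// readvarintExample is an illustrative sketch, not part of the original
// file: it shows the base-128 varint layout readvarintUnsafe expects,
// where each byte carries 7 payload bits and the high bit marks a
// continuation byte.
func readvarintExample() (value, consumed uint32) {
	// 0x85 has the continuation bit set and payload 5; 0x01 terminates
	// the varint and contributes 1<<7, so the decoded value is
	// 5 + 128 = 133, and the returned pointer has advanced past 2 bytes.
	buf := [2]byte{0x85, 0x01}
	start := unsafe.Pointer(&buf[0])
	v, next := readvarintUnsafe(start)
	return v, uint32(uintptr(next) - uintptr(start)) // 133, 2
}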