// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build unix || (js && wasm) || wasip1 || windows

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Integrated network poller (platform-independent part).
// A particular implementation (epoll/kqueue/port/AIX/Windows)
// must define the following functions:
//
//	func netpollinit()
//		Initialize the poller. Only called once.
//
//	func netpollopen(fd uintptr, pd *pollDesc) int32
//		Arm edge-triggered notifications for fd. The pd argument is to pass
//		back to netpollready when fd is ready. Return an errno value.
//
//	func netpollclose(fd uintptr) int32
//		Disable notifications for fd. Return an errno value.
//
//	func netpoll(delta int64) gList
//		Poll the network. If delta < 0, block indefinitely. If delta == 0,
//		poll without blocking. If delta > 0, block for up to delta nanoseconds.
//		Return a list of goroutines built by calling netpollready.
//
//	func netpollBreak()
//		Wake up the network poller, assumed to be blocked in netpoll.
//
//	func netpollIsPollDescriptor(fd uintptr) bool
//		Reports whether fd is a file descriptor used by the poller.

// Error codes returned by runtime_pollReset and runtime_pollWait.
// These must match the values in internal/poll/fd_poll_runtime.go.
const (
	pollNoError        = 0 // no error
	pollErrClosing     = 1 // descriptor is closed
	pollErrTimeout     = 2 // I/O timeout
	pollErrNotPollable = 3 // general error polling descriptor
)

// pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
// goroutines respectively. The semaphore can be in the following states:
//
//	pdReady - io readiness notification is pending;
//	          a goroutine consumes the notification by changing the state to pdNil.
//	pdWait - a goroutine prepares to park on the semaphore, but not yet parked;
//	         the goroutine commits to park by changing the state to G pointer,
//	         or, alternatively, concurrent io notification changes the state to pdReady,
//	         or, alternatively, concurrent timeout/close changes the state to pdNil.
//	G pointer - the goroutine is blocked on the semaphore;
//	            io notification or timeout/close changes the state to pdReady or pdNil respectively
//	            and unparks the goroutine.
//	pdNil - none of the above.
const (
	pdNil   uintptr = 0
	pdReady uintptr = 1
	pdWait  uintptr = 2
)

const pollBlockSize = 4 * 1024

// Network poller descriptor.
//
// No heap pointers.
type pollDesc struct {
	_     sys.NotInHeap
	link  *pollDesc      // in pollcache, protected by pollcache.lock
	fd    uintptr        // constant for pollDesc usage lifetime
	fdseq atomic.Uintptr // protects against stale pollDesc

	// atomicInfo holds bits from closing, rd, and wd,
	// which are only ever written while holding the lock,
	// summarized for use by netpollcheckerr,
	// which cannot acquire the lock.
	// After writing these fields under lock in a way that
	// might change the summary, code must call publishInfo
	// before releasing the lock.
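	//
	// A minimal sketch of that ordering (illustrative only; see
	// poll_runtime_pollSetDeadline and poll_runtime_pollUnblock
	// below for real instances):
	//
	//	lock(&pd.lock)
	//	pd.closing = true // or store to rd/wd
	//	pd.publishInfo()  // republish the summary for netpollcheckerr
	//	unlock(&pd.lock)
	//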
	// Code that changes fields and then calls netpollunblock
	// (while still holding the lock) must call publishInfo
	// before calling netpollunblock, because publishInfo is what
	// stops netpollblock from blocking anew
	// (by changing the result of netpollcheckerr).
	// atomicInfo also holds the eventErr bit,
	// recording whether a poll event on the fd got an error;
	// atomicInfo is the only source of truth for that bit.
	atomicInfo atomic.Uint32 // atomic pollInfo

	// rg, wg are accessed atomically and hold g pointers.
	// (Using atomic.Uintptr here is similar to using guintptr elsewhere.)
	rg atomic.Uintptr // pdReady, pdWait, G waiting for read or pdNil
	wg atomic.Uintptr // pdReady, pdWait, G waiting for write or pdNil

	lock    mutex // protects the following fields
	closing bool
	user    uint32    // user settable cookie
	rseq    uintptr   // protects from stale read timers
	rt      timer     // read deadline timer (set if rt.f != nil)
	rd      int64     // read deadline (a nanotime in the future, -1 when expired)
	wseq    uintptr   // protects from stale write timers
	wt      timer     // write deadline timer
	wd      int64     // write deadline (a nanotime in the future, -1 when expired)
	self    *pollDesc // storage for indirect interface. See (*pollDesc).makeArg.
}

// pollInfo is the bits needed by netpollcheckerr, stored atomically,
// mostly duplicating state that is manipulated under lock in pollDesc.
// The one exception is the pollEventErr bit, which is maintained only
// in the pollInfo.
type pollInfo uint32

const (
	pollClosing = 1 << iota
	pollEventErr
	pollExpiredReadDeadline
	pollExpiredWriteDeadline
	pollFDSeq // 20 bit field, low 20 bits of fdseq field
)

const (
	pollFDSeqBits = 20                   // number of bits in pollFDSeq
	pollFDSeqMask = 1<<pollFDSeqBits - 1 // mask for pollFDSeq
)

func (i pollInfo) closing() bool              { return i&pollClosing != 0 }
func (i pollInfo) eventErr() bool             { return i&pollEventErr != 0 }
func (i pollInfo) expiredReadDeadline() bool  { return i&pollExpiredReadDeadline != 0 }
func (i pollInfo) expiredWriteDeadline() bool { return i&pollExpiredWriteDeadline != 0 }

// info returns the pollInfo corresponding to pd.
func (pd *pollDesc) info() pollInfo {
	return pollInfo(pd.atomicInfo.Load())
}

// publishInfo updates pd.atomicInfo (returned by pd.info)
// using the other values in pd.
// It must be called while holding pd.lock,
// and it must be called after changing anything
// that might affect the info bits.
// In practice this means after changing closing
// or changing rd or wd from < 0 to >= 0.
func (pd *pollDesc) publishInfo() {
	var info uint32
	if pd.closing {
		info |= pollClosing
	}
	if pd.rd < 0 {
		info |= pollExpiredReadDeadline
	}
	if pd.wd < 0 {
		info |= pollExpiredWriteDeadline
	}
	info |= uint32(pd.fdseq.Load()&pollFDSeqMask) << pollFDSeq

	// Set all of x except the pollEventErr bit.
	x := pd.atomicInfo.Load()
	for !pd.atomicInfo.CompareAndSwap(x, (x&pollEventErr)|info) {
		x = pd.atomicInfo.Load()
	}
}

// setEventErr sets the result of pd.info().eventErr() to b.
// We only change the error bit if seq == 0 or if seq matches pollFDSeq
// (issue #59545).
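//
// For example (illustrative values only): with pollFDSeqBits = 20, a
// call setEventErr(true, seq) takes effect only if the low 20 bits of
// seq match the pollFDSeq bits currently published in atomicInfo; a
// mismatch means the pollDesc has since been reused for another fd, so
// the stale error bit is simply dropped.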
func (pd *pollDesc) setEventErr(b bool, seq uintptr) {
	mSeq := uint32(seq & pollFDSeqMask)
	x := pd.atomicInfo.Load()
	xSeq := (x >> pollFDSeq) & pollFDSeqMask
	if seq != 0 && xSeq != mSeq {
		return
	}
	for (x&pollEventErr != 0) != b && !pd.atomicInfo.CompareAndSwap(x, x^pollEventErr) {
		x = pd.atomicInfo.Load()
		xSeq := (x >> pollFDSeq) & pollFDSeqMask
		if seq != 0 && xSeq != mSeq {
			return
		}
	}
}

type pollCache struct {
	lock  mutex
	first *pollDesc
	// PollDesc objects must be type-stable,
	// because we can get ready notification from epoll/kqueue
	// after the descriptor is closed/reused.
	// Stale notifications are detected using the seq variable,
	// which is incremented when deadlines are changed or the
	// descriptor is reused.
}

var (
	netpollInitLock mutex
	netpollInited   atomic.Uint32

	pollcache      pollCache
	netpollWaiters atomic.Uint32
)

//go:linkname poll_runtime_pollServerInit internal/poll.runtime_pollServerInit
func poll_runtime_pollServerInit() {
	netpollGenericInit()
}

func netpollGenericInit() {
	if netpollInited.Load() == 0 {
		lockInit(&netpollInitLock, lockRankNetpollInit)
		lock(&netpollInitLock)
		if netpollInited.Load() == 0 {
			netpollinit()
			netpollInited.Store(1)
		}
		unlock(&netpollInitLock)
	}
}

func netpollinited() bool {
	return netpollInited.Load() != 0
}

//go:linkname poll_runtime_isPollServerDescriptor internal/poll.runtime_isPollServerDescriptor

// poll_runtime_isPollServerDescriptor reports whether fd is a
// descriptor being used by netpoll.
func poll_runtime_isPollServerDescriptor(fd uintptr) bool {
	return netpollIsPollDescriptor(fd)
}

//go:linkname poll_runtime_pollOpen internal/poll.runtime_pollOpen
func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
	pd := pollcache.alloc()
	lock(&pd.lock)
	wg := pd.wg.Load()
	if wg != pdNil && wg != pdReady {
		throw("runtime: blocked write on free polldesc")
	}
	rg := pd.rg.Load()
	if rg != pdNil && rg != pdReady {
		throw("runtime: blocked read on free polldesc")
	}
	pd.fd = fd
	if pd.fdseq.Load() == 0 {
		// The value 0 is special in setEventErr, so don't use it.
		pd.fdseq.Store(1)
	}
	pd.closing = false
	pd.setEventErr(false, 0)
	pd.rseq++
	pd.rg.Store(pdNil)
	pd.rd = 0
	pd.wseq++
	pd.wg.Store(pdNil)
	pd.wd = 0
	pd.self = pd
	pd.publishInfo()
	unlock(&pd.lock)

	errno := netpollopen(fd, pd)
	if errno != 0 {
		pollcache.free(pd)
		return nil, int(errno)
	}
	return pd, 0
}

//go:linkname poll_runtime_pollClose internal/poll.runtime_pollClose
func poll_runtime_pollClose(pd *pollDesc) {
	if !pd.closing {
		throw("runtime: close polldesc w/o unblock")
	}
	wg := pd.wg.Load()
	if wg != pdNil && wg != pdReady {
		throw("runtime: blocked write on closing polldesc")
	}
	rg := pd.rg.Load()
	if rg != pdNil && rg != pdReady {
		throw("runtime: blocked read on closing polldesc")
	}
	netpollclose(pd.fd)
	pollcache.free(pd)
}

func (c *pollCache) free(pd *pollDesc) {
	// Increment the fdseq field, so that any currently
	// running netpoll calls will not mark pd as ready.
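	// (The wraparound below keeps fdseq within taggedPointerBits bits,
	// which is presumably what the tagged-pointer packing in the
	// platform poller implementations can carry alongside the pollDesc
	// pointer; a sketch of the update: next = (cur + 1) mod 2^taggedPointerBits.)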
	fdseq := pd.fdseq.Load()
	fdseq = (fdseq + 1) & (1<<taggedPointerBits - 1)
	pd.fdseq.Store(fdseq)

	lock(&c.lock)
	pd.link = c.first
	c.first = pd
	unlock(&c.lock)
}

// poll_runtime_pollReset, which is internal/poll.runtime_pollReset,
// prepares a descriptor for polling in mode, which is 'r' or 'w'.
// This returns an error code; the codes are defined above.
//
//go:linkname poll_runtime_pollReset internal/poll.runtime_pollReset
func poll_runtime_pollReset(pd *pollDesc, mode int) int {
	errcode := netpollcheckerr(pd, int32(mode))
	if errcode != pollNoError {
		return errcode
	}
	if mode == 'r' {
		pd.rg.Store(pdNil)
	} else if mode == 'w' {
		pd.wg.Store(pdNil)
	}
	return pollNoError
}

// poll_runtime_pollWait, which is internal/poll.runtime_pollWait,
// waits for a descriptor to be ready for reading or writing,
// according to mode, which is 'r' or 'w'.
// This returns an error code; the codes are defined above.
//
//go:linkname poll_runtime_pollWait internal/poll.runtime_pollWait
func poll_runtime_pollWait(pd *pollDesc, mode int) int {
	errcode := netpollcheckerr(pd, int32(mode))
	if errcode != pollNoError {
		return errcode
	}
	// For now, only Solaris, illumos, and AIX use level-triggered IO.
	if GOOS == "solaris" || GOOS == "illumos" || GOOS == "aix" {
		netpollarm(pd, mode)
	}
	for !netpollblock(pd, int32(mode), false) {
		errcode = netpollcheckerr(pd, int32(mode))
		if errcode != pollNoError {
			return errcode
		}
		// This can happen if the timeout fired and unblocked us,
		// but the timeout was reset before we had a chance to run.
		// Pretend it has not happened and retry.
	}
	return pollNoError
}

//go:linkname poll_runtime_pollWaitCanceled internal/poll.runtime_pollWaitCanceled
func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int) {
	// This function is used only on Windows after a failed attempt to cancel
	// a pending async IO operation. Wait for ioready, ignore closing or timeouts.
	for !netpollblock(pd, int32(mode), true) {
	}
}

//go:linkname poll_runtime_pollSetDeadline internal/poll.runtime_pollSetDeadline
func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
	lock(&pd.lock)
	if pd.closing {
		unlock(&pd.lock)
		return
	}
	rd0, wd0 := pd.rd, pd.wd
	combo0 := rd0 > 0 && rd0 == wd0
	if d > 0 {
		d += nanotime()
		if d <= 0 {
			// If the user has a deadline in the future, but the delay calculation
			// overflows, then set the deadline to the maximum possible value.
			d = 1<<63 - 1
		}
	}
	if mode == 'r' || mode == 'r'+'w' {
		pd.rd = d
	}
	if mode == 'w' || mode == 'r'+'w' {
		pd.wd = d
	}
	pd.publishInfo()
	combo := pd.rd > 0 && pd.rd == pd.wd
	rtf := netpollReadDeadline
	if combo {
		rtf = netpollDeadline
	}
	if pd.rt.f == nil {
		if pd.rd > 0 {
			pd.rt.f = rtf
			// Copy the current seq into the timer arg.
			// The timer func will check the seq against the current
			// descriptor seq; if they differ, the descriptor was
			// reused or the timers were reset.
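			// (A stale timer firing is thus harmless:
			// netpolldeadlineimpl compares its seq argument against
			// rseq/wseq under pd.lock and returns early on mismatch.)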
			pd.rt.arg = pd.makeArg()
			pd.rt.seq = pd.rseq
			resettimer(&pd.rt, pd.rd)
		}
	} else if pd.rd != rd0 || combo != combo0 {
		pd.rseq++ // invalidate current timers
		if pd.rd > 0 {
			modtimer(&pd.rt, pd.rd, 0, rtf, pd.makeArg(), pd.rseq)
		} else {
			deltimer(&pd.rt)
			pd.rt.f = nil
		}
	}
	if pd.wt.f == nil {
		if pd.wd > 0 && !combo {
			pd.wt.f = netpollWriteDeadline
			pd.wt.arg = pd.makeArg()
			pd.wt.seq = pd.wseq
			resettimer(&pd.wt, pd.wd)
		}
	} else if pd.wd != wd0 || combo != combo0 {
		pd.wseq++ // invalidate current timers
		if pd.wd > 0 && !combo {
			modtimer(&pd.wt, pd.wd, 0, netpollWriteDeadline, pd.makeArg(), pd.wseq)
		} else {
			deltimer(&pd.wt)
			pd.wt.f = nil
		}
	}
	// If we set the new deadline in the past, unblock currently pending IO if any.
	// Note that pd.publishInfo has already been called, above, immediately after modifying rd and wd.
	var rg, wg *g
	if pd.rd < 0 {
		rg = netpollunblock(pd, 'r', false)
	}
	if pd.wd < 0 {
		wg = netpollunblock(pd, 'w', false)
	}
	unlock(&pd.lock)
	if rg != nil {
		netpollgoready(rg, 3)
	}
	if wg != nil {
		netpollgoready(wg, 3)
	}
}

//go:linkname poll_runtime_pollUnblock internal/poll.runtime_pollUnblock
func poll_runtime_pollUnblock(pd *pollDesc) {
	lock(&pd.lock)
	if pd.closing {
		throw("runtime: unblock on closing polldesc")
	}
	pd.closing = true
	pd.rseq++
	pd.wseq++
	var rg, wg *g
	pd.publishInfo()
	rg = netpollunblock(pd, 'r', false)
	wg = netpollunblock(pd, 'w', false)
	if pd.rt.f != nil {
		deltimer(&pd.rt)
		pd.rt.f = nil
	}
	if pd.wt.f != nil {
		deltimer(&pd.wt)
		pd.wt.f = nil
	}
	unlock(&pd.lock)
	if rg != nil {
		netpollgoready(rg, 3)
	}
	if wg != nil {
		netpollgoready(wg, 3)
	}
}

// netpollready is called by the platform-specific netpoll function.
// It declares that the fd associated with pd is ready for I/O.
// The toRun argument is used to build a list of goroutines to return
// from netpoll. The mode argument is 'r', 'w', or 'r'+'w' to indicate
// whether the fd is ready for reading or writing or both.
//
// This may run while the world is stopped, so write barriers are not allowed.
//
//go:nowritebarrier
func netpollready(toRun *gList, pd *pollDesc, mode int32) {
	var rg, wg *g
	if mode == 'r' || mode == 'r'+'w' {
		rg = netpollunblock(pd, 'r', true)
	}
	if mode == 'w' || mode == 'r'+'w' {
		wg = netpollunblock(pd, 'w', true)
	}
	if rg != nil {
		toRun.push(rg)
	}
	if wg != nil {
		toRun.push(wg)
	}
}

func netpollcheckerr(pd *pollDesc, mode int32) int {
	info := pd.info()
	if info.closing() {
		return pollErrClosing
	}
	if (mode == 'r' && info.expiredReadDeadline()) || (mode == 'w' && info.expiredWriteDeadline()) {
		return pollErrTimeout
	}
	// Report an event scanning error only on a read event.
	// An error on a write event will be captured in a subsequent
	// write call that is able to report a more specific error.
	if mode == 'r' && info.eventErr() {
		return pollErrNotPollable
	}
	return pollNoError
}

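// A compact map of the rg/wg transitions driven by netpollblock and
// netpollunblock below (an illustrative summary of the state machine
// documented on pollDesc, not additional behavior):
//
//	netpollblock:   pdReady -> pdNil (consume notification)
//	                pdNil -> pdWait -> G pointer (park via gopark)
//	netpollunblock: G pointer/pdWait/pdNil -> pdReady (io ready)
//	                G pointer/pdWait -> pdNil (timeout/close)
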
func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
	r := atomic.Casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
	if r {
		// Bump the count of goroutines waiting for the poller.
		// The scheduler uses this to decide whether to block
		// waiting for the poller if there is nothing else to do.
		netpollWaiters.Add(1)
	}
	return r
}

func netpollgoready(gp *g, traceskip int) {
	netpollWaiters.Add(-1)
	goready(gp, traceskip+1)
}

// netpollblock returns true if IO is ready, or false if it timed out or was closed.
// waitio - wait only for completed IO, ignore errors.
// Concurrent calls to netpollblock in the same mode are forbidden, as pollDesc
// can hold only a single waiting goroutine for each mode.
func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
	gpp := &pd.rg
	if mode == 'w' {
		gpp = &pd.wg
	}

	// set the gpp semaphore to pdWait
	for {
		// Consume notification if already ready.
		if gpp.CompareAndSwap(pdReady, pdNil) {
			return true
		}
		if gpp.CompareAndSwap(pdNil, pdWait) {
			break
		}

		// Double check that this isn't corrupt; otherwise we'd loop
		// forever.
		if v := gpp.Load(); v != pdReady && v != pdNil {
			throw("runtime: double wait")
		}
	}

	// Need to recheck error states after setting gpp to pdWait.
	// This is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
	// do the opposite: store to closing/rd/wd, publishInfo, load of rg/wg.
	if waitio || netpollcheckerr(pd, mode) == pollNoError {
		gopark(netpollblockcommit, unsafe.Pointer(gpp), waitReasonIOWait, traceEvGoBlockNet, 5)
	}
	// be careful to not lose concurrent pdReady notification
	old := gpp.Swap(pdNil)
	if old > pdWait {
		throw("runtime: corrupted polldesc")
	}
	return old == pdReady
}

func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
	gpp := &pd.rg
	if mode == 'w' {
		gpp = &pd.wg
	}

	for {
		old := gpp.Load()
		if old == pdReady {
			return nil
		}
		if old == pdNil && !ioready {
			// Only set pdReady for ioready. runtime_pollWait
			// will check for timeout/cancel before waiting.
			return nil
		}
		var new uintptr
		if ioready {
			new = pdReady
		}
		if gpp.CompareAndSwap(old, new) {
			if old == pdWait {
				old = pdNil
			}
			return (*g)(unsafe.Pointer(old))
		}
	}
}

func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
	lock(&pd.lock)
	// The seq arg is the seq from when the timer was set.
	// If it's stale, ignore the timer event.
	currentSeq := pd.rseq
	if !read {
		currentSeq = pd.wseq
	}
	if seq != currentSeq {
		// The descriptor was reused or timers were reset.
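		// (For a combined read+write timer, netpollDeadline passes
		// read == write == true, so the comparison above is against
		// rseq; the combo timer only exists while both deadlines are
		// set and equal, per poll_runtime_pollSetDeadline.)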
		unlock(&pd.lock)
		return
	}
	var rg *g
	if read {
		if pd.rd <= 0 || pd.rt.f == nil {
			throw("runtime: inconsistent read deadline")
		}
		pd.rd = -1
		pd.publishInfo()
		rg = netpollunblock(pd, 'r', false)
	}
	var wg *g
	if write {
		if pd.wd <= 0 || pd.wt.f == nil && !read {
			throw("runtime: inconsistent write deadline")
		}
		pd.wd = -1
		pd.publishInfo()
		wg = netpollunblock(pd, 'w', false)
	}
	unlock(&pd.lock)
	if rg != nil {
		netpollgoready(rg, 0)
	}
	if wg != nil {
		netpollgoready(wg, 0)
	}
}

func netpollDeadline(arg any, seq uintptr) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, true, true)
}

func netpollReadDeadline(arg any, seq uintptr) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, true, false)
}

func netpollWriteDeadline(arg any, seq uintptr) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
}

func (c *pollCache) alloc() *pollDesc {
	lock(&c.lock)
	if c.first == nil {
		const pdSize = unsafe.Sizeof(pollDesc{})
		n := pollBlockSize / pdSize
		if n == 0 {
			n = 1
		}
		// Must be in non-GC memory because it can be referenced
		// only from epoll/kqueue internals.
		mem := persistentalloc(n*pdSize, 0, &memstats.other_sys)
		for i := uintptr(0); i < n; i++ {
			pd := (*pollDesc)(add(mem, i*pdSize))
			pd.link = c.first
			c.first = pd
		}
	}
	pd := c.first
	c.first = pd.link
	lockInit(&pd.lock, lockRankPollDesc)
	unlock(&c.lock)
	return pd
}

// makeArg converts pd to an interface{}.
// makeArg does not do any allocation. Normally, such
// a conversion requires an allocation because pointers to
// types which embed runtime/internal/sys.NotInHeap (which pollDesc is)
// must be stored in interfaces indirectly. See issue 42076.
func (pd *pollDesc) makeArg() (i any) {
	x := (*eface)(unsafe.Pointer(&i))
	x._type = pdType
	x.data = unsafe.Pointer(&pd.self)
	return
}

var (
	pdEface any    = (*pollDesc)(nil)
	pdType  *_type = efaceOf(&pdEface)._type
)
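
// For reference (an illustrative note, not additional API): makeArg
// builds the same interface value that
//
//	var i any = pd
//
// would produce, except that the ordinary conversion would allocate.
// Because pollDesc is not-in-heap, the interface must hold its pointer
// indirectly, so the data word points at pd.self rather than at pd.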