github.com/sagernet/gvisor@v0.0.0-20240428053021-e691de28565f/pkg/abi/linux/linux_arm64_abi_autogen_unsafe.go

// Automatically generated marshal implementation. See tools/go_marshal.

// If there are issues with build constraint aggregation, see
// tools/go_marshal/gomarshal/generator.go:writeHeader(). The constraints here
// come from the input set of files used to generate this file. This input set
// is filtered based on pre-defined file suffixes related to build constraints,
// see tools/defs.bzl:calculate_sets().

//go:build arm64 && arm64 && arm64 && arm64 && arm64
// +build arm64,arm64,arm64,arm64,arm64

package linux

import (
	"github.com/sagernet/gvisor/pkg/gohacks"
	"github.com/sagernet/gvisor/pkg/hostarch"
	"github.com/sagernet/gvisor/pkg/marshal"
	"io"
	"reflect"
	"runtime"
	"unsafe"
)

// Marshallable types used by this file.
var _ marshal.Marshallable = (*EpollEvent)(nil)
var _ marshal.Marshallable = (*IPCPerm)(nil)
var _ marshal.Marshallable = (*PtraceRegs)(nil)
var _ marshal.Marshallable = (*SemidDS)(nil)
var _ marshal.Marshallable = (*Stat)(nil)
var _ marshal.Marshallable = (*TimeT)(nil)
var _ marshal.Marshallable = (*Timespec)(nil)

// SizeBytes implements marshal.Marshallable.SizeBytes.
func (e *EpollEvent) SizeBytes() int {
	return 8 +
		4*2
}

// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (e *EpollEvent) MarshalBytes(dst []byte) []byte {
	hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Events))
	dst = dst[4:]
	// Padding: dst[:sizeof(int32)] ~= int32(0)
	dst = dst[4:]
	for idx := 0; idx < 2; idx++ {
		hostarch.ByteOrder.PutUint32(dst[:4], uint32(e.Data[idx]))
		dst = dst[4:]
	}
	return dst
}

// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (e *EpollEvent) UnmarshalBytes(src []byte) []byte {
	e.Events = uint32(hostarch.ByteOrder.Uint32(src[:4]))
	src = src[4:]
	// Padding: var _ int32 ~= src[:sizeof(int32)]
	src = src[4:]
	for idx := 0; idx < 2; idx++ {
		e.Data[idx] = int32(hostarch.ByteOrder.Uint32(src[:4]))
		src = src[4:]
	}
	return src
}
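
// Illustrative sketch (not part of the generated output; the values below are
// arbitrary): MarshalBytes and UnmarshalBytes above round-trip an EpollEvent
// through a plain byte slice. Both methods consume the slice they are given
// and return the unused tail, so marshalling into an exactly-sized buffer
// leaves an empty remainder:
//
//	e := EpollEvent{Events: 0x1, Data: [2]int32{42, -1}}
//	buf := make([]byte, e.SizeBytes()) // 16 bytes: Events + 4 padding bytes + Data.
//	_ = e.MarshalBytes(buf)            // remainder is buf[16:], i.e. empty.
//
//	var out EpollEvent
//	_ = out.UnmarshalBytes(buf)        // out now equals e; the padding word is skipped.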

// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (e *EpollEvent) Packed() bool {
	return true
}

// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (e *EpollEvent) MarshalUnsafe(dst []byte) []byte {
	size := e.SizeBytes()
	gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(e), uintptr(size))
	return dst[size:]
}

// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (e *EpollEvent) UnmarshalUnsafe(src []byte) []byte {
	size := e.SizeBytes()
	gohacks.Memmove(unsafe.Pointer(e), unsafe.Pointer(&src[0]), uintptr(size))
	return src[size:]
}

// CopyOutN implements marshal.Marshallable.CopyOutN.
func (e *EpollEvent) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
	// Construct a slice backed by dst's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
	hdr.Len = e.SizeBytes()
	hdr.Cap = e.SizeBytes()

	length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
	// Since we bypassed the compiler's escape analysis, indicate that e
	// must live until the use above.
	runtime.KeepAlive(e) // escapes: replaced by intrinsic.
	return length, err
}

// CopyOut implements marshal.Marshallable.CopyOut.
func (e *EpollEvent) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
	return e.CopyOutN(cc, addr, e.SizeBytes())
}

// CopyInN implements marshal.Marshallable.CopyInN.
func (e *EpollEvent) CopyInN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
	// Construct a slice backed by dst's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
	hdr.Len = e.SizeBytes()
	hdr.Cap = e.SizeBytes()

	length, err := cc.CopyInBytes(addr, buf[:limit]) // escapes: okay.
	// Since we bypassed the compiler's escape analysis, indicate that e
	// must live until the use above.
	runtime.KeepAlive(e) // escapes: replaced by intrinsic.
	return length, err
}

// CopyIn implements marshal.Marshallable.CopyIn.
func (e *EpollEvent) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
	return e.CopyInN(cc, addr, e.SizeBytes())
}

// WriteTo implements io.WriterTo.WriteTo.
func (e *EpollEvent) WriteTo(writer io.Writer) (int64, error) {
	// Construct a slice backed by dst's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(e)))
	hdr.Len = e.SizeBytes()
	hdr.Cap = e.SizeBytes()

	length, err := writer.Write(buf)
	// Since we bypassed the compiler's escape analysis, indicate that e
	// must live until the use above.
	runtime.KeepAlive(e) // escapes: replaced by intrinsic.
	return int64(length), err
}
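
// The CopyOutN, CopyInN and WriteTo implementations above alias the receiver's
// memory as a []byte instead of marshalling field by field: the
// reflect.SliceHeader is pointed at the struct through gohacks.Noescape (so the
// receiver can stay stack-allocated despite the unsafe.Pointer conversion), and
// runtime.KeepAlive pins it for the duration of the copy. As an illustration
// only (this is not what the generator emits), the same aliasing could be
// written with the standard unsafe.Slice helper available since Go 1.17, using
// the method's own cc, addr and limit parameters:
//
//	// buf shares e's storage; it is only valid while e is kept alive.
//	buf := unsafe.Slice((*byte)(unsafe.Pointer(e)), e.SizeBytes())
//	length, err := cc.CopyOutBytes(addr, buf[:limit])
//	runtime.KeepAlive(e)
//
// The generated form additionally routes the pointer through gohacks.Noescape
// so the conversion does not force the receiver onto the heap.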

// CopyEpollEventSliceIn copies in a slice of EpollEvent objects from the task's memory.
func CopyEpollEventSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []EpollEvent) (int, error) {
	count := len(dst)
	if count == 0 {
		return 0, nil
	}
	size := (*EpollEvent)(nil).SizeBytes()

	ptr := unsafe.Pointer(&dst)
	val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))

	// Construct a slice backed by dst's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(val)
	hdr.Len = size * count
	hdr.Cap = size * count

	length, err := cc.CopyInBytes(addr, buf)
	// Since we bypassed the compiler's escape analysis, indicate that dst
	// must live until the use above.
	runtime.KeepAlive(dst) // escapes: replaced by intrinsic.
	return length, err
}

// CopyEpollEventSliceOut copies a slice of EpollEvent objects to the task's memory.
func CopyEpollEventSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []EpollEvent) (int, error) {
	count := len(src)
	if count == 0 {
		return 0, nil
	}
	size := (*EpollEvent)(nil).SizeBytes()

	ptr := unsafe.Pointer(&src)
	val := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))

	// Construct a slice backed by dst's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(val)
	hdr.Len = size * count
	hdr.Cap = size * count

	length, err := cc.CopyOutBytes(addr, buf)
	// Since we bypassed the compiler's escape analysis, indicate that src
	// must live until the use above.
	runtime.KeepAlive(src) // escapes: replaced by intrinsic.
	return length, err
}

// MarshalUnsafeEpollEventSlice is like EpollEvent.MarshalUnsafe, but for a []EpollEvent.
func MarshalUnsafeEpollEventSlice(src []EpollEvent, dst []byte) []byte {
	count := len(src)
	if count == 0 {
		return dst
	}

	size := (*EpollEvent)(nil).SizeBytes()
	buf := dst[:size*count]
	gohacks.Memmove(unsafe.Pointer(&buf[0]), unsafe.Pointer(&src[0]), uintptr(len(buf)))
	return dst[size*count:]
}

// UnmarshalUnsafeEpollEventSlice is like EpollEvent.UnmarshalUnsafe, but for a []EpollEvent.
func UnmarshalUnsafeEpollEventSlice(dst []EpollEvent, src []byte) []byte {
	count := len(dst)
	if count == 0 {
		return src
	}

	size := (*EpollEvent)(nil).SizeBytes()
	buf := src[:size*count]
	gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&buf[0]), uintptr(len(buf)))
	return src[size*count:]
}
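
// Illustrative use of the slice helpers above (not generated; the counts and
// values are arbitrary). MarshalUnsafeEpollEventSlice and
// UnmarshalUnsafeEpollEventSlice are single-memmove bulk versions of the
// per-element methods, while CopyEpollEventSliceIn/Out additionally move the
// bytes across a marshal.CopyContext:
//
//	events := []EpollEvent{{Events: 0x1}, {Events: 0x4}}
//	buf := make([]byte, (*EpollEvent)(nil).SizeBytes()*len(events)) // 32 bytes.
//	_ = MarshalUnsafeEpollEventSlice(events, buf)
//
//	decoded := make([]EpollEvent, len(events))
//	_ = UnmarshalUnsafeEpollEventSlice(decoded, buf) // decoded mirrors events.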

// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *Stat) SizeBytes() int {
	return 72 +
		(*Timespec)(nil).SizeBytes() +
		(*Timespec)(nil).SizeBytes() +
		(*Timespec)(nil).SizeBytes() +
		4*2
}

// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *Stat) MarshalBytes(dst []byte) []byte {
	hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Dev))
	dst = dst[8:]
	hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Ino))
	dst = dst[8:]
	hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Mode))
	dst = dst[4:]
	hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Nlink))
	dst = dst[4:]
	hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.UID))
	dst = dst[4:]
	hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.GID))
	dst = dst[4:]
	hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Rdev))
	dst = dst[8:]
	// Padding: dst[:sizeof(uint64)] ~= uint64(0)
	dst = dst[8:]
	hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Size))
	dst = dst[8:]
	hostarch.ByteOrder.PutUint32(dst[:4], uint32(s.Blksize))
	dst = dst[4:]
	// Padding: dst[:sizeof(int32)] ~= int32(0)
	dst = dst[4:]
	hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.Blocks))
	dst = dst[8:]
	dst = s.ATime.MarshalUnsafe(dst)
	dst = s.MTime.MarshalUnsafe(dst)
	dst = s.CTime.MarshalUnsafe(dst)
	// Padding: dst[:sizeof(int32)*2] ~= [2]int32{0}
	dst = dst[4*(2):]
	return dst
}

// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *Stat) UnmarshalBytes(src []byte) []byte {
	s.Dev = uint64(hostarch.ByteOrder.Uint64(src[:8]))
	src = src[8:]
	s.Ino = uint64(hostarch.ByteOrder.Uint64(src[:8]))
	src = src[8:]
	s.Mode = uint32(hostarch.ByteOrder.Uint32(src[:4]))
	src = src[4:]
	s.Nlink = uint32(hostarch.ByteOrder.Uint32(src[:4]))
	src = src[4:]
	s.UID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
	src = src[4:]
	s.GID = uint32(hostarch.ByteOrder.Uint32(src[:4]))
	src = src[4:]
	s.Rdev = uint64(hostarch.ByteOrder.Uint64(src[:8]))
	src = src[8:]
	// Padding: var _ uint64 ~= src[:sizeof(uint64)]
	src = src[8:]
	s.Size = int64(hostarch.ByteOrder.Uint64(src[:8]))
	src = src[8:]
	s.Blksize = int32(hostarch.ByteOrder.Uint32(src[:4]))
	src = src[4:]
	// Padding: var _ int32 ~= src[:sizeof(int32)]
	src = src[4:]
	s.Blocks = int64(hostarch.ByteOrder.Uint64(src[:8]))
	src = src[8:]
	src = s.ATime.UnmarshalUnsafe(src)
	src = s.MTime.UnmarshalUnsafe(src)
	src = s.CTime.UnmarshalUnsafe(src)
	// Padding: ~ copy([2]int32(s._), src[:sizeof(int32)*2])
	src = src[4*(2):]
	return src
}
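
// For reference (not generated): the layout marshalled above is intended to
// mirror the arm64 kernel's struct stat, including the two explicit padding
// words and the trailing [2]int32 padding. With Timespec marshalling to 16
// bytes, the sizes add up to the expected 128:
//
//	72   fixed fields, including the 8-byte and 4-byte padding words
//	48   ATime + MTime + CTime (3 * 16)
//	 8   trailing [2]int32 padding
//	---
//	128  == (*Stat)(nil).SizeBytes()
//
// Because Timespec has a packed layout, Stat.Packed() below reports true on
// this architecture, so MarshalUnsafe and UnmarshalUnsafe take the single
// memmove fast path rather than the field-by-field fallback.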

// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (s *Stat) Packed() bool {
	return s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed()
}

// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (s *Stat) MarshalUnsafe(dst []byte) []byte {
	if s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
		size := s.SizeBytes()
		gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(size))
		return dst[size:]
	}
	// Type Stat doesn't have a packed layout in memory, fallback to MarshalBytes.
	return s.MarshalBytes(dst)
}

// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (s *Stat) UnmarshalUnsafe(src []byte) []byte {
	if s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
		size := s.SizeBytes()
		gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(size))
		return src[size:]
	}
	// Type Stat doesn't have a packed layout in memory, fallback to UnmarshalBytes.
	return s.UnmarshalBytes(src)
}

// CopyOutN implements marshal.Marshallable.CopyOutN.
func (s *Stat) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
	if !s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
		// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
		buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
		s.MarshalBytes(buf) // escapes: fallback.
		return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
	}

	// Construct a slice backed by dst's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
	hdr.Len = s.SizeBytes()
	hdr.Cap = s.SizeBytes()

	length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
	// Since we bypassed the compiler's escape analysis, indicate that s
	// must live until the use above.
	runtime.KeepAlive(s) // escapes: replaced by intrinsic.
	return length, err
}

// CopyOut implements marshal.Marshallable.CopyOut.
func (s *Stat) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
	return s.CopyOutN(cc, addr, s.SizeBytes())
}

// CopyInN implements marshal.Marshallable.CopyInN.
func (s *Stat) CopyInN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
	if !s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
		// Type Stat doesn't have a packed layout in memory, fall back to UnmarshalBytes.
		buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
		length, err := cc.CopyInBytes(addr, buf[:limit]) // escapes: okay.
		// Unmarshal unconditionally. If we had a short copy-in, this results in a
		// partially unmarshalled struct.
		s.UnmarshalBytes(buf) // escapes: fallback.
		return length, err
	}

	// Construct a slice backed by dst's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
	hdr.Len = s.SizeBytes()
	hdr.Cap = s.SizeBytes()

	length, err := cc.CopyInBytes(addr, buf[:limit]) // escapes: okay.
	// Since we bypassed the compiler's escape analysis, indicate that s
	// must live until the use above.
	runtime.KeepAlive(s) // escapes: replaced by intrinsic.
	return length, err
}

// CopyIn implements marshal.Marshallable.CopyIn.
func (s *Stat) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
	return s.CopyInN(cc, addr, s.SizeBytes())
}

// WriteTo implements io.WriterTo.WriteTo.
func (s *Stat) WriteTo(writer io.Writer) (int64, error) {
	if !s.ATime.Packed() && s.CTime.Packed() && s.MTime.Packed() {
		// Type Stat doesn't have a packed layout in memory, fall back to MarshalBytes.
		buf := make([]byte, s.SizeBytes())
		s.MarshalBytes(buf)
		length, err := writer.Write(buf)
		return int64(length), err
	}

	// Construct a slice backed by dst's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
	hdr.Len = s.SizeBytes()
	hdr.Cap = s.SizeBytes()

	length, err := writer.Write(buf)
	// Since we bypassed the compiler's escape analysis, indicate that s
	// must live until the use above.
	runtime.KeepAlive(s) // escapes: replaced by intrinsic.
	return int64(length), err
}

// SizeBytes implements marshal.Marshallable.SizeBytes.
func (p *PtraceRegs) SizeBytes() int {
	return 24 +
		8*31
}

// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (p *PtraceRegs) MarshalBytes(dst []byte) []byte {
	for idx := 0; idx < 31; idx++ {
		hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Regs[idx]))
		dst = dst[8:]
	}
	hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Sp))
	dst = dst[8:]
	hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Pc))
	dst = dst[8:]
	hostarch.ByteOrder.PutUint64(dst[:8], uint64(p.Pstate))
	dst = dst[8:]
	return dst
}

// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (p *PtraceRegs) UnmarshalBytes(src []byte) []byte {
	for idx := 0; idx < 31; idx++ {
		p.Regs[idx] = uint64(hostarch.ByteOrder.Uint64(src[:8]))
		src = src[8:]
	}
	p.Sp = uint64(hostarch.ByteOrder.Uint64(src[:8]))
	src = src[8:]
	p.Pc = uint64(hostarch.ByteOrder.Uint64(src[:8]))
	src = src[8:]
	p.Pstate = uint64(hostarch.ByteOrder.Uint64(src[:8]))
	src = src[8:]
	return src
}

// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (p *PtraceRegs) Packed() bool {
	return true
}

// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (p *PtraceRegs) MarshalUnsafe(dst []byte) []byte {
	size := p.SizeBytes()
	gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(p), uintptr(size))
	return dst[size:]
}

// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (p *PtraceRegs) UnmarshalUnsafe(src []byte) []byte {
	size := p.SizeBytes()
	gohacks.Memmove(unsafe.Pointer(p), unsafe.Pointer(&src[0]), uintptr(size))
	return src[size:]
}

// CopyOutN implements marshal.Marshallable.CopyOutN.
func (p *PtraceRegs) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
	// Construct a slice backed by dst's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
	hdr.Len = p.SizeBytes()
	hdr.Cap = p.SizeBytes()

	length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
	// Since we bypassed the compiler's escape analysis, indicate that p
	// must live until the use above.
	runtime.KeepAlive(p) // escapes: replaced by intrinsic.
	return length, err
}

// CopyOut implements marshal.Marshallable.CopyOut.
func (p *PtraceRegs) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
	return p.CopyOutN(cc, addr, p.SizeBytes())
}

// CopyInN implements marshal.Marshallable.CopyInN.
func (p *PtraceRegs) CopyInN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
	// Construct a slice backed by dst's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
	hdr.Len = p.SizeBytes()
	hdr.Cap = p.SizeBytes()

	length, err := cc.CopyInBytes(addr, buf[:limit]) // escapes: okay.
	// Since we bypassed the compiler's escape analysis, indicate that p
	// must live until the use above.
	runtime.KeepAlive(p) // escapes: replaced by intrinsic.
	return length, err
}

// CopyIn implements marshal.Marshallable.CopyIn.
func (p *PtraceRegs) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
	return p.CopyInN(cc, addr, p.SizeBytes())
}

// WriteTo implements io.WriterTo.WriteTo.
func (p *PtraceRegs) WriteTo(writer io.Writer) (int64, error) {
	// Construct a slice backed by dst's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(p)))
	hdr.Len = p.SizeBytes()
	hdr.Cap = p.SizeBytes()

	length, err := writer.Write(buf)
	// Since we bypassed the compiler's escape analysis, indicate that p
	// must live until the use above.
	runtime.KeepAlive(p) // escapes: replaced by intrinsic.
	return int64(length), err
}
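
// Illustrative use of the io.WriterTo implementation above (not generated):
// PtraceRegs is packed, so WriteTo emits the register file as one contiguous
// write of SizeBytes() == 24 + 8*31 == 272 bytes into any io.Writer:
//
//	var regs PtraceRegs
//	var out bytes.Buffer         // the caller would import "bytes".
//	n, err := regs.WriteTo(&out) // on success n == int64(regs.SizeBytes()), i.e. 272.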

// SizeBytes implements marshal.Marshallable.SizeBytes.
func (s *SemidDS) SizeBytes() int {
	return 24 +
		(*IPCPerm)(nil).SizeBytes() +
		(*TimeT)(nil).SizeBytes() +
		(*TimeT)(nil).SizeBytes()
}

// MarshalBytes implements marshal.Marshallable.MarshalBytes.
func (s *SemidDS) MarshalBytes(dst []byte) []byte {
	dst = s.SemPerm.MarshalUnsafe(dst)
	dst = s.SemOTime.MarshalUnsafe(dst)
	dst = s.SemCTime.MarshalUnsafe(dst)
	hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.SemNSems))
	dst = dst[8:]
	hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.unused3))
	dst = dst[8:]
	hostarch.ByteOrder.PutUint64(dst[:8], uint64(s.unused4))
	dst = dst[8:]
	return dst
}

// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
func (s *SemidDS) UnmarshalBytes(src []byte) []byte {
	src = s.SemPerm.UnmarshalUnsafe(src)
	src = s.SemOTime.UnmarshalUnsafe(src)
	src = s.SemCTime.UnmarshalUnsafe(src)
	s.SemNSems = uint64(hostarch.ByteOrder.Uint64(src[:8]))
	src = src[8:]
	s.unused3 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
	src = src[8:]
	s.unused4 = uint64(hostarch.ByteOrder.Uint64(src[:8]))
	src = src[8:]
	return src
}

// Packed implements marshal.Marshallable.Packed.
//go:nosplit
func (s *SemidDS) Packed() bool {
	return s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed()
}

// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
func (s *SemidDS) MarshalUnsafe(dst []byte) []byte {
	if s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
		size := s.SizeBytes()
		gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(s), uintptr(size))
		return dst[size:]
	}
	// Type SemidDS doesn't have a packed layout in memory, fallback to MarshalBytes.
	return s.MarshalBytes(dst)
}

// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
func (s *SemidDS) UnmarshalUnsafe(src []byte) []byte {
	if s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
		size := s.SizeBytes()
		gohacks.Memmove(unsafe.Pointer(s), unsafe.Pointer(&src[0]), uintptr(size))
		return src[size:]
	}
	// Type SemidDS doesn't have a packed layout in memory, fallback to UnmarshalBytes.
	return s.UnmarshalBytes(src)
}

// CopyOutN implements marshal.Marshallable.CopyOutN.
func (s *SemidDS) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
	if !s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
		// Type SemidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
		buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
		s.MarshalBytes(buf) // escapes: fallback.
		return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
	}

	// Construct a slice backed by dst's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
	hdr.Len = s.SizeBytes()
	hdr.Cap = s.SizeBytes()

	length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.
	// Since we bypassed the compiler's escape analysis, indicate that s
	// must live until the use above.
	runtime.KeepAlive(s) // escapes: replaced by intrinsic.
	return length, err
}

// CopyOut implements marshal.Marshallable.CopyOut.
func (s *SemidDS) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
	return s.CopyOutN(cc, addr, s.SizeBytes())
}

// CopyInN implements marshal.Marshallable.CopyInN.
func (s *SemidDS) CopyInN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {
	if !s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
		// Type SemidDS doesn't have a packed layout in memory, fall back to UnmarshalBytes.
		buf := cc.CopyScratchBuffer(s.SizeBytes()) // escapes: okay.
		length, err := cc.CopyInBytes(addr, buf[:limit]) // escapes: okay.
		// Unmarshal unconditionally. If we had a short copy-in, this results in a
		// partially unmarshalled struct.
		s.UnmarshalBytes(buf) // escapes: fallback.
		return length, err
	}

	// Construct a slice backed by dst's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
	hdr.Len = s.SizeBytes()
	hdr.Cap = s.SizeBytes()

	length, err := cc.CopyInBytes(addr, buf[:limit]) // escapes: okay.
	// Since we bypassed the compiler's escape analysis, indicate that s
	// must live until the use above.
	runtime.KeepAlive(s) // escapes: replaced by intrinsic.
	return length, err
}

// CopyIn implements marshal.Marshallable.CopyIn.
func (s *SemidDS) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {
	return s.CopyInN(cc, addr, s.SizeBytes())
}

// WriteTo implements io.WriterTo.WriteTo.
func (s *SemidDS) WriteTo(writer io.Writer) (int64, error) {
	if !s.SemCTime.Packed() && s.SemOTime.Packed() && s.SemPerm.Packed() {
		// Type SemidDS doesn't have a packed layout in memory, fall back to MarshalBytes.
		buf := make([]byte, s.SizeBytes())
		s.MarshalBytes(buf)
		length, err := writer.Write(buf)
		return int64(length), err
	}

	// Construct a slice backed by dst's underlying memory.
	var buf []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(s)))
	hdr.Len = s.SizeBytes()
	hdr.Cap = s.SizeBytes()

	length, err := writer.Write(buf)
	// Since we bypassed the compiler's escape analysis, indicate that s
	// must live until the use above.
	runtime.KeepAlive(s) // escapes: replaced by intrinsic.
	return int64(length), err
}
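
// Typical call sites for the CopyIn/CopyOut methods in this file are syscall
// handlers, where the sentry task supplies the marshal.CopyContext. A hedged
// sketch (the helper name and surrounding wiring are hypothetical, not part of
// this package):
//
//	func copyStatOut(cc marshal.CopyContext, statAddr hostarch.Addr, s *Stat) error {
//		// Copies exactly s.SizeBytes() bytes into the task's address space,
//		// taking the memmove fast path because Stat is packed on arm64.
//		_, err := s.CopyOut(cc, statAddr)
//		return err
//	}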