github.com/metacubex/gvisor@v0.0.0-20240320004321-933faba989ec/pkg/sentry/fsimpl/iouringfs/iouringfs.go

// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package iouringfs provides a filesystem implementation for IO_URING,
// based on anonfs. Currently, neither IOPOLL nor SQPOLL mode is supported.
// Thus, the user needs to set up IO_URING first with the io_uring_setup(2)
// syscall and then issue submission requests using io_uring_enter(2).
//
// Another important note: as of now, we don't support deferred CQEs. In
// other words, the size of the backlogged set of CQEs is zero. Whenever the
// completion queue ring buffer is full, we drop subsequent completion queue
// entries.
package iouringfs

import (
	"fmt"
	"io"

	"github.com/metacubex/gvisor/pkg/abi/linux"
	"github.com/metacubex/gvisor/pkg/atomicbitops"
	"github.com/metacubex/gvisor/pkg/context"
	"github.com/metacubex/gvisor/pkg/errors/linuxerr"
	"github.com/metacubex/gvisor/pkg/hostarch"
	"github.com/metacubex/gvisor/pkg/safemem"
	"github.com/metacubex/gvisor/pkg/sentry/kernel"
	"github.com/metacubex/gvisor/pkg/sentry/memmap"
	"github.com/metacubex/gvisor/pkg/sentry/pgalloc"
	"github.com/metacubex/gvisor/pkg/sentry/usage"
	"github.com/metacubex/gvisor/pkg/sentry/vfs"
	"github.com/metacubex/gvisor/pkg/usermem"
)

// FileDescription implements vfs.FileDescriptionImpl for file-based IO_URING.
// It is based on the io_rings struct. See io_uring/io_uring.c.
//
// +stateify savable
type FileDescription struct {
	vfsfd vfs.FileDescription
	vfs.FileDescriptionDefaultImpl
	vfs.DentryMetadataFileDescriptionImpl
	vfs.NoLockFD

	mf *pgalloc.MemoryFile `state:"nosave"`

	rbmf  ringsBufferFile
	sqemf sqEntriesFile

	// running indicates whether the submission queue is currently being
	// processed. This is either 0 for not running, or 1 for running.
	running atomicbitops.Uint32
	// runC is used to wake up serialized task goroutines waiting for any
	// concurrent processors of the submission queue.
	runC chan struct{} `state:"nosave"`

	ioRings linux.IORings

	ioRingsBuf sharedBuffer `state:"nosave"`
	sqesBuf    sharedBuffer `state:"nosave"`
	cqesBuf    sharedBuffer `state:"nosave"`

	// remap indicates whether the shared buffers need to be remapped due to
	// a save/restore (S/R). Protected by the ProcessSubmissions critical
	// section.
	remap bool
}

var _ vfs.FileDescriptionImpl = (*FileDescription)(nil)

// roundUpPowerOfTwo returns the smallest power of two greater than or equal
// to n, and whether the result fits in a uint32.
func roundUpPowerOfTwo(n uint32) (uint32, bool) {
	if n > (1 << 31) {
		return 0, false
	}
	result := uint32(1)
	for result < n {
		result = result << 1
	}
	return result, true
}
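
// Ring sizes are rounded up to powers of two so that wrapping a
// free-running 32-bit cursor onto the ring reduces to a bitwise AND with
// (size - 1); this is exactly how SqRingMask and CqRingMask are used in
// ProcessSubmissions below. maskEquivalence is a minimal sketch of that
// identity; it is a hypothetical helper added for exposition only and is
// not used by the implementation.
func maskEquivalence(cursor, size uint32) bool {
	// Holds only when size is a power of two (and nonzero), which
	// roundUpPowerOfTwo guarantees for the ring sizes.
	return cursor%size == cursor&(size-1)
}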

// New creates a new io_uring fd.
func New(ctx context.Context, vfsObj *vfs.VirtualFilesystem, entries uint32, params *linux.IOUringParams) (*vfs.FileDescription, error) {
	if entries > linux.IORING_MAX_ENTRIES {
		return nil, linuxerr.EINVAL
	}

	vd := vfsObj.NewAnonVirtualDentry("[io_uring]")
	defer vd.DecRef(ctx)

	mf := pgalloc.MemoryFileFromContext(ctx)
	if mf == nil {
		panic(fmt.Sprintf("context.Context %T lacks non-nil value for key %T", ctx, pgalloc.CtxMemoryFile))
	}

	numSqEntries, ok := roundUpPowerOfTwo(entries)
	if !ok {
		return nil, linuxerr.EOVERFLOW
	}
	var numCqEntries uint32
	if params.Flags&linux.IORING_SETUP_CQSIZE != 0 {
		var ok bool
		numCqEntries, ok = roundUpPowerOfTwo(params.CqEntries)
		if !ok || numCqEntries < numSqEntries || numCqEntries > linux.IORING_MAX_CQ_ENTRIES {
			return nil, linuxerr.EINVAL
		}
	} else {
		numCqEntries = 2 * numSqEntries
	}

	// Allocate enough space to store the io_rings header, the CQE array, and
	// the array of SQ indexes corresponding to the number of SQEs.
	ioRingsWithCqesSize := uint32((*linux.IORings)(nil).SizeBytes()) +
		numCqEntries*uint32((*linux.IOUringCqe)(nil).SizeBytes())
	ringsBufferSize := uint64(ioRingsWithCqesSize +
		numSqEntries*uint32((*linux.IORingIndex)(nil).SizeBytes()))
	ringsBufferSize = uint64(hostarch.Addr(ringsBufferSize).MustRoundUp())

	memCgID := pgalloc.MemoryCgroupIDFromContext(ctx)
	rbfr, err := mf.Allocate(ringsBufferSize, pgalloc.AllocOpts{Kind: usage.Anonymous, MemCgID: memCgID})
	if err != nil {
		return nil, linuxerr.ENOMEM
	}

	// Allocate enough space to store the given number of submission queue
	// entries.
	sqEntriesSize := uint64(numSqEntries * uint32((*linux.IOUringSqe)(nil).SizeBytes()))
	sqEntriesSize = uint64(hostarch.Addr(sqEntriesSize).MustRoundUp())
	sqefr, err := mf.Allocate(sqEntriesSize, pgalloc.AllocOpts{Kind: usage.Anonymous, MemCgID: memCgID})
	if err != nil {
		return nil, linuxerr.ENOMEM
	}

	iouringfd := &FileDescription{
		mf: mf,
		rbmf: ringsBufferFile{
			fr: rbfr,
		},
		sqemf: sqEntriesFile{
			fr: sqefr,
		},
		// See ProcessSubmissions for why the capacity is 1.
		runC: make(chan struct{}, 1),
	}

	// iouringfd is always set up with read/write mode.
	// See io_uring/io_uring.c:io_uring_install_fd().
	if err := iouringfd.vfsfd.Init(iouringfd, uint32(linux.O_RDWR), vd.Mount(), vd.Dentry(), &vfs.FileDescriptionOptions{
		UseDentryMetadata: true,
		DenyPRead:         true,
		DenyPWrite:        true,
		DenySpliceIn:      true,
	}); err != nil {
		return nil, err
	}

	params.SqEntries = numSqEntries
	params.CqEntries = numCqEntries

	arrayOffset := uint64(hostarch.Addr(ioRingsWithCqesSize))
	arrayOffset, ok = hostarch.CacheLineRoundUp(arrayOffset)
	if !ok {
		return nil, linuxerr.EOVERFLOW
	}

	params.SqOff = linux.PreComputedIOSqRingOffsets()
	params.SqOff.Array = uint32(arrayOffset)

	cqesOffset := uint64(hostarch.Addr((*linux.IORings)(nil).SizeBytes()))
	cqesOffset, ok = hostarch.CacheLineRoundUp(cqesOffset)
	if !ok {
		return nil, linuxerr.EOVERFLOW
	}

	params.CqOff = linux.PreComputedIOCqRingOffsets()
	params.CqOff.Cqes = uint32(cqesOffset)

	// Set features supported by the current IO_URING implementation.
	params.Features = linux.IORING_FEAT_SINGLE_MMAP

	// Map all shared buffers.
	if err := iouringfd.mapSharedBuffers(); err != nil {
		return nil, err
	}

	// Initialize the IORings struct from params.
	iouringfd.ioRings.SqRingMask = params.SqEntries - 1
	iouringfd.ioRings.CqRingMask = params.CqEntries - 1
	iouringfd.ioRings.SqRingEntries = params.SqEntries
	iouringfd.ioRings.CqRingEntries = params.CqEntries

	// Write the IORings struct out to the shared buffer.
	view, err := iouringfd.ioRingsBuf.view(iouringfd.ioRings.SizeBytes())
	if err != nil {
		return nil, err
	}
	iouringfd.ioRings.MarshalUnsafe(view)

	if _, err := iouringfd.ioRingsBuf.writeback(iouringfd.ioRings.SizeBytes()); err != nil {
		return nil, err
	}

	return &iouringfd.vfsfd, nil
}
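
// The offsets published in params describe a single shared allocation: the
// io_rings header at offset 0, the CQE array at the next cache-line boundary
// after the header, and the SQ index array after the CQEs. The SQE array
// lives in its own allocation, mapped by userspace at IORING_OFF_SQES.
// ringLayout is a hypothetical helper added only to illustrate that
// arithmetic; it mirrors the computations in New but is not used by the
// implementation.
func ringLayout(numCqEntries uint32) (cqesOff, sqArrayOff uint64, ok bool) {
	headerSize := uint64((*linux.IORings)(nil).SizeBytes())
	cqeBytes := uint64(numCqEntries) * uint64((*linux.IOUringCqe)(nil).SizeBytes())
	// params.CqOff.Cqes: first cache-line boundary after the header.
	cqesOff, ok = hostarch.CacheLineRoundUp(headerSize)
	if !ok {
		return 0, 0, false
	}
	// params.SqOff.Array: first cache-line boundary after the CQE array.
	sqArrayOff, ok = hostarch.CacheLineRoundUp(headerSize + cqeBytes)
	return cqesOff, sqArrayOff, ok
}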

// Release implements vfs.FileDescriptionImpl.Release.
func (fd *FileDescription) Release(ctx context.Context) {
	fd.mf.DecRef(fd.rbmf.fr)
	fd.mf.DecRef(fd.sqemf.fr)
}

// mapSharedBuffers caches internal mappings for the ring's shared memory
// regions.
func (fd *FileDescription) mapSharedBuffers() error {
	// Mapping for the IORings header struct.
	rb, err := fd.mf.MapInternal(fd.rbmf.fr, hostarch.ReadWrite)
	if err != nil {
		return err
	}
	fd.ioRingsBuf.init(rb)

	// Mapping for the CQEs array. This is contiguous with the header struct.
	cqesOffset := uint64(fd.ioRings.SizeBytes())
	cqesOffset, ok := hostarch.CacheLineRoundUp(cqesOffset)
	if !ok {
		return linuxerr.EOVERFLOW
	}
	cqes := rb.DropFirst(int(cqesOffset))
	fd.cqesBuf.init(cqes)

	// Mapping for the SQEs array.
	sqes, err := fd.mf.MapInternal(fd.sqemf.fr, hostarch.ReadWrite)
	if err != nil {
		return err
	}
	fd.sqesBuf.init(sqes)

	return nil
}

// ConfigureMMap implements vfs.FileDescriptionImpl.ConfigureMMap.
func (fd *FileDescription) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error {
	var mappable memmap.Mappable
	switch opts.Offset {
	case linux.IORING_OFF_SQ_RING, linux.IORING_OFF_CQ_RING:
		mappable = &fd.rbmf
	case linux.IORING_OFF_SQES:
		mappable = &fd.sqemf
	default:
		return linuxerr.EINVAL
	}

	opts.Offset = 0

	return vfs.GenericConfigureMMap(&fd.vfsfd, mappable, opts)
}
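
// For reference, userspace reaches these regions through mmap(2) on the
// io_uring fd using the magic offsets matched above. A rough sketch of the
// two mappings a liburing-style setup performs (illustrative pseudocode;
// the size expressions are assumptions derived from the offsets New writes
// into params):
//
//	// One mapping covers both SQ and CQ rings (IORING_FEAT_SINGLE_MMAP).
//	rings = mmap(fd, IORING_OFF_SQ_RING, sqOff.Array + sqEntries*sizeof(uint32))
//	sqes  = mmap(fd, IORING_OFF_SQES, sqEntries*sizeof(io_uring_sqe))
//
// ConfigureMMap resets opts.Offset to 0 because each memmap.Mappable here is
// backed by its own allocation that begins at file offset 0.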

// ProcessSubmissions processes the submission queue. Concurrent calls to
// ProcessSubmissions serialize, yielding task goroutines with Task.Block
// since processing can take a long time.
func (fd *FileDescription) ProcessSubmissions(t *kernel.Task, toSubmit uint32, minComplete uint32, flags uint32) (int, error) {
	// We use a combination of fd.running and fd.runC to serialize concurrent
	// callers to ProcessSubmissions. runC has a capacity of 1. The protocol
	// works as follows:
	//
	// * Becoming the active task
	//
	// On entry to ProcessSubmissions, we try to transition running from 0 to
	// 1. If there is already an active task, this will fail and we'll go to
	// sleep with Task.Block(). If we succeed, we're the active task.
	//
	// * Sleep, Wakeup
	//
	// If we had to sleep, on wakeup we try to transition running to 1 again,
	// as we could still be racing with other tasks. Note that if multiple
	// tasks are sleeping, only one will wake up, since only one will
	// successfully receive from runC. However, we could still race with a
	// new caller of ProcessSubmissions that hasn't gone to sleep yet. Only
	// one waiting task will succeed and become the active task; the rest
	// will go to sleep.
	//
	// runC needs to be buffered to avoid a race between checking running and
	// going back to sleep. With an unbuffered channel, we could miss a
	// wakeup like this:
	//
	//  Task B (entering, sleeping)                       | Task A (active, releasing)
	//  --------------------------------------------------+----------------------------
	//  for !fd.running.CompareAndSwap(0, 1) { // Fails   |
	//                                                    | fd.running.Store(0)
	//                                                    | nonblockingSend(runC) // Missed!
	//      t.Block(fd.runC) // Will block forever        |
	//  }                                                 |
	//
	// Task A's send would have to be non-blocking, as there may not be a
	// concurrent Task B.
	//
	// A side effect of using a buffered channel is that the first task that
	// needs to sleep may wake up once immediately due to a previously queued
	// wakeup. This isn't a problem, as it'll immediately try to transition
	// running to 1, likely fail again and go back to sleep. Task.Block has a
	// fast path if runC already has a queued message, so this won't result
	// in a task state change.
	//
	// * Release
	//
	// When the active task is done, it releases the critical section by
	// setting running = 0, then doing a non-blocking send on runC. The send
	// needs to be non-blocking, as there may not be a concurrent sleeper.
	for !fd.running.CompareAndSwap(0, 1) {
		t.Block(fd.runC)
	}
	// We successfully set fd.running, so we're the active task now.
	defer func() {
		// Unblock any potentially waiting tasks.
		if !fd.running.CompareAndSwap(1, 0) {
			panic(fmt.Sprintf("iouringfs.FileDescription.ProcessSubmissions: active task encountered invalid fd.running state %v", fd.running.Load()))
		}
		select {
		case fd.runC <- struct{}{}:
		default:
		}
	}()

	// The rest of this function is a critical section with respect to
	// concurrent callers.

	if fd.remap {
		fd.mapSharedBuffers()
		fd.remap = false
	}

	var err error
	var sqe linux.IOUringSqe

	sqOff := linux.PreComputedIOSqRingOffsets()
	cqOff := linux.PreComputedIOCqRingOffsets()
	sqArraySize := sqe.SizeBytes() * int(fd.ioRings.SqRingEntries)
	cqArraySize := (*linux.IOUringCqe)(nil).SizeBytes() * int(fd.ioRings.CqRingEntries)

	// Fetch all buffers initially.
	fetchRB := true
	fetchSQA := true
	fetchCQA := true

	var view, sqaView, cqaView []byte
	submitted := uint32(0)

	for toSubmit > submitted {
		// This loop can take a long time to process, so periodically check
		// for interrupts. This also pets the watchdog.
		if t.Interrupted() {
			return -1, linuxerr.EINTR
		}

		if fetchRB {
			view, err = fd.ioRingsBuf.view(fd.ioRings.SizeBytes())
			if err != nil {
				return -1, err
			}
		}

		// Note: The kernel uses sqHead as a cursor and writes cqTail.
		// Userspace uses cqHead as a cursor and writes sqTail.

		sqHeadPtr := atomicUint32AtOffset(view, int(sqOff.Head))
		sqTailPtr := atomicUint32AtOffset(view, int(sqOff.Tail))
		cqHeadPtr := atomicUint32AtOffset(view, int(cqOff.Head))
		cqTailPtr := atomicUint32AtOffset(view, int(cqOff.Tail))
		overflowPtr := atomicUint32AtOffset(view, int(cqOff.Overflow))

		// Load the pointers once, so we work with stable values.
		// Particularly, userspace can update the SQ tail at any time.
		sqHead := sqHeadPtr.Load()
		sqTail := sqTailPtr.Load()

		// Is the submission queue empty?
		if sqHead == sqTail {
			return int(submitted), nil
		}

		// We have at least one pending SQE; unmarshal the first from the
		// submission queue.
		if fetchSQA {
			sqaView, err = fd.sqesBuf.view(sqArraySize)
			if err != nil {
				return -1, err
			}
		}
		sqaOff := int(sqHead&fd.ioRings.SqRingMask) * sqe.SizeBytes()
		sqe.UnmarshalUnsafe(sqaView[sqaOff : sqaOff+sqe.SizeBytes()])
		fetchSQA = fd.sqesBuf.drop()

		// Dispatch the request from the unmarshalled entry.
		cqe := fd.ProcessSubmission(t, &sqe, flags)

		// Advance the sq head.
		sqHeadPtr.Add(1)

		// Load once so we have stable values. Particularly, userspace can
		// update the CQ head at any time.
		cqHead := cqHeadPtr.Load()
		cqTail := cqTailPtr.Load()

		// Marshal the response to the completion queue.
		if (cqTail - cqHead) >= fd.ioRings.CqRingEntries {
			// CQ ring full.
			fd.ioRings.CqOverflow++
			overflowPtr.Store(fd.ioRings.CqOverflow)
		} else {
			// Have room in the CQ; marshal the CQE.
			if fetchCQA {
				cqaView, err = fd.cqesBuf.view(cqArraySize)
				if err != nil {
					return -1, err
				}
			}
			cqaOff := int(cqTail&fd.ioRings.CqRingMask) * cqe.SizeBytes()
			cqe.MarshalUnsafe(cqaView[cqaOff : cqaOff+cqe.SizeBytes()])
			fetchCQA, err = fd.cqesBuf.writebackWindow(cqaOff, cqe.SizeBytes())
			if err != nil {
				return -1, err
			}

			// Advance the cq tail.
			cqTailPtr.Add(1)
		}

		fetchRB, err = fd.ioRingsBuf.writeback(fd.ioRings.SizeBytes())
		if err != nil {
			return -1, err
		}

		submitted++
	}

	return int(submitted), nil
}
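
// The cursor discipline above follows the io_uring single-producer /
// single-consumer contract: each side writes only its own cursor, both
// cursors increase monotonically, and values are wrapped onto the ring with
// the power-of-two mask only on access. A minimal sketch of the matching
// userspace side (illustrative pseudocode; names, SQE setup, and memory
// barriers are elided or hypothetical):
//
//	// Submit: publish an SQE index, then bump the tail.
//	tail := load(sq.tail)
//	sqArray[tail&sq.mask] = sqeIndex
//	store(sq.tail, tail+1) // observed above via sqTailPtr.Load()
//
//	// Complete: consume CQEs between cq.head and the kernel-written cq.tail.
//	head := load(cq.head)
//	for head != load(cq.tail) {
//		consume(cqes[head&cq.mask])
//		head++
//	}
//	store(cq.head, head) // observed above via cqHeadPtr.Load()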

// ProcessSubmission processes a single submission request.
func (fd *FileDescription) ProcessSubmission(t *kernel.Task, sqe *linux.IOUringSqe, flags uint32) *linux.IOUringCqe {
	var (
		cqeErr   error
		cqeFlags uint32
		retValue int32
	)

	switch op := sqe.Opcode; op {
	case linux.IORING_OP_NOP:
		// For the NOP operation, we don't do anything special.
	case linux.IORING_OP_READV:
		retValue, cqeErr = fd.handleReadv(t, sqe, flags)
		if cqeErr == io.EOF {
			// Don't raise EOF as an errno; error translation will fail.
			// Short reads aren't failures.
			cqeErr = nil
		}
	default: // Unsupported operation
		retValue = -int32(linuxerr.EINVAL.Errno())
	}

	if cqeErr != nil {
		retValue = -int32(kernel.ExtractErrno(cqeErr, -1))
	}

	return &linux.IOUringCqe{
		UserData: sqe.UserData,
		Res:      retValue,
		Flags:    cqeFlags,
	}
}

// handleReadv handles IORING_OP_READV.
func (fd *FileDescription) handleReadv(t *kernel.Task, sqe *linux.IOUringSqe, flags uint32) (int32, error) {
	// Check that the file descriptor is valid.
	if sqe.Fd < 0 {
		return 0, linuxerr.EBADF
	}
	// Currently we don't support any flags for the SQEs.
	if sqe.Flags != 0 {
		return 0, linuxerr.EINVAL
	}
	// If the file is not seekable, the offset must be zero. Currently, we
	// don't support non-zero offsets.
	if sqe.OffOrAddrOrCmdOp != 0 {
		return 0, linuxerr.EINVAL
	}
	// ioprio should not be set for the READV operation.
	if sqe.IoPrio != 0 {
		return 0, linuxerr.EINVAL
	}

	// AddressSpaceActive is set to true because we are doing this from the
	// task goroutine, which is always the case since we support neither
	// IOPOLL nor SQPOLL mode.
	dst, err := t.IovecsIOSequence(hostarch.Addr(sqe.AddrOrSpliceOff), int(sqe.Len), usermem.IOOpts{
		AddressSpaceActive: true,
	})
	if err != nil {
		return 0, err
	}
	file := t.GetFile(sqe.Fd)
	if file == nil {
		return 0, linuxerr.EBADF
	}
	defer file.DecRef(t)
	n, err := file.PRead(t, dst, 0, vfs.ReadOptions{})
	if err != nil {
		return 0, err
	}

	return int32(n), nil
}
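
// Given the checks in handleReadv, the only SQE shape this implementation
// accepts for a read is IORING_OP_READV with no flags, no ioprio, and a
// zero offset. exampleReadvSqe is a hypothetical helper, shown only to make
// that contract concrete; it is not used by the implementation.
func exampleReadvSqe(fd int32, iovecsAddr uint64, iovecsLen uint32, userData uint64) linux.IOUringSqe {
	return linux.IOUringSqe{
		Opcode:          linux.IORING_OP_READV,
		Fd:              fd,         // must be a valid, readable fd
		AddrOrSpliceOff: iovecsAddr, // userspace address of the iovec array
		Len:             iovecsLen,  // iovec count, as for readv(2)
		UserData:        userData,   // opaque; echoed back in the CQE
		// Flags, IoPrio, and OffOrAddrOrCmdOp must stay zero, or handleReadv
		// rejects the SQE with EINVAL.
	}
}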

// updateCq updates a completion queue by adding a given completion queue
// entry.
func (fd *FileDescription) updateCq(cqes *safemem.BlockSeq, cqe *linux.IOUringCqe, cqTail uint32) error {
	cqeSize := uint32((*linux.IOUringCqe)(nil).SizeBytes())
	if cqes.NumBlocks() == 1 && !cqes.Head().NeedSafecopy() {
		cqe.MarshalBytes(cqes.Head().ToSlice()[cqTail*cqeSize : (cqTail+1)*cqeSize])

		return nil
	}

	buf := make([]byte, cqes.NumBytes())
	cqe.MarshalBytes(buf)
	cp, cperr := safemem.CopySeq(cqes.DropFirst64(uint64(cqTail*cqeSize)), safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf)))
	if cp == 0 {
		return cperr
	}

	return nil
}

// sqEntriesFile implements memmap.Mappable for SQ entries.
//
// +stateify savable
type sqEntriesFile struct {
	fr memmap.FileRange
}

// AddMapping implements memmap.Mappable.AddMapping.
func (sqemf *sqEntriesFile) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
	return nil
}

// RemoveMapping implements memmap.Mappable.RemoveMapping.
func (sqemf *sqEntriesFile) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
}

// CopyMapping implements memmap.Mappable.CopyMapping.
func (sqemf *sqEntriesFile) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
	return nil
}

// Translate implements memmap.Mappable.Translate.
func (sqemf *sqEntriesFile) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
	if required.End > sqemf.fr.Length() {
		return nil, &memmap.BusError{linuxerr.EFAULT}
	}

	if source := optional.Intersect(memmap.MappableRange{0, sqemf.fr.Length()}); source.Length() != 0 {
		return []memmap.Translation{
			{
				Source: source,
				File:   pgalloc.MemoryFileFromContext(ctx),
				Offset: sqemf.fr.Start + source.Start,
				Perms:  at,
			},
		}, nil
	}

	return nil, linuxerr.EFAULT
}

// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable.
func (sqemf *sqEntriesFile) InvalidateUnsavable(ctx context.Context) error {
	return nil
}
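
// Both mappable types in this file share the same Translate shape: a page
// fault on a userspace mapping resolves directly to the preallocated
// FileRange in the sentry's MemoryFile, so userspace and the sentry's
// MapInternal views are backed by the same pages. Illustrative arithmetic
// with hypothetical values: for fr = [0x10000, 0x14000) and a fault at
// mappable offset 0x1000, Translate yields Offset = fr.Start + 0x1000 =
// 0x11000, i.e. the very pages the sentry reads and writes through its
// sharedBuffer views. This is what makes the rings shared without copies.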

// ringsBufferFile implements memmap.Mappable for the SQ and CQ ring buffers.
//
// +stateify savable
type ringsBufferFile struct {
	fr memmap.FileRange
}

// AddMapping implements memmap.Mappable.AddMapping.
func (rbmf *ringsBufferFile) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) error {
	return nil
}

// RemoveMapping implements memmap.Mappable.RemoveMapping.
func (rbmf *ringsBufferFile) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar hostarch.AddrRange, offset uint64, writable bool) {
}

// CopyMapping implements memmap.Mappable.CopyMapping.
func (rbmf *ringsBufferFile) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR hostarch.AddrRange, offset uint64, writable bool) error {
	return nil
}

// Translate implements memmap.Mappable.Translate.
func (rbmf *ringsBufferFile) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
	if required.End > rbmf.fr.Length() {
		return nil, &memmap.BusError{linuxerr.EFAULT}
	}

	if source := optional.Intersect(memmap.MappableRange{0, rbmf.fr.Length()}); source.Length() != 0 {
		return []memmap.Translation{
			{
				Source: source,
				File:   pgalloc.MemoryFileFromContext(ctx),
				Offset: rbmf.fr.Start + source.Start,
				Perms:  at,
			},
		}, nil
	}

	return nil, linuxerr.EFAULT
}

// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable.
func (rbmf *ringsBufferFile) InvalidateUnsavable(ctx context.Context) error {
	return nil
}