github.com/ttpreport/gvisor-ligolo@v0.0.0-20240123134145-a858404967ba/pkg/sentry/devices/nvproxy/frontend.go

     1  // Copyright 2023 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package nvproxy
    16  
    17  import (
    18  	"fmt"
    19  	"sync/atomic"
    20  
    21  	"github.com/ttpreport/gvisor-ligolo/pkg/abi/linux"
    22  	"github.com/ttpreport/gvisor-ligolo/pkg/abi/nvgpu"
    23  	"github.com/ttpreport/gvisor-ligolo/pkg/cleanup"
    24  	"github.com/ttpreport/gvisor-ligolo/pkg/context"
    25  	"github.com/ttpreport/gvisor-ligolo/pkg/errors/linuxerr"
    26  	"github.com/ttpreport/gvisor-ligolo/pkg/fdnotifier"
    27  	"github.com/ttpreport/gvisor-ligolo/pkg/hostarch"
    28  	"github.com/ttpreport/gvisor-ligolo/pkg/log"
    29  	"github.com/ttpreport/gvisor-ligolo/pkg/sentry/arch"
    30  	"github.com/ttpreport/gvisor-ligolo/pkg/sentry/kernel"
    31  	"github.com/ttpreport/gvisor-ligolo/pkg/sentry/memmap"
    32  	"github.com/ttpreport/gvisor-ligolo/pkg/sentry/mm"
    33  	"github.com/ttpreport/gvisor-ligolo/pkg/sentry/vfs"
    34  	"github.com/ttpreport/gvisor-ligolo/pkg/usermem"
    35  	"github.com/ttpreport/gvisor-ligolo/pkg/waiter"
    36  	"golang.org/x/sys/unix"
    37  )
    38  
    39  // frontendDevice implements vfs.Device for /dev/nvidia# and /dev/nvidiactl.
    40  //
    41  // +stateify savable
    42  type frontendDevice struct {
    43  	nvp   *nvproxy
    44  	minor uint32
    45  }
    46  
    47  // Open implements vfs.Device.Open.
    48  func (dev *frontendDevice) Open(ctx context.Context, mnt *vfs.Mount, vfsd *vfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
    49  	var hostPath string
    50  	if dev.minor == nvgpu.NV_CONTROL_DEVICE_MINOR {
    51  		hostPath = "/dev/nvidiactl"
    52  	} else {
    53  		hostPath = fmt.Sprintf("/dev/nvidia%d", dev.minor)
    54  	}
    55  	hostFD, err := unix.Openat(-1, hostPath, int((opts.Flags&unix.O_ACCMODE)|unix.O_NOFOLLOW), 0)
    56  	if err != nil {
    57  		ctx.Warningf("nvproxy: failed to open host %s: %v", hostPath, err)
    58  		return nil, err
    59  	}
    60  	fd := &frontendFD{
    61  		nvp:       dev.nvp,
    62  		hostFD:    int32(hostFD),
    63  		isControl: dev.minor == nvgpu.NV_CONTROL_DEVICE_MINOR,
    64  	}
    65  	if err := fd.vfsfd.Init(fd, opts.Flags, mnt, vfsd, &vfs.FileDescriptionOptions{
    66  		UseDentryMetadata: true,
    67  	}); err != nil {
    68  		unix.Close(hostFD)
    69  		return nil, err
    70  	}
    71  	if err := fdnotifier.AddFD(int32(hostFD), &fd.queue); err != nil {
    72  		unix.Close(hostFD)
    73  		return nil, err
    74  	}
    75  	fd.memmapFile.fd = fd
    76  	return &fd.vfsfd, nil
    77  }
    78  
    79  // frontendFD implements vfs.FileDescriptionImpl for /dev/nvidia# and
    80  // /dev/nvidiactl.
    81  //
    82  // frontendFD is not savable; we do not implement save/restore of host GPU
    83  // state.
    84  type frontendFD struct {
    85  	vfsfd vfs.FileDescription
    86  	vfs.FileDescriptionDefaultImpl
    87  	vfs.DentryMetadataFileDescriptionImpl
    88  	vfs.NoLockFD
    89  
    90  	nvp        *nvproxy
    91  	hostFD     int32
    92  	isControl  bool
    93  	memmapFile frontendFDMemmapFile
    94  
    95  	queue waiter.Queue
    96  
    97  	haveMmapContext atomic.Bool
    98  }
    99  
   100  // Release implements vfs.FileDescriptionImpl.Release.
   101  func (fd *frontendFD) Release(context.Context) {
   102  	fdnotifier.RemoveFD(fd.hostFD)
   103  	fd.queue.Notify(waiter.EventHUp)
   104  	unix.Close(int(fd.hostFD))
   105  }
   106  
   107  // EventRegister implements waiter.Waitable.EventRegister.
   108  func (fd *frontendFD) EventRegister(e *waiter.Entry) error {
   109  	fd.queue.EventRegister(e)
   110  	if err := fdnotifier.UpdateFD(fd.hostFD); err != nil {
   111  		fd.queue.EventUnregister(e)
   112  		return err
   113  	}
   114  	return nil
   115  }
   116  
   117  // EventUnregister implements waiter.Waitable.EventUnregister.
   118  func (fd *frontendFD) EventUnregister(e *waiter.Entry) {
   119  	fd.queue.EventUnregister(e)
   120  	if err := fdnotifier.UpdateFD(fd.hostFD); err != nil {
   121  		panic(fmt.Sprint("UpdateFD:", err))
   122  	}
   123  }
   124  
   125  // Readiness implements waiter.Waitable.Readiness.
   126  func (fd *frontendFD) Readiness(mask waiter.EventMask) waiter.EventMask {
   127  	return fdnotifier.NonBlockingPoll(fd.hostFD, mask)
   128  }
   129  
   130  // Epollable implements vfs.FileDescriptionImpl.Epollable.
   131  func (fd *frontendFD) Epollable() bool {
   132  	return true
   133  }
   134  
   135  // Ioctl implements vfs.FileDescriptionImpl.Ioctl.
   136  func (fd *frontendFD) Ioctl(ctx context.Context, uio usermem.IO, sysno uintptr, args arch.SyscallArguments) (uintptr, error) {
   137  	cmd := args[1].Uint()
   138  	nr := linux.IOC_NR(cmd)
   139  	argPtr := args[2].Pointer()
   140  	argSize := linux.IOC_SIZE(cmd)
   141  
   142  	t := kernel.TaskFromContext(ctx)
   143  	if t == nil {
   144  		panic("Ioctl should be called from a task context")
   145  	}
   146  
   147  	fi := frontendIoctlState{
   148  		fd:              fd,
   149  		ctx:             ctx,
   150  		t:               t,
   151  		nr:              nr,
   152  		ioctlParamsAddr: argPtr,
   153  		ioctlParamsSize: argSize,
   154  	}
   155  
   156  	// nr determines the argument type.
   157  	// Don't log nr since it's already visible as the last byte of cmd in
   158  	// strace logging.
   159  	// Implementors:
   160  	// - To map nr to a symbol, look in
   161  	// src/nvidia/arch/nvalloc/unix/include/nv_escape.h,
   162  	// kernel-open/common/inc/nv-ioctl-numbers.h, and
   163  	// kernel-open/common/inc/nv-ioctl-numa.h.
   164  	// - To determine the parameter type, find the implementation in
   165  	// kernel-open/nvidia/nv.c:nvidia_ioctl() or
   166  	// src/nvidia/arch/nvalloc/unix/src/escape.c:RmIoctl().
   167  	// - Add symbol and parameter type definitions to //pkg/abi/nvgpu.
   168  	// - Add filter to seccomp_filters.go.
   169  	// - Add handling below.
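        	//
        	// As a sketch (the escape code here is hypothetical, not part of the
        	// driver ABI), an ioctl whose parameters need no translation would
        	// simply be routed through frontendIoctlSimple:
        	//
        	//	case nvgpu.NV_ESC_EXAMPLE: // hypothetical nr
        	//		return frontendIoctlSimple(&fi)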
   170  	switch nr {
   171  	case
   172  		nvgpu.NV_ESC_CARD_INFO,                     // nv_ioctl_card_info_t
   173  		nvgpu.NV_ESC_CHECK_VERSION_STR,             // nv_rm_api_version_t
   174  		nvgpu.NV_ESC_SYS_PARAMS,                    // nv_ioctl_sys_params_t
   175  		nvgpu.NV_ESC_RM_DUP_OBJECT,                 // NVOS55_PARAMETERS
   176  		nvgpu.NV_ESC_RM_SHARE,                      // NVOS57_PARAMETERS
   177  		nvgpu.NV_ESC_RM_UNMAP_MEMORY,               // NVOS34_PARAMETERS
   178  		nvgpu.NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO: // NVOS56_PARAMETERS
   179  		return frontendIoctlSimple(&fi)
   180  	case nvgpu.NV_ESC_REGISTER_FD:
   181  		return frontendRegisterFD(&fi)
   182  	case nvgpu.NV_ESC_ALLOC_OS_EVENT:
   183  		return rmAllocOSEvent(&fi)
   184  	case nvgpu.NV_ESC_FREE_OS_EVENT:
   185  		return rmFreeOSEvent(&fi)
   186  	case nvgpu.NV_ESC_NUMA_INFO:
   187  		// The CPU topology seen by the host driver differs from the CPU
   188  		// topology presented by the sentry to the application, so reject this
   189  		// ioctl; doing so is non-fatal.
   190  		ctx.Debugf("nvproxy: ignoring NV_ESC_NUMA_INFO")
   191  		return 0, linuxerr.EINVAL
   192  	case nvgpu.NV_ESC_RM_ALLOC_MEMORY:
   193  		return rmAllocMemory(&fi)
   194  	case nvgpu.NV_ESC_RM_FREE:
   195  		return rmFree(&fi)
   196  	case nvgpu.NV_ESC_RM_CONTROL:
   197  		return rmControl(&fi)
   198  	case nvgpu.NV_ESC_RM_ALLOC:
   199  		return rmAlloc(&fi)
   200  	case nvgpu.NV_ESC_RM_VID_HEAP_CONTROL:
   201  		return rmVidHeapControl(&fi)
   202  	case nvgpu.NV_ESC_RM_MAP_MEMORY:
   203  		return rmMapMemory(&fi)
   204  	default:
   205  		ctx.Warningf("nvproxy: unknown frontend ioctl %d == %#x (argSize=%d, cmd=%#x)", nr, nr, argSize, cmd)
   206  		return 0, linuxerr.EINVAL
   207  	}
   208  }
   209  
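        // frontendIoctlCmd returns the complete ioctl request value for the
        // frontend ioctl with the given number and parameter size, suitable for
        // reissuing the ioctl to the host driver.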
   210  func frontendIoctlCmd(nr, argSize uint32) uintptr {
   211  	return uintptr(linux.IOWR(nvgpu.NV_IOCTL_MAGIC, nr, argSize))
   212  }
   213  
   214  // frontendIoctlState holds the state of a call to frontendFD.Ioctl().
   215  type frontendIoctlState struct {
   216  	fd              *frontendFD
   217  	ctx             context.Context
   218  	t               *kernel.Task
   219  	nr              uint32
   220  	ioctlParamsAddr hostarch.Addr
   221  	ioctlParamsSize uint32
   222  }
   223  
   224  // frontendIoctlSimple implements a frontend ioctl whose parameters don't
   225  // contain any pointers requiring translation, file descriptors, or special
   226  // cases or effects, and consequently don't need to be typed by the sentry.
   227  func frontendIoctlSimple(fi *frontendIoctlState) (uintptr, error) {
   228  	if fi.ioctlParamsSize == 0 {
   229  		return frontendIoctlInvoke[byte](fi, nil)
   230  	}
   231  
   232  	ioctlParams := make([]byte, fi.ioctlParamsSize)
   233  	if _, err := fi.t.CopyInBytes(fi.ioctlParamsAddr, ioctlParams); err != nil {
   234  		return 0, err
   235  	}
   236  	n, err := frontendIoctlInvoke(fi, &ioctlParams[0])
   237  	if err != nil {
   238  		return n, err
   239  	}
   240  	if _, err := fi.t.CopyOutBytes(fi.ioctlParamsAddr, ioctlParams); err != nil {
   241  		return n, err
   242  	}
   243  	return n, nil
   244  }
   245  
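        // frontendRegisterFD implements NV_ESC_REGISTER_FD, replacing the
        // application's control FD in the parameters with the corresponding host
        // control FD before invoking the host ioctl.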
   246  func frontendRegisterFD(fi *frontendIoctlState) (uintptr, error) {
   247  	var ioctlParams nvgpu.IoctlRegisterFD
   248  	if fi.ioctlParamsSize != nvgpu.SizeofIoctlRegisterFD {
   249  		return 0, linuxerr.EINVAL
   250  	}
   251  	if _, err := ioctlParams.CopyIn(fi.t, fi.ioctlParamsAddr); err != nil {
   252  		return 0, err
   253  	}
   254  	ctlFileGeneric, _ := fi.t.FDTable().Get(ioctlParams.CtlFD)
   255  	if ctlFileGeneric == nil {
   256  		return 0, linuxerr.EINVAL
   257  	}
   258  	defer ctlFileGeneric.DecRef(fi.ctx)
   259  	ctlFile, ok := ctlFileGeneric.Impl().(*frontendFD)
   260  	if !ok {
   261  		return 0, linuxerr.EINVAL
   262  	}
   263  	sentryIoctlParams := nvgpu.IoctlRegisterFD{
   264  		CtlFD: ctlFile.hostFD,
   265  	}
   266  	// The returned ctl_fd can't change, so skip copying out.
   267  	return frontendIoctlInvoke(fi, &sentryIoctlParams)
   268  }
   269  
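        // rmAllocOSEvent implements NV_ESC_ALLOC_OS_EVENT, translating the event
        // FD in the parameters to its host FD for the host ioctl and restoring
        // the application FD on copy-out.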
   270  func rmAllocOSEvent(fi *frontendIoctlState) (uintptr, error) {
   271  	var ioctlParams nvgpu.IoctlAllocOSEvent
   272  	if fi.ioctlParamsSize != nvgpu.SizeofIoctlAllocOSEvent {
   273  		return 0, linuxerr.EINVAL
   274  	}
   275  	if _, err := ioctlParams.CopyIn(fi.t, fi.ioctlParamsAddr); err != nil {
   276  		return 0, err
   277  	}
   278  	eventFileGeneric, _ := fi.t.FDTable().Get(int32(ioctlParams.FD))
   279  	if eventFileGeneric == nil {
   280  		return 0, linuxerr.EINVAL
   281  	}
   282  	defer eventFileGeneric.DecRef(fi.ctx)
   283  	eventFile, ok := eventFileGeneric.Impl().(*frontendFD)
   284  	if !ok {
   285  		return 0, linuxerr.EINVAL
   286  	}
   287  	sentryIoctlParams := ioctlParams
   288  	sentryIoctlParams.FD = uint32(eventFile.hostFD)
   289  
   290  	n, err := frontendIoctlInvoke(fi, &sentryIoctlParams)
   291  	if err != nil {
   292  		return n, err
   293  	}
   294  
   295  	outIoctlParams := sentryIoctlParams
   296  	outIoctlParams.FD = ioctlParams.FD
   297  	if _, err := outIoctlParams.CopyOut(fi.t, fi.ioctlParamsAddr); err != nil {
   298  		return n, err
   299  	}
   300  
   301  	return n, nil
   302  }
   303  
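        // rmFreeOSEvent implements NV_ESC_FREE_OS_EVENT; like rmAllocOSEvent, it
        // translates the event FD to its host FD and restores the application FD
        // on copy-out.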
   304  func rmFreeOSEvent(fi *frontendIoctlState) (uintptr, error) {
   305  	var ioctlParams nvgpu.IoctlFreeOSEvent
   306  	if fi.ioctlParamsSize != nvgpu.SizeofIoctlFreeOSEvent {
   307  		return 0, linuxerr.EINVAL
   308  	}
   309  	if _, err := ioctlParams.CopyIn(fi.t, fi.ioctlParamsAddr); err != nil {
   310  		return 0, err
   311  	}
   312  	eventFileGeneric, _ := fi.t.FDTable().Get(int32(ioctlParams.FD))
   313  	if eventFileGeneric == nil {
   314  		return 0, linuxerr.EINVAL
   315  	}
   316  	defer eventFileGeneric.DecRef(fi.ctx)
   317  	eventFile, ok := eventFileGeneric.Impl().(*frontendFD)
   318  	if !ok {
   319  		return 0, linuxerr.EINVAL
   320  	}
   321  	sentryIoctlParams := ioctlParams
   322  	sentryIoctlParams.FD = uint32(eventFile.hostFD)
   323  
   324  	n, err := frontendIoctlInvoke(fi, &sentryIoctlParams)
   325  	if err != nil {
   326  		return n, err
   327  	}
   328  
   329  	outIoctlParams := sentryIoctlParams
   330  	outIoctlParams.FD = ioctlParams.FD
   331  	if _, err := outIoctlParams.CopyOut(fi.t, fi.ioctlParamsAddr); err != nil {
   332  		return n, err
   333  	}
   334  
   335  	return n, nil
   336  }
   337  
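        // rmAllocMemory implements NV_ESC_RM_ALLOC_MEMORY, dispatching on the
        // allocation class in Params.HClass.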
   338  func rmAllocMemory(fi *frontendIoctlState) (uintptr, error) {
   339  	var ioctlParams nvgpu.IoctlNVOS02ParametersWithFD
   340  	if fi.ioctlParamsSize != nvgpu.SizeofIoctlNVOS02ParametersWithFD {
   341  		return 0, linuxerr.EINVAL
   342  	}
   343  	if _, err := ioctlParams.CopyIn(fi.t, fi.ioctlParamsAddr); err != nil {
   344  		return 0, err
   345  	}
   346  
   347  	if log.IsLogging(log.Debug) {
   348  		fi.ctx.Debugf("nvproxy: NV_ESC_RM_ALLOC_MEMORY class %#08x", ioctlParams.Params.HClass)
   349  	}
   350  	// See src/nvidia/arch/nvalloc/unix/src/escape.c:RmIoctl() and
   351  	// src/nvidia/interface/deprecated/rmapi_deprecated_allocmemory.c:rmAllocMemoryTable
   352  	// for implementation.
   353  	switch ioctlParams.Params.HClass {
   354  	case nvgpu.NV01_MEMORY_SYSTEM_OS_DESCRIPTOR:
   355  		return rmAllocOSDescriptor(fi, &ioctlParams)
   356  	default:
   357  		fi.ctx.Warningf("nvproxy: unknown NV_ESC_RM_ALLOC_MEMORY class %#08x", ioctlParams.Params.HClass)
   358  		return 0, linuxerr.EINVAL
   359  	}
   360  }
   361  
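        // rmAllocOSDescriptor handles NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
        // allocations: it pins the application's pages, mirrors them into a
        // temporary contiguous mapping in the sentry's address space for the host
        // ioctl, and keeps the pages pinned until the resulting object is freed.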
   362  func rmAllocOSDescriptor(fi *frontendIoctlState, ioctlParams *nvgpu.IoctlNVOS02ParametersWithFD) (uintptr, error) {
   363  	// Compare src/nvidia/arch/nvalloc/unix/src/escape.c:RmAllocOsDescriptor()
   364  	// => RmCreateOsDescriptor().
   365  	failWithStatus := func(status uint32) error {
   366  		outIoctlParams := *ioctlParams
   367  		outIoctlParams.Params.Status = status
   368  		_, err := outIoctlParams.CopyOut(fi.t, fi.ioctlParamsAddr)
   369  		return err
   370  	}
   371  	appAddr := addrFromP64(ioctlParams.Params.PMemory)
   372  	if !appAddr.IsPageAligned() {
   373  		return 0, failWithStatus(nvgpu.NV_ERR_NOT_SUPPORTED)
   374  	}
   375  	arLen := ioctlParams.Params.Limit + 1
   376  	if arLen == 0 { // integer overflow
   377  		return 0, failWithStatus(nvgpu.NV_ERR_INVALID_LIMIT)
   378  	}
   379  	var ok bool
   380  	arLen, ok = hostarch.PageRoundUp(arLen)
   381  	if !ok {
   382  		return 0, failWithStatus(nvgpu.NV_ERR_INVALID_ADDRESS)
   383  	}
   384  	appAR, ok := appAddr.ToRange(arLen)
   385  	if !ok {
   386  		return 0, failWithStatus(nvgpu.NV_ERR_INVALID_ADDRESS)
   387  	}
   388  
   389  	// The host driver will collect pages from our address space starting at
   390  	// PMemory, so we must assemble a contiguous mapping equivalent to the
   391  	// application's.
   392  	at := hostarch.Read
   393  	if ((ioctlParams.Params.Flags >> 21) & 0x1) == 0 /* NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO */ {
   394  		at.Write = true
   395  	}
   396  	// Reserve a range in our address space.
   397  	m, _, errno := unix.RawSyscall6(unix.SYS_MMAP, 0 /* addr */, uintptr(arLen), unix.PROT_NONE, unix.MAP_PRIVATE|unix.MAP_ANONYMOUS, ^uintptr(0) /* fd */, 0 /* offset */)
   398  	if errno != 0 {
   399  		return 0, errno
   400  	}
   401  	cu := cleanup.Make(func() {
   402  		unix.RawSyscall(unix.SYS_MUNMAP, m, uintptr(arLen), 0)
   403  	})
   404  	defer cu.Clean()
   405  	// Mirror application mappings into the reserved range.
   406  	prs, err := fi.t.MemoryManager().Pin(fi.ctx, appAR, at, false /* ignorePermissions */)
   407  	cu.Add(func() {
   408  		mm.Unpin(prs)
   409  	})
   410  	if err != nil {
   411  		return 0, err
   412  	}
   413  	sentryAddr := uintptr(m)
   414  	for _, pr := range prs {
   415  		ims, err := pr.File.MapInternal(memmap.FileRange{pr.Offset, pr.Offset + uint64(pr.Source.Length())}, at)
   416  		if err != nil {
   417  			return 0, err
   418  		}
   419  		for !ims.IsEmpty() {
   420  			im := ims.Head()
   421  			if _, _, errno := unix.RawSyscall6(unix.SYS_MREMAP, im.Addr(), 0 /* old_size */, uintptr(im.Len()), linux.MREMAP_MAYMOVE|linux.MREMAP_FIXED, sentryAddr, 0); errno != 0 {
   422  				return 0, errno
   423  			}
   424  			sentryAddr += uintptr(im.Len())
   425  			ims = ims.Tail()
   426  		}
   427  	}
   428  	sentryIoctlParams := *ioctlParams
   429  	sentryIoctlParams.Params.PMemory = nvgpu.P64(uint64(m))
   430  	// NV01_MEMORY_SYSTEM_OS_DESCRIPTOR shouldn't use ioctlParams.FD; clobber
   431  	// it to be sure.
   432  	sentryIoctlParams.FD = -1
   433  
   434  	fi.fd.nvp.objsMu.Lock()
   435  	n, err := frontendIoctlInvoke(fi, &sentryIoctlParams)
   436  	if err != nil {
   437  		fi.fd.nvp.objsMu.Unlock()
   438  		return n, err
   439  	}
   440  	// Transfer ownership of pinned pages to an osDescMem object, to be
   441  	// unpinned when the driver OsDescMem is freed.
   442  	o := &osDescMem{
   443  		pinnedRanges: prs,
   444  	}
   445  	o.object.init(o)
   446  	fi.fd.nvp.objsLive[sentryIoctlParams.Params.HObjectNew] = &o.object
   447  	fi.fd.nvp.objsMu.Unlock()
   448  	cu.Release()
   449  	fi.ctx.Infof("nvproxy: pinned pages for OS descriptor with handle %#x", sentryIoctlParams.Params.HObjectNew)
   450  	// Unmap the reserved range, which is no longer required.
   451  	unix.RawSyscall(unix.SYS_MUNMAP, m, uintptr(arLen), 0)
   452  
   453  	outIoctlParams := sentryIoctlParams
   454  	outIoctlParams.Params.PMemory = ioctlParams.Params.PMemory
   455  	outIoctlParams.FD = ioctlParams.FD
   456  	if _, err := outIoctlParams.CopyOut(fi.t, fi.ioctlParamsAddr); err != nil {
   457  		return n, err
   458  	}
   459  
   460  	return n, nil
   461  }
   462  
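        // rmFree implements NV_ESC_RM_FREE. After the host ioctl succeeds, any
        // sentry object tracked for the freed handle (such as pinned OS
        // descriptor pages) is released.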
   463  func rmFree(fi *frontendIoctlState) (uintptr, error) {
   464  	var ioctlParams nvgpu.NVOS00Parameters
   465  	if fi.ioctlParamsSize != nvgpu.SizeofNVOS00Parameters {
   466  		return 0, linuxerr.EINVAL
   467  	}
   468  	if _, err := ioctlParams.CopyIn(fi.t, fi.ioctlParamsAddr); err != nil {
   469  		return 0, err
   470  	}
   471  
   472  	fi.fd.nvp.objsMu.Lock()
   473  	n, err := frontendIoctlInvoke(fi, &ioctlParams)
   474  	if err != nil {
   475  		fi.fd.nvp.objsMu.Unlock()
   476  		return n, err
   477  	}
   478  	o, ok := fi.fd.nvp.objsLive[ioctlParams.HObjectOld]
   479  	if ok {
   480  		delete(fi.fd.nvp.objsLive, ioctlParams.HObjectOld)
   481  	}
   482  	fi.fd.nvp.objsMu.Unlock()
   483  	if ok {
   484  		o.Release(fi.ctx)
   485  	}
   486  
   487  	if _, err := ioctlParams.CopyOut(fi.t, fi.ioctlParamsAddr); err != nil {
   488  		return n, err
   489  	}
   490  	return n, nil
   491  }
   492  
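        // rmControl implements NV_ESC_RM_CONTROL, dispatching on the control
        // command in Cmd.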
   493  func rmControl(fi *frontendIoctlState) (uintptr, error) {
   494  	var ioctlParams nvgpu.NVOS54Parameters
   495  	if fi.ioctlParamsSize != nvgpu.SizeofNVOS54Parameters {
   496  		return 0, linuxerr.EINVAL
   497  	}
   498  	if _, err := ioctlParams.CopyIn(fi.t, fi.ioctlParamsAddr); err != nil {
   499  		return 0, err
   500  	}
   501  
   502  	// Cmd determines the type of Params.
   503  	if log.IsLogging(log.Debug) {
   504  		fi.ctx.Debugf("nvproxy: control command %#x", ioctlParams.Cmd)
   505  	}
   506  	if ioctlParams.Cmd&nvgpu.RM_GSS_LEGACY_MASK != 0 {
   507  		// This is a "legacy GSS control" that is implemented by the GPU System
   508  	// Processor (GSP). Consequently, its parameters cannot reasonably
   509  		// contain application pointers, and the control is in any case
   510  		// undocumented.
   511  		// See
   512  		// src/nvidia/src/kernel/rmapi/entry_points.c:_nv04ControlWithSecInfo()
   513  		// =>
   514  		// src/nvidia/interface/deprecated/rmapi_deprecated_control.c:RmDeprecatedGetControlHandler()
   515  		// =>
   516  		// src/nvidia/interface/deprecated/rmapi_gss_legacy_control.c:RmGssLegacyRpcCmd().
   517  		return rmControlSimple(fi, &ioctlParams)
   518  	}
   519  	// Implementors:
   520  	// - The top two bytes of Cmd specify the class; the third byte specifies the category;
   521  	// the fourth byte specifies the "message ID" (the command within the class/category).
   522  	//   e.g. 0x800288:
   523  	//   - Class 0x0080 => look in
   524  	//   src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h for categories.
   525  	//   - Category 0x02 => NV0080_CTRL_GPU => look in
   526  	//   src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h for
   527  	//   `#define NV0080_CTRL_CMD_GPU_QUERY_SW_STATE_PERSISTENCE (0x800288)`
   528  	//   and accompanying documentation, parameter type.
   529  	// - If this fails, or to find the implementation, grep for `methodId=.*0x<Cmd
   530  	// in lowercase hex without leading 0s>` to find the entry in g_*_nvoc.c; the
   531  	// implementing function is "pFunc".
   532  	// - Add symbol definition to //pkg/abi/nvgpu. Parameter type definition is
   533  	// only required for non-simple commands.
   534  	// - Add handling below.
   535  	switch ioctlParams.Cmd {
   536  	case
   537  		nvgpu.NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE,
   538  		nvgpu.NV0000_CTRL_CMD_CLIENT_SET_INHERITED_SHARE_POLICY,
   539  		nvgpu.NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS,
   540  		nvgpu.NV0000_CTRL_CMD_GPU_GET_ID_INFO,
   541  		nvgpu.NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2,
   542  		nvgpu.NV0000_CTRL_CMD_GPU_GET_PROBED_IDS,
   543  		nvgpu.NV0000_CTRL_CMD_GPU_ATTACH_IDS,
   544  		nvgpu.NV0000_CTRL_CMD_GPU_DETACH_IDS,
   545  		nvgpu.NV0000_CTRL_CMD_GPU_GET_PCI_INFO,
   546  		nvgpu.NV0000_CTRL_CMD_GPU_QUERY_DRAIN_STATE,
   547  		nvgpu.NV0000_CTRL_CMD_GPU_GET_MEMOP_ENABLE,
   548  		nvgpu.NV0000_CTRL_CMD_SYNC_GPU_BOOST_GROUP_INFO,
   549  		nvgpu.NV0000_CTRL_CMD_SYSTEM_GET_FABRIC_STATUS,
   550  		nvgpu.NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX,
   551  		nvgpu.NV0080_CTRL_CMD_FB_GET_CAPS_V2,
   552  		nvgpu.NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES,
   553  		nvgpu.NV0080_CTRL_CMD_GPU_QUERY_SW_STATE_PERSISTENCE,
   554  		nvgpu.NV0080_CTRL_CMD_GPU_GET_VIRTUALIZATION_MODE,
   555  		0x80028b, // unknown, paramsSize == 1
   556  		nvgpu.NV0080_CTRL_CMD_GPU_GET_CLASSLIST_V2,
   557  		nvgpu.NV0080_CTRL_CMD_HOST_GET_CAPS_V2,
   558  		nvgpu.NV2080_CTRL_CMD_BUS_GET_PCI_INFO,
   559  		nvgpu.NV2080_CTRL_CMD_BUS_GET_PCI_BAR_INFO,
   560  		nvgpu.NV2080_CTRL_CMD_BUS_GET_INFO_V2,
   561  		nvgpu.NV2080_CTRL_CMD_BUS_GET_PCIE_SUPPORTED_GPU_ATOMICS,
   562  		nvgpu.NV2080_CTRL_CMD_CE_GET_ALL_CAPS,
   563  		nvgpu.NV2080_CTRL_CMD_FB_GET_INFO_V2,
   564  		nvgpu.NV2080_CTRL_CMD_GPU_GET_INFO_V2,
   565  		nvgpu.NV2080_CTRL_CMD_GPU_GET_NAME_STRING,
   566  		nvgpu.NV2080_CTRL_CMD_GPU_GET_SHORT_NAME_STRING,
   567  		nvgpu.NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO,
   568  		nvgpu.NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS,
   569  		nvgpu.NV2080_CTRL_CMD_GPU_QUERY_COMPUTE_MODE_RULES,
   570  		nvgpu.NV2080_CTRL_CMD_GPU_ACQUIRE_COMPUTE_MODE_RESERVATION,
   571  		nvgpu.NV2080_CTRL_CMD_GPU_RELEASE_COMPUTE_MODE_RESERVATION,
   572  		nvgpu.NV2080_CTRL_CMD_GPU_GET_GID_INFO,
   573  		nvgpu.NV2080_CTRL_CMD_GPU_GET_ENGINES_V2,
   574  		nvgpu.NV2080_CTRL_CMD_GPU_GET_ACTIVE_PARTITION_IDS,
   575  		nvgpu.NV2080_CTRL_CMD_GPU_GET_COMPUTE_POLICY_CONFIG,
   576  		nvgpu.NV2080_CTRL_CMD_GET_GPU_FABRIC_PROBE_INFO,
   577  		nvgpu.NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE,
   578  		nvgpu.NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_SIZE,
   579  		nvgpu.NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER,
   580  		nvgpu.NV2080_CTRL_CMD_GR_GET_CAPS_V2,
   581  		nvgpu.NV2080_CTRL_CMD_GR_GET_GPC_MASK,
   582  		nvgpu.NV2080_CTRL_CMD_GR_GET_TPC_MASK,
   583  		nvgpu.NV2080_CTRL_CMD_GSP_GET_FEATURES,
   584  		nvgpu.NV2080_CTRL_CMD_MC_GET_ARCH_INFO,
   585  		nvgpu.NV2080_CTRL_CMD_MC_SERVICE_INTERRUPTS,
   586  		nvgpu.NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS,
   587  		nvgpu.NV2080_CTRL_CMD_PERF_BOOST,
   588  		nvgpu.NV2080_CTRL_CMD_RC_GET_WATCHDOG_INFO,
   589  		nvgpu.NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS,
   590  		nvgpu.NV2080_CTRL_CMD_RC_SOFT_DISABLE_WATCHDOG,
   591  		nvgpu.NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO,
   592  		nvgpu.NV503C_CTRL_CMD_REGISTER_VA_SPACE,
   593  		nvgpu.NV503C_CTRL_CMD_REGISTER_VIDMEM,
   594  		nvgpu.NV503C_CTRL_CMD_UNREGISTER_VIDMEM,
   595  		nvgpu.NV83DE_CTRL_CMD_DEBUG_SET_EXCEPTION_MASK,
   596  		nvgpu.NV83DE_CTRL_CMD_DEBUG_READ_ALL_SM_ERROR_STATES,
   597  		nvgpu.NV83DE_CTRL_CMD_DEBUG_CLEAR_ALL_SM_ERROR_STATES,
   598  		nvgpu.NV906F_CTRL_CMD_RESET_CHANNEL,
   599  		nvgpu.NV90E6_CTRL_CMD_MASTER_GET_ERROR_INTR_OFFSET_MASK,
   600  		nvgpu.NV90E6_CTRL_CMD_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK,
   601  		nvgpu.NVC36F_CTRL_GET_CLASS_ENGINEID,
   602  		nvgpu.NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN,
   603  		nvgpu.NVA06C_CTRL_CMD_GPFIFO_SCHEDULE,
   604  		nvgpu.NVA06C_CTRL_CMD_SET_TIMESLICE,
   605  		nvgpu.NVA06C_CTRL_CMD_PREEMPT:
   606  		return rmControlSimple(fi, &ioctlParams)
   607  
   608  	case nvgpu.NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION:
   609  		return ctrlClientSystemGetBuildVersion(fi, &ioctlParams)
   610  
   611  	case nvgpu.NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST:
   612  		return ctrlDevFIFOGetChannelList(fi, &ioctlParams)
   613  
   614  	case nvgpu.NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS:
   615  		return ctrlSubdevFIFODisableChannels(fi, &ioctlParams)
   616  
   617  	case nvgpu.NV2080_CTRL_CMD_GR_GET_INFO:
   618  		return ctrlSubdevGRGetInfo(fi, &ioctlParams)
   619  
   620  	default:
   621  		fi.ctx.Warningf("nvproxy: unknown control command %#x (paramsSize=%d)", ioctlParams.Cmd, ioctlParams.ParamsSize)
   622  		return 0, linuxerr.EINVAL
   623  	}
   624  }
   625  
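        // rmControlSimple handles control commands whose parameters don't contain
        // pointers requiring translation, file descriptors, or other special
        // cases; the parameter buffer is copied in and out as opaque bytes.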
   626  func rmControlSimple(fi *frontendIoctlState, ioctlParams *nvgpu.NVOS54Parameters) (uintptr, error) {
   627  	if ioctlParams.ParamsSize == 0 {
   628  		if ioctlParams.Params != 0 {
   629  			return 0, linuxerr.EINVAL
   630  		}
   631  		return rmControlInvoke[byte](fi, ioctlParams, nil)
   632  	}
   633  	if ioctlParams.Params == 0 {
   634  		return 0, linuxerr.EINVAL
   635  	}
   636  
   637  	ctrlParams := make([]byte, ioctlParams.ParamsSize)
   638  	if _, err := fi.t.CopyInBytes(addrFromP64(ioctlParams.Params), ctrlParams); err != nil {
   639  		return 0, err
   640  	}
   641  	n, err := rmControlInvoke(fi, ioctlParams, &ctrlParams[0])
   642  	if err != nil {
   643  		return n, err
   644  	}
   645  	if _, err := fi.t.CopyOutBytes(addrFromP64(ioctlParams.Params), ctrlParams); err != nil {
   646  		return n, err
   647  	}
   648  	return n, nil
   649  }
   650  
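        // ctrlClientSystemGetBuildVersion implements
        // NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION, buffering the three version
        // strings in the sentry for the host ioctl and copying the results out to
        // the application's buffers.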
   651  func ctrlClientSystemGetBuildVersion(fi *frontendIoctlState, ioctlParams *nvgpu.NVOS54Parameters) (uintptr, error) {
   652  	var ctrlParams nvgpu.NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS
   653  	if ctrlParams.SizeBytes() != int(ioctlParams.ParamsSize) {
   654  		return 0, linuxerr.EINVAL
   655  	}
   656  	if _, err := ctrlParams.CopyIn(fi.t, addrFromP64(ioctlParams.Params)); err != nil {
   657  		return 0, err
   658  	}
   659  
   660  	if ctrlParams.PDriverVersionBuffer == 0 || ctrlParams.PVersionBuffer == 0 || ctrlParams.PTitleBuffer == 0 {
   661  		// No strings are written if any are null. See
   662  		// src/nvidia/interface/deprecated/rmapi_deprecated_control.c:V2_CONVERTER(_NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION).
   663  		return ctrlClientSystemGetBuildVersionInvoke(fi, ioctlParams, &ctrlParams, nil, nil, nil)
   664  	}
   665  
   666  	// Need to buffer strings for copy-out.
   667  	if ctrlParams.SizeOfStrings == 0 {
   668  		return 0, linuxerr.EINVAL
   669  	}
   670  	driverVersionBuf := make([]byte, ctrlParams.SizeOfStrings)
   671  	versionBuf := make([]byte, ctrlParams.SizeOfStrings)
   672  	titleBuf := make([]byte, ctrlParams.SizeOfStrings)
   673  	n, err := ctrlClientSystemGetBuildVersionInvoke(fi, ioctlParams, &ctrlParams, &driverVersionBuf[0], &versionBuf[0], &titleBuf[0])
   674  	if err != nil {
   675  		return n, err
   676  	}
   677  	if _, err := fi.t.CopyOutBytes(addrFromP64(ctrlParams.PDriverVersionBuffer), driverVersionBuf); err != nil {
   678  		return n, err
   679  	}
   680  	if _, err := fi.t.CopyOutBytes(addrFromP64(ctrlParams.PVersionBuffer), versionBuf); err != nil {
   681  		return n, err
   682  	}
   683  	if _, err := fi.t.CopyOutBytes(addrFromP64(ctrlParams.PTitleBuffer), titleBuf); err != nil {
   684  		return n, err
   685  	}
   686  	return n, nil
   687  }
   688  
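        // ctrlSubdevFIFODisableChannels implements
        // NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS, whose parameters require no
        // translation beyond checking that pRunlistPreemptEvent is NULL.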
   689  func ctrlSubdevFIFODisableChannels(fi *frontendIoctlState, ioctlParams *nvgpu.NVOS54Parameters) (uintptr, error) {
   690  	var ctrlParams nvgpu.NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS
   691  	if ctrlParams.SizeBytes() != int(ioctlParams.ParamsSize) {
   692  		return 0, linuxerr.EINVAL
   693  	}
   694  	if _, err := ctrlParams.CopyIn(fi.t, addrFromP64(ioctlParams.Params)); err != nil {
   695  		return 0, err
   696  	}
   697  	// This pointer must be NULL; see
   698  	// src/nvidia/src/kernel/gpu/fifo/kernel_fifo_ctrl.c:subdeviceCtrlCmdFifoDisableChannels_IMPL().
   699  	// Consequently, we don't need to translate it, but we do want to ensure
   700  	// that it actually is NULL.
   701  	if ctrlParams.PRunlistPreemptEvent != 0 {
   702  		return 0, linuxerr.EINVAL
   703  	}
   704  	n, err := rmControlInvoke(fi, ioctlParams, &ctrlParams)
   705  	if err != nil {
   706  		return n, err
   707  	}
   708  	if _, err := ctrlParams.CopyOut(fi.t, addrFromP64(ioctlParams.Params)); err != nil {
   709  		return n, err
   710  	}
   711  	return n, nil
   712  }
   713  
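        // rmAlloc implements NV_ESC_RM_ALLOC. It accepts either NVOS21 or NVOS64
        // parameters (distinguished by size), normalizes them to
        // NVOS64Parameters, and dispatches on the allocation class in HClass.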
   714  func rmAlloc(fi *frontendIoctlState) (uintptr, error) {
   715  	// Copy in parameters and convert to NVOS64Parameters.
   716  	var (
   717  		ioctlParams nvgpu.NVOS64Parameters
   718  		isNVOS64    bool
   719  	)
   720  	switch fi.ioctlParamsSize {
   721  	case nvgpu.SizeofNVOS21Parameters:
   722  		var buf nvgpu.NVOS21Parameters
   723  		if _, err := buf.CopyIn(fi.t, fi.ioctlParamsAddr); err != nil {
   724  			return 0, err
   725  		}
   726  		ioctlParams = nvgpu.NVOS64Parameters{
   727  			HRoot:         buf.HRoot,
   728  			HObjectParent: buf.HObjectParent,
   729  			HObjectNew:    buf.HObjectNew,
   730  			HClass:        buf.HClass,
   731  			PAllocParms:   buf.PAllocParms,
   732  			Status:        buf.Status,
   733  		}
   734  	case nvgpu.SizeofNVOS64Parameters:
   735  		if _, err := ioctlParams.CopyIn(fi.t, fi.ioctlParamsAddr); err != nil {
   736  			return 0, err
   737  		}
   738  		isNVOS64 = true
   739  	default:
   740  		return 0, linuxerr.EINVAL
   741  	}
   742  
   743  	// hClass determines the type of pAllocParms.
   744  	if log.IsLogging(log.Debug) {
   745  		fi.ctx.Debugf("nvproxy: allocation class %#08x", ioctlParams.HClass)
   746  	}
   747  	// Implementors:
   748  	// - To map hClass to a symbol, look in
   749  	// src/nvidia/generated/g_allclasses.h.
   750  	// - See src/nvidia/src/kernel/rmapi/resource_list.h for table mapping class
   751  	// ("External Class") to the type of pAllocParms ("Alloc Param Info") and
   752  	// the class whose constructor interprets it ("Internal Class").
   753  	// - Add symbol and parameter type definitions to //pkg/abi/nvgpu.
   754  	// - Add handling below.
   755  	switch ioctlParams.HClass {
   756  	case nvgpu.NV01_ROOT, nvgpu.NV01_ROOT_NON_PRIV, nvgpu.NV01_ROOT_CLIENT:
   757  		return rmAllocSimple[nvgpu.Handle](fi, &ioctlParams, isNVOS64)
   758  	case nvgpu.NV01_EVENT_OS_EVENT:
   759  		return rmAllocEventOSEvent(fi, &ioctlParams, isNVOS64)
   760  	case nvgpu.NV01_DEVICE_0:
   761  		return rmAllocSimple[nvgpu.NV0080_ALLOC_PARAMETERS](fi, &ioctlParams, isNVOS64)
   762  	case nvgpu.NV20_SUBDEVICE_0:
   763  		return rmAllocSimple[nvgpu.NV2080_ALLOC_PARAMETERS](fi, &ioctlParams, isNVOS64)
   764  	case nvgpu.NV50_THIRD_PARTY_P2P:
   765  		return rmAllocSimple[nvgpu.NV503C_ALLOC_PARAMETERS](fi, &ioctlParams, isNVOS64)
   766  	case nvgpu.GT200_DEBUGGER:
   767  		return rmAllocSimple[nvgpu.NV83DE_ALLOC_PARAMETERS](fi, &ioctlParams, isNVOS64)
   768  	case nvgpu.FERMI_CONTEXT_SHARE_A:
   769  		return rmAllocSimple[nvgpu.NV_CTXSHARE_ALLOCATION_PARAMETERS](fi, &ioctlParams, isNVOS64)
   770  	case nvgpu.FERMI_VASPACE_A:
   771  		return rmAllocSimple[nvgpu.NV_VASPACE_ALLOCATION_PARAMETERS](fi, &ioctlParams, isNVOS64)
   772  	case nvgpu.KEPLER_CHANNEL_GROUP_A:
   773  		return rmAllocSimple[nvgpu.NV_CHANNEL_GROUP_ALLOCATION_PARAMETERS](fi, &ioctlParams, isNVOS64)
   774  	case nvgpu.VOLTA_CHANNEL_GPFIFO_A, nvgpu.TURING_CHANNEL_GPFIFO_A, nvgpu.AMPERE_CHANNEL_GPFIFO_A:
   775  		return rmAllocSimple[nvgpu.NV_CHANNEL_ALLOC_PARAMS](fi, &ioctlParams, isNVOS64)
   776  	case nvgpu.VOLTA_DMA_COPY_A, nvgpu.TURING_DMA_COPY_A, nvgpu.AMPERE_DMA_COPY_A, nvgpu.AMPERE_DMA_COPY_B, nvgpu.HOPPER_DMA_COPY_A:
   777  		return rmAllocSimple[nvgpu.NVB0B5_ALLOCATION_PARAMETERS](fi, &ioctlParams, isNVOS64)
   778  	case nvgpu.VOLTA_COMPUTE_A, nvgpu.TURING_COMPUTE_A, nvgpu.AMPERE_COMPUTE_A, nvgpu.AMPERE_COMPUTE_B, nvgpu.ADA_COMPUTE_A, nvgpu.HOPPER_COMPUTE_A:
   779  		return rmAllocSimple[nvgpu.NV_GR_ALLOCATION_PARAMETERS](fi, &ioctlParams, isNVOS64)
   780  	case nvgpu.HOPPER_USERMODE_A:
   781  		return rmAllocSimple[nvgpu.NV_HOPPER_USERMODE_A_PARAMS](fi, &ioctlParams, isNVOS64)
   782  	case nvgpu.GF100_SUBDEVICE_MASTER, nvgpu.VOLTA_USERMODE_A, nvgpu.TURING_USERMODE_A:
   783  		return rmAllocNoParams(fi, &ioctlParams, isNVOS64)
   784  	default:
   785  		fi.ctx.Warningf("nvproxy: unknown allocation class %#08x", ioctlParams.HClass)
   786  		return 0, linuxerr.EINVAL
   787  	}
   788  }
   789  
   790  // Unlike frontendIoctlSimple and rmControlSimple, rmAllocSimple requires the
   791  // parameter type since the parameter's size is otherwise unknown.
   792  func rmAllocSimple[Params any, PParams marshalPtr[Params]](fi *frontendIoctlState, ioctlParams *nvgpu.NVOS64Parameters, isNVOS64 bool) (uintptr, error) {
   793  	if ioctlParams.PAllocParms == 0 {
   794  		return rmAllocInvoke[byte](fi, ioctlParams, nil, isNVOS64)
   795  	}
   796  
   797  	var allocParams Params
   798  	if _, err := (PParams)(&allocParams).CopyIn(fi.t, addrFromP64(ioctlParams.PAllocParms)); err != nil {
   799  		return 0, err
   800  	}
   801  	n, err := rmAllocInvoke(fi, ioctlParams, &allocParams, isNVOS64)
   802  	if err != nil {
   803  		return n, err
   804  	}
   805  	if _, err := (PParams)(&allocParams).CopyOut(fi.t, addrFromP64(ioctlParams.PAllocParms)); err != nil {
   806  		return n, err
   807  	}
   808  	return n, nil
   809  }
   810  
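        // rmAllocNoParams handles allocation classes whose constructors take no
        // allocation parameters.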
   811  func rmAllocNoParams(fi *frontendIoctlState, ioctlParams *nvgpu.NVOS64Parameters, isNVOS64 bool) (uintptr, error) {
   812  	return rmAllocInvoke[byte](fi, ioctlParams, nil, isNVOS64)
   813  }
   814  
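        // rmAllocEventOSEvent handles allocation of class NV01_EVENT_OS_EVENT,
        // translating the event FD carried in NV0005_ALLOC_PARAMETERS.Data to the
        // corresponding host FD and restoring the application FD on copy-out.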
   815  func rmAllocEventOSEvent(fi *frontendIoctlState, ioctlParams *nvgpu.NVOS64Parameters, isNVOS64 bool) (uintptr, error) {
   816  	var allocParams nvgpu.NV0005_ALLOC_PARAMETERS
   817  	if _, err := allocParams.CopyIn(fi.t, addrFromP64(ioctlParams.PAllocParms)); err != nil {
   818  		return 0, err
   819  	}
   820  	eventFileGeneric, _ := fi.t.FDTable().Get(int32(allocParams.Data))
   821  	if eventFileGeneric == nil {
   822  		return 0, linuxerr.EINVAL
   823  	}
   824  	defer eventFileGeneric.DecRef(fi.ctx)
   825  	eventFile, ok := eventFileGeneric.Impl().(*frontendFD)
   826  	if !ok {
   827  		return 0, linuxerr.EINVAL
   828  	}
   829  	sentryAllocParams := allocParams
   830  	sentryAllocParams.Data = nvgpu.P64(uint64(eventFile.hostFD))
   831  
   832  	n, err := rmAllocInvoke(fi, ioctlParams, &sentryAllocParams, isNVOS64)
   833  	if err != nil {
   834  		return n, err
   835  	}
   836  
   837  	outAllocParams := sentryAllocParams
   838  	outAllocParams.Data = allocParams.Data
   839  	if _, err := outAllocParams.CopyOut(fi.t, addrFromP64(ioctlParams.PAllocParms)); err != nil {
   840  		return n, err
   841  	}
   842  	return n, nil
   843  }
   844  
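        // rmVidHeapControl implements NV_ESC_RM_VID_HEAP_CONTROL, dispatching on
        // Function.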
   845  func rmVidHeapControl(fi *frontendIoctlState) (uintptr, error) {
   846  	var ioctlParams nvgpu.NVOS32Parameters
   847  	if fi.ioctlParamsSize != nvgpu.SizeofNVOS32Parameters {
   848  		return 0, linuxerr.EINVAL
   849  	}
   850  	if _, err := ioctlParams.CopyIn(fi.t, fi.ioctlParamsAddr); err != nil {
   851  		return 0, err
   852  	}
   853  
   854  	// Function determines the type of Data.
   855  	if log.IsLogging(log.Debug) {
   856  		fi.ctx.Debugf("nvproxy: VID_HEAP_CONTROL function %d", ioctlParams.Function)
   857  	}
   858  	// See
   859  	// src/nvidia/interface/deprecated/rmapi_deprecated_vidheapctrl.c:rmVidHeapControlTable
   860  	// for implementation.
   861  	switch ioctlParams.Function {
   862  	case nvgpu.NVOS32_FUNCTION_ALLOC_SIZE:
   863  		return rmVidHeapControlAllocSize(fi, &ioctlParams)
   864  	default:
   865  		fi.ctx.Warningf("nvproxy: unknown VID_HEAP_CONTROL function %d", ioctlParams.Function)
   866  		return 0, linuxerr.EINVAL
   867  	}
   868  }
   869  
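        // rmMapMemory implements NV_ESC_RM_MAP_MEMORY. It translates the mapping
        // FD to its host FD, ensures that each frontend FD is used for at most
        // one mapping, and restores the application FD on copy-out.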
   870  func rmMapMemory(fi *frontendIoctlState) (uintptr, error) {
   871  	var ioctlParams nvgpu.IoctlNVOS33ParametersWithFD
   872  	if fi.ioctlParamsSize != nvgpu.SizeofIoctlNVOS33ParametersWithFD {
   873  		return 0, linuxerr.EINVAL
   874  	}
   875  	if _, err := ioctlParams.CopyIn(fi.t, fi.ioctlParamsAddr); err != nil {
   876  		return 0, err
   877  	}
   878  	mapFileGeneric, _ := fi.t.FDTable().Get(ioctlParams.FD)
   879  	if mapFileGeneric == nil {
   880  		return 0, linuxerr.EINVAL
   881  	}
   882  	defer mapFileGeneric.DecRef(fi.ctx)
   883  	mapFile, ok := mapFileGeneric.Impl().(*frontendFD)
   884  	if !ok {
   885  		return 0, linuxerr.EINVAL
   886  	}
   887  	if mapFile.haveMmapContext.Load() || !mapFile.haveMmapContext.CompareAndSwap(false, true) {
   888  		fi.ctx.Warningf("nvproxy: attempted to reuse FD %d for NV_ESC_RM_MAP_MEMORY", ioctlParams.FD)
   889  		return 0, linuxerr.EINVAL
   890  	}
   891  	sentryIoctlParams := ioctlParams
   892  	sentryIoctlParams.FD = mapFile.hostFD
   893  
   894  	n, err := frontendIoctlInvoke(fi, &sentryIoctlParams)
   895  	if err != nil {
   896  		return n, err
   897  	}
   898  
   899  	outIoctlParams := sentryIoctlParams
   900  	outIoctlParams.FD = ioctlParams.FD
   901  	if _, err := outIoctlParams.CopyOut(fi.t, fi.ioctlParamsAddr); err != nil {
   902  		return n, err
   903  	}
   904  
   905  	return n, nil
   906  }