gorgonia.org/gorgonia@v0.9.17/device_cuda.go (about)

     1  // +build cuda
     2  
     3  package gorgonia
     4  
     5  import (
     6  	"gorgonia.org/cu"
     7  	"gorgonia.org/tensor"
     8  )
     9  
// Device represents the device where the code will be executed on. It can either be a GPU or CPU
type Device cu.Device

// CPU is the default the graph will be executed on.
// NOTE(review): defined as Device(cu.CPU) — presumably a sentinel value
// distinct from all real GPU ordinals; confirm against the cu package.
const CPU = Device(cu.CPU)
    15  
    16  // String implements fmt.Stringer and runtime.Stringer
    17  func (d Device) String() string { return cu.Device(d).String() }
    18  
    19  // Alloc allocates memory on the device. If the device is CPU, the allocations is a NO-OP because Go handles all the allocations in the CPU
    20  func (d Device) Alloc(extern External, size int64) (tensor.Memory, error) {
    21  	if d == CPU {
    22  		cudaLogf("device is CPU")
    23  		return nil, nil // well there should be an error because this wouldn't be called
    24  	}
    25  
    26  	machine := extern.(CUDAMachine)
    27  	ctxes := machine.Contexts()
    28  	if len(ctxes) == 0 {
    29  		cudaLogf("allocate nothing")
    30  		return nil, nil
    31  	}
    32  	ctx := ctxes[int(d)]
    33  
    34  	cudaLogf("calling ctx.MemAlloc(%d)", size)
    35  	return ctx.MemAlloc(size)
    36  }
    37  
    38  // Free the memory of the device
    39  func (d Device) Free(extern External, mem tensor.Memory, size int64) (err error) {
    40  	var devptr cu.DevicePtr
    41  	var ok bool
    42  	if devptr, ok = mem.(cu.DevicePtr); !ok {
    43  		return nil
    44  	}
    45  
    46  	machine := extern.(CUDAMachine)
    47  	machine.Put(d, devptr, size)
    48  
    49  	// FUTURE: actually free memory if there ain't enough to go round
    50  
    51  	// ctx := machine.Contexts()[int(d)]
    52  	// cudaLogf("MemFree %v", devptr)
    53  	// ctx.MemFree(devptr)
    54  	return nil
    55  }