github.com/iceber/iouring-go@v0.0.0-20230403020409-002cfd2e2a90/types.go (about)

     1  //go:build linux
     2  // +build linux
     3  
     4  package iouring
     5  
     6  import (
     7  	"reflect"
     8  	"sync/atomic"
     9  	"unsafe"
    10  
    11  	iouring_syscall "github.com/iceber/iouring-go/syscall"
    12  )
    13  
// iouring operations
//
// These values mirror the kernel's IORING_OP_* opcode numbers
// (include/uapi/linux/io_uring.h). They are assigned by iota, so the
// declaration order IS the kernel ABI: never reorder entries or insert
// new ones anywhere but at the end.
const (
	OpNop uint8 = iota
	OpReadv
	OpWritev
	OpFsync
	OpReadFixed
	OpWriteFixed
	OpPollAdd
	OpPollRemove
	OpSyncFileRange
	OpSendmsg
	OpRecvmsg
	OpTimeout
	OpTimeoutRemove
	OpAccept
	OpAsyncCancel
	OpLinkTimeout
	OpConnect
	OpFallocate
	OpOpenat
	OpClose
	OpFilesUpdate
	OpStatx
	OpRead
	OpWrite
	OpFadvise
	OpMadvise
	OpSend
	OpRecv
	OpOpenat2
	OpEpollCtl
	OpSplice
	OpProvideBuffers
	OpRemoveBuffers
	OpTee
	OpShutdown
	OpRenameat
	OpUnlinkat
	OpMkdirat
	OpSymlinkat
	OpLinkat
)
    57  
// cancel operation return value
const (
	// RequestCanceledSuccessfully: the request was found and canceled.
	RequestCanceledSuccessfully = 0
	// RequestMaybeCanceled: cancellation was requested but is not
	// guaranteed — presumably the request may already be executing.
	// NOTE(review): semantics inferred from the names; confirm at call sites.
	RequestMaybeCanceled = 1
)
    63  
// timeout operation return value
const (
	// TimeoutExpiration: the timeout completed because it expired.
	TimeoutExpiration = 0
	// CountCompletion: the timeout completed because the requested number
	// of completions was reached.
	// NOTE(review): semantics inferred from the names; confirm at call sites.
	CountCompletion = 1
)
    69  
    70  var _zero uintptr
    71  
// SubmissionQueueRing abstracts the mmap'd submission-queue-entry array so
// that the 64-byte and 128-byte (IORING_SETUP_SQE128) SQE layouts can be
// used behind a single API.
type SubmissionQueueRing interface {
	// isActive reports whether a queue has been assigned via assignQueue.
	isActive() bool
	// entrySz returns the size in bytes of a single SQE.
	entrySz() uint32
	// ringSz returns the total byte size of the mapped SQE array.
	ringSz() uint32
	// assignQueue points the ring at the mmap'd memory at ptr holding len entries.
	assignQueue(ptr uintptr, len int)
	// mappedPtr returns the address of the first SQE of the mapped array.
	mappedPtr() uintptr
	// index returns the SQE at the given ring slot.
	index(index uint32) iouring_syscall.SubmissionQueueEntry
}
    80  
    81  func makeSubmissionQueueRing(flags uint32) SubmissionQueueRing {
    82  	if flags&iouring_syscall.IORING_SETUP_SQE128 == 0 {
    83  		return new(SubmissionQueueRing64)
    84  	} else {
    85  		return new(SubmissionQueueRing128)
    86  	}
    87  }
    88  
// SubmissionQueueRing64 is the SubmissionQueueRing implementation for the
// classic 64-byte SQE layout.
type SubmissionQueueRing64 struct {
	queue []iouring_syscall.SubmissionQueueEntry64 // view over the mmap'd SQE array
}
    92  
    93  func (ring *SubmissionQueueRing64) isActive() bool {
    94  	return ring.queue != nil && len(ring.queue) > 0
    95  }
    96  
    97  func (ring *SubmissionQueueRing64) entrySz() uint32 {
    98  	return uint32(unsafe.Sizeof(iouring_syscall.SubmissionQueueEntry64{}))
    99  }
   100  
   101  func (ring *SubmissionQueueRing64) ringSz() uint32 {
   102  	return uint32(len(ring.queue)) * ring.entrySz()
   103  }
   104  
   105  func (ring *SubmissionQueueRing64) assignQueue(ptr uintptr, len int) {
   106  	ring.queue = *(*[]iouring_syscall.SubmissionQueueEntry64)(
   107  		unsafe.Pointer(&reflect.SliceHeader{
   108  			Data: ptr,
   109  			Len:  len,
   110  			Cap:  len,
   111  		}))
   112  }
   113  
   114  func (ring *SubmissionQueueRing64) mappedPtr() uintptr {
   115  	return uintptr(unsafe.Pointer(&ring.queue[0]))
   116  }
   117  
   118  func (ring *SubmissionQueueRing64) index(index uint32) iouring_syscall.SubmissionQueueEntry {
   119  	return &ring.queue[index]
   120  }
   121  
// SubmissionQueueRing128 is the SubmissionQueueRing implementation for the
// extended 128-byte SQE layout (IORING_SETUP_SQE128).
type SubmissionQueueRing128 struct {
	queue []iouring_syscall.SubmissionQueueEntry128 // view over the mmap'd SQE array
}
   125  
   126  func (ring *SubmissionQueueRing128) isActive() bool {
   127  	return ring.queue != nil && len(ring.queue) > 0
   128  }
   129  
   130  func (ring *SubmissionQueueRing128) entrySz() uint32 {
   131  	return uint32(unsafe.Sizeof(iouring_syscall.SubmissionQueueEntry128{}))
   132  }
   133  
   134  func (ring *SubmissionQueueRing128) ringSz() uint32 {
   135  	return uint32(len(ring.queue)) * ring.entrySz()
   136  }
   137  
   138  func (ring *SubmissionQueueRing128) assignQueue(ptr uintptr, len int) {
   139  	ring.queue = *(*[]iouring_syscall.SubmissionQueueEntry128)(
   140  		unsafe.Pointer(&reflect.SliceHeader{
   141  			Data: ptr,
   142  			Len:  len,
   143  			Cap:  len,
   144  		}))
   145  }
   146  
   147  func (ring *SubmissionQueueRing128) mappedPtr() uintptr {
   148  	return uintptr(unsafe.Pointer(&ring.queue[0]))
   149  }
   150  
   151  func (ring *SubmissionQueueRing128) index(index uint32) iouring_syscall.SubmissionQueueEntry {
   152  	return &ring.queue[index]
   153  }
   154  
// SubmissionQueue wraps the mmap'd io_uring submission queue: the shared
// kernel/user ring header fields plus the SQE array and the application's
// local staging cursors.
type SubmissionQueue struct {
	ptr  uintptr // base address of the mmap'd SQ ring region
	size uint32  // byte length of the mmap'd SQ ring region

	head    *uint32 // shared ring head (read atomically in getSQEntry)
	tail    *uint32 // shared ring tail (published atomically in flush)
	mask    *uint32 // ring-size-1 mask used to wrap indexes into the ring
	entries *uint32 // specifies the number of submission queue ring entries
	flags   *uint32 // used by the kernel to communicate stat information to the application
	dropped *uint32 // incremented for each invalid submission queue entry encountered in the ring buffer

	array []uint32            // indirection array mapping ring slots to SQE indexes (filled by flush)
	sqes  SubmissionQueueRing // submission queue ring

	sqeHead uint32 // local head: first SQE handed out but not yet flushed
	sqeTail uint32 // local tail: next SQE slot to hand out via getSQEntry
}
   172  
   173  func (queue *SubmissionQueue) getSQEntry() iouring_syscall.SubmissionQueueEntry {
   174  	head := atomic.LoadUint32(queue.head)
   175  	next := queue.sqeTail + 1
   176  
   177  	if (next - head) <= *queue.entries {
   178  		sqe := queue.sqes.index(queue.sqeTail & *queue.mask)
   179  		queue.sqeTail = next
   180  		sqe.Reset()
   181  		return sqe
   182  	}
   183  	return nil
   184  }
   185  
   186  func (queue *SubmissionQueue) fallback(i uint32) {
   187  	queue.sqeTail -= i
   188  }
   189  
   190  func (queue *SubmissionQueue) cqOverflow() bool {
   191  	return (atomic.LoadUint32(queue.flags) & iouring_syscall.IORING_SQ_CQ_OVERFLOW) != 0
   192  }
   193  
   194  func (queue *SubmissionQueue) needWakeup() bool {
   195  	return (atomic.LoadUint32(queue.flags) & iouring_syscall.IORING_SQ_NEED_WAKEUP) != 0
   196  }
   197  
// sync internal status with kernel ring state on the SQ side
// return the number of pending items in the SQ ring, for the shared ring.
func (queue *SubmissionQueue) flush() int {
	// Nothing staged locally: the pending count is whatever the kernel
	// has not yet consumed from the shared ring.
	if queue.sqeHead == queue.sqeTail {
		return int(*queue.tail - *queue.head)
	}

	// Publish each staged SQE's index into the indirection array, one
	// ring slot per staged entry.
	tail := *queue.tail
	for toSubmit := queue.sqeTail - queue.sqeHead; toSubmit > 0; toSubmit-- {
		queue.array[tail&*queue.mask] = queue.sqeHead & *queue.mask
		tail++
		queue.sqeHead++
	}

	// Atomic store so the kernel observes the array writes above no later
	// than the tail advance.
	atomic.StoreUint32(queue.tail, tail)
	return int(tail - *queue.head)
}
   215  
// CompletionQueueRing abstracts the mmap'd completion-queue-event array so
// that the 16-byte and 32-byte (IORING_SETUP_CQE32) CQE layouts can be
// used behind a single API.
type CompletionQueueRing interface {
	// isActive reports whether a queue has been assigned via assignQueue.
	isActive() bool
	// entrySz returns the size in bytes of a single CQE.
	entrySz() uint32
	// ringSz returns the total byte size of the mapped CQE array.
	ringSz() uint32
	// assignQueue points the ring at the mmap'd memory at ptr holding len entries.
	assignQueue(ptr uintptr, len int)
	// mappedPtr returns the address of the first CQE of the mapped array.
	mappedPtr() uintptr
	// index returns the CQE at the given ring slot.
	index(index uint32) iouring_syscall.CompletionQueueEvent
}
   224  
   225  func makeCompletionQueueRing(flags uint32) CompletionQueueRing {
   226  	if flags&iouring_syscall.IORING_SETUP_CQE32 == 0 {
   227  		return new(CompletionQueueRing16)
   228  	} else {
   229  		return new(CompletionQueueRing32)
   230  	}
   231  }
   232  
// CompletionQueueRing16 is the CompletionQueueRing implementation for the
// classic 16-byte CQE layout.
type CompletionQueueRing16 struct {
	queue []iouring_syscall.CompletionQueueEvent16 // view over the mmap'd CQE array
}
   236  
   237  func (ring *CompletionQueueRing16) isActive() bool {
   238  	return ring.queue != nil && len(ring.queue) > 0
   239  }
   240  
   241  func (ring *CompletionQueueRing16) entrySz() uint32 {
   242  	return uint32(unsafe.Sizeof(iouring_syscall.CompletionQueueEvent16{}))
   243  }
   244  
   245  func (ring *CompletionQueueRing16) ringSz() uint32 {
   246  	return uint32(len(ring.queue)) * ring.entrySz()
   247  }
   248  
   249  func (ring *CompletionQueueRing16) assignQueue(ptr uintptr, len int) {
   250  	ring.queue = *(*[]iouring_syscall.CompletionQueueEvent16)(
   251  		unsafe.Pointer(&reflect.SliceHeader{
   252  			Data: ptr,
   253  			Len:  len,
   254  			Cap:  len,
   255  		}))
   256  }
   257  
   258  func (ring *CompletionQueueRing16) mappedPtr() uintptr {
   259  	return uintptr(unsafe.Pointer(&ring.queue[0]))
   260  }
   261  
   262  func (ring *CompletionQueueRing16) index(index uint32) iouring_syscall.CompletionQueueEvent {
   263  	return &ring.queue[index]
   264  }
   265  
// CompletionQueueRing32 is the CompletionQueueRing implementation for the
// extended 32-byte CQE layout (IORING_SETUP_CQE32).
type CompletionQueueRing32 struct {
	queue []iouring_syscall.CompletionQueueEvent32 // view over the mmap'd CQE array
}
   269  
   270  func (ring *CompletionQueueRing32) isActive() bool {
   271  	return ring.queue != nil && len(ring.queue) > 0
   272  }
   273  
   274  func (ring *CompletionQueueRing32) entrySz() uint32 {
   275  	return uint32(unsafe.Sizeof(iouring_syscall.CompletionQueueEvent32{}))
   276  }
   277  
   278  func (ring *CompletionQueueRing32) ringSz() uint32 {
   279  	return uint32(len(ring.queue)) * ring.entrySz()
   280  }
   281  
   282  func (ring *CompletionQueueRing32) assignQueue(ptr uintptr, len int) {
   283  	ring.queue = *(*[]iouring_syscall.CompletionQueueEvent32)(
   284  		unsafe.Pointer(&reflect.SliceHeader{
   285  			Data: ptr,
   286  			Len:  len,
   287  			Cap:  len,
   288  		}))
   289  }
   290  
   291  func (ring *CompletionQueueRing32) mappedPtr() uintptr {
   292  	return uintptr(unsafe.Pointer(&ring.queue[0]))
   293  }
   294  
   295  func (ring *CompletionQueueRing32) index(index uint32) iouring_syscall.CompletionQueueEvent {
   296  	return &ring.queue[index]
   297  }
   298  
// CompletionQueue wraps the mmap'd io_uring completion queue: the shared
// kernel/user ring header fields plus the CQE array.
type CompletionQueue struct {
	ptr  uintptr // base address of the mmap'd CQ ring region
	size uint32  // byte length of the mmap'd CQ ring region

	head     *uint32 // shared ring head (advanced by advance as CQEs are consumed)
	tail     *uint32 // shared ring tail (read atomically in peek)
	mask     *uint32 // ring-size-1 mask used to wrap indexes into the ring
	entries  *uint32 // number of CQE slots in the ring
	flags    *uint32 // shared CQ ring flags
	overflow *uint32 // presumably counts CQEs dropped on ring overflow — confirm against io_uring docs

	cqes CompletionQueueRing // completion queue ring (16- or 32-byte CQEs)
}
   312  
   313  func (queue *CompletionQueue) peek() (cqe iouring_syscall.CompletionQueueEvent) {
   314  	head := *queue.head
   315  	if head != atomic.LoadUint32(queue.tail) {
   316  		//	if head < atomic.LoadUint32(queue.tail) {
   317  		cqe = queue.cqes.index(head & *queue.mask)
   318  	}
   319  	return
   320  }
   321  
   322  func (queue *CompletionQueue) advance(num uint32) {
   323  	if num != 0 {
   324  		atomic.AddUint32(queue.head, num)
   325  	}
   326  }