github.com/ii64/gouring@v0.4.1/queue_test.go (about)

     1  package gouring
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"os"
     7  	"reflect"
     8  	"runtime"
     9  	"sync"
    10  	"syscall"
    11  	"testing"
    12  	"unsafe"
    13  
    14  	"github.com/davecgh/go-spew/spew"
    15  	"github.com/stretchr/testify/assert"
    16  	"github.com/stretchr/testify/require"
    17  )
    18  
    19  func TestRingQueueGetSQE(t *testing.T) {
    20  	h := testNewIoUring(t, 256, 0)
    21  	defer h.Close()
    22  
    23  	assert.NotEqual(t, 0, h.RingFd)
    24  	assert.NotEqual(t, 0, h.EnterRingFd)
    25  
    26  	sqe := h.io_uring_get_sqe()
    27  	assert.NotNil(t, sqe)
    28  	fmt.Printf("%+#v\n", sqe)
    29  }
    30  
    31  // func TestRingSqpollOnly(t *testing.T) {
    32  // 	h := testNewIoUringWithParams(t, 256, &IoUringParams{
    33  // 		Flags:        IORING_SETUP_SQPOLL,
    34  // 		SqThreadCpu:  10,
    35  // 		SqThreadIdle: 10_000, // ms
    36  // 	})
    37  // 	for i := 0; i < 10; i++ {
    38  // 		sqe := h.GetSqe()
    39  // 		PrepNop(sqe)
    40  // 	}
    41  // 	h.Submit()
    42  // 	var cqe *IoUringCqe
    43  
    44  // 	for {
    45  // 		h.WaitCqe(&cqe)
    46  // 		spew.Dump(cqe)
    47  // 		h.SeenCqe(cqe)
    48  // 	}
    49  // }
    50  
    51  func TestRingQueueOrderRetrieval(t *testing.T) {
    52  	const entries = 256
    53  	h := testNewIoUring(t, entries, 0)
    54  	defer h.Close()
    55  
    56  	var i uint64
    57  	for i = 0; i < entries; i++ {
    58  		sqe := h.GetSqe()
    59  		PrepNop(sqe)
    60  		sqe.UserData.SetUint64(i)
    61  		sqe.Flags |= IOSQE_IO_LINK // ordered
    62  	}
    63  
    64  	submitted, err := h.SubmitAndWait(entries)
    65  	require.NoError(t, err)
    66  	require.Equal(t, int(entries), submitted)
    67  
    68  	var cqe *IoUringCqe
    69  	for i = 0; i < entries; i++ {
    70  		err = h.WaitCqe(&cqe)
    71  		require.NoError(t, err)
    72  		require.NotNil(t, cqe)
    73  		require.Equal(t, i, cqe.UserData.GetUint64())
    74  		h.SeenCqe(cqe)
    75  	}
    76  
    77  }
    78  
    79  func TestRingQueueSubmitSingleConsumer(t *testing.T) {
    80  	type opt struct {
    81  		name     string
    82  		jobCount int
    83  
    84  		entries uint32
    85  		p       IoUringParams
    86  	}
    87  	ts := []opt{
    88  		{"def-1-256", 1, 256, IoUringParams{}},
    89  		{"def-128-256", 256, 256, IoUringParams{}}, // passed 128
    90  		{"def-128-256", 256, 256, IoUringParams{}}, // passed 128
    91  		{"def-8-256", 8, 256, IoUringParams{}},
    92  		{"def-16-256", 16, 256, IoUringParams{}},
    93  		{"def-32-256", 32, 256, IoUringParams{}},
    94  		{"def-64-256", 64, 256, IoUringParams{}},
    95  		{"def-128-256", 128, 256, IoUringParams{}},
    96  		{"def-128+1-256", 128 + 1, 256, IoUringParams{}}, // passed 128
    97  		{"def-128+2-256", 128 + 2, 256, IoUringParams{}}, // passed 128
    98  		{"def-256-256", 256, 256, IoUringParams{}},
    99  
   100  		{"sqpoll-127-256", 127, 256, IoUringParams{Flags: IORING_SETUP_SQPOLL, SqThreadCpu: 4, SqThreadIdle: 10_000}},
   101  		{"sqpoll-128+2-256", 128 + 2, 256, IoUringParams{Flags: IORING_SETUP_SQPOLL, SqThreadCpu: 4, SqThreadIdle: 10_000}},
   102  		{"sqpoll-256-256", 256, 256, IoUringParams{Flags: IORING_SETUP_SQPOLL, SqThreadCpu: 4, SqThreadIdle: 10_000}},
   103  
   104  		// we can have other test for queue overflow.
   105  	}
   106  	for _, tc := range ts {
   107  
   108  		t.Run(tc.name, func(t *testing.T) {
   109  			ftmp, err := os.CreateTemp(os.TempDir(), "test_iouring_queue_sc_*")
   110  			require.NoError(t, err)
   111  			defer ftmp.Close()
   112  			fdTemp := ftmp.Fd()
   113  
   114  			consumer := func(h *IoUring, ctx context.Context, wg *sync.WaitGroup) {
   115  				var cqe *IoUringCqe
   116  				var err error
   117  				defer func() {
   118  					rec := recover()
   119  					if rec != nil {
   120  						spew.Dump(cqe)
   121  					}
   122  				}()
   123  				for ctx.Err() == nil {
   124  					err = h.io_uring_wait_cqe(&cqe)
   125  					if err == syscall.EINTR {
   126  						// ignore INTR
   127  						continue
   128  					}
   129  					if err != nil {
   130  						panic(err)
   131  					}
   132  					if cqe.Res < 0 {
   133  						panic(syscall.Errno(-cqe.Res))
   134  					}
   135  					// cqe data check
   136  					if int(cqe.Res) < len("data ") {
   137  						panic(fmt.Sprintf("write less that it should"))
   138  					}
   139  					if (cqe.UserData.GetUintptr()>>(8<<2))&0xff == 0x00 {
   140  						panic(fmt.Sprintf("cqe userdata should contain canonical address got %+#v", cqe.UserData))
   141  					}
   142  
   143  					bufPtr := (*[]byte)(cqe.UserData.GetUnsafe())
   144  					buf := *bufPtr // deref check
   145  					_ = buf
   146  					// fmt.Printf("%+#v %s", buf, buf)
   147  
   148  					h.io_uring_cqe_seen(cqe) // necessary
   149  					wg.Done()
   150  				}
   151  			}
   152  
   153  			submit := func(t *testing.T, opt *IoUringParams, h *IoUring, expectedSubmitCount int) {
   154  				submitted, err := h.io_uringn_submit()
   155  				assert.NoError(t, err)
   156  				if opt.Flags&IORING_SETUP_SQPOLL == 0 {
   157  					assert.Equal(t, expectedSubmitCount, submitted)
   158  				}
   159  			}
   160  
   161  			t.Run("submit_single", func(t *testing.T) {
   162  				var wg sync.WaitGroup
   163  
   164  				h := testNewIoUringWithParams(t, 256, &tc.p)
   165  				defer h.Close()
   166  
   167  				wg.Add(tc.jobCount)
   168  				ctx, cancel := context.WithCancel(context.Background())
   169  				defer cancel()
   170  				go consumer(h, ctx, &wg)
   171  
   172  				for i := 0; i < tc.jobCount; i++ {
   173  					var sqe *IoUringSqe
   174  					for { // sqe could be nil if SQ is already full so we spin until we got one
   175  						sqe = h.io_uring_get_sqe()
   176  						if sqe != nil {
   177  							break
   178  						}
   179  					}
   180  
   181  					var buf = new([]byte)
   182  					*buf = append(*buf, []byte(fmt.Sprintf("data %d\n", i))...)
   183  					reflect.ValueOf(buf) // escape the `buf`
   184  
   185  					PrepWrite(sqe, int(fdTemp), &(*buf)[0], len((*buf)), 0)
   186  					runtime.KeepAlive(buf)
   187  					sqe.UserData.SetUnsafe(unsafe.Pointer(buf))
   188  
   189  					// submit
   190  					submit(t, &tc.p, h, 1)
   191  				}
   192  				runtime.GC()
   193  				wg.Wait()
   194  			})
   195  
   196  			t.Run("submit_bulk", func(t *testing.T) {
   197  				var wg sync.WaitGroup
   198  
   199  				h := testNewIoUringWithParams(t, 256, &tc.p)
   200  				defer h.Close()
   201  
   202  				wg.Add(tc.jobCount)
   203  				ctx, cancel := context.WithCancel(context.Background())
   204  				defer cancel()
   205  				go consumer(h, ctx, &wg)
   206  
   207  				for i := 0; i < tc.jobCount; i++ {
   208  					sqe := h.io_uring_get_sqe()
   209  					if sqe == nil {
   210  						// spin until we got one
   211  						continue
   212  					}
   213  
   214  					buf := new([]byte)
   215  					*buf = append(*buf, []byte(fmt.Sprintf("data %d\n", i))...)
   216  
   217  					PrepWrite(sqe, int(fdTemp), &(*buf)[0], len((*buf)), 0)
   218  					sqe.UserData.SetUnsafe(unsafe.Pointer(buf))
   219  				}
   220  
   221  				submit(t, &tc.p, h, tc.jobCount)
   222  				runtime.GC()
   223  				wg.Wait()
   224  			})
   225  
   226  		})
   227  	}
   228  }