diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go
index d412b76d8..7fa0ea4d9 100644
--- a/net/tstun/wrap.go
+++ b/net/tstun/wrap.go
@@ -568,7 +568,7 @@ func (t *Wrapper) write(buf []byte, offset int) (int, error) {
 func (t *Wrapper) read(buf []byte, offset int) (n int, err error) {
 	// TODO: upstream has graceful shutdown error handling here.
 	buff := buf[offset-4:]
-	const useIOUring = false
+	const useIOUring = true
 	if useIOUring {
 		n, err = t.ring.Read(buff[:])
 	} else {
diff --git a/net/uring/io_uring_linux.go b/net/uring/io_uring_linux.go
index 55e215a6e..391b5841d 100644
--- a/net/uring/io_uring_linux.go
+++ b/net/uring/io_uring_linux.go
@@ -277,7 +277,7 @@ type File struct {
 	close     sync.Once
 	file      *os.File // must keep file from being GC'd
 	fd        C.int
-	readReqs  [8]*C.goreq
+	readReqs  [1]*C.goreq // Whoops! The kernel apparently cannot handle more than 1 concurrent preadv calls on a tun device!
 	writeReqs [8]*C.goreq
 	writeReqC chan int // indices into reqs
 }
@@ -299,10 +299,11 @@ func NewFile(file *os.File) (*File, error) {
 	}
 
 	// Initialize buffers
-	for _, reqs := range []*[8]*C.goreq{&u.readReqs, &u.writeReqs} {
-		for i := range reqs {
-			reqs[i] = C.initializeReq(bufferSize)
-		}
+	for i := range &u.readReqs {
+		u.readReqs[i] = C.initializeReq(bufferSize)
+	}
+	for i := range &u.writeReqs {
+		u.writeReqs[i] = C.initializeReq(bufferSize)
 	}
 
 	// Initialize read half.
@@ -413,10 +414,11 @@ func (u *File) Close() error {
 	u.fd = 0
 
 	// Free buffers
-	for _, reqs := range []*[8]*C.goreq{&u.readReqs, &u.writeReqs} {
-		for _, r := range reqs {
-			C.freeReq(r)
-		}
+	for _, r := range u.readReqs {
+		C.freeReq(r)
+	}
+	for _, r := range u.writeReqs {
+		C.freeReq(r)
 	}
 	})
 	return nil