| author | Jens Axboe <axboe@kernel.dk> | 2025-06-25 10:17:06 -0600 |
| --- | --- | --- |
| committer | Jens Axboe <axboe@kernel.dk> | 2025-06-25 10:17:06 -0600 |
| commit | 9a709b7e98e6fa51600b5f2d24c5068efa6d39de (patch) | |
| tree | 3b43fd5fbd6fe88d858b53c3f90d7a16290c5a16 | |
| parent | 7cac633a42a7b3c8146eb1db76fb80dc652998de (diff) | |
io_uring/net: mark iov as dynamically allocated even for single segments
A bigger array of vecs could have been allocated, but
io_ring_buffers_peek() may still have capped the mapped range depending
on how much data was available. Hence don't rely on the segment count
to know whether the request should be marked as needing cleanup; always
check upfront whether the iov array differs from the fast_iov array.
Fixes: 26ec15e4b0c1 ("io_uring/kbuf: don't truncate end buffer for multiple buffer peeks")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r-- | io_uring/net.c | 11 |
1 file changed, 6 insertions, 5 deletions
diff --git a/io_uring/net.c b/io_uring/net.c
index 9550d4c8f866..5c1e8c4ba468 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1077,6 +1077,12 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
 		if (unlikely(ret < 0))
 			return ret;
 
+		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
+			kmsg->vec.nr = ret;
+			kmsg->vec.iovec = arg.iovs;
+			req->flags |= REQ_F_NEED_CLEANUP;
+		}
+
 		/* special case 1 vec, can be a fast path */
 		if (ret == 1) {
 			sr->buf = arg.iovs[0].iov_base;
@@ -1085,11 +1091,6 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
 		}
 		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
 				arg.out_len);
-		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
-			kmsg->vec.nr = ret;
-			kmsg->vec.iovec = arg.iovs;
-			req->flags |= REQ_F_NEED_CLEANUP;
-		}
 	} else {
 		void __user *buf;
 
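The core of the fix is easier to see outside the kernel. Below is a minimal, self-contained userspace sketch of the same pattern: whether the iovec array needs freeing is decided by pointer identity against the inline fast_iov and the cached heap array, not by how many segments ended up mapped. The names here (xhdr, xreq, record_iovs, NEEDS_CLEANUP) are invented for illustration and are not the io_uring data structures.

```c
/*
 * Minimal userspace sketch of the cleanup-marking pattern; not the
 * kernel code. xhdr/xreq/record_iovs/NEEDS_CLEANUP are made-up names.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>

#define NEEDS_CLEANUP	(1u << 0)	/* stand-in for REQ_F_NEED_CLEANUP */

struct xhdr {
	struct iovec fast_iov;		/* inline vec, never freed */
	struct iovec *iovec;		/* cached heap array, may be NULL */
	int nr;
};

struct xreq {
	unsigned int flags;
};

/*
 * Record a freshly selected iovec array. Even if only one segment was
 * mapped, a bigger heap array may still have been allocated, so key the
 * cleanup flag off pointer identity rather than the segment count.
 */
static void record_iovs(struct xreq *req, struct xhdr *hdr,
			struct iovec *iovs, int nr_mapped)
{
	if (iovs != &hdr->fast_iov && iovs != hdr->iovec) {
		hdr->iovec = iovs;
		hdr->nr = nr_mapped;
		req->flags |= NEEDS_CLEANUP;
	}
}

int main(void)
{
	struct xhdr hdr = { .iovec = NULL };
	struct xreq req = { .flags = 0 };
	/* A 4-entry array was allocated, but only one segment got mapped. */
	struct iovec *iovs = calloc(4, sizeof(*iovs));

	record_iovs(&req, &hdr, iovs, 1);
	printf("needs cleanup: %s\n",
	       (req.flags & NEEDS_CLEANUP) ? "yes" : "no");	/* prints "yes" */

	if (req.flags & NEEDS_CLEANUP)
		free(hdr.iovec);
	return 0;
}
```

Keying the flag off pointer identity means the single-segment fast path can no longer skip cleanup for a heap-allocated array that the peek step trimmed down to one mapped segment, which is the leak the patch closes.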