author	Jens Axboe <axboe@kernel.dk>	2025-08-27 15:27:30 -0600
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2025-09-04 16:55:43 +0200
commit	390a61d284e1ced088d43928dfcf6f86fffdd780 (patch)
tree	63c3a4931eb2c289664f0850a113732481a9ad4e
parent	3fdc52cb8471bf75046c603f0a65e904745b7fdb (diff)
io_uring/kbuf: always use READ_ONCE() to read ring provided buffer lengths
[ Upstream commit 98b6fa62c84f2e129161e976a5b9b3cb4ccd117b ]

Since the buffers are mapped from userspace, it is prudent to use
READ_ONCE() to read the value into a local variable, and use that for
any other actions taken. Having a stable read of the buffer length
avoids worrying about it changing after checking, or being read multiple
times.

Similarly, the buffer may well change in between it being picked and
being committed. Ensure the looping for incremental ring buffer commit
stops if it hits a zero sized buffer, as no further progress can be made
at that point.

Fixes: ae98dbf43d75 ("io_uring/kbuf: add support for incremental buffer consumption")
Link: https://lore.kernel.org/io-uring/tencent_000C02641F6250C856D0C26228DE29A3D30A@qq.com/
Reported-by: Qingyue Zhang <chunzhennn@qq.com>
Reported-by: Suoxing Zhang <aftern00n@qq.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
-rw-r--r--	io_uring/kbuf.c	20
1 file changed, 13 insertions(+), 7 deletions(-)
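For illustration beyond the patch itself, here is a minimal userspace C sketch of the hazard the commit message describes. The READ_ONCE() model, the struct name and the clamp helpers are hypothetical stand-ins, not kernel code:

#include <stdint.h>

/*
 * Rough userspace model of the kernel's READ_ONCE(): a volatile
 * access forces exactly one load, so the compiler cannot re-read
 * the value behind our back.
 */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

/* Hypothetical stand-in for a buffer entry shared with another party */
struct shared_buf {
	uint64_t addr;
	uint32_t len;	/* the other side may rewrite this at any time */
};

/*
 * BUGGY: the compare and the copy are two separate loads of buf->len,
 * so a concurrent writer can make *want end up larger than the buffer.
 */
static void clamp_racy(const struct shared_buf *buf, uint32_t *want)
{
	if (*want == 0 || *want > buf->len)
		*want = buf->len;
}

/* FIXED: one marked load, then every decision uses the local snapshot */
static void clamp_stable(const struct shared_buf *buf, uint32_t *want)
{
	uint32_t len = READ_ONCE(buf->len);

	if (*want == 0 || *want > len)
		*want = len;
}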
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 81a13338dfab..19a8bde5e1e1 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -36,15 +36,19 @@ static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
 {
 	while (len) {
 		struct io_uring_buf *buf;
-		u32 this_len;
+		u32 buf_len, this_len;
 
 		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
-		this_len = min_t(u32, len, buf->len);
-		buf->len -= this_len;
-		if (buf->len) {
+		buf_len = READ_ONCE(buf->len);
+		this_len = min_t(u32, len, buf_len);
+		buf_len -= this_len;
+		/* Stop looping for invalid buffer length of 0 */
+		if (buf_len || !this_len) {
 			buf->addr += this_len;
+			buf->len = buf_len;
 			return false;
 		}
+		buf->len = 0;
 		bl->head++;
 		len -= this_len;
 	}
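To see why the new !this_len test matters, consider a sketch of the commit loop above when it meets a zero-length entry. This is a hypothetical userspace model, not the kernel function; lens stands in for the per-entry buf->len fields and mask for bl->mask:

#include <stdbool.h>
#include <stdint.h>

static bool commit_model(uint32_t *lens, uint32_t mask,
			 uint16_t *head, int len)
{
	while (len) {
		uint32_t buf_len = lens[*head & mask];	/* one snapshot */
		uint32_t this_len =
			buf_len < (uint32_t)len ? buf_len : (uint32_t)len;

		buf_len -= this_len;
		/*
		 * A zero-sized entry makes no progress: without the
		 * !this_len test the loop would fall through below,
		 * advance head, leave len unchanged, and walk the
		 * ring forever.
		 */
		if (buf_len || !this_len) {
			lens[*head & mask] = buf_len;
			return false;
		}
		lens[*head & mask] = 0;
		(*head)++;
		len -= this_len;
	}
	return true;
}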
@@ -159,6 +163,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 	__u16 tail, head = bl->head;
 	struct io_uring_buf *buf;
 	void __user *ret;
+	u32 buf_len;
 
 	tail = smp_load_acquire(&br->tail);
 	if (unlikely(tail == head))
@@ -168,8 +173,9 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 		req->flags |= REQ_F_BL_EMPTY;
 
 	buf = io_ring_head_to_buf(br, head, bl->mask);
-	if (*len == 0 || *len > buf->len)
-		*len = buf->len;
+	buf_len = READ_ONCE(buf->len);
+	if (*len == 0 || *len > buf_len)
+		*len = buf_len;
 	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
 	req->buf_list = bl;
 	req->buf_index = buf->bid;
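For context, the length being snapshotted in the hunk above originates entirely in userspace. A liburing-based application publishes it roughly as below; this is a sketch that assumes the ring and buffer memory are already set up:

#include <liburing.h>

/*
 * Publish one buffer into a provided-buffer ring. A buggy or hostile
 * application can pass len == 0, or rewrite the entry's len after
 * this call -- which is exactly what the kernel-side READ_ONCE()
 * snapshots guard against.
 */
static void provide_buf(struct io_uring_buf_ring *br, void *addr,
			unsigned int len, unsigned short bid, int mask)
{
	io_uring_buf_ring_add(br, addr, len, bid, mask, 0);
	io_uring_buf_ring_advance(br, 1);
}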
@@ -265,7 +271,7 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
 	req->buf_index = buf->bid;
 
 	do {
-		u32 len = buf->len;
+		u32 len = READ_ONCE(buf->len);
 
 		/* truncate end piece, if needed, for non partial buffers */
 		if (len > arg->max_len) {