author     Linus Torvalds <torvalds@linux-foundation.org>   2024-02-02 10:45:17 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2024-02-02 10:45:17 -0800
commit     717ca0b8e55eea49c5d71c026eafbe1e64d4b556 (patch)
tree       40889fa3e19f9626e5fe9184abef01ec3163ebb8 /io_uring/poll.c
parent     ec86369c88f6794a6cfa0383f715f276305399ed (diff)
parent     72bd80252feeb3bef8724230ee15d9f7ab541c6e (diff)
Merge tag 'io_uring-6.8-2024-02-01' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:
- Fix for missing retry for read multishot.
If we trigger its execution and there is more than one buffer to be
read, we don't always read more than the first one. As it's edge
triggered, this can lead to stalls.
- Limit inline receive multishot retries for fairness reasons.
If we have a very bursty socket receiving data, we still need to
ensure we process other requests as well. This is really two minor
cleanups, then a way for poll reissue to trigger a requeue, and
finally having multishot receive utilize that (a userspace sketch of
such a multishot receive setup follows this list).
- Fix for a weird corner case for non-multishot receive with
MSG_WAITALL, using provided buffers, and setting the length to
zero (to let the buffer dictate the receive size).
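
For context on what these fixes touch from userspace, here is a minimal
sketch of a multishot receive armed with a provided buffer ring, using
liburing. It is illustrative only and not part of this merge: it assumes
liburing 2.4 or newer, an already-connected socket passed in by the
caller, and it omits error handling and buffer recycling.

#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>

#define BUF_COUNT 8
#define BUF_SIZE  4096
#define BGID      0

/* Drain a connected socket with one multishot recv SQE and a buffer ring. */
int recv_multishot_loop(struct io_uring *ring, int sockfd)
{
	struct io_uring_buf_ring *br;
	struct io_uring_sqe *sqe;
	char *bufs;
	int ret;

	/* Register a buffer ring the kernel picks receive buffers from. */
	br = io_uring_setup_buf_ring(ring, BUF_COUNT, BGID, 0, &ret);
	if (!br)
		return ret;
	bufs = malloc((size_t)BUF_COUNT * BUF_SIZE);
	for (int i = 0; i < BUF_COUNT; i++)
		io_uring_buf_ring_add(br, bufs + i * BUF_SIZE, BUF_SIZE, i,
				      io_uring_buf_ring_mask(BUF_COUNT), i);
	io_uring_buf_ring_advance(br, BUF_COUNT);

	/* One SQE keeps posting CQEs until error, EOF, or no buffers left. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	io_uring_submit(ring);

	for (;;) {
		struct io_uring_cqe *cqe;
		unsigned bid;

		io_uring_wait_cqe(ring, &cqe);
		if (cqe->res <= 0) {		/* error or EOF ends the shot */
			io_uring_cqe_seen(ring, cqe);
			break;
		}
		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
		printf("%d bytes in buffer %u, more=%d\n", cqe->res, bid,
		       !!(cqe->flags & IORING_CQE_F_MORE));
		/* A real program would re-add the consumed buffer here. */
		io_uring_cqe_seen(ring, cqe);
	}
	free(bufs);
	return 0;
}

The fairness fix above matters exactly in this setup: a socket that keeps
producing data would otherwise let a single multishot request monopolize
inline completion processing.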
* tag 'io_uring-6.8-2024-02-01' of git://git.kernel.dk/linux:
io_uring/net: fix sr->len for IORING_OP_RECV with MSG_WAITALL and buffers
io_uring/net: limit inline multishot retries
io_uring/poll: add requeue return code from poll multishot handling
io_uring/net: un-indent mshot retry path in io_recv_finish()
io_uring/poll: move poll execution helpers higher up
io_uring/rw: ensure poll based multishot read retries appropriately
Diffstat (limited to 'io_uring/poll.c')
-rw-r--r--   io_uring/poll.c   49
1 file changed, 28 insertions(+), 21 deletions(-)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index d59b74a99d4e..7513afc7b702 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -226,8 +226,29 @@ enum {
 	IOU_POLL_NO_ACTION = 1,
 	IOU_POLL_REMOVE_POLL_USE_RES = 2,
 	IOU_POLL_REISSUE = 3,
+	IOU_POLL_REQUEUE = 4,
 };
 
+static void __io_poll_execute(struct io_kiocb *req, int mask)
+{
+	unsigned flags = 0;
+
+	io_req_set_res(req, mask, 0);
+	req->io_task_work.func = io_poll_task_func;
+
+	trace_io_uring_task_add(req, mask);
+
+	if (!(req->flags & REQ_F_POLL_NO_LAZY))
+		flags = IOU_F_TWQ_LAZY_WAKE;
+	__io_req_task_work_add(req, flags);
+}
+
+static inline void io_poll_execute(struct io_kiocb *req, int res)
+{
+	if (io_poll_get_ownership(req))
+		__io_poll_execute(req, res);
+}
+
 /*
  * All poll tw should go through this. Checks for poll events, manages
  * references, does rewait, etc.
@@ -309,6 +330,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 			int ret = io_poll_issue(req, ts);
 			if (ret == IOU_STOP_MULTISHOT)
 				return IOU_POLL_REMOVE_POLL_USE_RES;
+			else if (ret == IOU_REQUEUE)
+				return IOU_POLL_REQUEUE;
 			if (ret < 0)
 				return ret;
 		}
@@ -331,8 +354,12 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 	int ret;
 
 	ret = io_poll_check_events(req, ts);
-	if (ret == IOU_POLL_NO_ACTION)
+	if (ret == IOU_POLL_NO_ACTION) {
+		return;
+	} else if (ret == IOU_POLL_REQUEUE) {
+		__io_poll_execute(req, 0);
 		return;
+	}
 
 	io_poll_remove_entries(req);
 	io_poll_tw_hash_eject(req, ts);
@@ -364,26 +391,6 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 	}
 }
 
-static void __io_poll_execute(struct io_kiocb *req, int mask)
-{
-	unsigned flags = 0;
-
-	io_req_set_res(req, mask, 0);
-	req->io_task_work.func = io_poll_task_func;
-
-	trace_io_uring_task_add(req, mask);
-
-	if (!(req->flags & REQ_F_POLL_NO_LAZY))
-		flags = IOU_F_TWQ_LAZY_WAKE;
-	__io_req_task_work_add(req, flags);
-}
-
-static inline void io_poll_execute(struct io_kiocb *req, int res)
-{
-	if (io_poll_get_ownership(req))
-		__io_poll_execute(req, res);
-}
-
 static void io_poll_cancel_req(struct io_kiocb *req)
 {
 	io_poll_mark_cancelled(req);
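
The poll change above gives a multishot issue a third outcome besides
"stop" and "no action": io_poll_issue() can report IOU_REQUEUE, which
io_poll_task_func() turns into a fresh __io_poll_execute() call so the
request is re-run from task work instead of looping inline. Below is a
small standalone C toy, not kernel code, that models the fairness
pattern this enables: process a bounded number of completions inline,
then ask to be requeued. The names and the retry cap are illustrative,
not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Toy model: cap inline work, then yield by asking to be requeued. */
enum handler_result {
	HANDLER_DONE,		/* nothing left to do                   */
	HANDLER_REQUEUE,	/* more data, but reschedule and yield  */
};

#define MAX_INLINE_RETRIES 4	/* illustrative cap, not the kernel's value */

static bool buffer_available(int *pending)
{
	return *pending > 0;
}

static enum handler_result multishot_handler(int *pending)
{
	int done = 0;

	while (buffer_available(pending)) {
		(*pending)--;			/* "consume" one buffer */
		if (++done >= MAX_INLINE_RETRIES)
			return HANDLER_REQUEUE;	/* be fair to other work */
	}
	return HANDLER_DONE;
}

int main(void)
{
	int pending = 10;	/* a bursty source with 10 buffers queued */
	int rounds = 0;

	/* The "requeue" loop here stands in for re-running via task work. */
	while (multishot_handler(&pending) == HANDLER_REQUEUE)
		rounds++;
	printf("drained in %d extra round(s)\n", rounds);
	return 0;
}

In the kernel, the requeue goes through deferred task work rather than a
loop like this, which is what lets other pending requests run in between
bursts from a single socket.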