diff options
author | Pavel Begunkov <asml.silence@gmail.com> | 2022-11-23 11:33:36 +0000 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2022-12-31 13:26:53 +0100 |
commit | 4ffd5d0a7b105bedac1f50e33e6019b18331bdc8 (patch) | |
tree | 8ac840b0d28ba9176b91e21cb0ea994e358561e4 | |
parent | cb4dff498468b62e8c520568559b3a9007e104d7 (diff) |
io_uring: add completion locking for iopoll
commit 2ccc92f4effcfa1c51c4fcf1e34d769099d3cad4 upstream.
There are pieces of code that may allow iopoll to race filling cqes,
temporarily add spinlocking around posting events.
Cc: stable@vger.kernel.org
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/84d86b5c117feda075471c5c9e65208e0dccf5d0.1669203009.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r-- | io_uring/rw.c | 5 |
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 93d7cb5eb9fe..ffa5a86ba828 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -1063,6 +1063,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 	else if (!pos)
 		return 0;
 
+	spin_lock(&ctx->completion_lock);
 	prev = start;
 	wq_list_for_each_resume(pos, prev) {
 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
@@ -1077,11 +1078,11 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		req->cqe.flags = io_put_kbuf(req, 0);
 		__io_fill_cqe_req(req->ctx, req);
 	}
-
+	io_commit_cqring(ctx);
+	spin_unlock(&ctx->completion_lock);
 	if (unlikely(!nr_events))
 		return 0;
 
-	io_commit_cqring(ctx);
 	io_cqring_ev_posted_iopoll(ctx);
 	pos = start ? start->next : ctx->iopoll_list.first;
 	wq_list_cut(&ctx->iopoll_list, prev, start);