author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2025-09-25 11:13:51 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2025-09-25 11:13:51 +0200
commit     56fb05093756ed55ba1cdf5d432a68004da67860 (patch)
tree       87dc333d4f606f375d6253eb5b8ef6f04674ffa6 /io_uring
parent     b6a153b0829afbc63032e8271d3ca9a19e704e03 (diff)
parent     da274362a7bd9ab3a6e46d15945029145ebce672 (diff)
Merge v6.12.49 (linux-rolling-lts)
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'io_uring')

-rw-r--r--  io_uring/io_uring.c   | 12
-rw-r--r--  io_uring/io_uring.h   | 13
-rw-r--r--  io_uring/kbuf.h       |  2
-rw-r--r--  io_uring/msg_ring.c   | 24
-rw-r--r--  io_uring/notif.c      |  2
-rw-r--r--  io_uring/poll.c       |  3
-rw-r--r--  io_uring/timeout.c    |  2
-rw-r--r--  io_uring/uring_cmd.c  |  6

8 files changed, 28 insertions(+), 36 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 52ada466bf98..68439eb0dc8f 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -316,9 +316,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 			    sizeof(struct io_async_rw));
 	ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
 			    sizeof(struct uring_cache));
-	spin_lock_init(&ctx->msg_lock);
-	ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
-			    sizeof(struct io_kiocb));
 	ret |= io_futex_cache_init(ctx);
 	if (ret)
 		goto free_ref;
@@ -358,7 +355,6 @@ err:
 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
 	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
 	io_alloc_cache_free(&ctx->uring_cache, kfree);
-	io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
 	io_futex_cache_free(ctx);
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
@@ -1358,9 +1354,10 @@ static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
 
 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
 {
-	io_tw_lock(req->ctx, ts);
-	/* req->task == current here, checking PF_EXITING is safe */
-	if (unlikely(req->task->flags & PF_EXITING))
+	struct io_ring_ctx *ctx = req->ctx;
+
+	io_tw_lock(ctx, ts);
+	if (unlikely(io_should_terminate_tw(ctx)))
 		io_req_defer_failed(req, -EFAULT);
 	else if (req->flags & REQ_F_FORCE_ASYNC)
 		io_queue_iowq(req);
@@ -2742,7 +2739,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
 	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
 	io_alloc_cache_free(&ctx->uring_cache, kfree);
-	io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
 	io_futex_cache_free(ctx);
 	io_destroy_buffers(ctx);
 	mutex_unlock(&ctx->uring_lock);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 70b6675941ff..e8a3b75bc6c6 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -421,6 +421,19 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
 		      ctx->submitter_task == current);
 }
 
+/*
+ * Terminate the request if either of these conditions are true:
+ *
+ * 1) It's being executed by the original task, but that task is marked
+ *    with PF_EXITING as it's exiting.
+ * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
+ *    our fallback task_work.
+ */
+static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
+{
+	return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
+}
+
 static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
 {
 	io_req_set_res(req, res, 0);
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 2586a292dfb9..a3ad8aea45c8 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -143,7 +143,7 @@ static inline bool io_kbuf_commit(struct io_kiocb *req,
 		struct io_uring_buf *buf;
 
 		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
-		if (WARN_ON_ONCE(len > buf->len))
+		if (len > buf->len)
 			len = buf->len;
 		buf->len -= len;
 		if (buf->len) {
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index b68e009bce21..97708e5132bc 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -11,7 +11,6 @@
 #include "io_uring.h"
 #include "rsrc.h"
 #include "filetable.h"
-#include "alloc_cache.h"
 #include "msg_ring.h"
 
 /* All valid masks for MSG_RING */
@@ -76,13 +75,7 @@ static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
 	struct io_ring_ctx *ctx = req->ctx;
 
 	io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
-	if (spin_trylock(&ctx->msg_lock)) {
-		if (io_alloc_cache_put(&ctx->msg_cache, req))
-			req = NULL;
-		spin_unlock(&ctx->msg_lock);
-	}
-	if (req)
-		kfree_rcu(req, rcu_head);
+	kfree_rcu(req, rcu_head);
 	percpu_ref_put(&ctx->refs);
 }
 
@@ -104,19 +97,6 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	return 0;
 }
 
-static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
-{
-	struct io_kiocb *req = NULL;
-
-	if (spin_trylock(&ctx->msg_lock)) {
-		req = io_alloc_cache_get(&ctx->msg_cache);
-		spin_unlock(&ctx->msg_lock);
-		if (req)
-			return req;
-	}
-	return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
-}
-
 static int io_msg_data_remote(struct io_kiocb *req)
 {
 	struct io_ring_ctx *target_ctx = req->file->private_data;
@@ -124,7 +104,7 @@ static int io_msg_data_remote(struct io_kiocb *req)
 	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
 	struct io_kiocb *target;
 	u32 flags = 0;
 
-	target = io_msg_get_kiocb(req->ctx);
+	target = kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
 	if (unlikely(!target))
 		return -ENOMEM;
diff --git a/io_uring/notif.c b/io_uring/notif.c
index 28859ae3ee6e..d4cf5a1328e6 100644
--- a/io_uring/notif.c
+++ b/io_uring/notif.c
@@ -85,7 +85,7 @@ static int io_link_skb(struct sk_buff *skb, struct ubuf_info *uarg)
 			return -EEXIST;
 
 		prev_nd = container_of(prev_uarg, struct io_notif_data, uarg);
-		prev_notif = cmd_to_io_kiocb(nd);
+		prev_notif = cmd_to_io_kiocb(prev_nd);
 
 		/* make sure all noifications can be finished in the same task_work */
 		if (unlikely(notif->ctx != prev_notif->ctx ||
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 17dea8aa09c9..bfdb537572f7 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -265,8 +265,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 {
 	int v;
 
-	/* req->task == current here, checking PF_EXITING is safe */
-	if (unlikely(req->task->flags & PF_EXITING))
+	if (unlikely(io_should_terminate_tw(req->ctx)))
 		return -ECANCELED;
 
 	do {
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 21c4bfea79f1..b215b2fbddd0 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -303,7 +303,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
 	int ret = -ENOENT;
 
 	if (prev) {
-		if (!(req->task->flags & PF_EXITING)) {
+		if (!io_should_terminate_tw(req->ctx)) {
 			struct io_cancel_data cd = {
 				.ctx		= req->ctx,
 				.data		= prev->cqe.user_data,
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index b2ce4b561002..f927844c8ada 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -116,9 +116,13 @@ EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
 static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+	unsigned int flags = IO_URING_F_COMPLETE_DEFER;
+
+	if (io_should_terminate_tw(req->ctx))
+		flags |= IO_URING_F_TASK_DEAD;
 
 	/* task_work executor checks the deffered list completion */
-	ioucmd->task_work_cb(ioucmd, IO_URING_F_COMPLETE_DEFER);
+	ioucmd->task_work_cb(ioucmd, flags);
 }
 
 void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
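Note: the common thread in this merge is the new io_should_terminate_tw() helper, which replaces the open-coded PF_EXITING checks in the task_work paths above. As an illustration only, the following stand-alone userspace C sketch models the helper's decision logic; the PF_* constants and the two structs are simplified stand-ins for the kernel's task flags and ring context, not the real definitions:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's task flags. */
#define PF_EXITING 0x00000004
#define PF_KTHREAD 0x00200000

struct ctx_model {
	bool refs_dying;	/* models percpu_ref_is_dying(&ctx->refs) */
};

struct task_model {
	unsigned int flags;	/* models current->flags */
};

/* Mirrors the logic of the new io_should_terminate_tw() helper. */
static bool should_terminate_tw(const struct task_model *task,
				const struct ctx_model *ctx)
{
	return (task->flags & (PF_KTHREAD | PF_EXITING)) || ctx->refs_dying;
}

int main(void)
{
	struct ctx_model live = { .refs_dying = false };
	struct ctx_model dying = { .refs_dying = true };
	struct task_model user = { .flags = 0 };
	struct task_model exiting = { .flags = PF_EXITING };
	struct task_model kthread = { .flags = PF_KTHREAD };

	printf("user task, live ring:    %d\n", should_terminate_tw(&user, &live));
	printf("exiting task, live ring: %d\n", should_terminate_tw(&exiting, &live));
	printf("fallback kthread:        %d\n", should_terminate_tw(&kthread, &live));
	printf("user task, dying ring:   %d\n", should_terminate_tw(&user, &dying));
	return 0;
}

Compiled and run, the model prints 0 only for a normal task on a live ring, and 1 for the exiting-task, fallback-kthread, and dying-ring cases, matching the conditions the helper folds into a single test.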