author		Jens Axboe <axboe@kernel.dk>	2024-09-21 01:59:52 -0600
committer	Jens Axboe <axboe@kernel.dk>	2024-10-29 13:43:26 -0600
commit		f4bb2f65bb8154c1a2c2d7e01db0c98dffb5918f (patch)
tree		2398f4ce0d2b7ac68fa9c912ab213cd7e44f0e1e
parent		83a4f865e273b83426eafdd3aa51334cc21ac0fd (diff)
io_uring/eventfd: move ctx->evfd_last_cq_tail into io_ev_fd
Everything else about the io_uring eventfd support is nicely kept
private to that code, except the cached_cq_tail tracking. With
everything else in place, move io_eventfd_flush_signal() to using the
ev_fd grab+release helpers, which then enables the direct use of
io_ev_fd for this tracking too.

Link: https://lore.kernel.org/r/20240921080307.185186-7-axboe@kernel.dk
Signed-off-by: Jens Axboe <axboe@kernel.dk>
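For context, the grab+release helpers referenced above are not part of this
hunk. A minimal sketch of the RCU + refcount pattern they follow might look
like the following; the helper names match the patch, but the bodies, the
ctx->io_ev_fd RCU pointer, and the io_eventfd_free() callback are assumptions
made for illustration, not the verbatim kernel implementation:

static struct io_ev_fd *io_eventfd_grab(struct io_ring_ctx *ctx)
{
	struct io_ev_fd *ev_fd;

	rcu_read_lock();
	/* assumed: pairs with rcu_assign_pointer() at register time */
	ev_fd = rcu_dereference(ctx->io_ev_fd);
	/* only hand out the object if it is still live */
	if (ev_fd && refcount_inc_not_zero(&ev_fd->refs)) {
		rcu_read_unlock();
		return ev_fd;
	}
	rcu_read_unlock();
	return NULL;
}

static void io_eventfd_release(struct io_ev_fd *ev_fd, bool put_ref)
{
	/*
	 * __io_eventfd_signal() can consume the reference itself, in
	 * which case the caller passes put_ref == false and the final
	 * put (and RCU-deferred free) happens elsewhere.
	 */
	if (put_ref && refcount_dec_and_test(&ev_fd->refs))
		call_rcu(&ev_fd->rcu, io_eventfd_free);
}

Dropping the reference under RCU is what lets io_eventfd_flush_signal() below
touch ev_fd->last_cq_tail without racing an unregister.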
-rw-r--r--	io_uring/eventfd.c	50
1 file changed, 29 insertions(+), 21 deletions(-)
diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c
index d1fdecd0c458..fab936d31ba8 100644
--- a/io_uring/eventfd.c
+++ b/io_uring/eventfd.c
@@ -13,10 +13,12 @@
 
 struct io_ev_fd {
 	struct eventfd_ctx	*cq_ev_fd;
-	unsigned int		eventfd_async: 1;
-	struct rcu_head		rcu;
+	unsigned int		eventfd_async;
+	/* protected by ->completion_lock */
+	unsigned		last_cq_tail;
 	refcount_t		refs;
 	atomic_t		ops;
+	struct rcu_head		rcu;
 };
 
 enum {
@@ -123,25 +125,31 @@ void io_eventfd_signal(struct io_ring_ctx *ctx)
 
 void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
 {
-	bool skip;
-
-	spin_lock(&ctx->completion_lock);
-
-	/*
-	 * Eventfd should only get triggered when at least one event has been
-	 * posted. Some applications rely on the eventfd notification count
-	 * only changing IFF a new CQE has been added to the CQ ring. There's
-	 * no depedency on 1:1 relationship between how many times this
-	 * function is called (and hence the eventfd count) and number of CQEs
-	 * posted to the CQ ring.
-	 */
-	skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
-	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
-	spin_unlock(&ctx->completion_lock);
-	if (skip)
-		return;
+	struct io_ev_fd *ev_fd;
 
-	io_eventfd_signal(ctx);
+	ev_fd = io_eventfd_grab(ctx);
+	if (ev_fd) {
+		bool skip, put_ref = true;
+
+		/*
+		 * Eventfd should only get triggered when at least one event
+		 * has been posted. Some applications rely on the eventfd
+		 * notification count only changing IFF a new CQE has been
+		 * added to the CQ ring. There's no dependency on 1:1
+		 * relationship between how many times this function is called
+		 * (and hence the eventfd count) and number of CQEs posted to
+		 * the CQ ring.
+		 */
+		spin_lock(&ctx->completion_lock);
+		skip = ctx->cached_cq_tail == ev_fd->last_cq_tail;
+		ev_fd->last_cq_tail = ctx->cached_cq_tail;
+		spin_unlock(&ctx->completion_lock);
+
+		if (!skip)
+			put_ref = __io_eventfd_signal(ev_fd);
+
+		io_eventfd_release(ev_fd, put_ref);
+	}
 }
 
 int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
@@ -172,7 +180,7 @@ int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
 	}
 
 	spin_lock(&ctx->completion_lock);
-	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
+	ev_fd->last_cq_tail = ctx->cached_cq_tail;
 	spin_unlock(&ctx->completion_lock);
 
 	ev_fd->eventfd_async = eventfd_async;
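
From userspace, the guarantee described in the comment in
io_eventfd_flush_signal() above is observable: the registered eventfd count
only advances when new CQEs have been posted, no matter how often the kernel
flushes signals internally. A minimal sketch using liburing's standard
io_uring_register_eventfd() API follows; the liburing calls are real, but the
program itself is illustrative and elides error handling:

#include <liburing.h>
#include <sys/eventfd.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	uint64_t count;
	int efd;

	/* error handling elided for brevity */
	io_uring_queue_init(8, &ring, 0);
	efd = eventfd(0, 0);
	io_uring_register_eventfd(&ring, efd);

	/* post exactly one CQE via a no-op request */
	io_uring_prep_nop(io_uring_get_sqe(&ring));
	io_uring_submit(&ring);

	/*
	 * The eventfd count advances only because a new CQE was posted;
	 * repeated internal flushes without fresh CQEs do not bump it.
	 */
	read(efd, &count, sizeof(count));
	printf("eventfd fired, count=%llu\n", (unsigned long long)count);

	io_uring_queue_exit(&ring);
	close(efd);
	return 0;
}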