author     Jiri Kosina <jkosina@suse.com>    2025-03-26 13:42:07 +0100
committer  Jiri Kosina <jkosina@suse.com>    2025-03-26 13:42:07 +0100
commit     b3cc7428a32202936904b5b07cf9f135025bafd6 (patch)
tree       d4a1a6180ac5939fccd92acd6f8d7d1388575c4a /io_uring/uring_cmd.c
parent     db52926fb0be40e1d588a346df73f5ea3a34a4c6 (diff)
parent     01601fdd40ecf4467c8ae4d215dbb7d2a0599a2c (diff)
Merge branch 'for-6.15/amd_sfh' into for-linus
From: Mario Limonciello <mario.limonciello@amd.com>

Some platforms include a human presence detection (HPD) sensor. When it is enabled and a user is detected, the sensor fusion hub emits a wake event that software can react to. Example use cases are "wake from suspend on approach" or "lock when leaving".

This is currently enabled by default on supported systems, but users can't control it. In effect, wake-on-approach is always on, which is surprising behavior for users who don't expect it.

Instead of defaulting to enabled, add a sysfs knob that lets users enable the feature if they want it, and set it to disabled by default.
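The actual amd_sfh attribute is not shown on this page, so purely as an illustrative sketch of the general pattern described above, a boolean sysfs knob that defaults to off might look like the following. The names (hpd_demo_dev, hpd_enabled) and the enabled/disabled string format are assumptions for illustration, not the real amd_sfh interface.

/*
 * Hypothetical sketch only -- not the amd_sfh patch itself. It shows the
 * usual shape of a read/write device attribute gating a feature that is
 * disabled by default.
 */
#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/sysfs.h>

struct hpd_demo_dev {
        struct device *dev;
        bool hpd_enabled;               /* feature is off until userspace opts in */
};

static ssize_t hpd_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct hpd_demo_dev *d = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%s\n", d->hpd_enabled ? "enabled" : "disabled");
}

static ssize_t hpd_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct hpd_demo_dev *d = dev_get_drvdata(dev);
        bool enable;
        int ret;

        ret = kstrtobool(buf, &enable);
        if (ret)
                return ret;

        d->hpd_enabled = enable;        /* a real driver would poke the hardware here */
        return count;
}
static DEVICE_ATTR_RW(hpd);

The attribute would then be registered during probe, e.g. via device_create_file() or an attribute group, so userspace can flip it at runtime.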
Diffstat (limited to 'io_uring/uring_cmd.c')
-rw-r--r--    io_uring/uring_cmd.c    19
1 file changed, 6 insertions, 13 deletions
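For the io_uring side, the hunks below drop the alloc-cache init_once() callback (clearing op_data directly after allocation) and copy the SQE into the cache's sqes area instead of the raw async_data pointer. Assembled from those hunks, the prep helper ends up reading roughly as follows; this is a sketch, and context lines the diff does not show are elided.

static int io_uring_cmd_prep_setup(struct io_kiocb *req,
                                   const struct io_uring_sqe *sqe)
{
        struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
        struct io_uring_cmd_data *cache;

        /* init_once() is gone; the field is cleared directly instead */
        cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req);
        if (!cache)
                return -ENOMEM;
        cache->op_data = NULL;

        if (!(req->flags & REQ_F_FORCE_ASYNC)) {
                /* defer memcpy until we need it */
                /* ... context not shown in the diff ... */
                return 0;
        }

        /* copy into the cache's sqes storage rather than raw async_data */
        memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
        ioucmd->sqe = cache->sqes;
        return 0;
}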
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index fc94c465a9850..1f6a82128b475 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -168,23 +168,16 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
-static void io_uring_cmd_init_once(void *obj)
-{
-        struct io_uring_cmd_data *data = obj;
-
-        data->op_data = NULL;
-}
-
 static int io_uring_cmd_prep_setup(struct io_kiocb *req,
                                    const struct io_uring_sqe *sqe)
 {
         struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
         struct io_uring_cmd_data *cache;
-        cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req,
-                                          io_uring_cmd_init_once);
+        cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req);
         if (!cache)
                 return -ENOMEM;
+        cache->op_data = NULL;
         if (!(req->flags & REQ_F_FORCE_ASYNC)) {
                 /* defer memcpy until we need it */
@@ -192,8 +185,8 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
                 return 0;
         }
-        memcpy(req->async_data, sqe, uring_sqe_size(req->ctx));
-        ioucmd->sqe = req->async_data;
+        memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
+        ioucmd->sqe = cache->sqes;
         return 0;
 }
@@ -260,7 +253,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
                 struct io_uring_cmd_data *cache = req->async_data;
                 if (ioucmd->sqe != (void *) cache)
-                        memcpy(cache, ioucmd->sqe, uring_sqe_size(req->ctx));
+                        memcpy(cache->sqes, ioucmd->sqe, uring_sqe_size(req->ctx));
                 return -EAGAIN;
         } else if (ret == -EIOCBQUEUED) {
                 return -EIOCBQUEUED;
@@ -350,7 +343,7 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
         if (!prot || !prot->ioctl)
                 return -EOPNOTSUPP;
-        switch (cmd->sqe->cmd_op) {
+        switch (cmd->cmd_op) {
         case SOCKET_URING_OP_SIOCINQ:
                 ret = prot->ioctl(sk, SIOCINQ, &arg);
                 if (ret)