author	Eric Biggers <ebiggers@kernel.org>	2025-07-08 12:38:28 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2025-08-28 16:34:14 +0200
commit	02caf91b2fa11c723a39bb9ac8c8f7986a3ae7fb (patch)
tree	d118a44a664222c47cdd22aedb4f4521f597d0d4
parent	e59a52e429e13df3feb34f4853a8e36d121ed937 (diff)
crypto: x86/aegis - Fix sleeping when disallowed on PREEMPT_RT
commit c7f49dadfcdf27e1f747442e874e9baa52ab7674 upstream.

skcipher_walk_done() can call kfree(), which takes a spinlock, which makes it
incorrect to call while preemption is disabled on PREEMPT_RT. Therefore, end
the kernel-mode FPU section before calling skcipher_walk_done(), and restart
it afterwards.

Moreover, pass atomic=false to skcipher_walk_aead_encrypt() instead of
atomic=true. The point of atomic=true was to make skcipher_walk_done() safe
to call while in a kernel-mode FPU section, but that does not actually work.
So just use the usual atomic=false.

Fixes: 1d373d4e8e15 ("crypto: x86 - Add optimized AEGIS implementations")
Cc: stable@vger.kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	arch/x86/crypto/aegis128-aesni-glue.c	8
1 files changed, 6 insertions, 2 deletions
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index f1b6d40154e3..3cb5c193038b 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -119,7 +119,9 @@ crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
 					   walk->dst.virt.addr,
 					   round_down(walk->nbytes,
 						      AEGIS128_BLOCK_SIZE));
+		kernel_fpu_end();
 		skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
+		kernel_fpu_begin();
 	}
 
 	if (walk->nbytes) {
@@ -131,7 +133,9 @@ crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
 			aegis128_aesni_dec_tail(state, walk->src.virt.addr,
 						walk->dst.virt.addr,
 						walk->nbytes);
+		kernel_fpu_end();
 		skcipher_walk_done(walk, 0);
+		kernel_fpu_begin();
 	}
 }
 
@@ -176,9 +180,9 @@ crypto_aegis128_aesni_crypt(struct aead_request *req,
 	struct aegis_state state;
 
 	if (enc)
-		skcipher_walk_aead_encrypt(&walk, req, true);
+		skcipher_walk_aead_encrypt(&walk, req, false);
 	else
-		skcipher_walk_aead_decrypt(&walk, req, true);
+		skcipher_walk_aead_decrypt(&walk, req, false);
 	kernel_fpu_begin();
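
For illustration only (not part of the commit): a minimal sketch of the pattern this patch applies. The helper name aegis_crypt_walk() is a placeholder, and struct aegis_state, AEGIS128_BLOCK_SIZE, and aegis128_aesni_enc() are assumed to be available as in the glue file; the real code lives in arch/x86/crypto/aegis128-aesni-glue.c. The idea is that all SIMD work stays inside the kernel-mode FPU section, while skcipher_walk_done() is called only after leaving it, because on PREEMPT_RT it may take a sleeping spinlock via kfree().

	/*
	 * Illustrative sketch only -- simplified from the pattern used by
	 * this patch, not the verbatim kernel source.
	 */
	#include <asm/fpu/api.h>
	#include <crypto/internal/skcipher.h>

	static void aegis_crypt_walk(struct aegis_state *state,
				     struct skcipher_walk *walk)
	{
		kernel_fpu_begin();

		while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
			/* SIMD work runs inside the kernel-mode FPU section. */
			aegis128_aesni_enc(state, walk->src.virt.addr,
					   walk->dst.virt.addr,
					   round_down(walk->nbytes,
						      AEGIS128_BLOCK_SIZE));
			/*
			 * skcipher_walk_done() can call kfree(), which takes a
			 * spinlock -- a sleeping lock on PREEMPT_RT -- so end
			 * the FPU section (re-enabling preemption) before the
			 * call and restart it afterwards.
			 */
			kernel_fpu_end();
			skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
			kernel_fpu_begin();
		}

		kernel_fpu_end();
	}

Correspondingly, the walk is set up with atomic=false: per the commit message, atomic=true does not actually make skcipher_walk_done() safe to call from within a kernel-mode FPU section, so the usual non-atomic variant is used.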