author		Eric Biggers <ebiggers@google.com>	2025-01-27 13:16:09 -0800
committer	Herbert Xu <herbert@gondor.apana.org.au>	2025-02-09 18:08:11 +0800
commit		50dd4b4d41c64e86937dda9a1464549c8ae7238a (patch)
tree		7dcecc3423cad04ca57cb1cab6b5fed2ad2afe0f
parent		1d19058d86e55b1fc89a53b854a97b63b0f4c2b9 (diff)
crypto: x86/aes-xts - make the fast path 64-bit specific
Remove 32-bit support from the fast path in xts_crypt().  Then optimize
it for 64-bit, and simplify the code, by switching to sg_virt() and
removing the now-unnecessary checks for crossing a page boundary.

The result is simpler code that is slightly smaller and faster in the
case that actually matters (64-bit).

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
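[ For context: sg_virt() resolves a scatterlist element straight to a
  kernel virtual address through the direct map.  Its definition in
  include/linux/scatterlist.h is essentially: ]

	static inline void *sg_virt(struct scatterlist *sg)
	{
		/* page_address() only works for pages that already have
		 * a kernel mapping, i.e. not highmem pages.
		 */
		return page_address(sg_page(sg)) + sg->offset;
	}

[ This shortcut is only valid when the page is guaranteed to have a
  permanent kernel mapping.  That always holds on x86_64, which has no
  highmem, but not on 32-bit kernels with CONFIG_HIGHMEM, where the
  page may need to be mapped first; hence the kmap_local_page() /
  kunmap_local() pair removed below.  And since kmap_local_page() maps
  only a single page, the old fast path also had to verify that the
  buffers did not cross a page boundary, a check that becomes
  unnecessary once the code goes through the direct map. ]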
 arch/x86/crypto/aesni-intel_glue.c | 30 ++++++++++--------------------
 1 file changed, 10 insertions(+), 20 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 11e95fc62636..3e0cc15050f3 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -581,11 +581,8 @@ xts_crypt(struct skcipher_request *req, xts_encrypt_iv_func encrypt_iv,
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	const struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
-	const unsigned int cryptlen = req->cryptlen;
-	struct scatterlist *src = req->src;
-	struct scatterlist *dst = req->dst;
 
-	if (unlikely(cryptlen < AES_BLOCK_SIZE))
+	if (unlikely(req->cryptlen < AES_BLOCK_SIZE))
 		return -EINVAL;
 
 	kernel_fpu_begin();
@@ -593,23 +590,16 @@ xts_crypt(struct skcipher_request *req, xts_encrypt_iv_func encrypt_iv,
 
 	/*
 	 * In practice, virtually all XTS plaintexts and ciphertexts are either
-	 * 512 or 4096 bytes, aligned such that they don't span page boundaries.
-	 * To optimize the performance of these cases, and also any other case
-	 * where no page boundary is spanned, the below fast-path handles
-	 * single-page sources and destinations as efficiently as possible.
+	 * 512 or 4096 bytes and do not use multiple scatterlist elements.  To
+	 * optimize the performance of these cases, the below fast-path handles
+	 * single-scatterlist-element messages as efficiently as possible.  The
+	 * code is 64-bit specific, as it assumes no page mapping is needed.
	 */
-	if (likely(src->length >= cryptlen && dst->length >= cryptlen &&
-		   src->offset + cryptlen <= PAGE_SIZE &&
-		   dst->offset + cryptlen <= PAGE_SIZE)) {
-		struct page *src_page = sg_page(src);
-		struct page *dst_page = sg_page(dst);
-		void *src_virt = kmap_local_page(src_page) + src->offset;
-		void *dst_virt = kmap_local_page(dst_page) + dst->offset;
-
-		(*crypt_func)(&ctx->crypt_ctx, src_virt, dst_virt, cryptlen,
-			      req->iv);
-		kunmap_local(dst_virt);
-		kunmap_local(src_virt);
+	if (IS_ENABLED(CONFIG_X86_64) &&
+	    likely(req->src->length >= req->cryptlen &&
+		   req->dst->length >= req->cryptlen)) {
+		(*crypt_func)(&ctx->crypt_ctx, sg_virt(req->src),
+			      sg_virt(req->dst), req->cryptlen, req->iv);
 		kernel_fpu_end();
 		return 0;
 	}
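[ Usage note: the rewritten check only asks that the first scatterlist
  element of the source and destination covers the whole message.  A
  caller that wraps one contiguous buffer per direction always
  satisfies it.  A minimal, hypothetical sketch (buffer names and the
  4096-byte size are illustrative, not from this patch): ]

	/*
	 * Hypothetical caller: one contiguous buffer per direction, so
	 * req->src and req->dst each consist of a single element whose
	 * length equals req->cryptlen, and the fast path above is taken.
	 */
	struct scatterlist src_sg, dst_sg;

	sg_init_one(&src_sg, src_buf, 4096);	/* e.g. one page-sized block */
	sg_init_one(&dst_sg, dst_buf, 4096);
	skcipher_request_set_crypt(req, &src_sg, &dst_sg, 4096, iv);

[ Requests with multi-element scatterlists, and all 32-bit kernels,
  fall through to the slower general path that follows this hunk (not
  shown in this diff). ]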