 include/crypto/internal/sha2.h |  6
 lib/crypto/Kconfig             |  8
 lib/crypto/arm/Kconfig         |  1
 lib/crypto/arm/sha256-armv4.pl | 20
 lib/crypto/arm/sha256.c        | 14
 lib/crypto/arm64/Kconfig       |  1
 lib/crypto/arm64/sha2-armv8.pl |  2
 lib/crypto/arm64/sha256.c      | 14
 lib/crypto/arm64/sha512.h      |  6
 lib/crypto/riscv/Kconfig       |  1
 lib/crypto/riscv/sha256.c      | 12
 lib/crypto/x86/Kconfig         |  1
 lib/crypto/x86/sha256.c        | 12
 13 files changed, 34 insertions(+), 64 deletions(-)
diff --git a/include/crypto/internal/sha2.h b/include/crypto/internal/sha2.h
index 21a27fd5e198..5a25ccc49388 100644
--- a/include/crypto/internal/sha2.h
+++ b/include/crypto/internal/sha2.h
@@ -3,7 +3,6 @@
#ifndef _CRYPTO_INTERNAL_SHA2_H
#define _CRYPTO_INTERNAL_SHA2_H
-#include <crypto/internal/simd.h>
#include <crypto/sha2.h>
#include <linux/compiler_attributes.h>
#include <linux/string.h>
@@ -22,8 +21,6 @@ void sha256_blocks_generic(u32 state[SHA256_STATE_WORDS],
const u8 *data, size_t nblocks);
void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
const u8 *data, size_t nblocks);
-void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
- const u8 *data, size_t nblocks);
static __always_inline void sha256_choose_blocks(
u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks,
@@ -31,9 +28,6 @@ static __always_inline void sha256_choose_blocks(
{
if (!IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) || force_generic)
sha256_blocks_generic(state, data, nblocks);
- else if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD) &&
- (force_simd || crypto_simd_usable()))
- sha256_blocks_simd(state, data, nblocks);
else
sha256_blocks_arch(state, data, nblocks);
}
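
For reference, with the SIMD branch dropped, the dispatch helper reduces to a
two-way choice. Reconstructed from the hunk above, assuming the bool
parameters are otherwise unchanged (force_simd simply stops being consulted):

	static __always_inline void sha256_choose_blocks(
		u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks,
		bool force_generic, bool force_simd)
	{
		if (!IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) || force_generic)
			sha256_blocks_generic(state, data, nblocks);	/* portable C */
		else
			sha256_blocks_arch(state, data, nblocks);	/* arch entry point */
	}
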
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 2460ddff967f..9bd740475a89 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -150,14 +150,6 @@ config CRYPTO_ARCH_HAVE_LIB_SHA256
Declares whether the architecture provides an arch-specific
accelerated implementation of the SHA-256 library interface.
-config CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
- bool
- help
- Declares whether the architecture provides an arch-specific
- accelerated implementation of the SHA-256 library interface
- that is SIMD-based and therefore not usable in hardirq
- context.
-
config CRYPTO_LIB_SHA256_GENERIC
tristate
default CRYPTO_LIB_SHA256 if !CRYPTO_ARCH_HAVE_LIB_SHA256
diff --git a/lib/crypto/arm/Kconfig b/lib/crypto/arm/Kconfig
index d1ad664f0c67..9f3ff30f4032 100644
--- a/lib/crypto/arm/Kconfig
+++ b/lib/crypto/arm/Kconfig
@@ -28,4 +28,3 @@ config CRYPTO_SHA256_ARM
depends on !CPU_V7M
default CRYPTO_LIB_SHA256
select CRYPTO_ARCH_HAVE_LIB_SHA256
- select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
diff --git a/lib/crypto/arm/sha256-armv4.pl b/lib/crypto/arm/sha256-armv4.pl
index 8122db7fd599..f3a2b54efd4e 100644
--- a/lib/crypto/arm/sha256-armv4.pl
+++ b/lib/crypto/arm/sha256-armv4.pl
@@ -204,18 +204,18 @@ K256:
.word 0 @ terminator
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.LOPENSSL_armcap:
-.word OPENSSL_armcap_P-sha256_blocks_arch
+.word OPENSSL_armcap_P-sha256_block_data_order
#endif
.align 5
-.global sha256_blocks_arch
-.type sha256_blocks_arch,%function
-sha256_blocks_arch:
-.Lsha256_blocks_arch:
+.global sha256_block_data_order
+.type sha256_block_data_order,%function
+sha256_block_data_order:
+.Lsha256_block_data_order:
#if __ARM_ARCH__<7
- sub r3,pc,#8 @ sha256_blocks_arch
+ sub r3,pc,#8 @ sha256_block_data_order
#else
- adr r3,.Lsha256_blocks_arch
+ adr r3,.Lsha256_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
@@ -282,7 +282,7 @@ $code.=<<___;
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
#endif
-.size sha256_blocks_arch,.-sha256_blocks_arch
+.size sha256_block_data_order,.-sha256_block_data_order
___
######################################################################
# NEON stuff
@@ -470,8 +470,8 @@ sha256_block_data_order_neon:
stmdb sp!,{r4-r12,lr}
sub $H,sp,#16*4+16
- adr $Ktbl,.Lsha256_blocks_arch
- sub $Ktbl,$Ktbl,#.Lsha256_blocks_arch-K256
+ adr $Ktbl,.Lsha256_block_data_order
+ sub $Ktbl,$Ktbl,#.Lsha256_block_data_order-K256
bic $H,$H,#15 @ align for 128-bit stores
mov $t2,sp
mov sp,$H @ alloca
diff --git a/lib/crypto/arm/sha256.c b/lib/crypto/arm/sha256.c
index 109192e54b0f..2c9cfdaaa069 100644
--- a/lib/crypto/arm/sha256.c
+++ b/lib/crypto/arm/sha256.c
@@ -6,12 +6,12 @@
*/
#include <asm/neon.h>
#include <crypto/internal/sha2.h>
+#include <crypto/internal/simd.h>
#include <linux/kernel.h>
#include <linux/module.h>
-asmlinkage void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
- const u8 *data, size_t nblocks);
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+asmlinkage void sha256_block_data_order(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
asmlinkage void sha256_block_data_order_neon(u32 state[SHA256_STATE_WORDS],
const u8 *data, size_t nblocks);
asmlinkage void sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
@@ -20,11 +20,11 @@ asmlinkage void sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
-void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
const u8 *data, size_t nblocks)
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
- static_branch_likely(&have_neon)) {
+ static_branch_likely(&have_neon) && crypto_simd_usable()) {
kernel_neon_begin();
if (static_branch_likely(&have_ce))
sha256_ce_transform(state, data, nblocks);
@@ -32,10 +32,10 @@ void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
sha256_block_data_order_neon(state, data, nblocks);
kernel_neon_end();
} else {
- sha256_blocks_arch(state, data, nblocks);
+ sha256_block_data_order(state, data, nblocks);
}
}
-EXPORT_SYMBOL_GPL(sha256_blocks_simd);
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
bool sha256_is_arch_optimized(void)
{
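
Net effect on ARM: sha256_blocks_arch() becomes the NEON/CE entry point and
performs the SIMD-usability check itself, so it is safe to call from any
context and degrades to the scalar assembly when NEON cannot be used (e.g. in
hardirq context). Pieced together from the hunks above:

	void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
				const u8 *data, size_t nblocks)
	{
		if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
		    static_branch_likely(&have_neon) && crypto_simd_usable()) {
			kernel_neon_begin();
			if (static_branch_likely(&have_ce))
				sha256_ce_transform(state, data, nblocks);	/* ARMv8 CE */
			else
				sha256_block_data_order_neon(state, data, nblocks);
			kernel_neon_end();
		} else {
			sha256_block_data_order(state, data, nblocks);	/* scalar asm */
		}
	}
	EXPORT_SYMBOL_GPL(sha256_blocks_arch);
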
diff --git a/lib/crypto/arm64/Kconfig b/lib/crypto/arm64/Kconfig
index 129a7685cb4c..49e57bfdb5b5 100644
--- a/lib/crypto/arm64/Kconfig
+++ b/lib/crypto/arm64/Kconfig
@@ -17,4 +17,3 @@ config CRYPTO_SHA256_ARM64
tristate
default CRYPTO_LIB_SHA256
select CRYPTO_ARCH_HAVE_LIB_SHA256
- select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
diff --git a/lib/crypto/arm64/sha2-armv8.pl b/lib/crypto/arm64/sha2-armv8.pl
index 4aebd20c498b..35ec9ae99fe1 100644
--- a/lib/crypto/arm64/sha2-armv8.pl
+++ b/lib/crypto/arm64/sha2-armv8.pl
@@ -95,7 +95,7 @@ if ($output =~ /512/) {
$reg_t="w";
}
-$func="sha${BITS}_blocks_arch";
+$func="sha${BITS}_block_data_order";
($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));
diff --git a/lib/crypto/arm64/sha256.c b/lib/crypto/arm64/sha256.c
index bcf7a3adc0c4..fb9bff40357b 100644
--- a/lib/crypto/arm64/sha256.c
+++ b/lib/crypto/arm64/sha256.c
@@ -6,12 +6,12 @@
*/
#include <asm/neon.h>
#include <crypto/internal/sha2.h>
+#include <crypto/internal/simd.h>
#include <linux/kernel.h>
#include <linux/module.h>
-asmlinkage void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
- const u8 *data, size_t nblocks);
-EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+asmlinkage void sha256_block_data_order(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
asmlinkage void sha256_block_neon(u32 state[SHA256_STATE_WORDS],
const u8 *data, size_t nblocks);
asmlinkage size_t __sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
@@ -20,11 +20,11 @@ asmlinkage size_t __sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
-void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
const u8 *data, size_t nblocks)
{
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
- static_branch_likely(&have_neon)) {
+ static_branch_likely(&have_neon) && crypto_simd_usable()) {
if (static_branch_likely(&have_ce)) {
do {
size_t rem;
@@ -42,10 +42,10 @@ void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
kernel_neon_end();
}
} else {
- sha256_blocks_arch(state, data, nblocks);
+ sha256_block_data_order(state, data, nblocks);
}
}
-EXPORT_SYMBOL_GPL(sha256_blocks_simd);
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
bool sha256_is_arch_optimized(void)
{
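
The arm64 Crypto Extensions path (mostly elided above) processes the input in
bounded chunks, dropping the NEON context between chunks to limit preemption
latency. A sketch of the loop's likely shape: the visible lines confirm rem,
__sha256_ce_transform() returning a leftover block count, and
kernel_neon_end(); the pointer-advance arithmetic is an assumption:

	do {
		size_t rem;

		kernel_neon_begin();
		rem = __sha256_ce_transform(state, data, nblocks);
		kernel_neon_end();
		/* assumed: skip the blocks consumed in this round */
		data += (nblocks - rem) * SHA256_BLOCK_SIZE;
		nblocks = rem;
	} while (nblocks);
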
diff --git a/lib/crypto/arm64/sha512.h b/lib/crypto/arm64/sha512.h
index eae14f9752e0..6abb40b467f2 100644
--- a/lib/crypto/arm64/sha512.h
+++ b/lib/crypto/arm64/sha512.h
@@ -11,8 +11,8 @@
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha512_insns);
-asmlinkage void sha512_blocks_arch(struct sha512_block_state *state,
- const u8 *data, size_t nblocks);
+asmlinkage void sha512_block_data_order(struct sha512_block_state *state,
+ const u8 *data, size_t nblocks);
asmlinkage size_t __sha512_ce_transform(struct sha512_block_state *state,
const u8 *data, size_t nblocks);
@@ -32,7 +32,7 @@ static void sha512_blocks(struct sha512_block_state *state,
nblocks = rem;
} while (nblocks);
} else {
- sha512_blocks_arch(state, data, nblocks);
+ sha512_block_data_order(state, data, nblocks);
}
}
diff --git a/lib/crypto/riscv/Kconfig b/lib/crypto/riscv/Kconfig
index 47c99ea97ce2..c100571feb7e 100644
--- a/lib/crypto/riscv/Kconfig
+++ b/lib/crypto/riscv/Kconfig
@@ -12,5 +12,4 @@ config CRYPTO_SHA256_RISCV64
depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO
default CRYPTO_LIB_SHA256
select CRYPTO_ARCH_HAVE_LIB_SHA256
- select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
select CRYPTO_LIB_SHA256_GENERIC
diff --git a/lib/crypto/riscv/sha256.c b/lib/crypto/riscv/sha256.c
index 71808397dff4..aa77349d08f3 100644
--- a/lib/crypto/riscv/sha256.c
+++ b/lib/crypto/riscv/sha256.c
@@ -11,6 +11,7 @@
#include <asm/vector.h>
#include <crypto/internal/sha2.h>
+#include <crypto/internal/simd.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -19,10 +20,10 @@ asmlinkage void sha256_transform_zvknha_or_zvknhb_zvkb(
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_extensions);
-void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
const u8 *data, size_t nblocks)
{
- if (static_branch_likely(&have_extensions)) {
+ if (static_branch_likely(&have_extensions) && crypto_simd_usable()) {
kernel_vector_begin();
sha256_transform_zvknha_or_zvknhb_zvkb(state, data, nblocks);
kernel_vector_end();
@@ -30,13 +31,6 @@ void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
sha256_blocks_generic(state, data, nblocks);
}
}
-EXPORT_SYMBOL_GPL(sha256_blocks_simd);
-
-void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
- const u8 *data, size_t nblocks)
-{
- sha256_blocks_generic(state, data, nblocks);
-}
EXPORT_SYMBOL_GPL(sha256_blocks_arch);
bool sha256_is_arch_optimized(void)
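
RISC-V keeps no scalar assembly, so the consolidated function falls straight
back to the generic C implementation, and the trivial sha256_blocks_arch()
wrapper that existed only to satisfy the old two-symbol interface is deleted.
The resulting function, read off the hunks above:

	void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
				const u8 *data, size_t nblocks)
	{
		if (static_branch_likely(&have_extensions) && crypto_simd_usable()) {
			kernel_vector_begin();	/* take the vector unit */
			sha256_transform_zvknha_or_zvknhb_zvkb(state, data, nblocks);
			kernel_vector_end();
		} else {
			sha256_blocks_generic(state, data, nblocks);	/* portable C */
		}
	}
	EXPORT_SYMBOL_GPL(sha256_blocks_arch);
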
diff --git a/lib/crypto/x86/Kconfig b/lib/crypto/x86/Kconfig
index 5e94cdee492c..e344579db3d8 100644
--- a/lib/crypto/x86/Kconfig
+++ b/lib/crypto/x86/Kconfig
@@ -30,5 +30,4 @@ config CRYPTO_SHA256_X86_64
depends on 64BIT
default CRYPTO_LIB_SHA256
select CRYPTO_ARCH_HAVE_LIB_SHA256
- select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
select CRYPTO_LIB_SHA256_GENERIC
diff --git a/lib/crypto/x86/sha256.c b/lib/crypto/x86/sha256.c
index 80380f8fdcee..baba74d7d26f 100644
--- a/lib/crypto/x86/sha256.c
+++ b/lib/crypto/x86/sha256.c
@@ -6,6 +6,7 @@
*/
#include <asm/fpu/api.h>
#include <crypto/internal/sha2.h>
+#include <crypto/internal/simd.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/static_call.h>
@@ -23,10 +24,10 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha256_x86);
DEFINE_STATIC_CALL(sha256_blocks_x86, sha256_transform_ssse3);
-void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
const u8 *data, size_t nblocks)
{
- if (static_branch_likely(&have_sha256_x86)) {
+ if (static_branch_likely(&have_sha256_x86) && crypto_simd_usable()) {
kernel_fpu_begin();
static_call(sha256_blocks_x86)(state, data, nblocks);
kernel_fpu_end();
@@ -34,13 +35,6 @@ void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
sha256_blocks_generic(state, data, nblocks);
}
}
-EXPORT_SYMBOL_GPL(sha256_blocks_simd);
-
-void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
- const u8 *data, size_t nblocks)
-{
- sha256_blocks_generic(state, data, nblocks);
-}
EXPORT_SYMBOL_GPL(sha256_blocks_arch);
bool sha256_is_arch_optimized(void)
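
On x86 the SIMD routine is selected through a static call that defaults to the
SSSE3 implementation; the best available variant is patched in at init time. A
minimal sketch of that pattern, assuming a hypothetical init hook and an AVX2
variant sha256_transform_rorx (neither appears in this hunk):

	static int __init sha256_x86_mod_init(void)	/* hypothetical name */
	{
		if (boot_cpu_has(X86_FEATURE_SSSE3)) {
			if (boot_cpu_has(X86_FEATURE_AVX2) &&
			    boot_cpu_has(X86_FEATURE_BMI2))
				static_call_update(sha256_blocks_x86,
						   sha256_transform_rorx);
			static_branch_enable(&have_sha256_x86);
		}
		return 0;
	}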