author    Alexandre Ghiti <alexghiti@rivosinc.com>  2025-06-02 21:39:14 +0200
committer Palmer Dabbelt <palmer@dabbelt.com>       2025-06-05 11:39:15 -0700
commit    a4348546332c9fae12b29acb514535e0a52b9b3c (patch)
tree      d1206b2e3e413fb2f86c41fde4b990463cc3cf16 /arch/riscv/lib/riscv_v_helpers.c
parent    259aaf03d7a03fe3c2f909deaeea7ce84ed47880 (diff)
riscv: make unsafe user copy routines use existing assembly routines
The current implementation is underperforming and, in addition, triggers
misaligned access traps on platforms which do not handle misaligned
accesses in hardware. Use the existing assembly routines to solve both
problems at once.

Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Link: https://lore.kernel.org/r/20250602193918.868962-2-cleger@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
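For context, here is a minimal C model of the split this patch introduces,
assuming the convention that the plain copy routines raise and drop the
sstatus.SUM bit (which permits S-mode access to user pages) around the copy
themselves, while the *_sum_enabled variants expect the caller to have done
so already. The sstatus_set()/sstatus_clear()/do_copy() helpers below are
hypothetical stand-ins for illustration, not kernel APIs:

#include <stddef.h>
#include <string.h>

#define SR_SUM (1UL << 18)  /* sstatus.SUM bit on RISC-V */

/* Stand-ins: real code writes the sstatus CSR; here we only track the
 * bit in a variable so the model compiles and runs anywhere. */
static unsigned long fake_sstatus;
static void sstatus_set(unsigned long m)   { fake_sstatus |= m; }
static void sstatus_clear(unsigned long m) { fake_sstatus &= ~m; }

/* Stand-in for the copy loop; returns the number of bytes NOT copied. */
static size_t do_copy(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

/* Models the plain variant: brackets the copy with SUM toggling. */
static size_t copy_toggle_sum(void *dst, const void *src, size_t n)
{
	sstatus_set(SR_SUM);            /* allow access to user pages */
	size_t remain = do_copy(dst, src, n);
	sstatus_clear(SR_SUM);
	return remain;
}

/* Models a *_sum_enabled variant: the caller already raised SUM, so the
 * redundant CSR writes are skipped. */
static size_t copy_sum_enabled(void *dst, const void *src, size_t n)
{
	return do_copy(dst, src, n);
}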
Diffstat (limited to 'arch/riscv/lib/riscv_v_helpers.c')
-rw-r--r--  arch/riscv/lib/riscv_v_helpers.c | 11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/arch/riscv/lib/riscv_v_helpers.c b/arch/riscv/lib/riscv_v_helpers.c
index be38a93cedaec..7bbdfc6d4552d 100644
--- a/arch/riscv/lib/riscv_v_helpers.c
+++ b/arch/riscv/lib/riscv_v_helpers.c
@@ -16,8 +16,11 @@
 #ifdef CONFIG_MMU
 size_t riscv_v_usercopy_threshold = CONFIG_RISCV_ISA_V_UCOPY_THRESHOLD;
 int __asm_vector_usercopy(void *dst, void *src, size_t n);
+int __asm_vector_usercopy_sum_enabled(void *dst, void *src, size_t n);
 int fallback_scalar_usercopy(void *dst, void *src, size_t n);
-asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n)
+int fallback_scalar_usercopy_sum_enabled(void *dst, void *src, size_t n);
+asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n,
+				     bool enable_sum)
 {
 	size_t remain, copied;
 
@@ -26,7 +29,8 @@ asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n)
 		goto fallback;
 
 	kernel_vector_begin();
-	remain = __asm_vector_usercopy(dst, src, n);
+	remain = enable_sum ? __asm_vector_usercopy(dst, src, n) :
+			      __asm_vector_usercopy_sum_enabled(dst, src, n);
 	kernel_vector_end();
 
 	if (remain) {
@@ -40,6 +44,7 @@ asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n)
 	return remain;
 
 fallback:
-	return fallback_scalar_usercopy(dst, src, n);
+	return enable_sum ? fallback_scalar_usercopy(dst, src, n) :
+			    fallback_scalar_usercopy_sum_enabled(dst, src, n);
 }
 #endif
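A sketch of how call sites might use the new parameter, written against the
patched signature above; the wrapper names are hypothetical, chosen only to
illustrate the two cases:

#include <stdbool.h>
#include <stddef.h>

int enter_vector_usercopy(void *dst, void *src, size_t n, bool enable_sum);

/* Caller already raised sstatus.SUM for a whole batch of unsafe
 * accesses, so the copy routine skips the redundant CSR writes. */
static int copy_within_unsafe_region(void *dst, void *src, size_t n)
{
	return enter_vector_usercopy(dst, src, n, false);
}

/* Ordinary standalone copy: let the routine toggle SUM itself. */
static int copy_standalone(void *dst, void *src, size_t n)
{
	return enter_vector_usercopy(dst, src, n, true);
}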