author	Linus Torvalds <torvalds@linux-foundation.org>	2024-04-03 16:36:44 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-04-10 16:19:43 +0200
commit	55516b355b0c6c747fa89edc53f10cf4b03441ef (patch)
tree	14be4f2abd3c38b845379e720d1522423df9e701 /arch/x86/entry/common.c
parent	276fb9a658d0f44bc2e11b5f838cbe4e5759a223 (diff)
x86/syscall: Don't force use of indirect calls for system calls
commit 1e3ad78334a69b36e107232e337f9d693dcc9df2 upstream.

Make <asm/syscall.h> build a switch statement instead, and the compiler can
either decide to generate an indirect jump, or - more likely these days due
to mitigations - just a series of conditional branches.

Yes, the conditional branches also have branch prediction, but the branch
prediction is much more controlled, in that it just causes speculatively
running the wrong system call (harmless), rather than speculatively running
possibly wrong random less controlled code gadgets.

This doesn't mitigate other indirect calls, but the system call indirection
is the first and most easily triggered case.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
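For illustration, a minimal stand-alone sketch of the two dispatch styles the
message contrasts. The identifiers mirror those in the diff below, but the stub
syscalls, the two-entry table, and the hand-written case list are hypothetical
simplifications; the real patch generates the switch cases from the syscall
table with the __SYSCALL macro rather than by hand.

/*
 * Stand-alone sketch; names mirror the kernel's, but everything here is a
 * simplified stand-in, not the actual kernel code.
 */
struct pt_regs { long ax; };	/* vastly simplified stand-in */

static long __x64_sys_read(struct pt_regs *regs)       { (void)regs; return 0; }
static long __x64_sys_write(struct pt_regs *regs)      { (void)regs; return 0; }
static long __x64_sys_ni_syscall(struct pt_regs *regs) { (void)regs; return -38; /* -ENOSYS */ }

/*
 * Before: a table of function pointers, dispatched with an indirect call.
 * Under speculation mitigations that indirect call becomes a retpoline thunk,
 * and a mistrained predictor can speculatively reach arbitrary gadgets.
 */
typedef long (*sys_call_ptr_t)(struct pt_regs *);
static const sys_call_ptr_t sys_call_table[] = { __x64_sys_read, __x64_sys_write };

static long dispatch_via_table(struct pt_regs *regs, unsigned int nr)
{
	return sys_call_table[nr](regs);	/* indirect call */
}

/*
 * After: a switch statement, which the compiler can lower to conditional
 * direct branches. Mispredicting one of these only speculatively runs the
 * wrong system call, not an uncontrolled gadget.
 */
static long dispatch_via_switch(struct pt_regs *regs, unsigned int nr)
{
	switch (nr) {
	case 0:  return __x64_sys_read(regs);
	case 1:  return __x64_sys_write(regs);
	default: return __x64_sys_ni_syscall(regs);
	}
}

int main(void)
{
	struct pt_regs regs = { 0 };
	return (int)(dispatch_via_table(&regs, 0) | dispatch_via_switch(&regs, 1));
}

Under retpoline mitigations the table variant pays a thunk on every system
call; the switch variant gives the branch predictor a far more constrained set
of targets, which is the point the commit message makes.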
Diffstat (limited to 'arch/x86/entry/common.c')
-rw-r--r--	arch/x86/entry/common.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 6c2826417b33..e160f502d1dc 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -47,7 +47,7 @@ static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
 
 	if (likely(unr < NR_syscalls)) {
 		unr = array_index_nospec(unr, NR_syscalls);
-		regs->ax = sys_call_table[unr](regs);
+		regs->ax = x64_sys_call(regs, unr);
 		return true;
 	}
 	return false;
@@ -64,7 +64,7 @@ static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
 
 	if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) {
 		xnr = array_index_nospec(xnr, X32_NR_syscalls);
-		regs->ax = x32_sys_call_table[xnr](regs);
+		regs->ax = x32_sys_call(regs, xnr);
 		return true;
 	}
 	return false;
@@ -109,7 +109,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
 
 	if (likely(unr < IA32_NR_syscalls)) {
 		unr = array_index_nospec(unr, IA32_NR_syscalls);
-		regs->ax = ia32_sys_call_table[unr](regs);
+		regs->ax = ia32_sys_call(regs, unr);
 	} else if (nr != -1) {
 		regs->ax = __ia32_sys_ni_syscall(regs);
 	}