path: root/arch/x86/kernel/efi_stub_64.S
author     Huang, Ying <ying.huang@intel.com>  2008-01-30 13:31:19 +0100
committer  Ingo Molnar <mingo@elte.hu>  2008-01-30 13:31:19 +0100
commit     5b83683f32b113d07edfb67a33ce389fc624423d (patch)
tree       03efde0750c9d7e477ab695aeee26173ffcc4abf /arch/x86/kernel/efi_stub_64.S
parent     8c8b8859b64baf6d7c33900e8720c7bafe775b2c (diff)
x86: EFI runtime service support
This patch adds basic runtime services support for EFI x86_64 systems. The main part of the patch is the addition of efi_64.c for x86_64, modeled after its EFI IA32 counterpart. EFI runtime services initialization is implemented in efi_64.c. One x86_64 specific is worth noting here: parameters passed to EFI firmware services need to follow the EFI calling convention, so a set of functions named efi_call<x> (<x> is the number of parameters) is implemented, and EFI function calls are wrapped in them before calling into the firmware. The code duplicated between efi_32.c and efi_64.c is moved into efi.c and removed from efi_32.c.

Signed-off-by: Chandramouli Narayanan <mouli@linux.intel.com>
Signed-off-by: Huang Ying <ying.huang@intel.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
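
For orientation, here is a minimal, hypothetical sketch of how C code such as efi_64.c might invoke the efi_call<x> stubs added below. The wrapper name example_get_time and the get_time_fp parameter are illustrative assumptions, not code from this patch; only the efi_call2 stub itself is defined in the diff that follows.

/*
 * Illustrative sketch only -- not part of this patch.  efi_call2() is one
 * of the stubs defined in efi_stub_64.S: the first argument is the EFI
 * function pointer, the remaining arguments are forwarded after being
 * remapped from the Linux (SysV AMD64) calling convention to the EFI
 * (Microsoft x64) calling convention.
 */
#include <linux/types.h>
#include <linux/efi.h>

extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);

/* Hypothetical wrapper around the GetTime() EFI runtime service. */
static efi_status_t example_get_time(void *get_time_fp,
				     efi_time_t *tm, efi_time_cap_t *tc)
{
	return (efi_status_t)efi_call2(get_time_fp, (u64)tm, (u64)tc);
}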
Diffstat (limited to 'arch/x86/kernel/efi_stub_64.S')
-rw-r--r--  arch/x86/kernel/efi_stub_64.S  109
1 file changed, 109 insertions, 0 deletions
diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
new file mode 100644
index 00000000000..99b47d48c9f
--- /dev/null
+++ b/arch/x86/kernel/efi_stub_64.S
@@ -0,0 +1,109 @@
+/*
+ * Function calling ABI conversion from Linux to EFI for x86_64
+ *
+ * Copyright (C) 2007 Intel Corp
+ * Bibo Mao <bibo.mao@intel.com>
+ * Huang Ying <ying.huang@intel.com>
+ */
+
+#include <linux/linkage.h>
+
+#define SAVE_XMM \
+ mov %rsp, %rax; \
+ subq $0x70, %rsp; \
+ and $~0xf, %rsp; \
+ mov %rax, (%rsp); \
+ mov %cr0, %rax; \
+ clts; \
+ mov %rax, 0x8(%rsp); \
+ movaps %xmm0, 0x60(%rsp); \
+ movaps %xmm1, 0x50(%rsp); \
+ movaps %xmm2, 0x40(%rsp); \
+ movaps %xmm3, 0x30(%rsp); \
+ movaps %xmm4, 0x20(%rsp); \
+ movaps %xmm5, 0x10(%rsp)
+
+#define RESTORE_XMM \
+ movaps 0x60(%rsp), %xmm0; \
+ movaps 0x50(%rsp), %xmm1; \
+ movaps 0x40(%rsp), %xmm2; \
+ movaps 0x30(%rsp), %xmm3; \
+ movaps 0x20(%rsp), %xmm4; \
+ movaps 0x10(%rsp), %xmm5; \
+ mov 0x8(%rsp), %rsi; \
+ mov %rsi, %cr0; \
+ mov (%rsp), %rsp
+
+ENTRY(efi_call0)
+ SAVE_XMM
+ subq $32, %rsp
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
+ ret
+
+ENTRY(efi_call1)
+ SAVE_XMM
+ subq $32, %rsp
+ mov %rsi, %rcx
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
+ ret
+
+ENTRY(efi_call2)
+ SAVE_XMM
+ subq $32, %rsp
+ mov %rsi, %rcx
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
+ ret
+
+ENTRY(efi_call3)
+ SAVE_XMM
+ subq $32, %rsp
+ mov %rcx, %r8
+ mov %rsi, %rcx
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
+ ret
+
+ENTRY(efi_call4)
+ SAVE_XMM
+ subq $32, %rsp
+ mov %r8, %r9
+ mov %rcx, %r8
+ mov %rsi, %rcx
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
+ ret
+
+ENTRY(efi_call5)
+ SAVE_XMM
+ subq $48, %rsp
+ mov %r9, 32(%rsp)
+ mov %r8, %r9
+ mov %rcx, %r8
+ mov %rsi, %rcx
+ call *%rdi
+ addq $48, %rsp
+ RESTORE_XMM
+ ret
+
+ENTRY(efi_call6)
+ SAVE_XMM
+ mov (%rsp), %rax
+ mov 8(%rax), %rax
+ subq $48, %rsp
+ mov %r9, 32(%rsp)
+ mov %rax, 40(%rsp)
+ mov %r8, %r9
+ mov %rcx, %r8
+ mov %rsi, %rcx
+ call *%rdi
+ addq $48, %rsp
+ RESTORE_XMM
+ ret
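
As a reading aid (not part of the patch), the register shuffles above follow from converting the Linux SysV AMD64 argument registers into the Microsoft x64 registers that EFI expects. For efi_call3, for example, the mapping works out roughly as summarized below.

/*
 * Informal summary of efi_call3, assuming SysV AMD64 on the Linux side
 * and the Microsoft x64 convention on the EFI side:
 *
 *   Linux caller register      EFI callee register
 *   ---------------------      -------------------
 *   %rdi  (function pointer)   target of "call *%rdi"
 *   %rsi  (arg1)            -> %rcx
 *   %rdx  (arg2)            -> %rdx  (already in place, no move emitted)
 *   %rcx  (arg3)            -> %r8   (moved before %rcx is overwritten)
 *
 * "subq $32, %rsp" reserves the 32-byte shadow space the Microsoft x64
 * ABI requires; efi_call5 and efi_call6 reserve 48 bytes so the fifth and
 * sixth arguments can be stored at 32(%rsp) and 40(%rsp) above it, and
 * SAVE_XMM/RESTORE_XMM preserve %xmm0-%xmm5 around the firmware call.
 */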