1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
|
/*
* Copyright (c) 2012-2017 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <kern/init.h>
#include <machine/asm.h>
#include <machine/cpu.h>
.section INIT_SECTION
#ifdef __LP64__
/*
 * void tcb_context_load(struct tcb *tcb)  (amd64, System V: tcb in %rdi)
 *
 * Load the frame and stack pointers saved in the given TCB, then jump to
 * the common restoration code, which pops the callee-saved registers from
 * the new stack and returns there. Does not return to the caller.
 *
 * TCB layout used here: frame pointer at offset 0, stack pointer at
 * offset 8 — matches the stores in tcb_context_switch.
 */
ASM_ENTRY(tcb_context_load)
movq (%rdi), %rbp /* load frame pointer from TCB */
movq 8(%rdi), %rsp /* load stack pointer from TCB */
jmp tcb_context_restore /* pop callee-saved registers and return on the new stack */
ASM_END(tcb_context_load)
#else /* __LP64__ */
/*
 * void tcb_context_load(struct tcb *tcb)  (i386, cdecl: tcb at 4(%esp))
 *
 * Load the frame and stack pointers saved in the given TCB, then jump to
 * the common restoration code, which pops the callee-saved registers from
 * the new stack and returns there. Does not return to the caller.
 *
 * TCB layout used here: frame pointer at offset 0, stack pointer at
 * offset 4 — matches the stores in tcb_context_switch.
 */
ASM_ENTRY(tcb_context_load)
movl 4(%esp), %eax /* load TCB address */
movl (%eax), %ebp /* load frame pointer from TCB */
movl 4(%eax), %esp /* load stack pointer from TCB */
jmp tcb_context_restore /* pop callee-saved registers and return on the new stack */
ASM_END(tcb_context_load)
#endif /* __LP64__ */
.text
#ifdef __LP64__
/*
 * Entry point of a new thread (amd64).
 *
 * The function to run and its argument are popped off the thread's stack
 * (presumably placed there when the TCB was built — confirm in the TCB
 * setup code) into the System V AMD64 argument registers, then control is
 * handed to thread_main.
 */
ASM_ENTRY(tcb_start)
popq %rdi /* load function */
popq %rsi /* load argument */
/*
 * Use the call instruction to start a clean stack trace.
 *
 * Note that, on amd64, the stack must be 16-byte aligned before the call
 * instruction, so that "(%rsp + 8) is always a multiple of 16 when
 * control is transferred to the function entry point" (System V AMD64
 * ABI), which is another reason to use call instead of a bare jump.
 */
call thread_main
/* Never reached */
nop /* make the return address point to an instruction
inside the function to build a clean stack trace */
ASM_END(tcb_start)
/*
 * void tcb_context_switch(struct tcb *prev, struct tcb *next)
 * (amd64, System V: prev in %rdi, next in %rsi)
 *
 * Save the current thread's callee-saved registers on its stack and its
 * frame/stack pointers in the prev TCB (offsets 0 and 8), then load the
 * next TCB's pointers and fall through to the restoration code, which
 * returns on the next thread's stack. %rbp is preserved implicitly: it is
 * stored in the TCB rather than pushed.
 */
ASM_ENTRY(tcb_context_switch)
pushq %rbx /* store registers as required by ABI */
pushq %r12
pushq %r13
pushq %r14
pushq %r15
movq %rbp, (%rdi) /* store frame pointer into prev TCB */
movq %rsp, 8(%rdi) /* store stack pointer into prev TCB */
movq (%rsi), %rbp /* load frame pointer from next TCB */
movq 8(%rsi), %rsp /* load stack pointer from next TCB */
/*
 * This code is run on context restoration. The frame and stack pointers
 * have already been loaded to their correct values. Load registers which
 * were stored on the stack when the context was saved and return.
 */
.global tcb_context_restore
tcb_context_restore:
popq %r15 /* load registers as required by ABI */
popq %r14
popq %r13
popq %r12
popq %rbx
ret
ASM_END(tcb_context_switch)
#else /* __LP64__ */
/*
 * Entry point of a new thread (i386).
 *
 * The cdecl arguments for thread_main are presumably already laid out on
 * the thread's stack (arranged when the TCB was built — confirm in the
 * TCB setup code), so the function can be called directly.
 */
ASM_ENTRY(tcb_start)
call thread_main /* the stack already contains the expected arguments
in the expected order, use the call instruction to
start a clean stack trace */
/* Never reached */
nop /* make the return address point to an instruction
inside the function to build a clean stack trace */
ASM_END(tcb_start)
/*
 * void tcb_context_switch(struct tcb *prev, struct tcb *next)
 * (i386, cdecl: prev at 4(%esp), next at 8(%esp))
 *
 * Save the current thread's callee-saved registers on its stack and its
 * frame/stack pointers in the prev TCB (offsets 0 and 4), then load the
 * next TCB's pointers and fall through to the restoration code, which
 * returns on the next thread's stack. %ebp is preserved implicitly: it is
 * stored in the TCB rather than pushed.
 */
ASM_ENTRY(tcb_context_switch)
movl 4(%esp), %eax /* load prev TCB address */
movl 8(%esp), %ecx /* load next TCB address */
pushl %ebx /* store registers as required by ABI */
pushl %edi
pushl %esi
movl %ebp, (%eax) /* store frame pointer into prev TCB */
movl %esp, 4(%eax) /* store stack pointer into prev TCB */
movl (%ecx), %ebp /* load frame pointer from next TCB */
movl 4(%ecx), %esp /* load stack pointer from next TCB */
/*
 * This code is run on context restoration. The frame and stack pointers
 * have already been loaded to their correct values. Load registers which
 * were stored on the stack when the context was saved and return.
 */
.global tcb_context_restore
tcb_context_restore:
popl %esi /* load registers as required by ABI */
popl %edi
popl %ebx
ret
ASM_END(tcb_context_switch)
#endif /* __LP64__ */
|