/* x86_64.h - The x86_64 architecture.
   Copyright (C) 2006, 2007 Marcus Brinkmann  <marcus@gnu.org>
   Copyright (C) 2007 Tom Bachmann <e_mc_h2@web.de>

   This file is part of the GNU Hurd.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */

#ifndef X86_64_X86_64_H
#define X86_64_X86_64_H X86_64_X86_64_H

#include <stdbool.h>
#include <stdint.h>
#include <assert.h>

/* read the time stamp counter */
static inline uint64_t
__attribute__ ((__always_inline__))
x86_64_rdtsc (void)
{
  uint64_t eax, edx;
  asm volatile ("rdtsc" : "=a" (eax), "=d" (edx));
  return eax | edx << 32;
}
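
/* Usage sketch: the TSC can be read before and after a region of
   interest to estimate elapsed cycles.  do_work is a hypothetical
   function; note that RDTSC is not a serializing instruction, so a
   fence would normally be added for precise measurements.

     uint64_t start = x86_64_rdtsc ();
     do_work ();
     uint64_t cycles = x86_64_rdtsc () - start;
*/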


/* The supported privilege levels.  These are used as requested,
   current and descriptor privilege levels.  */
#define X86_64_PL0	(0)
#define X86_64_PL1	(1)
#define X86_64_PL2	(2)
#define X86_64_PL3	(3)


/* system segment descriptor */
typedef struct __attribute__ ((__packed__))
{
  uintptr_t limit_l : 16;	/* segment limit, bits 0-15 */
  uintptr_t base_l  : 24;	/* base address, bits 0-23 */
  uintptr_t type    : 4;	/* descriptor type */
  uintptr_t mbz1    : 1;	/* must be zero (system descriptor) */
  uintptr_t dpl     : 2;	/* descriptor privilege level */
  uintptr_t p       : 1;	/* present */
  uintptr_t limit_h : 4;	/* segment limit, bits 16-19 */
  uintptr_t avl     : 1;	/* available to system software */
  uintptr_t ignored : 2;
  uintptr_t g       : 1;	/* granularity (limit in 4 KiB units) */
  uintptr_t base_m  : 8;	/* base address, bits 24-31 */

  uintptr_t base_h  : 32;	/* base address, bits 32-63 */
  uintptr_t reserved_ign1 : 8;
  uintptr_t mbz2    : 5;	/* must be zero */
  uintptr_t reserved_ign2 : 19;
} x86_64_sysdesc_t;

/* system segment descriptor types */
#define X86_64_SYSDESC_TYPE_LDT       0x2
#define X86_64_SYSDESC_TYPE_AVL_TSS   0x9
#define X86_64_SYSDESC_TYPE_BUSY_TSS  0xb
#define X86_64_SYSDESC_TYPE_CALL_GATE 0xc
#define X86_64_SYSDESC_TYPE_INT_GATE  0xe
#define X86_64_SYSDESC_TYPE_TRAP_GATE 0xf
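
/* Usage sketch: filling in a 16-byte descriptor for an available
   64-bit TSS.  tss_base and tss_size are hypothetical values naming
   the TSS's linear address and its size in bytes.

     x86_64_sysdesc_t desc = { 0 };
     desc.limit_l = (tss_size - 1) & 0xffff;
     desc.limit_h = ((tss_size - 1) >> 16) & 0xf;
     desc.base_l  = tss_base & 0xffffff;
     desc.base_m  = (tss_base >> 24) & 0xff;
     desc.base_h  = tss_base >> 32;
     desc.type    = X86_64_SYSDESC_TYPE_AVL_TSS;
     desc.dpl     = X86_64_PL0;
     desc.p       = 1;
*/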


/* descriptor registers (tr, [lgi]dtr) */
typedef struct __attribute__ ((__packed__))
{
  uint16_t size;
  uint64_t address;
} x86_64_pseudosel_t;
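
/* Usage sketch: loading a global descriptor table with the lgdt
   instruction.  gdt stands for a hypothetical, statically allocated
   descriptor table.

     x86_64_pseudosel_t gdtr;
     gdtr.size = sizeof (gdt) - 1;
     gdtr.address = (uint64_t) &gdt;
     asm volatile ("lgdt %0" : : "m" (gdtr));
*/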


/* cr0 (3.1.1) */

/* Monitor Coprocessor.  */
#define X86_64_CR0_MP	(1 << 1)

/* Emulation.  */
#define X86_64_CR0_EM	(1 << 2)

/* Set or clear the flags FLAGS in CR0.  */
static inline void
__attribute__ ((__always_inline__))
x86_64_load_cr0 (uintptr_t flags, bool enable)
{
  uintptr_t cr0;

  asm volatile ("movq %%cr0, %[cr0]"
		: [cr0] "=r" (cr0));
  if (enable)
    cr0 |= flags;
  else
    cr0 &= ~flags;

  asm volatile ("movq %[cr0], %%cr0"
		: : [cr0] "r" (cr0));
}
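
/* Usage sketch: a typical FPU setup clears emulation and sets the
   monitor-coprocessor bit.

     x86_64_load_cr0 (X86_64_CR0_EM, false);
     x86_64_load_cr0 (X86_64_CR0_MP, true);
*/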


/* cr4 (3.1.3) */

/* Page Global Enable.  */
#define X86_64_CR4_PGE	(1 << 7)

/* Set or clear the flags FLAGS in CR4.  */
static inline void
__attribute__ ((__always_inline__))
x86_64_load_cr4 (uintptr_t flags, bool enable)
{
  uintptr_t cr4;

  asm volatile ("movq %%cr4, %[cr4]"
		: [cr4] "=r" (cr4));
  if (enable)
    cr4 |= flags;
  else
    cr4 &= ~flags;

  asm volatile ("movq %[cr4], %%cr4"
		: : [cr4] "r" (cr4));
}
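
/* Usage sketch: toggling PGE off and on again flushes all TLB
   entries, including global ones.

     x86_64_load_cr4 (X86_64_CR4_PGE, false);
     x86_64_load_cr4 (X86_64_CR4_PGE, true);
*/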


/* model specific registers */
#define X86_64_MSR_EFER   0xc0000080
#define X86_64_MSR_STAR   0xc0000081
#define X86_64_MSR_LSTAR  0xc0000082
#define X86_64_MSR_CSTAR  0xc0000083
#define X86_64_MSR_SFMASK 0xc0000084

/* these MSRs can be used to set the 64-bit base addresses of gs and fs */
#define X86_64_MSR_FSBASE 0xc0000100
#define X86_64_MSR_GSBASE 0xc0000101


/* functions to access model specific registers */
static inline uintptr_t
__attribute__ ((__always_inline__))
x86_64_rdmsr (uint32_t msr)
{
  uint64_t eax, edx;
  asm volatile ("rdmsr" : "=a" (eax), "=d" (edx)
		        : "c" (msr));
  return eax | edx << 32;
}


static inline void
__attribute__ ((__always_inline__))
x86_64_wrmsr (uint32_t msr, uintptr_t v)
{
  uint64_t eax = v, edx = v >> 32;
  asm volatile ("wrmsr" :: "a" (eax), "d" (edx), "c"(msr));
}


/* efer - extended feature enable register (3.1.8) */
/* System call extensions.  */
#define X86_64_EFER_SCE (1 << 0)
/* Long mode enable.  */
#define X86_64_EFER_LME (1 << 8)
/* Long mode active.  */
#define X86_64_EFER_LMA (1 << 10)
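
/* Usage sketch: enabling the SYSCALL/SYSRET instructions is a
   read-modify-write of EFER.

     x86_64_wrmsr (X86_64_MSR_EFER,
		   x86_64_rdmsr (X86_64_MSR_EFER) | X86_64_EFER_SCE);
*/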


/* STAR (6.1.1) */
static inline void
__attribute__ ((__always_inline__))
x86_64_star_set (uintptr_t call_selector, uintptr_t ret_selector)
{
  x86_64_wrmsr (X86_64_MSR_STAR, ret_selector << 48 | call_selector << 32);
}
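
/* Usage sketch: a typical SYSCALL setup programs STAR, LSTAR and
   SFMASK together, so that IF is cleared on kernel entry.  KERNEL_CS,
   RET_CS_BASE and syscall_entry are hypothetical names for the kernel
   code selector, the SYSRET selector base and the kernel's syscall
   entry point; X86_64_RFL_IF is defined further below.

     x86_64_star_set (KERNEL_CS, RET_CS_BASE);
     x86_64_wrmsr (X86_64_MSR_LSTAR, (uintptr_t) &syscall_entry);
     x86_64_wrmsr (X86_64_MSR_SFMASK, X86_64_RFL_IF);
*/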


/* RFLAGS (3.1.7).  */
struct rflags
{
  union
  {
    struct
    {
      uintptr_t cf   : 1; /* carry flag */
      uintptr_t rsv_rao : 1; /* reserved, read as one */
      uintptr_t pf   : 1; /* parity flag */
      uintptr_t rsv_raz1 : 1; /* reserved, read as zero */
      uintptr_t af   : 1; /* auxiliary flag */
      uintptr_t rsv_raz2 : 1;
      uintptr_t zf   : 1; /* zero flag */
      uintptr_t sf   : 1; /* sign flag */
      uintptr_t tf   : 1; /* trap flag */
      uintptr_t if_  : 1; /* interrupt flag (if is a keyword) */
      uintptr_t df   : 1; /* direction flag */
      uintptr_t of   : 1; /* overflow flag */
      uintptr_t iopl : 2; /* io privilege level */
      uintptr_t nt   : 1; /* nested task */
      uintptr_t rsv_raz3 : 1;
      uintptr_t rf   : 1; /* resume flag */
      uintptr_t vm   : 1; /* virtual 8086 mode */
      uintptr_t ac   : 1; /* alignment check */
      uintptr_t vif  : 1; /* virtual interrupt flag */
      uintptr_t vip  : 1; /* virtual interrupt pending */
      uintptr_t id   : 1; /* id flag */
      uintptr_t rsv_raz4 : 42;
    };
    uintptr_t raw;
  };
};
build_assert (sizeof (struct rflags) == sizeof (uint64_t));

#define RFLAGS(value)						\
  ({								\
    struct rflags _rflags = { { .raw = (value) } };		\
    _rflags;							\
  })

#define X86_64_RFL_IF  (1 << 9)

static inline struct rflags
x86_64_rflags_get (void)
{
  struct rflags rflags;
  asm volatile ("pushfq; popq %0" : "=r" (rflags.raw));
  return rflags;
}

static inline void
x86_64_rflags_set (struct rflags rflags)
{
  asm volatile ("pushq %0; popfq" :: "r" (rflags.raw));
}
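
/* Usage sketch: saving the flags, disabling interrupts and restoring
   the previous state afterwards.  do_critical_work is a hypothetical
   function standing in for code that must run with interrupts
   disabled.

     struct rflags saved = x86_64_rflags_get ();
     x86_64_rflags_set (RFLAGS (saved.raw & ~X86_64_RFL_IF));
     do_critical_work ();
     x86_64_rflags_set (saved);
*/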

#endif