author    Neal H. Walfield <neal@gnu.org>    2008-12-17 18:45:14 +0100
committer Neal H. Walfield <neal@gnu.org>    2008-12-17 18:45:14 +0100
commit    4b34130b861911b2fbc62e706239d55ce817d203 (patch)
tree      0fb7f4a87b0813e7884b86b7a0a7390265083a2d
parent    a7416b7c63f4954ff78eecf31e5146cb86cda6a6 (diff)
Add a "vg_" to public viengoos identifiers.
2008-12-17  Neal H. Walfield  <neal@gnu.org>

	* viengoos/activity.h: Add a "vg_" to public viengoos identifiers.
	Update users.
	* viengoos/addr-trans.h: Likewise.
	* viengoos/addr.h: Likewise.
	* viengoos/cap.h: Likewise.
	* viengoos/folio.h: Likewise.
	* viengoos/futex.h: Likewise.
	* viengoos/rpc.h: Likewise.
	* viengoos/thread.h: Likewise.
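The change is mechanical: every public viengoos identifier (the types, constants, macros, and functions declared in the viengoos/*.h headers) gains a vg_ or VG_ prefix, and all users are updated. A minimal before/after sketch assembled from the hunks below — the combination of calls is illustrative only, not a compilable excerpt of the tree; note that libhurd-mm identifiers such as storage_alloc and STORAGE_LONG_LIVED are not part of the public viengoos interface and keep their names:

    /* Before: unprefixed public viengoos identifiers.  */
    addr_t a = storage_alloc (ADDR_VOID, cap_page, STORAGE_LONG_LIVED,
                              OBJECT_POLICY_DEFAULT, ADDR_VOID).addr;
    if (ADDR_IS_VOID (a))
      panic ("Out of memory.");

    /* After: vg_/VG_ prefixes on viengoos types, constants, macros and
       functions (addr_t -> vg_addr_t, cap_page -> vg_cap_page, etc.).  */
    vg_addr_t b = storage_alloc (VG_ADDR_VOID, vg_cap_page, STORAGE_LONG_LIVED,
                                 VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID).addr;
    if (VG_ADDR_IS_VOID (b))
      panic ("Out of memory.");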
-rw-r--r--  benchmarks/GCbench.c  34
-rw-r--r--  benchmarks/activity-distribution.c  22
-rw-r--r--  benchmarks/boehm-gc/patches/05-viengoos-scheduler.patch  8
-rw-r--r--  benchmarks/cache.c  56
-rw-r--r--  benchmarks/shared-memory-distribution.c  12
-rw-r--r--  hieronymus/hieronymus.c  52
-rw-r--r--  hurd/exceptions.h  2
-rw-r--r--  hurd/startup.h  14
-rw-r--r--  libc-parts/_exit.c  14
-rw-r--r--  libc-parts/ia32-cmain.c  2
-rw-r--r--  libc-parts/process-spawn.c  290
-rw-r--r--  libc-parts/process-spawn.h  10
-rw-r--r--  libc-parts/s_printf.c  2
-rw-r--r--  libhurd-mm/anonymous.c  90
-rw-r--r--  libhurd-mm/anonymous.h  10
-rw-r--r--  libhurd-mm/as-build-custom.c  14
-rw-r--r--  libhurd-mm/as-build.c  296
-rw-r--r--  libhurd-mm/as-compute-gbits.h  34
-rw-r--r--  libhurd-mm/as-dump.c  82
-rw-r--r--  libhurd-mm/as-lookup.c  190
-rw-r--r--  libhurd-mm/as.c  336
-rw-r--r--  libhurd-mm/as.h  184
-rw-r--r--  libhurd-mm/capalloc.c  66
-rw-r--r--  libhurd-mm/capalloc.h  4
-rw-r--r--  libhurd-mm/exceptions.c  122
-rw-r--r--  libhurd-mm/map.c  34
-rw-r--r--  libhurd-mm/map.h  6
-rw-r--r--  libhurd-mm/message-buffer.c  76
-rw-r--r--  libhurd-mm/message-buffer.h  6
-rw-r--r--  libhurd-mm/mm-init.c  18
-rw-r--r--  libhurd-mm/mm.h  2
-rw-r--r--  libhurd-mm/mmap.c  4
-rw-r--r--  libhurd-mm/mprotect.c  22
-rw-r--r--  libhurd-mm/pager.h  4
-rw-r--r--  libhurd-mm/storage.c  216
-rw-r--r--  libhurd-mm/storage.h  26
-rw-r--r--  libpthread/sysdeps/viengoos/bits/pthread-np.h  2
-rw-r--r--  libpthread/sysdeps/viengoos/ia32/signal-dispatch-lowlevel.c  12
-rw-r--r--  libpthread/sysdeps/viengoos/pt-block.c  2
-rw-r--r--  libpthread/sysdeps/viengoos/pt-setactivity-np.c  6
-rw-r--r--  libpthread/sysdeps/viengoos/pt-sysdep.h  2
-rw-r--r--  libpthread/sysdeps/viengoos/pt-thread-alloc.c  22
-rw-r--r--  libpthread/sysdeps/viengoos/pt-thread-halt.c  6
-rw-r--r--  libpthread/sysdeps/viengoos/pt-thread-start.c  12
-rw-r--r--  libpthread/sysdeps/viengoos/pt-wakeup.c  2
-rw-r--r--  libviengoos/t-addr-trans.c  22
-rw-r--r--  libviengoos/t-addr.c  40
-rw-r--r--  libviengoos/t-rpc.c  22
-rw-r--r--  libviengoos/viengoos/activity.h  30
-rw-r--r--  libviengoos/viengoos/addr-trans.h  98
-rw-r--r--  libviengoos/viengoos/addr.h  118
-rw-r--r--  libviengoos/viengoos/cap.h  480
-rw-r--r--  libviengoos/viengoos/folio.h  171
-rw-r--r--  libviengoos/viengoos/futex.h  48
-rw-r--r--  libviengoos/viengoos/ipc.h  66
-rw-r--r--  libviengoos/viengoos/message.h  26
-rw-r--r--  libviengoos/viengoos/messenger.h  2
-rw-r--r--  libviengoos/viengoos/rpc.h  40
-rw-r--r--  libviengoos/viengoos/thread.h  58
-rw-r--r--  newlib/addon/newlib/libc/sys/hurd/getreent.c  12
-rw-r--r--  newlib/addon/newlib/libc/sys/hurd/pipefile.c  4
-rw-r--r--  ruth/ruth.c  246
-rw-r--r--  viengoos/activity.c  70
-rw-r--r--  viengoos/activity.h  22
-rw-r--r--  viengoos/ager.c  10
-rw-r--r--  viengoos/cap.c  106
-rw-r--r--  viengoos/cap.h  18
-rw-r--r--  viengoos/memory.c  14
-rw-r--r--  viengoos/messenger.c  48
-rw-r--r--  viengoos/messenger.h  14
-rw-r--r--  viengoos/object.c  296
-rw-r--r--  viengoos/object.h  88
-rw-r--r--  viengoos/pager.c  14
-rw-r--r--  viengoos/server.c  600
-rw-r--r--  viengoos/t-activity.c  40
-rw-r--r--  viengoos/t-as.c  188
-rw-r--r--  viengoos/t-guard.c  6
-rw-r--r--  viengoos/thread.c  64
-rw-r--r--  viengoos/thread.h  18
-rw-r--r--  viengoos/viengoos.c  2
80 files changed, 2765 insertions, 2762 deletions
diff --git a/benchmarks/GCbench.c b/benchmarks/GCbench.c
index eb3652e..a7a65d7 100644
--- a/benchmarks/GCbench.c
+++ b/benchmarks/GCbench.c
@@ -94,8 +94,8 @@ static int iter;
#include <hurd/anonymous.h>
#include <string.h>
-addr_t gc_activity;
-addr_t hog_activity;
+vg_addr_t gc_activity;
+vg_addr_t hog_activity;
bool have_a_hog = false;
@@ -176,7 +176,7 @@ helper (void *arg)
{
pagers[c]
= anonymous_pager_alloc (hog_activity, NULL, s, MAP_ACCESS_ALL,
- OBJECT_POLICY (false, OBJECT_PRIORITY_DEFAULT), 0,
+ VG_OBJECT_POLICY (false, VG_OBJECT_PRIORITY_DEFAULT), 0,
NULL, &buffers[c]);
assert (pagers[c]);
assert (buffers[c]);
@@ -238,24 +238,24 @@ helper_fork (void)
int err;
#ifdef __gnu_hurd_viengoos__
- gc_activity = storage_alloc (ADDR_VOID,
- cap_activity_control, STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID).addr;
- if (ADDR_IS_VOID (gc_activity))
+ gc_activity = storage_alloc (VG_ADDR_VOID,
+ vg_cap_activity_control, STORAGE_LONG_LIVED,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID).addr;
+ if (VG_ADDR_IS_VOID (gc_activity))
panic ("Failed to allocate main activity");
struct object_name name;
snprintf (&name.name[0], sizeof (name.name), "gc.%x", l4_myself ());
- rm_object_name (ADDR_VOID, gc_activity, name);
+ rm_object_name (VG_ADDR_VOID, gc_activity, name);
- hog_activity = storage_alloc (ADDR_VOID,
- cap_activity_control, STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID).addr;
- if (ADDR_IS_VOID (hog_activity))
+ hog_activity = storage_alloc (VG_ADDR_VOID,
+ vg_cap_activity_control, STORAGE_LONG_LIVED,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID).addr;
+ if (VG_ADDR_IS_VOID (hog_activity))
panic ("Failed to allocate hog activity");
snprintf (&name.name[0], sizeof (name.name), "hog.%x", l4_myself ());
- rm_object_name (ADDR_VOID, hog_activity, name);
+ rm_object_name (VG_ADDR_VOID, hog_activity, name);
/* We give the main thread and the hog the same priority and
weight. */
@@ -267,16 +267,16 @@ helper_fork (void)
in.child_rel.priority = 2;
in.child_rel.weight = 20;
- err = rm_activity_policy (ADDR_VOID,
- ACTIVITY_POLICY_CHILD_REL_SET, in, &out);
+ err = rm_activity_policy (VG_ADDR_VOID,
+ VG_ACTIVITY_POLICY_CHILD_REL_SET, in, &out);
assert (err == 0);
err = rm_activity_policy (hog_activity,
- ACTIVITY_POLICY_SIBLING_REL_SET, in, &out);
+ VG_ACTIVITY_POLICY_SIBLING_REL_SET, in, &out);
assert (err == 0);
err = rm_activity_policy (gc_activity,
- ACTIVITY_POLICY_SIBLING_REL_SET, in, &out);
+ VG_ACTIVITY_POLICY_SIBLING_REL_SET, in, &out);
assert (err == 0);
diff --git a/benchmarks/activity-distribution.c b/benchmarks/activity-distribution.c
index 52cc6a3..4b822c3 100644
--- a/benchmarks/activity-distribution.c
+++ b/benchmarks/activity-distribution.c
@@ -12,7 +12,7 @@
#include <hurd/startup.h>
#include <hurd/anonymous.h>
-static addr_t activity;
+static vg_addr_t activity;
/* Initialized by the machine-specific startup-code. */
extern struct hurd_startup_data *__hurd_startup_data;
@@ -32,23 +32,23 @@ main (int argc, char *argv[])
#define THREADS 4
/* The activities. */
- addr_t activities[THREADS];
+ vg_addr_t activities[THREADS];
/* Create THREADS activities, each with an increasing weight. */
int i;
for (i = 0; i < THREADS; i ++)
{
- activities[i] = storage_alloc (activity, cap_activity,
+ activities[i] = storage_alloc (activity, vg_cap_activity,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT,
- ADDR_VOID).addr;
+ VG_OBJECT_POLICY_DEFAULT,
+ VG_ADDR_VOID).addr;
struct activity_policy in;
in.sibling_rel.priority = i == 0 ? 2 : 1;
in.sibling_rel.weight = i + 1;
struct activity_policy out;
err = rm_activity_policy (activity, activities[i],
- ACTIVITY_POLICY_SIBLING_REL_SET, in,
+ VG_ACTIVITY_POLICY_SIBLING_REL_SET, in,
&out);
assert (err == 0);
}
@@ -74,7 +74,7 @@ main (int argc, char *argv[])
bool my_fill (struct anonymous_pager *anon,
uintptr_t offset, uintptr_t count,
void *pages[],
- struct activation_fault_info info)
+ struct vg_activation_fault_info info)
{
uintptr_t *p = pages[0];
p[0] = offset;
@@ -111,9 +111,9 @@ main (int argc, char *argv[])
/* Allocate a (discardable) buffer. */
{
pagers[i]
- = anonymous_pager_alloc (ADDR_VOID, NULL, SIZE, MAP_ACCESS_ALL,
- OBJECT_POLICY (true,
- OBJECT_PRIORITY_DEFAULT),
+ = anonymous_pager_alloc (VG_ADDR_VOID, NULL, SIZE, MAP_ACCESS_ALL,
+ VG_OBJECT_POLICY (true,
+ VG_OBJECT_PRIORITY_DEFAULT),
0, my_fill, &buffers[i]);
assert (pagers[i]);
assert (buffers[i]);
@@ -192,7 +192,7 @@ main (int argc, char *argv[])
printf ("parent ");
for (i = 0; i < THREADS; i ++)
- printf (ADDR_FMT " ", ADDR_PRINTF (activities[i]));
+ printf (VG_ADDR_FMT " ", VG_ADDR_PRINTF (activities[i]));
printf ("\n");
for (i = 0; i < ITERATIONS; i ++)
diff --git a/benchmarks/boehm-gc/patches/05-viengoos-scheduler.patch b/benchmarks/boehm-gc/patches/05-viengoos-scheduler.patch
index e986cdf..db51fe2 100644
--- a/benchmarks/boehm-gc/patches/05-viengoos-scheduler.patch
+++ b/benchmarks/boehm-gc/patches/05-viengoos-scheduler.patch
@@ -663,8 +663,8 @@ diff -uprN -x '*.applied' -x config.guess -x '*~' -x autom4te.cache -x config.su
# undef DYNAMIC_LOADING
+
+# include <hurd/addr.h>
-+extern addr_t gc_activity __attribute__ ((weak));
-+# define ACTIVITY (&gc_activity ? gc_activity : ADDR_VOID)
++extern vg_addr_t gc_activity __attribute__ ((weak));
++# define ACTIVITY (&gc_activity ? gc_activity : VG_ADDR_VOID)
+extern int GC_available_bytes;
+
# endif
@@ -682,8 +682,8 @@ diff -uprN -x '*.applied' -x config.guess -x '*~' -x autom4te.cache -x config.su
+#include <hurd/viengoos.h>
+#include <hurd/as.h>
+
-+extern addr_t gc_activity __attribute__ ((weak));
-+#define ACTIVITY (&gc_activity ? gc_activity : ADDR_VOID)
++extern vg_addr_t gc_activity __attribute__ ((weak));
++#define ACTIVITY (&gc_activity ? gc_activity : VG_ADDR_VOID)
+
+#endif
+
diff --git a/benchmarks/cache.c b/benchmarks/cache.c
index 84942d8..7745a55 100644
--- a/benchmarks/cache.c
+++ b/benchmarks/cache.c
@@ -188,8 +188,8 @@ static struct hurd_ihash cache;
#include <hurd/as.h>
#include <string.h>
-addr_t main_activity;
-addr_t hog_activity;
+vg_addr_t main_activity;
+vg_addr_t hog_activity;
#endif
void *
@@ -211,7 +211,7 @@ helper (void *arg)
/* First the main thread. */
error_t err;
- err = rm_activity_info (ADDR_VOID, main_activity, activity_info_stats,
+ err = rm_activity_info (VG_ADDR_VOID, main_activity, activity_info_stats,
stat_count == 0
? 0 : stats[stat_count - 1].period + 1,
&info);
@@ -227,7 +227,7 @@ helper (void *arg)
stats[stat_count].period = info.stats.stats[0].period;
/* Then, the hog. */
- err = rm_activity_info (ADDR_VOID, hog_activity, activity_info_stats,
+ err = rm_activity_info (VG_ADDR_VOID, hog_activity, activity_info_stats,
stat_count == 0
? 0 : stats[stat_count - 1].period + 1,
&info);
@@ -390,24 +390,24 @@ helper_fork (void)
#ifdef __gnu_hurd_viengoos__
int err;
- main_activity = storage_alloc (ADDR_VOID,
- cap_activity_control, STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID).addr;
- if (ADDR_IS_VOID (main_activity))
+ main_activity = storage_alloc (VG_ADDR_VOID,
+ vg_cap_activity_control, STORAGE_LONG_LIVED,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID).addr;
+ if (VG_ADDR_IS_VOID (main_activity))
panic ("Failed to allocate main activity");
struct object_name name;
snprintf (&name.name[0], sizeof (name.name), "main.%x", l4_myself ());
- rm_object_name (ADDR_VOID, main_activity, name);
+ rm_object_name (VG_ADDR_VOID, main_activity, name);
- hog_activity = storage_alloc (ADDR_VOID,
- cap_activity_control, STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID).addr;
- if (ADDR_IS_VOID (hog_activity))
+ hog_activity = storage_alloc (VG_ADDR_VOID,
+ vg_cap_activity_control, STORAGE_LONG_LIVED,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID).addr;
+ if (VG_ADDR_IS_VOID (hog_activity))
panic ("Failed to allocate hog activity");
snprintf (&name.name[0], sizeof (name.name), "hog.%x", l4_myself ());
- rm_object_name (ADDR_VOID, hog_activity, name);
+ rm_object_name (VG_ADDR_VOID, hog_activity, name);
/* We give the main thread and the hog the same priority and
weight. */
@@ -419,16 +419,16 @@ helper_fork (void)
in.child_rel.priority = 2;
in.child_rel.weight = 20;
- err = rm_activity_policy (ADDR_VOID, meta_data_activity,
- ACTIVITY_POLICY_CHILD_REL_SET, in, &out);
+ err = rm_activity_policy (VG_ADDR_VOID, meta_data_activity,
+ VG_ACTIVITY_POLICY_CHILD_REL_SET, in, &out);
assert (err == 0);
- err = rm_activity_policy (ADDR_VOID, hog_activity,
- ACTIVITY_POLICY_SIBLING_REL_SET, in, &out);
+ err = rm_activity_policy (VG_ADDR_VOID, hog_activity,
+ VG_ACTIVITY_POLICY_SIBLING_REL_SET, in, &out);
assert (err == 0);
- err = rm_activity_policy (ADDR_VOID, main_activity,
- ACTIVITY_POLICY_SIBLING_REL_SET, in, &out);
+ err = rm_activity_policy (VG_ADDR_VOID, main_activity,
+ VG_ACTIVITY_POLICY_SIBLING_REL_SET, in, &out);
assert (err == 0);
@@ -632,7 +632,7 @@ bool
object_fill (struct anonymous_pager *anon,
uintptr_t offset, uintptr_t count,
void *pages[],
- struct activation_fault_info info)
+ struct vg_activation_fault_info info)
{
profile_region (NULL);
@@ -649,7 +649,7 @@ object_fill (struct anonymous_pager *anon,
abort ();
}
- struct obj *object = (struct obj *) (uintptr_t) addr_prefix (anon->map_area);
+ struct obj *object = (struct obj *) (uintptr_t) vg_addr_prefix (anon->map_area);
// debug (0, "Filling %d at %p", id, object);
@@ -693,10 +693,10 @@ object_lookup_hard (int id)
if (! chunk || offset + OBJECT_SIZE > size)
{
static struct anonymous_pager *pager
- = anonymous_pager_alloc (ADDR_VOID, NULL,
+ = anonymous_pager_alloc (VG_ADDR_VOID, NULL,
size, MAP_ACCESS_ALL,
- OBJECT_POLICY (true,
- OBJECT_PRIORITY_DEFAULT - 1),
+ VG_OBJECT_POLICY (true,
+ VG_OBJECT_PRIORITY_DEFAULT - 1),
0, NULL, &chunk);
assert (pager);
@@ -714,10 +714,10 @@ object_lookup_hard (int id)
void *chunk;
struct anonymous_pager *pager
- = anonymous_pager_alloc (ADDR_VOID, NULL,
+ = anonymous_pager_alloc (VG_ADDR_VOID, NULL,
size, MAP_ACCESS_ALL,
- OBJECT_POLICY (true,
- OBJECT_PRIORITY_DEFAULT - 1),
+ VG_OBJECT_POLICY (true,
+ VG_OBJECT_PRIORITY_DEFAULT - 1),
ANONYMOUS_NO_RECURSIVE, object_fill, &chunk);
assert (pager);
assert (chunk);
diff --git a/benchmarks/shared-memory-distribution.c b/benchmarks/shared-memory-distribution.c
index 20e38d1..27e2d6e 100644
--- a/benchmarks/shared-memory-distribution.c
+++ b/benchmarks/shared-memory-distribution.c
@@ -10,7 +10,7 @@
#include <hurd/storage.h>
#include <hurd/startup.h>
-static addr_t activity;
+static vg_addr_t activity;
/* Initialized by the machine-specific startup-code. */
extern struct hurd_startup_data *__hurd_startup_data;
@@ -36,14 +36,14 @@ main (int argc, char *argv[])
#define THREADS 3
/* And the activities. */
- addr_t activities[THREADS];
+ vg_addr_t activities[THREADS];
int i;
for (i = 0; i < THREADS; i ++)
- activities[i] = storage_alloc (activity, cap_activity,
+ activities[i] = storage_alloc (activity, vg_cap_activity,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT,
- ADDR_VOID).addr;
+ VG_OBJECT_POLICY_DEFAULT,
+ VG_ADDR_VOID).addr;
bool terminate = false;
l4_thread_id_t tids[THREADS];
@@ -143,7 +143,7 @@ main (int argc, char *argv[])
printf ("parent ");
for (i = 0; i < THREADS; i ++)
- printf (ADDR_FMT " ", ADDR_PRINTF (activities[i]));
+ printf (VG_ADDR_FMT " ", VG_ADDR_PRINTF (activities[i]));
printf ("\n");
for (i = 0; i < ITERATIONS; i ++)
diff --git a/hieronymus/hieronymus.c b/hieronymus/hieronymus.c
index 7a0d3da..06534a2 100644
--- a/hieronymus/hieronymus.c
+++ b/hieronymus/hieronymus.c
@@ -54,7 +54,7 @@ struct module
#include "modules.h"
static int module_count;
-static addr_t *activities;
+static vg_addr_t *activities;
/* Initialized by the machine-specific startup-code. */
extern struct hurd_startup_data *__hurd_startup_data;
@@ -66,16 +66,16 @@ static struct storage
activity_alloc (struct activity_policy policy)
{
struct storage storage
- = storage_alloc (root_activity, cap_activity_control, STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
+ = storage_alloc (root_activity, vg_cap_activity_control, STORAGE_LONG_LIVED,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
if (! storage.cap)
panic ("Failed to allocate storage.");
struct activity_policy out;
- error_t err = rm_activity_policy (ADDR_VOID, storage.addr,
- ACTIVITY_POLICY_STORAGE_SET
- | ACTIVITY_POLICY_CHILD_REL_SET
- | ACTIVITY_POLICY_SIBLING_REL_SET,
+ error_t err = rm_activity_policy (VG_ADDR_VOID, storage.addr,
+ VG_ACTIVITY_POLICY_STORAGE_SET
+ | VG_ACTIVITY_POLICY_CHILD_REL_SET
+ | VG_ACTIVITY_POLICY_SIBLING_REL_SET,
policy, &out);
if (err)
panic ("Failed to set policy on activity");
@@ -137,7 +137,7 @@ do_gather_stats (void *arg)
for (i = 0; i < module_count; i ++, stat ++)
{
error_t err;
- err = rm_activity_info (ADDR_VOID, activities[i], activity_info_stats,
+ err = rm_activity_info (VG_ADDR_VOID, activities[i], activity_info_stats,
period, &info);
assert_perror (err);
assert (info.event == activity_info_stats);
@@ -177,7 +177,7 @@ main (int argc, char *argv[])
module_count = sizeof (modules) / sizeof (modules[0]);
- addr_t a[module_count];
+ vg_addr_t a[module_count];
activities = &a[0];
/* Create the activities. */
@@ -185,14 +185,14 @@ main (int argc, char *argv[])
for (i = 0; i < module_count; i ++)
{
struct activity_memory_policy sibling_policy
- = ACTIVITY_MEMORY_POLICY (modules[i].priority, modules[i].weight);
+ = VG_ACTIVITY_MEMORY_POLICY (modules[i].priority, modules[i].weight);
struct activity_policy policy
- = ACTIVITY_POLICY (sibling_policy, ACTIVITY_MEMORY_POLICY_VOID, 0);
+ = VG_ACTIVITY_POLICY (sibling_policy, VG_ACTIVITY_MEMORY_POLICY_VOID, 0);
activities[i] = activity_alloc (policy).addr;
struct object_name name;
strncpy (&name.name[0], modules[i].name, sizeof (name.name));
- rm_object_name (ADDR_VOID, activities[i], name);
+ rm_object_name (VG_ADDR_VOID, activities[i], name);
}
bool gather_stats = false;
@@ -215,7 +215,7 @@ main (int argc, char *argv[])
}
/* Load each program (but don't yet start it). */
- addr_t thread[module_count];
+ vg_addr_t thread[module_count];
for (i = 0; i < module_count; i ++)
{
struct md5_ctx ctx;
@@ -268,22 +268,22 @@ main (int argc, char *argv[])
{
struct hurd_object_desc *desc = &__hurd_startup_data->descs[j];
- if ((desc->type == cap_page || desc->type == cap_rpage)
- && ! ADDR_IS_VOID (desc->storage)
- && addr_depth (desc->object) == ADDR_BITS - PAGESIZE_LOG2)
+ if ((desc->type == vg_cap_page || desc->type == vg_cap_rpage)
+ && ! VG_ADDR_IS_VOID (desc->storage)
+ && vg_addr_depth (desc->object) == VG_ADDR_BITS - PAGESIZE_LOG2)
{
int i;
for (i = 0; i < module_count; i ++)
- if ((uintptr_t) modules[i].start <= addr_prefix (desc->object)
- && (addr_prefix (desc->object) + PAGESIZE - 1
+ if ((uintptr_t) modules[i].start <= vg_addr_prefix (desc->object)
+ && (vg_addr_prefix (desc->object) + PAGESIZE - 1
<= (uintptr_t) modules[i].end))
break;
if (i != module_count)
{
- debug (5, "Freeing " ADDR_FMT "(" ADDR_FMT "), a %s",
- ADDR_PRINTF (desc->object), ADDR_PRINTF (desc->storage),
- cap_type_string (desc->type));
+ debug (5, "Freeing " VG_ADDR_FMT "(" VG_ADDR_FMT "), a %s",
+ VG_ADDR_PRINTF (desc->object), VG_ADDR_PRINTF (desc->storage),
+ vg_cap_type_string (desc->type));
storage_free (desc->storage, true);
}
}
@@ -312,7 +312,7 @@ main (int argc, char *argv[])
modules[i].delay = -1U;
debug (0, DEBUG_BOLD ("Starting %s"), modules[i].name);
- thread_start (thread[i]);
+ vg_thread_start (thread[i]);
}
else if (deadline > modules[i].delay * 1000000ULL)
{
@@ -339,12 +339,12 @@ main (int argc, char *argv[])
rm_object_reply_on_destruction (root_activity,
thread[i], &rt);
- addr_t folio = addr_chop (activities[i], FOLIO_OBJECTS_LOG2);
- int index = addr_extract (activities[i], FOLIO_OBJECTS_LOG2);
+ vg_addr_t folio = vg_addr_chop (activities[i], VG_FOLIO_OBJECTS_LOG2);
+ int index = vg_addr_extract (activities[i], VG_FOLIO_OBJECTS_LOG2);
error_t err;
- err = rm_folio_object_alloc (ADDR_VOID, folio, index,
- cap_void, OBJECT_POLICY_VOID,
+ err = rm_folio_object_alloc (VG_ADDR_VOID, folio, index,
+ vg_cap_void, VG_OBJECT_POLICY_VOID,
(uintptr_t) rt,
NULL, NULL);
if (err)
diff --git a/hurd/exceptions.h b/hurd/exceptions.h
index 4e7dfd5..efd779b 100644
--- a/hurd/exceptions.h
+++ b/hurd/exceptions.h
@@ -48,7 +48,7 @@ extern struct hurd_utcb *(*hurd_utcb) (void);
but should not be running). Installs the UTCB and exception
messenger in the thread object. Returns the new UTCB in *UTCB.
Returns 0 on success, otherwise an error code. */
-extern error_t hurd_activation_state_alloc (addr_t thread,
+extern error_t hurd_activation_state_alloc (vg_addr_t thread,
struct hurd_utcb **utcb);
/* Release the state allocated by hurd_activation_state_alloc. May
diff --git a/hurd/startup.h b/hurd/startup.h
index 8ca0da0..98fe209 100644
--- a/hurd/startup.h
+++ b/hurd/startup.h
@@ -39,13 +39,13 @@
struct hurd_object_desc
{
/* The object. */
- addr_t object;
+ vg_addr_t object;
/* If the object is not a folio, then: */
- /* The location of the storage. (addr_chop (STORAGE,
- FOLIO_OBJECTS_LOG2) => the folio.) */
- addr_t storage;
+ /* The location of the storage. (vg_addr_chop (STORAGE,
+ VG_FOLIO_OBJECTS_LOG2) => the folio.) */
+ vg_addr_t storage;
/* The type of the object (for convenience). */
unsigned char type;
@@ -83,16 +83,16 @@ struct hurd_startup_data
/* Slot in which a capability designating the task's primary
activity is stored. */
- addr_t activity;
+ vg_addr_t activity;
/* Slot in which a capability designating the task's first thread is
stored. */
- addr_t thread;
+ vg_addr_t thread;
/* To allow a task to bootstrap itself, it needs a couple of
messengers (one to send and another to receive). Here they
are. */
- addr_t messengers[2];
+ vg_addr_t messengers[2];
struct hurd_object_desc *descs;
int desc_count;
diff --git a/libc-parts/_exit.c b/libc-parts/_exit.c
index 47ff758..bda719f 100644
--- a/libc-parts/_exit.c
+++ b/libc-parts/_exit.c
@@ -40,7 +40,7 @@ _exit (int ret)
/* We try to kill the activity and, if that fails, the main
thread. */
- addr_t objs[] = { __hurd_startup_data->activity,
+ vg_addr_t objs[] = { __hurd_startup_data->activity,
__hurd_startup_data->thread };
int o;
@@ -50,19 +50,19 @@ _exit (int ret)
for (i = 0; i < __hurd_startup_data->desc_count; i ++)
{
struct hurd_object_desc *desc = &__hurd_startup_data->descs[i];
- if (ADDR_EQ (desc->object, objs[o]))
+ if (VG_ADDR_EQ (desc->object, objs[o]))
{
- if (ADDR_IS_VOID (desc->storage))
+ if (VG_ADDR_IS_VOID (desc->storage))
/* We don't own the storage and thus can't deallocate
the object. */
continue;
- addr_t folio = addr_chop (desc->storage, FOLIO_OBJECTS_LOG2);
- int index = addr_extract (desc->storage, FOLIO_OBJECTS_LOG2);
+ vg_addr_t folio = vg_addr_chop (desc->storage, VG_FOLIO_OBJECTS_LOG2);
+ int index = vg_addr_extract (desc->storage, VG_FOLIO_OBJECTS_LOG2);
error_t err;
- err = rm_folio_object_alloc (ADDR_VOID, folio, index,
- cap_void, OBJECT_POLICY_VOID,
+ err = rm_folio_object_alloc (VG_ADDR_VOID, folio, index,
+ vg_cap_void, VG_OBJECT_POLICY_VOID,
(uintptr_t) ret,
NULL, NULL);
if (err)
diff --git a/libc-parts/ia32-cmain.c b/libc-parts/ia32-cmain.c
index 3f3fd2f..10390f6 100644
--- a/libc-parts/ia32-cmain.c
+++ b/libc-parts/ia32-cmain.c
@@ -66,7 +66,7 @@ finish (void)
i < __hurd_startup_data->desc_count;
i ++, desc ++)
{
- if (ADDR_EQ (PTR_TO_PAGE (p), desc->object))
+ if (VG_ADDR_EQ (VG_PTR_TO_PAGE (p), desc->object))
{
storage_free (desc->storage, true);
break;
diff --git a/libc-parts/process-spawn.c b/libc-parts/process-spawn.c
index 843da21..23a44dc 100644
--- a/libc-parts/process-spawn.c
+++ b/libc-parts/process-spawn.c
@@ -56,13 +56,13 @@
#ifdef RM_INTERN
#include "../viengoos/activity.h"
#else
-#define root_activity ADDR_VOID
+#define root_activity VG_ADDR_VOID
#endif
#ifdef RM_INTERN
# define AS_DUMP_ as_dump_from (root_activity, as_root_cap, __func__)
#else
-# define AS_DUMP_ rm_as_dump (ADDR_VOID, as_root)
+# define AS_DUMP_ rm_as_dump (VG_ADDR_VOID, as_root)
#endif
#define AS_DUMP \
do \
@@ -73,14 +73,14 @@
while (0) \
#ifdef RM_INTERN
-# define rt_to_object(rt) cap_to_object (root_activity, &(rt).cap)
+# define rt_to_object(rt) vg_cap_to_object (root_activity, &(rt).cap)
#else
# define rt_to_object(rt) \
- ADDR_TO_PTR (addr_extend ((rt).storage, 0, PAGESIZE_LOG2))
+ VG_ADDR_TO_PTR (vg_addr_extend ((rt).storage, 0, PAGESIZE_LOG2))
#endif
-thread_t
-process_spawn (addr_t activity,
+vg_thread_t
+process_spawn (vg_addr_t activity,
void *start, void *end,
const char *const argv[], const char *const env[],
bool make_runnable)
@@ -95,18 +95,18 @@ process_spawn (addr_t activity,
sroot_, scap_, saddr_, \
alloc_, index_) \
({ \
- debug (5, "Copying " ADDR_FMT " to " ADDR_FMT , \
- ADDR_PRINTF (saddr_), ADDR_PRINTF (taddr_)); \
+ debug (5, "Copying " VG_ADDR_FMT " to " VG_ADDR_FMT , \
+ VG_ADDR_PRINTF (saddr_), VG_ADDR_PRINTF (taddr_)); \
as_insert_full (root_activity, \
- ADDR_VOID, as_root_cap_, taddr_, \
- ADDR_VOID, ADDR_VOID, scap_, alloc_); \
+ VG_ADDR_VOID, as_root_cap_, taddr_, \
+ VG_ADDR_VOID, VG_ADDR_VOID, scap_, alloc_); \
})
#else
struct shadow
{
- addr_t addr;
- struct cap cap;
+ vg_addr_t addr;
+ struct vg_cap cap;
struct shadow *next;
};
struct shadow *shadow_list = NULL;
@@ -114,9 +114,9 @@ process_spawn (addr_t activity,
struct hurd_ihash as;
hurd_ihash_init (&as, true, HURD_IHASH_NO_LOCP);
- struct cap *add_shadow (addr_t addr)
+ struct vg_cap *add_shadow (vg_addr_t addr)
{
- debug (5, ADDR_FMT, ADDR_PRINTF (addr));
+ debug (5, VG_ADDR_FMT, VG_ADDR_PRINTF (addr));
struct shadow *s = calloc (sizeof (struct shadow), 1);
s->next = shadow_list;
@@ -141,37 +141,37 @@ process_spawn (addr_t activity,
bind them to the page table. That is, if there is a page table
at X, and we index it, we don't refer to X but simply extend its
address and return the shadow pte at that address. */
- struct cap *do_index (activity_t activity,
- struct cap *pt, addr_t pt_addr, int idx,
- struct cap *fake_slot)
+ struct vg_cap *do_index (activity_t activity,
+ struct vg_cap *pt, vg_addr_t pt_addr, int idx,
+ struct vg_cap *fake_slot)
{
- assert (pt->type == cap_cappage || pt->type == cap_rcappage
- || pt->type == cap_folio);
+ assert (pt->type == vg_cap_cappage || pt->type == vg_cap_rcappage
+ || pt->type == vg_cap_folio);
- debug (5, "-> " ADDR_FMT "[%d/%d], %s",
- ADDR_PRINTF (pt_addr), idx, CAPPAGE_SLOTS / CAP_SUBPAGES (pt),
- cap_type_string (pt->type));
+ debug (5, "-> " VG_ADDR_FMT "[%d/%d], %s",
+ VG_ADDR_PRINTF (pt_addr), idx, VG_CAPPAGE_SLOTS / VG_CAP_SUBPAGES (pt),
+ vg_cap_type_string (pt->type));
- addr_t pte_addr;
+ vg_addr_t pte_addr;
switch (pt->type)
{
- case cap_cappage:
- case cap_rcappage:
- pte_addr = addr_extend (pt_addr, idx, CAP_SUBPAGE_SIZE_LOG2 (pt));
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
+ pte_addr = vg_addr_extend (pt_addr, idx, VG_CAP_SUBPAGE_SIZE_LOG2 (pt));
break;
- case cap_folio:
- pte_addr = addr_extend (pt_addr, idx, FOLIO_OBJECTS_LOG2);
+ case vg_cap_folio:
+ pte_addr = vg_addr_extend (pt_addr, idx, VG_FOLIO_OBJECTS_LOG2);
break;
default:
panic ("Expected cappage or folio but got a %s",
- cap_type_string (pt->type));
+ vg_cap_type_string (pt->type));
}
struct shadow *s = hurd_ihash_find (&as, pte_addr.raw);
- struct cap *cap;
+ struct vg_cap *cap;
if (s)
{
- assert (ADDR_EQ (s->addr, pte_addr));
+ assert (VG_ADDR_EQ (s->addr, pte_addr));
cap = &s->cap;
}
else
@@ -180,11 +180,11 @@ process_spawn (addr_t activity,
assert (cap);
- debug (5, "<- " ADDR_FMT "[%d], %s",
- ADDR_PRINTF (pte_addr), CAPPAGE_SLOTS / CAP_SUBPAGES (cap),
- cap_type_string (cap->type));
+ debug (5, "<- " VG_ADDR_FMT "[%d], %s",
+ VG_ADDR_PRINTF (pte_addr), VG_CAPPAGE_SLOTS / VG_CAP_SUBPAGES (cap),
+ vg_cap_type_string (cap->type));
- if (pt->type == cap_folio)
+ if (pt->type == vg_cap_folio)
{
*fake_slot = *cap;
return fake_slot;
@@ -284,33 +284,33 @@ process_spawn (addr_t activity,
/* Root of new address space. */
#ifdef RM_INTERN
-# define as_root ADDR_VOID
- struct cap as_root_cap;
+# define as_root VG_ADDR_VOID
+ struct vg_cap as_root_cap;
memset (&as_root_cap, 0, sizeof (as_root_cap));
# define as_root_cap (&as_root_cap)
#else
- addr_t as_root = capalloc ();
- struct cap *as_root_cap = add_shadow (ADDR (0, 0));
+ vg_addr_t as_root = capalloc ();
+ struct vg_cap *as_root_cap = add_shadow (VG_ADDR (0, 0));
/* This is sort of a hack. To copy a capability, we need to invoke
the source object that contains the capability. A capability
slot is not an object. Finding the object corresponding to
AS_ROOT is possible, but interposing a thread is just easier. */
struct storage thread_root
- = storage_alloc (ADDR_VOID, cap_thread, STORAGE_EPHEMERAL,
- OBJECT_POLICY_DEFAULT, as_root);
+ = storage_alloc (VG_ADDR_VOID, vg_cap_thread, STORAGE_EPHEMERAL,
+ VG_OBJECT_POLICY_DEFAULT, as_root);
#endif
/* Allocation support. */
/* Address of first folio in new task. */
-#define FOLIO_START (1ULL << (FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2))
+#define FOLIO_START (1ULL << (VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2))
bool have_folio = false;
/* Local address. */
- folio_t folio_local_addr;
+ vg_folio_t folio_local_addr;
/* Address in task. */
- addr_t folio_task_addr;
+ vg_addr_t folio_task_addr;
/* Next unallocated object in folio. */
int folio_index;
@@ -318,43 +318,43 @@ process_spawn (addr_t activity,
struct as_region
{
struct as_region *next;
- addr_t addr;
+ vg_addr_t addr;
};
struct as_region *as_regions = NULL;
#endif
- struct as_allocate_pt_ret allocate_object (enum cap_type type, addr_t addr)
+ struct as_allocate_pt_ret allocate_object (enum vg_cap_type type, vg_addr_t addr)
{
debug (5, "(%s, 0x%llx/%d)",
- cap_type_string (type), addr_prefix (addr), addr_depth (addr));
+ vg_cap_type_string (type), vg_addr_prefix (addr), vg_addr_depth (addr));
- assert (type != cap_void);
- assert (type != cap_folio);
+ assert (type != vg_cap_void);
+ assert (type != vg_cap_folio);
- if (! have_folio || folio_index == FOLIO_OBJECTS)
+ if (! have_folio || folio_index == VG_FOLIO_OBJECTS)
/* Allocate additional storage. */
{
- int w = FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2;
+ int w = VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2;
if (! have_folio)
{
- folio_task_addr = ADDR (FOLIO_START, ADDR_BITS - w);
+ folio_task_addr = VG_ADDR (FOLIO_START, VG_ADDR_BITS - w);
have_folio = true;
}
else
/* Move to the next free space. */
- folio_task_addr = ADDR (addr_prefix (folio_task_addr) + (1ULL << w),
- ADDR_BITS - w);
+ folio_task_addr = VG_ADDR (vg_addr_prefix (folio_task_addr) + (1ULL << w),
+ VG_ADDR_BITS - w);
- debug (5, "Allocating folio at " ADDR_FMT,
- ADDR_PRINTF (folio_task_addr));
+ debug (5, "Allocating folio at " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (folio_task_addr));
#ifdef RM_INTERN
- folio_local_addr = folio_alloc (root_activity, FOLIO_POLICY_DEFAULT);
+ folio_local_addr = folio_alloc (root_activity, VG_FOLIO_POLICY_DEFAULT);
if (! folio_local_addr)
panic ("Out of memory");
#else
folio_local_addr = as_alloc (w, 1, true);
- if (ADDR_IS_VOID (folio_local_addr))
+ if (VG_ADDR_IS_VOID (folio_local_addr))
panic ("Failed to allocate address space for folio");
struct as_region *as_region = malloc (sizeof (*as_region));
@@ -368,16 +368,16 @@ process_spawn (addr_t activity,
as_ensure (folio_local_addr);
error_t err = rm_folio_alloc (activity, activity,
- FOLIO_POLICY_DEFAULT,
+ VG_FOLIO_POLICY_DEFAULT,
&folio_local_addr);
if (err)
panic ("Failed to allocate folio");
- assert (! ADDR_IS_VOID (folio_local_addr));
+ assert (! VG_ADDR_IS_VOID (folio_local_addr));
as_slot_lookup_use (folio_local_addr,
({
- slot->type = cap_folio;
- CAP_SET_SUBPAGE (slot, 0, 1);
+ slot->type = vg_cap_folio;
+ VG_CAP_SET_SUBPAGE (slot, 0, 1);
}));
#endif
@@ -390,7 +390,7 @@ process_spawn (addr_t activity,
struct hurd_object_desc *desc = &descs[startup_data->desc_count ++];
desc->object = folio_task_addr;
- desc->type = cap_folio;
+ desc->type = vg_cap_folio;
/* We need to insert the folio into the task's address
space, however, that is not yet possible as we may be
@@ -410,30 +410,30 @@ process_spawn (addr_t activity,
int index = folio_index ++;
- debug (5, "Allocating " ADDR_FMT " (%s)",
- ADDR_PRINTF (addr_extend (folio_task_addr,
- index, FOLIO_OBJECTS_LOG2)),
- cap_type_string (type));
+ debug (5, "Allocating " VG_ADDR_FMT " (%s)",
+ VG_ADDR_PRINTF (vg_addr_extend (folio_task_addr,
+ index, VG_FOLIO_OBJECTS_LOG2)),
+ vg_cap_type_string (type));
#ifdef RM_INTERN
rt.cap = folio_object_alloc (root_activity,
folio_local_addr, index,
- cap_type_strengthen (type),
- OBJECT_POLICY_VOID, 0);
+ vg_cap_type_strengthen (type),
+ VG_OBJECT_POLICY_VOID, 0);
#else
- rm_folio_object_alloc (ADDR_VOID,
+ rm_folio_object_alloc (VG_ADDR_VOID,
folio_local_addr, index,
- cap_type_strengthen (type),
- OBJECT_POLICY_VOID, 0, NULL, NULL);
- rt.cap.type = cap_type_strengthen (type);
- CAP_PROPERTIES_SET (&rt.cap, CAP_PROPERTIES_VOID);
+ vg_cap_type_strengthen (type),
+ VG_OBJECT_POLICY_VOID, 0, NULL, NULL);
+ rt.cap.type = vg_cap_type_strengthen (type);
+ VG_CAP_PROPERTIES_SET (&rt.cap, VG_CAP_PROPERTIES_VOID);
#ifndef NDEBUG
- if (rt.cap.type == cap_page)
+ if (rt.cap.type == vg_cap_page)
{
unsigned int *p
- = ADDR_TO_PTR (addr_extend (addr_extend (folio_local_addr,
- index, FOLIO_OBJECTS_LOG2),
+ = VG_ADDR_TO_PTR (vg_addr_extend (vg_addr_extend (folio_local_addr,
+ index, VG_FOLIO_OBJECTS_LOG2),
0, PAGESIZE_LOG2));
int i;
for (i = 0; i < PAGESIZE / sizeof (int); i ++)
@@ -449,8 +449,8 @@ process_spawn (addr_t activity,
panic ("Initial task too large.");
struct hurd_object_desc *desc = &descs[startup_data->desc_count ++];
- desc->storage = addr_extend (folio_task_addr, index, FOLIO_OBJECTS_LOG2);
- if (ADDR_IS_VOID (addr))
+ desc->storage = vg_addr_extend (folio_task_addr, index, VG_FOLIO_OBJECTS_LOG2);
+ if (VG_ADDR_IS_VOID (addr))
desc->object = desc->storage;
else
desc->object = addr;
@@ -462,17 +462,17 @@ process_spawn (addr_t activity,
rt.storage = desc->storage;
#else
/* We need to reference the storage in our address space. */
- rt.storage = addr_extend (folio_local_addr, index, FOLIO_OBJECTS_LOG2);
+ rt.storage = vg_addr_extend (folio_local_addr, index, VG_FOLIO_OBJECTS_LOG2);
#endif
- debug (5, "cap: " CAP_FMT, CAP_PRINTF (&rt.cap));
+ debug (5, "cap: " VG_CAP_FMT, VG_CAP_PRINTF (&rt.cap));
return rt;
}
- struct as_allocate_pt_ret allocate_page_table (addr_t addr)
+ struct as_allocate_pt_ret allocate_page_table (vg_addr_t addr)
{
- debug (5, ADDR_FMT, ADDR_PRINTF (addr));
- return allocate_object (cap_cappage, addr);
+ debug (5, VG_ADDR_FMT, VG_ADDR_PRINTF (addr));
+ return allocate_object (vg_cap_cappage, addr);
}
struct as_allocate_pt_ret rt;
@@ -492,63 +492,63 @@ process_spawn (addr_t activity,
A way around this problem would be the approach that EROS takes:
start with a hand-created system image. */
- rt = allocate_object (cap_activity_control, ADDR_VOID);
+ rt = allocate_object (vg_cap_activity_control, VG_ADDR_VOID);
startup_data->activity = rt.storage;
- root_activity = (struct activity *) cap_to_object (root_activity, &rt.cap);
+ root_activity = (struct activity *) vg_cap_to_object (root_activity, &rt.cap);
folio_parent (root_activity, folio_local_addr);
/* We know that we are the only one who can access the data
structure, however, the object_claim asserts that this lock is
held. */
object_claim (root_activity, (struct object *) root_activity,
- OBJECT_POLICY_VOID, true);
+ VG_OBJECT_POLICY_VOID, true);
object_claim (root_activity, (struct object *) folio_local_addr,
- OBJECT_POLICY_VOID, true);
+ VG_OBJECT_POLICY_VOID, true);
#else
struct hurd_object_desc *desc;
- struct cap cap;
+ struct vg_cap cap;
memset (&cap, 0, sizeof (cap));
bool r;
/* Stash the activity two pages before the first folio. */
desc = &descs[startup_data->desc_count ++];
- desc->storage = ADDR_VOID;
- desc->object = ADDR (FOLIO_START - 2 * PAGESIZE, ADDR_BITS - PAGESIZE_LOG2);
- desc->type = cap_activity;
+ desc->storage = VG_ADDR_VOID;
+ desc->object = VG_ADDR (FOLIO_START - 2 * PAGESIZE, VG_ADDR_BITS - PAGESIZE_LOG2);
+ desc->type = vg_cap_activity;
startup_data->activity = desc->object;
/* Insert it into the target address space. */
- cap.type = cap_activity;
- struct cap *slot = as_insert_custom (ADDR_VOID,
- as_root, as_root_cap, desc->object,
- ADDR_VOID, cap, activity,
- allocate_page_table, do_index);
+ cap.type = vg_cap_activity;
+ struct vg_cap *slot = as_insert_custom (VG_ADDR_VOID,
+ as_root, as_root_cap, desc->object,
+ VG_ADDR_VOID, cap, activity,
+ allocate_page_table, do_index);
/* Weaken the capability. */
- r = cap_copy_x (root_activity, as_root, slot, desc->object,
- as_root, *slot, desc->object,
- CAP_COPY_WEAKEN, CAP_PROPERTIES_VOID);
+ r = vg_cap_copy_x (root_activity, as_root, slot, desc->object,
+ as_root, *slot, desc->object,
+ VG_CAP_COPY_WEAKEN, VG_CAP_PROPERTIES_VOID);
assert (r);
#endif
/* Allocate the thread. */
- rt = allocate_object (cap_thread, ADDR_VOID);
- assert (descs[startup_data->desc_count - 1].type == cap_thread);
+ rt = allocate_object (vg_cap_thread, VG_ADDR_VOID);
+ assert (descs[startup_data->desc_count - 1].type == vg_cap_thread);
startup_data->thread = descs[startup_data->desc_count - 1].object;
#ifdef RM_INTERN
- struct thread *thread = (struct thread *) cap_to_object (root_activity,
+ struct thread *thread = (struct thread *) vg_cap_to_object (root_activity,
&rt.cap);
#else
- addr_t thread = capalloc ();
- cap.type = cap_thread;
+ vg_addr_t thread = capalloc ();
+ cap.type = vg_cap_thread;
as_slot_lookup_use
(thread,
({
- r = cap_copy (root_activity,
- ADDR_VOID, slot, thread,
- ADDR_VOID, rt.cap, rt.storage);
+ r = vg_cap_copy (root_activity,
+ VG_ADDR_VOID, slot, thread,
+ VG_ADDR_VOID, rt.cap, rt.storage);
assert (r);
}));
#endif
@@ -567,23 +567,23 @@ process_spawn (addr_t activity,
debug (5, "%x (ro:%d)", ptr, ro);
- addr_t addr = addr_chop (PTR_TO_ADDR (ptr), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (ptr), PAGESIZE_LOG2);
- struct as_allocate_pt_ret rt = allocate_object (cap_page, addr);
+ struct as_allocate_pt_ret rt = allocate_object (vg_cap_page, addr);
as_insert_custom (root_activity,
as_root, as_root_cap, addr,
- ADDR_VOID, rt.cap, rt.storage,
+ VG_ADDR_VOID, rt.cap, rt.storage,
allocate_page_table, do_index);
if (ro)
as_slot_lookup_rel_use (root_activity, as_root_cap, addr,
({
- bool r = cap_copy_x (root_activity,
+ bool r = vg_cap_copy_x (root_activity,
as_root, slot, addr,
as_root, *slot, addr,
- CAP_COPY_WEAKEN,
- CAP_PROPERTIES_VOID);
+ VG_CAP_COPY_WEAKEN,
+ VG_CAP_PROPERTIES_VOID);
assert (r);
}));
@@ -613,11 +613,11 @@ process_spawn (addr_t activity,
assert ((ptr & (PAGESIZE - 1)) == 0);
#ifdef RM_INTERN
- addr_t addr = addr_chop (PTR_TO_ADDR (ptr), PAGESIZE_LOG2);
- struct cap cap = as_object_lookup_rel (root_activity,
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (ptr), PAGESIZE_LOG2);
+ struct vg_cap cap = as_object_lookup_rel (root_activity,
as_root_cap, addr,
- cap_rpage, NULL);
- local = cap_to_object (root_activity, &cap);
+ vg_cap_rpage, NULL);
+ local = vg_cap_to_object (root_activity, &cap);
#else
local = hurd_ihash_find (&map, ptr);
#endif
@@ -638,11 +638,11 @@ process_spawn (addr_t activity,
int i;
for (i = 0; i < 2; i ++)
{
- rt = allocate_object (cap_messenger, ADDR_VOID);
- assert (descs[startup_data->desc_count - 1].type == cap_messenger);
+ rt = allocate_object (vg_cap_messenger, VG_ADDR_VOID);
+ assert (descs[startup_data->desc_count - 1].type == vg_cap_messenger);
startup_data->messengers[i] = descs[startup_data->desc_count - 1].object;
- debug (5, "Messenger %d: " ADDR_FMT,
- i, ADDR_PRINTF (startup_data->messengers[i]));
+ debug (5, "Messenger %d: " VG_ADDR_FMT,
+ i, VG_ADDR_PRINTF (startup_data->messengers[i]));
}
/* We need to 1) insert the folios in the address space, 2) fix up
@@ -661,21 +661,21 @@ process_spawn (addr_t activity,
{
struct hurd_object_desc *desc = &descs[d];
- if (desc->type == cap_folio)
+ if (desc->type == vg_cap_folio)
{
- struct cap cap;
+ struct vg_cap cap;
#ifdef RM_INTERN
cap = object_to_cap ((struct object *) (uintptr_t)
desc->storage.raw);
- assert (cap.type == cap_folio);
+ assert (cap.type == vg_cap_folio);
#else
memset (&cap, 0, sizeof (cap));
- cap.type = cap_folio;
+ cap.type = vg_cap_folio;
#endif
- as_insert_custom (ADDR_VOID,
+ as_insert_custom (VG_ADDR_VOID,
as_root, as_root_cap, desc->object,
- ADDR_VOID, cap, desc->storage,
+ VG_ADDR_VOID, cap, desc->storage,
allocate_page_table, do_index);
desc->storage = desc->object;
@@ -691,15 +691,15 @@ process_spawn (addr_t activity,
+ PAGESIZE - 1) / PAGESIZE;
page ++)
{
- addr_t addr = ADDR (STARTUP_DATA_ADDR + page * PAGESIZE,
- ADDR_BITS - PAGESIZE_LOG2);
+ vg_addr_t addr = VG_ADDR (STARTUP_DATA_ADDR + page * PAGESIZE,
+ VG_ADDR_BITS - PAGESIZE_LOG2);
- struct as_allocate_pt_ret rt = allocate_object (cap_page, addr);
+ struct as_allocate_pt_ret rt = allocate_object (vg_cap_page, addr);
pages[page] = rt_to_object (rt);
- as_insert_custom (ADDR_VOID, as_root, as_root_cap, addr,
- ADDR_VOID, rt.cap, rt.storage,
+ as_insert_custom (VG_ADDR_VOID, as_root, as_root_cap, addr,
+ VG_ADDR_VOID, rt.cap, rt.storage,
allocate_page_table, do_index);
}
}
@@ -729,10 +729,10 @@ process_spawn (addr_t activity,
{
debug (0, "%d descriptors", startup_data->desc_count);
for (i = 0; i < startup_data->desc_count; i ++)
- debug (0, ADDR_FMT " (" ADDR_FMT "): %s",
- ADDR_PRINTF (descs[i].object),
- ADDR_PRINTF (descs[i].storage),
- cap_type_string (descs[i].type));
+ debug (0, VG_ADDR_FMT " (" VG_ADDR_FMT "): %s",
+ VG_ADDR_PRINTF (descs[i].object),
+ VG_ADDR_PRINTF (descs[i].storage),
+ vg_cap_type_string (descs[i].type));
}
/* Free the staging area. */
@@ -753,8 +753,8 @@ process_spawn (addr_t activity,
HURD_EXREGS_SET_SP_IP
| (make_runnable ? HURD_EXREGS_START : 0)
| HURD_EXREGS_ABORT_IPC,
- CAP_VOID, 0, CAP_PROPERTIES_VOID,
- CAP_VOID, CAP_VOID, CAP_VOID,
+ VG_CAP_VOID, 0, VG_CAP_PROPERTIES_VOID,
+ VG_CAP_VOID, VG_CAP_VOID, VG_CAP_VOID,
&sp, &ip, NULL, NULL);
#else
/* Start thread. */
@@ -762,21 +762,21 @@ process_spawn (addr_t activity,
/* Per the API (cf. <hurd/startup.h>). */
in.sp = STARTUP_DATA_ADDR;
in.ip = ip;
- in.aspace_cap_properties = CAP_PROPERTIES_VOID;
- in.aspace_cap_properties_flags = CAP_COPY_COPY_SOURCE_GUARD;
+ in.aspace_cap_properties = VG_CAP_PROPERTIES_VOID;
+ in.aspace_cap_properties_flags = VG_CAP_COPY_COPY_SOURCE_GUARD;
error_t err;
struct hurd_thread_exregs_out out;
/* XXX: Use a weakened activity. */
- err = rm_thread_exregs (ADDR_VOID, thread,
+ err = rm_thread_exregs (VG_ADDR_VOID, thread,
HURD_EXREGS_SET_SP_IP
| HURD_EXREGS_SET_ASPACE
| HURD_EXREGS_SET_ACTIVITY
| (make_runnable ? HURD_EXREGS_START : 0)
| HURD_EXREGS_ABORT_IPC,
- in, addr_extend (as_root, THREAD_ASPACE_SLOT,
- THREAD_SLOTS_LOG2),
- activity, ADDR_VOID, ADDR_VOID,
+ in, vg_addr_extend (as_root, VG_THREAD_ASPACE_SLOT,
+ VG_THREAD_SLOTS_LOG2),
+ activity, VG_ADDR_VOID, VG_ADDR_VOID,
&out, NULL, NULL, NULL, NULL);
#endif
if (err)
diff --git a/libc-parts/process-spawn.h b/libc-parts/process-spawn.h
index b11d5ae..8d68f1f 100644
--- a/libc-parts/process-spawn.h
+++ b/libc-parts/process-spawn.h
@@ -25,8 +25,8 @@
slot is allocated with capalloc. If MAKE_RUNNABLE is true, makes
the process' thread runnable. Otherwise, the thread remains
suspended and may be started with thread_start. */
-extern thread_t process_spawn (addr_t activity,
- void *start, void *end,
- const char *const argv[],
- const char *const env[],
- bool make_runnable);
+extern vg_thread_t process_spawn (vg_addr_t activity,
+ void *start, void *end,
+ const char *const argv[],
+ const char *const env[],
+ bool make_runnable);
diff --git a/libc-parts/s_printf.c b/libc-parts/s_printf.c
index fa417fb..f7e55f9 100644
--- a/libc-parts/s_printf.c
+++ b/libc-parts/s_printf.c
@@ -40,7 +40,7 @@ io_buffer_flush (struct io_buffer *buffer)
if (buffer->len == 0)
return;
- // rm_write_send_nonblocking (ADDR_VOID, ADDR_VOID, *buffer, ADDR_VOID);
+ // rm_write_send_nonblocking (VG_ADDR_VOID, VG_ADDR_VOID, *buffer, VG_ADDR_VOID);
l4_msg_tag_t tag = l4_niltag;
l4_msg_tag_set_label (&tag, 2132);
diff --git a/libhurd-mm/anonymous.c b/libhurd-mm/anonymous.c
index c6f0bcb..8bb91d3 100644
--- a/libhurd-mm/anonymous.c
+++ b/libhurd-mm/anonymous.c
@@ -46,7 +46,7 @@ struct storage_desc
/* Offset from start of pager. */
uintptr_t offset;
/* The allocated storage. */
- addr_t storage;
+ vg_addr_t storage;
};
static int
@@ -83,12 +83,12 @@ slab_alloc (void *hook, size_t size, void **ptr)
{
assert (size == PAGESIZE);
- struct storage storage = storage_alloc (meta_data_activity, cap_page,
+ struct storage storage = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
- *ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ *ptr = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
return 0;
}
@@ -98,7 +98,7 @@ slab_dealloc (void *hook, void *buffer, size_t size)
{
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
@@ -133,7 +133,7 @@ static struct hurd_slab_space anonymous_pager_slab
static bool
fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
- uintptr_t fault_addr, uintptr_t ip, struct activation_fault_info info)
+ uintptr_t fault_addr, uintptr_t ip, struct vg_activation_fault_info info)
{
struct anonymous_pager *anon = (struct anonymous_pager *) pager;
assert (anon->magic == ANONYMOUS_MAGIC);
@@ -141,8 +141,8 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
debug (5, "%p: fault at %p, spans %d pg (%d kb); "
"pager: %p-%p (%d pages; %d kb), offset: %x",
anon, (void *) fault_addr, count, count * PAGESIZE / 1024,
- (void *) (uintptr_t) addr_prefix (anon->map_area),
- (void *) (uintptr_t) addr_prefix (anon->map_area) + anon->pager.length,
+ (void *) (uintptr_t) vg_addr_prefix (anon->map_area),
+ (void *) (uintptr_t) vg_addr_prefix (anon->map_area) + anon->pager.length,
anon->pager.length / PAGESIZE, anon->pager.length / 1024,
offset);
@@ -216,10 +216,10 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
"%x + %d pages <= %x",
offset, count, pager->length);
- debug (5, "Faulting %p - %p (%d pages; %d kb); pager at " ADDR_FMT "+%d",
+ debug (5, "Faulting %p - %p (%d pages; %d kb); pager at " VG_ADDR_FMT "+%d",
(void *) fault_addr, (void *) fault_addr + count * PAGE_SIZE,
count, count * PAGESIZE / 1024,
- ADDR_PRINTF (anon->map_area), offset);
+ VG_ADDR_PRINTF (anon->map_area), offset);
}
pages = __builtin_alloca (sizeof (void *) * count);
@@ -253,13 +253,13 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
storage address as object_discarded_clear also
returns a mapping and we are likely to access the
data at the fault address. */
- err = rm_object_discarded_clear (ADDR_VOID, ADDR_VOID,
+ err = rm_object_discarded_clear (VG_ADDR_VOID, VG_ADDR_VOID,
storage_desc->storage);
assertx (err == 0, "%d", err);
- debug (5, "Clearing discarded bit for %p / " ADDR_FMT,
+ debug (5, "Clearing discarded bit for %p / " VG_ADDR_FMT,
(void *) fault_addr + i * PAGESIZE,
- ADDR_PRINTF (storage_desc->storage));
+ VG_ADDR_PRINTF (storage_desc->storage));
}
else if (! storage_desc)
/* Seems we have not yet allocated a page. */
@@ -271,9 +271,9 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
struct storage storage
= storage_alloc (anon->activity,
- cap_page, STORAGE_UNKNOWN, anon->policy,
- ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ vg_cap_page, STORAGE_UNKNOWN, anon->policy,
+ VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of memory.");
storage_desc->storage = storage.addr;
@@ -286,32 +286,32 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
"Fault address: %p, offset: %x",
(void *) fault_addr + i * PAGESIZE, o);
- debug (5, "Allocating storage for %p at " ADDR_FMT,
+ debug (5, "Allocating storage for %p at " VG_ADDR_FMT,
(void *) fault_addr + i * PAGESIZE,
- ADDR_PRINTF (storage_desc->storage));
+ VG_ADDR_PRINTF (storage_desc->storage));
profile_region ("install");
/* We generate a fake shadow cap for the storage as we know
its contents (It is a page that is in a folio with the
policy ANON->POLICY.) */
- struct cap page;
+ struct vg_cap page;
memset (&page, 0, sizeof (page));
- page.type = cap_page;
- CAP_POLICY_SET (&page, anon->policy);
+ page.type = vg_cap_page;
+ VG_CAP_POLICY_SET (&page, anon->policy);
- addr_t addr = addr_chop (PTR_TO_ADDR (fault_addr + i * PAGESIZE),
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (fault_addr + i * PAGESIZE),
PAGESIZE_LOG2);
as_ensure_use
(addr,
({
bool ret;
- ret = cap_copy_x (anon->activity,
- ADDR_VOID, slot, addr,
- ADDR_VOID, page, storage_desc->storage,
- read_only ? CAP_COPY_WEAKEN : 0,
- CAP_PROPERTIES_VOID);
+ ret = vg_cap_copy_x (anon->activity,
+ VG_ADDR_VOID, slot, addr,
+ VG_ADDR_VOID, page, storage_desc->storage,
+ read_only ? VG_CAP_COPY_WEAKEN : 0,
+ VG_CAP_PROPERTIES_VOID);
assert (ret);
}));
@@ -319,7 +319,7 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
}
if (! recursive || ! (anon->flags & ANONYMOUS_NO_RECURSIVE))
- pages[i] = ADDR_TO_PTR (addr_extend (storage_desc->storage,
+ pages[i] = VG_ADDR_TO_PTR (vg_addr_extend (storage_desc->storage,
0, PAGESIZE_LOG2));
}
@@ -327,7 +327,7 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
int faulted;
for (i = 0; i < count; i += faulted)
{
- error_t err = rm_fault (ADDR_VOID, fault_addr + i * PAGESIZE,
+ error_t err = rm_fault (VG_ADDR_VOID, fault_addr + i * PAGESIZE,
count - i, &faulted);
if (err || faulted == 0)
break;
@@ -471,7 +471,7 @@ mdestroy (struct map *map)
debug (5, "Freed %d pages", count);
/* Free the map area. Should we also free the staging area? */
- as_free (PTR_TO_ADDR (map->region.start), map->region.length);
+ as_free (VG_PTR_TO_ADDR (map->region.start), map->region.length);
}
static void
@@ -490,7 +490,7 @@ destroy (struct pager *pager)
/* Free the staging area. */
{
assert ((anon->flags & ANONYMOUS_STAGING_AREA));
- as_free (addr_chop (PTR_TO_ADDR (anon->staging_area), PAGESIZE_LOG2),
+ as_free (vg_addr_chop (VG_PTR_TO_ADDR (anon->staging_area), PAGESIZE_LOG2),
anon->pager.length / PAGESIZE);
}
else
@@ -556,14 +556,14 @@ advise (struct pager *pager,
case pager_advice_normal:
{
- struct activation_fault_info info;
+ struct vg_activation_fault_info info;
info.discarded = anon->policy.discardable;
- info.type = cap_page;
+ info.type = vg_cap_page;
/* XXX: What should we set info.access to? */
info.access = MAP_ACCESS_ALL;
bool r = fault (pager, start, length / PAGESIZE, false,
- addr_prefix (anon->map_area) + start, 0, info);
+ vg_addr_prefix (anon->map_area) + start, 0, info);
if (! r)
debug (5, "Did not resolve fault for anonymous pager");
@@ -577,7 +577,7 @@ advise (struct pager *pager,
}
struct anonymous_pager *
-anonymous_pager_alloc (addr_t activity,
+anonymous_pager_alloc (vg_addr_t activity,
void *hint, uintptr_t length, enum map_access access,
struct object_policy policy,
uintptr_t flags, anonymous_pager_fill_t fill,
@@ -640,7 +640,7 @@ anonymous_pager_alloc (addr_t activity,
may not cover all of the requested region if the starting
address is not aligned on a 1 << WIDTH boundary. Consider
a requested address of 12k and a size of 8k. In this case,
- WIDTH is 13 and addr_chop (hint, WIDTH) => 8k thus yielding
+ WIDTH is 13 and vg_addr_chop (hint, WIDTH) => 8k thus yielding
the region 8-16k, yet, the requested region is 12k-20k! In
such cases, we just need to double the width to cover the
whole region. */
@@ -659,7 +659,7 @@ anonymous_pager_alloc (addr_t activity,
{
/* NB: this may round HINT down if we need a power-of-2 staging
area! */
- anon->map_area = addr_chop (PTR_TO_ADDR (hint), width);
+ anon->map_area = vg_addr_chop (VG_PTR_TO_ADDR (hint), width);
bool r = as_alloc_at (anon->map_area, count);
if (! r)
@@ -667,10 +667,10 @@ anonymous_pager_alloc (addr_t activity,
{
if ((flags & ANONYMOUS_FIXED))
{
- debug (0, "(%p, %x (%p)): Specified range " ADDR_FMT "+%d "
+ debug (0, "(%p, %x (%p)): Specified range " VG_ADDR_FMT "+%d "
"in use and ANONYMOUS_FIXED specified",
hint, length, hint + length - 1,
- ADDR_PRINTF (anon->map_area), count);
+ VG_ADDR_PRINTF (anon->map_area), count);
goto error_with_buffer;
}
}
@@ -683,14 +683,14 @@ anonymous_pager_alloc (addr_t activity,
if (! alloced)
{
anon->map_area = as_alloc (width, count, true);
- if (ADDR_IS_VOID (anon->map_area))
+ if (VG_ADDR_IS_VOID (anon->map_area))
{
debug (0, "(%p, %x (%p)): No VA available",
hint, length, hint + length - 1);
goto error_with_buffer;
}
- *addr_out = ADDR_TO_PTR (addr_extend (anon->map_area, 0, width));
+ *addr_out = VG_ADDR_TO_PTR (vg_addr_extend (anon->map_area, 0, width));
}
anon->map_area_count = count;
@@ -699,11 +699,11 @@ anonymous_pager_alloc (addr_t activity,
if ((flags & ANONYMOUS_STAGING_AREA))
/* We need a staging area. */
{
- addr_t staging_area = as_alloc (PAGESIZE_LOG2, length / PAGESIZE, true);
- if (ADDR_IS_VOID (staging_area))
+ vg_addr_t staging_area = as_alloc (PAGESIZE_LOG2, length / PAGESIZE, true);
+ if (VG_ADDR_IS_VOID (staging_area))
goto error_with_map_area;
- anon->staging_area = ADDR_TO_PTR (addr_extend (staging_area,
+ anon->staging_area = VG_ADDR_TO_PTR (vg_addr_extend (staging_area,
0, PAGESIZE_LOG2));
}
diff --git a/libhurd-mm/anonymous.h b/libhurd-mm/anonymous.h
index fe5491e..aac7f7b 100644
--- a/libhurd-mm/anonymous.h
+++ b/libhurd-mm/anonymous.h
@@ -80,7 +80,7 @@ enum
typedef bool (*anonymous_pager_fill_t) (struct anonymous_pager *anon,
uintptr_t offset, uintptr_t count,
void *pages[],
- struct activation_fault_info info);
+ struct vg_activation_fault_info info);
#define ANONYMOUS_MAGIC 0xa707a707
@@ -103,7 +103,7 @@ struct anonymous_pager
/* The user's window onto the pager. */
- addr_t map_area;
+ vg_addr_t map_area;
int map_area_count;
ss_mutex_t lock;
@@ -115,7 +115,7 @@ struct anonymous_pager
/* Activity against which storage should be allocated. */
- addr_t activity;
+ vg_addr_t activity;
/* The policy to use when allocating memory. */
struct object_policy policy;
@@ -137,7 +137,7 @@ struct anonymous_pager
ADDR_HINT indicates the preferred starting address. Unless
ANONYMOUS_FIXED is included in FLAGS, the implementation may choose
another address. (The region will be allocated using as_alloc.)
- Both ADDR and LENGTH must be a multiple of the base page size. If
+ Both ADDR_HINT and LENGTH must be a multiple of the base page size. If
the specified region overlaps with an existing pager, EEXIST is
returned. The chosen start address is returned in *ADDR_OUT.
@@ -164,7 +164,7 @@ struct anonymous_pager
up. When the fill function is invoked, access to the main region
is disabled; any access is blocked until the fill function
returns. */
-extern struct anonymous_pager *anonymous_pager_alloc (addr_t activity,
+extern struct anonymous_pager *anonymous_pager_alloc (vg_addr_t activity,
void *addr_hint,
uintptr_t length,
enum map_access access,
diff --git a/libhurd-mm/as-build-custom.c b/libhurd-mm/as-build-custom.c
index 5c200ce..0acc415 100644
--- a/libhurd-mm/as-build-custom.c
+++ b/libhurd-mm/as-build-custom.c
@@ -28,9 +28,9 @@
#include "as-build.c"
-struct cap *
+struct vg_cap *
as_ensure_full_custom (activity_t activity,
- addr_t as_root_addr, struct cap *root, addr_t addr,
+ vg_addr_t as_root_addr, struct vg_cap *root, vg_addr_t addr,
as_allocate_page_table_t as_allocate_page_table,
as_object_index_t object_index)
{
@@ -39,18 +39,18 @@ as_ensure_full_custom (activity_t activity,
true);
}
-struct cap *
+struct vg_cap *
as_insert_custom (activity_t activity,
- addr_t as_root_addr, struct cap *root, addr_t addr,
- addr_t entry_as, struct cap entry, addr_t entry_addr,
+ vg_addr_t as_root_addr, struct vg_cap *root, vg_addr_t addr,
+ vg_addr_t entry_as, struct vg_cap entry, vg_addr_t entry_addr,
as_allocate_page_table_t as_allocate_page_table,
as_object_index_t object_index)
{
- struct cap *slot = as_build_custom (activity, as_root_addr, root, addr,
+ struct vg_cap *slot = as_build_custom (activity, as_root_addr, root, addr,
as_allocate_page_table,
object_index, false);
assert (slot);
- cap_copy (activity, as_root_addr, slot, addr, entry_as, entry, entry_addr);
+ vg_cap_copy (activity, as_root_addr, slot, addr, entry_as, entry, entry_addr);
return slot;
}
diff --git a/libhurd-mm/as-build.c b/libhurd-mm/as-build.c
index b2266cb..25ba5b9 100644
--- a/libhurd-mm/as-build.c
+++ b/libhurd-mm/as-build.c
@@ -85,7 +85,7 @@ struct trace_buffer as_trace = TRACE_BUFFER_INIT ("as_trace", 0,
#ifdef RM_INTERN
# define AS_DUMP as_dump_from (activity, as_root, __func__)
#else
-# define AS_DUMP rm_as_dump (ADDR_VOID, as_root_addr)
+# define AS_DUMP rm_as_dump (VG_ADDR_VOID, as_root_addr)
#endif
/* The following macros allow providing specialized address-space
@@ -120,49 +120,49 @@ struct trace_buffer as_trace = TRACE_BUFFER_INIT ("as_trace", 0,
location of the idx'th capability slot. If the capability is
implicit (in the case of a folio), return a fabricated capability
in *FAKE_SLOT and return FAKE_SLOT. Return NULL on failure. */
-static inline struct cap *
-do_index (activity_t activity, struct cap *pte, addr_t pt_addr, int idx,
- struct cap *fake_slot)
+static inline struct vg_cap *
+do_index (activity_t activity, struct vg_cap *pte, vg_addr_t pt_addr, int idx,
+ struct vg_cap *fake_slot)
{
- assert (pte->type == cap_cappage || pte->type == cap_rcappage
- || pte->type == cap_folio
- || pte->type == cap_thread
- || pte->type == cap_messenger || pte->type == cap_rmessenger);
+ assert (pte->type == vg_cap_cappage || pte->type == vg_cap_rcappage
+ || pte->type == vg_cap_folio
+ || pte->type == vg_cap_thread
+ || pte->type == vg_cap_messenger || pte->type == vg_cap_rmessenger);
/* Load the referenced object. */
- struct object *pt = cap_to_object (activity, pte);
+ struct object *pt = vg_cap_to_object (activity, pte);
if (! pt)
/* PTE's type was not void but its designation was invalid. This
can only happen if we inserted an object and subsequently
destroyed it. */
{
- /* The type should now have been set to cap_void. */
- assert (pte->type == cap_void);
- PANIC ("No object at " ADDR_FMT, ADDR_PRINTF (pt_addr));
+ /* The type should now have been set to vg_cap_void. */
+ assert (pte->type == vg_cap_void);
+ PANIC ("No object at " VG_ADDR_FMT, VG_ADDR_PRINTF (pt_addr));
}
switch (pte->type)
{
- case cap_cappage:
- case cap_rcappage:
- return &pt->caps[CAP_SUBPAGE_OFFSET (pte) + idx];
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
+ return &pt->caps[VG_CAP_SUBPAGE_OFFSET (pte) + idx];
- case cap_folio:;
+ case vg_cap_folio:;
struct folio *folio = (struct folio *) pt;
- if (folio_object_type (folio, idx) == cap_void)
- PANIC ("Can't use void object at " ADDR_FMT " for address translation",
- ADDR_PRINTF (pt_addr));
+ if (vg_folio_object_type (folio, idx) == vg_cap_void)
+ PANIC ("Can't use void object at " VG_ADDR_FMT " for address translation",
+ VG_ADDR_PRINTF (pt_addr));
- *fake_slot = folio_object_cap (folio, idx);
+ *fake_slot = vg_folio_object_cap (folio, idx);
return fake_slot;
- case cap_thread:
- assert (idx < THREAD_SLOTS);
+ case vg_cap_thread:
+ assert (idx < VG_THREAD_SLOTS);
return &pt->caps[idx];
- case cap_messenger:
+ case vg_cap_messenger:
/* Note: rmessengers don't expose their capability slots. */
assert (idx < VG_MESSENGER_SLOTS);
return &pt->caps[idx];
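Editorial note: do_index dispatches on the (renamed) capability type to locate the idx'th slot; folios are the special case, since their slots are implicit and a capability must be fabricated into FAKE_SLOT. The standalone sketch below reproduces that dispatch with stand-in types to make the shape visible; every name in it is a simplification, not the real kernel structure.

  #include <stdio.h>

  enum vg_cap_type { vg_cap_void, vg_cap_cappage, vg_cap_folio };  /* Abridged. */

  struct vg_cap { enum vg_cap_type type; int slot_of; };

  struct object { struct vg_cap caps[256]; };

  /* Stand-in: fabricate the capability for object IDX of a folio.  */
  static struct vg_cap
  folio_cap (int idx)
  {
    struct vg_cap c = { vg_cap_cappage, idx };
    return c;
  }

  /* Return the IDX'th slot of the object PTE designates; for folios,
     fabricate it in *FAKE_SLOT and return FAKE_SLOT.  */
  static struct vg_cap *
  index_object (struct vg_cap *pte, struct object *pt, int idx,
                struct vg_cap *fake_slot)
  {
    switch (pte->type)
      {
      case vg_cap_cappage:
        return &pt->caps[idx];          /* Real, writable slot.  */

      case vg_cap_folio:
        *fake_slot = folio_cap (idx);   /* Implicit slot: fabricate.  */
        return fake_slot;

      default:
        return NULL;                    /* Type translates no bits.  */
      }
  }

  int
  main (void)
  {
    struct object pt = { 0 };
    struct vg_cap pte = { vg_cap_folio, 0 }, fake;
    struct vg_cap *slot = index_object (&pte, &pt, 3, &fake);
    printf ("fabricated: %d\n", slot == &fake);
    return 0;
  }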
@@ -186,53 +186,53 @@ do_index (activity_t activity, struct cap *pte, addr_t pt_addr, int idx,
If MAY_OVERWRITE is true, the function may overwrite an existing
capability. Otherwise, only capability slots containing a void
capability are used. */
-struct cap *
+struct vg_cap *
ID (as_build) (activity_t activity,
- addr_t as_root_addr, struct cap *as_root, addr_t addr,
+ vg_addr_t as_root_addr, struct vg_cap *as_root, vg_addr_t addr,
as_allocate_page_table_t allocate_page_table
OBJECT_INDEX_PARAM,
bool may_overwrite)
{
- struct cap *pte = as_root;
+ struct vg_cap *pte = as_root;
- DEBUG (5, DEBUG_BOLD ("Ensuring slot at " ADDR_FMT) " may overwrite: %d",
- ADDR_PRINTF (addr), may_overwrite);
- assert (! ADDR_IS_VOID (addr));
+ DEBUG (5, DEBUG_BOLD ("Ensuring slot at " VG_ADDR_FMT) " may overwrite: %d",
+ VG_ADDR_PRINTF (addr), may_overwrite);
+ assert (! VG_ADDR_IS_VOID (addr));
/* The number of bits to translate. */
- int remaining = addr_depth (addr);
+ int remaining = vg_addr_depth (addr);
/* The REMAINING bits to translate are in the REMAINING most significant
bits of PREFIX. Here it is more convenient to have them in the
lower bits. */
- uint64_t prefix = addr_prefix (addr) >> (ADDR_BITS - remaining);
+ uint64_t prefix = vg_addr_prefix (addr) >> (VG_ADDR_BITS - remaining);
/* Folios are not made up of capability slots and cannot be written
to. When traversing a folio, we manufacture a capability to the
used object in FAKE_SLOT. If ADDR ends up designating such a
capability, we fail. */
- struct cap fake_slot;
+ struct vg_cap fake_slot;
do
{
- addr_t pte_addr = addr_chop (addr, remaining);
+ vg_addr_t pte_addr = vg_addr_chop (addr, remaining);
- DEBUG (5, "Cap at " ADDR_FMT ": " CAP_FMT " -> " ADDR_FMT " (%p); "
+ DEBUG (5, "Cap at " VG_ADDR_FMT ": " VG_CAP_FMT " -> " VG_ADDR_FMT " (%p); "
"remaining: %d",
- ADDR_PRINTF (pte_addr),
- CAP_PRINTF (pte),
- ADDR_PRINTF (addr_chop (addr,
- remaining - CAP_GUARD_BITS (pte))),
+ VG_ADDR_PRINTF (pte_addr),
+ VG_CAP_PRINTF (pte),
+ VG_ADDR_PRINTF (vg_addr_chop (addr,
+ remaining - VG_CAP_GUARD_BITS (pte))),
#ifdef RM_INTERN
NULL,
#else
- cap_get_shadow (pte),
+ vg_cap_get_shadow (pte),
#endif
remaining);
AS_CHECK_SHADOW (as_root_addr, pte_addr, pte, {});
- uint64_t pte_guard = CAP_GUARD (pte);
- int pte_gbits = CAP_GUARD_BITS (pte);
+ uint64_t pte_guard = VG_CAP_GUARD (pte);
+ int pte_gbits = VG_CAP_GUARD_BITS (pte);
uint64_t addr_guard;
if (remaining >= pte_gbits)
@@ -254,14 +254,14 @@ ID (as_build) (activity_t activity,
the other context may only use a slot if it owns the
area. */
break;
- else if ((pte->type == cap_cappage || pte->type == cap_rcappage
- || pte->type == cap_folio
- || pte->type == cap_thread
- || pte->type == cap_messenger)
+ else if ((pte->type == vg_cap_cappage || pte->type == vg_cap_rcappage
+ || pte->type == vg_cap_folio
+ || pte->type == vg_cap_thread
+ || pte->type == vg_cap_messenger)
&& remaining >= pte_gbits
&& pte_guard == addr_guard)
/* PTE's (possibly zero-width) guard matches and the
- designated object translates ADDR. We index the object
+ designated object translates VG_ADDR. We index the object
below. */
{
remaining -= pte_gbits;
@@ -352,18 +352,18 @@ ID (as_build) (activity_t activity,
length of the pte in the new cappage. */
int gbits;
- bool need_pivot = ! (pte->type == cap_void && pte_gbits == 0);
+ bool need_pivot = ! (pte->type == vg_cap_void && pte_gbits == 0);
if (! need_pivot)
/* The slot is available. */
{
int space = vg_msb64 (extract_bits64 (prefix, 0, remaining));
- if (space <= CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
+ if (space <= VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
/* The remaining bits to translate fit in the
guard, we are done. */
break;
/* The guard value requires more than
- CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS bits. We need to
+ VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS bits. We need to
insert a page table. */
gbits = tilobject = remaining;
}
@@ -386,23 +386,23 @@ ID (as_build) (activity_t activity,
area. */
int firstset = vg_msb64 (extract_bits64_inv (prefix,
remaining - 1, gbits));
- if (firstset > CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
+ if (firstset > VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
/* FIRSTSET is the first (most significant) non-zero guard
bit. GBITS - FIRSTSET are the number of zero bits
before the most significant non-zero bit. We can
include all of the initial zero bits plus up to the
- next CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS bits. */
- gbits -= firstset - CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS;
+ next VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS bits. */
+ gbits -= firstset - VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS;
/* We want to choose the guard length such that the cappage
that we insert occurs at certain positions so as minimize
small partial cappages and painful rearrangements of the
tree. In particular, we want the total remaining bits to
translate after accounting the guard to be equal to
- FOLIO_OBJECTS_LOG2 + i * CAPPAGE_SLOTS_LOG2 where i >= 0.
+ VG_FOLIO_OBJECTS_LOG2 + i * VG_CAPPAGE_SLOTS_LOG2 where i >= 0.
As GBITS is maximal, we may have to remove guard bits to
achieve this. */
- int untranslated_bits = remaining + ADDR_BITS - addr_depth (addr);
+ int untranslated_bits = remaining + VG_ADDR_BITS - vg_addr_depth (addr);
if (! (untranslated_bits > 0 && tilobject > 0 && gbits >= 0
&& untranslated_bits >= tilobject
@@ -422,25 +422,25 @@ ID (as_build) (activity_t activity,
remaining -= gbits;
int pt_width = gc.cappage_width;
- if (! (pt_width > 0 && pt_width <= CAPPAGE_SLOTS_LOG2))
+ if (! (pt_width > 0 && pt_width <= VG_CAPPAGE_SLOTS_LOG2))
PANIC ("pt_width: %d", pt_width);
/* Allocate a new page table. */
/* XXX: If we use a subpage, we just ignore the rest of the
page. This is a bit of a waste but makes the code
simpler. */
- addr_t pt_addr = addr_chop (addr, remaining);
+ vg_addr_t pt_addr = vg_addr_chop (addr, remaining);
struct as_allocate_pt_ret rt = allocate_page_table (pt_addr);
- if (rt.cap.type == cap_void)
+ if (rt.cap.type == vg_cap_void)
/* No memory. */
return NULL;
- struct cap pt_cap = rt.cap;
- addr_t pt_phys_addr = rt.storage;
+ struct vg_cap pt_cap = rt.cap;
+ vg_addr_t pt_phys_addr = rt.storage;
/* do_index requires that the subpage specification be
correct. */
- CAP_SET_SUBPAGE (&pt_cap,
- 0, 1 << (CAPPAGE_SLOTS_LOG2 - pt_width));
+ VG_CAP_SET_SUBPAGE (&pt_cap,
+ 0, 1 << (VG_CAPPAGE_SLOTS_LOG2 - pt_width));
@@ -471,50 +471,50 @@ ID (as_build) (activity_t activity,
int pivot_idx = extract_bits_inv (pte_guard,
pte_gbits - gbits - 1,
pt_width);
- addr_t pivot_addr = addr_extend (pt_addr,
+ vg_addr_t pivot_addr = vg_addr_extend (pt_addr,
pivot_idx, pt_width);
- addr_t pivot_phys_addr = addr_extend (pt_phys_addr,
+ vg_addr_t pivot_phys_addr = vg_addr_extend (pt_phys_addr,
pivot_idx,
- CAPPAGE_SLOTS_LOG2);
+ VG_CAPPAGE_SLOTS_LOG2);
int pivot_gbits = pte_gbits - gbits - pt_width;
int pivot_guard = extract_bits64 (pte_guard, 0, pivot_gbits);
- if (! ADDR_EQ (addr_extend (pivot_addr, pivot_guard, pivot_gbits),
- addr_extend (pte_addr, pte_guard, pte_gbits)))
+ if (! VG_ADDR_EQ (vg_addr_extend (pivot_addr, pivot_guard, pivot_gbits),
+ vg_addr_extend (pte_addr, pte_guard, pte_gbits)))
{
- PANIC ("old pte target: " ADDR_FMT " != pivot target: " ADDR_FMT,
- ADDR_PRINTF (addr_extend (pte_addr,
+ PANIC ("old pte target: " VG_ADDR_FMT " != pivot target: " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (vg_addr_extend (pte_addr,
pte_guard, pte_gbits)),
- ADDR_PRINTF (addr_extend (pivot_addr,
+ VG_ADDR_PRINTF (vg_addr_extend (pivot_addr,
pivot_guard, pivot_gbits)));
}
- DEBUG (5, ADDR_FMT ": indirecting pte at " ADDR_FMT
- " -> " ADDR_FMT " " CAP_FMT " with page table/%d at "
- ADDR_FMT "(%p) " "common guard: %d, remaining: %d; "
+ DEBUG (5, VG_ADDR_FMT ": indirecting pte at " VG_ADDR_FMT
+ " -> " VG_ADDR_FMT " " VG_CAP_FMT " with page table/%d at "
+ VG_ADDR_FMT "(%p) " "common guard: %d, remaining: %d; "
"old target (need pivot: %d) now via pt[%d] "
- "(" ADDR_FMT "-> " DEBUG_BOLD (ADDR_FMT) ")",
- ADDR_PRINTF (addr),
- ADDR_PRINTF (pte_addr),
- ADDR_PRINTF (addr_extend (pte_addr, CAP_GUARD (pte),
- CAP_GUARD_BITS (pte))),
- CAP_PRINTF (pte),
- pt_width, ADDR_PRINTF (pt_addr),
+ "(" VG_ADDR_FMT "-> " DEBUG_BOLD (VG_ADDR_FMT) ")",
+ VG_ADDR_PRINTF (addr),
+ VG_ADDR_PRINTF (pte_addr),
+ VG_ADDR_PRINTF (vg_addr_extend (pte_addr, VG_CAP_GUARD (pte),
+ VG_CAP_GUARD_BITS (pte))),
+ VG_CAP_PRINTF (pte),
+ pt_width, VG_ADDR_PRINTF (pt_addr),
#ifdef RM_INTERN
NULL,
#else
- cap_get_shadow (&pt_cap),
+ vg_cap_get_shadow (&pt_cap),
#endif
gbits, remaining,
- need_pivot, pivot_idx, ADDR_PRINTF (pivot_addr),
- ADDR_PRINTF (addr_extend (pivot_addr,
+ need_pivot, pivot_idx, VG_ADDR_PRINTF (pivot_addr),
+ VG_ADDR_PRINTF (vg_addr_extend (pivot_addr,
pivot_guard, pivot_gbits)));
/* 1.) Copy the PTE into the new page table. Adjust the
guard in the process. This is only necessary if PTE
actually designates something. */
- struct cap *pivot_cap = NULL;
+ struct vg_cap *pivot_cap = NULL;
if (need_pivot)
{
/* 1.a) Get the pivot PTE. */
@@ -526,18 +526,18 @@ ID (as_build) (activity_t activity,
/* 1.b) Make the pivot designate the object the PTE
currently designates. */
- struct cap_addr_trans addr_trans = CAP_ADDR_TRANS_VOID;
+ struct vg_cap_addr_trans addr_trans = VG_CAP_ADDR_TRANS_VOID;
bool r;
- r = CAP_ADDR_TRANS_SET_GUARD (&addr_trans,
+ r = VG_CAP_ADDR_TRANS_SET_GUARD (&addr_trans,
pivot_guard, pivot_gbits);
assert (r);
- r = cap_copy_x (activity,
- ADDR_VOID, pivot_cap, pivot_phys_addr,
+ r = vg_cap_copy_x (activity,
+ VG_ADDR_VOID, pivot_cap, pivot_phys_addr,
as_root_addr, *pte, pte_addr,
- CAP_COPY_COPY_ADDR_TRANS_GUARD,
- CAP_PROPERTIES (OBJECT_POLICY_DEFAULT,
+ VG_CAP_COPY_COPY_ADDR_TRANS_GUARD,
+ VG_CAP_PROPERTIES (VG_OBJECT_POLICY_DEFAULT,
addr_trans));
assert (r);
}
@@ -547,22 +547,22 @@ ID (as_build) (activity_t activity,
pte_gbits - 1, gbits);
pte_gbits = gbits;
- struct cap_addr_trans addr_trans = CAP_ADDR_TRANS_VOID;
+ struct vg_cap_addr_trans addr_trans = VG_CAP_ADDR_TRANS_VOID;
bool r;
- r = CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&addr_trans,
+ r = VG_CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&addr_trans,
pte_guard, pte_gbits,
0 /* We always use the
first subpage in
a page. */,
- 1 << (CAPPAGE_SLOTS_LOG2
+ 1 << (VG_CAPPAGE_SLOTS_LOG2
- pt_width));
assert (r);
- r = cap_copy_x (activity, as_root_addr, pte, pte_addr,
- ADDR_VOID, pt_cap, rt.storage,
- CAP_COPY_COPY_ADDR_TRANS_SUBPAGE
- | CAP_COPY_COPY_ADDR_TRANS_GUARD,
- CAP_PROPERTIES (OBJECT_POLICY_DEFAULT, addr_trans));
+ r = vg_cap_copy_x (activity, as_root_addr, pte, pte_addr,
+ VG_ADDR_VOID, pt_cap, rt.storage,
+ VG_CAP_COPY_COPY_ADDR_TRANS_SUBPAGE
+ | VG_CAP_COPY_COPY_ADDR_TRANS_GUARD,
+ VG_CAP_PROPERTIES (VG_OBJECT_POLICY_DEFAULT, addr_trans));
assert (r);
#ifndef NDEBUG
@@ -579,8 +579,8 @@ ID (as_build) (activity_t activity,
if (! (ret && rt.capp == pivot_cap))
as_dump_from (activity, as_root, "");
assertx (ret && rt.capp == pivot_cap,
- ADDR_FMT ": %sfound, got %p, expected %p",
- ADDR_PRINTF (pivot_addr),
+ VG_ADDR_FMT ": %sfound, got %p, expected %p",
+ VG_ADDR_PRINTF (pivot_addr),
ret ? "" : "not ", ret ? rt.capp : 0, pivot_cap);
AS_CHECK_SHADOW (as_root_addr, pivot_addr, pivot_cap, { });
@@ -595,52 +595,52 @@ ID (as_build) (activity_t activity,
int width;
switch (pte->type)
{
- case cap_cappage:
- case cap_rcappage:
- width = CAP_SUBPAGE_SIZE_LOG2 (pte);
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
+ width = VG_CAP_SUBPAGE_SIZE_LOG2 (pte);
break;
- case cap_folio:
- width = FOLIO_OBJECTS_LOG2;
+ case vg_cap_folio:
+ width = VG_FOLIO_OBJECTS_LOG2;
break;
- case cap_thread:
- width = THREAD_SLOTS_LOG2;
+ case vg_cap_thread:
+ width = VG_THREAD_SLOTS_LOG2;
break;
- case cap_messenger:
+ case vg_cap_messenger:
/* Note: rmessengers don't expose their capability slots. */
width = VG_MESSENGER_SLOTS_LOG2;
break;
default:
AS_DUMP;
- PANIC ("Can't insert object at " ADDR_FMT ": "
- CAP_FMT " does translate address bits",
- ADDR_PRINTF (addr),
- CAP_PRINTF (pte));
+ PANIC ("Can't insert object at " VG_ADDR_FMT ": "
+ VG_CAP_FMT " does translate address bits",
+ VG_ADDR_PRINTF (addr),
+ VG_CAP_PRINTF (pte));
}
/* That should not be more than we have left to translate. */
if (width > remaining)
{
AS_DUMP;
- PANIC ("Translating " ADDR_FMT ": can't index %d-bit %s at "
- ADDR_FMT "; not enough bits (%d)",
- ADDR_PRINTF (addr), width, cap_type_string (pte->type),
- ADDR_PRINTF (addr_chop (addr, remaining)), remaining);
+ PANIC ("Translating " VG_ADDR_FMT ": can't index %d-bit %s at "
+ VG_ADDR_FMT "; not enough bits (%d)",
+ VG_ADDR_PRINTF (addr), width, vg_cap_type_string (pte->type),
+ VG_ADDR_PRINTF (vg_addr_chop (addr, remaining)), remaining);
}
int idx = extract_bits64_inv (prefix, remaining - 1, width);
- enum cap_type type = pte->type;
- pte = do_index (activity, pte, addr_chop (addr, remaining), idx,
+ enum vg_cap_type type = pte->type;
+ pte = do_index (activity, pte, vg_addr_chop (addr, remaining), idx,
&fake_slot);
if (! pte)
- PANIC ("Failed to index object at " ADDR_FMT,
- ADDR_PRINTF (addr_chop (addr, remaining)));
+ PANIC ("Failed to index object at " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (vg_addr_chop (addr, remaining)));
- if (type == cap_folio)
+ if (type == vg_cap_folio)
assert (pte == &fake_slot);
else
assert (pte != &fake_slot);
@@ -648,41 +648,41 @@ ID (as_build) (activity_t activity,
remaining -= width;
DEBUG (5, "Indexing %s/%d[%d]; remaining: %d",
- cap_type_string (type), width, idx, remaining);
+ vg_cap_type_string (type), width, idx, remaining);
if (remaining == 0)
AS_CHECK_SHADOW (as_root_addr, addr, pte, {});
}
while (remaining > 0);
- if (! (pte->type == cap_void && CAP_GUARD_BITS (pte) == 0))
+ if (! (pte->type == vg_cap_void && VG_CAP_GUARD_BITS (pte) == 0))
/* PTE in use. */
{
- if (remaining != CAP_GUARD_BITS (pte)
- && extract_bits64 (prefix, 0, remaining) != CAP_GUARD (pte))
- DEBUG (0, "Overwriting " CAP_FMT " at " ADDR_FMT " -> " ADDR_FMT,
- CAP_PRINTF (pte),
- ADDR_PRINTF (addr),
- ADDR_PRINTF (addr_extend (addr, CAP_GUARD (pte),
- CAP_GUARD_BITS (pte))));
+ if (remaining != VG_CAP_GUARD_BITS (pte)
+ && extract_bits64 (prefix, 0, remaining) != VG_CAP_GUARD (pte))
+ DEBUG (0, "Overwriting " VG_CAP_FMT " at " VG_ADDR_FMT " -> " VG_ADDR_FMT,
+ VG_CAP_PRINTF (pte),
+ VG_ADDR_PRINTF (addr),
+ VG_ADDR_PRINTF (vg_addr_extend (addr, VG_CAP_GUARD (pte),
+ VG_CAP_GUARD_BITS (pte))));
if (may_overwrite)
{
- DEBUG (5, "Overwriting " CAP_FMT " at " ADDR_FMT " -> " ADDR_FMT,
- CAP_PRINTF (pte),
- ADDR_PRINTF (addr),
- ADDR_PRINTF (addr_extend (addr, CAP_GUARD (pte),
- CAP_GUARD_BITS (pte))));
+ DEBUG (5, "Overwriting " VG_CAP_FMT " at " VG_ADDR_FMT " -> " VG_ADDR_FMT,
+ VG_CAP_PRINTF (pte),
+ VG_ADDR_PRINTF (addr),
+ VG_ADDR_PRINTF (vg_addr_extend (addr, VG_CAP_GUARD (pte),
+ VG_CAP_GUARD_BITS (pte))));
/* XXX: Free any data associated with the capability
(e.g., shadow pages). */
}
else
{
AS_DUMP;
- PANIC ("There is already an object at " ADDR_FMT
- " (" CAP_FMT ") but may not overwrite.",
- ADDR_PRINTF (addr),
- CAP_PRINTF (pte));
+ PANIC ("There is already an object at " VG_ADDR_FMT
+ " (" VG_CAP_FMT ") but may not overwrite.",
+ VG_ADDR_PRINTF (addr),
+ VG_CAP_PRINTF (pte));
}
}
@@ -691,19 +691,19 @@ ID (as_build) (activity_t activity,
/* It is safe to use an int as a guard has at most 22 significant
bits. */
int guard = extract_bits64 (prefix, 0, gbits);
- if (gbits != CAP_GUARD_BITS (pte) || guard != CAP_GUARD (pte))
+ if (gbits != VG_CAP_GUARD_BITS (pte) || guard != VG_CAP_GUARD (pte))
{
- struct cap_addr_trans addr_trans = CAP_ADDR_TRANS_VOID;
- bool r = CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&addr_trans, guard, gbits,
+ struct vg_cap_addr_trans addr_trans = VG_CAP_ADDR_TRANS_VOID;
+ bool r = VG_CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&addr_trans, guard, gbits,
0, 1);
assert (r);
- r = cap_copy_x (activity, as_root_addr, pte, addr_chop (addr, gbits),
- as_root_addr, *pte, addr_chop (addr, gbits),
- CAP_COPY_COPY_ADDR_TRANS_GUARD,
- CAP_PROPERTIES (OBJECT_POLICY_DEFAULT, addr_trans));
+ r = vg_cap_copy_x (activity, as_root_addr, pte, vg_addr_chop (addr, gbits),
+ as_root_addr, *pte, vg_addr_chop (addr, gbits),
+ VG_CAP_COPY_COPY_ADDR_TRANS_GUARD,
+ VG_CAP_PROPERTIES (VG_OBJECT_POLICY_DEFAULT, addr_trans));
assert (r);
- AS_CHECK_SHADOW (as_root_addr, addr_chop (addr, gbits), pte, { });
+ AS_CHECK_SHADOW (as_root_addr, vg_addr_chop (addr, gbits), pte, { });
}
#ifndef NDEBUG
@@ -719,8 +719,8 @@ ID (as_build) (activity_t activity,
if (! (ret && rt.capp == pte))
as_dump_from (activity, as_root, "");
assertx (ret && rt.capp == pte,
- ADDR_FMT ": %sfound, got %p, expected %p",
- ADDR_PRINTF (addr),
+ VG_ADDR_FMT ": %sfound, got %p, expected %p",
+ VG_ADDR_PRINTF (addr),
ret ? "" : "not ", ret ? rt.capp : 0, pte);
}
# endif
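Editorial note: almost every hunk in as-build.c turns on the same guard arithmetic: the untranslated REMAINING bits of the address live in the low bits of PREFIX, and a capability's g-bit guard must equal the top g of them. The program below re-implements extract_bits64 and extract_bits64_inv as the surrounding code uses them and checks a guard the way the translation loop does; the helper bodies are my reading of the sources, shown for illustration.

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Extract COUNT bits of X starting at (low) bit FIRST.  */
  static uint64_t
  extract_bits64 (uint64_t x, int first, int count)
  {
    assert (count >= 0 && count <= 64);
    if (count == 0)
      return 0;
    return (x >> first) & (~0ULL >> (64 - count));
  }

  /* Extract COUNT bits of X ending at (high) bit MSB, as the build
     and lookup loops do with the still-untranslated prefix.  */
  static uint64_t
  extract_bits64_inv (uint64_t x, int msb, int count)
  {
    return extract_bits64 (x, msb - count + 1, count);
  }

  int
  main (void)
  {
    /* Say 20 bits remain to translate and the next capability carries
       a 4-bit guard of value 0x9: the top 4 remaining bits must be
       0x9 for translation to proceed.  */
    uint64_t prefix = 0x9abcd;       /* Low 20 bits hold the rest.  */
    int remaining = 20, gbits = 4;

    uint64_t addr_guard = extract_bits64_inv (prefix, remaining - 1, gbits);
    printf ("guard bits: %#llx (match: %d)\n",
            (unsigned long long) addr_guard, addr_guard == 0x9);
    return 0;
  }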
diff --git a/libhurd-mm/as-compute-gbits.h b/libhurd-mm/as-compute-gbits.h
index 4fe1d42..4236ee7 100644
--- a/libhurd-mm/as-compute-gbits.h
+++ b/libhurd-mm/as-compute-gbits.h
@@ -35,9 +35,9 @@ as_compute_gbits_cappage (int untranslated_bits, int to_translate,
/* Our strategy is as follows: we want to avoid 1) having to move
page tables around, and 2) small cappages. We know that folios
will be mapped such that their data pages are visible in the data
- address space of the process, i.e., at /ADDR_BITS-7-12. Thus, we
- try to ensure that we have 7-bit cappages at /ADDR_BITS-7-12 and
- then 8-bit cappage at /ADDR_BITS-7-12-i*8, i > 0, i.e., /44, /36,
+ address space of the process, i.e., at /VG_ADDR_BITS-7-12. Thus, we
+ try to ensure that we have 7-bit cappages at /VG_ADDR_BITS-7-12 and
+ then 8-bit cappage at /VG_ADDR_BITS-7-12-i*8, i > 0, i.e., /44, /36,
etc. */
assertx (untranslated_bits > 0 && to_translate > 0 && gbits >= 0
@@ -51,7 +51,7 @@ as_compute_gbits_cappage (int untranslated_bits, int to_translate,
;
/* There could be less than PAGESIZE_LOG2 untranslated bits. Place
- a cappage at /ADDR_BITS-PAGESIZE_LOG2.
+ a cappage at /VG_ADDR_BITS-PAGESIZE_LOG2.
UNTRANSLATED_BITS
|--------------------|
@@ -65,9 +65,9 @@ as_compute_gbits_cappage (int untranslated_bits, int to_translate,
else if (untranslated_bits - gbits <= PAGESIZE_LOG2)
gbits = untranslated_bits - PAGESIZE_LOG2;
- /* There could be less than FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2
+ /* There could be less than VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2
untranslated bits. Place a cappage at
- /ADDR_BITS-FOLIO_OBJECTS_LOG2-PAGESIZE_LOG2.
+ /VG_ADDR_BITS-VG_FOLIO_OBJECTS_LOG2-PAGESIZE_LOG2.
UNTRANSLATED_BITS
|--------------------|
@@ -75,12 +75,12 @@ as_compute_gbits_cappage (int untranslated_bits, int to_translate,
GBITS
|------|-------|
| PAGESIZE_LOG2
- `FOLIO_OBJECTS_LOG2
+ `VG_FOLIO_OBJECTS_LOG2
^
*/
- else if (untranslated_bits - gbits <= FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2)
- gbits = untranslated_bits - FOLIO_OBJECTS_LOG2 - PAGESIZE_LOG2;
+ else if (untranslated_bits - gbits <= VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2)
+ gbits = untranslated_bits - VG_FOLIO_OBJECTS_LOG2 - PAGESIZE_LOG2;
/*
UNTRANSLATED_BITS
@@ -88,20 +88,20 @@ as_compute_gbits_cappage (int untranslated_bits, int to_translate,
|----------|-------|----|-----|
| | | PAGESIZE_LOG2
- | | `FOLIO_OBJECTS_LOG2
+ | | `VG_FOLIO_OBJECTS_LOG2
`GBITS `REMAINDER
Shrink GBITS such that REMAINDER becomes a multiple of
- CAPPAGE_SLOTS_LOG2.
+ VG_CAPPAGE_SLOTS_LOG2.
*/
else
{
int remainder = untranslated_bits - gbits
- - FOLIO_OBJECTS_LOG2 - PAGESIZE_LOG2;
+ - VG_FOLIO_OBJECTS_LOG2 - PAGESIZE_LOG2;
/* Amount to remove from GBITS such that REMAINDER + TO_REMOVE is a
- multiple of CAPPAGE_SLOTS_LOG2. */
- int to_remove = CAPPAGE_SLOTS_LOG2 - (remainder % CAPPAGE_SLOTS_LOG2);
+ multiple of VG_CAPPAGE_SLOTS_LOG2. */
+ int to_remove = VG_CAPPAGE_SLOTS_LOG2 - (remainder % VG_CAPPAGE_SLOTS_LOG2);
if (to_remove < gbits)
gbits -= to_remove;
@@ -112,10 +112,10 @@ as_compute_gbits_cappage (int untranslated_bits, int to_translate,
assert (gbits >= 0);
struct as_guard_cappage gc;
- if (untranslated_bits - gbits == FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2)
- gc.cappage_width = FOLIO_OBJECTS_LOG2;
+ if (untranslated_bits - gbits == VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2)
+ gc.cappage_width = VG_FOLIO_OBJECTS_LOG2;
else
- gc.cappage_width = CAPPAGE_SLOTS_LOG2;
+ gc.cappage_width = VG_CAPPAGE_SLOTS_LOG2;
if (gbits + gc.cappage_width > to_translate)
gc.cappage_width = to_translate - gbits;
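Editorial note: the renamed constants make the intent of as_compute_gbits_cappage easy to check by hand. With VG_FOLIO_OBJECTS_LOG2 = 7, PAGESIZE_LOG2 = 12, and VG_CAPPAGE_SLOTS_LOG2 = 8 (the values I read out of the viengoos headers; treat them as assumptions), the final else branch shrinks the guard until what remains below it is 7 + 12 + i*8 bits. A compact re-derivation of just that alignment step:

  #include <stdio.h>

  #define VG_FOLIO_OBJECTS_LOG2 7    /* Assumed values; see note above.  */
  #define PAGESIZE_LOG2 12
  #define VG_CAPPAGE_SLOTS_LOG2 8

  /* Shrink GBITS so that what remains below the guard, after the
     folio and page indices are accounted for, is a multiple of the
     cappage width -- mirroring the else branch in the hunk above.  */
  static int
  align_gbits (int untranslated_bits, int gbits)
  {
    int remainder = untranslated_bits - gbits
      - VG_FOLIO_OBJECTS_LOG2 - PAGESIZE_LOG2;
    int to_remove = VG_CAPPAGE_SLOTS_LOG2
      - (remainder % VG_CAPPAGE_SLOTS_LOG2);

    if (to_remove < gbits)
      gbits -= to_remove;
    return gbits;
  }

  int
  main (void)
  {
    /* 63 untranslated bits, 10 candidate guard bits: the guard
       shrinks to 4 so that 63 - 4 - 7 - 12 = 40 = 5 * 8.  */
    int g = align_gbits (63, 10);
    printf ("gbits: %d (remainder %d is a multiple of 8)\n",
            g, 63 - g - VG_FOLIO_OBJECTS_LOG2 - PAGESIZE_LOG2);
    return 0;
  }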
diff --git a/libhurd-mm/as-dump.c b/libhurd-mm/as-dump.c
index 4eb4168..628b5d3 100644
--- a/libhurd-mm/as-dump.c
+++ b/libhurd-mm/as-dump.c
@@ -65,16 +65,16 @@ print_nr (int width, int64_t nr, bool hex)
static void
do_walk (activity_t activity, int index,
- struct cap *root, addr_t addr,
+ struct vg_cap *root, vg_addr_t addr,
int indent, bool descend, const char *output_prefix)
{
int i;
- struct cap cap = as_cap_lookup_rel (activity, root, addr, -1, NULL);
- if (cap.type == cap_void)
+ struct vg_cap vg_cap = as_cap_lookup_rel (activity, root, addr, -1, NULL);
+ if (vg_cap.type == vg_cap_void)
return;
- if (! cap_to_object (activity, &cap))
+ if (! vg_cap_to_object (activity, &vg_cap))
/* Cap is there but the object has been deallocated. */
return;
@@ -90,30 +90,30 @@ do_walk (activity_t activity, int index,
S_PRINTF ("root");
S_PRINTF (" ] ");
- print_nr (12, addr_prefix (addr), true);
- S_PRINTF ("/%d ", addr_depth (addr));
- if (CAP_GUARD_BITS (&cap))
- S_PRINTF ("| 0x%llx/%d ", CAP_GUARD (&cap), CAP_GUARD_BITS (&cap));
- if (CAP_SUBPAGES (&cap) != 1)
- S_PRINTF ("(%d/%d) ", CAP_SUBPAGE (&cap), CAP_SUBPAGES (&cap));
+ print_nr (12, vg_addr_prefix (addr), true);
+ S_PRINTF ("/%d ", vg_addr_depth (addr));
+ if (VG_CAP_GUARD_BITS (&vg_cap))
+ S_PRINTF ("| 0x%llx/%d ", VG_CAP_GUARD (&vg_cap), VG_CAP_GUARD_BITS (&vg_cap));
+ if (VG_CAP_SUBPAGES (&vg_cap) != 1)
+ S_PRINTF ("(%d/%d) ", VG_CAP_SUBPAGE (&vg_cap), VG_CAP_SUBPAGES (&vg_cap));
- if (CAP_GUARD_BITS (&cap)
- && ADDR_BITS - addr_depth (addr) >= CAP_GUARD_BITS (&cap))
+ if (VG_CAP_GUARD_BITS (&vg_cap)
+ && VG_ADDR_BITS - vg_addr_depth (addr) >= VG_CAP_GUARD_BITS (&vg_cap))
S_PRINTF ("=> 0x%llx/%d ",
- addr_prefix (addr_extend (addr,
- CAP_GUARD (&cap),
- CAP_GUARD_BITS (&cap))),
- addr_depth (addr) + CAP_GUARD_BITS (&cap));
+ vg_addr_prefix (vg_addr_extend (addr,
+ VG_CAP_GUARD (&vg_cap),
+ VG_CAP_GUARD_BITS (&vg_cap))),
+ vg_addr_depth (addr) + VG_CAP_GUARD_BITS (&vg_cap));
#ifdef RM_INTERN
- S_PRINTF ("@" OID_FMT " ", OID_PRINTF (cap.oid));
+ S_PRINTF ("@" VG_OID_FMT " ", VG_OID_PRINTF (vg_cap.oid));
#endif
- S_PRINTF ("%s", cap_type_string (cap.type));
+ S_PRINTF ("%s", vg_cap_type_string (vg_cap.type));
#ifdef RM_INTERN
- if (cap.type == cap_page || cap.type == cap_rpage)
+ if (vg_cap.type == vg_cap_page || vg_cap.type == vg_cap_rpage)
{
- struct object *object = cap_to_object_soft (root_activity, &cap);
+ struct object *object = cap_to_object_soft (root_activity, &vg_cap);
if (object)
{
struct md5_ctx ctx;
@@ -145,55 +145,55 @@ do_walk (activity_t activity, int index,
if (! descend)
return;
- if (addr_depth (addr) + CAP_GUARD_BITS (&cap) > ADDR_BITS)
+ if (vg_addr_depth (addr) + VG_CAP_GUARD_BITS (&vg_cap) > VG_ADDR_BITS)
return;
- addr = addr_extend (addr, CAP_GUARD (&cap), CAP_GUARD_BITS (&cap));
+ addr = vg_addr_extend (addr, VG_CAP_GUARD (&vg_cap), VG_CAP_GUARD_BITS (&vg_cap));
- switch (cap.type)
+ switch (vg_cap.type)
{
- case cap_cappage:
- case cap_rcappage:
- if (addr_depth (addr) + CAP_SUBPAGE_SIZE_LOG2 (&cap) > ADDR_BITS)
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
+ if (vg_addr_depth (addr) + VG_CAP_SUBPAGE_SIZE_LOG2 (&vg_cap) > VG_ADDR_BITS)
return;
- for (i = 0; i < CAP_SUBPAGE_SIZE (&cap); i ++)
+ for (i = 0; i < VG_CAP_SUBPAGE_SIZE (&vg_cap); i ++)
do_walk (activity, i, root,
- addr_extend (addr, i, CAP_SUBPAGE_SIZE_LOG2 (&cap)),
+ vg_addr_extend (addr, i, VG_CAP_SUBPAGE_SIZE_LOG2 (&vg_cap)),
indent + 1, true, output_prefix);
return;
- case cap_folio:
- if (addr_depth (addr) + FOLIO_OBJECTS_LOG2 > ADDR_BITS)
+ case vg_cap_folio:
+ if (vg_addr_depth (addr) + VG_FOLIO_OBJECTS_LOG2 > VG_ADDR_BITS)
return;
- for (i = 0; i < FOLIO_OBJECTS; i ++)
+ for (i = 0; i < VG_FOLIO_OBJECTS; i ++)
do_walk (activity, i, root,
- addr_extend (addr, i, FOLIO_OBJECTS_LOG2),
+ vg_addr_extend (addr, i, VG_FOLIO_OBJECTS_LOG2),
indent + 1, false, output_prefix);
return;
- case cap_thread:
- if (addr_depth (addr) + THREAD_SLOTS_LOG2 > ADDR_BITS)
+ case vg_cap_thread:
+ if (vg_addr_depth (addr) + VG_THREAD_SLOTS_LOG2 > VG_ADDR_BITS)
return;
- for (i = 0; i < THREAD_SLOTS; i ++)
+ for (i = 0; i < VG_THREAD_SLOTS; i ++)
do_walk (activity, i, root,
- addr_extend (addr, i, THREAD_SLOTS_LOG2),
+ vg_addr_extend (addr, i, VG_THREAD_SLOTS_LOG2),
indent + 1, true, output_prefix);
return;
- case cap_messenger:
+ case vg_cap_messenger:
/* rmessengers don't expose their capability slots. */
- if (addr_depth (addr) + VG_MESSENGER_SLOTS_LOG2 > ADDR_BITS)
+ if (vg_addr_depth (addr) + VG_MESSENGER_SLOTS_LOG2 > VG_ADDR_BITS)
return;
for (i = 0; i < VG_MESSENGER_SLOTS; i ++)
do_walk (activity, i, root,
- addr_extend (addr, i, VG_MESSENGER_SLOTS_LOG2),
+ vg_addr_extend (addr, i, VG_MESSENGER_SLOTS_LOG2),
indent + 1, true, output_prefix);
return;
@@ -205,11 +205,11 @@ do_walk (activity_t activity, int index,
/* AS_LOCK must not be held. */
void
-as_dump_from (activity_t activity, struct cap *root, const char *prefix)
+as_dump_from (activity_t activity, struct vg_cap *root, const char *prefix)
{
debug (0, "Dumping address space.");
backtrace_print ();
if (0)
- do_walk (activity, -1, root, ADDR (0, 0), 0, true, prefix);
+ do_walk (activity, -1, root, VG_ADDR (0, 0), 0, true, prefix);
}
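Editorial note: do_walk's arithmetic is easiest to follow with the address representation in mind. As I read the sources, a vg_addr_t is a (prefix, depth) pair with the translated bits left-aligned in VG_ADDR_BITS; the stand-ins below mirror vg_addr_prefix, vg_addr_depth, and vg_addr_extend so the "=> 0x.../d" line printed above can be reproduced. All definitions here are illustrative assumptions, not the library's layout.

  #include <stdint.h>
  #include <stdio.h>

  #define VG_ADDR_BITS 63   /* Assumed width; see note above.  */

  /* Illustrative stand-in: PREFIX holds the translated bits
     left-aligned in VG_ADDR_BITS; DEPTH says how many are valid.  */
  typedef struct { uint64_t prefix; int depth; } vg_addr_t;

  static vg_addr_t
  vg_addr (uint64_t prefix, int depth)
  {
    return (vg_addr_t) { prefix, depth };
  }

  static uint64_t vg_addr_prefix (vg_addr_t a) { return a.prefix; }
  static int vg_addr_depth (vg_addr_t a) { return a.depth; }

  /* Append COUNT bits of suffix BITS below the current prefix, as
     do_walk does when it follows a guard or indexes a slot.  */
  static vg_addr_t
  vg_addr_extend (vg_addr_t a, uint64_t bits, int count)
  {
    return vg_addr (a.prefix | (bits << (VG_ADDR_BITS - a.depth - count)),
                    a.depth + count);
  }

  int
  main (void)
  {
    /* Follow a 3-bit guard of value 5 from the root.  */
    vg_addr_t a = vg_addr_extend (vg_addr (0, 0), 5, 3);
    printf ("=> 0x%llx/%d\n",
            (unsigned long long) vg_addr_prefix (a), vg_addr_depth (a));
    return 0;
  }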
diff --git a/libhurd-mm/as-lookup.c b/libhurd-mm/as-lookup.c
index 7639fd1..0de3c03 100644
--- a/libhurd-mm/as-lookup.c
+++ b/libhurd-mm/as-lookup.c
@@ -57,14 +57,14 @@
static bool
as_lookup_rel_internal (activity_t activity,
- struct cap *root, addr_t address,
- enum cap_type type, bool *writable,
+ struct vg_cap *root, vg_addr_t address,
+ enum vg_cap_type type, bool *writable,
enum as_lookup_mode mode, union as_lookup_ret *rt,
bool dump)
{
assert (root);
- struct cap *start = root;
+ struct vg_cap *start = root;
#ifndef NDEBUG
bool dump_path = dump;
@@ -74,49 +74,49 @@ as_lookup_rel_internal (activity_t activity,
#endif
root = start;
- uint64_t addr = addr_prefix (address);
- uintptr_t remaining = addr_depth (address);
+ uint64_t addr = vg_addr_prefix (address);
+ uintptr_t remaining = vg_addr_depth (address);
/* The code below assumes that the REMAINING significant bits are in the
lower bits, not upper. */
- addr >>= (ADDR_BITS - remaining);
+ addr >>= (VG_ADDR_BITS - remaining);
- struct cap fake_slot;
+ struct vg_cap fake_slot;
/* Assume the object is writable until proven otherwise. */
int w = true;
if (dump_path)
- debug (0, "Looking up %s at " ADDR_FMT,
- mode == as_lookup_want_cap ? "cap"
+ debug (0, "Looking up %s at " VG_ADDR_FMT,
+ mode == as_lookup_want_cap ? "vg_cap"
: (mode == as_lookup_want_slot ? "slot" : "object"),
- ADDR_PRINTF (address));
+ VG_ADDR_PRINTF (address));
while (remaining > 0)
{
if (dump_path)
- debug (0, "Cap at " ADDR_FMT ": " CAP_FMT " -> " ADDR_FMT " (%d)",
- ADDR_PRINTF (addr_chop (address, remaining)),
- CAP_PRINTF (root),
- ADDR_PRINTF (addr_chop (address,
- remaining - CAP_GUARD_BITS (root))),
+ debug (0, "Cap at " VG_ADDR_FMT ": " VG_CAP_FMT " -> " VG_ADDR_FMT " (%d)",
+ VG_ADDR_PRINTF (vg_addr_chop (address, remaining)),
+ VG_CAP_PRINTF (root),
+ VG_ADDR_PRINTF (vg_addr_chop (address,
+ remaining - VG_CAP_GUARD_BITS (root))),
remaining);
- assertx (CAP_TYPE_MIN <= root->type && root->type <= CAP_TYPE_MAX,
- "Cap at " ADDR_FMT " has type %d?! (" ADDR_FMT ")",
- ADDR_PRINTF (addr_chop (address, remaining)), root->type,
- ADDR_PRINTF (address));
+ assertx (VG_CAP_TYPE_MIN <= root->type && root->type <= VG_CAP_TYPE_MAX,
+ "Cap at " VG_ADDR_FMT " has type %d?! (" VG_ADDR_FMT ")",
+ VG_ADDR_PRINTF (vg_addr_chop (address, remaining)), root->type,
+ VG_ADDR_PRINTF (address));
- if (root->type == cap_rcappage)
+ if (root->type == vg_cap_rcappage)
/* The page directory is read-only. Note the weakened access
appropriately. */
{
- if (type != -1 && ! cap_type_weak_p (type))
+ if (type != -1 && ! vg_cap_type_weak_p (type))
{
debug (1, "Read-only cappage at %llx/%d but %s requires "
"write access",
- addr_prefix (addr_chop (address, remaining)),
- addr_depth (address) - remaining,
- cap_type_string (type));
+ vg_addr_prefix (vg_addr_chop (address, remaining)),
+ vg_addr_depth (address) - remaining,
+ vg_cap_type_string (type));
/* Translating this capability does not provide write
access. The requested type is strong, bail. */
@@ -126,29 +126,29 @@ as_lookup_rel_internal (activity_t activity,
w = false;
}
- if (CAP_GUARD_BITS (root))
- /* Check that ADDR contains the guard. */
+ if (VG_CAP_GUARD_BITS (root))
+ /* Check that VG_ADDR contains the guard. */
{
- int gdepth = CAP_GUARD_BITS (root);
+ int gdepth = VG_CAP_GUARD_BITS (root);
if (gdepth > remaining)
{
debug (1, "Translating %llx/%d; not enough bits (%d) to "
"translate %d-bit guard at /%d",
- addr_prefix (address), addr_depth (address),
- remaining, gdepth, ADDR_BITS - remaining);
+ vg_addr_prefix (address), vg_addr_depth (address),
+ remaining, gdepth, VG_ADDR_BITS - remaining);
DUMP_OR_RET (false);
}
int guard = extract_bits64_inv (addr, remaining - 1, gdepth);
- if (CAP_GUARD (root) != guard)
+ if (VG_CAP_GUARD (root) != guard)
{
debug (dump_path ? 0 : 5,
- "Translating " ADDR_FMT ": guard 0x%llx/%d does "
+ "Translating " VG_ADDR_FMT ": guard 0x%llx/%d does "
"not match 0x%llx's bits %d-%d => 0x%x",
- ADDR_PRINTF (address),
- CAP_GUARD (root), CAP_GUARD_BITS (root), addr,
+ VG_ADDR_PRINTF (address),
+ VG_CAP_GUARD (root), VG_CAP_GUARD_BITS (root), addr,
remaining - gdepth, remaining - 1, guard);
return false;
}
@@ -169,34 +169,34 @@ as_lookup_rel_internal (activity_t activity,
switch (root->type)
{
- case cap_cappage:
- case cap_rcappage:
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
{
/* Index the page table. */
- int bits = CAP_SUBPAGE_SIZE_LOG2 (root);
+ int bits = VG_CAP_SUBPAGE_SIZE_LOG2 (root);
if (remaining < bits)
{
- debug (1, "Translating " ADDR_FMT "; not enough bits (%d) "
- "to index %d-bit cappage at " ADDR_FMT,
- ADDR_PRINTF (address), remaining, bits,
- ADDR_PRINTF (addr_chop (address, remaining)));
+ debug (1, "Translating " VG_ADDR_FMT "; not enough bits (%d) "
+ "to index %d-bit cappage at " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (address), remaining, bits,
+ VG_ADDR_PRINTF (vg_addr_chop (address, remaining)));
DUMP_OR_RET (false);
}
- struct object *object = cap_to_object (activity, root);
+ struct object *object = vg_cap_to_object (activity, root);
if (! object)
{
#ifdef RM_INTERN
- debug (1, "Failed to get object with OID " OID_FMT,
- OID_PRINTF (root->oid));
+ debug (1, "Failed to get object with OID " VG_OID_FMT,
+ VG_OID_PRINTF (root->oid));
DUMP_OR_RET (false);
#endif
return false;
}
- int offset = CAP_SUBPAGE_OFFSET (root)
+ int offset = VG_CAP_SUBPAGE_OFFSET (root)
+ extract_bits64_inv (addr, remaining - 1, bits);
- assert (0 <= offset && offset < CAPPAGE_SLOTS);
+ assert (0 <= offset && offset < VG_CAPPAGE_SLOTS);
remaining -= bits;
if (dump_path)
@@ -207,77 +207,77 @@ as_lookup_rel_internal (activity_t activity,
break;
}
- case cap_folio:
- if (remaining < FOLIO_OBJECTS_LOG2)
+ case vg_cap_folio:
+ if (remaining < VG_FOLIO_OBJECTS_LOG2)
{
- debug (1, "Translating " ADDR_FMT "; not enough bits (%d) "
- "to index folio at " ADDR_FMT,
- ADDR_PRINTF (address), remaining,
- ADDR_PRINTF (addr_chop (address, remaining)));
+ debug (1, "Translating " VG_ADDR_FMT "; not enough bits (%d) "
+ "to index folio at " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (address), remaining,
+ VG_ADDR_PRINTF (vg_addr_chop (address, remaining)));
DUMP_OR_RET (false);
}
- struct object *object = cap_to_object (activity, root);
+ struct object *object = vg_cap_to_object (activity, root);
if (! object)
{
#ifdef RM_INTERN
- debug (1, "Failed to get object with OID " OID_FMT,
- OID_PRINTF (root->oid));
+ debug (1, "Failed to get object with OID " VG_OID_FMT,
+ VG_OID_PRINTF (root->oid));
#endif
DUMP_OR_RET (false);
}
struct folio *folio = (struct folio *) object;
- int i = extract_bits64_inv (addr, remaining - 1, FOLIO_OBJECTS_LOG2);
+ int i = extract_bits64_inv (addr, remaining - 1, VG_FOLIO_OBJECTS_LOG2);
#ifdef RM_INTERN
root = &fake_slot;
- *root = folio_object_cap (folio, i);
+ *root = vg_folio_object_cap (folio, i);
#else
root = &folio->objects[i];
#endif
- remaining -= FOLIO_OBJECTS_LOG2;
+ remaining -= VG_FOLIO_OBJECTS_LOG2;
if (dump_path)
debug (0, "Indexing folio: %d/%d (%d)",
- i, FOLIO_OBJECTS_LOG2, remaining);
+ i, VG_FOLIO_OBJECTS_LOG2, remaining);
break;
- case cap_thread:
- case cap_messenger:
+ case vg_cap_thread:
+ case vg_cap_messenger:
/* Note: rmessengers don't expose their capability slots. */
{
/* Index the object. */
int bits;
switch (root->type)
{
- case cap_thread:
- bits = THREAD_SLOTS_LOG2;
+ case vg_cap_thread:
+ bits = VG_THREAD_SLOTS_LOG2;
break;
- case cap_messenger:
+ case vg_cap_messenger:
bits = VG_MESSENGER_SLOTS_LOG2;
break;
}
if (remaining < bits)
{
- debug (1, "Translating " ADDR_FMT "; not enough bits (%d) "
- "to index %d-bit %s at " ADDR_FMT,
- ADDR_PRINTF (address), remaining, bits,
- cap_type_string (root->type),
- ADDR_PRINTF (addr_chop (address, remaining)));
+ debug (1, "Translating " VG_ADDR_FMT "; not enough bits (%d) "
+ "to index %d-bit %s at " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (address), remaining, bits,
+ vg_cap_type_string (root->type),
+ VG_ADDR_PRINTF (vg_addr_chop (address, remaining)));
DUMP_OR_RET (false);
}
- struct object *object = cap_to_object (activity, root);
+ struct object *object = vg_cap_to_object (activity, root);
if (! object)
{
#ifdef RM_INTERN
- debug (1, "Failed to get object with OID " OID_FMT,
- OID_PRINTF (root->oid));
+ debug (1, "Failed to get object with OID " VG_OID_FMT,
+ VG_OID_PRINTF (root->oid));
DUMP_OR_RET (false);
#endif
return false;
@@ -292,7 +292,7 @@ as_lookup_rel_internal (activity_t activity,
if (dump_path)
debug (0, "Indexing %s: %d/%d (%d)",
- cap_type_string (root->type), offset, bits, remaining);
+ vg_cap_type_string (root->type), offset, bits, remaining);
root = &object->caps[offset];
break;
@@ -306,11 +306,11 @@ as_lookup_rel_internal (activity_t activity,
do_debug (4)
as_dump_from (activity, start, NULL);
debug (dump_path ? 0 : 5,
- "Translating " ADDR_FMT ", encountered a %s at "
- ADDR_FMT " but expected a cappage or a folio",
- ADDR_PRINTF (address),
- cap_type_string (root->type),
- ADDR_PRINTF (addr_chop (address, remaining)));
+ "Translating " VG_ADDR_FMT ", encountered a %s at "
+ VG_ADDR_FMT " but expected a cappage or a folio",
+ VG_ADDR_PRINTF (address),
+ vg_cap_type_string (root->type),
+ VG_ADDR_PRINTF (vg_addr_chop (address, remaining)));
return false;
}
@@ -318,16 +318,16 @@ as_lookup_rel_internal (activity_t activity,
/* We've indexed the object and have no bits remaining to
translate. */
{
- if (CAP_GUARD_BITS (root) && mode == as_lookup_want_object)
+ if (VG_CAP_GUARD_BITS (root) && mode == as_lookup_want_object)
/* The caller wants an object but we haven't translated
the slot's guard. */
{
debug (dump_path ? 0 : 4,
"Found slot at %llx/%d but referenced object "
"(%s) has an untranslated guard of %lld/%d!",
- addr_prefix (address), addr_depth (address),
- cap_type_string (root->type), CAP_GUARD (root),
- CAP_GUARD_BITS (root));
+ vg_addr_prefix (address), vg_addr_depth (address),
+ vg_cap_type_string (root->type), VG_CAP_GUARD (root),
+ VG_CAP_GUARD_BITS (root));
return false;
}
@@ -337,17 +337,17 @@ as_lookup_rel_internal (activity_t activity,
assert (remaining == 0);
if (dump_path)
- debug (0, "Cap at " ADDR_FMT ": " CAP_FMT " -> " ADDR_FMT " (%d)",
- ADDR_PRINTF (addr_chop (address, remaining)),
- CAP_PRINTF (root),
- ADDR_PRINTF (addr_chop (address,
- remaining - CAP_GUARD_BITS (root))),
+ debug (0, "Cap at " VG_ADDR_FMT ": " VG_CAP_FMT " -> " VG_ADDR_FMT " (%d)",
+ VG_ADDR_PRINTF (vg_addr_chop (address, remaining)),
+ VG_CAP_PRINTF (root),
+ VG_ADDR_PRINTF (vg_addr_chop (address,
+ remaining - VG_CAP_GUARD_BITS (root))),
remaining);
if (type != -1 && type != root->type)
/* Types don't match. */
{
- if (cap_type_strengthen (type) == root->type)
+ if (vg_cap_type_strengthen (type) == root->type)
/* The capability just provides more strength than
requested. That's fine. */
;
@@ -357,14 +357,14 @@ as_lookup_rel_internal (activity_t activity,
do_debug (4)
as_dump_from (activity, start, __func__);
debug (dump_path ? 0 : 4,
- "cap at " ADDR_FMT " designates a %s but want a %s",
- ADDR_PRINTF (address), cap_type_string (root->type),
- cap_type_string (type));
+ "vg_cap at " VG_ADDR_FMT " designates a %s but want a %s",
+ VG_ADDR_PRINTF (address), vg_cap_type_string (root->type),
+ vg_cap_type_string (type));
return false;
}
}
- if (mode == as_lookup_want_object && cap_type_weak_p (root->type))
+ if (mode == as_lookup_want_object && vg_cap_type_weak_p (root->type))
w = false;
if (writable)
@@ -375,7 +375,7 @@ as_lookup_rel_internal (activity_t activity,
if (root == &fake_slot)
{
debug (1, "%llx/%d resolves to a folio object but want a slot",
- addr_prefix (address), addr_depth (address));
+ vg_addr_prefix (address), vg_addr_depth (address));
DUMP_OR_RET (false);
}
rt->capp = root;
@@ -390,8 +390,8 @@ as_lookup_rel_internal (activity_t activity,
bool
as_lookup_rel (activity_t activity,
- struct cap *root, addr_t address,
- enum cap_type type, bool *writable,
+ struct vg_cap *root, vg_addr_t address,
+ enum vg_cap_type type, bool *writable,
enum as_lookup_mode mode, union as_lookup_ret *rt)
{
bool r;
@@ -410,7 +410,7 @@ as_lookup_rel (activity_t activity,
}
void
-as_dump_path_rel (activity_t activity, struct cap *root, addr_t addr)
+as_dump_path_rel (activity_t activity, struct vg_cap *root, vg_addr_t addr)
{
union as_lookup_ret rt;
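Editorial note: stripped of its error paths, as_lookup_rel_internal is a loop of two moves: consume the capability's guard, then consume an index into the designated table. The toy walker below runs that loop over a two-level table of stand-in types; it exists only to show the loop structure, not the real data layout or the weakened-access bookkeeping.

  #include <stdint.h>
  #include <stdio.h>

  enum vg_cap_type { vg_cap_void, vg_cap_cappage, vg_cap_page };

  struct vg_cap
  {
    enum vg_cap_type type;
    unsigned guard, gbits;       /* Guard value and width.  */
    unsigned slots_log2;         /* Table width if a cappage.  */
    struct vg_cap *table;        /* Slots if a cappage.  */
  };

  /* Translate the low REMAINING bits of ADDR starting from ROOT:
     check the guard, then index, until no bits remain.  Error
     checking (guard wider than REMAINING, etc.) is elided.  */
  static struct vg_cap *
  lookup (struct vg_cap *root, uint64_t addr, int remaining)
  {
    while (remaining > 0)
      {
        /* Guard: the top GBITS remaining bits must equal it.  */
        unsigned guard = (addr >> (remaining - root->gbits))
          & ((1u << root->gbits) - 1);
        if (guard != root->guard)
          return NULL;
        remaining -= root->gbits;

        if (remaining == 0)
          break;
        if (root->type != vg_cap_cappage)
          return NULL;           /* Doesn't translate further bits.  */

        unsigned w = root->slots_log2;
        unsigned idx = (addr >> (remaining - w)) & ((1u << w) - 1);
        remaining -= w;
        root = &root->table[idx];
      }
    return root;
  }

  int
  main (void)
  {
    struct vg_cap leaf = { vg_cap_page, 0, 0, 0, 0 };
    struct vg_cap slots[4] = { { vg_cap_void } };
    slots[2] = leaf;
    struct vg_cap root = { vg_cap_cappage, 1, 1, 2, slots };

    /* Address 0b110: a 1/1 guard, then slot 2 of 4.  */
    struct vg_cap *c = lookup (&root, 0x6, 3);
    printf ("found page: %d\n", c && c->type == vg_cap_page);
    return 0;
  }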
diff --git a/libhurd-mm/as.c b/libhurd-mm/as.c
index be9dc09..7e47727 100644
--- a/libhurd-mm/as.c
+++ b/libhurd-mm/as.c
@@ -97,9 +97,9 @@ free_space_desc_slab_alloc (void *hook, size_t size, void **ptr)
assert (size == PAGESIZE);
struct storage storage = storage_alloc (meta_data_activity,
- cap_page, STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- *ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ vg_cap_page, STORAGE_LONG_LIVED,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ *ptr = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
return 0;
}
@@ -109,7 +109,7 @@ free_space_desc_slab_dealloc (void *hook, void *buffer, size_t size)
{
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
@@ -171,7 +171,7 @@ free_space_split (struct free_space *f, uint64_t start, uint64_t end)
}
}
-addr_t
+vg_addr_t
as_alloc (int width, uint64_t count, bool data_mappable)
{
assert (as_init_done);
@@ -189,10 +189,10 @@ as_alloc (int width, uint64_t count, bool data_mappable)
else if (w <= PAGESIZE_LOG2)
w = PAGESIZE_LOG2;
else
- /* Make W - PAGESIZE_LOG2 a multiple of CAPPAGE_SLOTS_LOG2;
+ /* Make W - PAGESIZE_LOG2 a multiple of VG_CAPPAGE_SLOTS_LOG2;
this greatly simplifies page table construction. */
- w += (CAPPAGE_SLOTS_LOG2
- - ((w - PAGESIZE_LOG2) % CAPPAGE_SLOTS_LOG2));
+ w += (VG_CAPPAGE_SLOTS_LOG2
+ - ((w - PAGESIZE_LOG2) % VG_CAPPAGE_SLOTS_LOG2));
}
uint64_t align = 1ULL << w;
@@ -200,7 +200,7 @@ as_alloc (int width, uint64_t count, bool data_mappable)
ss_mutex_lock (&free_spaces_lock);
- addr_t addr = ADDR_VOID;
+ vg_addr_t addr = VG_ADDR_VOID;
struct free_space *free_space;
for (free_space = hurd_btree_free_space_first (&free_spaces);
@@ -220,24 +220,24 @@ as_alloc (int width, uint64_t count, bool data_mappable)
break;
free_space_split (free_space, start, start + length - 1);
- addr = ADDR (start, ADDR_BITS - (w - shift));
+ addr = VG_ADDR (start, VG_ADDR_BITS - (w - shift));
break;
}
}
ss_mutex_unlock (&free_spaces_lock);
- if (ADDR_IS_VOID (addr))
+ if (VG_ADDR_IS_VOID (addr))
debug (0, "No space for object of size 0x%x", 1 << (width - 1));
return addr;
}
bool
-as_alloc_at (addr_t addr, uint64_t count)
+as_alloc_at (vg_addr_t addr, uint64_t count)
{
- uint64_t start = addr_prefix (addr);
- uint64_t length = (1ULL << (ADDR_BITS - addr_depth (addr))) * count;
+ uint64_t start = vg_addr_prefix (addr);
+ uint64_t length = (1ULL << (VG_ADDR_BITS - vg_addr_depth (addr))) * count;
uint64_t end = start + length - 1;
struct region region = { start, end };
@@ -259,10 +259,10 @@ as_alloc_at (addr_t addr, uint64_t count)
}
void
-as_free (addr_t addr, uint64_t count)
+as_free (vg_addr_t addr, uint64_t count)
{
- uint64_t start = addr_prefix (addr);
- uint64_t length = (1ULL << (ADDR_BITS - addr_depth (addr))) * count;
+ uint64_t start = vg_addr_prefix (addr);
+ uint64_t length = (1ULL << (VG_ADDR_BITS - vg_addr_depth (addr))) * count;
uint64_t end = start + length - 1;
struct free_space *space = free_space_desc_alloc ();
@@ -325,29 +325,29 @@ as_free (addr_t addr, uint64_t count)
}
struct as_allocate_pt_ret
-as_allocate_page_table (addr_t addr)
+as_allocate_page_table (vg_addr_t addr)
{
struct as_allocate_pt_ret ret;
memset (&ret, 0, sizeof (ret));
- ret.cap.type = cap_void;
+ ret.cap.type = vg_cap_void;
/* First allocate the real object. */
- struct storage storage = storage_alloc (meta_data_activity, cap_cappage,
+ struct storage storage = storage_alloc (meta_data_activity, vg_cap_cappage,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
return ret;
- debug (4, ADDR_FMT " -> " ADDR_FMT,
- ADDR_PRINTF (addr), ADDR_PRINTF (storage.addr));
+ debug (4, VG_ADDR_FMT " -> " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (addr), VG_ADDR_PRINTF (storage.addr));
/* Then, allocate the shadow object. */
- struct storage shadow = storage_alloc (meta_data_activity, cap_page,
+ struct storage shadow = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT,
- ADDR_VOID);
- if (ADDR_IS_VOID (shadow.addr))
+ VG_OBJECT_POLICY_DEFAULT,
+ VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (shadow.addr))
{
storage_free (storage.addr, false);
return ret;
@@ -355,8 +355,8 @@ as_allocate_page_table (addr_t addr)
ret.storage = storage.addr;
ret.cap = *storage.cap;
- cap_set_shadow (&ret.cap,
- ADDR_TO_PTR (addr_extend (shadow.addr,
+ vg_cap_set_shadow (&ret.cap,
+ VG_ADDR_TO_PTR (vg_addr_extend (shadow.addr,
0, PAGESIZE_LOG2)));
return ret;
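Editorial note: as_allocate_page_table above is a pair of allocations with rollback: the real cappage first, then a page for its user-space shadow, freeing the first if the second fails. That unwind pattern, with hypothetical stand-in allocators in place of storage_alloc/storage_free, looks like this:

  #include <stdbool.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* Stand-ins for the storage allocator.  */
  static void *alloc_obj (void) { return malloc (4096); }
  static void free_obj (void *p) { free (p); }

  struct pt { void *table; void *shadow; bool ok; };

  static struct pt
  allocate_page_table (void)
  {
    struct pt ret = { 0 };

    ret.table = alloc_obj ();         /* The real object first.  */
    if (! ret.table)
      return ret;                     /* ret.ok is false: no memory.  */

    ret.shadow = alloc_obj ();        /* Then its shadow.  */
    if (! ret.shadow)
      {
        free_obj (ret.table);         /* Unwind the first allocation.  */
        ret.table = 0;
        return ret;
      }

    ret.ok = true;
    return ret;
  }

  int
  main (void)
  {
    struct pt pt = allocate_page_table ();
    printf ("allocated: %d\n", pt.ok);
    if (pt.ok)
      {
        free_obj (pt.shadow);
        free_obj (pt.table);
      }
    return 0;
  }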
@@ -374,26 +374,26 @@ as_alloc_slow (int width)
{
assert (! as_init_done);
- addr_t slot = ADDR_VOID;
+ vg_addr_t slot = VG_ADDR_VOID;
- int find_free_slot (addr_t addr,
- uintptr_t type, struct cap_properties properties,
+ int find_free_slot (vg_addr_t addr,
+ uintptr_t type, struct vg_cap_properties properties,
bool writable,
void *cookie)
{
- if (type == cap_folio)
+ if (type == vg_cap_folio)
/* We avoid allocating out of folios. */
return -1;
- assert (type == cap_void);
+ assert (type == vg_cap_void);
- if (ADDR_BITS - addr_depth (addr) < width)
+ if (VG_ADDR_BITS - vg_addr_depth (addr) < width)
return -1;
if (! writable)
return 0;
- uint64_t start = addr_prefix (addr);
+ uint64_t start = vg_addr_prefix (addr);
uint64_t end = start + (1 << width) - 1;
if (end >= DATA_ADDR_MAX)
@@ -414,8 +414,8 @@ as_alloc_slow (int width)
for (i = 0; i < desc_additional_count; i ++)
{
struct hurd_object_desc *desc = &desc_additional[i];
- if (ADDR_EQ (addr, addr_chop (desc->object,
- CAP_ADDR_TRANS_GUARD_BITS
+ if (VG_ADDR_EQ (addr, vg_addr_chop (desc->object,
+ VG_CAP_ADDR_TRANS_GUARD_BITS
(properties.addr_trans))))
return 0;
}
@@ -426,31 +426,31 @@ as_alloc_slow (int width)
error_t err;
- if (! as_walk (find_free_slot, 1 << cap_void | 1 << cap_folio,
+ if (! as_walk (find_free_slot, 1 << vg_cap_void | 1 << vg_cap_folio,
(void *) &slot))
panic ("Failed to find a free slot!");
- assert (! ADDR_IS_VOID (slot));
+ assert (! VG_ADDR_IS_VOID (slot));
/* Set the guard on the slot. */
- int gbits = ADDR_BITS - addr_depth (slot) - width;
+ int gbits = VG_ADDR_BITS - vg_addr_depth (slot) - width;
assert (gbits >= 0);
- struct cap_properties properties = CAP_PROPERTIES_DEFAULT;
- CAP_ADDR_TRANS_SET_GUARD (&properties.addr_trans, 0, gbits);
- err = rm_cap_copy (meta_data_activity, ADDR_VOID, slot, ADDR_VOID, slot,
- CAP_COPY_COPY_ADDR_TRANS_GUARD, properties);
+ struct vg_cap_properties properties = VG_CAP_PROPERTIES_DEFAULT;
+ VG_CAP_ADDR_TRANS_SET_GUARD (&properties.addr_trans, 0, gbits);
+ err = rm_cap_copy (meta_data_activity, VG_ADDR_VOID, slot, VG_ADDR_VOID, slot,
+ VG_CAP_COPY_COPY_ADDR_TRANS_GUARD, properties);
if (err)
panic ("failed to copy capability: %d", err);
- slot = addr_extend (slot, 0, gbits);
+ slot = vg_addr_extend (slot, 0, gbits);
/* Fill in a descriptor. */
assertx ((((uintptr_t) &desc_additional[0]) & (PAGESIZE - 1)) == 0,
"%p", &desc_additional[0]);
- debug (5, "Allocating space for " ADDR_FMT
+ debug (5, "Allocating space for " VG_ADDR_FMT
"; using additional descriptor %d",
- ADDR_PRINTF (slot), desc_additional_count);
+ VG_ADDR_PRINTF (slot), desc_additional_count);
struct hurd_object_desc *desc = &desc_additional[desc_additional_count ++];
if (desc_additional_count > DESC_ADDITIONAL)
@@ -460,7 +460,7 @@ as_alloc_slow (int width)
return desc;
}
-struct cap shadow_root;
+struct vg_cap shadow_root;
void
as_init (void)
@@ -471,10 +471,10 @@ as_init (void)
debug (0, "%d descriptors", __hurd_startup_data->desc_count);
for (i = 0; i < __hurd_startup_data->desc_count; i ++)
{
- debug (0, ADDR_FMT " (" ADDR_FMT "): %s",
- ADDR_PRINTF (__hurd_startup_data->descs[i].object),
- ADDR_PRINTF (__hurd_startup_data->descs[i].storage),
- cap_type_string (__hurd_startup_data->descs[i].type));
+ debug (0, VG_ADDR_FMT " (" VG_ADDR_FMT "): %s",
+ VG_ADDR_PRINTF (__hurd_startup_data->descs[i].object),
+ VG_ADDR_PRINTF (__hurd_startup_data->descs[i].storage),
+ vg_cap_type_string (__hurd_startup_data->descs[i].type));
}
}
@@ -490,37 +490,37 @@ as_init (void)
/* We start with a tabula rasa and then "allocate" the regions that
are actually in use. */
- as_free (ADDR (0, 0), 1);
+ as_free (VG_ADDR (0, 0), 1);
/* Then, we create the shadow page tables and mark the allocation
regions appropriately. */
- void add (struct hurd_object_desc *desc, addr_t addr)
+ void add (struct hurd_object_desc *desc, vg_addr_t addr)
{
error_t err;
- debug (5, "Adding object " ADDR_FMT " (%s)",
- ADDR_PRINTF (addr), cap_type_string (desc->type));
+ debug (5, "Adding object " VG_ADDR_FMT " (%s)",
+ VG_ADDR_PRINTF (addr), vg_cap_type_string (desc->type));
uintptr_t type;
- struct cap_properties properties;
- err = rm_cap_read (meta_data_activity, ADDR_VOID, addr,
+ struct vg_cap_properties properties;
+ err = rm_cap_read (meta_data_activity, VG_ADDR_VOID, addr,
&type, &properties);
assert (! err);
- if (! cap_types_compatible (type, desc->type))
- rm_as_dump (ADDR_VOID, ADDR_VOID);
- assertx (cap_types_compatible (type, desc->type),
- "Object at " ADDR_FMT ": %s != %s",
- ADDR_PRINTF (addr),
- cap_type_string (type), cap_type_string (desc->type));
+ if (! vg_cap_types_compatible (type, desc->type))
+ rm_as_dump (VG_ADDR_VOID, VG_ADDR_VOID);
+ assertx (vg_cap_types_compatible (type, desc->type),
+ "Object at " VG_ADDR_FMT ": %s != %s",
+ VG_ADDR_PRINTF (addr),
+ vg_cap_type_string (type), vg_cap_type_string (desc->type));
- int gbits = CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans);
- addr_t slot_addr = addr_chop (addr, gbits);
+ int gbits = VG_CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans);
+ vg_addr_t slot_addr = vg_addr_chop (addr, gbits);
as_slot_lookup_use (slot_addr,
({
slot->type = type;
- CAP_PROPERTIES_SET (slot, properties);
+ VG_CAP_PROPERTIES_SET (slot, properties);
}));
switch (desc->type)
@@ -528,42 +528,42 @@ as_init (void)
default:
/* Don't allocate the AS associated with the storage. It is
dominated by its containing folio. */
- if (! ADDR_EQ (addr, desc->storage))
+ if (! VG_ADDR_EQ (addr, desc->storage))
as_alloc_at (addr, 1);
break;
- case cap_void:
+ case vg_cap_void:
assert (! "void descriptor?");
return;
- case cap_cappage:
- case cap_rcappage:
- if (ADDR_BITS - addr_depth (addr)
- < CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (properties.addr_trans))
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
+ if (VG_ADDR_BITS - vg_addr_depth (addr)
+ < VG_CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (properties.addr_trans))
/* The cappage is unusable for addressing, assuming it is
in-use. */
{
- if (! ADDR_EQ (addr, desc->storage))
+ if (! VG_ADDR_EQ (addr, desc->storage))
as_alloc_at (addr, 1);
return;
}
struct storage shadow_storage
= storage_alloc (meta_data_activity,
- cap_page, STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (shadow_storage.addr))
+ vg_cap_page, STORAGE_LONG_LIVED,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (shadow_storage.addr))
panic ("Out of space.");
struct object *shadow
- = ADDR_TO_PTR (addr_extend (shadow_storage.addr,
+ = VG_ADDR_TO_PTR (vg_addr_extend (shadow_storage.addr,
0, PAGESIZE_LOG2));
as_slot_lookup_use (addr,
({
- cap_set_shadow (slot, shadow);
+ vg_cap_set_shadow (slot, shadow);
}));
break;
- case cap_folio:
+ case vg_cap_folio:
/* Folios are not available for use. */
as_alloc_at (addr, 1);
as_slot_lookup_use (addr,
@@ -593,8 +593,8 @@ as_init (void)
i < __hurd_startup_data->desc_count;
i ++, desc ++)
{
- depths |= 1ULL << addr_depth (desc->object);
- depths |= 1ULL << addr_depth (desc->storage);
+ depths |= 1ULL << vg_addr_depth (desc->object);
+ depths |= 1ULL << vg_addr_depth (desc->storage);
}
while (depths)
@@ -606,20 +606,20 @@ as_init (void)
i < __hurd_startup_data->desc_count;
i ++, desc ++)
{
- if (addr_depth (desc->object) == depth)
+ if (vg_addr_depth (desc->object) == depth)
add (desc, desc->object);
- if (! ADDR_EQ (desc->object, desc->storage)
- && addr_depth (desc->storage) == depth)
+ if (! VG_ADDR_EQ (desc->object, desc->storage)
+ && vg_addr_depth (desc->storage) == depth)
add (desc, desc->storage);
}
}
/* Reserve the kip and the utcb. */
- as_alloc_at (ADDR ((uintptr_t) l4_kip (), ADDR_BITS), l4_kip_area_size ());
- as_alloc_at (ADDR ((uintptr_t) _L4_utcb (), ADDR_BITS), l4_utcb_size ());
+ as_alloc_at (VG_ADDR ((uintptr_t) l4_kip (), VG_ADDR_BITS), l4_kip_area_size ());
+ as_alloc_at (VG_ADDR ((uintptr_t) _L4_utcb (), VG_ADDR_BITS), l4_utcb_size ());
/* And the page at 0. */
- as_alloc_at (addr_chop (PTR_TO_ADDR (0), PAGESIZE_LOG2), 1);
+ as_alloc_at (vg_addr_chop (VG_PTR_TO_ADDR (0), PAGESIZE_LOG2), 1);
/* Now we add any additional descriptors that describe memory that
we have allocated in the mean time. */
@@ -628,14 +628,14 @@ as_init (void)
desc = &desc_additional[i];
debug (5, "Considering additional descriptor (%d): "
- ADDR_FMT "(" ADDR_FMT "), a %s",
- i, ADDR_PRINTF (desc->object), ADDR_PRINTF (desc->storage),
- cap_type_string (desc->type));
+ VG_ADDR_FMT "(" VG_ADDR_FMT "), a %s",
+ i, VG_ADDR_PRINTF (desc->object), VG_ADDR_PRINTF (desc->storage),
+ vg_cap_type_string (desc->type));
- assert (desc->type != cap_void);
- assert (! ADDR_IS_VOID (desc->storage));
+ assert (desc->type != vg_cap_void);
+ assert (! VG_ADDR_IS_VOID (desc->storage));
- if (! ADDR_EQ (desc->object, desc->storage))
+ if (! VG_ADDR_EQ (desc->object, desc->storage))
add (desc, desc->storage);
add (desc, desc->object);
}
@@ -645,30 +645,30 @@ as_init (void)
/* Walk the address space the hard way and make sure that we've got
everything. */
- int visit (addr_t addr,
- uintptr_t type, struct cap_properties properties,
+ int visit (vg_addr_t addr,
+ uintptr_t type, struct vg_cap_properties properties,
bool writable, void *cookie)
{
- debug (5, "Checking that " ADDR_FMT " is a %s",
- ADDR_PRINTF (addr), cap_type_string (type));
+ debug (5, "Checking that " VG_ADDR_FMT " is a %s",
+ VG_ADDR_PRINTF (addr), vg_cap_type_string (type));
- struct cap cap = as_cap_lookup (addr, -1, NULL);
+ struct vg_cap cap = as_cap_lookup (addr, -1, NULL);
assertx (cap.type == type,
"user: %s != kernel: %s",
- cap_type_string (cap.type), cap_type_string (type));
+ vg_cap_type_string (cap.type), vg_cap_type_string (type));
- struct cap_properties properties2 = CAP_PROPERTIES_GET (cap);
+ struct vg_cap_properties properties2 = VG_CAP_PROPERTIES_GET (cap);
assert (properties.policy.discardable == properties2.policy.discardable);
assertx (properties.policy.priority == properties2.policy.priority,
- ADDR_FMT "(%s) %d != %d",
- ADDR_PRINTF (addr), cap_type_string (type),
+ VG_ADDR_FMT "(%s) %d != %d",
+ VG_ADDR_PRINTF (addr), vg_cap_type_string (type),
properties.policy.priority, properties2.policy.priority);
assert (properties.addr_trans.raw == properties2.addr_trans.raw);
- if (type == cap_folio)
+ if (type == vg_cap_folio)
{
- processing_folio = FOLIO_OBJECTS;
+ processing_folio = VG_FOLIO_OBJECTS;
return 0;
}
@@ -690,11 +690,11 @@ as_init (void)
for (i = 0, desc = &__hurd_startup_data->descs[0];
i < __hurd_startup_data->desc_count;
i ++, desc ++)
- if (ADDR_EQ (desc->object,
- addr_chop (PTR_TO_ADDR (desc_additional), PAGESIZE_LOG2)))
+ if (VG_ADDR_EQ (desc->object,
+ vg_addr_chop (VG_PTR_TO_ADDR (desc_additional), PAGESIZE_LOG2)))
{
storage_free (desc->storage, false);
- as_free (addr_chop (PTR_TO_ADDR (desc_additional), PAGESIZE_LOG2), 1);
+ as_free (vg_addr_chop (VG_PTR_TO_ADDR (desc_additional), PAGESIZE_LOG2), 1);
break;
}
assert (i != __hurd_startup_data->desc_count);
@@ -726,8 +726,8 @@ as_alloced_dump (const char *prefix)
exited. For other non-zero values, the walk is aborted and that
value is returned. If the walk is not aborted, 0 is returned. */
int
-as_walk (int (*visit) (addr_t addr,
- uintptr_t type, struct cap_properties properties,
+as_walk (int (*visit) (vg_addr_t addr,
+ uintptr_t type, struct vg_cap_properties properties,
bool writable,
void *cookie),
int types,
@@ -740,8 +740,8 @@ as_walk (int (*visit) (addr_t addr,
/* We keep track of the child that we should visit at a
particular depth. If child[0] is 2, that means traverse the
root's object's child #2. */
- unsigned short child[1 + ADDR_BITS];
- assert (CAPPAGE_SLOTS_LOG2 < sizeof (child[0]) * 8);
+ unsigned short child[1 + VG_ADDR_BITS];
+ assert (VG_CAPPAGE_SLOTS_LOG2 < sizeof (child[0]) * 8);
/* Depth is the current level that we are visiting. If depth is
1, we are visiting the root object's children. */
@@ -749,16 +749,16 @@ as_walk (int (*visit) (addr_t addr,
child[0] = 0;
error_t err;
- struct cap_properties properties;
+ struct vg_cap_properties properties;
uintptr_t type;
/* Just caching the root capability cuts the number of RPCs by
about 25%. */
- struct cap_properties root_properties;
+ struct vg_cap_properties root_properties;
uintptr_t root_type;
- err = rm_cap_read (meta_data_activity, ADDR_VOID,
- ADDR (0, 0), &root_type, &root_properties);
+ err = rm_cap_read (meta_data_activity, VG_ADDR_VOID,
+ VG_ADDR (0, 0), &root_type, &root_properties);
assert (err == 0);
restart:
@@ -766,7 +766,7 @@ as_walk (int (*visit) (addr_t addr,
int slots_log2;
- addr_t addr = ADDR (0, 0);
+ vg_addr_t addr = VG_ADDR (0, 0);
bool writable = true;
int d;
@@ -779,31 +779,31 @@ as_walk (int (*visit) (addr_t addr,
}
else
{
- err = rm_cap_read (meta_data_activity, ADDR_VOID,
+ err = rm_cap_read (meta_data_activity, VG_ADDR_VOID,
addr, &type, &properties);
assert (err == 0);
}
addr
- = addr_extend (addr, CAP_ADDR_TRANS_GUARD (properties.addr_trans),
- CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans));
+ = vg_addr_extend (addr, VG_CAP_ADDR_TRANS_GUARD (properties.addr_trans),
+ VG_CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans));
switch (type)
{
- case cap_rcappage:
+ case vg_cap_rcappage:
writable = false;
/* Fall through. */
- case cap_cappage:
+ case vg_cap_cappage:
slots_log2
- = CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (properties.addr_trans);
+ = VG_CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (properties.addr_trans);
break;
- case cap_folio:
- slots_log2 = FOLIO_OBJECTS_LOG2;
+ case vg_cap_folio:
+ slots_log2 = VG_FOLIO_OBJECTS_LOG2;
break;
- case cap_thread:
- slots_log2 = THREAD_SLOTS_LOG2;
+ case vg_cap_thread:
+ slots_log2 = VG_THREAD_SLOTS_LOG2;
break;
- case cap_messenger:
+ case vg_cap_messenger:
slots_log2 = VG_MESSENGER_SLOTS_LOG2;
break;
default:
@@ -831,15 +831,15 @@ as_walk (int (*visit) (addr_t addr,
goto restart;
}
- addr = addr_extend (addr, child[d], slots_log2);
- err = rm_cap_read (meta_data_activity, ADDR_VOID,
+ addr = vg_addr_extend (addr, child[d], slots_log2);
+ err = rm_cap_read (meta_data_activity, VG_ADDR_VOID,
addr, &type, &properties);
assert (err == 0);
}
for (;;)
{
- err = rm_cap_read (meta_data_activity, ADDR_VOID,
+ err = rm_cap_read (meta_data_activity, VG_ADDR_VOID,
addr, &type, &properties);
if (err)
/* Dangling pointer. */
@@ -854,8 +854,8 @@ as_walk (int (*visit) (addr_t addr,
do_debug (5)
{
- s_printf ("Considering " ADDR_FMT "(%s): ",
- ADDR_PRINTF (addr), cap_type_string (type));
+ s_printf ("Considering " VG_ADDR_FMT "(%s): ",
+ VG_ADDR_PRINTF (addr), vg_cap_type_string (type));
int i;
for (i = 0; i < depth; i ++)
s_printf ("%s%d", i == 0 ? "" : " -> ", child[i]);
@@ -883,27 +883,27 @@ as_walk (int (*visit) (addr_t addr,
return r;
}
- if (addr_depth (addr)
- + CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans)
- > ADDR_BITS)
+ if (vg_addr_depth (addr)
+ + VG_CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans)
+ > VG_ADDR_BITS)
{
child[depth - 1] ++;
goto restart;
}
addr
- = addr_extend (addr, CAP_ADDR_TRANS_GUARD (properties.addr_trans),
- CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans));
+ = vg_addr_extend (addr, VG_CAP_ADDR_TRANS_GUARD (properties.addr_trans),
+ VG_CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans));
switch (type)
{
- case cap_rcappage:
- case cap_cappage:
+ case vg_cap_rcappage:
+ case vg_cap_cappage:
slots_log2
- = CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (properties.addr_trans);
+ = VG_CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (properties.addr_trans);
break;
- case cap_folio:
- slots_log2 = FOLIO_OBJECTS_LOG2;
+ case vg_cap_folio:
+ slots_log2 = VG_FOLIO_OBJECTS_LOG2;
break;
default:
if (depth == 0)
@@ -914,34 +914,34 @@ as_walk (int (*visit) (addr_t addr,
goto restart;
}
- if (addr_depth (addr) + slots_log2 > ADDR_BITS)
+ if (vg_addr_depth (addr) + slots_log2 > VG_ADDR_BITS)
{
child[depth - 1] ++;
goto restart;
}
/* Visit the first child. */
- addr = addr_extend (addr, 0, slots_log2);
+ addr = vg_addr_extend (addr, 0, slots_log2);
child[depth] = 0;
depth ++;
}
}
/* We have the shadow page tables and presumably a normal stack. */
- int do_walk (struct cap *cap, addr_t addr, bool writable)
+ int do_walk (struct vg_cap *cap, vg_addr_t addr, bool writable)
{
uintptr_t type;
- struct cap_properties cap_properties;
+ struct vg_cap_properties vg_cap_properties;
type = cap->type;
- cap_properties = CAP_PROPERTIES_GET (*cap);
+ vg_cap_properties = VG_CAP_PROPERTIES_GET (*cap);
- debug (5, ADDR_FMT " (%s)", ADDR_PRINTF (addr), cap_type_string (type));
+ debug (5, VG_ADDR_FMT " (%s)", VG_ADDR_PRINTF (addr), vg_cap_type_string (type));
int r;
if (((1 << type) & types))
{
- r = visit (addr, type, cap_properties, writable, cookie);
+ r = visit (addr, type, vg_cap_properties, writable, cookie);
if (r == -1)
/* Don't go deeper. */
return 0;
@@ -949,49 +949,49 @@ as_walk (int (*visit) (addr_t addr,
return r;
}
- if (addr_depth (addr)
- + CAP_ADDR_TRANS_GUARD_BITS (cap_properties.addr_trans)
- > ADDR_BITS)
+ if (vg_addr_depth (addr)
+ + VG_CAP_ADDR_TRANS_GUARD_BITS (vg_cap_properties.addr_trans)
+ > VG_ADDR_BITS)
return 0;
addr
- = addr_extend (addr, CAP_ADDR_TRANS_GUARD (cap_properties.addr_trans),
- CAP_ADDR_TRANS_GUARD_BITS (cap_properties.addr_trans));
+ = vg_addr_extend (addr, VG_CAP_ADDR_TRANS_GUARD (vg_cap_properties.addr_trans),
+ VG_CAP_ADDR_TRANS_GUARD_BITS (vg_cap_properties.addr_trans));
int slots_log2 = 0;
switch (type)
{
- case cap_cappage:
- case cap_rcappage:
- if (type == cap_rcappage)
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
+ if (type == vg_cap_rcappage)
writable = false;
slots_log2
- = CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (cap_properties.addr_trans);
+ = VG_CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (vg_cap_properties.addr_trans);
break;
- case cap_folio:
- slots_log2 = FOLIO_OBJECTS_LOG2;
+ case vg_cap_folio:
+ slots_log2 = VG_FOLIO_OBJECTS_LOG2;
break;
default:
return 0;
}
- if (addr_depth (addr) + slots_log2 > ADDR_BITS)
+ if (vg_addr_depth (addr) + slots_log2 > VG_ADDR_BITS)
return 0;
struct object *shadow = NULL;
if (as_init_done)
- shadow = cap_to_object (meta_data_activity, cap);
+ shadow = vg_cap_to_object (meta_data_activity, cap);
int i;
for (i = 0; i < (1 << slots_log2); i ++)
{
- struct cap *object = NULL;
+ struct vg_cap *object = NULL;
if (as_init_done)
object = &shadow->caps[i];
- r = do_walk (object, addr_extend (addr, i, slots_log2), writable);
+ r = do_walk (object, vg_addr_extend (addr, i, slots_log2), writable);
if (r)
return r;
}
@@ -999,5 +999,5 @@ as_walk (int (*visit) (addr_t addr,
return 0;
}
- return do_walk (&shadow_root, ADDR (0, 0), true);
+ return do_walk (&shadow_root, VG_ADDR (0, 0), true);
}
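
For illustration (not part of the patch), a walker client under the new names can be written as follows; the include path and the helper names are assumptions, but the visitor signature and the TYPES bit mask follow the code above. The sketch counts the cappages in the address space:

  #include <hurd/as.h>   /* Assumed install path of libhurd-mm/as.h.  */

  static int
  count_cappages (vg_addr_t addr, uintptr_t type,
                  struct vg_cap_properties properties,
                  bool writable, void *cookie)
  {
    int *count = cookie;
    (*count) ++;
    /* 0: continue; -1: skip the children; any other value aborts
       the walk and is returned by as_walk.  */
    return 0;
  }

  static int
  cappage_count (void)
  {
    int count = 0;
    as_walk (count_cappages,
             (1 << vg_cap_cappage) | (1 << vg_cap_rcappage),
             &count);
    return count;
  }
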
diff --git a/libhurd-mm/as.h b/libhurd-mm/as.h
index 8343fdd..b771fe7 100644
--- a/libhurd-mm/as.h
+++ b/libhurd-mm/as.h
@@ -35,11 +35,11 @@
are allocated as well. */
/* Allocate COUNT contiguous subtrees such that the depth of the root of
- each is at least ADDR_BITS - WIDTH. If DATA_MAPPABLE is true, then
+ each is at least VG_ADDR_BITS - WIDTH. If DATA_MAPPABLE is true, then
ensures that the leaves of each subtree are mappable in the region
accessible to data instructions. On success returns the address of
- the first subtree. Otherwise, returns ADDR_VOID. */
-extern addr_t as_alloc (int width, uint64_t count,
+ the first subtree. Otherwise, returns VG_ADDR_VOID. */
+extern vg_addr_t as_alloc (int width, uint64_t count,
bool data_mappable);
/* Like as_alloc but may be called before as_init is called. Address
@@ -49,14 +49,14 @@ extern struct hurd_object_desc *as_alloc_slow (int width);
/* Allocate the COUNT contiguous addresses starting at address ADDR.
Returns true on success, false otherwise. */
-extern bool as_alloc_at (addr_t addr, uint64_t count);
+extern bool as_alloc_at (vg_addr_t addr, uint64_t count);
-/* Free the COUNT contiguous addresses starting at ADDR. Each ADDR
+/* Free the COUNT contiguous addresses starting at VG_ADDR. Each address
   must have been previously returned by a call to as_chunk_alloc or
   as_region_alloc. The addresses returned by a single call to
   as_chunk_alloc or as_region_alloc need not all be freed by a single
   call to as_free. */
-extern void as_free (addr_t addr, uint64_t count);
+extern void as_free (vg_addr_t addr, uint64_t count);
/* Whether as_init has completed. */
extern bool as_init_done;
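
A hypothetical caller of the renamed allocator, mirroring the folio-sized reservation that libhurd-mm/storage.c makes below:

  static void
  reserve_folio_region (void)
  {
    /* WIDTH = VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2 leaves room for
       one folio's worth of pages, mappable by data instructions.  */
    vg_addr_t region = as_alloc (VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2,
                                 1, true);
    if (VG_ADDR_IS_VOID (region))
      panic ("Failed to allocate address space!");

    /* ... populate the region ...  */

    as_free (region, 1);
  }
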
@@ -187,7 +187,7 @@ as_unlock (void)
extern activity_t meta_data_activity;
/* The root of the shadow page tables. */
-extern struct cap shadow_root;
+extern struct vg_cap shadow_root;
#endif
#if defined (RM_INTERN) || defined (NDEBUG)
@@ -201,7 +201,7 @@ extern struct cap shadow_root;
do \
{ \
uintptr_t __acs_type = -1; \
- struct cap_properties __acs_p; \
+ struct vg_cap_properties __acs_p; \
error_t __acs_err; \
\
__acs_err = rm_cap_read (meta_data_activity, \
@@ -211,7 +211,7 @@ extern struct cap shadow_root;
bool die = false; \
if (__acs_err) \
die = true; \
- else if (__acs_type == cap_void) \
+ else if (__acs_type == vg_cap_void) \
/* The kernel's type is void. Either the shadow has not yet \
been updated or the object is dead. */ \
; \
@@ -221,26 +221,26 @@ extern struct cap shadow_root;
&& (!!__acs_p.policy.discardable \
== !!(__acs_cap)->discardable))) \
die = true; \
- else if ((__acs_type == cap_cappage || __acs_type == cap_rcappage) \
+ else if ((__acs_type == vg_cap_cappage || __acs_type == vg_cap_rcappage) \
&& __acs_p.addr_trans.raw != (__acs_cap)->addr_trans.raw) \
die = true; \
\
if (die) \
{ \
debug (0, \
- ADDR_FMT "@" ADDR_FMT ": err: %d; type: %s =? %s; " \
+ VG_ADDR_FMT "@" VG_ADDR_FMT ": err: %d; type: %s =? %s; " \
"guard: %lld/%d =? %lld/%d; subpage: %d/%d =? %d/%d; " \
"priority: %d =? %d; discardable: %d =? %d", \
- ADDR_PRINTF ((__acs_root_addr)), ADDR_PRINTF ((__acs_addr)), \
+ VG_ADDR_PRINTF ((__acs_root_addr)), VG_ADDR_PRINTF ((__acs_addr)), \
__acs_err, \
- cap_type_string ((__acs_cap)->type), \
- cap_type_string (__acs_type), \
- CAP_GUARD ((__acs_cap)), CAP_GUARD_BITS ((__acs_cap)), \
- CAP_ADDR_TRANS_GUARD (__acs_p.addr_trans), \
- CAP_ADDR_TRANS_GUARD_BITS (__acs_p.addr_trans), \
- CAP_SUBPAGE ((__acs_cap)), CAP_SUBPAGES_LOG2 ((__acs_cap)), \
- CAP_ADDR_TRANS_SUBPAGE (__acs_p.addr_trans), \
- CAP_ADDR_TRANS_SUBPAGES_LOG2 (__acs_p.addr_trans), \
+ vg_cap_type_string ((__acs_cap)->type), \
+ vg_cap_type_string (__acs_type), \
+ VG_CAP_GUARD ((__acs_cap)), VG_CAP_GUARD_BITS ((__acs_cap)), \
+ VG_CAP_ADDR_TRANS_GUARD (__acs_p.addr_trans), \
+ VG_CAP_ADDR_TRANS_GUARD_BITS (__acs_p.addr_trans), \
+ VG_CAP_SUBPAGE ((__acs_cap)), VG_CAP_SUBPAGES_LOG2 ((__acs_cap)), \
+ VG_CAP_ADDR_TRANS_SUBPAGE (__acs_p.addr_trans), \
+ VG_CAP_ADDR_TRANS_SUBPAGES_LOG2 (__acs_p.addr_trans), \
(__acs_cap)->priority, __acs_p.policy.priority, \
!!(__acs_cap)->discardable, !!__acs_p.policy.discardable); \
{ \
@@ -258,7 +258,7 @@ extern struct cap shadow_root;
do \
{ \
if ((__acs_root_cap) == &shadow_root) \
- AS_CHECK_SHADOW(ADDR_VOID, (__acs_addr), (__acs_cap), \
+ AS_CHECK_SHADOW(VG_ADDR_VOID, (__acs_addr), (__acs_cap), \
(__acs_code)); \
} \
while (0)
@@ -266,21 +266,21 @@ extern struct cap shadow_root;
struct as_allocate_pt_ret
{
- struct cap cap;
- addr_t storage;
+ struct vg_cap cap;
+ vg_addr_t storage;
};
/* Page table allocator used by as_build. */
-typedef struct as_allocate_pt_ret (*as_allocate_page_table_t) (addr_t addr);
+typedef struct as_allocate_pt_ret (*as_allocate_page_table_t) (vg_addr_t addr);
-/* Default page table allocator. Allocates a cap_cappage and the
+/* Default page table allocator. Allocates a vg_cap_cappage and the
accompanying shadow page table. */
-extern struct as_allocate_pt_ret as_allocate_page_table (addr_t addr);
+extern struct as_allocate_pt_ret as_allocate_page_table (vg_addr_t addr);
/* Build up the address space, which is root at AS_ROOT_ADDR (and
shadowed by AS_ROOT_CAP), such that there is a capability slot at
- address ADDR. Return the shadow capability.
+ address VG_ADDR. Return the shadow capability.
If MAY_OVERWRITE is true, the function is permitted to overwrite an
existing capability. Otherwise, only capability slots containing a
@@ -294,9 +294,9 @@ extern struct as_allocate_pt_ret as_allocate_page_table (addr_t addr);
Must be called with a write lock on AS_LOCK. Must be called with
8kb of stack that will not fault. */
-struct cap *as_build (activity_t activity,
- addr_t as_root_addr, struct cap *as_root_cap,
- addr_t addr,
+struct vg_cap *as_build (activity_t activity,
+ vg_addr_t as_root_addr, struct vg_cap *as_root_cap,
+ vg_addr_t addr,
as_allocate_page_table_t allocate_page_table,
bool may_overwrite);
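
For illustration only: a client can interpose on page-table allocation by wrapping the default allocator. This hypothetical wrapper merely logs the address it is asked to cover before delegating:

  static struct as_allocate_pt_ret
  logging_page_table_alloc (vg_addr_t addr)
  {
    debug (5, "Allocating a page table to reach " VG_ADDR_FMT,
           VG_ADDR_PRINTF (addr));
    return as_allocate_page_table (addr);
  }

Any function with this signature can be passed wherever an as_allocate_page_table_t is expected.
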
@@ -306,21 +306,21 @@ struct cap *as_build (activity_t activity,
is implicit (in the case of a folio), return a fabricated
capability in *FAKE_SLOT and return FAKE_SLOT. Return NULL on
failure. */
-typedef struct cap *(*as_object_index_t) (activity_t activity,
- struct cap *pt,
- addr_t pt_addr, int idx,
- struct cap *fake_slot);
+typedef struct vg_cap *(*as_object_index_t) (activity_t activity,
+ struct vg_cap *pt,
+ vg_addr_t pt_addr, int idx,
+ struct vg_cap *fake_slot);
/* Like as_build, but using a custom shadow page table
implementation. */
-struct cap *as_build_custom (activity_t activity,
- addr_t as_root_addr, struct cap *as_root_cap,
- addr_t addr,
+struct vg_cap *as_build_custom (activity_t activity,
+ vg_addr_t as_root_addr, struct vg_cap *as_root_cap,
+ vg_addr_t addr,
as_allocate_page_table_t allocate_page_table,
as_object_index_t object_index,
bool may_overwrite);
-/* Ensure that the slot designated by ADDR in the address space rooted
+/* Ensure that the slot designated by VG_ADDR in the address space rooted
at AS_ROOT_ADDR (which is shadowed by AS_ROOT_CAP) is accessible by
allocating any required page tables and rearranging the address
space as necessary. Execute CODE (with AS_LOCK held) with the
@@ -334,15 +334,15 @@ struct cap *as_build_custom (activity_t activity,
do \
{ \
activity_t __asef_activity = (__asef_activity_); \
- addr_t __asef_as_root_addr = (__asef_as_root_addr_); \
- struct cap *__asef_as_root_cap = (__asef_as_root_cap_); \
- addr_t __asef_addr = (__asef_addr_); \
+ vg_addr_t __asef_as_root_addr = (__asef_as_root_addr_); \
+ struct vg_cap *__asef_as_root_cap = (__asef_as_root_cap_); \
+ vg_addr_t __asef_addr = (__asef_addr_); \
as_allocate_page_table_t __asef_allocate_page_table \
= (__asef_allocate_page_table_); \
\
as_lock (); \
\
- struct cap *slot = as_build (__asef_activity, \
+ struct vg_cap *slot = as_build (__asef_activity, \
__asef_as_root_addr, \
__asef_as_root_cap, \
__asef_addr, \
@@ -365,10 +365,10 @@ struct cap *as_build_custom (activity_t activity,
{ \
assert (as_init_done); \
\
- addr_t __ase_as_addr = (__ase_as_addr_); \
+ vg_addr_t __ase_as_addr = (__ase_as_addr_); \
\
as_ensure_full (meta_data_activity, \
- ADDR_VOID, &shadow_root, \
+ VG_ADDR_VOID, &shadow_root, \
__ase_as_addr, \
as_allocate_page_table, \
(__ase_code)); \
@@ -378,7 +378,7 @@ struct cap *as_build_custom (activity_t activity,
/* Like as_ensure_use, however, does not execute any code. */
#define as_ensure(__ae_addr) \
as_ensure_full (meta_data_activity, \
- ADDR_VOID, &shadow_root, __ae_addr, \
+ VG_ADDR_VOID, &shadow_root, __ae_addr, \
as_allocate_page_table, \
({;}))
#endif
@@ -397,10 +397,10 @@ struct cap *as_build_custom (activity_t activity,
accompanying shadow page tables. See as_build for details. */
static inline void
as_insert_full (activity_t activity,
- addr_t target_as_root_addr, struct cap *target_as_root_cap,
- addr_t target_addr,
- addr_t source_as_root_addr,
- addr_t source_addr, struct cap source_cap,
+ vg_addr_t target_as_root_addr, struct vg_cap *target_as_root_cap,
+ vg_addr_t target_addr,
+ vg_addr_t source_as_root_addr,
+ vg_addr_t source_addr, struct vg_cap source_cap,
as_allocate_page_table_t allocate_page_table)
{
AS_CHECK_SHADOW (source_as_root_addr, source_addr, &source_cap, {});
@@ -411,7 +411,7 @@ as_insert_full (activity_t activity,
allocate_page_table,
({
bool ret;
- ret = cap_copy (activity,
+ ret = vg_cap_copy (activity,
target_as_root_addr,
slot,
target_addr,
@@ -419,24 +419,24 @@ as_insert_full (activity_t activity,
source_cap,
source_addr);
assertx (ret,
- ADDR_FMT "@" ADDR_FMT
- " <- " ADDR_FMT "@" ADDR_FMT " (" CAP_FMT ")",
- ADDR_PRINTF (target_as_root_addr),
- ADDR_PRINTF (target_addr),
- ADDR_PRINTF (source_as_root_addr),
- ADDR_PRINTF (source_addr),
- CAP_PRINTF (&source_cap));
+ VG_ADDR_FMT "@" VG_ADDR_FMT
+ " <- " VG_ADDR_FMT "@" VG_ADDR_FMT " (" VG_CAP_FMT ")",
+ VG_ADDR_PRINTF (target_as_root_addr),
+ VG_ADDR_PRINTF (target_addr),
+ VG_ADDR_PRINTF (source_as_root_addr),
+ VG_ADDR_PRINTF (source_addr),
+ VG_CAP_PRINTF (&source_cap));
}));
}
#ifndef RM_INTERN
static inline void
-as_insert (addr_t target_addr,
- addr_t source_addr, struct cap source_cap)
+as_insert (vg_addr_t target_addr,
+ vg_addr_t source_addr, struct vg_cap source_cap)
{
as_insert_full (meta_data_activity,
- ADDR_VOID, &shadow_root, target_addr,
- ADDR_VOID, source_addr, source_cap,
+ VG_ADDR_VOID, &shadow_root, target_addr,
+ VG_ADDR_VOID, source_addr, source_cap,
as_allocate_page_table);
}
#endif
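
A sketch of the common insertion pattern under the new names (TARGET and SOURCE are hypothetical slot addresses): fetch the capability designated by SOURCE and install it at TARGET in the default address space:

  static void
  copy_cap (vg_addr_t target, vg_addr_t source)
  {
    /* -1: accept a capability of any type.  */
    struct vg_cap cap = as_cap_lookup (source, -1, NULL);
    assert (cap.type != vg_cap_void);

    as_insert (target, source, cap);
  }
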
@@ -445,26 +445,26 @@ as_insert (addr_t target_addr,
#ifndef RM_INTERN
/* Variant of as_ensure_full that doesn't assume the default shadow
page table format but calls OBJECT_INDEX to index objects. */
-extern struct cap *as_ensure_full_custom
+extern struct vg_cap *as_ensure_full_custom
(activity_t activity,
- addr_t as, struct cap *root, addr_t addr,
+ vg_addr_t as, struct vg_cap *root, vg_addr_t addr,
as_allocate_page_table_t allocate_page_table,
as_object_index_t object_index);
/* Variant of as_insert that doesn't assume the default shadow page
table format but calls OBJECT_INDEX to index objects. */
-extern struct cap *as_insert_custom
+extern struct vg_cap *as_insert_custom
(activity_t activity,
- addr_t target_as, struct cap *t_as_cap, addr_t target,
- addr_t source_as, struct cap c_cap, addr_t source,
+ vg_addr_t target_as, struct vg_cap *t_as_cap, vg_addr_t target,
+ vg_addr_t source_as, struct vg_cap c_cap, vg_addr_t source,
as_allocate_page_table_t allocate_page_table,
as_object_index_t object_index);
#endif
union as_lookup_ret
{
- struct cap cap;
- struct cap *capp;
+ struct vg_cap cap;
+ struct vg_cap *capp;
};
enum as_lookup_mode
@@ -497,8 +497,8 @@ enum as_lookup_mode
On success, whether the slot or the object is writable is returned
in *WRITABLE. */
extern bool as_lookup_rel (activity_t activity,
- struct cap *as_root_cap, addr_t addr,
- enum cap_type type, bool *writable,
+ struct vg_cap *as_root_cap, vg_addr_t addr,
+ enum vg_cap_type type, bool *writable,
enum as_lookup_mode mode,
union as_lookup_ret *ret);
@@ -513,8 +513,8 @@ extern bool as_lookup_rel (activity_t activity,
__alru_code) \
({ \
activity_t __alru_activity = (__alru_activity_); \
- struct cap *__alru_root = (__alru_root_); \
- addr_t __alru_addr = (__alru_addr_); \
+ struct vg_cap *__alru_root = (__alru_root_); \
+ vg_addr_t __alru_addr = (__alru_addr_); \
\
union as_lookup_ret __alru_ret_val; \
\
@@ -527,7 +527,7 @@ extern bool as_lookup_rel (activity_t activity,
&__alru_ret_val); \
if (__alru_ret) \
{ \
- struct cap *slot __attribute__ ((unused)) = __alru_ret_val.capp; \
+ struct vg_cap *slot __attribute__ ((unused)) = __alru_ret_val.capp; \
(__alru_code); \
\
AS_CHECK_SHADOW2(__alru_root, __alru_addr, slot, {}); \
@@ -553,15 +553,15 @@ extern bool as_lookup_rel (activity_t activity,
space rooted by ROOT.
TYPE is the required type. If the type is incompatible
- (cap_rcappage => cap_cappage and cap_rpage => cap_page), bails. If
+ (vg_cap_rcappage => vg_cap_cappage and vg_cap_rpage => vg_cap_page), bails. If
TYPE is -1, then any type is acceptable. May cause paging. If
non-NULL, returns whether the slot is writable in *WRITABLE.
This function locks (and unlocks) as_lock. */
-static inline struct cap
+static inline struct vg_cap
as_cap_lookup_rel (activity_t activity,
- struct cap *root, addr_t addr,
- enum cap_type type, bool *writable)
+ struct vg_cap *root, vg_addr_t addr,
+ enum vg_cap_type type, bool *writable)
{
union as_lookup_ret ret_val;
@@ -576,15 +576,15 @@ as_cap_lookup_rel (activity_t activity,
as_unlock ();
if (! ret)
- return (struct cap) { .type = cap_void };
+ return (struct vg_cap) { .type = vg_cap_void };
return ret_val.cap;
}
#ifndef RM_INTERN
-static inline struct cap
-as_cap_lookup (addr_t addr, enum cap_type type, bool *writable)
+static inline struct vg_cap
+as_cap_lookup (vg_addr_t addr, enum vg_cap_type type, bool *writable)
{
return as_cap_lookup_rel (meta_data_activity,
&shadow_root, addr, -1, writable);
@@ -598,15 +598,15 @@ as_cap_lookup (addr_t addr, enum cap_type type, bool *writable)
than the object itself.
TYPE is the required type. If the type is incompatible
- (cap_rcappage => cap_cappage and cap_rpage => cap_page), bails. If
+ (vg_cap_rcappage => vg_cap_cappage and vg_cap_rpage => vg_cap_page), bails. If
TYPE is -1, then any type is acceptable. May cause paging. If
non-NULL, returns whether the object is writable in *WRITABLE.
This function locks (and unlocks) as_lock. */
-static inline struct cap
+static inline struct vg_cap
as_object_lookup_rel (activity_t activity,
- struct cap *root, addr_t addr,
- enum cap_type type, bool *writable)
+ struct vg_cap *root, vg_addr_t addr,
+ enum vg_cap_type type, bool *writable)
{
union as_lookup_ret ret_val;
@@ -621,15 +621,15 @@ as_object_lookup_rel (activity_t activity,
as_unlock ();
if (! ret)
- return (struct cap) { .type = cap_void };
+ return (struct vg_cap) { .type = vg_cap_void };
return ret_val.cap;
}
#ifndef RM_INTERN
-static inline struct cap
-as_object_lookup (addr_t addr, enum cap_type type, bool *writable)
+static inline struct vg_cap
+as_object_lookup (vg_addr_t addr, enum vg_cap_type type, bool *writable)
{
return as_object_lookup_rel (meta_data_activity,
&shadow_root, addr, -1, writable);
@@ -638,11 +638,11 @@ as_object_lookup (addr_t addr, enum cap_type type, bool *writable)
/* Print the path taken to get to the slot at address ADDRESS. */
extern void as_dump_path_rel (activity_t activity,
- struct cap *root, addr_t addr);
+ struct vg_cap *root, vg_addr_t addr);
#ifndef RM_INTERN
static inline void
-as_dump_path (addr_t addr)
+as_dump_path (vg_addr_t addr)
{
as_dump_path_rel (meta_data_activity, &shadow_root, addr);
}
@@ -655,16 +655,16 @@ as_dump_path (addr_t addr)
properties. WRITABLE is whether the slot is writable. If VISIT
returns a non-zero value, the walk is aborted and that value is
returned. If the walk is not aborted, 0 is returned. */
-extern int as_walk (int (*visit) (addr_t cap,
+extern int as_walk (int (*visit) (vg_addr_t cap,
uintptr_t type,
- struct cap_properties properties,
+ struct vg_cap_properties properties,
bool writable,
void *cookie),
int types,
void *cookie);
/* AS_LOCK must not be held. */
-extern void as_dump_from (activity_t activity, struct cap *root,
+extern void as_dump_from (activity_t activity, struct vg_cap *root,
const char *prefix);
#ifndef RM_INTERN
diff --git a/libhurd-mm/capalloc.c b/libhurd-mm/capalloc.c
index 998b123..56715e4 100644
--- a/libhurd-mm/capalloc.c
+++ b/libhurd-mm/capalloc.c
@@ -36,10 +36,10 @@
struct cappage_desc
{
- addr_t cappage;
- struct cap *cap;
+ vg_addr_t cappage;
+ struct vg_cap *cap;
- unsigned char alloced[CAPPAGE_SLOTS / 8];
+ unsigned char alloced[VG_CAPPAGE_SLOTS / 8];
unsigned short free;
pthread_mutex_t lock;
@@ -71,15 +71,15 @@ list_unlink (struct cappage_desc *e)
}
static int
-addr_compare (const addr_t *a, const addr_t *b)
+addr_compare (const vg_addr_t *a, const vg_addr_t *b)
{
- if (addr_prefix (*a) < addr_prefix (*b))
+ if (vg_addr_prefix (*a) < vg_addr_prefix (*b))
return -1;
- return addr_prefix (*a) != addr_prefix (*b);
+ return vg_addr_prefix (*a) != vg_addr_prefix (*b);
}
BTREE_CLASS (cappage_desc, struct cappage_desc,
- addr_t, cappage, node, addr_compare, false)
+ vg_addr_t, cappage, node, addr_compare, false)
static pthread_mutex_t cappage_descs_lock = PTHREAD_MUTEX_INITIALIZER;
static hurd_btree_cappage_desc_t cappage_descs;
@@ -91,11 +91,11 @@ cappage_desc_slab_alloc (void *hook, size_t size, void **ptr)
assert (size == PAGESIZE);
struct storage storage = storage_alloc (meta_data_activity,
- cap_page, STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ vg_cap_page, STORAGE_LONG_LIVED,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of storage");
- *ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ *ptr = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
return 0;
}
@@ -105,7 +105,7 @@ cappage_desc_slab_dealloc (void *hook, void *buffer, size_t size)
{
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
@@ -135,7 +135,7 @@ cappage_desc_free (struct cappage_desc *storage)
struct cappage_desc *nonempty;
-addr_t
+vg_addr_t
capalloc (void)
{
/* Find an appropriate storage area. */
@@ -170,12 +170,12 @@ capalloc (void)
/* As there is such a large number of caps per cappage, we
expect that the page will be long lived. */
struct storage storage = storage_alloc (meta_data_activity,
- cap_cappage, STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ vg_cap_cappage, STORAGE_LONG_LIVED,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
{
cappage_desc_free (area);
- return ADDR_VOID;
+ return VG_ADDR_VOID;
}
area->lock = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER;
@@ -186,29 +186,29 @@ capalloc (void)
/* Then, allocate the shadow object. */
struct storage shadow_storage
- = storage_alloc (meta_data_activity, cap_page,
- STORAGE_LONG_LIVED, OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (shadow_storage.addr))
+ = storage_alloc (meta_data_activity, vg_cap_page,
+ STORAGE_LONG_LIVED, VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (shadow_storage.addr))
{
/* No memory. */
storage_free (area->cappage, false);
cappage_desc_free (area);
- return ADDR_VOID;
+ return VG_ADDR_VOID;
}
- struct object *shadow = ADDR_TO_PTR (addr_extend (shadow_storage.addr,
+ struct object *shadow = VG_ADDR_TO_PTR (vg_addr_extend (shadow_storage.addr,
0, PAGESIZE_LOG2));
memset (shadow, 0, PAGESIZE);
- cap_set_shadow (area->cap, shadow);
+ vg_cap_set_shadow (area->cap, shadow);
memset (&area->alloced, 0, sizeof (area->alloced));
- area->free = CAPPAGE_SLOTS;
+ area->free = VG_CAPPAGE_SLOTS;
}
int idx = bit_alloc (area->alloced, sizeof (area->alloced), 0);
assert (idx != -1);
- addr_t addr = addr_extend (area->cappage, idx, CAPPAGE_SLOTS_LOG2);
+ vg_addr_t addr = vg_addr_extend (area->cappage, idx, VG_CAPPAGE_SLOTS_LOG2);
area->free --;
if (area->free == 0)
@@ -241,9 +241,9 @@ capalloc (void)
}
void
-capfree (addr_t cap)
+capfree (vg_addr_t cap)
{
- addr_t cappage = addr_chop (cap, CAPPAGE_SLOTS_LOG2);
+ vg_addr_t cappage = vg_addr_chop (cap, VG_CAPPAGE_SLOTS_LOG2);
struct cappage_desc *desc;
@@ -252,14 +252,14 @@ capfree (addr_t cap)
assert (desc);
pthread_mutex_lock (&desc->lock);
- bit_dealloc (desc->alloced, addr_extract (cap, CAPPAGE_SLOTS_LOG2));
+ bit_dealloc (desc->alloced, vg_addr_extract (cap, VG_CAPPAGE_SLOTS_LOG2));
desc->free ++;
if (desc->free == 1)
/* The cappage is no longer full. Add it back to the list of
nonempty cappages. */
list_link (&nonempty, desc);
- else if (desc->free == CAPPAGE_SLOTS)
+ else if (desc->free == VG_CAPPAGE_SLOTS)
/* No slots in the cappage are allocated. Free it if there is at
least one cappage on NONEMPTY. */
{
@@ -270,12 +270,12 @@ capfree (addr_t cap)
list_unlink (desc);
pthread_mutex_unlock (&cappage_descs_lock);
- struct object *shadow = cap_get_shadow (desc->cap);
- storage_free (addr_chop (PTR_TO_ADDR (shadow), PAGESIZE_LOG2),
+ struct object *shadow = vg_cap_get_shadow (desc->cap);
+ storage_free (vg_addr_chop (VG_PTR_TO_ADDR (shadow), PAGESIZE_LOG2),
false);
- cap_set_shadow (desc->cap, NULL);
+ vg_cap_set_shadow (desc->cap, NULL);
- desc->cap->type = cap_void;
+ desc->cap->type = vg_cap_void;
cappage_desc_free (desc);
diff --git a/libhurd-mm/capalloc.h b/libhurd-mm/capalloc.h
index 5583f3f..f71e032 100644
--- a/libhurd-mm/capalloc.h
+++ b/libhurd-mm/capalloc.h
@@ -25,9 +25,9 @@
#include <viengoos/addr.h>
/* Allocate a capability slot. */
-extern addr_t capalloc (void);
+extern vg_addr_t capalloc (void);
/* Free a capability previously allocated by capalloc. */
-extern void capfree (addr_t cap);
+extern void capfree (vg_addr_t cap);
#endif /* _HURD_CAP_ALLOC_H */
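
Usage is symmetric; a minimal sketch (capalloc returns VG_ADDR_VOID when no slot can be allocated):

  static vg_addr_t
  slot_get (void)
  {
    vg_addr_t slot = capalloc ();
    if (VG_ADDR_IS_VOID (slot))
      panic ("Out of capability slots");
    /* Hand the slot out; release it later with capfree (slot).  */
    return slot;
  }
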
diff --git a/libhurd-mm/exceptions.c b/libhurd-mm/exceptions.c
index 73d9a9f..42dfacb 100644
--- a/libhurd-mm/exceptions.c
+++ b/libhurd-mm/exceptions.c
@@ -141,8 +141,8 @@ hurd_activation_fetch (void)
/* Any reply will come in the form of a pending activation being
delivered. This RPC does not generate a response. */
- error_t err = rm_thread_activation_collect_send (ADDR_VOID, ADDR_VOID,
- ADDR_VOID);
+ error_t err = rm_thread_activation_collect_send (VG_ADDR_VOID, VG_ADDR_VOID,
+ VG_ADDR_VOID);
if (err)
panic ("Sending thread_activation_collect failed: %d", err);
}
@@ -199,9 +199,9 @@ activation_frame_slab_alloc (void *hook, size_t size, void **ptr)
assert (size == PAGESIZE);
struct storage storage = storage_alloc (meta_data_activity,
- cap_page, STORAGE_EPHEMERAL,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- *ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ vg_cap_page, STORAGE_EPHEMERAL,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ *ptr = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
return 0;
}
@@ -211,7 +211,7 @@ activation_frame_slab_dealloc (void *hook, void *buffer, size_t size)
{
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
@@ -342,10 +342,10 @@ hurd_activation_handler_normal (struct activation_frame *activation_frame,
{
case ACTIVATION_fault:
{
- addr_t fault;
+ vg_addr_t fault;
uintptr_t ip;
uintptr_t sp;
- struct activation_fault_info info;
+ struct vg_activation_fault_info info;
error_t err;
err = activation_fault_send_unmarshal (mb->reply,
@@ -354,10 +354,10 @@ hurd_activation_handler_normal (struct activation_frame *activation_frame,
if (err)
panic ("Failed to unmarshal exception: %d", err);
- debug (5, "Fault at " ADDR_FMT " (ip: %p, sp: %p, eax: %p, "
+ debug (5, "Fault at " VG_ADDR_FMT " (ip: %p, sp: %p, eax: %p, "
"ebx: %p, ecx: %p, edx: %p, edi: %p, esi: %p, ebp: %p, "
"eflags: %p)",
- ADDR_PRINTF (fault),
+ VG_ADDR_PRINTF (fault),
(void *) ip, (void *) sp,
(void *) activation_frame->eax,
(void *) activation_frame->ebx,
@@ -375,7 +375,7 @@ hurd_activation_handler_normal (struct activation_frame *activation_frame,
r = map_fault (fault, ip, info);
if (! r)
{
- uintptr_t f = (uintptr_t) ADDR_TO_PTR (fault);
+ uintptr_t f = (uintptr_t) VG_ADDR_TO_PTR (fault);
struct hurd_fault_catcher *catcher;
for (catcher = utcb->catchers; catcher; catcher = catcher->next)
{
@@ -404,10 +404,10 @@ hurd_activation_handler_normal (struct activation_frame *activation_frame,
if (as_rwlock_owner == l4_myself ())
debug (0, "I hold as_rwlock!");
- debug (0, "SIGSEGV at " ADDR_FMT " "
+ debug (0, "SIGSEGV at " VG_ADDR_FMT " "
"(ip: %p, sp: %p, eax: %p, ebx: %p, ecx: %p, "
"edx: %p, edi: %p, esi: %p, ebp: %p, eflags: %p)",
- ADDR_PRINTF (fault),
+ VG_ADDR_PRINTF (fault),
(void *) ip, (void *) sp,
(void *) activation_frame->eax,
(void *) activation_frame->ebx,
@@ -423,7 +423,7 @@ hurd_activation_handler_normal (struct activation_frame *activation_frame,
siginfo_t si;
memset (&si, 0, sizeof (si));
si.si_signo = SIGSEGV;
- si.si_addr = ADDR_TO_PTR (fault);
+ si.si_addr = VG_ADDR_TO_PTR (fault);
/* XXX: Should set si.si_code to SEGV_MAPERR or
SEGV_ACCERR. */
@@ -545,10 +545,10 @@ hurd_activation_handler_activated (struct hurd_utcb *utcb)
{
case ACTIVATION_fault:
{
- addr_t fault;
+ vg_addr_t fault;
uintptr_t ip;
uintptr_t sp;
- struct activation_fault_info info;
+ struct vg_activation_fault_info info;
error_t err;
err = activation_fault_send_unmarshal (mb->reply,
@@ -557,10 +557,10 @@ hurd_activation_handler_activated (struct hurd_utcb *utcb)
if (err)
panic ("Failed to unmarshal exception: %d", err);
- debug (4, "Fault at " ADDR_FMT "(ip: %x, sp: %x).",
- ADDR_PRINTF (fault), ip, sp);
+ debug (4, "Fault at " VG_ADDR_FMT "(ip: %x, sp: %x).",
+ VG_ADDR_PRINTF (fault), ip, sp);
- uintptr_t f = (uintptr_t) ADDR_TO_PTR (fault);
+ uintptr_t f = (uintptr_t) VG_ADDR_TO_PTR (fault);
uintptr_t stack_page = (sp & ~(PAGESIZE - 1));
uintptr_t fault_page = (f & ~(PAGESIZE - 1));
if (stack_page == fault_page
@@ -569,8 +569,8 @@ hurd_activation_handler_activated (struct hurd_utcb *utcb)
the following page. It is likely a stack fault.
Handle it using the alternate stack. */
{
- debug (5, "Stack fault at " ADDR_FMT "(ip: %x, sp: %x).",
- ADDR_PRINTF (fault), ip, sp);
+ debug (5, "Stack fault at " VG_ADDR_FMT "(ip: %x, sp: %x).",
+ VG_ADDR_PRINTF (fault), ip, sp);
assert (! utcb->alternate_stack_inuse);
utcb->alternate_stack_inuse = true;
@@ -580,9 +580,9 @@ hurd_activation_handler_activated (struct hurd_utcb *utcb)
activation_frame->normal_mode_stack = utcb->alternate_stack;
}
- debug (5, "Handling fault at " ADDR_FMT " in normal mode "
+ debug (5, "Handling fault at " VG_ADDR_FMT " in normal mode "
"(ip: %x, sp: %x).",
- ADDR_PRINTF (fault), ip, sp);
+ VG_ADDR_PRINTF (fault), ip, sp);
break;
}
@@ -594,9 +594,9 @@ hurd_activation_handler_activated (struct hurd_utcb *utcb)
/* Unblock the exception handler messenger. */
error_t err = vg_ipc (VG_IPC_RECEIVE | VG_IPC_RECEIVE_ACTIVATE
| VG_IPC_RETURN,
- ADDR_VOID, utcb->exception_buffer->receiver,
- ADDR_VOID,
- ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID);
+ VG_ADDR_VOID, utcb->exception_buffer->receiver,
+ VG_ADDR_VOID,
+ VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID);
assert (! err);
}
else if (mb->just_free)
@@ -685,8 +685,8 @@ hurd_activation_handler_init_early (void)
struct vg_message *msg = (void *) &activation_handler_msg[0];
rm_thread_exregs_send_marshal (msg, HURD_EXREGS_SET_UTCB, in,
- ADDR_VOID, ADDR_VOID,
- PTR_TO_PAGE (utcb), ADDR_VOID,
+ VG_ADDR_VOID, VG_ADDR_VOID,
+ VG_PTR_TO_PAGE (utcb), VG_ADDR_VOID,
__hurd_startup_data->messengers[1]);
error_t err;
@@ -696,11 +696,11 @@ hurd_activation_handler_init_early (void)
| VG_IPC_RECEIVE_INLINE
| VG_IPC_SEND_SET_THREAD_TO_CALLER
| VG_IPC_SEND_SET_ASROOT_TO_CALLERS,
- ADDR_VOID,
- __hurd_startup_data->messengers[1], ADDR_VOID, ADDR_VOID,
- ADDR_VOID, __hurd_startup_data->thread,
- __hurd_startup_data->messengers[0], PTR_TO_PAGE (msg),
- 0, 0, ADDR_VOID);
+ VG_ADDR_VOID,
+ __hurd_startup_data->messengers[1], VG_ADDR_VOID, VG_ADDR_VOID,
+ VG_ADDR_VOID, __hurd_startup_data->thread,
+ __hurd_startup_data->messengers[0], VG_PTR_TO_PAGE (msg),
+ 0, 0, VG_ADDR_VOID);
if (err)
panic ("Failed to send IPC: %d", err);
if (utcb->vg.inline_words[0])
@@ -730,14 +730,14 @@ hurd_activation_handler_init (void)
#define ACTIVATION_AREA_SIZE (1 << ACTIVATION_AREA_SIZE_LOG2)
error_t
-hurd_activation_state_alloc (addr_t thread, struct hurd_utcb **utcbp)
+hurd_activation_state_alloc (vg_addr_t thread, struct hurd_utcb **utcbp)
{
- debug (5, DEBUG_BOLD ("allocating activation state for " ADDR_FMT),
- ADDR_PRINTF (thread));
+ debug (5, DEBUG_BOLD ("allocating activation state for " VG_ADDR_FMT),
+ VG_ADDR_PRINTF (thread));
- addr_t activation_area = as_alloc (ACTIVATION_AREA_SIZE_LOG2, 1, true);
+ vg_addr_t activation_area = as_alloc (ACTIVATION_AREA_SIZE_LOG2, 1, true);
void *activation_area_base
- = ADDR_TO_PTR (addr_extend (activation_area,
+ = VG_ADDR_TO_PTR (vg_addr_extend (activation_area,
0, ACTIVATION_AREA_SIZE_LOG2));
debug (0, "Activation area: %p-%p",
@@ -746,22 +746,22 @@ hurd_activation_state_alloc (addr_t thread, struct hurd_utcb **utcbp)
int page_count = 0;
/* Be careful! We assume that pages is properly set up after at
most 2 allocations! */
- addr_t pages_[2];
- addr_t *pages = pages_;
+ vg_addr_t pages_[2];
+ vg_addr_t *pages = pages_;
void alloc (void *addr)
{
- addr_t slot = addr_chop (PTR_TO_ADDR (addr), PAGESIZE_LOG2);
+ vg_addr_t slot = vg_addr_chop (VG_PTR_TO_ADDR (addr), PAGESIZE_LOG2);
as_ensure (slot);
struct storage storage;
- storage = storage_alloc (ADDR_VOID, cap_page,
+ storage = storage_alloc (VG_ADDR_VOID, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT,
+ VG_OBJECT_POLICY_DEFAULT,
slot);
- if (ADDR_IS_VOID (storage.addr))
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Failed to allocate page for exception state");
if (pages == pages_)
@@ -804,12 +804,12 @@ hurd_activation_state_alloc (addr_t thread, struct hurd_utcb **utcbp)
/* At the top of the stack page, we use some space to remember the
storage we allocate so that we can free it later. */
utcb->vg.activation_handler_sp
- -= sizeof (addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE;
+ -= sizeof (vg_addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE;
memset ((void *) utcb->vg.activation_handler_sp, 0,
- sizeof (addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE);
+ sizeof (vg_addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE);
memcpy ((void *) utcb->vg.activation_handler_sp, pages,
- sizeof (addr_t) * page_count);
- pages = (addr_t *) utcb->vg.activation_handler_sp;
+ sizeof (vg_addr_t) * page_count);
+ pages = (vg_addr_t *) utcb->vg.activation_handler_sp;
/* The word beyond the base of the stack is a pointer to the
exception page. */
@@ -848,8 +848,8 @@ hurd_activation_state_alloc (addr_t thread, struct hurd_utcb **utcbp)
/* Unblock the exception handler messenger. */
error_t err = vg_ipc (VG_IPC_RECEIVE | VG_IPC_RECEIVE_ACTIVATE
| VG_IPC_RETURN,
- ADDR_VOID, utcb->exception_buffer->receiver, ADDR_VOID,
- ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID);
+ VG_ADDR_VOID, utcb->exception_buffer->receiver, VG_ADDR_VOID,
+ VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID);
assert (! err);
@@ -858,20 +858,20 @@ hurd_activation_state_alloc (addr_t thread, struct hurd_utcb **utcbp)
struct hurd_thread_exregs_in in;
struct hurd_thread_exregs_out out;
- err = rm_thread_exregs (ADDR_VOID, thread,
+ err = rm_thread_exregs (VG_ADDR_VOID, thread,
HURD_EXREGS_SET_UTCB
| HURD_EXREGS_SET_EXCEPTION_MESSENGER,
- in, ADDR_VOID, ADDR_VOID,
- PTR_TO_PAGE (utcb), utcb->exception_buffer->receiver,
+ in, VG_ADDR_VOID, VG_ADDR_VOID,
+ VG_PTR_TO_PAGE (utcb), utcb->exception_buffer->receiver,
&out, NULL, NULL, NULL, NULL);
if (err)
panic ("Failed to install utcb");
- err = rm_cap_copy (ADDR_VOID,
+ err = rm_cap_copy (VG_ADDR_VOID,
utcb->exception_buffer->receiver,
- ADDR (VG_MESSENGER_THREAD_SLOT, VG_MESSENGER_SLOTS_LOG2),
- ADDR_VOID, thread,
- 0, CAP_PROPERTIES_DEFAULT);
+ VG_ADDR (VG_MESSENGER_THREAD_SLOT, VG_MESSENGER_SLOTS_LOG2),
+ VG_ADDR_VOID, thread,
+ 0, VG_CAP_PROPERTIES_DEFAULT);
if (err)
panic ("Failed to set messenger's thread");
@@ -901,20 +901,20 @@ hurd_activation_state_free (struct hurd_utcb *utcb)
/* Free the allocated storage. */
/* Copy the array as we're going to free the storage that it is
in. */
- addr_t pages[ACTIVATION_AREA_SIZE / PAGESIZE];
+ vg_addr_t pages[ACTIVATION_AREA_SIZE / PAGESIZE];
memcpy (pages,
(void *) utcb->vg.activation_handler_sp + sizeof (uintptr_t),
- sizeof (addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE);
+ sizeof (vg_addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE);
int i;
for (i = 0; i < sizeof (pages) / sizeof (pages[0]); i ++)
- if (! ADDR_IS_VOID (pages[i]))
+ if (! VG_ADDR_IS_VOID (pages[i]))
storage_free (pages[i], false);
/* Finally, free the address space. */
int page = SKIP;
void *activation_area_base = (void *) utcb - page * PAGESIZE;
- as_free (addr_chop (PTR_TO_ADDR (activation_area_base),
+ as_free (vg_addr_chop (VG_PTR_TO_ADDR (activation_area_base),
ACTIVATION_AREA_SIZE_LOG2),
false);
}
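
With the renamed interface, a thread-creation path sets up activation state roughly as follows (a sketch; THREAD is assumed to designate a thread object):

  static struct hurd_utcb *
  activations_setup (vg_addr_t thread)
  {
    struct hurd_utcb *utcb;
    error_t err = hurd_activation_state_alloc (thread, &utcb);
    if (err)
      panic ("Failed to allocate activation state: %d", err);
    /* Tear down with hurd_activation_state_free (utcb).  */
    return utcb;
  }
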
diff --git a/libhurd-mm/map.c b/libhurd-mm/map.c
index 001b97d..2147d6b 100644
--- a/libhurd-mm/map.c
+++ b/libhurd-mm/map.c
@@ -35,12 +35,12 @@
static error_t
slab_alloc (void *hook, size_t size, void **ptr)
{
- struct storage storage = storage_alloc (meta_data_activity, cap_page,
+ struct storage storage = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
- *ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ *ptr = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
return 0;
}
@@ -50,7 +50,7 @@ slab_dealloc (void *hook, void *buffer, size_t size)
{
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
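
The chop/extend pair above is the recurring conversion idiom in this patch. Under the renamed macros, the round trip for a page-aligned, mapped object is (sketch):

  static void
  round_trip (struct storage storage)
  {
    /* Extending by PAGESIZE_LOG2 zero bits names the page's first
       byte; chopping the pointer's address recovers the original.  */
    void *p = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr,
                                              0, PAGESIZE_LOG2));
    vg_addr_t back = vg_addr_chop (VG_PTR_TO_ADDR (p), PAGESIZE_LOG2);
    assert (VG_ADDR_EQ (back, storage.addr));
  }
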
@@ -314,15 +314,15 @@ map_join (struct map *first, struct map *second)
}
bool
-map_fault (addr_t fault_addr, uintptr_t ip, struct activation_fault_info info)
+map_fault (vg_addr_t fault_addr, uintptr_t ip, struct vg_activation_fault_info info)
{
/* Find the map. */
struct region region;
- if (addr_depth (fault_addr) == ADDR_BITS - PAGESIZE_LOG2)
- fault_addr = addr_extend (fault_addr, 0, PAGESIZE_LOG2);
+ if (vg_addr_depth (fault_addr) == VG_ADDR_BITS - PAGESIZE_LOG2)
+ fault_addr = vg_addr_extend (fault_addr, 0, PAGESIZE_LOG2);
- region.start = (uintptr_t) ADDR_TO_PTR (fault_addr);
+ region.start = (uintptr_t) VG_ADDR_TO_PTR (fault_addr);
region.length = 1;
maps_lock_lock ();
@@ -332,9 +332,9 @@ map_fault (addr_t fault_addr, uintptr_t ip, struct activation_fault_info info)
{
do_debug (5)
{
- debug (0, "No map covers " ADDR_FMT "(" ACTIVATION_FAULT_INFO_FMT ")",
- ADDR_PRINTF (fault_addr),
- ACTIVATION_FAULT_INFO_PRINTF (info));
+ debug (0, "No map covers " VG_ADDR_FMT "(" VG_ACTIVATION_FAULT_INFO_FMT ")",
+ VG_ADDR_PRINTF (fault_addr),
+ VG_ACTIVATION_FAULT_INFO_PRINTF (info));
for (map = hurd_btree_map_first (&maps);
map;
map = hurd_btree_map_next (map))
@@ -349,9 +349,9 @@ map_fault (addr_t fault_addr, uintptr_t ip, struct activation_fault_info info)
if (((info.access & L4_FPAGE_WRITABLE) && ! (map->access & MAP_ACCESS_WRITE))
|| ! map->access)
{
- debug (0, "Invalid %s access at " ADDR_FMT ": " MAP_FMT,
+ debug (0, "Invalid %s access at " VG_ADDR_FMT ": " MAP_FMT,
info.access & L4_FPAGE_WRITABLE ? "write" : "read",
- ADDR_PRINTF (fault_addr), MAP_PRINTF (map));
+ VG_ADDR_PRINTF (fault_addr), MAP_PRINTF (map));
maps_lock_unlock ();
return false;
@@ -365,10 +365,10 @@ map_fault (addr_t fault_addr, uintptr_t ip, struct activation_fault_info info)
/* Propagate the fault. */
bool r = pager->fault (pager, offset, 1, ro,
- (uintptr_t) ADDR_TO_PTR (fault_addr), ip, info);
+ (uintptr_t) VG_ADDR_TO_PTR (fault_addr), ip, info);
if (! r)
- debug (5, "Map did not resolve fault at " ADDR_FMT,
- ADDR_PRINTF (fault_addr));
+ debug (5, "Map did not resolve fault at " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (fault_addr));
return r;
}
diff --git a/libhurd-mm/map.h b/libhurd-mm/map.h
index 206b392..d9aedef 100644
--- a/libhurd-mm/map.h
+++ b/libhurd-mm/map.h
@@ -228,9 +228,9 @@ extern struct map *map_split (struct map *map, uintptr_t offset);
This function takes and releases MAP->PAGER->LOCK. */
extern bool map_join (struct map *first, struct map *second);
-/* Raise a fault at address ADDR. Returns true if the fault was
+/* Raise a fault at address VG_ADDR. Returns true if the fault was
handled, false otherwise. */
-extern bool map_fault (addr_t addr,
- uintptr_t ip, struct activation_fault_info info);
+extern bool map_fault (vg_addr_t addr,
+ uintptr_t ip, struct vg_activation_fault_info info);
#endif
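
A hypothetical caller resolving a write fault; the access field and the L4_FPAGE_WRITABLE flag follow their uses elsewhere in this patch, the rest is assumed:

  #include <string.h>

  static bool
  resolve_write_fault (void *fault_va, uintptr_t ip)
  {
    /* map_fault accepts a page-depth address and extends it itself.  */
    vg_addr_t fault = vg_addr_chop (VG_PTR_TO_ADDR (fault_va),
                                    PAGESIZE_LOG2);
    struct vg_activation_fault_info info;
    memset (&info, 0, sizeof (info));
    info.access = L4_FPAGE_WRITABLE;   /* Assumed access encoding.  */
    return map_fault (fault, ip, info);
  }
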
diff --git a/libhurd-mm/message-buffer.c b/libhurd-mm/message-buffer.c
index c1326ab..dfa87c8 100644
--- a/libhurd-mm/message-buffer.c
+++ b/libhurd-mm/message-buffer.c
@@ -47,12 +47,12 @@ slab_alloc (void *hook, size_t size, void **ptr)
return 0;
}
- struct storage storage = storage_alloc (meta_data_activity, cap_page,
+ struct storage storage = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
- *ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ *ptr = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
return 0;
}
@@ -62,7 +62,7 @@ slab_dealloc (void *hook, void *buffer, size_t size)
{
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
@@ -91,10 +91,10 @@ slab_destructor (void *hook, void *object)
}
storage_free (mb->sender, false);
- storage_free (addr_chop (PTR_TO_ADDR (mb->request), PAGESIZE_LOG2),
+ storage_free (vg_addr_chop (VG_PTR_TO_ADDR (mb->request), PAGESIZE_LOG2),
false);
storage_free (mb->receiver, false);
- storage_free (addr_chop (PTR_TO_ADDR (mb->reply), PAGESIZE_LOG2),
+ storage_free (vg_addr_chop (VG_PTR_TO_ADDR (mb->reply), PAGESIZE_LOG2),
false);
}
@@ -129,10 +129,10 @@ hurd_message_buffer_alloc_hard (void)
mb->sender = __hurd_startup_data->messengers[initial_messenger ++];
else
{
- storage = storage_alloc (meta_data_activity, cap_messenger,
+ storage = storage_alloc (meta_data_activity, vg_cap_messenger,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
mb->sender = storage.addr;
@@ -143,10 +143,10 @@ hurd_message_buffer_alloc_hard (void)
mb->receiver_strong = __hurd_startup_data->messengers[initial_messenger ++];
else
{
- storage = storage_alloc (meta_data_activity, cap_messenger,
+ storage = storage_alloc (meta_data_activity, vg_cap_messenger,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
mb->receiver_strong = storage.addr;
@@ -155,17 +155,17 @@ hurd_message_buffer_alloc_hard (void)
/* Weaken it. */
#if 0
mb->receiver = capalloc ();
- struct cap receiver_cap = as_cap_lookup (mb->receiver_strong, cap_messenger,
+ struct vg_cap receiver_cap = as_cap_lookup (mb->receiver_strong, vg_cap_messenger,
NULL);
- assert (receiver_cap.type == cap_messenger);
+ assert (receiver_cap.type == vg_cap_messenger);
as_slot_lookup_use
(mb->receiver,
({
- bool ret = cap_copy_x (ADDR_VOID,
- ADDR_VOID, slot, mb->receiver,
- ADDR_VOID, receiver_cap, mb->receiver_strong,
- CAP_COPY_WEAKEN,
- CAP_PROPERTIES_VOID);
+ bool ret = vg_cap_copy_x (VG_ADDR_VOID,
+ VG_ADDR_VOID, slot, mb->receiver,
+ VG_ADDR_VOID, receiver_cap, mb->receiver_strong,
+ VG_CAP_COPY_WEAKEN,
+ VG_CAP_PROPERTIES_VOID);
assert (ret);
}));
#endif
@@ -176,13 +176,13 @@ hurd_message_buffer_alloc_hard (void)
mb->request = (void *) &initial_pages[initial_page ++][0];
else
{
- storage = storage_alloc (meta_data_activity, cap_page,
+ storage = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
- mb->request = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ mb->request = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
}
/* And the receive buffer. */
@@ -190,13 +190,13 @@ hurd_message_buffer_alloc_hard (void)
mb->reply = (void *) &initial_pages[initial_page ++][0];
else
{
- storage = storage_alloc (meta_data_activity, cap_page,
+ storage = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
- mb->reply = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ mb->reply = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
}
@@ -213,11 +213,11 @@ hurd_message_buffer_alloc_hard (void)
err = vg_ipc_full (VG_IPC_RECEIVE | VG_IPC_SEND | VG_IPC_RECEIVE_ACTIVATE
| VG_IPC_RECEIVE_SET_THREAD_TO_CALLER
| VG_IPC_SEND_SET_THREAD_TO_CALLER,
- ADDR_VOID, mb->receiver, PTR_TO_PAGE (mb->reply),
- ADDR_VOID,
- ADDR_VOID, mb->receiver,
- mb->sender, PTR_TO_PAGE (mb->request),
- 0, 0, ADDR_VOID);
+ VG_ADDR_VOID, mb->receiver, VG_PTR_TO_PAGE (mb->reply),
+ VG_ADDR_VOID,
+ VG_ADDR_VOID, mb->receiver,
+ mb->sender, VG_PTR_TO_PAGE (mb->request),
+ 0, 0, VG_ADDR_VOID);
if (err)
panic ("Failed to set receiver's id");
@@ -227,11 +227,11 @@ hurd_message_buffer_alloc_hard (void)
hurd_activation_message_register (mb);
err = vg_ipc_full (VG_IPC_RECEIVE | VG_IPC_SEND | VG_IPC_RECEIVE_ACTIVATE,
- ADDR_VOID, mb->receiver, PTR_TO_PAGE (mb->reply),
- ADDR_VOID,
- ADDR_VOID, mb->sender,
- mb->sender, PTR_TO_PAGE (mb->request),
- 0, 0, ADDR_VOID);
+ VG_ADDR_VOID, mb->receiver, VG_PTR_TO_PAGE (mb->reply),
+ VG_ADDR_VOID,
+ VG_ADDR_VOID, mb->sender,
+ mb->sender, VG_PTR_TO_PAGE (mb->request),
+ 0, 0, VG_ADDR_VOID);
if (err)
panic ("Failed to set sender's id");
diff --git a/libhurd-mm/message-buffer.h b/libhurd-mm/message-buffer.h
index 0c35b27..37eb2a0 100644
--- a/libhurd-mm/message-buffer.h
+++ b/libhurd-mm/message-buffer.h
@@ -38,13 +38,13 @@ struct hurd_message_buffer
/* A messenger associated REQUEST. The messenger's identifier is
set to the data structure's address. */
- addr_t sender;
+ vg_addr_t sender;
struct vg_message *request;
/* A messenger associated with REPLY. The messenger's identifier is
set to the data structure's address. */
- addr_t receiver_strong;
+ vg_addr_t receiver_strong;
/* A weakened version. */
- addr_t receiver;
+ vg_addr_t receiver;
struct vg_message *reply;
/* If not NULL, then this routine is called. */
diff --git a/libhurd-mm/mm-init.c b/libhurd-mm/mm-init.c
index caf362e..896b69f 100644
--- a/libhurd-mm/mm-init.c
+++ b/libhurd-mm/mm-init.c
@@ -38,19 +38,19 @@
extern struct hurd_startup_data *__hurd_startup_data;
-addr_t meta_data_activity;
+vg_addr_t meta_data_activity;
int mm_init_done;
void
-mm_init (addr_t activity)
+mm_init (vg_addr_t activity)
{
assert (! mm_init_done);
extern int output_debug;
output_debug = 1;
- if (ADDR_IS_VOID (activity))
+ if (VG_ADDR_IS_VOID (activity))
meta_data_activity = __hurd_startup_data->activity;
else
meta_data_activity = activity;
@@ -74,8 +74,8 @@ mm_init (addr_t activity)
#ifdef i386
void test (int nesting)
{
- addr_t addr = as_alloc (PAGESIZE_LOG2, 1, true);
- void *a = ADDR_TO_PTR (addr_extend (addr, 0, PAGESIZE_LOG2));
+ vg_addr_t addr = as_alloc (PAGESIZE_LOG2, 1, true);
+ void *a = VG_ADDR_TO_PTR (vg_addr_extend (addr, 0, PAGESIZE_LOG2));
int recursed = false;
@@ -83,7 +83,7 @@ mm_init (addr_t activity)
bool fault (struct pager *pager,
uintptr_t offset, int count, bool ro,
uintptr_t fault_addr, uintptr_t ip,
- struct activation_fault_info info)
+ struct vg_activation_fault_info info)
{
assert (a == (void *) (fault_addr & ~(PAGESIZE - 1)));
assert (count == 1);
@@ -115,9 +115,9 @@ mm_init (addr_t activity)
/* We cannot easily check esp and eip here. */
as_ensure (addr);
- storage = storage_alloc (ADDR_VOID,
- cap_page, STORAGE_UNKNOWN,
- OBJECT_POLICY_DEFAULT,
+ storage = storage_alloc (VG_ADDR_VOID,
+ vg_cap_page, STORAGE_UNKNOWN,
+ VG_OBJECT_POLICY_DEFAULT,
addr);
if (nesting > 1 && ! recursed)
diff --git a/libhurd-mm/mm.h b/libhurd-mm/mm.h
index 8baace8..2ba0262 100644
--- a/libhurd-mm/mm.h
+++ b/libhurd-mm/mm.h
@@ -29,6 +29,6 @@ extern int mm_init_done;
/* Initialize the memory management sub-system. ACTIVITY is the
activity to use to account meta-data resources. */
-extern void mm_init (addr_t activity);
+extern void mm_init (vg_addr_t activity);
#endif /* HURD_MM_MM_H */
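
In a task's start-up code the call is typically just (sketch):

  /* VG_ADDR_VOID: account meta-data to the activity the task was
     started with, i.e. __hurd_startup_data->activity.  */
  mm_init (VG_ADDR_VOID);
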
diff --git a/libhurd-mm/mmap.c b/libhurd-mm/mmap.c
index bda87cf..425f6b4 100644
--- a/libhurd-mm/mmap.c
+++ b/libhurd-mm/mmap.c
@@ -77,8 +77,8 @@ mmap (void *addr, size_t length, int protect, int flags,
debug (5, "Trying to allocate memory %p-%p", addr, addr + length);
struct anonymous_pager *pager;
- pager = anonymous_pager_alloc (ADDR_VOID, addr, length, access,
- OBJECT_POLICY_DEFAULT,
+ pager = anonymous_pager_alloc (VG_ADDR_VOID, addr, length, access,
+ VG_OBJECT_POLICY_DEFAULT,
(flags & MAP_FIXED) ? ANONYMOUS_FIXED: 0,
NULL, &addr);
if (! pager)
diff --git a/libhurd-mm/mprotect.c b/libhurd-mm/mprotect.c
index e80d08c..5c70427 100644
--- a/libhurd-mm/mprotect.c
+++ b/libhurd-mm/mprotect.c
@@ -124,10 +124,10 @@ mprotect (void *addr, size_t length, int prot)
{
map->access = access;
- addr_t addr;
- for (addr = ADDR (map_start, ADDR_BITS - PAGESIZE_LOG2);
- addr_prefix (addr) < map_end;
- addr = addr_add (addr, 1))
+ vg_addr_t addr;
+ for (addr = VG_ADDR (map_start, VG_ADDR_BITS - PAGESIZE_LOG2);
+ vg_addr_prefix (addr) < map_end;
+ addr = vg_addr_add (addr, 1))
{
/* This may fail if the page has not yet been faulted
in. That's okay: it will get the right
@@ -139,18 +139,18 @@ mprotect (void *addr, size_t length, int prot)
{
error_t err;
err = rm_cap_rubout (meta_data_activity,
- ADDR_VOID, addr);
+ VG_ADDR_VOID, addr);
assert (! err);
- slot->type = cap_void;
+ slot->type = vg_cap_void;
}
else
{
bool ret;
- ret = cap_copy_x (meta_data_activity,
- ADDR_VOID, slot, addr,
- ADDR_VOID, *slot, addr,
- CAP_COPY_WEAKEN,
- CAP_PROPERTIES_VOID);
+ ret = vg_cap_copy_x (meta_data_activity,
+ VG_ADDR_VOID, slot, addr,
+ VG_ADDR_VOID, *slot, addr,
+ VG_CAP_COPY_WEAKEN,
+ VG_CAP_PROPERTIES_VOID);
assert (ret);
}
}));
diff --git a/libhurd-mm/pager.h b/libhurd-mm/pager.h
index 66b75ef..b647573 100644
--- a/libhurd-mm/pager.h
+++ b/libhurd-mm/pager.h
@@ -36,12 +36,12 @@ struct pager;
typedef bool (*pager_fault_t) (struct pager *pager,
uintptr_t offset, int count, bool ro,
uintptr_t fault_addr, uintptr_t ip,
- struct activation_fault_info info);
+ struct vg_activation_fault_info info);
/* The count sub-trees starting at ADDR are no longer referenced and
their associated storage may be reclaimed. */
typedef void (*pager_reclaim_t) (struct pager *pager,
- addr_t addr, int count);
+ vg_addr_t addr, int count);
/* Called when the last map to a pager has been destroyed. (This
function should not call pager_deinit!) Called with PAGER->LOCK
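
A fault callback matching the renamed pager_fault_t; this hypothetical pager backs each faulted page with fresh storage (error handling elided), following the pattern of the self-test in libhurd-mm/mm-init.c above:

  static bool
  zero_fill_fault (struct pager *pager,
                   uintptr_t offset, int count, bool ro,
                   uintptr_t fault_addr, uintptr_t ip,
                   struct vg_activation_fault_info info)
  {
    vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR ((void *) fault_addr),
                                   PAGESIZE_LOG2);
    as_ensure (addr);
    struct storage storage
      = storage_alloc (VG_ADDR_VOID, vg_cap_page, STORAGE_UNKNOWN,
                       VG_OBJECT_POLICY_DEFAULT, addr);
    return ! VG_ADDR_IS_VOID (storage.addr);
  }
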
diff --git a/libhurd-mm/storage.c b/libhurd-mm/storage.c
index 8079036..bdb6ce0 100644
--- a/libhurd-mm/storage.c
+++ b/libhurd-mm/storage.c
@@ -56,12 +56,12 @@ static uatomic32_t free_count;
struct storage_desc
{
/* The address of the folio. */
- addr_t folio;
- /* The location of the shadow cap designating this folio. */
+ vg_addr_t folio;
+ /* The location of the shadow vg_cap designating this folio. */
struct object *shadow;
/* Which objects are allocated. */
- unsigned char alloced[FOLIO_OBJECTS / 8];
+ unsigned char alloced[VG_FOLIO_OBJECTS / 8];
/* The number of free objects. */
unsigned char free;
@@ -119,7 +119,7 @@ list_unlink (struct storage_desc *e)
}
static int
-addr_compare (const addr_t *a, const addr_t *b)
+addr_compare (const vg_addr_t *a, const vg_addr_t *b)
{
if (a->raw < b->raw)
return -1;
@@ -127,7 +127,7 @@ addr_compare (const addr_t *a, const addr_t *b)
}
BTREE_CLASS (storage_desc, struct storage_desc,
- addr_t, folio, node, addr_compare, false)
+ vg_addr_t, folio, node, addr_compare, false)
static hurd_btree_storage_desc_t storage_descs;
@@ -154,16 +154,16 @@ check_slab_space_reserve (void)
return;
/* We don't have a reserve. Allocate one now. */
- struct storage storage = storage_alloc (meta_data_activity, cap_page,
+ struct storage storage = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- void *buffer = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ void *buffer = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
buffer = (void *) atomic_exchange_acq (&slab_space_reserve, buffer);
if (buffer)
/* Someone else allocated a buffer. We don't need two, so
deallocate it. */
- storage_free (addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2), false);
+ storage_free (vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2), false);
}
static error_t
@@ -187,7 +187,7 @@ storage_desc_slab_dealloc (void *hook, void *buffer, size_t size)
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
@@ -235,10 +235,10 @@ static struct storage_desc *short_lived;
/* Once there are more free objects in a LONG_LIVED_STABLE folio than
FREEING_THRESHOLD, we change the folio's state from stable to
freeing. */
-#define FREEING_THRESHOLD (FOLIO_OBJECTS / 2)
+#define FREEING_THRESHOLD (VG_FOLIO_OBJECTS / 2)
static void
-shadow_setup (struct cap *cap, struct storage_desc *desc)
+shadow_setup (struct vg_cap *cap, struct storage_desc *desc)
{
/* We do not need to hold DESC->LOCK here as either we are in the
init phase and thus single threaded or we are initializing a new
@@ -254,12 +254,12 @@ shadow_setup (struct cap *cap, struct storage_desc *desc)
atomic_decrement (&free_count);
error_t err = rm_folio_object_alloc (meta_data_activity,
- desc->folio, idx, cap_page,
- OBJECT_POLICY_DEFAULT, 0,
+ desc->folio, idx, vg_cap_page,
+ VG_OBJECT_POLICY_DEFAULT, 0,
NULL, NULL);
assert (err == 0);
- shadow = ADDR_TO_PTR (addr_extend (addr_extend (desc->folio,
- idx, FOLIO_OBJECTS_LOG2),
+ shadow = VG_ADDR_TO_PTR (vg_addr_extend (vg_addr_extend (desc->folio,
+ idx, VG_FOLIO_OBJECTS_LOG2),
0, PAGESIZE_LOG2));
if (desc->free == 0)
@@ -285,32 +285,32 @@ shadow_setup (struct cap *cap, struct storage_desc *desc)
{
assert (! as_init_done);
- struct storage storage = storage_alloc (meta_data_activity, cap_page,
+ struct storage storage = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT,
- ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT,
+ VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of storage.");
- shadow = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ shadow = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
}
desc->shadow = shadow;
- cap->type = cap_folio;
- CAP_SET_SUBPAGE (cap, 0, 1);
- cap_set_shadow (cap, shadow);
+ cap->type = vg_cap_folio;
+ VG_CAP_SET_SUBPAGE (cap, 0, 1);
+ vg_cap_set_shadow (cap, shadow);
if (idx != -1)
{
- shadow->caps[idx].type = cap_page;
- CAP_PROPERTIES_SET (&shadow->caps[idx],
- CAP_PROPERTIES (OBJECT_POLICY_DEFAULT,
- CAP_ADDR_TRANS_VOID));
+ shadow->caps[idx].type = vg_cap_page;
+ VG_CAP_PROPERTIES_SET (&shadow->caps[idx],
+ VG_CAP_PROPERTIES (VG_OBJECT_POLICY_DEFAULT,
+ VG_CAP_ADDR_TRANS_VOID));
}
}
void
-storage_shadow_setup (struct cap *cap, addr_t folio)
+storage_shadow_setup (struct vg_cap *cap, vg_addr_t folio)
{
/* This code is only called from the initialization code. When this
code runs, there is exactly one thread. Thus, there is no need
@@ -357,7 +357,7 @@ static bool do_serialize;
static void
storage_check_reserve_internal (bool force_allocate,
- addr_t activity,
+ vg_addr_t activity,
enum storage_expectancy expectancy,
bool i_may_have_lock)
{
@@ -435,18 +435,18 @@ storage_check_reserve_internal (bool force_allocate,
/* Although we have not yet allocated the objects, allocating
support structures for the folio may require memory causing
us to recurse. Thus, we add them first. */
- atomic_add (&free_count, FOLIO_OBJECTS);
+ atomic_add (&free_count, VG_FOLIO_OBJECTS);
/* Here is the big recursive dependency! Using the address that
as_alloc returns might require allocating one (or more) page
tables to make a slot available. Moreover, each of those
page tables requires not only a cappage but also a shadow
page table. */
- addr_t addr;
+ vg_addr_t addr;
if (likely (as_init_done))
{
- addr = as_alloc (FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2, 1, true);
- if (ADDR_IS_VOID (addr))
+ addr = as_alloc (VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2, 1, true);
+ if (VG_ADDR_IS_VOID (addr))
panic ("Failed to allocate address space!");
as_ensure (addr);
@@ -454,21 +454,21 @@ storage_check_reserve_internal (bool force_allocate,
else
{
struct hurd_object_desc *desc;
- desc = as_alloc_slow (FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2);
- if (! desc || ADDR_IS_VOID (desc->object))
+ desc = as_alloc_slow (VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2);
+ if (! desc || VG_ADDR_IS_VOID (desc->object))
panic ("Failed to allocate address space!");
addr = desc->object;
desc->storage = addr;
- desc->type = cap_folio;
+ desc->type = vg_cap_folio;
}
/* And then the folio. */
- addr_t a = addr;
- error_t err = rm_folio_alloc (activity, activity, FOLIO_POLICY_DEFAULT,
+ vg_addr_t a = addr;
+ error_t err = rm_folio_alloc (activity, activity, VG_FOLIO_POLICY_DEFAULT,
&a);
assert (! err);
- assert (ADDR_EQ (addr, a));
+ assert (VG_ADDR_EQ (addr, a));
/* Allocate and fill a descriptor. */
struct storage_desc *s = storage_desc_alloc ();
@@ -476,7 +476,7 @@ storage_check_reserve_internal (bool force_allocate,
s->lock = (ss_mutex_t) 0;
s->folio = addr;
memset (&s->alloced, 0, sizeof (s->alloced));
- s->free = FOLIO_OBJECTS;
+ s->free = VG_FOLIO_OBJECTS;
if (likely (as_init_done))
{
@@ -529,10 +529,10 @@ storage_check_reserve (bool i_may_have_lock)
#undef storage_alloc
struct storage
-storage_alloc (addr_t activity,
- enum cap_type type, enum storage_expectancy expectancy,
+storage_alloc (vg_addr_t activity,
+ enum vg_cap_type type, enum storage_expectancy expectancy,
struct object_policy policy,
- addr_t addr)
+ vg_addr_t addr)
{
assert (storage_init_done);
@@ -610,17 +610,17 @@ storage_alloc (addr_t activity,
int idx = bit_alloc (desc->alloced, sizeof (desc->alloced), 0);
assertx (idx != -1,
- "Folio (" ADDR_FMT ") full (free: %d) but on a list!",
- ADDR_PRINTF (desc->folio), desc->free);
+ "Folio (" VG_ADDR_FMT ") full (free: %d) but on a list!",
+ VG_ADDR_PRINTF (desc->folio), desc->free);
- addr_t folio = desc->folio;
- addr_t object = addr_extend (folio, idx, FOLIO_OBJECTS_LOG2);
+ vg_addr_t folio = desc->folio;
+ vg_addr_t object = vg_addr_extend (folio, idx, VG_FOLIO_OBJECTS_LOG2);
- debug (5, "Allocating object %d as %s from " ADDR_FMT " (" ADDR_FMT ") "
- "(%d left), installing at " ADDR_FMT,
- idx, cap_type_string (type),
- ADDR_PRINTF (folio), ADDR_PRINTF (object),
- desc->free, ADDR_PRINTF (addr));
+ debug (5, "Allocating object %d as %s from " VG_ADDR_FMT " (" VG_ADDR_FMT ") "
+ "(%d left), installing at " VG_ADDR_FMT,
+ idx, vg_cap_type_string (type),
+ VG_ADDR_PRINTF (folio), VG_ADDR_PRINTF (object),
+ desc->free, VG_ADDR_PRINTF (addr));
atomic_decrement (&free_count);
desc->free --;
@@ -632,7 +632,7 @@ storage_alloc (addr_t activity,
{
assert (bit_alloc (desc->alloced, sizeof (desc->alloced), 0) == -1);
- debug (3, "Folio at " ADDR_FMT " full", ADDR_PRINTF (folio));
+ debug (3, "Folio at " VG_ADDR_FMT " full", VG_ADDR_PRINTF (folio));
list_unlink (desc);
@@ -644,20 +644,20 @@ storage_alloc (addr_t activity,
ss_mutex_unlock (&storage_descs_lock);
}
- addr_t a = addr;
+ vg_addr_t a = addr;
error_t err = rm_folio_object_alloc (activity, folio, idx, type, policy, 0,
&a, NULL);
assertx (! err,
- "Allocating object %d from " ADDR_FMT " at " ADDR_FMT ": %d!",
- idx, ADDR_PRINTF (folio), ADDR_PRINTF (addr), err);
- assert (ADDR_EQ (a, addr));
+ "Allocating object %d from " VG_ADDR_FMT " at " VG_ADDR_FMT ": %d!",
+ idx, VG_ADDR_PRINTF (folio), VG_ADDR_PRINTF (addr), err);
+ assert (VG_ADDR_EQ (a, addr));
struct object *shadow = desc->shadow;
- struct cap *cap = NULL;
+ struct vg_cap *cap = NULL;
if (likely (!! shadow))
{
cap = &shadow->caps[idx];
- CAP_PROPERTIES_SET (cap, CAP_PROPERTIES (policy, CAP_ADDR_TRANS_VOID));
+ VG_CAP_PROPERTIES_SET (cap, VG_CAP_PROPERTIES (policy, VG_CAP_ADDR_TRANS_VOID));
cap->type = type;
}
else
@@ -666,16 +666,16 @@ storage_alloc (addr_t activity,
/* We drop DESC->LOCK. */
ss_mutex_unlock (&desc->lock);
- if (! ADDR_IS_VOID (addr))
- /* We also have to update the shadow for ADDR. Unfortunately, we
+ if (! VG_ADDR_IS_VOID (addr))
+ /* We also have to update the shadow for VG_ADDR. Unfortunately, we
don't have the cap although the caller might. */
{
bool ret = as_slot_lookup_use
(addr,
({
slot->type = type;
- cap_set_shadow (slot, NULL);
- CAP_POLICY_SET (slot, policy);
+ vg_cap_set_shadow (slot, NULL);
+ VG_CAP_POLICY_SET (slot, policy);
}));
if (! ret)
{
@@ -689,29 +689,29 @@ storage_alloc (addr_t activity,
storage.addr = object;
#ifndef NDEBUG
- if (type == cap_page)
+ if (type == vg_cap_page)
{
- unsigned int *p = ADDR_TO_PTR (addr_extend (storage.addr,
+ unsigned int *p = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr,
0, PAGESIZE_LOG2));
int c;
for (c = 0; c < PAGESIZE / sizeof (int); c ++)
assertx (p[c] == 0,
- ADDR_FMT "(%p)[%d] = %x",
- ADDR_PRINTF (storage.addr), p, c * sizeof (int), p[c]);
+ VG_ADDR_FMT "(%p)[%d] = %x",
+ VG_ADDR_PRINTF (storage.addr), p, c * sizeof (int), p[c]);
}
#endif
- debug (5, "Allocated " ADDR_FMT "; " ADDR_FMT,
- ADDR_PRINTF (storage.addr), ADDR_PRINTF (addr));
+ debug (5, "Allocated " VG_ADDR_FMT "; " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (storage.addr), VG_ADDR_PRINTF (addr));
return storage;
}
void
-storage_free_ (addr_t object, bool unmap_now)
+storage_free_ (vg_addr_t object, bool unmap_now)
{
- debug (5, DEBUG_BOLD ("Freeing " ADDR_FMT), ADDR_PRINTF (object));
+ debug (5, DEBUG_BOLD ("Freeing " VG_ADDR_FMT), VG_ADDR_PRINTF (object));
- addr_t folio = addr_chop (object, FOLIO_OBJECTS_LOG2);
+ vg_addr_t folio = vg_addr_chop (object, VG_FOLIO_OBJECTS_LOG2);
atomic_increment (&free_count);
@@ -721,9 +721,9 @@ storage_free_ (addr_t object, bool unmap_now)
struct storage_desc *storage;
storage = hurd_btree_storage_desc_find (&storage_descs, &folio);
assertx (storage,
- "No storage associated with " ADDR_FMT " "
+ "No storage associated with " VG_ADDR_FMT " "
"(did you pass the storage address?)",
- ADDR_PRINTF (object));
+ VG_ADDR_PRINTF (object));
ss_mutex_lock (&storage->lock);
@@ -731,20 +731,20 @@ storage_free_ (addr_t object, bool unmap_now)
struct object *shadow = storage->shadow;
- if (storage->free == FOLIO_OBJECTS
- || ((storage->free == FOLIO_OBJECTS - 1)
+ if (storage->free == VG_FOLIO_OBJECTS
+ || ((storage->free == VG_FOLIO_OBJECTS - 1)
&& shadow
- && ADDR_EQ (folio, addr_chop (PTR_TO_ADDR (shadow),
- FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2))))
+ && VG_ADDR_EQ (folio, vg_addr_chop (VG_PTR_TO_ADDR (shadow),
+ VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2))))
/* The folio is now empty. */
{
- debug (1, "Folio at " ADDR_FMT " now empty", ADDR_PRINTF (folio));
+ debug (1, "Folio at " VG_ADDR_FMT " now empty", VG_ADDR_PRINTF (folio));
- if (free_count - FOLIO_OBJECTS > FREE_PAGES_LOW_WATER)
+ if (free_count - VG_FOLIO_OBJECTS > FREE_PAGES_LOW_WATER)
/* There are sufficient reserve pages not including this
folio. Thus, we free STORAGE. */
{
- atomic_add (&free_count, - FOLIO_OBJECTS);
+ atomic_add (&free_count, - VG_FOLIO_OBJECTS);
list_unlink (storage);
hurd_btree_storage_desc_detach (&storage_descs, storage);
@@ -757,18 +757,18 @@ storage_free_ (addr_t object, bool unmap_now)
as_slot_lookup_use (folio,
({
- cap_set_shadow (slot, NULL);
- slot->type = cap_void;
+ vg_cap_set_shadow (slot, NULL);
+ slot->type = vg_cap_void;
}));
storage_desc_free (storage);
if (shadow)
{
- addr_t shadow_addr = addr_chop (PTR_TO_ADDR (shadow),
+ vg_addr_t shadow_addr = vg_addr_chop (VG_PTR_TO_ADDR (shadow),
PAGESIZE_LOG2);
- if (ADDR_EQ (addr_chop (shadow_addr, FOLIO_OBJECTS_LOG2), folio))
+ if (VG_ADDR_EQ (vg_addr_chop (shadow_addr, VG_FOLIO_OBJECTS_LOG2), folio))
{
/* The shadow was allocated from ourself, which we
already freed. */
@@ -803,22 +803,22 @@ storage_free_ (addr_t object, bool unmap_now)
ss_mutex_unlock (&storage_descs_lock);
- int idx = addr_extract (object, FOLIO_OBJECTS_LOG2);
+ int idx = vg_addr_extract (object, VG_FOLIO_OBJECTS_LOG2);
bit_dealloc (storage->alloced, idx);
error_t err = rm_folio_object_alloc (meta_data_activity,
- folio, idx, cap_void,
- OBJECT_POLICY_DEFAULT, 0,
+ folio, idx, vg_cap_void,
+ VG_OBJECT_POLICY_DEFAULT, 0,
NULL, NULL);
assert (err == 0);
if (likely (!! shadow))
{
- shadow->caps[idx].type = cap_void;
- cap_set_shadow (&shadow->caps[idx], NULL);
- CAP_PROPERTIES_SET (&shadow->caps[idx],
- CAP_PROPERTIES (OBJECT_POLICY_DEFAULT,
- CAP_ADDR_TRANS_VOID));
+ shadow->caps[idx].type = vg_cap_void;
+ vg_cap_set_shadow (&shadow->caps[idx], NULL);
+ VG_CAP_PROPERTIES_SET (&shadow->caps[idx],
+ VG_CAP_PROPERTIES (VG_OBJECT_POLICY_DEFAULT,
+ VG_CAP_ADDR_TRANS_VOID));
}
else
assert (! as_init_done);
@@ -851,28 +851,28 @@ storage_init (void)
i < __hurd_startup_data->desc_count;
i ++, odesc ++)
{
- if (ADDR_IS_VOID (odesc->storage))
+ if (VG_ADDR_IS_VOID (odesc->storage))
continue;
- addr_t folio;
- if (odesc->type == cap_folio)
+ vg_addr_t folio;
+ if (odesc->type == vg_cap_folio)
folio = odesc->object;
else
- folio = addr_chop (odesc->storage, FOLIO_OBJECTS_LOG2);
+ folio = vg_addr_chop (odesc->storage, VG_FOLIO_OBJECTS_LOG2);
struct storage_desc *sdesc;
sdesc = hurd_btree_storage_desc_find (&storage_descs, &folio);
if (! sdesc)
/* Haven't seen this folio yet. */
{
- debug (5, "Adding folio " ADDR_FMT, ADDR_PRINTF (folio));
+ debug (5, "Adding folio " VG_ADDR_FMT, VG_ADDR_PRINTF (folio));
folio_count ++;
sdesc = storage_desc_alloc ();
sdesc->lock = (ss_mutex_t) 0;
sdesc->folio = folio;
- sdesc->free = FOLIO_OBJECTS;
+ sdesc->free = VG_FOLIO_OBJECTS;
sdesc->mode = LONG_LIVED_ALLOCING;
list_link (&long_lived_allocing, sdesc);
@@ -881,20 +881,20 @@ storage_init (void)
/* Assume that the folio is free. As we encounter objects,
we will mark them as allocated. */
- free_count += FOLIO_OBJECTS;
+ free_count += VG_FOLIO_OBJECTS;
}
- if (odesc->type != cap_folio)
+ if (odesc->type != vg_cap_folio)
{
- int idx = addr_extract (odesc->storage, FOLIO_OBJECTS_LOG2);
+ int idx = vg_addr_extract (odesc->storage, VG_FOLIO_OBJECTS_LOG2);
debug (5, "%llx/%d, %d -> %llx/%d (%s)",
- addr_prefix (folio),
- addr_depth (folio),
+ vg_addr_prefix (folio),
+ vg_addr_depth (folio),
idx,
- addr_prefix (odesc->storage),
- addr_depth (odesc->storage),
- cap_type_string (odesc->type));
+ vg_addr_prefix (odesc->storage),
+ vg_addr_depth (odesc->storage),
+ vg_cap_type_string (odesc->type));
bit_set (sdesc->alloced, sizeof (sdesc->alloced), idx);
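The folio arithmetic used throughout this file survives the rename
unchanged: an object's address still splits into a folio address plus
an object index. A sketch of the round trip (assuming OBJECT names an
object allocated from a folio, as in storage_free_ above):

  vg_addr_t folio = vg_addr_chop (object, VG_FOLIO_OBJECTS_LOG2);
  int idx = vg_addr_extract (object, VG_FOLIO_OBJECTS_LOG2);
  assert (VG_ADDR_EQ (vg_addr_extend (folio, idx, VG_FOLIO_OBJECTS_LOG2),
                      object));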
diff --git a/libhurd-mm/storage.h b/libhurd-mm/storage.h
index c7b4f66..e454b83 100644
--- a/libhurd-mm/storage.h
+++ b/libhurd-mm/storage.h
@@ -42,16 +42,16 @@ enum storage_expectancy
struct storage
{
- struct cap *cap;
- addr_t addr;
+ struct vg_cap *cap;
+ vg_addr_t addr;
};
/* Allocate an object of type TYPE. The object has a life expectancy
- of EXPECTANCY. If ADDR is not ADDR_VOID, a capability to the
+ of EXPECTANCY. If ADDR is not VG_ADDR_VOID, a capability to the
storage will be saved at ADDR (and the shadow object updated
appropriately). On success, the shadow capability slot for the
storage is returned (useful for setting up a shadow object) and the
- address of the storage object. Otherwise, NULL and ADDR_VOID,
+ address of the storage object. Otherwise, NULL and VG_ADDR_VOID,
respectively, are returned. ACTIVITY is the activity to use to
account the storage.
@@ -61,11 +61,11 @@ struct storage
caller wants to use the allocated object for address translation,
the caller must allocate the shadow object. If not, functions
including the cap_lookup family will fail. */
-extern struct storage storage_alloc (addr_t activity,
- enum cap_type type,
+extern struct storage storage_alloc (vg_addr_t activity,
+ enum vg_cap_type type,
enum storage_expectancy expectancy,
struct object_policy policy,
- addr_t addr);
+ vg_addr_t addr);
#define storage_alloc(__sa_activity, __sa_type, __sa_expectancy, \
__sa_policy, __sa_addr) \
({ \
@@ -73,9 +73,9 @@ extern struct storage storage_alloc (addr_t activity,
__sa_storage = storage_alloc (__sa_activity, __sa_type, \
__sa_expectancy, __sa_policy, \
__sa_addr); \
- debug (5, "storage_alloc (%s, " ADDR_FMT ") -> " ADDR_FMT, \
- cap_type_string (__sa_type), ADDR_PRINTF (__sa_addr), \
- ADDR_PRINTF (__sa_storage.addr)); \
+ debug (5, "storage_alloc (%s, " VG_ADDR_FMT ") -> " VG_ADDR_FMT, \
+ vg_cap_type_string (__sa_type), VG_ADDR_PRINTF (__sa_addr), \
+ VG_ADDR_PRINTF (__sa_storage.addr)); \
__sa_storage; \
})
@@ -83,10 +83,10 @@ extern struct storage storage_alloc (addr_t activity,
/* Frees the storage at STORAGE. STORAGE must be the address returned
by storage_alloc (NOT the address provided to storage_alloc). If
UNMAP_NOW is not true, revoking the storage may be delayed. */
-extern void storage_free_ (addr_t storage, bool unmap_now);
+extern void storage_free_ (vg_addr_t storage, bool unmap_now);
#define storage_free(__sf_storage, __sf_unmap_now) \
({ \
- debug (5, "storage_free (" ADDR_FMT ")", ADDR_PRINTF (__sf_storage)); \
+ debug (5, "storage_free (" VG_ADDR_FMT ")", VG_ADDR_PRINTF (__sf_storage)); \
storage_free_ (__sf_storage, __sf_unmap_now); \
})
@@ -94,7 +94,7 @@ extern void storage_free_ (addr_t storage, bool unmap_now);
extern void storage_init (void);
/* Used by as_init to initialize a folio's shadow object. */
-extern void storage_shadow_setup (struct cap *cap, addr_t folio);
+extern void storage_shadow_setup (struct vg_cap *cap, vg_addr_t folio);
/* Return whether there is sufficient reserve storage. */
extern bool storage_have_reserve (void);
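Under the new names the interface is used exactly as before. A
minimal allocate/use/free sketch, mirroring check_slab_space_reserve
in storage.c (meta_data_activity stands in for whatever activity
should be charged):

  struct storage s = storage_alloc (meta_data_activity, vg_cap_page,
                                    STORAGE_LONG_LIVED,
                                    VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
  if (VG_ADDR_IS_VOID (s.addr))
    panic ("Out of storage.");

  void *buffer = VG_ADDR_TO_PTR (vg_addr_extend (s.addr, 0, PAGESIZE_LOG2));
  /* ... use BUFFER ...  */
  storage_free (vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2), false);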
diff --git a/libpthread/sysdeps/viengoos/bits/pthread-np.h b/libpthread/sysdeps/viengoos/bits/pthread-np.h
index 9986a62..a38f4a7 100644
--- a/libpthread/sysdeps/viengoos/bits/pthread-np.h
+++ b/libpthread/sysdeps/viengoos/bits/pthread-np.h
@@ -27,7 +27,7 @@
#include <viengoos/addr.h>
#include <hurd/thread.h>
-int pthread_setactivity_np (addr_t activity);
+int pthread_setactivity_np (vg_addr_t activity);
/* Returns the caller's activation state block. */
struct hurd_utcb *pthread_hurd_utcb_np (void) __attribute__ ((pure));
diff --git a/libpthread/sysdeps/viengoos/ia32/signal-dispatch-lowlevel.c b/libpthread/sysdeps/viengoos/ia32/signal-dispatch-lowlevel.c
index 091815b..bbb4ae4 100644
--- a/libpthread/sysdeps/viengoos/ia32/signal-dispatch-lowlevel.c
+++ b/libpthread/sysdeps/viengoos/ia32/signal-dispatch-lowlevel.c
@@ -125,14 +125,14 @@ signal_dispatch_lowlevel (struct signal_state *ss, pthread_t tid,
struct hurd_thread_exregs_out out;
error_t err;
- err = rm_thread_exregs (ADDR_VOID, thread->object,
+ err = rm_thread_exregs (VG_ADDR_VOID, thread->object,
HURD_EXREGS_STOP | HURD_EXREGS_ABORT_IPC
| HURD_EXREGS_GET_REGS,
- in, ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ in, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID,
&out, NULL, NULL, NULL, NULL);
if (err)
- panic ("Failed to modify thread " ADDR_FMT,
- ADDR_PRINTF (thread->object));
+ panic ("Failed to modify thread " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (thread->object));
intr_sp = out.sp;
@@ -207,10 +207,10 @@ signal_dispatch_lowlevel (struct signal_state *ss, pthread_t tid,
in.sp = sp;
in.ip = (uintptr_t) &_signal_dispatch_entry;
- rm_thread_exregs (ADDR_VOID, thread->object,
+ rm_thread_exregs (VG_ADDR_VOID, thread->object,
HURD_EXREGS_SET_SP_IP
| HURD_EXREGS_START | HURD_EXREGS_ABORT_IPC,
- in, ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ in, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID,
&out, NULL, NULL, NULL, NULL);
}
}
diff --git a/libpthread/sysdeps/viengoos/pt-block.c b/libpthread/sysdeps/viengoos/pt-block.c
index 8e6a17f..e9a82ff 100644
--- a/libpthread/sysdeps/viengoos/pt-block.c
+++ b/libpthread/sysdeps/viengoos/pt-block.c
@@ -34,7 +34,7 @@ __pthread_block (struct __pthread *thread)
thread->lock_message_buffer = NULL;
#endif
- futex_wait_using (mb, &thread->threadid, thread->threadid);
+ vg_futex_wait_using (mb, &thread->threadid, thread->threadid);
#ifndef NDEBUG
thread->lock_message_buffer = mb;
diff --git a/libpthread/sysdeps/viengoos/pt-setactivity-np.c b/libpthread/sysdeps/viengoos/pt-setactivity-np.c
index db695b9..5894ad7 100644
--- a/libpthread/sysdeps/viengoos/pt-setactivity-np.c
+++ b/libpthread/sysdeps/viengoos/pt-setactivity-np.c
@@ -23,15 +23,15 @@
#include <viengoos/thread.h>
int
-pthread_setactivity_np (addr_t activity)
+pthread_setactivity_np (vg_addr_t activity)
{
struct __pthread *self = _pthread_self ();
struct hurd_thread_exregs_in in;
struct hurd_thread_exregs_out out;
- int err = rm_thread_exregs (ADDR_VOID, self->object,
+ int err = rm_thread_exregs (VG_ADDR_VOID, self->object,
HURD_EXREGS_SET_ACTIVITY,
- in, ADDR_VOID, activity, ADDR_VOID, ADDR_VOID,
+ in, VG_ADDR_VOID, activity, VG_ADDR_VOID, VG_ADDR_VOID,
&out, NULL, NULL, NULL, NULL);
return err;
diff --git a/libpthread/sysdeps/viengoos/pt-sysdep.h b/libpthread/sysdeps/viengoos/pt-sysdep.h
index 15e0a03..b276b87 100644
--- a/libpthread/sysdeps/viengoos/pt-sysdep.h
+++ b/libpthread/sysdeps/viengoos/pt-sysdep.h
@@ -34,7 +34,7 @@
#include <hurd/message-buffer.h>
#define PTHREAD_SYSDEP_MEMBERS \
- addr_t object; \
+ vg_addr_t object; \
vg_thread_id_t threadid; \
struct hurd_utcb *utcb; \
struct hurd_message_buffer *lock_message_buffer; \
diff --git a/libpthread/sysdeps/viengoos/pt-thread-alloc.c b/libpthread/sysdeps/viengoos/pt-thread-alloc.c
index 3c7df34..a65024f 100644
--- a/libpthread/sysdeps/viengoos/pt-thread-alloc.c
+++ b/libpthread/sysdeps/viengoos/pt-thread-alloc.c
@@ -31,7 +31,7 @@
extern struct hurd_startup_data *__hurd_startup_data;
-extern addr_t meta_data_activity;
+extern vg_addr_t meta_data_activity;
int
__pthread_thread_alloc (struct __pthread *thread)
@@ -58,11 +58,11 @@ __pthread_thread_alloc (struct __pthread *thread)
else
{
struct storage storage;
- storage = storage_alloc (meta_data_activity, cap_thread,
+ storage = storage_alloc (meta_data_activity, vg_cap_thread,
/* Threads are rarely shortly lived. */
- STORAGE_MEDIUM_LIVED, OBJECT_POLICY_DEFAULT,
- ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ STORAGE_MEDIUM_LIVED, VG_OBJECT_POLICY_DEFAULT,
+ VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
{
debug (0, DEBUG_BOLD ("Out of memory"));
return EAGAIN;
@@ -75,20 +75,20 @@ __pthread_thread_alloc (struct __pthread *thread)
if (unlikely (err))
panic ("Failed to initialize thread's activation state: %d", err);
- err = rm_cap_copy (ADDR_VOID,
+ err = rm_cap_copy (VG_ADDR_VOID,
thread->lock_message_buffer->receiver,
- ADDR (VG_MESSENGER_THREAD_SLOT,
+ VG_ADDR (VG_MESSENGER_THREAD_SLOT,
VG_MESSENGER_SLOTS_LOG2),
- ADDR_VOID, thread->object,
- 0, CAP_PROPERTIES_DEFAULT);
+ VG_ADDR_VOID, thread->object,
+ 0, VG_CAP_PROPERTIES_DEFAULT);
if (err)
panic ("Failed to set lock messenger's thread");
/* Unblock the lock messenger. */
err = vg_ipc (VG_IPC_RECEIVE | VG_IPC_RECEIVE_ACTIVATE
| VG_IPC_RETURN,
- ADDR_VOID, thread->lock_message_buffer->receiver, ADDR_VOID,
- ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID);
+ VG_ADDR_VOID, thread->lock_message_buffer->receiver, VG_ADDR_VOID,
+ VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID);
if (err)
panic ("Failed to unblock messenger's thread");
}
diff --git a/libpthread/sysdeps/viengoos/pt-thread-halt.c b/libpthread/sysdeps/viengoos/pt-thread-halt.c
index d721500..ba9f2b0 100644
--- a/libpthread/sysdeps/viengoos/pt-thread-halt.c
+++ b/libpthread/sysdeps/viengoos/pt-thread-halt.c
@@ -42,10 +42,10 @@ __pthread_thread_halt (struct __pthread *thread)
}
else
{
- error_t err = thread_stop (thread->object);
+ error_t err = vg_thread_stop (thread->object);
if (err)
- panic ("Failed to halt " ADDR_FMT ": %d",
- ADDR_PRINTF (thread->object), err);
+ panic ("Failed to halt " VG_ADDR_FMT ": %d",
+ VG_ADDR_PRINTF (thread->object), err);
}
}
}
diff --git a/libpthread/sysdeps/viengoos/pt-thread-start.c b/libpthread/sysdeps/viengoos/pt-thread-start.c
index f4478ac..6a9fc90 100644
--- a/libpthread/sysdeps/viengoos/pt-thread-start.c
+++ b/libpthread/sysdeps/viengoos/pt-thread-start.c
@@ -41,24 +41,24 @@ __pthread_thread_start (struct __pthread *thread)
struct hurd_thread_exregs_in in;
struct hurd_thread_exregs_out out;
- addr_t aspace = ADDR (0, 0);
- in.aspace_cap_properties = CAP_PROPERTIES_VOID;
- in.aspace_cap_properties_flags = CAP_COPY_COPY_SOURCE_GUARD;
+ vg_addr_t aspace = VG_ADDR (0, 0);
+ in.aspace_cap_properties = VG_CAP_PROPERTIES_VOID;
+ in.aspace_cap_properties_flags = VG_CAP_COPY_COPY_SOURCE_GUARD;
- addr_t activity = ADDR_VOID;
+ vg_addr_t activity = VG_ADDR_VOID;
in.sp = (l4_word_t) thread->mcontext.sp;
in.ip = (l4_word_t) thread->mcontext.pc;
in.user_handle = (l4_word_t) thread;
- err = rm_thread_exregs (ADDR_VOID, thread->object,
+ err = rm_thread_exregs (VG_ADDR_VOID, thread->object,
HURD_EXREGS_SET_ASPACE
| HURD_EXREGS_SET_ACTIVITY
| HURD_EXREGS_SET_SP_IP
| HURD_EXREGS_SET_USER_HANDLE
| HURD_EXREGS_START
| HURD_EXREGS_ABORT_IPC,
- in, aspace, activity, ADDR_VOID, ADDR_VOID,
+ in, aspace, activity, VG_ADDR_VOID, VG_ADDR_VOID,
&out, NULL, NULL, NULL, NULL);
assert (err == 0);
}
diff --git a/libpthread/sysdeps/viengoos/pt-wakeup.c b/libpthread/sysdeps/viengoos/pt-wakeup.c
index 6435689..4bd51b4 100644
--- a/libpthread/sysdeps/viengoos/pt-wakeup.c
+++ b/libpthread/sysdeps/viengoos/pt-wakeup.c
@@ -40,7 +40,7 @@ __pthread_wakeup (struct __pthread *thread)
long ret;
do
{
- ret = futex_wake_using (self->lock_message_buffer,
+ ret = vg_futex_wake_using (self->lock_message_buffer,
&thread->threadid, INT_MAX);
assertx (ret <= 1, "tid: %x, ret: %d", thread->threadid, ret);
diff --git a/libviengoos/t-addr-trans.c b/libviengoos/t-addr-trans.c
index c3607ad..7ce57eb 100644
--- a/libviengoos/t-addr-trans.c
+++ b/libviengoos/t-addr-trans.c
@@ -1,4 +1,4 @@
-/* t-cap.c - Test the implementation of the various cap functions.
+/* t-addr-trans.c - Test the implementation of the various vg_cap functions.
Copyright (C) 2007, 2008 Free Software Foundation, Inc.
Written by Neal H. Walfield <neal@gnu.org>.
@@ -31,9 +31,9 @@ char *program_name = "t-addr-trans";
int
main (int argc, char *argv[])
{
- printf ("Checking CAP_ADDR_TRANS_SET_GUARD_SUBPAGE... ");
+ printf ("Checking VG_CAP_ADDR_TRANS_SET_GUARD_SUBPAGE... ");
- struct cap_addr_trans cap_addr_trans;
+ struct vg_cap_addr_trans cap_addr_trans;
bool r;
int subpage_bits;
@@ -45,14 +45,14 @@ main (int argc, char *argv[])
memset (&cap_addr_trans, 0, sizeof (cap_addr_trans));
- r = CAP_ADDR_TRANS_SET_SUBPAGE (&cap_addr_trans, 0, subpages);
+ r = VG_CAP_ADDR_TRANS_SET_SUBPAGE (&cap_addr_trans, 0, subpages);
assert (r == (subpage_bits <= 8));
if (subpage_bits >= 8)
continue;
- assert (CAP_ADDR_TRANS_SUBPAGES (cap_addr_trans) == subpages);
- assert (CAP_ADDR_TRANS_SUBPAGE_SIZE (cap_addr_trans) == subpage_size);
- assert (CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (cap_addr_trans)
+ assert (VG_CAP_ADDR_TRANS_SUBPAGES (cap_addr_trans) == subpages);
+ assert (VG_CAP_ADDR_TRANS_SUBPAGE_SIZE (cap_addr_trans) == subpage_size);
+ assert (VG_CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (cap_addr_trans)
== subpage_size_log2);
int gdepth;
@@ -62,15 +62,15 @@ main (int argc, char *argv[])
for (guard_bits = 0; guard_bits < sizeof (uintptr_t) * 8; guard_bits ++)
{
int guard = (1 << guard_bits) - 1;
- r = CAP_ADDR_TRANS_SET_GUARD (&cap_addr_trans, guard, gdepth);
+ r = VG_CAP_ADDR_TRANS_SET_GUARD (&cap_addr_trans, guard, gdepth);
if (guard_bits <= gdepth
&& (guard_bits + subpage_bits
- <= CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS))
+ <= VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS))
{
assert (r);
- assert (CAP_ADDR_TRANS_GUARD_BITS (cap_addr_trans)
+ assert (VG_CAP_ADDR_TRANS_GUARD_BITS (cap_addr_trans)
== gdepth);
- assert (CAP_ADDR_TRANS_GUARD (cap_addr_trans) == guard);
+ assert (VG_CAP_ADDR_TRANS_GUARD (cap_addr_trans) == guard);
}
else
assert (! r);
diff --git a/libviengoos/t-addr.c b/libviengoos/t-addr.c
index a69eb83..2a58887 100644
--- a/libviengoos/t-addr.c
+++ b/libviengoos/t-addr.c
@@ -30,41 +30,41 @@ int output_debug = 0;
int
main (int argc, char *argv[])
{
- addr_t addr;
+ vg_addr_t addr;
int i, j;
- printf ("Checking ADDR... ");
- for (i = 0; i < ADDR_BITS; i ++)
+ printf ("Checking VG_ADDR... ");
+ for (i = 0; i < VG_ADDR_BITS; i ++)
{
- addr = ADDR (1ULL << i, ADDR_BITS - i);
+ addr = VG_ADDR (1ULL << i, VG_ADDR_BITS - i);
debug (1, "%llx/%d =? %llx/%d\n",
- 1ULL << i, ADDR_BITS - i,
- addr_prefix (addr), addr_depth (addr));
- assert (addr_depth (addr) == ADDR_BITS - i);
- assert (addr_prefix (addr) == 1ull << i);
+ 1ULL << i, VG_ADDR_BITS - i,
+ vg_addr_prefix (addr), vg_addr_depth (addr));
+ assert (vg_addr_depth (addr) == VG_ADDR_BITS - i);
+ assert (vg_addr_prefix (addr) == 1ull << i);
}
printf ("ok.\n");
- printf ("Checking addr_extend... ");
- addr = ADDR (0, 0);
- for (i = 1; i < ADDR_BITS; i ++)
+ printf ("Checking vg_addr_extend... ");
+ addr = VG_ADDR (0, 0);
+ for (i = 1; i < VG_ADDR_BITS; i ++)
{
- addr = addr_extend (addr, 1, 1);
- assert (addr_depth (addr) == i);
- assert (vg_msb64 (addr_prefix (addr)) == ADDR_BITS);
- assert (vg_lsb64 (addr_prefix (addr)) == ADDR_BITS - i + 1);
+ addr = vg_addr_extend (addr, 1, 1);
+ assert (vg_addr_depth (addr) == i);
+ assert (vg_msb64 (vg_addr_prefix (addr)) == VG_ADDR_BITS);
+ assert (vg_lsb64 (vg_addr_prefix (addr)) == VG_ADDR_BITS - i + 1);
}
printf ("ok.\n");
- printf ("Checking addr_extract... ");
- addr = ADDR (0, 0);
- for (i = 0; i < ADDR_BITS; i ++)
+ printf ("Checking vg_addr_extract... ");
+ addr = VG_ADDR (0, 0);
+ for (i = 0; i < VG_ADDR_BITS; i ++)
{
- addr = ADDR (((1ULL << i) - 1) << (ADDR_BITS - i), i);
+ addr = VG_ADDR (((1ULL << i) - 1) << (VG_ADDR_BITS - i), i);
for (j = 0; j <= i; j ++)
{
- l4_uint64_t idx = addr_extract (addr, j);
+ l4_uint64_t idx = vg_addr_extract (addr, j);
assert (idx == (1ULL << j) - 1);
}
}
diff --git a/libviengoos/t-rpc.c b/libviengoos/t-rpc.c
index 0f40fe9..c2d9614 100644
--- a/libviengoos/t-rpc.c
+++ b/libviengoos/t-rpc.c
@@ -53,14 +53,14 @@ main (int argc, char *argv[])
struct vg_message *msg;
-#define REPLY ADDR (0x1000, ADDR_BITS - 12)
- addr_t reply = REPLY;
+#define REPLY VG_ADDR (0x1000, VG_ADDR_BITS - 12)
+ vg_addr_t reply = REPLY;
msg = malloc (sizeof (*msg));
rpc_noargs_send_marshal (msg, REPLY);
err = rpc_noargs_send_unmarshal (msg, &reply);
assert (! err);
- assert (ADDR_EQ (reply, REPLY));
+ assert (VG_ADDR_EQ (reply, REPLY));
free (msg);
msg = malloc (sizeof (*msg));
@@ -79,7 +79,7 @@ main (int argc, char *argv[])
err = rpc_onein_send_unmarshal (msg, &arg_out, &reply);
assert (! err);
assert (arg_out == VALUE);
- assert (ADDR_EQ (reply, REPLY));
+ assert (VG_ADDR_EQ (reply, REPLY));
free (msg);
msg = malloc (sizeof (*msg));
@@ -92,7 +92,7 @@ main (int argc, char *argv[])
rpc_oneout_send_marshal (msg, REPLY);
err = rpc_oneout_send_unmarshal (msg, &reply);
assert (! err);
- assert (ADDR_EQ (reply, REPLY));
+ assert (VG_ADDR_EQ (reply, REPLY));
free (msg);
msg = malloc (sizeof (*msg));
@@ -120,7 +120,7 @@ main (int argc, char *argv[])
assert (foo_out.a == foo.a);
assert (foo_out.b == foo.b);
assert (p_out == true);
- assert (ADDR_EQ (reply, REPLY));
+ assert (VG_ADDR_EQ (reply, REPLY));
free (msg);
msg = malloc (sizeof (*msg));
@@ -133,7 +133,7 @@ main (int argc, char *argv[])
rpc_onlyout_send_marshal (msg, REPLY);
err = rpc_onlyout_send_unmarshal (msg, &reply);
assert (! err);
- assert (ADDR_EQ (reply, REPLY));
+ assert (VG_ADDR_EQ (reply, REPLY));
free (msg);
msg = malloc (sizeof (*msg));
@@ -155,7 +155,7 @@ main (int argc, char *argv[])
assert (! err);
assert (arg_out == arg);
assert (idx_out == 456789);
- assert (ADDR_EQ (reply, REPLY));
+ assert (VG_ADDR_EQ (reply, REPLY));
free (msg);
msg = malloc (sizeof (*msg));
@@ -170,12 +170,12 @@ main (int argc, char *argv[])
free (msg);
msg = malloc (sizeof (*msg));
- rpc_caps_send_marshal (msg, 54, ADDR (1, ADDR_BITS), foo, REPLY);
- addr_t addr;
+ rpc_caps_send_marshal (msg, 54, VG_ADDR (1, VG_ADDR_BITS), foo, REPLY);
+ vg_addr_t addr;
err = rpc_caps_send_unmarshal (msg, &i_out, &addr, &foo_out, &reply);
assert (! err);
assert (i_out == 54);
- assert (ADDR_EQ (addr, ADDR (1, ADDR_BITS)));
+ assert (VG_ADDR_EQ (addr, VG_ADDR (1, VG_ADDR_BITS)));
assert (foo_out.a == foo.a);
assert (foo_out.b == foo.b);
free (msg);
diff --git a/libviengoos/viengoos/activity.h b/libviengoos/viengoos/activity.h
index a180527..8f5375b 100644
--- a/libviengoos/viengoos/activity.h
+++ b/libviengoos/viengoos/activity.h
@@ -35,9 +35,9 @@ struct activity_memory_policy
uint16_t weight;
};
-#define ACTIVITY_MEMORY_POLICY(__amp_priority, __amp_weight) \
+#define VG_ACTIVITY_MEMORY_POLICY(__amp_priority, __amp_weight) \
(struct activity_memory_policy) { __amp_priority, __amp_weight }
-#define ACTIVITY_MEMORY_POLICY_VOID ACTIVITY_MEMORY_POLICY(0, 0)
+#define VG_ACTIVITY_MEMORY_POLICY_VOID VG_ACTIVITY_MEMORY_POLICY(0, 0)
struct activity_policy
{
@@ -158,11 +158,11 @@ struct activity_stats
uint32_t saved;
};
-#define ACTIVITY_POLICY(__ap_sibling_rel, __ap_child_rel, __ap_storage) \
+#define VG_ACTIVITY_POLICY(__ap_sibling_rel, __ap_child_rel, __ap_storage) \
(struct activity_policy) { __ap_sibling_rel, __ap_child_rel, __ap_storage }
-#define ACTIVITY_POLICY_VOID \
- ACTIVITY_POLICY(ACTIVITY_MEMORY_POLICY_VOID, \
- ACTIVITY_MEMORY_POLICY_VOID, \
+#define VG_ACTIVITY_POLICY_VOID \
+ VG_ACTIVITY_POLICY(VG_ACTIVITY_MEMORY_POLICY_VOID, \
+ VG_ACTIVITY_MEMORY_POLICY_VOID, \
0)
#define RPC_STUB_PREFIX rm
@@ -172,17 +172,17 @@ struct activity_stats
enum
{
- ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET = 1 << 0,
- ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET = 1 << 1,
- ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET = 1 << 2,
- ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET = 1 << 3,
- ACTIVITY_POLICY_STORAGE_SET = 1 << 4,
+ VG_ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET = 1 << 0,
+ VG_ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET = 1 << 1,
+ VG_ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET = 1 << 2,
+ VG_ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET = 1 << 3,
+ VG_ACTIVITY_POLICY_STORAGE_SET = 1 << 4,
- ACTIVITY_POLICY_CHILD_REL_SET = (ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET
- | ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET),
+ VG_ACTIVITY_POLICY_CHILD_REL_SET = (VG_ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET
+ | VG_ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET),
- ACTIVITY_POLICY_SIBLING_REL_SET = (ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET
- | ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET),
+ VG_ACTIVITY_POLICY_SIBLING_REL_SET = (VG_ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET
+ | VG_ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET),
};
/* Get ACTIVITY's policy and set according to FLAGS and IN. */
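Client code gains only the prefix. A sketch of building a policy with
the renamed macros (the priority and weight values are arbitrary):

  struct activity_policy policy
    = VG_ACTIVITY_POLICY (VG_ACTIVITY_MEMORY_POLICY (1, 100), /* siblings */
                          VG_ACTIVITY_MEMORY_POLICY_VOID,     /* children */
                          0);                                 /* storage */
  int flags = VG_ACTIVITY_POLICY_SIBLING_REL_SET;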
diff --git a/libviengoos/viengoos/addr-trans.h b/libviengoos/viengoos/addr-trans.h
index b48f132..e7ca995 100644
--- a/libviengoos/viengoos/addr-trans.h
+++ b/libviengoos/viengoos/addr-trans.h
@@ -30,11 +30,11 @@
how the page table walker translates bits when passing through this
capability. */
-#define CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS 22
-#define CAP_ADDR_TRANS_SUBPAGES_BITS 4
-#define CAP_ADDR_TRANS_GDEPTH_BITS 6
+#define VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS 22
+#define VG_CAP_ADDR_TRANS_SUBPAGES_BITS 4
+#define VG_CAP_ADDR_TRANS_GDEPTH_BITS 6
-struct cap_addr_trans
+struct vg_cap_addr_trans
{
union
{
@@ -53,61 +53,61 @@ struct cap_addr_trans
only valid offset is 0) and 21 possible guard bits. If
SUBPAGES_LOG2 is 0, there are 256 subpages, 8 subpage bits and a
maximum of 21-8=15 guard bits. */
- uint32_t guard_subpage: CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS;
+ uint32_t guard_subpage: VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS;
/* The log2 of the subpages. The size of a subpage is thus 2^(8 -
SUBPAGES_LOG2). Values of SUBPAGES_LOG2 other than 0 are only
- allowed for cap pages. */
- uint32_t subpages_log2: CAP_ADDR_TRANS_SUBPAGES_BITS;
+ allowed for vg_cap pages. */
+ uint32_t subpages_log2: VG_CAP_ADDR_TRANS_SUBPAGES_BITS;
/* Number of significant guard bits. The value of the GUARD is zero
extended if GDEPTH is greater than the number of available guard
bits. */
- uint32_t gdepth: CAP_ADDR_TRANS_GDEPTH_BITS;
+ uint32_t gdepth: VG_CAP_ADDR_TRANS_GDEPTH_BITS;
};
uint32_t raw;
};
};
-#define CAP_ADDR_TRANS_INIT { { .raw = 0 } }
-#define CAP_ADDR_TRANS_VOID (struct cap_addr_trans) { { .raw = 0 } }
+#define VG_CAP_ADDR_TRANS_INIT { { .raw = 0 } }
+#define VG_CAP_ADDR_TRANS_VOID (struct vg_cap_addr_trans) { { .raw = 0 } }
/* The log2 number of subpages. */
-#define CAP_ADDR_TRANS_SUBPAGES_LOG2(cap_addr_trans_) \
+#define VG_CAP_ADDR_TRANS_SUBPAGES_LOG2(cap_addr_trans_) \
((cap_addr_trans_).subpages_log2)
/* The number of subpages. */
-#define CAP_ADDR_TRANS_SUBPAGES(cap_addr_trans_) \
- (1 << CAP_ADDR_TRANS_SUBPAGES_LOG2((cap_addr_trans_)))
+#define VG_CAP_ADDR_TRANS_SUBPAGES(cap_addr_trans_) \
+ (1 << VG_CAP_ADDR_TRANS_SUBPAGES_LOG2((cap_addr_trans_)))
/* The designated subpage. */
-#define CAP_ADDR_TRANS_SUBPAGE(cap_addr_trans_) \
+#define VG_CAP_ADDR_TRANS_SUBPAGE(cap_addr_trans_) \
((cap_addr_trans_).guard_subpage \
- & (CAP_ADDR_TRANS_SUBPAGES ((cap_addr_trans_)) - 1))
+ & (VG_CAP_ADDR_TRANS_SUBPAGES ((cap_addr_trans_)) - 1))
/* The log2 of the size of the named subpage (in capability
units). */
-#define CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2(cap_addr_trans_) \
+#define VG_CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2(cap_addr_trans_) \
(8 - (cap_addr_trans_).subpages_log2)
/* The number of caps addressed by this capability. */
-#define CAP_ADDR_TRANS_SUBPAGE_SIZE(cap_addr_trans_) \
- (1 << CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 ((cap_addr_trans_)))
+#define VG_CAP_ADDR_TRANS_SUBPAGE_SIZE(cap_addr_trans_) \
+ (1 << VG_CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 ((cap_addr_trans_)))
/* The offset in capability units (with respect to the start of the
capability page) of the first capability in the designated
sub-page. */
-#define CAP_ADDR_TRANS_SUBPAGE_OFFSET(cap_addr_trans_) \
- (CAP_ADDR_TRANS_SUBPAGE ((cap_addr_trans_)) \
- * CAP_ADDR_TRANS_SUBPAGE_SIZE ((cap_addr_trans_)))
+#define VG_CAP_ADDR_TRANS_SUBPAGE_OFFSET(cap_addr_trans_) \
+ (VG_CAP_ADDR_TRANS_SUBPAGE ((cap_addr_trans_)) \
+ * VG_CAP_ADDR_TRANS_SUBPAGE_SIZE ((cap_addr_trans_)))
/* The number of guard bits. */
-#define CAP_ADDR_TRANS_GUARD_BITS(cap_addr_trans_) ((cap_addr_trans_).gdepth)
+#define VG_CAP_ADDR_TRANS_GUARD_BITS(cap_addr_trans_) ((cap_addr_trans_).gdepth)
/* The value of the guard. */
-#define CAP_ADDR_TRANS_GUARD(cap_addr_trans_) \
+#define VG_CAP_ADDR_TRANS_GUARD(cap_addr_trans_) \
((uint64_t) ((cap_addr_trans_).guard_subpage \
>> (cap_addr_trans_).subpages_log2))
-#define CATSGST_(test_, format, args...) \
+#define VG_CATSGST_(test_, format, args...) \
if (! (test_)) \
{ \
r_ = false; \
@@ -116,29 +116,29 @@ struct cap_addr_trans
/* Set CAP_ADDR_TRANS_P_'s guard and the subpage. Returns true on success
(parameters valid), false otherwise. */
-#define CAP_ADDR_TRANS_SET_GUARD_SUBPAGE(cap_addr_trans_p_, guard_, gdepth_, \
+#define VG_CAP_ADDR_TRANS_SET_GUARD_SUBPAGE(cap_addr_trans_p_, guard_, gdepth_, \
subpage_, subpages_) \
({ bool r_ = true; \
/* There must be at least 1 subpage. */ \
- CATSGST_ (((subpages_) > 0), \
+ VG_CATSGST_ (((subpages_) > 0), \
"subpages_ (%d) must be at least 1\n", (subpages_)); \
- CATSGST_ (((subpages_) & ((subpages_) - 1)) == 0, \
+ VG_CATSGST_ (((subpages_) & ((subpages_) - 1)) == 0, \
"SUBPAGES_ (%d) must be a power of 2\n", (subpages_)); \
int subpages_log2_ = vg_msb ((subpages_)) - 1; \
- CATSGST_ (subpages_log2_ <= 8, \
+ VG_CATSGST_ (subpages_log2_ <= 8, \
"maximum subpages is 256 (%d)\n", (subpages_)); \
- CATSGST_ (0 <= (subpage_) && (subpage_) < (subpages_), \
+ VG_CATSGST_ (0 <= (subpage_) && (subpage_) < (subpages_), \
"subpage (%d) must be between 0 and SUBPAGES_ (%d) - 1\n", \
(subpage_), (subpages_)); \
\
/* The number of required guard bits. */ \
int gbits_ = vg_msb64 ((guard_)); \
- CATSGST_ (gbits_ <= (gdepth_), \
+ VG_CATSGST_ (gbits_ <= (gdepth_), \
"Significant guard bits (%d) must be less than depth (%d)\n", \
gbits_, (gdepth_)); \
- CATSGST_ (gbits_ + subpages_log2_ <= CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS, \
+ VG_CATSGST_ (gbits_ + subpages_log2_ <= VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS, \
"Significant guard bits (%d) plus subpage bits (%d) > %d\n", \
- gbits_, subpages_log2_, CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS); \
+ gbits_, subpages_log2_, VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS); \
\
if (r_) \
{ \
@@ -152,38 +152,38 @@ struct cap_addr_trans
/* Set *CAP_ADDR_TRANS_P_'s guard. Returns true on success (parameters
valid), false otherwise. */
-#define CAP_ADDR_TRANS_SET_GUARD(cap_addr_trans_p_, guard_, gdepth_) \
- ({ int subpage_ = CAP_ADDR_TRANS_SUBPAGE (*(cap_addr_trans_p_)); \
- int subpages_ = CAP_ADDR_TRANS_SUBPAGES (*(cap_addr_trans_p_)); \
- CAP_ADDR_TRANS_SET_GUARD_SUBPAGE ((cap_addr_trans_p_), \
+#define VG_CAP_ADDR_TRANS_SET_GUARD(cap_addr_trans_p_, guard_, gdepth_) \
+ ({ int subpage_ = VG_CAP_ADDR_TRANS_SUBPAGE (*(cap_addr_trans_p_)); \
+ int subpages_ = VG_CAP_ADDR_TRANS_SUBPAGES (*(cap_addr_trans_p_)); \
+ VG_CAP_ADDR_TRANS_SET_GUARD_SUBPAGE ((cap_addr_trans_p_), \
(guard_), (gdepth_), \
(subpage_), (subpages_)); \
})
/* Set *CAP_ADDR_TRANS_P_'s subpage. Returns true on success (parameters
valid), false otherwise. */
-#define CAP_ADDR_TRANS_SET_SUBPAGE(cap_addr_trans_p_, subpage_, subpages_) \
- ({ int gdepth_ = CAP_ADDR_TRANS_GUARD_BITS (*(cap_addr_trans_p_)); \
- int guard_ = CAP_ADDR_TRANS_GUARD (*(cap_addr_trans_p_)); \
- CAP_ADDR_TRANS_SET_GUARD_SUBPAGE ((cap_addr_trans_p_), \
+#define VG_CAP_ADDR_TRANS_SET_SUBPAGE(cap_addr_trans_p_, subpage_, subpages_) \
+ ({ int gdepth_ = VG_CAP_ADDR_TRANS_GUARD_BITS (*(cap_addr_trans_p_)); \
+ int guard_ = VG_CAP_ADDR_TRANS_GUARD (*(cap_addr_trans_p_)); \
+ VG_CAP_ADDR_TRANS_SET_GUARD_SUBPAGE ((cap_addr_trans_p_), \
(guard_), (gdepth_), \
(subpage_), (subpages_)); \
})
/* Returns whether the capability address CAP_ADDR_TRANS is well-formed. */
-#define CAP_ADDR_TRANS_VALID(cap_addr_trans) \
+#define VG_CAP_ADDR_TRANS_VALID(vg_cap_addr_trans) \
({ bool r_ = true; \
- CATSGST_ (CAP_ADDR_TRANS_GUARD_BITS (cap_addr_trans) <= WORDSIZE, \
+ VG_CATSGST_ (VG_CAP_ADDR_TRANS_GUARD_BITS (vg_cap_addr_trans) <= WORDSIZE, \
"Invalid guard depth (%d)", \
- CAP_ADDR_TRANS_GUARD_BITS (cap_addr_trans)); \
- CATSGST_ (CAP_ADDR_TRANS_SUBPAGES_LOG2 (cap_addr_trans) <= 8, \
+ VG_CAP_ADDR_TRANS_GUARD_BITS (vg_cap_addr_trans)); \
+ VG_CATSGST_ (VG_CAP_ADDR_TRANS_SUBPAGES_LOG2 (vg_cap_addr_trans) <= 8, \
"Invalid number of subpages (%d)", \
- CAP_ADDR_TRANS_SUBPAGES (cap_addr_trans)); \
- CATSGST_ (vg_msb (CAP_ADDR_TRANS_GUARD (cap_addr_trans)) \
- <= CAP_ADDR_TRANS_GUARD_BITS (cap_addr_trans), \
+ VG_CAP_ADDR_TRANS_SUBPAGES (vg_cap_addr_trans)); \
+ VG_CATSGST_ (vg_msb (VG_CAP_ADDR_TRANS_GUARD (vg_cap_addr_trans)) \
+ <= VG_CAP_ADDR_TRANS_GUARD_BITS (vg_cap_addr_trans), \
"Significant guard bits (%d) exceeds guard depth (%d)", \
- vg_msb (CAP_ADDR_TRANS_GUARD (cap_addr_trans)), \
- CAP_ADDR_TRANS_GUARD_BITS (cap_addr_trans)); \
+ vg_msb (VG_CAP_ADDR_TRANS_GUARD (vg_cap_addr_trans)), \
+ VG_CAP_ADDR_TRANS_GUARD_BITS (vg_cap_addr_trans)); \
r_; \
})
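The setters and accessors pair up as before, only under the new
names. A sketch in the style of t-addr-trans.c above, encoding a
2-bit guard of 0x3 over subpage 1 of 16:

  struct vg_cap_addr_trans t = VG_CAP_ADDR_TRANS_VOID;
  bool ok = VG_CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&t, 0x3, 2, 1, 16);
  assert (ok);
  assert (VG_CAP_ADDR_TRANS_GUARD (t) == 0x3);
  assert (VG_CAP_ADDR_TRANS_GUARD_BITS (t) == 2);
  assert (VG_CAP_ADDR_TRANS_SUBPAGE (t) == 1);
  assert (VG_CAP_ADDR_TRANS_SUBPAGES (t) == 16);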
diff --git a/libviengoos/viengoos/addr.h b/libviengoos/viengoos/addr.h
index 8fa59be..d3c9009 100644
--- a/libviengoos/viengoos/addr.h
+++ b/libviengoos/viengoos/addr.h
@@ -49,131 +49,131 @@ struct addr
{
uint64_t raw;
};
-#define ADDR_BITS 63
+#define VG_ADDR_BITS 63
/* Client-side capability handle. */
-typedef struct addr addr_t;
+typedef struct addr vg_addr_t;
-#define ADDR_FMT "%llx/%d"
-#define ADDR_PRINTF(addr_) addr_prefix ((addr_)), addr_depth ((addr_))
+#define VG_ADDR_FMT "%llx/%d"
+#define VG_ADDR_PRINTF(addr_) vg_addr_prefix ((addr_)), vg_addr_depth ((addr_))
/* Create an address given a prefix and a depth. */
-#define ADDR(prefix_, depth_) \
+#define VG_ADDR(prefix_, depth_) \
({ \
uint64_t p_ = (prefix_); \
uint64_t d_ = (depth_); \
- assert (0 <= d_ && d_ <= ADDR_BITS); \
- assert ((p_ & ((1 << (ADDR_BITS - d_)) - 1)) == 0); \
- assert (p_ < (1ULL << ADDR_BITS)); \
- (struct addr) { (p_ << 1ULL) | (1ULL << (ADDR_BITS - d_)) }; \
+ assert (0 <= d_ && d_ <= VG_ADDR_BITS); \
+ assert ((p_ & ((1 << (VG_ADDR_BITS - d_)) - 1)) == 0); \
+ assert (p_ < (1ULL << VG_ADDR_BITS)); \
+ (struct addr) { (p_ << 1ULL) | (1ULL << (VG_ADDR_BITS - d_)) }; \
})
/* Create an address given a prefix and a depth. Appropriate for use
as an initializer. */
-#define ADDR_INIT(prefix_, depth_) \
- { .raw = ((((prefix_) << 1) | 1) << (ADDR_BITS - (depth_))) }
+#define VG_ADDR_INIT(prefix_, depth_) \
+ { .raw = ((((prefix_) << 1) | 1) << (VG_ADDR_BITS - (depth_))) }
-#define ADDR_VOID ((struct addr) { 0ULL })
-#define ADDR_EQ(a, b) (a.raw == b.raw)
-#define ADDR_IS_VOID(a) (ADDR_EQ (a, ADDR_VOID))
+#define VG_ADDR_VOID ((struct addr) { 0ULL })
+#define VG_ADDR_EQ(a, b) (a.raw == b.raw)
+#define VG_ADDR_IS_VOID(a) (VG_ADDR_EQ (a, VG_ADDR_VOID))
/* Return ADDR_'s depth. */
static inline int
-addr_depth (addr_t addr)
+vg_addr_depth (vg_addr_t addr)
{
- return ADDR_BITS - (vg_lsb64 (addr.raw) - 1);
+ return VG_ADDR_BITS - (vg_lsb64 (addr.raw) - 1);
}
-/* Return ADDR's prefix. */
+/* Return VG_ADDR's prefix. */
static inline uint64_t
-addr_prefix (addr_t addr)
+vg_addr_prefix (vg_addr_t addr)
{
/* (Clear the boundary bit and shift right 1.) */
- return (addr.raw & ~(1ULL << (ADDR_BITS - addr_depth (addr)))) >> 1;
+ return (addr.raw & ~(1ULL << (VG_ADDR_BITS - vg_addr_depth (addr)))) >> 1;
}
-/* Extend the address ADDR by concatenating the lowest DEPTH bits of
+/* Extend the address VG_ADDR by concatenating the lowest DEPTH bits of
PREFIX. */
#if 0
-static inline addr_t
-addr_extend (addr_t addr, uint64_t prefix, int depth)
+static inline vg_addr_t
+vg_addr_extend (vg_addr_t addr, uint64_t prefix, int depth)
{
assertx (depth >= 0, "depth: %d", depth);
- assertx (addr_depth (addr) + depth <= ADDR_BITS,
- "addr: " ADDR_FMT "; depth: %d", ADDR_PRINTF (addr), depth);
+ assertx (vg_addr_depth (addr) + depth <= VG_ADDR_BITS,
+ "addr: " VG_ADDR_FMT "; depth: %d", VG_ADDR_PRINTF (addr), depth);
assertx (prefix < (1ULL << depth),
"prefix: %llx; depth: %lld", prefix, 1ULL << depth);
- return ADDR (addr_prefix (addr)
- | (prefix << (ADDR_BITS - addr_depth (addr) - depth)),
- addr_depth (addr) + depth);
+ return VG_ADDR (vg_addr_prefix (addr)
+ | (prefix << (VG_ADDR_BITS - vg_addr_depth (addr) - depth)),
+ vg_addr_depth (addr) + depth);
}
#else
-#define addr_extend(addr_, prefix_, depth_) \
+#define vg_addr_extend(addr_, prefix_, depth_) \
({ \
- addr_t a__ = (addr_); \
+ vg_addr_t a__ = (addr_); \
uint64_t p__ = (prefix_); \
int d__ = (depth_); \
assertx (d__ >= 0, "depth: %d", d__); \
- assertx (addr_depth ((a__)) + (d__) <= ADDR_BITS, \
- "addr: " ADDR_FMT "; depth: %d", ADDR_PRINTF (a__), d__); \
+ assertx (vg_addr_depth ((a__)) + (d__) <= VG_ADDR_BITS, \
+ "addr: " VG_ADDR_FMT "; depth: %d", VG_ADDR_PRINTF (a__), d__); \
assertx (p__ < (1ULL << d__), \
"prefix: %llx; depth: %lld", p__, 1ULL << d__); \
- ADDR (addr_prefix ((a__)) \
- | ((p__) << (ADDR_BITS - addr_depth ((a__)) - (d__))), \
- addr_depth ((a__)) + (d__)); \
+ VG_ADDR (vg_addr_prefix ((a__)) \
+ | ((p__) << (VG_ADDR_BITS - vg_addr_depth ((a__)) - (d__))), \
+ vg_addr_depth ((a__)) + (d__)); \
})
#endif
-/* Decrease the depth of ADDR by DEPTH. */
-static inline addr_t
-addr_chop (addr_t addr, int depth)
+/* Decrease the depth of VG_ADDR by DEPTH. */
+static inline vg_addr_t
+vg_addr_chop (vg_addr_t addr, int depth)
{
- int d = addr_depth (addr) - depth;
+ int d = vg_addr_depth (addr) - depth;
assert (d >= 0);
- return ADDR (addr_prefix (addr) & ~((1ULL << (ADDR_BITS - d)) - 1), d);
+ return VG_ADDR (vg_addr_prefix (addr) & ~((1ULL << (VG_ADDR_BITS - d)) - 1), d);
}
-/* Return the last WIDTH bits of address's ADDR prefix. */
+/* Return the last WIDTH bits of address's VG_ADDR prefix. */
static inline uint64_t
-addr_extract (addr_t addr, int width)
+vg_addr_extract (vg_addr_t addr, int width)
{
- assert (width <= addr_depth (addr));
+ assert (width <= vg_addr_depth (addr));
- return (addr_prefix (addr) >> (ADDR_BITS - addr_depth (addr)))
+ return (vg_addr_prefix (addr) >> (VG_ADDR_BITS - vg_addr_depth (addr)))
& ((1ULL << width) - 1);
}
/* Convert an address to a pointer. The address must name an object
mapped in the part of the address space accessible to machine
data and instruction accesses. */
-#define ADDR_TO_PTR(addr_) \
+#define VG_ADDR_TO_PTR(addr_) \
({ \
- assert (addr_prefix ((addr_)) < ((uintptr_t) -1)); \
- assert (addr_depth ((addr_)) == ADDR_BITS); \
- (void *) (uintptr_t) addr_prefix ((addr_)); \
+ assert (vg_addr_prefix ((addr_)) < ((uintptr_t) -1)); \
+ assert (vg_addr_depth ((addr_)) == VG_ADDR_BITS); \
+ (void *) (uintptr_t) vg_addr_prefix ((addr_)); \
})
/* Convert a pointer to an address. */
-#define PTR_TO_ADDR(ptr_) \
- (ADDR ((uintptr_t) (ptr_), ADDR_BITS))
+#define VG_PTR_TO_ADDR(ptr_) \
+ (VG_ADDR ((uintptr_t) (ptr_), VG_ADDR_BITS))
/* Return the address of the page that would contain pointer PTR_. */
-#define PTR_TO_PAGE(ptr_) \
- addr_chop (ADDR ((uintptr_t) (ptr_), ADDR_BITS), PAGESIZE_LOG2)
+#define VG_PTR_TO_PAGE(ptr_) \
+ vg_addr_chop (VG_ADDR ((uintptr_t) (ptr_), VG_ADDR_BITS), PAGESIZE_LOG2)
-static inline addr_t
-addr_add (addr_t addr, uint64_t count)
+static inline vg_addr_t
+vg_addr_add (vg_addr_t addr, uint64_t count)
{
- int w = ADDR_BITS - addr_depth (addr);
+ int w = VG_ADDR_BITS - vg_addr_depth (addr);
- return ADDR (addr_prefix (addr) + (count << w),
- addr_depth (addr));
+ return VG_ADDR (vg_addr_prefix (addr) + (count << w),
+ vg_addr_depth (addr));
}
-static inline addr_t
-addr_sub (addr_t addr, uint64_t count)
+static inline vg_addr_t
+vg_addr_sub (vg_addr_t addr, uint64_t count)
{
- return addr_add (addr, - count);
+ return vg_addr_add (addr, - count);
}
#endif
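A worked example of the encoding, using only the macros above: a
page-aligned pointer round-trips through vg_addr_t unchanged.

  vg_addr_t page = VG_PTR_TO_PAGE ((void *) 0x8000);
  assert (vg_addr_depth (page) == VG_ADDR_BITS - PAGESIZE_LOG2);
  assert (vg_addr_prefix (page) == 0x8000);

  /* Re-extend by PAGESIZE_LOG2 zero bits to recover the pointer.  */
  void *p = VG_ADDR_TO_PTR (vg_addr_extend (page, 0, PAGESIZE_LOG2));
  assert (p == (void *) 0x8000);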
diff --git a/libviengoos/viengoos/cap.h b/libviengoos/viengoos/cap.h
index c15d66f..32e475a 100644
--- a/libviengoos/viengoos/cap.h
+++ b/libviengoos/viengoos/cap.h
@@ -1,4 +1,4 @@
-/* cap.h - Capability definitions.
+/* vg_cap.h - Capability definitions.
Copyright (C) 2007, 2008 Free Software Foundation, Inc.
Written by Neal H. Walfield <neal@gnu.org>.
@@ -37,82 +37,82 @@
used to control how the designated object should be managed. */
/* The types of objects designated by capabilities. */
-enum cap_type
+enum vg_cap_type
{
-#define CAP_TYPE_MIN cap_void
- cap_void,
- cap_page,
- cap_rpage,
- cap_cappage,
- cap_rcappage,
- cap_folio,
- cap_activity,
- cap_activity_control,
- cap_thread,
- cap_messenger,
- cap_rmessenger,
- cap_type_count,
-#define CAP_TYPE_MAX (cap_type_count - 1)
+#define VG_CAP_TYPE_MIN vg_cap_void
+ vg_cap_void,
+ vg_cap_page,
+ vg_cap_rpage,
+ vg_cap_cappage,
+ vg_cap_rcappage,
+ vg_cap_folio,
+ vg_cap_activity,
+ vg_cap_activity_control,
+ vg_cap_thread,
+ vg_cap_messenger,
+ vg_cap_rmessenger,
+ vg_cap_type_count,
+#define VG_CAP_TYPE_MAX (vg_cap_type_count - 1)
};
static inline const char *
-cap_type_string (enum cap_type type)
+vg_cap_type_string (enum vg_cap_type type)
{
switch (type)
{
- case cap_void:
+ case vg_cap_void:
return "void";
- case cap_page:
+ case vg_cap_page:
return "page";
- case cap_rpage:
+ case vg_cap_rpage:
return "rpage";
- case cap_cappage:
+ case vg_cap_cappage:
return "cappage";
- case cap_rcappage:
+ case vg_cap_rcappage:
return "rcappage";
- case cap_folio:
+ case vg_cap_folio:
return "folio";
- case cap_activity:
+ case vg_cap_activity:
return "activity";
- case cap_activity_control:
+ case vg_cap_activity_control:
return "activity_control";
- case cap_thread:
+ case vg_cap_thread:
return "thread";
- case cap_messenger:
+ case vg_cap_messenger:
return "messenger";
- case cap_rmessenger:
+ case vg_cap_rmessenger:
return "rmessenger";
default:
- return "unknown cap type";
+ return "unknown vg_cap type";
};
}
/* Return whether two types are compatible in the sense that two caps
with the given types can designate the same object. */
static inline bool
-cap_types_compatible (enum cap_type a, enum cap_type b)
+vg_cap_types_compatible (enum vg_cap_type a, enum vg_cap_type b)
{
if (a == b)
return true;
- if (a == cap_page && b == cap_rpage)
+ if (a == vg_cap_page && b == vg_cap_rpage)
return true;
- if (a == cap_rpage && b == cap_page)
+ if (a == vg_cap_rpage && b == vg_cap_page)
return true;
- if (a == cap_cappage && b == cap_rcappage)
+ if (a == vg_cap_cappage && b == vg_cap_rcappage)
return true;
- if (a == cap_rcappage && b == cap_cappage)
+ if (a == vg_cap_rcappage && b == vg_cap_cappage)
return true;
- if (a == cap_activity && b == cap_activity_control)
+ if (a == vg_cap_activity && b == vg_cap_activity_control)
return true;
- if (a == cap_activity_control && b == cap_activity)
+ if (a == vg_cap_activity_control && b == vg_cap_activity)
return true;
- if (a == cap_messenger && b == cap_rmessenger)
+ if (a == vg_cap_messenger && b == vg_cap_rmessenger)
return true;
- if (a == cap_rmessenger && b == cap_messenger)
+ if (a == vg_cap_rmessenger && b == vg_cap_messenger)
return true;
return false;
@@ -120,14 +120,14 @@ cap_types_compatible (enum cap_type a, enum cap_type b)
/* Returns whether TYPE corresponds to a weak type. */
static inline bool
-cap_type_weak_p (enum cap_type type)
+vg_cap_type_weak_p (enum vg_cap_type type)
{
switch (type)
{
- case cap_rpage:
- case cap_rcappage:
- case cap_activity:
- case cap_rmessenger:
+ case vg_cap_rpage:
+ case vg_cap_rcappage:
+ case vg_cap_activity:
+ case vg_cap_rmessenger:
return true;
default:
@@ -137,54 +137,54 @@ cap_type_weak_p (enum cap_type type)
/* Returns the weakened type corresponding to TYPE. If type is
already a weak type, returns TYPE. */
-static inline enum cap_type
-cap_type_weaken (enum cap_type type)
+static inline enum vg_cap_type
+vg_cap_type_weaken (enum vg_cap_type type)
{
switch (type)
{
- case cap_page:
- case cap_rpage:
- return cap_rpage;
+ case vg_cap_page:
+ case vg_cap_rpage:
+ return vg_cap_rpage;
- case cap_cappage:
- case cap_rcappage:
- return cap_rcappage;
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
+ return vg_cap_rcappage;
- case cap_activity_control:
- case cap_activity:
- return cap_activity;
+ case vg_cap_activity_control:
+ case vg_cap_activity:
+ return vg_cap_activity;
- case cap_messenger:
- case cap_rmessenger:
- return cap_rmessenger;
+ case vg_cap_messenger:
+ case vg_cap_rmessenger:
+ return vg_cap_rmessenger;
default:
- return cap_void;
+ return vg_cap_void;
}
}
/* Returns the strong type corresponding to TYPE. If type is already
a strong type, returns TYPE. */
-static inline enum cap_type
-cap_type_strengthen (enum cap_type type)
+static inline enum vg_cap_type
+vg_cap_type_strengthen (enum vg_cap_type type)
{
switch (type)
{
- case cap_page:
- case cap_rpage:
- return cap_page;
+ case vg_cap_page:
+ case vg_cap_rpage:
+ return vg_cap_page;
- case cap_cappage:
- case cap_rcappage:
- return cap_cappage;
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
+ return vg_cap_cappage;
- case cap_activity_control:
- case cap_activity:
- return cap_activity_control;
+ case vg_cap_activity_control:
+ case vg_cap_activity:
+ return vg_cap_activity_control;
- case cap_messenger:
- case cap_rmessenger:
- return cap_messenger;
+ case vg_cap_messenger:
+ case vg_cap_rmessenger:
+ return vg_cap_messenger;
default:
return type;
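The weak/strong pairing is unchanged by the rename; only the
identifiers gain the prefix:

  assert (vg_cap_type_weaken (vg_cap_page) == vg_cap_rpage);
  assert (vg_cap_type_strengthen (vg_cap_rpage) == vg_cap_page);
  assert (vg_cap_types_compatible (vg_cap_page, vg_cap_rpage));
  assert (vg_cap_type_weak_p (vg_cap_rpage));
  assert (! vg_cap_type_weak_p (vg_cap_page));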
@@ -195,11 +195,11 @@ cap_type_strengthen (enum cap_type type)
/* The object priority is a signed 7-bit number (-64 -> 63). A lower
numeric value corresponds to a lower priority. */
-#define OBJECT_PRIORITY_BITS 7
-#define OBJECT_PRIORITY_LEVELS (1 << OBJECT_PRIORITY_BITS)
-#define OBJECT_PRIORITY_MIN (-(1 << (OBJECT_PRIORITY_BITS - 1)))
-#define OBJECT_PRIORITY_DEFAULT (0)
-#define OBJECT_PRIORITY_MAX ((1 << (OBJECT_PRIORITY_BITS - 1)) - 1)
+#define VG_OBJECT_PRIORITY_BITS 7
+#define VG_OBJECT_PRIORITY_LEVELS (1 << VG_OBJECT_PRIORITY_BITS)
+#define VG_OBJECT_PRIORITY_MIN (-(1 << (VG_OBJECT_PRIORITY_BITS - 1)))
+#define VG_OBJECT_PRIORITY_DEFAULT (0)
+#define VG_OBJECT_PRIORITY_MAX ((1 << (VG_OBJECT_PRIORITY_BITS - 1)) - 1)
struct object_policy
{
@@ -216,93 +216,93 @@ struct object_policy
eviction. When a memory object is to be evicted, we select
the object with the lowest priority (lower value = lower
priority). */
- int8_t priority : OBJECT_PRIORITY_BITS;
+ int8_t priority : VG_OBJECT_PRIORITY_BITS;
};
uint8_t raw;
};
};
-#define OBJECT_POLICY_INIT { { raw: 0 } }
-#define OBJECT_POLICY(__op_discardable, __op_priority) \
+#define VG_OBJECT_POLICY_INIT { { raw: 0 } }
+#define VG_OBJECT_POLICY(__op_discardable, __op_priority) \
(struct object_policy) { { { (__op_discardable), (__op_priority) } } }
/* The default object policy: not discardable, managed by LRU. */
-#define OBJECT_POLICY_VOID \
- OBJECT_POLICY (false, OBJECT_PRIORITY_DEFAULT)
-/* Synonym for OBJECT_POLICY_VOID. */
-#define OBJECT_POLICY_DEFAULT OBJECT_POLICY_VOID
+#define VG_OBJECT_POLICY_VOID \
+ VG_OBJECT_POLICY (false, VG_OBJECT_PRIORITY_DEFAULT)
+/* Synonym for VG_OBJECT_POLICY_VOID. */
+#define VG_OBJECT_POLICY_DEFAULT VG_OBJECT_POLICY_VOID
/* Capability properties. */
-struct cap_properties
+struct vg_cap_properties
{
struct object_policy policy;
- struct cap_addr_trans addr_trans;
+ struct vg_cap_addr_trans addr_trans;
};
-#define CAP_PROPERTIES_INIT \
- { OBJECT_POLICY_INIT, CAP_ADDR_TRANS_INIT }
-#define CAP_PROPERTIES(__op_object_policy, __op_addr_trans) \
- (struct cap_properties) { __op_object_policy, __op_addr_trans }
-#define CAP_PROPERTIES_VOID \
- CAP_PROPERTIES (OBJECT_POLICY_INIT, CAP_ADDR_TRANS_INIT)
-#define CAP_PROPERTIES_DEFAULT CAP_PROPERTIES_VOID
+#define VG_CAP_PROPERTIES_INIT \
+ { VG_OBJECT_POLICY_INIT, VG_CAP_ADDR_TRANS_INIT }
+#define VG_CAP_PROPERTIES(__op_object_policy, __op_addr_trans) \
+ (struct vg_cap_properties) { __op_object_policy, __op_addr_trans }
+#define VG_CAP_PROPERTIES_VOID \
+ VG_CAP_PROPERTIES (VG_OBJECT_POLICY_INIT, VG_CAP_ADDR_TRANS_INIT)
+#define VG_CAP_PROPERTIES_DEFAULT VG_CAP_PROPERTIES_VOID
/* Capability representation. */
#ifdef RM_INTERN
/* An OID corresponds to a page on a volume. Only the least 54 bits
are significant. */
-typedef uint64_t oid_t;
-#define OID_FMT "0x%llx"
-#define OID_PRINTF(__op_oid) ((oid_t) (__op_oid))
+typedef uint64_t vg_oid_t;
+#define VG_OID_FMT "0x%llx"
+#define VG_OID_PRINTF(__op_oid) ((vg_oid_t) (__op_oid))
#endif
-#define CAP_VERSION_BITS 20
-#define CAP_TYPE_BITS 6
+#define VG_CAP_VERSION_BITS 20
+#define VG_CAP_TYPE_BITS 6
-struct cap
+struct vg_cap
{
#ifdef RM_INTERN
/* For a description of how versioning works, refer to the comment
titled "Object versioning" in object.h. */
- uint32_t version : CAP_VERSION_BITS;
+ uint32_t version : VG_CAP_VERSION_BITS;
/* Whether the capability is weak. */
uint32_t weak_p : 1;
/* Whether the designated object may be discarded. */
uint32_t discardable : 1;
/* The designated object's priority. */
- int32_t priority : OBJECT_PRIORITY_BITS;
+ int32_t priority : VG_OBJECT_PRIORITY_BITS;
- struct cap_addr_trans addr_trans;
+ struct vg_cap_addr_trans addr_trans;
- uint64_t type : CAP_TYPE_BITS;
+ uint64_t type : VG_CAP_TYPE_BITS;
/* If the capability designates an object, the object id. */
- uint64_t oid : 64 - CAP_TYPE_BITS;
+ uint64_t oid : 64 - VG_CAP_TYPE_BITS;
#else
/* The shadow object (only for cappages and folios). */
struct object *shadow;
uint32_t discardable : 1;
- int32_t priority : OBJECT_PRIORITY_BITS;
+ int32_t priority : VG_OBJECT_PRIORITY_BITS;
- uint32_t type : CAP_TYPE_BITS;
+ uint32_t type : VG_CAP_TYPE_BITS;
- uint32_t pad0 : 32 - 1 - OBJECT_PRIORITY_BITS - CAP_TYPE_BITS;
+ uint32_t pad0 : 32 - 1 - VG_OBJECT_PRIORITY_BITS - VG_CAP_TYPE_BITS;
/* This capability's address description. */
- struct cap_addr_trans addr_trans;
+ struct vg_cap_addr_trans addr_trans;
#endif
};
-#define CAP_VOID ((struct cap) { .type = cap_void })
+#define VG_CAP_VOID ((struct vg_cap) { .type = vg_cap_void })
/* Return CAP's policy. */
-#define CAP_POLICY_GET(__cpg_cap) \
- OBJECT_POLICY ((__cpg_cap).discardable, (__cpg_cap).priority)
+#define VG_CAP_POLICY_GET(__cpg_cap) \
+ VG_OBJECT_POLICY ((__cpg_cap).discardable, (__cpg_cap).priority)
/* Set CAP's policy to POLICY. */
-#define CAP_POLICY_SET(__cps_cap, __cps_policy) \
+#define VG_CAP_POLICY_SET(__cps_cap, __cps_policy) \
do \
{ \
(__cps_cap)->discardable = (__cps_policy).discardable; \
@@ -311,14 +311,14 @@ struct cap
while (0)
/* Return CAP's properties. */
-#define CAP_PROPERTIES_GET(__cpg_cap) \
- CAP_PROPERTIES (CAP_POLICY_GET (__cpg_cap), \
+#define VG_CAP_PROPERTIES_GET(__cpg_cap) \
+ VG_CAP_PROPERTIES (VG_CAP_POLICY_GET (__cpg_cap), \
(__cpg_cap).addr_trans)
/* Set *CAP's properties to PROPERTIES. */
-#define CAP_PROPERTIES_SET(__cps_cap, __cps_properties) \
+#define VG_CAP_PROPERTIES_SET(__cps_cap, __cps_properties) \
do \
{ \
- CAP_POLICY_SET (__cps_cap, (__cps_properties).policy); \
+ VG_CAP_POLICY_SET (__cps_cap, (__cps_properties).policy); \
(__cps_cap)->addr_trans = (__cps_properties).addr_trans; \
} \
while (0)
@@ -326,55 +326,55 @@ struct cap
/* Convenience macros for printing capabilities. */
#ifdef RM_INTERN
-#define CAP_FMT "{ " OID_FMT ".%d:%s %llx/%d; %d/%d }"
-#define CAP_PRINTF(cap) \
- OID_PRINTF ((cap)->oid), (cap)->version, cap_type_string ((cap)->type), \
- CAP_GUARD ((cap)), CAP_GUARD_BITS ((cap)), \
- CAP_SUBPAGE ((cap)), CAP_SUBPAGES ((cap))
+#define VG_CAP_FMT "{ " VG_OID_FMT ".%d:%s %llx/%d; %d/%d }"
+#define VG_CAP_PRINTF(vg_cap) \
+ VG_OID_PRINTF ((vg_cap)->oid), (vg_cap)->version, vg_cap_type_string ((vg_cap)->type), \
+ VG_CAP_GUARD ((vg_cap)), VG_CAP_GUARD_BITS ((vg_cap)), \
+ VG_CAP_SUBPAGE ((vg_cap)), VG_CAP_SUBPAGES ((vg_cap))
#else
-#define CAP_FMT "{ %s %llx/%d; %d/%d }"
-#define CAP_PRINTF(cap) \
- cap_type_string ((cap)->type), \
- CAP_GUARD ((cap)), CAP_GUARD_BITS ((cap)), \
- CAP_SUBPAGE ((cap)), CAP_SUBPAGES ((cap))
+#define VG_CAP_FMT "{ %s %llx/%d; %d/%d }"
+#define VG_CAP_PRINTF(vg_cap) \
+ vg_cap_type_string ((vg_cap)->type), \
+ VG_CAP_GUARD ((vg_cap)), VG_CAP_GUARD_BITS ((vg_cap)), \
+ VG_CAP_SUBPAGE ((vg_cap)), VG_CAP_SUBPAGES ((vg_cap))
#endif
/* Accessors corresponding to the CAP_ADDR_TRANS macros. */
-#define CAP_SUBPAGES_LOG2(cap_) \
- CAP_ADDR_TRANS_SUBPAGES_LOG2((cap_)->addr_trans)
-#define CAP_SUBPAGES(cap_) CAP_ADDR_TRANS_SUBPAGES ((cap_)->addr_trans)
-#define CAP_SUBPAGE(cap_) CAP_ADDR_TRANS_SUBPAGE((cap_)->addr_trans)
-#define CAP_SUBPAGE_SIZE_LOG2(cap_) \
- CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 ((cap_)->addr_trans)
-#define CAP_SUBPAGE_SIZE(cap_) \
- CAP_ADDR_TRANS_SUBPAGE_SIZE ((cap_)->addr_trans)
-#define CAP_SUBPAGE_OFFSET(cap_) \
- CAP_ADDR_TRANS_SUBPAGE_OFFSET((cap_)->addr_trans)
-#define CAP_GUARD_BITS(cap_) CAP_ADDR_TRANS_GUARD_BITS((cap_)->addr_trans)
-#define CAP_GUARD(cap_) CAP_ADDR_TRANS_GUARD((cap_)->addr_trans)
+#define VG_CAP_SUBPAGES_LOG2(cap_) \
+ VG_CAP_ADDR_TRANS_SUBPAGES_LOG2((cap_)->addr_trans)
+#define VG_CAP_SUBPAGES(cap_) VG_CAP_ADDR_TRANS_SUBPAGES ((cap_)->addr_trans)
+#define VG_CAP_SUBPAGE(cap_) VG_CAP_ADDR_TRANS_SUBPAGE((cap_)->addr_trans)
+#define VG_CAP_SUBPAGE_SIZE_LOG2(cap_) \
+ VG_CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 ((cap_)->addr_trans)
+#define VG_CAP_SUBPAGE_SIZE(cap_) \
+ VG_CAP_ADDR_TRANS_SUBPAGE_SIZE ((cap_)->addr_trans)
+#define VG_CAP_SUBPAGE_OFFSET(cap_) \
+ VG_CAP_ADDR_TRANS_SUBPAGE_OFFSET((cap_)->addr_trans)
+#define VG_CAP_GUARD_BITS(cap_) VG_CAP_ADDR_TRANS_GUARD_BITS((cap_)->addr_trans)
+#define VG_CAP_GUARD(cap_) VG_CAP_ADDR_TRANS_GUARD((cap_)->addr_trans)
/* NB: Only updates the shadow guard; NOT the capability. If the
- latter behavior is desired, use cap_copy_x instead. */
-#define CAP_SET_GUARD_SUBPAGE(cap_, guard_, gdepth_, subpage_, subpages_) \
+ latter behavior is desired, use vg_cap_copy_x instead. */
+#define VG_CAP_SET_GUARD_SUBPAGE(cap_, guard_, gdepth_, subpage_, subpages_) \
({ bool r_ = true; \
if ((subpages_) != 1 \
- && ! ((cap_)->type == cap_cappage || (cap_)->type == cap_rcappage)) \
+ && ! ((cap_)->type == vg_cap_cappage || (cap_)->type == vg_cap_rcappage)) \
{ \
      debug (1, "Subpages are only allowed for cappages."); \
r_ = false; \
} \
if (r_) \
- r_ = CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&(cap_)->addr_trans, \
+ r_ = VG_CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&(cap_)->addr_trans, \
(guard_), (gdepth_), \
(subpage_), (subpages_)); \
r_; \
})
-#define CAP_SET_GUARD(cap_, guard_, gdepth_) \
- CAP_SET_GUARD_SUBPAGE ((cap_), (guard_), (gdepth_), \
- CAP_SUBPAGE ((cap_)), CAP_SUBPAGES ((cap_)))
-#define CAP_SET_SUBPAGE(cap_, subpage_, subpages_) \
- CAP_SET_GUARD_SUBPAGE ((cap_), CAP_GUARD (cap_), CAP_GUARD_BITS (cap_), \
+#define VG_CAP_SET_GUARD(cap_, guard_, gdepth_) \
+ VG_CAP_SET_GUARD_SUBPAGE ((cap_), (guard_), (gdepth_), \
+ VG_CAP_SUBPAGE ((cap_)), VG_CAP_SUBPAGES ((cap_)))
+#define VG_CAP_SET_SUBPAGE(cap_, subpage_, subpages_) \
+ VG_CAP_SET_GUARD_SUBPAGE ((cap_), VG_CAP_GUARD (cap_), VG_CAP_GUARD_BITS (cap_), \
(subpage_), (subpages_))
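A hedged sketch (not from the patch) of the renamed guard setter; per the NB above, it only updates the local descriptor, not the installed capability:

  /* Encode a 4-bit guard of 0x3 into CAP's address translation;
     VG_CAP_SET_GUARD evaluates to false if the encoding fails.  */
  struct vg_cap cap = VG_CAP_VOID;
  cap.type = vg_cap_cappage;
  if (! VG_CAP_SET_GUARD (&cap, 0x3, 4))
    debug (1, "Could not encode guard.");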
/* Capability-related methods. */
@@ -401,65 +401,65 @@ enum
{
/* Use subpage in CAP_ADDR_TRANS (must be a subset of subpage in
SOURCE). */
- CAP_COPY_COPY_ADDR_TRANS_SUBPAGE = 1 << 0,
+ VG_CAP_COPY_COPY_ADDR_TRANS_SUBPAGE = 1 << 0,
/* Use guard in TARGET, not the guard in CAP_ADDR_TRANS. */
- CAP_COPY_COPY_ADDR_TRANS_GUARD = 1 << 1,
+ VG_CAP_COPY_COPY_ADDR_TRANS_GUARD = 1 << 1,
/* Use guard in SOURCE. */
- CAP_COPY_COPY_SOURCE_GUARD = 1 << 2,
+ VG_CAP_COPY_COPY_SOURCE_GUARD = 1 << 2,
/* When copying the capability copies a weakened reference. */
- CAP_COPY_WEAKEN = 1 << 3,
+ VG_CAP_COPY_WEAKEN = 1 << 3,
/* Set the discardable bit on the capability. */
- CAP_COPY_DISCARDABLE_SET = 1 << 4,
+ VG_CAP_COPY_DISCARDABLE_SET = 1 << 4,
/* Set the priority of the object. */
- CAP_COPY_PRIORITY_SET = 1 << 5,
+ VG_CAP_COPY_PRIORITY_SET = 1 << 5,
};
-/* Copy the capability in capability slot SOURCE to the slot at ADDR
- in the object OBJECT. If OBJECT is ADDR_VOID, then the calling
+/* Copy the capability in capability slot SOURCE to the slot at VG_ADDR
+ in the object OBJECT. If OBJECT is VG_ADDR_VOID, then the calling
thread's address space root is used.
By default, preserves SOURCE's subpage specification and copies
TARGET's guard and policy.
If CAP_COPY_COPY_SUBPAGE is set, then uses the subpage
- specification in CAP_PROPERTIES. If CAP_COPY_COPY_ADDR_TRANS_GUARD
- is set, uses the guard description in CAP_PROPERTIES.
+ specification in VG_CAP_PROPERTIES. If VG_CAP_COPY_COPY_ADDR_TRANS_GUARD
+ is set, uses the guard description in VG_CAP_PROPERTIES.
- If CAP_COPY_COPY_SOURCE_GUARD is set, uses the guard description in
+ If VG_CAP_COPY_COPY_SOURCE_GUARD is set, uses the guard description in
source. Otherwise, preserves the guard in TARGET.
- If CAP_COPY_WEAKEN is set, saves a weakened version of SOURCE
- (e.g., if SOURCE's type is cap_page, a cap_rpage is saved).
+ If VG_CAP_COPY_WEAKEN is set, saves a weakened version of SOURCE
+ (e.g., if SOURCE's type is vg_cap_page, a vg_cap_rpage is saved).
- If CAP_COPY_DISCARDABLE_SET is set, then sets the discardable bit
+ If VG_CAP_COPY_DISCARDABLE_SET is set, then sets the discardable bit
based on the value in PROPERTIES. Otherwise, copies SOURCE's
value.
- If CAP_COPY_PRIORITY_SET is set, then sets the priority based on
+ If VG_CAP_COPY_PRIORITY_SET is set, then sets the priority based on
the value in properties. Otherwise, copies SOURCE's value. */
RPC(cap_copy, 5, 0, 0,
- /* cap_t activity, cap_t object, */ addr_t, addr,
- cap_t, source_object, addr_t, source_addr,
- uintptr_t, flags, struct cap_properties, properties)
+ /* cap_t activity, cap_t object, */ vg_addr_t, addr,
+ cap_t, source_object, vg_addr_t, source_addr,
+ uintptr_t, flags, struct vg_cap_properties, properties)
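The generated stub is rm_cap_copy.  A sketch of a weakening copy, modeled on the vg_cap_copy_x wrapper below; ACTIVITY, TARGET_ADDR and SOURCE_ADDR are hypothetical:

  /* Copy the capability at SOURCE_ADDR to TARGET_ADDR in the caller's
     address space, saving a weakened version (sketch only).  */
  error_t err = rm_cap_copy (activity,
                             VG_ADDR_VOID, target_addr,
                             VG_ADDR_VOID, source_addr,
                             VG_CAP_COPY_WEAKEN,
                             VG_CAP_PROPERTIES_VOID);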
-/* Overwrite the capability slot at ADDR in the object OBJECT with a
+/* Overwrite the capability slot at VG_ADDR in the object OBJECT with a
void capability. */
RPC(cap_rubout, 1, 0, 0,
- /* cap_t activity, cap_t object, */ addr_t, addr)
+ /* cap_t activity, cap_t object, */ vg_addr_t, addr)
-/* Returns the public bits of the capability at address ADDR in OBJECT
- in TYPE and CAP_PROPERTIES. */
+/* Returns the public bits of the capability at address VG_ADDR in OBJECT
+ in TYPE and VG_CAP_PROPERTIES. */
RPC(cap_read, 1, 2, 0,
- /* cap_t activity, cap_t object, */ addr_t, addr,
+ /* cap_t activity, cap_t object, */ vg_addr_t, addr,
/* Out: */
- uintptr_t, type, struct cap_properties, properties)
+ uintptr_t, type, struct vg_cap_properties, properties)
-/* Clear the discarded bit of the object at ADDR in object OBJECT. */
+/* Clear the discarded bit of the object at VG_ADDR in object OBJECT. */
RPC(object_discarded_clear, 1, 0, 0,
- /* cap_t activity, cap_t object, */ addr_t, addr)
+ /* cap_t activity, cap_t object, */ vg_addr_t, addr)
/* If the object designated by OBJECT is in memory, discard it.
OBJECT must have write authority. This does not set the object's
@@ -480,7 +480,7 @@ enum
    whether the object has been modified since the last time the
dirty bit was cleared.) */
RPC (object_status, 1, 1, 0,
- /* addr_t activity, addr_t object, */ bool, clear,
+ /* vg_addr_t activity, vg_addr_t object, */ bool, clear,
uintptr_t, status)
/* Returns the object's return code in RETURN_CODE on object
@@ -510,12 +510,12 @@ RPC (object_name, 1, 0, 0,
/* The number of capabilities per page. */
enum
{
- CAPPAGE_SLOTS = PAGESIZE / 16,
+ VG_CAPPAGE_SLOTS = PAGESIZE / 16,
};
/* The log2 of the number of capabilities per page. */
enum
{
- CAPPAGE_SLOTS_LOG2 = PAGESIZE_LOG2 - 4,
+ VG_CAPPAGE_SLOTS_LOG2 = PAGESIZE_LOG2 - 4,
};
struct object
@@ -523,69 +523,69 @@ struct object
union
{
char data[PAGESIZE];
- struct cap caps[CAPPAGE_SLOTS];
+ struct vg_cap caps[VG_CAPPAGE_SLOTS];
};
};
#ifdef RM_INTERN
typedef struct activity *activity_t;
#else
-typedef addr_t activity_t;
+typedef vg_addr_t activity_t;
#endif
#ifndef RM_INTERN
-/* Return the address of cap CAP's shadow object. */
+/* Return the address of vg_cap CAP's shadow object. */
static inline void *
-cap_get_shadow (const struct cap *cap)
+vg_cap_get_shadow (const struct vg_cap *vg_cap)
{
- return cap->shadow;
+ return vg_cap->shadow;
}
/* Set CAP's shadow object to SHADOW. */
static inline void
-cap_set_shadow (struct cap *cap, void *shadow)
+vg_cap_set_shadow (struct vg_cap *vg_cap, void *shadow)
{
- cap->shadow = shadow;
+ vg_cap->shadow = shadow;
}
#endif
-/* Given cap CAP, return the corresponding object, or NULL, if there
+/* Given vg_cap CAP, return the corresponding object, or NULL, if there
is none. */
#ifdef RM_INTERN
-extern struct object *cap_to_object (activity_t activity, struct cap *cap);
+extern struct object *vg_cap_to_object (activity_t activity, struct vg_cap *vg_cap);
#else
static inline struct object *
-cap_to_object (activity_t activity, struct cap *cap)
+vg_cap_to_object (activity_t activity, struct vg_cap *vg_cap)
{
- return cap_get_shadow (cap);
+ return vg_cap_get_shadow (vg_cap);
}
#endif
-/* Wrapper for the cap_copy method. Also updates shadow
+/* Wrapper for the vg_cap_copy method. Also updates shadow
capabilities. */
static inline bool
-cap_copy_x (activity_t activity,
- addr_t target_address_space, struct cap *target, addr_t target_addr,
- addr_t source_address_space, struct cap source, addr_t source_addr,
- int flags, struct cap_properties properties)
+vg_cap_copy_x (activity_t activity,
+ vg_addr_t target_address_space, struct vg_cap *target, vg_addr_t target_addr,
+ vg_addr_t source_address_space, struct vg_cap source, vg_addr_t source_addr,
+ int flags, struct vg_cap_properties properties)
{
/* By default, we preserve SOURCE's subpage specification. */
- int subpage = CAP_SUBPAGE (&source);
- int subpages = CAP_SUBPAGES (&source);
+ int subpage = VG_CAP_SUBPAGE (&source);
+ int subpages = VG_CAP_SUBPAGES (&source);
- if ((flags & CAP_COPY_COPY_ADDR_TRANS_SUBPAGE))
+ if ((flags & VG_CAP_COPY_COPY_ADDR_TRANS_SUBPAGE))
/* Copy the subpage descriptor from PROPERTIES.ADDR_TRANS. */
{
- if (CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans) != 1
- && (source.type != cap_cappage
- && source.type != cap_rcappage))
+ if (VG_CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans) != 1
+ && (source.type != vg_cap_cappage
+ && source.type != vg_cap_rcappage))
/* A subpage descriptor is only valid for
cappages. */
{
debug (1, "subpages (%d) specified for non-cappage "
- "cap " CAP_FMT,
- CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans),
- CAP_PRINTF (&source));
+ "vg_cap " VG_CAP_FMT,
+ VG_CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans),
+ VG_CAP_PRINTF (&source));
return false;
}
@@ -593,48 +593,48 @@ cap_copy_x (activity_t activity,
(/* Start of PROPERTIES.ADDR_TRANS must be at or after start of
SOURCE. */
subpage * (256 / subpages)
- <= (CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans) *
- (256 / CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans)))
+ <= (VG_CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans) *
+ (256 / VG_CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans)))
/* End of PROPERTIES.ADDR_TRANS must be before or at end of
SOURCE. */
- && (((CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans) + 1) *
- (256 / CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans)))
+ && (((VG_CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans) + 1) *
+ (256 / VG_CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans)))
<= (subpage + 1) * (256 / subpages))))
/* The subpage descriptor does not narrow the
rights. */
{
debug (1, "specified subpage (%d/%d) not a subset "
- " of source " CAP_FMT,
- CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans),
- CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans),
- CAP_PRINTF (&source));
+             "of source " VG_CAP_FMT,
+ VG_CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans),
+ VG_CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans),
+ VG_CAP_PRINTF (&source));
return false;
}
- subpage = CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans);
- subpages = CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans);
+ subpage = VG_CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans);
+ subpages = VG_CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans);
}
/* By default, we preserve the guard in TARGET. */
- int guard = CAP_GUARD (target);
- int gbits = CAP_GUARD_BITS (target);
+ int guard = VG_CAP_GUARD (target);
+ int gbits = VG_CAP_GUARD_BITS (target);
- if ((flags & CAP_COPY_COPY_ADDR_TRANS_GUARD))
+ if ((flags & VG_CAP_COPY_COPY_ADDR_TRANS_GUARD))
/* Copy guard from PROPERTIES.ADDR_TRANS. */
{
- guard = CAP_ADDR_TRANS_GUARD (properties.addr_trans);
- gbits = CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans);
+ guard = VG_CAP_ADDR_TRANS_GUARD (properties.addr_trans);
+ gbits = VG_CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans);
}
- else if ((flags & CAP_COPY_COPY_SOURCE_GUARD))
+ else if ((flags & VG_CAP_COPY_COPY_SOURCE_GUARD))
/* Copy guard from SOURCE. */
{
- guard = CAP_GUARD (&source);
- gbits = CAP_GUARD_BITS (&source);
+ guard = VG_CAP_GUARD (&source);
+ gbits = VG_CAP_GUARD_BITS (&source);
}
int type = source.type;
- if ((flags & CAP_COPY_WEAKEN))
- type = cap_type_weaken (type);
+ if ((flags & VG_CAP_COPY_WEAKEN))
+ type = vg_cap_type_weaken (type);
#ifdef RM_INTERN
/* Changing a capability can change how addresses are translated.
@@ -652,19 +652,19 @@ cap_copy_x (activity_t activity,
changes_translation = true;
}
- if (subpage != CAP_SUBPAGE (target) || subpages != CAP_SUBPAGES (target))
+ if (subpage != VG_CAP_SUBPAGE (target) || subpages != VG_CAP_SUBPAGES (target))
{
debug (5, "Subpage specification differs %d/%d -> %d/%d.",
- subpage, subpages, CAP_SUBPAGE (target), CAP_SUBPAGES (target));
+ subpage, subpages, VG_CAP_SUBPAGE (target), VG_CAP_SUBPAGES (target));
changes_translation = true;
}
- if (guard != CAP_GUARD (target)
- || gbits != CAP_GUARD_BITS (target))
+ if (guard != VG_CAP_GUARD (target)
+ || gbits != VG_CAP_GUARD_BITS (target))
{
debug (5, "Guard changed invalidating translation "
"0x%x/%d -> %llx/%d",
- guard, gbits, CAP_GUARD (target), CAP_GUARD_BITS (target));
+ guard, gbits, VG_CAP_GUARD (target), VG_CAP_GUARD_BITS (target));
changes_translation = true;
}
@@ -676,23 +676,23 @@ cap_copy_x (activity_t activity,
if (changes_translation)
{
- extern void cap_shootdown (struct activity *activity, struct cap *cap);
+ extern void cap_shootdown (struct activity *activity, struct vg_cap *vg_cap);
- debug (5, "Translation changed: " CAP_FMT " -> " CAP_FMT,
- CAP_PRINTF (target), CAP_PRINTF (&source));
+ debug (5, "Translation changed: " VG_CAP_FMT " -> " VG_CAP_FMT,
+ VG_CAP_PRINTF (target), VG_CAP_PRINTF (&source));
cap_shootdown (activity, target);
}
#endif
- if (! CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&properties.addr_trans,
+ if (! VG_CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&properties.addr_trans,
guard, gbits,
subpage, subpages))
return false;
#ifndef RM_INTERN
- assert (! ADDR_IS_VOID (target_addr));
- assert (! ADDR_IS_VOID (source_addr));
+ assert (! VG_ADDR_IS_VOID (target_addr));
+ assert (! VG_ADDR_IS_VOID (source_addr));
error_t err = rm_cap_copy (activity, target_address_space, target_addr,
source_address_space, source_addr,
@@ -704,10 +704,10 @@ cap_copy_x (activity_t activity,
target->addr_trans = properties.addr_trans;
target->type = type;
- if ((flags & CAP_COPY_DISCARDABLE_SET))
+ if ((flags & VG_CAP_COPY_DISCARDABLE_SET))
target->discardable = properties.policy.discardable;
- if ((flags & CAP_COPY_PRIORITY_SET))
+ if ((flags & VG_CAP_COPY_PRIORITY_SET))
target->priority = properties.policy.priority;
return true;
@@ -717,14 +717,14 @@ cap_copy_x (activity_t activity,
SOURCE's subpage specification and TARGET's guard. Copies SOURCE's
policy. */
static inline bool
-cap_copy (activity_t activity,
- addr_t target_as, struct cap *target, addr_t target_addr,
- addr_t source_as, struct cap source, addr_t source_addr)
+vg_cap_copy (activity_t activity,
+ vg_addr_t target_as, struct vg_cap *target, vg_addr_t target_addr,
+ vg_addr_t source_as, struct vg_cap source, vg_addr_t source_addr)
{
- return cap_copy_x (activity, target_as, target, target_addr,
+ return vg_cap_copy_x (activity, target_as, target, target_addr,
source_as, source, source_addr,
- CAP_COPY_DISCARDABLE_SET | CAP_COPY_PRIORITY_SET,
- CAP_PROPERTIES_GET (source));
+ VG_CAP_COPY_DISCARDABLE_SET | VG_CAP_COPY_PRIORITY_SET,
+ VG_CAP_PROPERTIES_GET (source));
}
#endif
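A short sketch (not part of the patch) wrapping the renamed helper; passing VG_ADDR_VOID for both address-space roots means the calling thread's root is used, as documented above:

  /* Duplicate SOURCE into *TARGET within the caller's address space,
     preserving SOURCE's policy (sketch only).  */
  static inline bool
  dup_cap (activity_t activity,
           struct vg_cap *target, vg_addr_t target_addr,
           struct vg_cap source, vg_addr_t source_addr)
  {
    return vg_cap_copy (activity, VG_ADDR_VOID, target, target_addr,
                        VG_ADDR_VOID, source, source_addr);
  }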
diff --git a/libviengoos/viengoos/folio.h b/libviengoos/viengoos/folio.h
index 4172e5d..d135e02 100644
--- a/libviengoos/viengoos/folio.h
+++ b/libviengoos/viengoos/folio.h
@@ -29,27 +29,27 @@
/* Number of user objects per folio. */
enum
{
- FOLIO_OBJECTS = 128,
+ VG_FOLIO_OBJECTS = 128,
};
enum
{
- FOLIO_OBJECTS_LOG2 = 7,
+ VG_FOLIO_OBJECTS_LOG2 = 7,
};
/* User settable folio policy. */
/* The range of valid folio priorities. A lower numerical value
corresponds to a lower priority. */
-#define FOLIO_PRIORITY_BITS 15
-#define FOLIO_PRIORITY_MIN (-(1 << (FOLIO_PRIORITY_BITS - 1)))
-#define FOLIO_PRIORITY_LRU (0)
-#define FOLIO_PRIORITY_MAX ((1 << (FOLIO_PRIORITY_BITS - 1)) - 1)
+#define VG_FOLIO_PRIORITY_BITS 15
+#define VG_FOLIO_PRIORITY_MIN (-(1 << (VG_FOLIO_PRIORITY_BITS - 1)))
+#define VG_FOLIO_PRIORITY_LRU (0)
+#define VG_FOLIO_PRIORITY_MAX ((1 << (VG_FOLIO_PRIORITY_BITS - 1)) - 1)
/* The folio group range. */
-#define FOLIO_GROUP_BITS 15
-#define FOLIO_GROUP_NONE 0
-#define FOLIO_GROUP_MIN 0
-#define FOLIO_GROUP_MAX ((1 << FOLIO_BITS) - 1)
+#define VG_FOLIO_GROUP_BITS 15
+#define VG_FOLIO_GROUP_NONE 0
+#define VG_FOLIO_GROUP_MIN 0
+#define VG_FOLIO_GROUP_MAX ((1 << VG_FOLIO_GROUP_BITS) - 1)
struct folio_policy
{
@@ -68,26 +68,26 @@ struct folio_policy
/* Folios can belong to a group. When one folio is discarded,
all folios in that group are discarded, unless GROUP is
- FOLIO_GROUP_NONE. */
- uint32_t group : FOLIO_GROUP_BITS;
+ VG_FOLIO_GROUP_NONE. */
+ uint32_t group : VG_FOLIO_GROUP_BITS;
/* By default, the system tries to discard folios according to
an LRU policy. This can be overridden using this field. In
this case, folios from the lowest priority group are
discarded. */
- int32_t priority : FOLIO_PRIORITY_BITS;
+ int32_t priority : VG_FOLIO_PRIORITY_BITS;
};
uint32_t raw;
};
};
-#define FOLIO_POLICY_INIT { { raw: 0 } }
-#define FOLIO_POLICY_VOID (struct folio_policy) FOLIO_POLICY_INIT
+#define VG_FOLIO_POLICY_INIT { { raw: 0 } }
+#define VG_FOLIO_POLICY_VOID (struct folio_policy) VG_FOLIO_POLICY_INIT
/* The default policy is not discardable. */
-#define FOLIO_POLICY_DEFAULT FOLIO_POLICY_VOID
+#define VG_FOLIO_POLICY_DEFAULT VG_FOLIO_POLICY_VOID
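A minimal sketch (not from the patch) of filling in the renamed policy; the bit-fields are members of an anonymous union, so they are reachable directly:

  /* An ungrouped folio that overrides LRU with the highest priority.  */
  struct folio_policy policy = VG_FOLIO_POLICY_INIT;
  policy.group = VG_FOLIO_GROUP_NONE;
  policy.priority = VG_FOLIO_PRIORITY_MAX;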
/* The format of the first page of a folio. This page is followed (on
- disk) by FOLIO_OBJECTS pages. */
+ disk) by VG_FOLIO_OBJECTS pages. */
struct folio
{
#ifdef RM_INTERN
@@ -95,9 +95,9 @@ struct folio
to exactly one activity. To track what folios belong to a
particular activity, each folio is attached to a doubly-linked
list originating at its owner activity. */
- struct cap activity;
- struct cap next;
- struct cap prev;
+ struct vg_cap activity;
+ struct vg_cap next;
+ struct vg_cap prev;
/* The storage policy. */
struct folio_policy policy;
@@ -105,7 +105,7 @@ struct folio
struct
{
       /* The disk version of each object in the folio.  */
- uint32_t version : CAP_VERSION_BITS;
+ uint32_t version : VG_CAP_VERSION_BITS;
/* Whether a page has any content (i.e., if it is not
uninitialized). */
@@ -113,64 +113,65 @@ struct folio
/* The object's memory policy when accessed via the folio. */
uint32_t discardable : 1;
- int32_t priority : OBJECT_PRIORITY_BITS;
- } misc[1 + FOLIO_OBJECTS];
+ int32_t priority : VG_OBJECT_PRIORITY_BITS;
+ } misc[1 + VG_FOLIO_OBJECTS];
/* The type. */
- uint8_t types[FOLIO_OBJECTS];
+ uint8_t types[VG_FOLIO_OBJECTS];
   /* Bit array indicating whether an object has a non-empty wait
queue. */
- uint8_t wait_queues_p[(1 + FOLIO_OBJECTS + (8 - 1)) / 8];
+ uint8_t wait_queues_p[(1 + VG_FOLIO_OBJECTS + (8 - 1)) / 8];
- uint8_t discarded[(FOLIO_OBJECTS + (8 - 1)) / 8];
+ uint8_t discarded[(VG_FOLIO_OBJECTS + (8 - 1)) / 8];
/* User reference and dirty bits. Optionally reset on read. Set
respectively when an object is referenced or modified. Flushing
the object to disk does not clear this. */
- uint8_t dirty[(1 + FOLIO_OBJECTS + (8 - 1)) / 8];
- uint8_t referenced[(1 + FOLIO_OBJECTS + (8 - 1)) / 8];
+ uint8_t dirty[(1 + VG_FOLIO_OBJECTS + (8 - 1)) / 8];
+ uint8_t referenced[(1 + VG_FOLIO_OBJECTS + (8 - 1)) / 8];
/* Head of the list of objects waiting for some event on this
object. An element of this array is only valid if the
corresponding element of WAIT_QUEUES_P is true. The list is a
circular list. HEAD->PREV points to the tail. TAIL->NEXT points
to the OBJECT (NOT HEAD). */
- oid_t wait_queues[1 + FOLIO_OBJECTS];
+ vg_oid_t wait_queues[1 + VG_FOLIO_OBJECTS];
- uint64_t checksums[1 + FOLIO_OBJECTS][2];
+ uint64_t checksums[1 + VG_FOLIO_OBJECTS][2];
#else
/* User-space folio. */
- struct cap objects[FOLIO_OBJECTS];
+ struct vg_cap objects[VG_FOLIO_OBJECTS];
#endif
};
#ifdef RM_INTERN
-typedef struct folio *folio_t;
+typedef struct folio *vg_folio_t;
#else
-typedef addr_t folio_t;
+typedef vg_addr_t vg_folio_t;
#endif
-/* OBJECT is from -1 to FOLIO_OBJECTS. */
-static inline enum cap_type
-folio_object_type (struct folio *folio, int object)
+/* OBJECT is from -1 to VG_FOLIO_OBJECTS. */
+static inline enum vg_cap_type
+vg_folio_object_type (struct folio *folio, int object)
{
#ifdef RM_INTERN
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
if (object == -1)
- return cap_folio;
+ return vg_cap_folio;
return folio->types[object];
#else
- assert (object >= 0 && object < FOLIO_OBJECTS);
+ assert (object >= 0 && object < VG_FOLIO_OBJECTS);
return folio->objects[object].type;
#endif
}
static inline void
-folio_object_type_set (struct folio *folio, int object, enum cap_type type)
+vg_folio_object_type_set (struct folio *folio, int object,
+ enum vg_cap_type type)
{
- assert (object >= 0 && object < FOLIO_OBJECTS);
+ assert (object >= 0 && object < VG_FOLIO_OBJECTS);
#ifdef RM_INTERN
folio->types[object] = type;
@@ -180,17 +181,17 @@ folio_object_type_set (struct folio *folio, int object, enum cap_type type)
}
static inline struct object_policy
-folio_object_policy (struct folio *folio, int object)
+vg_folio_object_policy (struct folio *folio, int object)
{
struct object_policy policy;
#ifdef RM_INTERN
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
policy.discardable = folio->misc[object + 1].discardable;
policy.priority = folio->misc[object + 1].priority;
#else
- assert (object >= 0 && object < FOLIO_OBJECTS);
+ assert (object >= 0 && object < VG_FOLIO_OBJECTS);
policy.discardable = folio->objects[object].discardable;
policy.priority = folio->objects[object].priority;
@@ -200,16 +201,16 @@ folio_object_policy (struct folio *folio, int object)
}
static inline void
-folio_object_policy_set (struct folio *folio, int object,
- struct object_policy policy)
+vg_folio_object_policy_set (struct folio *folio, int object,
+ struct object_policy policy)
{
#ifdef RM_INTERN
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
folio->misc[object + 1].discardable = policy.discardable;
folio->misc[object + 1].priority = policy.priority;
#else
- assert (object >= 0 && object < FOLIO_OBJECTS);
+ assert (object >= 0 && object < VG_FOLIO_OBJECTS);
folio->objects[object].discardable = policy.discardable;
folio->objects[object].priority = policy.priority;
@@ -222,7 +223,7 @@ folio_object_policy_set (struct folio *folio, int object,
static inline bool
folio_object_wait_queue_p (struct folio *folio, int object)
{
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
return bit_test (folio->wait_queues_p, object + 1);
}
@@ -231,25 +232,25 @@ static inline void
folio_object_wait_queue_p_set (struct folio *folio, int object,
bool valid)
{
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
bit_set_to (folio->wait_queues_p, sizeof (folio->wait_queues_p),
object + 1, valid);
}
-static inline oid_t
+static inline vg_oid_t
folio_object_wait_queue (struct folio *folio, int object)
{
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
return folio->wait_queues[object + 1];
}
static inline void
folio_object_wait_queue_set (struct folio *folio, int object,
- oid_t head)
+ vg_oid_t head)
{
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
folio->wait_queues[object + 1] = head;
}
@@ -257,7 +258,7 @@ folio_object_wait_queue_set (struct folio *folio, int object,
static inline uint32_t
folio_object_version (struct folio *folio, int object)
{
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
return folio->misc[object + 1].version;
}
@@ -266,7 +267,7 @@ static inline void
folio_object_version_set (struct folio *folio, int object,
uint32_t version)
{
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
folio->misc[object + 1].version = version;
}
@@ -274,7 +275,7 @@ folio_object_version_set (struct folio *folio, int object,
static inline bool
folio_object_content (struct folio *folio, int object)
{
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
return folio->misc[object + 1].content;
}
@@ -283,7 +284,7 @@ static inline void
folio_object_content_set (struct folio *folio, int object,
bool content)
{
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
folio->misc[object + 1].content = content;
}
@@ -291,7 +292,7 @@ folio_object_content_set (struct folio *folio, int object,
static inline bool
folio_object_discarded (struct folio *folio, int object)
{
- assert (object >= 0 && object < FOLIO_OBJECTS);
+ assert (object >= 0 && object < VG_FOLIO_OBJECTS);
return bit_test (folio->discarded, object);
}
@@ -299,7 +300,7 @@ folio_object_discarded (struct folio *folio, int object)
static inline void
folio_object_discarded_set (struct folio *folio, int object, bool valid)
{
- assert (object >= 0 && object < FOLIO_OBJECTS);
+ assert (object >= 0 && object < VG_FOLIO_OBJECTS);
bit_set_to (folio->discarded, sizeof (folio->discarded),
object, valid);
@@ -308,7 +309,7 @@ folio_object_discarded_set (struct folio *folio, int object, bool valid)
static inline bool
folio_object_referenced (struct folio *folio, int object)
{
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
return bit_test (folio->referenced, object + 1);
}
@@ -316,7 +317,7 @@ folio_object_referenced (struct folio *folio, int object)
static inline void
folio_object_referenced_set (struct folio *folio, int object, bool p)
{
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
bit_set_to (folio->referenced, sizeof (folio->referenced), object + 1, p);
}
@@ -324,7 +325,7 @@ folio_object_referenced_set (struct folio *folio, int object, bool p)
static inline bool
folio_object_dirty (struct folio *folio, int object)
{
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
return bit_test (folio->dirty, object + 1);
}
@@ -332,28 +333,28 @@ folio_object_dirty (struct folio *folio, int object)
static inline void
folio_object_dirty_set (struct folio *folio, int object, bool p)
{
- assert (object >= -1 && object < FOLIO_OBJECTS);
+ assert (object >= -1 && object < VG_FOLIO_OBJECTS);
bit_set_to (folio->dirty, sizeof (folio->dirty), object + 1, p);
}
#endif /* RM_INTERN */
-/* Return a cap designating folio FOLIO's OBJECT'th object. */
+/* Return a vg_cap designating folio FOLIO's OBJECT'th object. */
#ifdef RM_INTERN
/* This needs to be a macro as we use object_to_object_desc which is
made available by object.h but object.h includes this file. */
-#define folio_object_cap(__foc_folio, __foc_object) \
+#define vg_folio_object_cap(__foc_folio, __foc_object) \
({ \
- struct cap __foc_cap; \
+ struct vg_cap __foc_cap; \
\
- __foc_cap.type = folio_object_type (__foc_folio, __foc_object); \
+ __foc_cap.type = vg_folio_object_type (__foc_folio, __foc_object); \
__foc_cap.version = folio_object_version (__foc_folio, \
- __foc_object); \
+ __foc_object); \
\
- struct cap_properties __foc_cap_properties \
- = CAP_PROPERTIES (folio_object_policy (__foc_folio, __foc_object), \
- CAP_ADDR_TRANS_VOID); \
- CAP_PROPERTIES_SET (&__foc_cap, __foc_cap_properties); \
+ struct vg_cap_properties __foc_cap_properties \
+ = VG_CAP_PROPERTIES (vg_folio_object_policy (__foc_folio, __foc_object), \
+ VG_CAP_ADDR_TRANS_VOID); \
+ VG_CAP_PROPERTIES_SET (&__foc_cap, __foc_cap_properties); \
\
__foc_cap.oid \
= object_to_object_desc ((struct object *) __foc_folio)->oid \
@@ -362,10 +363,10 @@ folio_object_dirty_set (struct folio *folio, int object, bool p)
__foc_cap; \
})
#else
-static inline struct cap
-folio_object_cap (struct folio *folio, int object)
+static inline struct vg_cap
+vg_folio_object_cap (struct folio *folio, int object)
{
- assert (0 <= object && object < FOLIO_OBJECTS);
+ assert (0 <= object && object < VG_FOLIO_OBJECTS);
return folio->objects[object];
}
#endif
@@ -395,7 +396,7 @@ RPC(folio_free, 0, 0, 0
/* cap_t, principal, cap_t, folio */)
/* Destroys the INDEXth object in folio FOLIO and allocate in its
- place an object of tye TYPE. If TYPE is CAP_VOID, any existing
+   place an object of type TYPE.  If TYPE is VG_CAP_VOID, any existing
object is destroyed, however, no object is instantiated in its
place. POLICY specifies the object's policy when accessed via the
folio. If an object is destroyed and there are waiters, they are
@@ -413,20 +414,20 @@ RPC(folio_object_alloc, 4, 0, 2,
/* Flags for folio_policy. */
enum
{
- FOLIO_POLICY_DELIVER = 1 << 0,
+ VG_FOLIO_POLICY_DELIVER = 1 << 0,
- FOLIO_POLICY_DISCARDABLE_SET = 1 << 1,
- FOLIO_POLICY_GROUP_SET = 1 << 2,
- FOLIO_POLICY_PRIORITY_SET = 1 << 3,
+ VG_FOLIO_POLICY_DISCARDABLE_SET = 1 << 1,
+ VG_FOLIO_POLICY_GROUP_SET = 1 << 2,
+ VG_FOLIO_POLICY_PRIORITY_SET = 1 << 3,
- FOLIO_POLICY_SET = (FOLIO_POLICY_DISCARDABLE_SET
- | FOLIO_POLICY_GROUP_SET
- | FOLIO_POLICY_PRIORITY_SET)
+ VG_FOLIO_POLICY_SET = (VG_FOLIO_POLICY_DISCARDABLE_SET
+ | VG_FOLIO_POLICY_GROUP_SET
+ | VG_FOLIO_POLICY_PRIORITY_SET)
};
/* Get and set the management policy for folio FOLIO.
- If FOLIO_POLICY_DELIVER is set in FLAGS, then return FOLIO's
+ If VG_FOLIO_POLICY_DELIVER is set in FLAGS, then return FOLIO's
current paging policy in OLD. Then, if any of the set flags are
set, set the corresponding values based on the value of POLICY. */
RPC(folio_policy, 2, 1, 0,
diff --git a/libviengoos/viengoos/futex.h b/libviengoos/viengoos/futex.h
index 3f77b6d..49cb4a2 100644
--- a/libviengoos/viengoos/futex.h
+++ b/libviengoos/viengoos/futex.h
@@ -1,4 +1,4 @@
-/* futex.h - Futex definitions.
+/* vg_futex.h - Futex definitions.
Copyright (C) 2008 Free Software Foundation, Inc.
Written by Neal H. Walfield <neal@gnu.org>.
@@ -111,17 +111,17 @@ RPC (futex, 7, 1, 0,
#ifndef RM_INTERN
#include <errno.h>
-struct futex_return
+struct vg_futex_return
{
error_t err;
long ret;
};
-static inline struct futex_return
+static inline struct vg_futex_return
__attribute__((always_inline))
-futex_using (struct hurd_message_buffer *mb,
- void *addr1, int op, int val1, struct timespec *timespec,
- void *addr2, int val3)
+vg_futex_using (struct hurd_message_buffer *mb,
+ void *addr1, int op, int val1, struct timespec *timespec,
+ void *addr2, int val3)
{
union futex_val2 val2;
if (timespec)
@@ -133,34 +133,34 @@ futex_using (struct hurd_message_buffer *mb,
long ret = 0; /* Elide gcc warning. */
if (mb)
err = rm_futex_using (mb,
- ADDR_VOID, ADDR_VOID,
+ VG_ADDR_VOID, VG_ADDR_VOID,
addr1, op, val1, !! timespec, val2, addr2,
(union futex_val3) val3, &ret);
else
- err = rm_futex (ADDR_VOID, ADDR_VOID,
+ err = rm_futex (VG_ADDR_VOID, VG_ADDR_VOID,
addr1, op, val1, !! timespec, val2, addr2,
(union futex_val3) val3, &ret);
- return (struct futex_return) { err, ret };
+ return (struct vg_futex_return) { err, ret };
}
-/* Standard futex signatures. See futex documentation, e.g., Futexes
+/* Standard vg_futex signatures.  See futex documentation, e.g., Futexes
are Tricky by Ulrich Drepper. */
-static inline struct futex_return
+static inline struct vg_futex_return
__attribute__((always_inline))
-futex (void *addr1, int op, int val1, struct timespec *timespec,
+vg_futex (void *addr1, int op, int val1, struct timespec *timespec,
void *addr2, int val3)
{
- return futex_using (NULL, addr1, op, val1, timespec, addr2, val3);
+ return vg_futex_using (NULL, addr1, op, val1, timespec, addr2, val3);
}
/* If *F is VAL, wait until woken. */
static inline long
__attribute__((always_inline))
-futex_wait_using (struct hurd_message_buffer *mb, int *f, int val)
+vg_futex_wait_using (struct hurd_message_buffer *mb, int *f, int val)
{
- struct futex_return ret;
- ret = futex_using (mb, f, FUTEX_WAIT, val, NULL, 0, 0);
+ struct vg_futex_return ret;
+ ret = vg_futex_using (mb, f, FUTEX_WAIT, val, NULL, 0, 0);
if (ret.err)
{
errno = ret.err;
@@ -173,7 +173,7 @@ static inline long
__attribute__((always_inline))
futex_wait (int *f, int val)
{
- return futex_wait_using (NULL, f, val);
+ return vg_futex_wait_using (NULL, f, val);
}
@@ -182,8 +182,8 @@ static inline long
__attribute__((always_inline))
futex_timed_wait (int *f, int val, struct timespec *timespec)
{
- struct futex_return ret;
- ret = futex (f, FUTEX_WAIT, val, timespec, 0, 0);
+ struct vg_futex_return ret;
+ ret = vg_futex (f, FUTEX_WAIT, val, timespec, 0, 0);
if (ret.err)
{
errno = ret.err;
@@ -193,13 +193,13 @@ futex_timed_wait (int *f, int val, struct timespec *timespec)
}
-/* Signal NWAKE waiters waiting on futex F. */
+/* Signal NWAKE waiters waiting on vg_futex F. */
static inline long
__attribute__((always_inline))
-futex_wake_using (struct hurd_message_buffer *mb, int *f, int nwake)
+vg_futex_wake_using (struct hurd_message_buffer *mb, int *f, int nwake)
{
- struct futex_return ret;
- ret = futex_using (mb, f, FUTEX_WAKE, nwake, NULL, 0, 0);
+ struct vg_futex_return ret;
+ ret = vg_futex_using (mb, f, FUTEX_WAKE, nwake, NULL, 0, 0);
if (ret.err)
{
errno = ret.err;
@@ -212,7 +212,7 @@ static inline long
__attribute__((always_inline))
futex_wake (int *f, int nwake)
{
- return futex_wake_using (NULL, f, nwake);
+ return vg_futex_wake_using (NULL, f, nwake);
}
#endif /* !RM_INTERN */
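The renamed wrappers keep the usual futex protocol.  A hedged sketch of a wait/wake pair on a flag shared by two threads:

  static int flag;

  /* Waiter: block while FLAG is still 0.  */
  while (flag == 0)
    futex_wait (&flag, 0);

  /* Waker (in another thread): publish, then wake one waiter.  */
  flag = 1;
  futex_wake (&flag, 1);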
diff --git a/libviengoos/viengoos/ipc.h b/libviengoos/viengoos/ipc.h
index 67c2bad..ae00ec4 100644
--- a/libviengoos/viengoos/ipc.h
+++ b/libviengoos/viengoos/ipc.h
@@ -69,7 +69,7 @@ enum
VG_IPC_SEND_INLINE = 1 << 12,
/* Which inline data to transfer when sending a message. Inline
- data is ignored if the send buffer is not ADDR_VOID. */
+ data is ignored if the send buffer is not VG_ADDR_VOID. */
VG_IPC_SEND_INLINE_WORD1 = 1 << 13,
VG_IPC_SEND_INLINE_WORD2 = 1 << 14,
VG_IPC_SEND_INLINE_CAP1 = 1 << 15,
@@ -92,7 +92,7 @@ enum
If FLAGS contains VG_IPC_RECEIVE, the IPC includes a receive phase.
- If RECV_BUF is not ADDR_VOID, associates RECV_BUF with
+ If RECV_BUF is not VG_ADDR_VOID, associates RECV_BUF with
RECV_MESSENGER.
If FLAGS contains VG_IPC_RECEIVE_NONBLOCKING:
@@ -115,10 +115,10 @@ enum
If FLAGS contains VG_IPC_SEND, the IPC includes a send phase.
- If SEND_MESSENGER is ADDR_VOID, an implicit messenger is allocated
+ If SEND_MESSENGER is VG_ADDR_VOID, an implicit messenger is allocated
and VG_IPC_SEND_NONBLOCKING is assumed to be on.
- If SEND_BUF is not ADDR_VOID, assocaiates SEND_BUF with
+   If SEND_BUF is not VG_ADDR_VOID, associates SEND_BUF with
SEND_MESSENGER. Otherwise, associates inline data (INLINE_WORD1,
INLINE_WORD2 and INLINE_CAP) according to the inline flags with
SEND_MESSENGER.
@@ -148,12 +148,12 @@ enum
calling thread is suspended until it is next activated. */
static inline error_t
vg_ipc_full (uintptr_t flags,
- addr_t recv_activity, addr_t recv_messenger, addr_t recv_buf,
- addr_t recv_inline_cap,
- addr_t send_activity, addr_t target_messenger,
- addr_t send_messenger, addr_t send_buf,
+ vg_addr_t recv_activity, vg_addr_t recv_messenger, vg_addr_t recv_buf,
+ vg_addr_t recv_inline_cap,
+ vg_addr_t send_activity, vg_addr_t target_messenger,
+ vg_addr_t send_messenger, vg_addr_t send_buf,
uintptr_t send_inline_word1, uintptr_t send_inline_word2,
- addr_t send_inline_cap)
+ vg_addr_t send_inline_cap)
{
error_t err = 0;
@@ -165,10 +165,10 @@ vg_ipc_full (uintptr_t flags,
l4_msg_clear (msg);
l4_msg_set_msg_tag (msg, tag);
- void msg_append_addr (addr_t addr)
+ void msg_append_addr (vg_addr_t addr)
{
int i;
- for (i = 0; i < sizeof (addr_t) / sizeof (uintptr_t); i ++)
+ for (i = 0; i < sizeof (vg_addr_t) / sizeof (uintptr_t); i ++)
l4_msg_append_word (msg, ((uintptr_t *) &addr)[i]);
}
@@ -235,51 +235,51 @@ vg_ipc_full (uintptr_t flags,
static inline error_t
vg_ipc (uintptr_t flags,
- addr_t recv_activity, addr_t recv_messenger, addr_t recv_buf,
- addr_t send_activity, addr_t target_messenger,
- addr_t send_messenger, addr_t send_buf)
+ vg_addr_t recv_activity, vg_addr_t recv_messenger, vg_addr_t recv_buf,
+ vg_addr_t send_activity, vg_addr_t target_messenger,
+ vg_addr_t send_messenger, vg_addr_t send_buf)
{
return vg_ipc_full (flags,
- recv_activity, recv_messenger, recv_buf, ADDR_VOID,
+ recv_activity, recv_messenger, recv_buf, VG_ADDR_VOID,
send_activity, target_messenger,
send_messenger, send_buf,
- 0, 0, ADDR_VOID);
+ 0, 0, VG_ADDR_VOID);
}
static inline error_t
vg_ipc_short (uintptr_t flags,
- addr_t recv_activity, addr_t recv_messenger, addr_t recv_cap,
- addr_t send_activity, addr_t target_messenger,
- addr_t send_messenger,
+ vg_addr_t recv_activity, vg_addr_t recv_messenger, vg_addr_t recv_cap,
+ vg_addr_t send_activity, vg_addr_t target_messenger,
+ vg_addr_t send_messenger,
uintptr_t inline_word1, uintptr_t inline_word2,
- addr_t inline_cap)
+ vg_addr_t inline_cap)
{
return vg_ipc_full (flags,
- recv_activity, recv_messenger, ADDR_VOID, recv_cap,
+ recv_activity, recv_messenger, VG_ADDR_VOID, recv_cap,
send_activity, target_messenger,
- send_messenger, ADDR_VOID,
+ send_messenger, VG_ADDR_VOID,
inline_word1, inline_word2, inline_cap);
}
static inline error_t
-vg_send (uintptr_t flags, addr_t send_activity, addr_t target_messenger,
- addr_t send_messenger, addr_t send_buf)
+vg_send (uintptr_t flags, vg_addr_t send_activity, vg_addr_t target_messenger,
+ vg_addr_t send_messenger, vg_addr_t send_buf)
{
return vg_ipc_full (flags | VG_IPC_SEND | VG_IPC_SEND_ACTIVATE,
- ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID,
send_activity, target_messenger,
send_messenger, send_buf,
- 0, 0, ADDR_VOID);
+ 0, 0, VG_ADDR_VOID);
}
static inline error_t
-vg_reply (uintptr_t flags, addr_t send_activity, addr_t target_messenger,
- addr_t send_messenger, addr_t send_buf)
+vg_reply (uintptr_t flags, vg_addr_t send_activity, vg_addr_t target_messenger,
+ vg_addr_t send_messenger, vg_addr_t send_buf)
{
return vg_ipc_full (flags | VG_IPC_SEND | VG_IPC_SEND_NONBLOCKING,
- ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID,
send_activity, target_messenger, send_messenger, send_buf,
- 0, 0, ADDR_VOID);
+ 0, 0, VG_ADDR_VOID);
}
/* Suspend the caller until the next activation. */
@@ -287,9 +287,9 @@ static inline error_t
vg_suspend (void)
{
return vg_ipc_full (0,
- ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
- ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
- 0, 0, ADDR_VOID);
+ VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID,
+ VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID,
+ 0, 0, VG_ADDR_VOID);
}
#endif
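A sketch of the inline-send pattern these helpers support, mirroring the flags rpc_error_reply uses further down; TARGET is a hypothetical messenger address:

  /* Send the single inline word 42 to TARGET via an implicit
     messenger, without blocking (sketch only).  */
  error_t err
    = vg_ipc_short (VG_IPC_SEND_NONBLOCKING | VG_IPC_SEND_INLINE
                    | VG_IPC_SEND_INLINE_WORD1,
                    VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID,
                    VG_ADDR_VOID, target,
                    VG_ADDR_VOID, 42, 0, VG_ADDR_VOID);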
diff --git a/libviengoos/viengoos/message.h b/libviengoos/viengoos/message.h
index bff1e9a..514248e 100644
--- a/libviengoos/viengoos/message.h
+++ b/libviengoos/viengoos/message.h
@@ -42,7 +42,7 @@ struct vg_message
/* The number of bytes of data transferred in this message. */
uint16_t data_count;
- addr_t caps[/* cap_count */];
+ vg_addr_t caps[/* cap_count */];
// char data[data_count];
};
@@ -66,7 +66,7 @@ static inline int
vg_message_cap_count (struct vg_message *msg)
{
int max = (PAGESIZE - __builtin_offsetof (struct vg_message, caps))
- / sizeof (addr_t);
+ / sizeof (vg_addr_t);
int count = msg->cap_count;
if (count > max)
@@ -80,7 +80,7 @@ static inline int
vg_message_data_count (struct vg_message *msg)
{
int max = PAGESIZE
- - vg_message_cap_count (msg) * sizeof (addr_t)
+ - vg_message_cap_count (msg) * sizeof (vg_addr_t)
- __builtin_offsetof (struct vg_message, caps);
int count = msg->data_count;
@@ -92,14 +92,14 @@ vg_message_data_count (struct vg_message *msg)
/* Return the start of the capability address array in msg MSG. */
-static inline addr_t *
+static inline vg_addr_t *
vg_message_caps (struct vg_message *msg)
{
return msg->caps;
}
/* Return capability IDX in msg MSG. */
-static inline addr_t
+static inline vg_addr_t
vg_message_cap (struct vg_message *msg, int idx)
{
assert (idx < msg->cap_count);
@@ -114,7 +114,7 @@ vg_message_data (struct vg_message *msg)
{
return (void *) msg
+ __builtin_offsetof (struct vg_message, caps)
- + msg->cap_count * sizeof (addr_t);
+ + msg->cap_count * sizeof (vg_addr_t);
}
/* Return data word WORD in msg MSG. */
@@ -130,7 +130,7 @@ vg_message_word (struct vg_message *msg, int word)
/* Append the array of capability addresses CAPS to the msg MSG.
There must be sufficient room in the message buffer. */
static inline void
-vg_message_append_caps (struct vg_message *msg, int cap_count, addr_t *caps)
+vg_message_append_caps (struct vg_message *msg, int cap_count, vg_addr_t *caps)
{
assert ((void *) vg_message_data (msg) - (void *) msg
+ vg_message_data_count (msg) + cap_count * sizeof (*caps)
@@ -142,7 +142,7 @@ vg_message_append_caps (struct vg_message *msg, int cap_count, addr_t *caps)
__builtin_memcpy (&msg->caps[msg->cap_count],
caps,
- cap_count * sizeof (addr_t));
+ cap_count * sizeof (vg_addr_t));
msg->cap_count += cap_count;
}
@@ -150,9 +150,9 @@ vg_message_append_caps (struct vg_message *msg, int cap_count, addr_t *caps)
/* Append the capability address CAP to the msg MSG. There must be
sufficient room in the message buffer. */
static inline void
-vg_message_append_cap (struct vg_message *msg, addr_t cap)
+vg_message_append_cap (struct vg_message *msg, vg_addr_t vg_cap)
{
- vg_message_append_caps (msg, 1, &cap);
+ vg_message_append_caps (msg, 1, &vg_cap);
}
@@ -162,7 +162,7 @@ static inline void
vg_message_append_data (struct vg_message *msg, int bytes, char *data)
{
int dstart = __builtin_offsetof (struct vg_message, caps)
- + msg->cap_count * sizeof (addr_t);
+ + msg->cap_count * sizeof (vg_addr_t);
int dend = dstart + msg->data_count;
int new_dend = dend + bytes;
@@ -221,8 +221,8 @@ vg_message_dump (struct vg_message *message)
}
for (i = 0; i < vg_message_cap_count (message); i ++)
- s_printf ("cap %d: " ADDR_FMT "\n",
- i, ADDR_PRINTF (vg_message_cap (message, i)));
+ s_printf ("cap %d: " VG_ADDR_FMT "\n",
+ i, VG_ADDR_PRINTF (vg_message_cap (message, i)));
}
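A composition sketch (not from the patch); MSG is a hypothetical page-sized message buffer and SLOT a hypothetical vg_addr_t:

  /* Build a message carrying one capability address and one word.  */
  vg_message_clear (msg);
  vg_message_append_cap (msg, slot);
  uintptr_t word = 42;
  vg_message_append_data (msg, sizeof word, (char *) &word);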
diff --git a/libviengoos/viengoos/messenger.h b/libviengoos/viengoos/messenger.h
index fbdc5ff..79f3d8a 100644
--- a/libviengoos/viengoos/messenger.h
+++ b/libviengoos/viengoos/messenger.h
@@ -42,7 +42,7 @@
struct messenger;
typedef struct messenger *vg_messenger_t;
#else
-typedef addr_t vg_messenger_t;
+typedef vg_addr_t vg_messenger_t;
#endif
#define VG_MESSENGER_INLINE_WORDS 2
diff --git a/libviengoos/viengoos/rpc.h b/libviengoos/viengoos/rpc.h
index 14feddd..7ede9ce 100644
--- a/libviengoos/viengoos/rpc.h
+++ b/libviengoos/viengoos/rpc.h
@@ -62,7 +62,7 @@ extern bool messenger_message_load (struct activity *activity,
#else
# include <hurd/message-buffer.h>
#endif
-typedef addr_t cap_t;
+typedef vg_addr_t cap_t;
/* First we define some cpp help macros. */
#define CPP_IFELSE_0(when, whennot) whennot
@@ -224,7 +224,7 @@ typedef addr_t cap_t;
union \
{ \
__rla_type __rla_a; \
- RPC_GRAB2 (, 1, RPC_TYPE_SHIFT (1, struct cap *, cap_t, __rla_foo)); \
+ RPC_GRAB2 (, 1, RPC_TYPE_SHIFT (1, struct vg_cap *, cap_t, __rla_foo)); \
cap_t __rla_cap; \
} __rla_arg2 = { (__rla_arg) }; \
vg_message_append_cap (msg, __rla_arg2.__rla_cap); \
@@ -461,7 +461,7 @@ typedef addr_t cap_t;
RPC_GRAB2 (, out_count, ##__VA_ARGS__) \
RPC_IF_COMMA (ret_cap_count) () \
RPC_GRAB2 (, ret_cap_count, \
- RPC_TYPE_SHIFT (ret_cap_count, struct cap *, \
+ RPC_TYPE_SHIFT (ret_cap_count, struct vg_cap *, \
RPC_CHOP2 (out_count, __VA_ARGS__)))) \
{ \
vg_message_clear (msg); \
@@ -736,7 +736,7 @@ typedef addr_t cap_t;
error_t err = vg_send (VG_IPC_SEND_SET_THREAD_TO_CALLER \
| VG_IPC_SEND_SET_ASROOT_TO_CALLERS, \
__rpc_activity, __rpc_object, \
- mb->sender, ADDR_VOID); \
+ mb->sender, VG_ADDR_VOID); \
\
return err; \
}
@@ -765,7 +765,7 @@ typedef addr_t cap_t;
error_t err = vg_reply (VG_IPC_SEND_SET_THREAD_TO_CALLER \
| VG_IPC_SEND_SET_ASROOT_TO_CALLERS, \
__rpc_activity, __rpc_object, \
- mb->sender, ADDR_VOID); \
+ mb->sender, VG_ADDR_VOID); \
\
hurd_message_buffer_free (mb); \
\
@@ -782,8 +782,8 @@ typedef addr_t cap_t;
__attribute__((always_inline)) \
RPC_CONCAT (RPC_CONCAT (RPC_STUB_PREFIX_(id), _using), postfix) \
(struct hurd_message_buffer *mb, \
- addr_t __rpc_activity, \
- addr_t __rpc_object \
+ vg_addr_t __rpc_activity, \
+ vg_addr_t __rpc_object \
/* In arguments. */ \
RPC_IF_COMMA (in_count) () \
RPC_GRAB2 (, in_count, __VA_ARGS__) \
@@ -796,7 +796,7 @@ typedef addr_t cap_t;
RPC_CONCAT (RPC_STUB_PREFIX_(id), _receive_marshal) \
(mb->reply \
RPC_IF_COMMA (ret_cap_count) () \
- CPP_FOREACH(ret_cap_count, CPP_SAFE_DEREF, ADDR_VOID, \
+ CPP_FOREACH(ret_cap_count, CPP_SAFE_DEREF, VG_ADDR_VOID, \
RPC_ARGUMENTS (ret_cap_count, , \
RPC_CHOP2 (CPP_ADD (in_count, out_count), \
__VA_ARGS__)))); \
@@ -818,9 +818,9 @@ typedef addr_t cap_t;
| VG_IPC_RECEIVE_SET_THREAD_TO_CALLER \
| VG_IPC_RECEIVE_SET_ASROOT_TO_CALLERS, \
__rpc_activity, \
- mb->receiver_strong, ADDR_VOID, \
+ mb->receiver_strong, VG_ADDR_VOID, \
__rpc_activity, __rpc_object, \
- mb->sender, ADDR_VOID); \
+ mb->sender, VG_ADDR_VOID); \
if (err) \
/* Error sending the IPC. */ \
hurd_activation_message_unregister (mb); \
@@ -837,8 +837,8 @@ typedef addr_t cap_t;
static inline error_t \
__attribute__((always_inline)) \
RPC_CONCAT (RPC_STUB_PREFIX_(id), postfix) \
- (addr_t __rpc_activity, \
- addr_t __rpc_object \
+ (vg_addr_t __rpc_activity, \
+ vg_addr_t __rpc_object \
/* In arguments. */ \
RPC_IF_COMMA (in_count) () \
RPC_GRAB2 (, in_count, __VA_ARGS__) \
@@ -871,8 +871,8 @@ typedef addr_t cap_t;
#define RPC_REPLY_(id, in_count, out_count, ret_cap_count, ...) \
static inline error_t \
RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply) \
- (addr_t __rpc_activity, \
- addr_t __rpc_target \
+ (vg_addr_t __rpc_activity, \
+ vg_addr_t __rpc_target \
/* Out data. */ \
RPC_IF_COMMA (out_count) () \
RPC_GRAB2 (, out_count, RPC_CHOP2 (in_count, ##__VA_ARGS__)) \
@@ -898,7 +898,7 @@ typedef addr_t cap_t;
error_t err = vg_reply (VG_IPC_SEND_SET_THREAD_TO_CALLER \
| VG_IPC_SEND_SET_ASROOT_TO_CALLERS, \
__rpc_activity, __rpc_target, \
- mb->sender, ADDR_VOID); \
+ mb->sender, VG_ADDR_VOID); \
\
hurd_message_buffer_free (mb); \
\
@@ -917,7 +917,7 @@ typedef addr_t cap_t;
/* Return capabilities. */ \
RPC_IF_COMMA (ret_cap_count) () \
RPC_GRAB2 (, ret_cap_count, \
- RPC_TYPE_SHIFT (ret_cap_count, struct cap, \
+ RPC_TYPE_SHIFT (ret_cap_count, struct vg_cap, \
RPC_CHOP2 (CPP_ADD (in_count, out_count), \
##__VA_ARGS__)))) \
{ \
@@ -991,7 +991,7 @@ typedef addr_t cap_t;
   Note that *XYZZY must be initialized with the location of a
capability slot to store the returned capability. *XYZZY is set to
- ADDR_VOID if the sender did not provide a capability.
+ VG_ADDR_VOID if the sender did not provide a capability.
To send a message and not wait for a reply, a function with the
following prototype is generated:
@@ -1045,9 +1045,9 @@ rpc_error_reply (cap_t activity, cap_t target, error_t err)
{
return vg_ipc_short (VG_IPC_SEND_NONBLOCKING | VG_IPC_SEND_INLINE
| VG_IPC_SEND_INLINE_WORD1,
- ADDR_VOID, ADDR_VOID, ADDR_VOID,
- ADDR_VOID, target,
- ADDR_VOID, err, 0, ADDR_VOID);
+ VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID,
+ VG_ADDR_VOID, target,
+ VG_ADDR_VOID, err, 0, VG_ADDR_VOID);
}
#endif
diff --git a/libviengoos/viengoos/thread.h b/libviengoos/viengoos/thread.h
index 3789475..f076687 100644
--- a/libviengoos/viengoos/thread.h
+++ b/libviengoos/viengoos/thread.h
@@ -88,7 +88,7 @@ struct vg_utcb
uint64_t messenger_id;
uintptr_t inline_words[VG_MESSENGER_INLINE_WORDS];
- addr_t inline_caps[VG_MESSENGER_INLINE_CAPS];
+ vg_addr_t inline_caps[VG_MESSENGER_INLINE_CAPS];
union
{
@@ -123,19 +123,19 @@ struct vg_utcb
enum
{
/* Root of the address space. */
- THREAD_ASPACE_SLOT = 0,
+ VG_THREAD_ASPACE_SLOT = 0,
/* The activity the thread is bound to. */
- THREAD_ACTIVITY_SLOT = 1,
+ VG_THREAD_ACTIVITY_SLOT = 1,
/* The messenger to post exceptions to. */
- THREAD_EXCEPTION_MESSENGER = 2,
- /* The user thread control block. Must be a cap_page. */
- THREAD_UTCB = 3,
+ VG_THREAD_EXCEPTION_MESSENGER = 2,
+ /* The user thread control block. Must be a vg_cap_page. */
+ VG_THREAD_UTCB = 3,
/* Total number of capability slots in a thread object. This must
be a power of 2. */
- THREAD_SLOTS = 4,
+ VG_THREAD_SLOTS = 4,
};
-#define THREAD_SLOTS_LOG2 2
+#define VG_THREAD_SLOTS_LOG2 2
enum
{
@@ -176,9 +176,9 @@ enum
#ifdef RM_INTERN
struct thread;
-typedef struct thread *thread_t;
+typedef struct thread *vg_thread_t;
#else
-typedef addr_t thread_t;
+typedef vg_addr_t vg_thread_t;
#endif
#define RPC_STUB_PREFIX rm
@@ -189,7 +189,7 @@ typedef addr_t thread_t;
struct hurd_thread_exregs_in
{
uintptr_t aspace_cap_properties_flags;
- struct cap_properties aspace_cap_properties;
+ struct vg_cap_properties aspace_cap_properties;
uintptr_t sp;
uintptr_t ip;
@@ -216,19 +216,19 @@ RPC (thread_exregs, 6, 1, 4,
cap_t, exception_messenger_out)
static inline error_t
-thread_start (addr_t thread)
+vg_thread_start (vg_addr_t thread)
{
struct hurd_thread_exregs_in in;
struct hurd_thread_exregs_out out;
- return rm_thread_exregs (ADDR_VOID, thread,
+ return rm_thread_exregs (VG_ADDR_VOID, thread,
HURD_EXREGS_START | HURD_EXREGS_ABORT_IPC,
- in, ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ in, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID,
&out, NULL, NULL, NULL, NULL);
}
static inline error_t
-thread_start_sp_ip (addr_t thread, uintptr_t sp, uintptr_t ip)
+vg_thread_start_sp_ip (vg_addr_t thread, uintptr_t sp, uintptr_t ip)
{
struct hurd_thread_exregs_in in;
struct hurd_thread_exregs_out out;
@@ -236,22 +236,22 @@ thread_start_sp_ip (addr_t thread, uintptr_t sp, uintptr_t ip)
in.sp = sp;
in.ip = ip;
- return rm_thread_exregs (ADDR_VOID, thread,
+ return rm_thread_exregs (VG_ADDR_VOID, thread,
HURD_EXREGS_START | HURD_EXREGS_ABORT_IPC
| HURD_EXREGS_SET_SP_IP,
- in, ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ in, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID,
&out, NULL, NULL, NULL, NULL);
}
static inline error_t
-thread_stop (addr_t thread)
+vg_thread_stop (vg_addr_t thread)
{
struct hurd_thread_exregs_in in;
struct hurd_thread_exregs_out out;
- return rm_thread_exregs (ADDR_VOID, thread,
+ return rm_thread_exregs (VG_ADDR_VOID, thread,
HURD_EXREGS_STOP | HURD_EXREGS_ABORT_IPC,
- in, ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ in, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID,
&out, NULL, NULL, NULL, NULL);
}
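A start-up sketch using the renamed helpers; THREAD, STACK and ENTRY are hypothetical:

  /* Launch THREAD at ENTRY with its stack pointer at STACK.  */
  error_t err = vg_thread_start_sp_ip (thread, stack, entry);
  if (err)
    debug (1, "Failed to start thread: %d", err);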
@@ -271,7 +271,7 @@ static inline vg_thread_id_t
vg_myself (void)
{
vg_thread_id_t tid;
- error_t err = rm_thread_id (ADDR_VOID, ADDR_VOID, &tid);
+ error_t err = rm_thread_id (VG_ADDR_VOID, VG_ADDR_VOID, &tid);
if (err)
return vg_niltid;
return tid;
@@ -289,7 +289,7 @@ enum
/* Return a string corresponding to a message id. */
static inline const char *
-activation_method_id_string (uintptr_t id)
+vg_activation_method_id_string (uintptr_t id)
{
switch (id)
{
@@ -300,7 +300,7 @@ activation_method_id_string (uintptr_t id)
}
}
-struct activation_fault_info
+struct vg_activation_fault_info
{
union
{
@@ -309,7 +309,7 @@ struct activation_fault_info
/* Type of access. */
uintptr_t access: 3;
/* Type of object that was attempting to be accessed. */
- uintptr_t type : CAP_TYPE_BITS;
+ uintptr_t type : VG_CAP_TYPE_BITS;
/* Whether the page was discarded. */
uintptr_t discarded : 1;
};
@@ -317,20 +317,20 @@ struct activation_fault_info
};
};
-#define ACTIVATION_FAULT_INFO_FMT "%c%c%c %s%s"
-#define ACTIVATION_FAULT_INFO_PRINTF(info) \
+#define VG_ACTIVATION_FAULT_INFO_FMT "%c%c%c %s%s"
+#define VG_ACTIVATION_FAULT_INFO_PRINTF(info) \
((info).access & L4_FPAGE_READABLE ? 'r' : '~'), \
((info).access & L4_FPAGE_WRITABLE ? 'w' : '~'), \
((info).access & L4_FPAGE_EXECUTABLE ? 'x' : '~'), \
- cap_type_string ((info).type), \
+ vg_cap_type_string ((info).type), \
   ((info).discarded) ? " discarded" : ""
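A logging sketch (not from the patch) with the renamed format macros; s_printf is the low-level printf used elsewhere in the tree:

  /* Print a zeroed fault descriptor (sketch only).  */
  struct vg_activation_fault_info info;
  __builtin_memset (&info, 0, sizeof info);
  s_printf ("fault: " VG_ACTIVATION_FAULT_INFO_FMT "\n",
            VG_ACTIVATION_FAULT_INFO_PRINTF (info));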
/* Raise a fault at address FAULT_ADDRESS. If IP is not 0, then IP is
the value of the IP of the faulting thread at the time of the fault
and SP the value of the stack pointer at the time of the fault. */
RPC (fault, 4, 0, 0,
- addr_t, fault_address, uintptr_t, sp, uintptr_t, ip,
- struct activation_fault_info, activation_fault_info)
+ vg_addr_t, fault_address, uintptr_t, sp, uintptr_t, ip,
+ struct vg_activation_fault_info, vg_activation_fault_info)
#undef RPC_STUB_PREFIX
#undef RPC_ID_PREFIX
diff --git a/newlib/addon/newlib/libc/sys/hurd/getreent.c b/newlib/addon/newlib/libc/sys/hurd/getreent.c
index ac2edd9..d2e41f6 100644
--- a/newlib/addon/newlib/libc/sys/hurd/getreent.c
+++ b/newlib/addon/newlib/libc/sys/hurd/getreent.c
@@ -25,13 +25,13 @@
static error_t
slab_alloc (void *hook, size_t size, void **ptr)
{
- struct storage storage = storage_alloc (ADDR_VOID, cap_page,
+ struct storage storage = storage_alloc (VG_ADDR_VOID, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT,
- ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT,
+ VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
- *ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ *ptr = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
return 0;
}
@@ -41,7 +41,7 @@ slab_dealloc (void *hook, void *buffer, size_t size)
{
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
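
The hunk above is the canonical page-granularity allocation pattern
under the new names. Distilled into a standalone pair (a sketch; the
helper names are invented, and the libhurd-mm storage/addr interfaces
are assumed as used above):

    /* Allocate one page of long-lived storage and return a pointer
       to its contents.  */
    static void *
    page_alloc (void)
    {
      struct storage storage = storage_alloc (VG_ADDR_VOID, vg_cap_page,
                                              STORAGE_LONG_LIVED,
                                              VG_OBJECT_POLICY_DEFAULT,
                                              VG_ADDR_VOID);
      if (VG_ADDR_IS_VOID (storage.addr))
        return NULL;
      /* Extend the object address by PAGESIZE_LOG2 bits to name the
         page's first byte.  */
      return VG_ADDR_TO_PTR (vg_addr_extend (storage.addr,
                                             0, PAGESIZE_LOG2));
    }

    /* Release a page allocated with page_alloc.  */
    static void
    page_free (void *buffer)
    {
      storage_free (vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2),
                    false);
    }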
diff --git a/newlib/addon/newlib/libc/sys/hurd/pipefile.c b/newlib/addon/newlib/libc/sys/hurd/pipefile.c
index f34d1a9..d6f0966 100644
--- a/newlib/addon/newlib/libc/sys/hurd/pipefile.c
+++ b/newlib/addon/newlib/libc/sys/hurd/pipefile.c
@@ -15,7 +15,7 @@ static _ssize_t
pipe_pread (struct _fd *fd, void *buf, size_t size, off_t offset)
{
struct io_buffer buffer;
- rm_read (ADDR_VOID, ADDR_VOID, size, &buffer);
+ rm_read (VG_ADDR_VOID, VG_ADDR_VOID, size, &buffer);
memcpy (buf, buffer.data, buffer.len);
return buffer.len;
@@ -34,7 +34,7 @@ io_buffer_flush (struct io_buffer *buffer)
if (buffer->len == 0)
return;
- rm_write (ADDR_VOID, ADDR_VOID, *buffer);
+ rm_write (VG_ADDR_VOID, VG_ADDR_VOID, *buffer);
buffer->len = 0;
}
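
Taken together, the two hunks give the round-trip pattern for the
renamed rm_read/rm_write stubs. A sketch (the echo helper is invented;
error handling is elided, as it is above):

    static void
    echo (size_t size)
    {
      struct io_buffer buffer;
      rm_read (VG_ADDR_VOID, VG_ADDR_VOID, size, &buffer);
      if (buffer.len > 0)
        rm_write (VG_ADDR_VOID, VG_ADDR_VOID, buffer);
    }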
diff --git a/ruth/ruth.c b/ruth/ruth.c
index 656a25e..b169cc6 100644
--- a/ruth/ruth.c
+++ b/ruth/ruth.c
@@ -49,7 +49,7 @@
extern int output_debug;
-static addr_t activity;
+static vg_addr_t activity;
/* Initialized by the machine-specific startup-code. */
extern struct hurd_startup_data *__hurd_startup_data;
@@ -78,37 +78,37 @@ main (int argc, char *argv[])
{
printf ("Checking shadow page tables... ");
- int visit (addr_t addr,
- l4_word_t type, struct cap_properties properties,
+ int visit (vg_addr_t addr,
+ l4_word_t type, struct vg_cap_properties properties,
bool writable,
void *cookie)
{
- struct cap cap = as_cap_lookup (addr, -1, NULL);
+ struct vg_cap vg_cap = as_cap_lookup (addr, -1, NULL);
- assert (type == cap.type);
- if (type == cap_cappage || type == cap_rcappage || type == cap_folio)
+ assert (type == vg_cap.type);
+ if (type == vg_cap_cappage || type == vg_cap_rcappage || type == vg_cap_folio)
{
- if (! cap.shadow)
+ if (! vg_cap.shadow)
as_dump_path (addr);
- assertx (cap.shadow,
- ADDR_FMT ", %s",
- ADDR_PRINTF (addr), cap_type_string (type));
+ assertx (vg_cap.shadow,
+ VG_ADDR_FMT ", %s",
+ VG_ADDR_PRINTF (addr), vg_cap_type_string (type));
}
else
{
- if (cap.shadow)
+ if (vg_cap.shadow)
as_dump_path (addr);
- assertx (! cap.shadow, ADDR_FMT ": " CAP_FMT " (%p)",
- ADDR_PRINTF (addr), CAP_PRINTF (&cap), cap.shadow);
+ assertx (! vg_cap.shadow, VG_ADDR_FMT ": " VG_CAP_FMT " (%p)",
+ VG_ADDR_PRINTF (addr), VG_CAP_PRINTF (&vg_cap), vg_cap.shadow);
}
- if (type == cap_folio)
+ if (type == vg_cap_folio)
return -1;
return 0;
}
- as_walk (visit, ~(1 << cap_void), NULL);
+ as_walk (visit, ~(1 << vg_cap_void), NULL);
printf ("ok.\n");
}
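
The same walk supports simpler visitors. A sketch under the renamed
types (the counting is illustrative; returning 0 continues the walk,
and the folio case above returns -1):

    static int
    count_pages (vg_addr_t addr, l4_word_t type,
                 struct vg_cap_properties properties,
                 bool writable, void *cookie)
    {
      if (type == vg_cap_page)
        ++ * (int *) cookie;
      return 0;
    }

    /* Visit every non-void capability:
         int n = 0;
         as_walk (count_pages, ~(1 << vg_cap_void), &n);  */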
@@ -116,33 +116,33 @@ main (int argc, char *argv[])
printf ("Checking folio_object_alloc... ");
- addr_t folio = capalloc ();
- assert (! ADDR_IS_VOID (folio));
- error_t err = rm_folio_alloc (activity, activity, FOLIO_POLICY_DEFAULT,
+ vg_addr_t folio = capalloc ();
+ assert (! VG_ADDR_IS_VOID (folio));
+ error_t err = rm_folio_alloc (activity, activity, VG_FOLIO_POLICY_DEFAULT,
&folio);
assert (! err);
- assert (! ADDR_IS_VOID (folio));
+ assert (! VG_ADDR_IS_VOID (folio));
int i;
for (i = -10; i < 129; i ++)
{
- addr_t addr = capalloc ();
- if (ADDR_IS_VOID (addr))
+ vg_addr_t addr = capalloc ();
+ if (VG_ADDR_IS_VOID (addr))
panic ("capalloc");
- err = rm_folio_object_alloc (activity, folio, i, cap_page,
- OBJECT_POLICY_DEFAULT, 0,
+ err = rm_folio_object_alloc (activity, folio, i, vg_cap_page,
+ VG_OBJECT_POLICY_DEFAULT, 0,
&addr, NULL);
- assert ((err == 0) == (0 <= i && i < FOLIO_OBJECTS));
- assert (! ADDR_IS_VOID (addr));
+ assert ((err == 0) == (0 <= i && i < VG_FOLIO_OBJECTS));
+ assert (! VG_ADDR_IS_VOID (addr));
- if (0 <= i && i < FOLIO_OBJECTS)
+ if (0 <= i && i < VG_FOLIO_OBJECTS)
{
l4_word_t type;
- struct cap_properties properties;
- err = rm_cap_read (activity, ADDR_VOID, addr, &type, &properties);
+ struct vg_cap_properties properties;
+ err = rm_cap_read (activity, VG_ADDR_VOID, addr, &type, &properties);
assert (! err);
- assert (type == cap_page);
+ assert (type == vg_cap_page);
}
capfree (addr);
}
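
For readability, the core of this test with the diff noise stripped,
in its post-rename form (same calls as above; allocating object 0 of
the new folio as a page and reading its capability back):

    vg_addr_t folio = capalloc ();
    error_t err = rm_folio_alloc (activity, activity,
                                  VG_FOLIO_POLICY_DEFAULT, &folio);
    assert (! err);

    vg_addr_t addr = capalloc ();
    err = rm_folio_object_alloc (activity, folio, 0, vg_cap_page,
                                 VG_OBJECT_POLICY_DEFAULT, 0,
                                 &addr, NULL);
    assert (! err);

    l4_word_t type;
    struct vg_cap_properties properties;
    err = rm_cap_read (activity, VG_ADDR_VOID, addr, &type, &properties);
    assert (! err);
    assert (type == vg_cap_page);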
@@ -160,51 +160,51 @@ main (int argc, char *argv[])
/* We allocate a sub-tree and fill it with folios (specifically,
2^bits folios). */
int bits = 2;
- addr_t root = as_alloc (bits + FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2,
+ vg_addr_t root = as_alloc (bits + VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2,
1, true);
- assert (! ADDR_IS_VOID (root));
+ assert (! VG_ADDR_IS_VOID (root));
int i;
for (i = 0; i < (1 << bits); i ++)
{
struct storage shadow_storage
- = storage_alloc (activity, cap_page, STORAGE_EPHEMERAL,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- struct object *shadow = ADDR_TO_PTR (addr_extend (shadow_storage.addr,
+ = storage_alloc (activity, vg_cap_page, STORAGE_EPHEMERAL,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ struct object *shadow = VG_ADDR_TO_PTR (vg_addr_extend (shadow_storage.addr,
0, PAGESIZE_LOG2));
- addr_t f = addr_extend (root, i, bits);
+ vg_addr_t f = vg_addr_extend (root, i, bits);
as_ensure_use (f,
({
- slot->type = cap_folio;
- cap_set_shadow (slot, shadow);
+ slot->type = vg_cap_folio;
+ vg_cap_set_shadow (slot, shadow);
}));
error_t err = rm_folio_alloc (activity, activity,
- FOLIO_POLICY_DEFAULT, &f);
+ VG_FOLIO_POLICY_DEFAULT, &f);
assert (! err);
- assert (! ADDR_IS_VOID (f));
+ assert (! VG_ADDR_IS_VOID (f));
int j;
for (j = 0; j <= i; j ++)
{
l4_word_t type;
- struct cap_properties properties;
+ struct vg_cap_properties properties;
- error_t err = rm_cap_read (activity, ADDR_VOID,
- addr_extend (root, j, bits),
+ error_t err = rm_cap_read (activity, VG_ADDR_VOID,
+ vg_addr_extend (root, j, bits),
&type, &properties);
assert (! err);
- assert (type == cap_folio);
+ assert (type == vg_cap_folio);
- struct cap cap = as_cap_lookup (f, -1, NULL);
- assert (cap.type == cap_folio);
+ struct vg_cap vg_cap = as_cap_lookup (f, -1, NULL);
+ assert (vg_cap.type == vg_cap_folio);
}
}
for (i = 0; i < (1 << bits); i ++)
{
- addr_t f = addr_extend (root, i, bits);
+ vg_addr_t f = vg_addr_extend (root, i, bits);
error_t err = rm_folio_free (activity, f);
assert (! err);
@@ -213,15 +213,15 @@ main (int argc, char *argv[])
bool ret = as_slot_lookup_use
(f,
({
- assert (slot->type == cap_folio);
- slot->type = cap_void;
+ assert (slot->type == vg_cap_folio);
+ slot->type = vg_cap_void;
- shadow = cap_get_shadow (slot);
+ shadow = vg_cap_get_shadow (slot);
}));
assert (ret);
assert (shadow);
- storage_free (addr_chop (PTR_TO_ADDR (shadow), PAGESIZE_LOG2), 1);
+ storage_free (vg_addr_chop (VG_PTR_TO_ADDR (shadow), PAGESIZE_LOG2), 1);
}
as_free (root, 1);
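
The slot set-up in the middle of this hunk is easy to lose in the
rename churn. Post-patch, installing a folio capability with an
attached shadow object reads (extracted from the pluses above):

    struct storage shadow_storage
      = storage_alloc (activity, vg_cap_page, STORAGE_EPHEMERAL,
                       VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
    struct object *shadow
      = VG_ADDR_TO_PTR (vg_addr_extend (shadow_storage.addr,
                                        0, PAGESIZE_LOG2));

    vg_addr_t f = vg_addr_extend (root, i, bits);
    as_ensure_use (f,
                   ({
                     slot->type = vg_cap_folio;
                     vg_cap_set_shadow (slot, shadow);
                   }));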
@@ -233,26 +233,26 @@ main (int argc, char *argv[])
{
printf ("Checking storage_alloc... ");
- const int n = 4 * FOLIO_OBJECTS;
- addr_t storage[n];
+ const int n = 4 * VG_FOLIO_OBJECTS;
+ vg_addr_t storage[n];
int i;
for (i = 0; i < n; i ++)
{
- storage[i] = storage_alloc (activity, cap_page,
+ storage[i] = storage_alloc (activity, vg_cap_page,
(i & 1) == 0
? STORAGE_LONG_LIVED
: STORAGE_EPHEMERAL,
- OBJECT_POLICY_DEFAULT,
- ADDR_VOID).addr;
- assert (! ADDR_IS_VOID (storage[i]));
- int *p = (int *) ADDR_TO_PTR (addr_extend (storage[i],
+ VG_OBJECT_POLICY_DEFAULT,
+ VG_ADDR_VOID).addr;
+ assert (! VG_ADDR_IS_VOID (storage[i]));
+ int *p = (int *) VG_ADDR_TO_PTR (vg_addr_extend (storage[i],
0, PAGESIZE_LOG2));
* (int *) p = i;
int j;
for (j = 0; j <= i; j ++)
- assert (* (int *) (ADDR_TO_PTR (addr_extend (storage[j],
+ assert (* (int *) (VG_ADDR_TO_PTR (vg_addr_extend (storage[j],
0, PAGESIZE_LOG2)))
== j);
}
@@ -330,15 +330,15 @@ main (int argc, char *argv[])
printf ("Checking thread creation... ");
- addr_t thread = capalloc ();
- debug (5, "thread: " ADDR_FMT, ADDR_PRINTF (thread));
- addr_t storage = storage_alloc (activity, cap_thread, STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, thread).addr;
+ vg_addr_t thread = capalloc ();
+ debug (5, "thread: " VG_ADDR_FMT, VG_ADDR_PRINTF (thread));
+ vg_addr_t storage = storage_alloc (activity, vg_cap_thread, STORAGE_LONG_LIVED,
+ VG_OBJECT_POLICY_DEFAULT, thread).addr;
struct hurd_thread_exregs_in in;
- in.aspace_cap_properties = CAP_PROPERTIES_DEFAULT;
- in.aspace_cap_properties_flags = CAP_COPY_COPY_SOURCE_GUARD;
+ in.aspace_cap_properties = VG_CAP_PROPERTIES_DEFAULT;
+ in.aspace_cap_properties_flags = VG_CAP_COPY_COPY_SOURCE_GUARD;
in.sp = (l4_word_t) ((void *) stack + sizeof (stack));
in.ip = (l4_word_t) &start;
@@ -349,7 +349,7 @@ main (int argc, char *argv[])
HURD_EXREGS_SET_ASPACE | HURD_EXREGS_SET_ACTIVITY
| HURD_EXREGS_SET_SP_IP | HURD_EXREGS_START
| HURD_EXREGS_ABORT_IPC,
- in, ADDR (0, 0), activity, ADDR_VOID, ADDR_VOID,
+ in, VG_ADDR (0, 0), activity, VG_ADDR_VOID, VG_ADDR_VOID,
&out, NULL, NULL, NULL, NULL);
debug (5, "Waiting for thread");
@@ -640,7 +640,7 @@ main (int argc, char *argv[])
#undef N
#define N 10
- void test (addr_t activity, addr_t folio, int depth)
+ void test (vg_addr_t activity, vg_addr_t folio, int depth)
{
error_t err;
int i;
@@ -648,9 +648,9 @@ main (int argc, char *argv[])
struct
{
- addr_t child;
- addr_t folio;
- addr_t page;
+ vg_addr_t child;
+ vg_addr_t folio;
+ vg_addr_t page;
} a[N];
for (i = 0; i < N; i ++)
@@ -658,33 +658,33 @@ main (int argc, char *argv[])
/* Allocate a new activity. */
a[i].child = capalloc ();
err = rm_folio_object_alloc (activity, folio, obj ++,
- cap_activity_control,
- OBJECT_POLICY_DEFAULT, 0,
+ vg_cap_activity_control,
+ VG_OBJECT_POLICY_DEFAULT, 0,
&a[i].child, NULL);
assert (err == 0);
- assert (! ADDR_IS_VOID (a[i].child));
+ assert (! VG_ADDR_IS_VOID (a[i].child));
/* Allocate a folio against the activity and use it. */
a[i].folio = capalloc ();
- err = rm_folio_alloc (activity, a[i].child, FOLIO_POLICY_DEFAULT,
+ err = rm_folio_alloc (activity, a[i].child, VG_FOLIO_POLICY_DEFAULT,
&a[i].folio);
assert (err == 0);
- assert (! ADDR_IS_VOID (a[i].folio));
+ assert (! VG_ADDR_IS_VOID (a[i].folio));
a[i].page = capalloc ();
- err = rm_folio_object_alloc (a[i].child, a[i].folio, 0, cap_page,
- OBJECT_POLICY_DEFAULT, 0,
+ err = rm_folio_object_alloc (a[i].child, a[i].folio, 0, vg_cap_page,
+ VG_OBJECT_POLICY_DEFAULT, 0,
&a[i].page, NULL);
assert (err == 0);
- assert (! ADDR_IS_VOID (a[i].page));
+ assert (! VG_ADDR_IS_VOID (a[i].page));
l4_word_t type;
- struct cap_properties properties;
+ struct vg_cap_properties properties;
- err = rm_cap_read (a[i].child, ADDR_VOID,
+ err = rm_cap_read (a[i].child, VG_ADDR_VOID,
a[i].page, &type, &properties);
assert (err == 0);
- assert (type == cap_page);
+ assert (type == vg_cap_page);
}
if (depth > 0)
@@ -706,8 +706,8 @@ main (int argc, char *argv[])
the object does not destroy the capability. Instead, we try to
use the object. If this fails, we assume that the folio was
destroyed. */
- err = rm_folio_object_alloc (a[i].child, a[i].folio, 1, cap_page,
- OBJECT_POLICY_DEFAULT, 0,
+ err = rm_folio_object_alloc (a[i].child, a[i].folio, 1, vg_cap_page,
+ VG_OBJECT_POLICY_DEFAULT, 0,
&a[i].page, NULL);
assert (err);
@@ -718,10 +718,10 @@ main (int argc, char *argv[])
}
error_t err;
- addr_t folio = capalloc ();
- err = rm_folio_alloc (activity, activity, FOLIO_POLICY_DEFAULT, &folio);
+ vg_addr_t folio = capalloc ();
+ err = rm_folio_alloc (activity, activity, VG_FOLIO_POLICY_DEFAULT, &folio);
assert (err == 0);
- assert (! ADDR_IS_VOID (folio));
+ assert (! VG_ADDR_IS_VOID (folio));
test (activity, folio, 2);
@@ -736,31 +736,31 @@ main (int argc, char *argv[])
{
printf ("Checking activity_policy... ");
- addr_t a = capalloc ();
- addr_t storage = storage_alloc (activity, cap_activity_control,
- STORAGE_LONG_LIVED, OBJECT_POLICY_DEFAULT,
+ vg_addr_t a = capalloc ();
+ vg_addr_t storage = storage_alloc (activity, vg_cap_activity_control,
+ STORAGE_LONG_LIVED, VG_OBJECT_POLICY_DEFAULT,
a).addr;
- addr_t weak = capalloc ();
- error_t err = rm_cap_copy (activity, ADDR_VOID, weak, ADDR_VOID, a,
- CAP_COPY_WEAKEN, CAP_PROPERTIES_VOID);
+ vg_addr_t weak = capalloc ();
+ error_t err = rm_cap_copy (activity, VG_ADDR_VOID, weak, VG_ADDR_VOID, a,
+ VG_CAP_COPY_WEAKEN, VG_CAP_PROPERTIES_VOID);
assert (! err);
struct activity_policy in, out;
in.sibling_rel.priority = 2;
in.sibling_rel.weight = 3;
- in.child_rel = ACTIVITY_MEMORY_POLICY_VOID;
+ in.child_rel = VG_ACTIVITY_MEMORY_POLICY_VOID;
in.folios = 10000;
err = rm_activity_policy (a, a,
- ACTIVITY_POLICY_SIBLING_REL_SET
- | ACTIVITY_POLICY_STORAGE_SET,
+ VG_ACTIVITY_POLICY_SIBLING_REL_SET
+ | VG_ACTIVITY_POLICY_STORAGE_SET,
in,
&out);
assert (err == 0);
err = rm_activity_policy (a, a,
- 0, ACTIVITY_POLICY_VOID,
+ 0, VG_ACTIVITY_POLICY_VOID,
&out);
assert (err == 0);
@@ -772,8 +772,8 @@ main (int argc, char *argv[])
in.sibling_rel.weight = 5;
in.folios = 10001;
err = rm_activity_policy (a, a,
- ACTIVITY_POLICY_SIBLING_REL_SET
- | ACTIVITY_POLICY_STORAGE_SET,
+ VG_ACTIVITY_POLICY_SIBLING_REL_SET
+ | VG_ACTIVITY_POLICY_STORAGE_SET,
in, &out);
assert (err == 0);
@@ -783,8 +783,8 @@ main (int argc, char *argv[])
assert (out.folios == 10000);
err = rm_activity_policy (a, weak,
- ACTIVITY_POLICY_SIBLING_REL_SET
- | ACTIVITY_POLICY_STORAGE_SET,
+ VG_ACTIVITY_POLICY_SIBLING_REL_SET
+ | VG_ACTIVITY_POLICY_STORAGE_SET,
in, &out);
assertx (err == EPERM, "%d", err);
@@ -804,7 +804,7 @@ main (int argc, char *argv[])
}
{
- printf ("Checking futex implementation... ");
+ printf ("Checking vg_futex implementation... ");
#undef N
#define N 4
@@ -881,17 +881,17 @@ main (int argc, char *argv[])
{
printf ("Checking object_reply_on_destruction... ");
- struct storage storage = storage_alloc (activity, cap_page,
+ struct storage storage = storage_alloc (activity, vg_cap_page,
STORAGE_MEDIUM_LIVED,
- OBJECT_POLICY_DEFAULT,
- ADDR_VOID);
- assert (! ADDR_IS_VOID (storage.addr));
+ VG_OBJECT_POLICY_DEFAULT,
+ VG_ADDR_VOID);
+ assert (! VG_ADDR_IS_VOID (storage.addr));
void *start (void *arg)
{
uintptr_t ret = 0;
error_t err;
- err = rm_object_reply_on_destruction (ADDR_VOID, storage.addr, &ret);
+ err = rm_object_reply_on_destruction (VG_ADDR_VOID, storage.addr, &ret);
debug (5, "object_reply_on_destruction: err: %d, ret: %d", err, ret);
assert (err == 0);
assert (ret == 10);
@@ -908,11 +908,11 @@ main (int argc, char *argv[])
/* Deallocate the object. */
debug (5, "Destroying object");
- rm_folio_object_alloc (ADDR_VOID,
- addr_chop (storage.addr, FOLIO_OBJECTS_LOG2),
- addr_extract (storage.addr, FOLIO_OBJECTS_LOG2),
- cap_void,
- OBJECT_POLICY_VOID, 10, NULL, NULL);
+ rm_folio_object_alloc (VG_ADDR_VOID,
+ vg_addr_chop (storage.addr, VG_FOLIO_OBJECTS_LOG2),
+ vg_addr_extract (storage.addr, VG_FOLIO_OBJECTS_LOG2),
+ vg_cap_void,
+ VG_OBJECT_POLICY_VOID, 10, NULL, NULL);
/* Release the memory. */
storage_free (storage.addr, true);
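
Distilled, the synchronization idiom this test exercises: one thread
blocks on the object, another destroys it, passing a return code (10)
through the destruction. Both fragments are the post-rename code from
the hunks above:

    /* Waiter: blocks until the object is destroyed; RET receives the
       destroyer's return code.  */
    uintptr_t ret = 0;
    error_t err = rm_object_reply_on_destruction (VG_ADDR_VOID,
                                                  storage.addr, &ret);
    assert (err == 0 && ret == 10);

    /* Destroyer: re-type the object's folio slot to vg_cap_void,
       supplying the return code.  */
    rm_folio_object_alloc (VG_ADDR_VOID,
                           vg_addr_chop (storage.addr,
                                         VG_FOLIO_OBJECTS_LOG2),
                           vg_addr_extract (storage.addr,
                                            VG_FOLIO_OBJECTS_LOG2),
                           vg_cap_void,
                           VG_OBJECT_POLICY_VOID, 10, NULL, NULL);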
@@ -932,7 +932,7 @@ main (int argc, char *argv[])
bool fill (struct anonymous_pager *anon,
uintptr_t offset, uintptr_t count,
void *pages[],
- struct activation_fault_info info)
+ struct vg_activation_fault_info info)
{
assert (count == 1);
@@ -946,8 +946,8 @@ main (int argc, char *argv[])
void *addr;
struct anonymous_pager *pager
- = anonymous_pager_alloc (ADDR_VOID, NULL, s, MAP_ACCESS_ALL,
- OBJECT_POLICY_DEFAULT, 0,
+ = anonymous_pager_alloc (VG_ADDR_VOID, NULL, s, MAP_ACCESS_ALL,
+ VG_OBJECT_POLICY_DEFAULT, 0,
fill, &addr);
assert (pager);
@@ -965,7 +965,7 @@ main (int argc, char *argv[])
bool fill (struct anonymous_pager *anon,
uintptr_t offset, uintptr_t count,
void *pages[],
- struct activation_fault_info info)
+ struct vg_activation_fault_info info)
{
assert (count == 1);
@@ -981,7 +981,7 @@ main (int argc, char *argv[])
do
{
struct activity_info info;
- error_t err = rm_activity_info (ADDR_VOID, activity,
+ error_t err = rm_activity_info (VG_ADDR_VOID, activity,
activity_info_stats, 1, &info);
assert_perror (err);
assert (info.stats.count >= 1);
@@ -999,8 +999,8 @@ main (int argc, char *argv[])
void *addr;
struct anonymous_pager *pager
- = anonymous_pager_alloc (ADDR_VOID, NULL, goal * PAGESIZE, MAP_ACCESS_ALL,
- OBJECT_POLICY (true, OBJECT_PRIORITY_DEFAULT), 0,
+ = anonymous_pager_alloc (VG_ADDR_VOID, NULL, goal * PAGESIZE, MAP_ACCESS_ALL,
+ VG_OBJECT_POLICY (true, VG_OBJECT_PRIORITY_DEFAULT), 0,
fill, &addr);
assert (pager);
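
The anonymous-pager interface after the rename, distilled from this
hunk (S is the region size; that returning true from FILL signals
success is an assumption, as the hunk does not show the return):

    bool fill (struct anonymous_pager *anon,
               uintptr_t offset, uintptr_t count,
               void *pages[], struct vg_activation_fault_info info)
    {
      /* Provide content for the COUNT pages in PAGES.  */
      return true;
    }

    void *addr;
    struct anonymous_pager *pager
      = anonymous_pager_alloc (VG_ADDR_VOID, NULL, s, MAP_ACCESS_ALL,
                               VG_OBJECT_POLICY_DEFAULT, 0,
                               fill, &addr);
    assert (pager);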
@@ -1023,18 +1023,18 @@ main (int argc, char *argv[])
{
printf ("Checking deallocation... ");
- addr_t addr = as_alloc (PAGESIZE_LOG2, 1, true);
- assert (! ADDR_IS_VOID (addr));
+ vg_addr_t addr = as_alloc (PAGESIZE_LOG2, 1, true);
+ assert (! VG_ADDR_IS_VOID (addr));
as_ensure (addr);
- addr_t storage = storage_alloc (activity, cap_page,
+ vg_addr_t storage = storage_alloc (activity, vg_cap_page,
STORAGE_MEDIUM_LIVED,
- OBJECT_POLICY_DEFAULT,
+ VG_OBJECT_POLICY_DEFAULT,
addr).addr;
- assert (! ADDR_IS_VOID (storage));
+ assert (! VG_ADDR_IS_VOID (storage));
- int *buffer = ADDR_TO_PTR (addr_extend (addr, 0, PAGESIZE_LOG2));
+ int *buffer = VG_ADDR_TO_PTR (vg_addr_extend (addr, 0, PAGESIZE_LOG2));
debug (5, "Writing before dealloc...");
*buffer = 0;
diff --git a/viengoos/activity.c b/viengoos/activity.c
index 220b362..36e9096 100644
--- a/viengoos/activity.c
+++ b/viengoos/activity.c
@@ -35,11 +35,11 @@ void
activity_create (struct activity *parent,
struct activity *child)
{
- struct object *old_parent = cap_to_object (parent, &child->parent_cap);
+ struct object *old_parent = vg_cap_to_object (parent, &child->parent_cap);
if (old_parent)
/* CHILD is live. Destroy it first. */
{
- assert (object_type (old_parent) == cap_activity_control);
+ assert (object_type (old_parent) == vg_cap_activity_control);
activity_destroy (parent, child);
}
@@ -54,15 +54,15 @@ activity_create (struct activity *parent,
/* Connect to PARENT's activity list. */
child->sibling_next_cap = parent->children_cap;
- child->sibling_prev_cap.type = cap_void;
+ child->sibling_prev_cap.type = vg_cap_void;
parent->children_cap = object_to_cap ((struct object *) child);
- struct object *old_head = cap_to_object (parent, &child->sibling_next_cap);
+ struct object *old_head = vg_cap_to_object (parent, &child->sibling_next_cap);
if (old_head)
{
- assert (object_type (old_head) == cap_activity_control);
+ assert (object_type (old_head) == vg_cap_activity_control);
/* The old head's previous pointer should be NULL. */
- assert (! cap_to_object
+ assert (! vg_cap_to_object
(parent, &((struct activity *) old_head)->sibling_prev_cap));
((struct activity *) old_head)->sibling_prev_cap
@@ -78,8 +78,8 @@ activity_destroy (struct activity *activity, struct activity *victim)
debug (0, "Destroying activity " OBJECT_NAME_FMT,
OBJECT_NAME_PRINTF ((struct object *) victim));
- assert (object_type ((struct object *) activity) == cap_activity_control);
- assert (object_type ((struct object *) victim) == cap_activity_control);
+ assert (object_type ((struct object *) activity) == vg_cap_activity_control);
+ assert (object_type ((struct object *) victim) == vg_cap_activity_control);
profile_stats_dump ();
@@ -109,28 +109,28 @@ activity_destroy (struct activity *activity, struct activity *victim)
/* Destroy all folios allocated to this activity. */
struct object *o;
- while ((o = cap_to_object (activity, &victim->folios)))
+ while ((o = vg_cap_to_object (activity, &victim->folios)))
{
/* If O was destroyed, it should have been removed from its
respective activity's allocation list. */
assert (o);
struct object_desc *desc = object_to_object_desc (o);
- assert (desc->type == cap_folio);
+ assert (desc->type == vg_cap_folio);
folio_free (activity, (struct folio *) o);
}
/* Activities that are sub-activities of ACTIVITY are not
necessarily allocated out of storage allocated to ACTIVITY. */
- while ((o = cap_to_object (activity, &victim->children_cap)))
+ while ((o = vg_cap_to_object (activity, &victim->children_cap)))
{
/* If O was destroyed, it should have been removed from its
respective activity's allocation list. */
assert (o);
struct object_desc *desc = object_to_object_desc (o);
- assert (desc->type == cap_activity_control);
+ assert (desc->type == vg_cap_activity_control);
object_free (activity, o);
}
@@ -142,7 +142,7 @@ activity_destroy (struct activity *activity, struct activity *victim)
/* Make ACTIVE objects inactive. */
int i;
- for (i = OBJECT_PRIORITY_MIN; i <= OBJECT_PRIORITY_MAX; i ++)
+ for (i = VG_OBJECT_PRIORITY_MIN; i <= VG_OBJECT_PRIORITY_MAX; i ++)
{
for (desc = activity_list_head (&victim->frames[i].active);
desc; desc = activity_list_next (desc))
@@ -154,19 +154,19 @@ activity_destroy (struct activity *activity, struct activity *victim)
assert (desc->policy.priority == i);
desc->age = 0;
- desc->policy.priority = OBJECT_PRIORITY_MIN;
+ desc->policy.priority = VG_OBJECT_PRIORITY_MIN;
desc->activity = victim->parent;
count ++;
}
activity_list_join
- (&victim->parent->frames[OBJECT_PRIORITY_MIN].inactive,
+ (&victim->parent->frames[VG_OBJECT_PRIORITY_MIN].inactive,
&victim->frames[i].active);
}
/* Move inactive objects to the head of VICTIM->PARENT's appropriate
inactive list (thereby making them the first eviction
candidates). */
- for (i = OBJECT_PRIORITY_MIN; i <= OBJECT_PRIORITY_MAX; i ++)
+ for (i = VG_OBJECT_PRIORITY_MIN; i <= VG_OBJECT_PRIORITY_MAX; i ++)
{
for (desc = activity_list_head (&victim->frames[i].inactive);
desc; desc = activity_list_next (desc))
@@ -178,11 +178,11 @@ activity_destroy (struct activity *activity, struct activity *victim)
assert (desc->policy.priority == i);
desc->activity = victim->parent;
- desc->policy.priority = OBJECT_PRIORITY_MIN;
+ desc->policy.priority = VG_OBJECT_PRIORITY_MIN;
count ++;
}
activity_list_join
- (&victim->parent->frames[OBJECT_PRIORITY_MIN].inactive,
+ (&victim->parent->frames[VG_OBJECT_PRIORITY_MIN].inactive,
&victim->frames[i].inactive);
}
@@ -212,8 +212,8 @@ activity_destroy (struct activity *activity, struct activity *victim)
vdesc = object_to_object_desc ((struct object *) victim);
assertx (desc->activity == victim,
- OID_FMT " != " OID_FMT,
- OID_PRINTF (adesc->oid), OID_PRINTF (vdesc->oid));
+ VG_OID_FMT " != " VG_OID_FMT,
+ VG_OID_PRINTF (adesc->oid), VG_OID_PRINTF (vdesc->oid));
#endif
assert (list_node_attached (&desc->laundry_node));
assert (desc->dirty && !desc->policy.discardable);
@@ -243,18 +243,18 @@ activity_destroy (struct activity *activity, struct activity *victim)
/* Remove from parent's activity list. */
struct activity *parent = victim->parent;
assert ((struct object *) parent
- == cap_to_object (activity, &victim->parent_cap));
+ == vg_cap_to_object (activity, &victim->parent_cap));
- struct object *prev_object = cap_to_object (activity,
+ struct object *prev_object = vg_cap_to_object (activity,
&victim->sibling_prev_cap);
assert (! prev_object
- || object_to_object_desc (prev_object)->type == cap_activity_control);
+ || object_to_object_desc (prev_object)->type == vg_cap_activity_control);
struct activity *prev = (struct activity *) prev_object;
- struct object *next_object = cap_to_object (activity,
+ struct object *next_object = vg_cap_to_object (activity,
&victim->sibling_next_cap);
assert (! next_object
- || object_to_object_desc (next_object)->type == cap_activity_control);
+ || object_to_object_desc (next_object)->type == vg_cap_activity_control);
struct activity *next = (struct activity *) next_object;
if (prev)
@@ -262,7 +262,7 @@ activity_destroy (struct activity *activity, struct activity *victim)
else
/* VICTIM is the head of PARENT's child list. */
{
- assert (cap_to_object (activity, &parent->children_cap)
+ assert (vg_cap_to_object (activity, &parent->children_cap)
== (struct object *) victim);
parent->children_cap = victim->sibling_next_cap;
}
@@ -270,15 +270,15 @@ activity_destroy (struct activity *activity, struct activity *victim)
if (next)
next->sibling_prev_cap = victim->sibling_prev_cap;
- victim->sibling_next_cap.type = cap_void;
- victim->sibling_prev_cap.type = cap_void;
+ victim->sibling_next_cap.type = vg_cap_void;
+ victim->sibling_prev_cap.type = vg_cap_void;
}
void
activity_prepare (struct activity *principal, struct activity *activity)
{
/* Lookup parent. */
- activity->parent = (struct activity *) cap_to_object (principal,
+ activity->parent = (struct activity *) vg_cap_to_object (principal,
&activity->parent_cap);
assert (activity->parent);
@@ -309,7 +309,7 @@ activity_prepare (struct activity *principal, struct activity *activity)
#ifndef NDEBUG
activity_children_list_init (&activity->children, "activity->children");
- static const char *names[OBJECT_PRIORITY_LEVELS]
+ static const char *names[VG_OBJECT_PRIORITY_LEVELS]
= { "inactive-64", "inactive-63", "inactive-62", "inactive-61",
"inactive-60", "inactive-59", "inactive-58", "inactive-57",
"inactive-56", "inactive-55", "inactive-54", "inactive-53",
@@ -344,12 +344,12 @@ activity_prepare (struct activity *principal, struct activity *activity)
"inactive60", "inactive61", "inactive62", "inactive63" };
int i;
- for (i = OBJECT_PRIORITY_MIN; i <= OBJECT_PRIORITY_MAX; i ++)
+ for (i = VG_OBJECT_PRIORITY_MIN; i <= VG_OBJECT_PRIORITY_MAX; i ++)
{
activity_list_init (&activity->frames[i].active,
- &names[i - OBJECT_PRIORITY_MIN][2]);
+ &names[i - VG_OBJECT_PRIORITY_MIN][2]);
activity_list_init (&activity->frames[i].inactive,
- names[i - OBJECT_PRIORITY_MIN]);
+ names[i - VG_OBJECT_PRIORITY_MIN]);
}
eviction_list_init (&activity->eviction_clean, "evict clean");
eviction_list_init (&activity->eviction_dirty, "evict dirty");
@@ -363,7 +363,7 @@ activity_deprepare (struct activity *principal, struct activity *victim)
paged out. */
assert (! activity_children_list_head (&victim->children));
int i;
- for (i = OBJECT_PRIORITY_MIN; i <= OBJECT_PRIORITY_MAX; i ++)
+ for (i = VG_OBJECT_PRIORITY_MIN; i <= VG_OBJECT_PRIORITY_MAX; i ++)
{
assert (! activity_list_count (&victim->frames[i].active));
assert (! activity_list_count (&victim->frames[i].inactive));
@@ -441,7 +441,7 @@ do_activity_dump (struct activity *activity, int indent)
int inactive = 0;
int i;
- for (i = OBJECT_PRIORITY_MIN; i <= OBJECT_PRIORITY_MAX; i ++)
+ for (i = VG_OBJECT_PRIORITY_MIN; i <= VG_OBJECT_PRIORITY_MAX; i ++)
{
active += activity_list_count (&activity->frames[i].active);
inactive += activity_list_count (&activity->frames[i].inactive);
diff --git a/viengoos/activity.h b/viengoos/activity.h
index 745486f..0bbb62d 100644
--- a/viengoos/activity.h
+++ b/viengoos/activity.h
@@ -38,18 +38,18 @@ struct activity
/* On-disk data. */
/* Parent activity. */
- struct cap parent_cap;
+ struct vg_cap parent_cap;
/* List of child activities (if any). Threaded via
SIBLING_NEXT. */
- struct cap children_cap;
+ struct vg_cap children_cap;
/* This activity's siblings. */
- struct cap sibling_next_cap;
- struct cap sibling_prev_cap;
+ struct vg_cap sibling_next_cap;
+ struct vg_cap sibling_prev_cap;
/* Head of the linked list of folios allocated to this activity. */
- struct cap folios;
+ struct vg_cap folios;
/* Policy. */
struct activity_policy policy;
@@ -77,12 +77,12 @@ struct activity
{
struct activity_list active;
struct activity_list inactive;
- } frames_[-OBJECT_PRIORITY_MIN];
+ } frames_[-VG_OBJECT_PRIORITY_MIN];
struct
{
struct activity_list active;
struct activity_list inactive;
- } frames[OBJECT_PRIORITY_MAX + 1];
+ } frames[VG_OBJECT_PRIORITY_MAX + 1];
/* Objects that are owned by this activity and have been selected
for eviction (DESC->EVICTION_CANDIDATE is true). These objects
@@ -248,22 +248,22 @@ activity_charge (struct activity *activity, int objects)
/* For each child of ACTIVITY, set to CHILD and execute code. The
caller may destroy CHILD, however, it may not destroy any siblings.
- Be careful of deadlock: this function calls cap_to_object, which
+ Be careful of deadlock: this function calls vg_cap_to_object, which
calls object_find, which may take LRU_LOCK. */
#define activity_for_each_child(__fec_activity, __fec_child, __fec_code) \
do { \
__fec_child \
- = (struct activity *) cap_to_object ((__fec_activity), \
+ = (struct activity *) vg_cap_to_object ((__fec_activity), \
&(__fec_activity)->children_cap); \
while (__fec_child) \
{ \
/* Grab the next child in case this child is destroyed. */ \
- struct cap __fec_next = __fec_child->sibling_next_cap; \
+ struct vg_cap __fec_next = __fec_child->sibling_next_cap; \
\
__fec_code; \
\
/* Fetch the next child. */ \
- __fec_child = (struct activity *) cap_to_object ((__fec_activity), \
+ __fec_child = (struct activity *) vg_cap_to_object ((__fec_activity), \
&__fec_next); \
if (! __fec_child) \
break; \
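
A usage sketch for the macro (the body is illustrative; as the comment
above warns, the iteration calls vg_cap_to_object, so beware LRU_LOCK):

    struct activity *child;
    activity_for_each_child (activity, child,
                             activity_charge (child, 1));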
diff --git a/viengoos/ager.c b/viengoos/ager.c
index 1e08fec..d44eafc 100644
--- a/viengoos/ager.c
+++ b/viengoos/ager.c
@@ -482,8 +482,8 @@ ager_loop (void)
continue;
assertx (desc->activity,
- "OID: " OID_FMT " (%s), age: %d",
- OID_PRINTF (desc->oid), cap_type_string (desc->type),
+ "OID: " VG_OID_FMT " (%s), age: %d",
+ VG_OID_PRINTF (desc->oid), vg_cap_type_string (desc->type),
desc->age);
descs[count] = desc;
@@ -495,7 +495,7 @@ ager_loop (void)
/* We periodically unmap shared frames and mark them as
floating. See above for details. */
{
- if (desc->type == cap_page)
+ if (desc->type == vg_cap_page)
/* We only unmap the object if it is a page. No
other objects are actually mapped to users. */
{
@@ -551,7 +551,7 @@ ager_loop (void)
int j = 0;
l4_fpage_t unmap[count];
for (i = 0; i < count; i ++)
- if (descs[i]->shared && descs[i]->type == cap_page)
+ if (descs[i]->shared && descs[i]->type == vg_cap_page)
unmap[j ++]
= l4_fpage_add_rights (fpages[i],
L4_FPAGE_FULLY_ACCESSIBLE);
@@ -562,7 +562,7 @@ ager_loop (void)
/* Bitwise or the status bits. */
j = 0;
for (i = 0; i < count; i ++)
- if (descs[i]->shared && descs[i]->type == cap_page)
+ if (descs[i]->shared && descs[i]->type == vg_cap_page)
fpages[i] = l4_fpage_add_rights (fpages[i],
l4_rights (unmap[j ++]));
}
diff --git a/viengoos/cap.c b/viengoos/cap.c
index b765ae3..fcd92d8 100644
--- a/viengoos/cap.c
+++ b/viengoos/cap.c
@@ -27,21 +27,21 @@
#include "activity.h"
#include "thread.h"
-const int cap_type_num_slots[] = { [cap_void] = 0,
- [cap_page] = 0,
- [cap_rpage] = 0,
- [cap_cappage] = CAPPAGE_SLOTS,
- [cap_rcappage] = CAPPAGE_SLOTS,
- [cap_folio] = 0,
- [cap_activity] = 0,
- [cap_activity_control] = 0,
- [cap_thread] = THREAD_SLOTS };
+const int cap_type_num_slots[] = { [vg_cap_void] = 0,
+ [vg_cap_page] = 0,
+ [vg_cap_rpage] = 0,
+ [vg_cap_cappage] = VG_CAPPAGE_SLOTS,
+ [vg_cap_rcappage] = VG_CAPPAGE_SLOTS,
+ [vg_cap_folio] = 0,
+ [vg_cap_activity] = 0,
+ [vg_cap_activity_control] = 0,
+ [vg_cap_thread] = VG_THREAD_SLOTS };
static struct object *
-cap_to_object_internal (struct activity *activity, struct cap *cap,
+cap_to_object_internal (struct activity *activity, struct vg_cap *cap,
bool hard)
{
- if (cap->type == cap_void)
+ if (cap->type == vg_cap_void)
return NULL;
/* XXX: If CAP does not grant write access, then we need to flatten
@@ -49,10 +49,10 @@ cap_to_object_internal (struct activity *activity, struct cap *cap,
struct object *object;
if (hard)
{
- object = object_find (activity, cap->oid, CAP_POLICY_GET (*cap));
+ object = object_find (activity, cap->oid, VG_CAP_POLICY_GET (*cap));
}
else
- object = object_find_soft (activity, cap->oid, CAP_POLICY_GET (*cap));
+ object = object_find_soft (activity, cap->oid, VG_CAP_POLICY_GET (*cap));
if (! object)
return NULL;
@@ -62,54 +62,54 @@ cap_to_object_internal (struct activity *activity, struct cap *cap,
{
/* Clear the capability to save the effort of looking up the
object in the future. */
- cap->type = cap_void;
+ cap->type = vg_cap_void;
return NULL;
}
/* If the capability is valid, then the cap type and the object type
must be compatible. */
- assert (cap_types_compatible (cap->type, desc->type));
+ assert (vg_cap_types_compatible (cap->type, desc->type));
return object;
}
struct object *
-cap_to_object (struct activity *activity, struct cap *cap)
+vg_cap_to_object (struct activity *activity, struct vg_cap *cap)
{
return cap_to_object_internal (activity, cap, true);
}
struct object *
-cap_to_object_soft (struct activity *activity, struct cap *cap)
+cap_to_object_soft (struct activity *activity, struct vg_cap *cap)
{
return cap_to_object_internal (activity, cap, false);
}
void
-cap_shootdown (struct activity *activity, struct cap *root)
+cap_shootdown (struct activity *activity, struct vg_cap *root)
{
assert (activity);
/* XXX: A recursive function may not be the best idea here. We are
guaranteed, however, at most 63 nested calls. */
- void doit (struct cap *cap, int remaining)
+ void doit (struct vg_cap *cap, int remaining)
{
int i;
struct object *object;
- remaining -= CAP_GUARD_BITS (cap);
+ remaining -= VG_CAP_GUARD_BITS (cap);
switch (cap->type)
{
- case cap_page:
- case cap_rpage:
+ case vg_cap_page:
+ case vg_cap_rpage:
if (remaining < PAGESIZE_LOG2)
return;
/* If the object is not in memory, then it can't be
mapped. */
object = object_find_soft (activity, cap->oid,
- OBJECT_POLICY (cap->discardable,
+ VG_OBJECT_POLICY (cap->discardable,
cap->priority));
if (! object)
return;
@@ -119,36 +119,36 @@ cap_shootdown (struct activity *activity, struct cap *root)
{
/* Clear the capability to save the effort of looking up the
object in the future. */
- cap->type = cap_void;
+ cap->type = vg_cap_void;
return;
}
object_desc_unmap (desc);
return;
- case cap_cappage:
- case cap_rcappage:
- if (remaining < CAP_SUBPAGE_SIZE_LOG2 (cap) + PAGESIZE_LOG2)
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
+ if (remaining < VG_CAP_SUBPAGE_SIZE_LOG2 (cap) + PAGESIZE_LOG2)
return;
- object = cap_to_object (activity, cap);
+ object = vg_cap_to_object (activity, cap);
if (! object)
return;
- remaining -= CAP_SUBPAGE_SIZE_LOG2 (cap);
+ remaining -= VG_CAP_SUBPAGE_SIZE_LOG2 (cap);
- for (i = 0; i < CAP_SUBPAGE_SIZE (cap); i ++)
+ for (i = 0; i < VG_CAP_SUBPAGE_SIZE (cap); i ++)
if (root->oid != object->caps[i].oid)
doit (&object->caps[i], remaining);
return;
- case cap_messenger:
- case cap_rmessenger:
+ case vg_cap_messenger:
+ case vg_cap_rmessenger:
if (remaining < VG_MESSENGER_SLOTS_LOG2 + PAGESIZE_LOG2)
return;
- object = cap_to_object (activity, cap);
+ object = vg_cap_to_object (activity, cap);
if (! object)
return;
@@ -160,50 +160,50 @@ cap_shootdown (struct activity *activity, struct cap *root)
return;
- case cap_thread:
- if (remaining < THREAD_SLOTS_LOG2 + PAGESIZE_LOG2)
+ case vg_cap_thread:
+ if (remaining < VG_THREAD_SLOTS_LOG2 + PAGESIZE_LOG2)
return;
- object = cap_to_object (activity, cap);
+ object = vg_cap_to_object (activity, cap);
if (! object)
return;
- remaining -= THREAD_SLOTS_LOG2;
+ remaining -= VG_THREAD_SLOTS_LOG2;
- for (i = 0; i < THREAD_SLOTS_LOG2; i ++)
+ for (i = 0; i < VG_THREAD_SLOTS_LOG2; i ++)
if (root->oid != object->caps[i].oid)
doit (&object->caps[i],
remaining
- + (i == THREAD_ASPACE_SLOT ? THREAD_SLOTS_LOG2 : 0));
+ + (i == VG_THREAD_ASPACE_SLOT ? VG_THREAD_SLOTS_LOG2 : 0));
return;
- case cap_folio:
- if (remaining < FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2)
+ case vg_cap_folio:
+ if (remaining < VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2)
return;
- object = cap_to_object (activity, cap);
+ object = vg_cap_to_object (activity, cap);
if (! object)
return;
struct folio *folio = (struct folio *) object;
struct object_desc *fdesc = object_to_object_desc (object);
- oid_t foid = fdesc->oid;
+ vg_oid_t foid = fdesc->oid;
- remaining -= FOLIO_OBJECTS_LOG2;
+ remaining -= VG_FOLIO_OBJECTS_LOG2;
- for (i = 0; i < FOLIO_OBJECTS; i ++)
- if (folio_object_type (folio, i) == cap_page
- || folio_object_type (folio, i) == cap_rpage
- || folio_object_type (folio, i) == cap_cappage
- || folio_object_type (folio, i) == cap_rcappage)
+ for (i = 0; i < VG_FOLIO_OBJECTS; i ++)
+ if (vg_folio_object_type (folio, i) == vg_cap_page
+ || vg_folio_object_type (folio, i) == vg_cap_rpage
+ || vg_folio_object_type (folio, i) == vg_cap_cappage
+ || vg_folio_object_type (folio, i) == vg_cap_rcappage)
{
- struct cap cap;
+ struct vg_cap cap;
cap.version = folio_object_version (folio, i);
- cap.type = folio_object_type (folio, i);
- cap.addr_trans = CAP_ADDR_TRANS_VOID;
+ cap.type = vg_folio_object_type (folio, i);
+ cap.addr_trans = VG_CAP_ADDR_TRANS_VOID;
cap.oid = foid + 1 + i;
if (root->oid != cap.oid)
@@ -217,5 +217,5 @@ cap_shootdown (struct activity *activity, struct cap *root)
}
}
- doit (root, ADDR_BITS);
+ doit (root, VG_ADDR_BITS);
}
diff --git a/viengoos/cap.h b/viengoos/cap.h
index 609e4a1..d8da9a3 100644
--- a/viengoos/cap.h
+++ b/viengoos/cap.h
@@ -30,23 +30,25 @@ extern const int cap_type_num_slots[];
/* Sets the capability TARGET to point to the same object as the
capability SOURCE; however, it preserves the guard in TARGET. */
static inline bool
-cap_set (struct activity *activity, struct cap *target, struct cap source)
+cap_set (struct activity *activity, struct vg_cap *target, struct vg_cap source)
{
/* This is kosher as we know the implementation of CAP_COPY. */
- return cap_copy (activity,
- ADDR_VOID, target, ADDR_VOID,
- ADDR_VOID, source, ADDR_VOID);
+ return vg_cap_copy (activity,
+ VG_ADDR_VOID, target, VG_ADDR_VOID,
+ VG_ADDR_VOID, source, VG_ADDR_VOID);
}
/* Invalidate all mappings that may depend on this object. */
-extern void cap_shootdown (struct activity *activity, struct cap *cap);
+extern void cap_shootdown (struct activity *activity, struct vg_cap *cap);
/* Return the object designated by CAP, if any. */
-struct object *cap_to_object (struct activity *activity, struct cap *cap);
+struct object *vg_cap_to_object (struct activity *activity,
+ struct vg_cap *cap);
-/* Like cap_to_object but only returns the object if it is in
+/* Like vg_cap_to_object but only returns the object if it is in
memory. */
-struct object *cap_to_object_soft (struct activity *activity, struct cap *cap);
+struct object *cap_to_object_soft (struct activity *activity,
+ struct vg_cap *cap);
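
The two lookup flavors declared here pair up naturally; a sketch (the
fall-back pattern is illustrative, not taken from the patch):

    /* Prefer an already-resident object; pay for a hard lookup only
       when necessary.  */
    struct object *object = cap_to_object_soft (activity, cap);
    if (! object)
      object = vg_cap_to_object (activity, cap);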
diff --git a/viengoos/memory.c b/viengoos/memory.c
index 3f46ff4..54997ff 100644
--- a/viengoos/memory.c
+++ b/viengoos/memory.c
@@ -367,8 +367,8 @@ memory_frame_allocate (struct activity *activity)
struct object_desc *desc = available_list_head (&available);
while (desc)
{
- if (desc->type != cap_activity_control
- && desc->type != cap_thread)
+ if (desc->type != vg_cap_activity_control
+ && desc->type != vg_cap_thread)
/* We will detach DESC from AVAILABLE in
memory_object_destroy. */
break;
@@ -382,12 +382,12 @@ memory_frame_allocate (struct activity *activity)
assert (desc->eviction_candidate);
assert (desc->activity);
assert (object_type ((struct object *) desc->activity)
- == cap_activity_control);
+ == vg_cap_activity_control);
assert (! desc->dirty || desc->policy.discardable);
assert (! desc->mapped);
- debug (5, "Reusing OID " OID_FMT " (%s)",
- OID_PRINTF (desc->oid), cap_type_string (desc->type));
+ debug (5, "Reusing OID " VG_OID_FMT " (%s)",
+ VG_OID_PRINTF (desc->oid), vg_cap_type_string (desc->type));
struct object *object = object_desc_to_object (desc);
@@ -401,11 +401,11 @@ memory_frame_allocate (struct activity *activity)
ACTIVITY_STATS (desc->activity)->discarded ++;
}
- oid_t oid = desc->oid;
+ vg_oid_t oid = desc->oid;
memory_object_destroy (activity, object);
/* DESC is no longer valid. */
- assert (! object_find_soft (activity, oid, OBJECT_POLICY_DEFAULT));
+ assert (! object_find_soft (activity, oid, VG_OBJECT_POLICY_DEFAULT));
if (discarded)
/* Note that we discarded the page. */
diff --git a/viengoos/messenger.c b/viengoos/messenger.c
index 4e109a2..03bfb7e 100644
--- a/viengoos/messenger.c
+++ b/viengoos/messenger.c
@@ -44,9 +44,9 @@ messenger_load_internal (struct activity *activity,
struct vg_message *smessage,
bool may_block)
{
- assert (object_type ((struct object *) target) == cap_messenger);
+ assert (object_type ((struct object *) target) == vg_cap_messenger);
if (source)
- assert (object_type ((struct object *) source) == cap_messenger);
+ assert (object_type ((struct object *) source) == vg_cap_messenger);
if (source)
assert (! smessage);
@@ -97,16 +97,16 @@ messenger_load_internal (struct activity *activity,
void *tdata;
int data_count;
- addr_t *saddrs;
+ vg_addr_t *saddrs;
int saddr_count;
- addr_t *taddrs;
+ vg_addr_t *taddrs;
int taddr_count;
if (! source || source->out_of_band)
/* Source data is in a buffer. */
{
if (source)
- smessage = (struct vg_message *) cap_to_object (activity,
+ smessage = (struct vg_message *) vg_cap_to_object (activity,
&source->buffer);
else
assert (smessage);
@@ -143,7 +143,7 @@ messenger_load_internal (struct activity *activity,
if (target->out_of_band)
/* Target data is in a buffer. */
{
- tmessage = (struct vg_message *) cap_to_object (activity,
+ tmessage = (struct vg_message *) vg_cap_to_object (activity,
&target->buffer);
if (tmessage)
{
@@ -199,8 +199,8 @@ messenger_load_internal (struct activity *activity,
/* First get the target capability slot. */
bool twritable = true;
- struct cap *tcap = NULL;
- if (! ADDR_IS_VOID (taddrs[i]))
+ struct vg_cap *tcap = NULL;
+ if (! VG_ADDR_IS_VOID (taddrs[i]))
{
as_slot_lookup_rel_use (activity, &target->as_root, taddrs[i],
({
@@ -208,20 +208,20 @@ messenger_load_internal (struct activity *activity,
tcap = slot;
}));
if (! tcap || ! twritable)
- debug (0, DEBUG_BOLD ("Target " ADDR_FMT " does not designate "
+ debug (0, DEBUG_BOLD ("Target " VG_ADDR_FMT " does not designate "
"a %svalid slot!"),
- ADDR_PRINTF (taddrs[i]), twritable ? "writable " : "");
+ VG_ADDR_PRINTF (taddrs[i]), twritable ? "writable " : "");
}
if (likely (tcap && twritable))
/* We have a slot and it is writable. Look up the source
capability. */
{
- struct cap scap = CAP_VOID;
+ struct vg_cap scap = VG_CAP_VOID;
bool swritable = true;
if (source)
{
- if (! ADDR_IS_VOID (saddrs[i]))
+ if (! VG_ADDR_IS_VOID (saddrs[i]))
scap = as_cap_lookup_rel (activity,
&source->as_root, saddrs[i],
-1, &swritable);
@@ -230,28 +230,28 @@ messenger_load_internal (struct activity *activity,
/* This is a kernel provided buffer. In this case the
address is really a pointer to a capability. */
if ((uintptr_t) saddrs[i].raw)
- scap = * (struct cap *) (uintptr_t) saddrs[i].raw;
+ scap = * (struct vg_cap *) (uintptr_t) saddrs[i].raw;
if (! swritable)
- scap.type = cap_type_weaken (scap.type);
+ scap.type = vg_cap_type_weaken (scap.type);
/* Shoot down the capability. */
cap_shootdown (activity, tcap);
/* Preserve the address translator and policy. */
- struct cap_properties props = CAP_PROPERTIES_GET (*tcap);
+ struct vg_cap_properties props = VG_CAP_PROPERTIES_GET (*tcap);
*tcap = scap;
- CAP_PROPERTIES_SET (tcap, props);
+ VG_CAP_PROPERTIES_SET (tcap, props);
- debug (5, ADDR_FMT " <- " CAP_FMT,
- ADDR_PRINTF (taddrs[i]), CAP_PRINTF (tcap));
+ debug (5, VG_ADDR_FMT " <- " VG_CAP_FMT,
+ VG_ADDR_PRINTF (taddrs[i]), VG_CAP_PRINTF (tcap));
}
else
- taddrs[i] = ADDR_VOID;
+ taddrs[i] = VG_ADDR_VOID;
}
if (i < MAX (taddr_count, saddr_count) && target->out_of_band && taddrs)
/* Set the address of any non-transferred caps in the target to
- ADDR_VOID. */
+ VG_ADDR_VOID. */
memset (&taddrs[i], 0,
sizeof (taddrs[0]) * (MAX (taddr_count, saddr_count)) - i);
@@ -301,17 +301,17 @@ messenger_message_deliver (struct activity *activity,
assert (! messenger->wait_queue_p);
struct thread *thread
- = (struct thread *) cap_to_object (activity, &messenger->thread);
+ = (struct thread *) vg_cap_to_object (activity, &messenger->thread);
if (! thread)
{
debug (0, "Messenger has no thread to activate!");
return false;
}
- if (object_type ((struct object *) thread) != cap_thread)
+ if (object_type ((struct object *) thread) != vg_cap_thread)
{
- debug (0, "Messenger's thread cap does not designate a thread but a %s",
- cap_type_string (object_type ((struct object *) thread)));
+ debug (0, "Messenger's thread vg_cap does not designate a thread but a %s",
+ vg_cap_type_string (object_type ((struct object *) thread)));
return false;
}
diff --git a/viengoos/messenger.h b/viengoos/messenger.h
index 2ab669c..2ad0417 100644
--- a/viengoos/messenger.h
+++ b/viengoos/messenger.h
@@ -62,17 +62,17 @@ struct messenger
/* When this messenger is activated (that is, its contents are
delivered or it receives a message), THREAD is activated. This
is settable from user space. */
- struct cap thread;
+ struct vg_cap thread;
/* The root of the address space in which capability addresses
referenced in the message are resolved. */
- struct cap as_root;
+ struct vg_cap as_root;
/* The message buffer. */
- struct cap buffer;
+ struct vg_cap buffer;
/* The activity supplied by the sender of the message. */
- struct cap sender_activity;
+ struct vg_cap sender_activity;
/* Whether the data is inline or out of line. */
@@ -84,7 +84,7 @@ struct messenger
/* Inline data. */
uintptr_t inline_words[VG_MESSENGER_INLINE_WORDS];
- addr_t inline_caps[VG_MESSENGER_INLINE_CAPS];
+ vg_addr_t inline_caps[VG_MESSENGER_INLINE_CAPS];
/* The buffer's version. If USER_VERSION_MATCHING is true, a
@@ -142,8 +142,8 @@ struct messenger
{
/* We don't need versioning as we automatically collect on object
destruction. */
- oid_t next;
- oid_t prev;
+ vg_oid_t next;
+ vg_oid_t prev;
} wait_queue;
/* Whether the object is attached to a wait queue. (This is
diff --git a/viengoos/object.c b/viengoos/object.c
index 5d11edd..70f1812 100644
--- a/viengoos/object.c
+++ b/viengoos/object.c
@@ -66,8 +66,8 @@ object_init (void)
build_assert (sizeof (struct activity) <= PAGESIZE);
build_assert (sizeof (struct object) <= PAGESIZE);
build_assert (sizeof (struct thread) <= PAGESIZE);
- /* Assert that the size of a cap is a power of 2. */
- build_assert ((sizeof (struct cap) & (sizeof (struct cap) - 1)) == 0);
+ /* Assert that the size of a vg_cap is a power of 2. */
+ build_assert ((sizeof (struct vg_cap) & (sizeof (struct vg_cap) - 1)) == 0);
/* Allocate object hash. */
@@ -105,15 +105,15 @@ object_init (void)
correspond to the values stored on disk. */
static struct object *
memory_object_alloc (struct activity *activity,
- enum cap_type type,
- oid_t oid, l4_word_t version,
+ enum vg_cap_type type,
+ vg_oid_t oid, l4_word_t version,
struct object_policy policy)
{
- debug (5, "Allocating %llx(%d), %s", oid, version, cap_type_string (type));
+ debug (5, "Allocating %llx(%d), %s", oid, version, vg_cap_type_string (type));
assert (activity || ! root_activity);
- assert (type != cap_void);
- assert ((type == cap_folio) == ((oid % (FOLIO_OBJECTS + 1)) == 0));
+ assert (type != vg_cap_void);
+ assert ((type == vg_cap_folio) == ((oid % (VG_FOLIO_OBJECTS + 1)) == 0));
struct object *object = (struct object *) memory_frame_allocate (activity);
if (! object)
@@ -168,25 +168,25 @@ memory_object_destroy (struct activity *activity, struct object *object)
assert (desc->live);
- assertx (folio_object_type (objects_folio (activity, object),
- objects_folio_offset (object)) == desc->type,
- "(" OID_FMT ") %s != %s",
- OID_PRINTF (desc->oid),
- cap_type_string
- (folio_object_type (objects_folio (activity, object),
- objects_folio_offset (object))),
- cap_type_string (desc->type));
+ assertx (vg_folio_object_type (objects_folio (activity, object),
+ objects_folio_offset (object)) == desc->type,
+ "(" VG_OID_FMT ") %s != %s",
+ VG_OID_PRINTF (desc->oid),
+ vg_cap_type_string
+ (vg_folio_object_type (objects_folio (activity, object),
+ objects_folio_offset (object))),
+ vg_cap_type_string (desc->type));
debug (5, "Destroy %s at 0x%llx (object %d)",
- cap_type_string (desc->type), desc->oid,
+ vg_cap_type_string (desc->type), desc->oid,
((uintptr_t) desc - (uintptr_t) object_descs) / sizeof (*desc));
- struct cap cap = object_desc_to_cap (desc);
- cap_shootdown (activity, &cap);
+ struct vg_cap vg_cap = object_desc_to_cap (desc);
+ cap_shootdown (activity, &vg_cap);
object_desc_claim (NULL, desc, desc->policy, true);
- if (desc->type == cap_activity_control)
+ if (desc->type == vg_cap_activity_control)
{
struct activity *a = (struct activity *) object;
if (a->frames_total)
@@ -207,7 +207,7 @@ memory_object_destroy (struct activity *activity, struct object *object)
}
struct object *
-object_find_soft (struct activity *activity, oid_t oid,
+object_find_soft (struct activity *activity, vg_oid_t oid,
struct object_policy policy)
{
struct object_desc *odesc = hurd_ihash_find (&objects, oid);
@@ -217,21 +217,21 @@ object_find_soft (struct activity *activity, oid_t oid,
struct object *object = object_desc_to_object (odesc);
assert (oid == odesc->oid);
- if (oid % (FOLIO_OBJECTS + 1) != 0)
+ if (oid % (VG_FOLIO_OBJECTS + 1) != 0)
{
#ifndef NDEBUG
struct folio *folio = objects_folio (activity, object);
int i = objects_folio_offset (object);
- assertx (folio_object_type (folio, i) == odesc->type,
- "(" OID_FMT ") %s != %s",
- OID_PRINTF (oid),
- cap_type_string (folio_object_type (folio, i)),
- cap_type_string (odesc->type));
+ assertx (vg_folio_object_type (folio, i) == odesc->type,
+ "(" VG_OID_FMT ") %s != %s",
+ VG_OID_PRINTF (oid),
+ vg_cap_type_string (vg_folio_object_type (folio, i)),
+ vg_cap_type_string (odesc->type));
assertx (! folio_object_discarded (folio, i),
- OID_FMT ": %s",
- OID_PRINTF (oid),
- cap_type_string (odesc->type));
+ VG_OID_FMT ": %s",
+ VG_OID_PRINTF (oid),
+ vg_cap_type_string (odesc->type));
#endif
}
@@ -259,7 +259,7 @@ object_find_soft (struct activity *activity, oid_t oid,
}
struct object *
-object_find (struct activity *activity, oid_t oid,
+object_find (struct activity *activity, vg_oid_t oid,
struct object_policy policy)
{
struct object *obj = object_find_soft (activity, oid, policy);
@@ -268,16 +268,16 @@ object_find (struct activity *activity, oid_t oid,
struct folio *folio;
- int page = (oid % (FOLIO_OBJECTS + 1)) - 1;
+ int page = (oid % (VG_FOLIO_OBJECTS + 1)) - 1;
if (page == -1)
/* The object to find is a folio. */
{
- if (oid / (FOLIO_OBJECTS + 1) < FOLIOS_CORE)
+ if (oid / (VG_FOLIO_OBJECTS + 1) < FOLIOS_CORE)
/* It's an in-core folio. */
{
- assert (bit_test (folios, oid / (FOLIO_OBJECTS + 1)));
+ assert (bit_test (folios, oid / (VG_FOLIO_OBJECTS + 1)));
- return memory_object_alloc (activity, cap_folio, oid, 0,
+ return memory_object_alloc (activity, vg_cap_folio, oid, 0,
policy);
}
@@ -287,12 +287,12 @@ object_find (struct activity *activity, oid_t oid,
{
/* Find the folio corresponding to the object. */
folio = (struct folio *) object_find (activity, oid - page - 1,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assertx (folio,
- "Didn't find folio " OID_FMT,
- OID_PRINTF (oid - page - 1));
+ "Didn't find folio " VG_OID_FMT,
+ VG_OID_PRINTF (oid - page - 1));
- if (folio_object_type (folio, page) == cap_void)
+ if (vg_folio_object_type (folio, page) == vg_cap_void)
return NULL;
if (folio_object_discarded (folio, page))
@@ -303,7 +303,7 @@ object_find (struct activity *activity, oid_t oid,
if (! folio_object_content (folio, page))
/* The object is a zero page. No need to read anything from
backing store: just allocate a page and zero it. */
- return memory_object_alloc (activity, folio_object_type (folio, page),
+ return memory_object_alloc (activity, vg_folio_object_type (folio, page),
oid, folio_object_version (folio, page),
policy);
}
@@ -321,12 +321,12 @@ folio_parent (struct activity *activity, struct folio *folio)
assert (({
struct object_desc *desc;
desc = object_to_object_desc ((struct object *) folio);
- assert (desc->oid % (FOLIO_OBJECTS + 1) == 0);
+ assert (desc->oid % (VG_FOLIO_OBJECTS + 1) == 0);
true;
}));
- assert (! cap_to_object (activity, &folio->activity));
- assert (! cap_to_object (activity, &folio->next));
- assert (! cap_to_object (activity, &folio->prev));
+ assert (! vg_cap_to_object (activity, &folio->activity));
+ assert (! vg_cap_to_object (activity, &folio->next));
+ assert (! vg_cap_to_object (activity, &folio->prev));
assert (({
struct object_desc *desc;
desc = object_to_object_desc ((struct object *) folio);
@@ -335,9 +335,9 @@ folio_parent (struct activity *activity, struct folio *folio)
of it before it is parented. */
{
int i;
- for (i = 0; i < FOLIO_OBJECTS; i ++)
+ for (i = 0; i < VG_FOLIO_OBJECTS; i ++)
assert (! object_find_soft (activity, desc->oid + 1 + i,
- OBJECT_POLICY_DEFAULT));
+ VG_OBJECT_POLICY_DEFAULT));
}
true;
}));
@@ -348,11 +348,11 @@ folio_parent (struct activity *activity, struct folio *folio)
/* Add FOLIO to ACTIVITY's folio list. */
/* Update the old head's previous pointer. */
- struct object *head = cap_to_object (activity, &activity->folios);
+ struct object *head = vg_cap_to_object (activity, &activity->folios);
if (head)
{
/* It shouldn't have a previous pointer. */
- struct object *prev = cap_to_object (activity,
+ struct object *prev = vg_cap_to_object (activity,
&((struct folio *) head)->prev);
assert (! prev);
@@ -363,11 +363,11 @@ folio_parent (struct activity *activity, struct folio *folio)
folio->next = activity->folios;
/* Ensure FOLIO's PREV pointer is void. */
- folio->prev.type = cap_void;
+ folio->prev.type = vg_cap_void;
/* Finally, set ACTIVITY->FOLIOS to the new head. */
activity->folios = object_to_cap ((struct object *) folio);
- assert (cap_to_object (activity, &activity->folios)
+ assert (vg_cap_to_object (activity, &activity->folios)
== (struct object *) folio);
}
@@ -410,12 +410,12 @@ folio_alloc (struct activity *activity, struct folio_policy policy)
int f = bit_alloc (folios, sizeof (folios), 0);
if (f < 0)
panic ("Out of folios");
- oid_t foid = f * (FOLIO_OBJECTS + 1);
+ vg_oid_t foid = f * (VG_FOLIO_OBJECTS + 1);
/* We can't just allocate a fresh page: we need to preserve the
version information for the folio as well as the objects. */
struct folio *folio = (struct folio *) object_find (activity, foid,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
if (activity)
folio_parent (activity, folio);
@@ -431,14 +431,14 @@ folio_free (struct activity *activity, struct folio *folio)
/* Make sure that FOLIO appears on its owner's folio list. */
assert (({
struct activity *owner
- = (struct activity *) cap_to_object (activity, &folio->activity);
+ = (struct activity *) vg_cap_to_object (activity, &folio->activity);
assert (owner);
- assert (object_type ((struct object *) owner) == cap_activity_control);
+ assert (object_type ((struct object *) owner) == vg_cap_activity_control);
struct folio *f;
- for (f = (struct folio *) cap_to_object (activity, &owner->folios);
- f; f = (struct folio *) cap_to_object (activity, &f->next))
+ for (f = (struct folio *) vg_cap_to_object (activity, &owner->folios);
+ f; f = (struct folio *) vg_cap_to_object (activity, &f->next))
{
- assert (object_type ((struct object *) folio) == cap_folio);
+ assert (object_type ((struct object *) folio) == vg_cap_folio);
if (f == folio)
break;
}
@@ -451,8 +451,8 @@ folio_free (struct activity *activity, struct folio *folio)
the paging activity, etc. */
struct object_desc *fdesc = object_to_object_desc ((struct object *) folio);
- assert (fdesc->type == cap_folio);
- assert (fdesc->oid % (FOLIO_OBJECTS + 1) == 0);
+ assert (fdesc->type == vg_cap_folio);
+ assert (fdesc->oid % (VG_FOLIO_OBJECTS + 1) == 0);
/* Free the objects. This bumps the version of any live objects.
This is correct as although the folio is being destroyed, when we
@@ -462,11 +462,11 @@ folio_free (struct activity *activity, struct folio *folio)
/* As we free the objects, we also don't have to call cap_shootdown
here. */
int i;
- for (i = 0; i < FOLIO_OBJECTS; i ++)
+ for (i = 0; i < VG_FOLIO_OBJECTS; i ++)
folio_object_free (activity, folio, i);
struct activity *owner
- = (struct activity *) cap_to_object (activity, &folio->activity);
+ = (struct activity *) vg_cap_to_object (activity, &folio->activity);
assert (owner);
/* Update the allocation information. */
@@ -474,11 +474,11 @@ folio_free (struct activity *activity, struct folio *folio)
activity_for_each_ancestor (a, ({ a->folio_count --; }));
/* Clear the owner. */
- folio->activity.type = cap_void;
+ folio->activity.type = vg_cap_void;
/* Remove FOLIO from its owner's folio list. */
- struct folio *next = (struct folio *) cap_to_object (activity, &folio->next);
- struct folio *prev = (struct folio *) cap_to_object (activity, &folio->prev);
+ struct folio *next = (struct folio *) vg_cap_to_object (activity, &folio->next);
+ struct folio *prev = (struct folio *) vg_cap_to_object (activity, &folio->prev);
if (prev)
prev->next = folio->next;
@@ -490,8 +490,8 @@ folio_free (struct activity *activity, struct folio *folio)
if (next)
next->prev = folio->prev;
- folio->next.type = cap_void;
- folio->prev.type = cap_void;
+ folio->next.type = vg_cap_void;
+ folio->prev.type = vg_cap_void;
/* And free the folio. */
@@ -499,58 +499,58 @@ folio_free (struct activity *activity, struct folio *folio)
previous data including version information. */
fdesc->version = folio_object_version (folio, -1) + 1;
folio_object_version_set (folio, -1, fdesc->version);
- bit_dealloc (folios, fdesc->oid / (FOLIO_OBJECTS + 1));
+ bit_dealloc (folios, fdesc->oid / (VG_FOLIO_OBJECTS + 1));
}
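
For reference, the version bump above is what severs extant capabilities: a capability designates an object only while its recorded version matches the folio's. A minimal sketch of that check, whose shape is assumed here (the same comparison appears verbatim in server.c's discard handling later in this patch):

    /* Sketch: a capability dangles once the versions disagree.  */
    if (cap.version != folio_object_version (folio, idx))
      /* The object was freed (and possibly reallocated); treat
         CAP as if it were vg_cap_void.  */
      return NULL;
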
-struct cap
+struct vg_cap
folio_object_alloc (struct activity *activity,
struct folio *folio,
int idx,
- enum cap_type type,
+ enum vg_cap_type type,
struct object_policy policy,
uintptr_t return_code)
{
- assert (0 <= idx && idx < FOLIO_OBJECTS);
+ assert (0 <= idx && idx < VG_FOLIO_OBJECTS);
- type = cap_type_strengthen (type);
+ type = vg_cap_type_strengthen (type);
struct object_desc *fdesc = object_to_object_desc ((struct object *) folio);
- assert (fdesc->type == cap_folio);
- assert (fdesc->oid % (1 + FOLIO_OBJECTS) == 0);
+ assert (fdesc->type == vg_cap_folio);
+ assert (fdesc->oid % (1 + VG_FOLIO_OBJECTS) == 0);
- debug (5, OID_FMT ":%d -> %s (%s/%d)",
- OID_PRINTF (fdesc->oid), idx, cap_type_string (type),
+ debug (5, VG_OID_FMT ":%d -> %s (%s/%d)",
+ VG_OID_PRINTF (fdesc->oid), idx, vg_cap_type_string (type),
policy.discardable ? "discardable" : "precious", policy.priority);
- oid_t oid = fdesc->oid + 1 + idx;
+ vg_oid_t oid = fdesc->oid + 1 + idx;
struct object *object = NULL;
/* Deallocate any existing object. */
- if (folio_object_type (folio, idx) == cap_activity_control
- || folio_object_type (folio, idx) == cap_thread
- || folio_object_type (folio, idx) == cap_messenger)
+ if (vg_folio_object_type (folio, idx) == vg_cap_activity_control
+ || vg_folio_object_type (folio, idx) == vg_cap_thread
+ || vg_folio_object_type (folio, idx) == vg_cap_messenger)
/* These object types have state that needs to be explicitly
destroyed. */
{
- object = object_find (activity, oid, OBJECT_POLICY_DEFAULT);
+ object = object_find (activity, oid, VG_OBJECT_POLICY_DEFAULT);
assert (object_to_object_desc (object)->type
- == folio_object_type (folio, idx));
+ == vg_folio_object_type (folio, idx));
/* See if we need to destroy the object. */
- switch (folio_object_type (folio, idx))
+ switch (vg_folio_object_type (folio, idx))
{
- case cap_activity_control:
+ case vg_cap_activity_control:
debug (4, "Destroying activity at %llx", oid);
activity_destroy (activity, (struct activity *) object);
break;
- case cap_thread:
+ case vg_cap_thread:
debug (4, "Destroying thread object at %llx", oid);
thread_deinit (activity, (struct thread *) object);
break;
- case cap_messenger:
+ case vg_cap_messenger:
debug (4, "Destroying messenger object at %llx", oid);
messenger_destroy (activity, (struct messenger *) object);
break;
@@ -584,12 +584,12 @@ folio_object_alloc (struct activity *activity,
odesc = object_to_object_desc (object);
assert (odesc->oid == oid);
assert (odesc->version == folio_object_version (folio, idx));
- assertx (odesc->type == folio_object_type (folio, idx),
- OID_FMT ": %s != %s",
- OID_PRINTF (odesc->oid), cap_type_string (odesc->type),
- cap_type_string (folio_object_type (folio, idx)));
+ assertx (odesc->type == vg_folio_object_type (folio, idx),
+ VG_OID_FMT ": %s != %s",
+ VG_OID_PRINTF (odesc->oid), vg_cap_type_string (odesc->type),
+ vg_cap_type_string (vg_folio_object_type (folio, idx)));
- if (type == cap_void)
+ if (type == vg_cap_void)
/* We are deallocating the object: free associated memory. */
{
memory_object_destroy (activity, object);
@@ -601,9 +601,9 @@ folio_object_alloc (struct activity *activity,
}
else
{
- struct cap cap = object_desc_to_cap (odesc);
+ struct vg_cap vg_cap = object_desc_to_cap (odesc);
assert (activity);
- cap_shootdown (activity, &cap);
+ cap_shootdown (activity, &vg_cap);
memset ((void *) object, 0, PAGESIZE);
object_desc_flush (odesc, true);
@@ -618,7 +618,7 @@ folio_object_alloc (struct activity *activity,
}
}
- if (folio_object_type (folio, idx) != cap_void)
+ if (vg_folio_object_type (folio, idx) != vg_cap_void)
/* We know that if an object's type is void then there are no
extant pointers to it. If there are only pointers in memory,
then we need to bump the memory version. Otherwise, we need to
@@ -633,16 +633,16 @@ folio_object_alloc (struct activity *activity,
odesc->version = folio_object_version (folio, idx);
}
- folio_object_type_set (folio, idx, type);
+ vg_folio_object_type_set (folio, idx, type);
folio_object_content_set (folio, idx, false);
folio_object_discarded_set (folio, idx, false);
- folio_object_policy_set (folio, idx, policy);
+ vg_folio_object_policy_set (folio, idx, policy);
folio_object_referenced_set (folio, idx, false);
folio_object_dirty_set (folio, idx, false);
switch (type)
{
- case cap_activity_control:
+ case vg_cap_activity_control:
{
if (! object)
object = object_find (activity, oid, policy);
@@ -655,14 +655,14 @@ folio_object_alloc (struct activity *activity,
;
}
- struct cap cap;
- memset (&cap, 0, sizeof (cap));
- cap.type = type;
- cap.oid = oid;
- cap.version = folio_object_version (folio, idx);
- CAP_POLICY_SET (&cap, policy);
+ struct vg_cap vg_cap;
+ memset (&vg_cap, 0, sizeof (vg_cap));
+ vg_cap.type = type;
+ vg_cap.oid = oid;
+ vg_cap.version = folio_object_version (folio, idx);
+ VG_CAP_POLICY_SET (&vg_cap, policy);
- return cap;
+ return vg_cap;
}
void
@@ -671,26 +671,26 @@ folio_policy (struct activity *activity,
uintptr_t flags, struct folio_policy in,
struct folio_policy *out)
{
- if ((flags & FOLIO_POLICY_DELIVER) && out)
+ if ((flags & VG_FOLIO_POLICY_DELIVER) && out)
{
out->discardable = folio->policy.discardable;
out->group = folio->policy.group;
out->priority = folio->policy.priority;
}
- if (! (flags & FOLIO_POLICY_SET))
+ if (! (flags & VG_FOLIO_POLICY_SET))
return;
- if ((flags & FOLIO_POLICY_GROUP_SET))
+ if ((flags & VG_FOLIO_POLICY_GROUP_SET))
folio->policy.group = in.group;
- if ((flags & FOLIO_POLICY_DISCARDABLE_SET)
+ if ((flags & VG_FOLIO_POLICY_DISCARDABLE_SET)
&& in.discardable != folio->policy.discardable)
/* XXX: We need to move the folio from the discardable list to the
precious list (or vice versa). */
folio->policy.discardable = in.discardable;
- if ((flags & FOLIO_POLICY_PRIORITY_SET))
+ if ((flags & VG_FOLIO_POLICY_PRIORITY_SET))
folio->policy.priority = in.priority;
}
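
A hypothetical caller of the interface above, combining the renamed flags to raise a folio's priority and read back the resulting policy (ACTIVITY and FOLIO are assumed to be in scope):

    struct folio_policy in, out;
    memset (&in, 0, sizeof (in));
    in.priority = 1;
    folio_policy (activity, folio,
                  VG_FOLIO_POLICY_SET | VG_FOLIO_POLICY_PRIORITY_SET
                  | VG_FOLIO_POLICY_DELIVER,
                  in, &out);
    /* OUT now holds the folio's updated policy.  */
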
@@ -710,7 +710,7 @@ object_desc_claim (struct activity *activity, struct object_desc *desc,
int inactive = 0;
int i;
- for (i = OBJECT_PRIORITY_MIN; i <= OBJECT_PRIORITY_MAX; i ++)
+ for (i = VG_OBJECT_PRIORITY_MIN; i <= VG_OBJECT_PRIORITY_MAX; i ++)
{
active += activity_list_count (&desc->activity->frames[i].active);
inactive += activity_list_count (&desc->activity->frames[i].inactive);
@@ -736,7 +736,7 @@ object_desc_claim (struct activity *activity, struct object_desc *desc,
int inactive = 0;
int i;
- for (i = OBJECT_PRIORITY_MIN; i <= OBJECT_PRIORITY_MAX; i ++)
+ for (i = VG_OBJECT_PRIORITY_MIN; i <= VG_OBJECT_PRIORITY_MAX; i ++)
{
active += activity_list_count (&activity->frames[i].active);
inactive += activity_list_count (&activity->frames[i].inactive);
@@ -774,16 +774,16 @@ object_desc_claim (struct activity *activity, struct object_desc *desc,
not have an activity, it is being initialized. */
if (desc->activity)
{
- debug (5, OID_FMT " claims from " OID_FMT,
- OID_PRINTF (object_to_object_desc ((struct object *) desc
+ debug (5, VG_OID_FMT " claims from " VG_OID_FMT,
+ VG_OID_PRINTF (object_to_object_desc ((struct object *) desc
->activity)->oid),
- OID_PRINTF (activity
+ VG_OID_PRINTF (activity
? object_to_object_desc ((struct object *)
activity)->oid
: 0));
assert (object_type ((struct object *) desc->activity)
- == cap_activity_control);
+ == vg_cap_activity_control);
if (desc->eviction_candidate)
/* DESC is an eviction candidate. The act of claiming saves
@@ -905,10 +905,10 @@ object_desc_claim (struct activity *activity, struct object_desc *desc,
desc->activity = activity;
desc->policy.discardable = policy.discardable;
- debug (5, OID_FMT " claimed " OID_FMT " (%s): %s",
- OID_PRINTF (object_to_object_desc ((struct object *) activity)->oid),
- OID_PRINTF (desc->oid),
- cap_type_string (desc->type),
+ debug (5, VG_OID_FMT " claimed " VG_OID_FMT " (%s): %s",
+ VG_OID_PRINTF (object_to_object_desc ((struct object *) activity)->oid),
+ VG_OID_PRINTF (desc->oid),
+ vg_cap_type_string (desc->type),
desc->policy.discardable ? "discardable" : "precious");
out:;
@@ -919,7 +919,7 @@ object_desc_claim (struct activity *activity, struct object_desc *desc,
int inactive = 0;
int i;
- for (i = OBJECT_PRIORITY_MIN; i <= OBJECT_PRIORITY_MAX; i ++)
+ for (i = VG_OBJECT_PRIORITY_MIN; i <= VG_OBJECT_PRIORITY_MAX; i ++)
{
active += activity_list_count (&desc->activity->frames[i].active);
inactive += activity_list_count (&desc->activity->frames[i].inactive);
@@ -944,7 +944,7 @@ object_desc_claim (struct activity *activity, struct object_desc *desc,
int inactive = 0;
int i;
- for (i = OBJECT_PRIORITY_MIN; i <= OBJECT_PRIORITY_MAX; i ++)
+ for (i = VG_OBJECT_PRIORITY_MIN; i <= VG_OBJECT_PRIORITY_MAX; i ++)
{
active += activity_list_count (&activity->frames[i].active);
inactive += activity_list_count (&activity->frames[i].inactive);
@@ -976,10 +976,10 @@ object_wait_queue_head (struct activity *activity, struct object *object)
if (! folio_object_wait_queue_p (folio, i))
return NULL;
- oid_t h = folio_object_wait_queue (folio, i);
- struct object *head = object_find (activity, h, OBJECT_POLICY_DEFAULT);
+ vg_oid_t h = folio_object_wait_queue (folio, i);
+ struct object *head = object_find (activity, h, VG_OBJECT_POLICY_DEFAULT);
assert (head);
- assert (object_type (head) == cap_messenger);
+ assert (object_type (head) == vg_cap_messenger);
assert (((struct messenger *) head)->wait_queue_p);
assert (((struct messenger *) head)->wait_queue_head);
@@ -1000,9 +1000,9 @@ object_wait_queue_tail (struct activity *activity, struct object *object)
struct messenger *tail;
tail = (struct messenger *) object_find (activity, head->wait_queue.prev,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assert (tail);
- assert (object_type ((struct object *) tail) == cap_messenger);
+ assert (object_type ((struct object *) tail) == vg_cap_messenger);
assert (tail->wait_queue_p);
assert (tail->wait_queue_tail);
@@ -1018,9 +1018,9 @@ object_wait_queue_next (struct activity *activity, struct messenger *m)
struct messenger *next;
next = (struct messenger *) object_find (activity, m->wait_queue.next,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assert (next);
- assert (object_type ((struct object *) next) == cap_messenger);
+ assert (object_type ((struct object *) next) == vg_cap_messenger);
assert (next->wait_queue_p);
assert (! next->wait_queue_head);
@@ -1036,9 +1036,9 @@ object_wait_queue_prev (struct activity *activity, struct messenger *m)
struct messenger *prev;
prev = (struct messenger *) object_find (activity, m->wait_queue.prev,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assert (prev);
- assert (object_type ((struct object *) prev) == cap_messenger);
+ assert (object_type ((struct object *) prev) == vg_cap_messenger);
assert (prev->wait_queue_p);
assert (! prev->wait_queue_tail);
@@ -1060,12 +1060,12 @@ object_wait_queue_check (struct activity *activity, struct messenger *messenger)
break;
m = (struct messenger *) object_find (activity, last->wait_queue.next,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assert (m);
assert (m->wait_queue_p);
assert (! m->wait_queue_head);
struct object *p = object_find (activity, m->wait_queue.prev,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assert (p == (struct object *) last);
last = m;
@@ -1074,7 +1074,7 @@ object_wait_queue_check (struct activity *activity, struct messenger *messenger)
assert (last->wait_queue_tail);
struct object *o = object_find (activity, last->wait_queue.next,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assert (o);
assert (folio_object_wait_queue_p (objects_folio (activity, o),
objects_folio_offset (o)));
@@ -1086,7 +1086,7 @@ object_wait_queue_check (struct activity *activity, struct messenger *messenger)
struct messenger *tail;
tail = (struct messenger *) object_find (activity, head->wait_queue.prev,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assert (tail);
assert (tail->wait_queue_tail);
@@ -1098,13 +1098,13 @@ object_wait_queue_check (struct activity *activity, struct messenger *messenger)
assert (! last->wait_queue_tail);
m = (struct messenger *) object_find (activity, last->wait_queue.next,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assert (m);
assert (m->wait_queue_p);
assert (! m->wait_queue_head);
struct object *p = object_find (activity, m->wait_queue.prev,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assert (p == (struct object *) last);
last = m;
@@ -1116,8 +1116,8 @@ void
object_wait_queue_push (struct activity *activity,
struct object *object, struct messenger *messenger)
{
- debug (5, "Pushing " OID_FMT " onto %p",
- OID_PRINTF (object_to_object_desc ((struct object *) messenger)->oid),
+ debug (5, "Pushing " VG_OID_FMT " onto %p",
+ VG_OID_PRINTF (object_to_object_desc ((struct object *) messenger)->oid),
object);
object_wait_queue_check (activity, messenger);
@@ -1171,8 +1171,8 @@ void
object_wait_queue_enqueue (struct activity *activity,
struct object *object, struct messenger *messenger)
{
- debug (5, "Enqueueing " OID_FMT " on %p",
- OID_PRINTF (object_to_object_desc ((struct object *) messenger)->oid),
+ debug (5, "Enqueueing " VG_OID_FMT " on %p",
+ VG_OID_PRINTF (object_to_object_desc ((struct object *) messenger)->oid),
object);
object_wait_queue_check (activity, messenger);
@@ -1232,8 +1232,8 @@ void
object_wait_queue_unlink (struct activity *activity,
struct messenger *messenger)
{
- debug (5, "Removing " OID_FMT,
- OID_PRINTF (object_to_object_desc ((struct object *) messenger)->oid));
+ debug (5, "Removing " VG_OID_FMT,
+ VG_OID_PRINTF (object_to_object_desc ((struct object *) messenger)->oid));
assert (messenger->wait_queue_p);
@@ -1245,7 +1245,7 @@ object_wait_queue_unlink (struct activity *activity,
{
struct object *object;
object = object_find (activity, messenger->wait_queue.next,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assert (object);
assert (folio_object_wait_queue_p (objects_folio (activity, object),
objects_folio_offset (object)));
@@ -1256,7 +1256,7 @@ object_wait_queue_unlink (struct activity *activity,
list. */
{
assert (object_find (activity, messenger->wait_queue.prev,
- OBJECT_POLICY_DEFAULT)
+ VG_OBJECT_POLICY_DEFAULT)
== (struct object *) messenger);
folio_object_wait_queue_p_set (objects_folio (activity, object),
@@ -1270,7 +1270,7 @@ object_wait_queue_unlink (struct activity *activity,
/* HEAD->PREV == TAIL. */
assert (object_find (activity, head->wait_queue.prev,
- OBJECT_POLICY_DEFAULT)
+ VG_OBJECT_POLICY_DEFAULT)
== (struct object *) messenger);
/* HEAD->PREV = TAIL->PREV. */
@@ -1280,9 +1280,9 @@ object_wait_queue_unlink (struct activity *activity,
struct messenger *prev;
prev = (struct messenger *) object_find (activity,
messenger->wait_queue.prev,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assert (prev);
- assert (object_type ((struct object *) prev) == cap_messenger);
+ assert (object_type ((struct object *) prev) == vg_cap_messenger);
prev->wait_queue_tail = 1;
prev->wait_queue.next = messenger->wait_queue.next;
@@ -1295,9 +1295,9 @@ object_wait_queue_unlink (struct activity *activity,
assert (next);
struct object *p = object_find (activity, messenger->wait_queue.prev,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assert (p);
- assert (object_type (p) == cap_messenger);
+ assert (object_type (p) == vg_cap_messenger);
struct messenger *prev = (struct messenger *) p;
if (messenger->wait_queue_head)
@@ -1307,7 +1307,7 @@ object_wait_queue_unlink (struct activity *activity,
struct messenger *tail = prev;
struct object *object = object_find (activity, tail->wait_queue.next,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assert (object);
assert (object_wait_queue_head (activity, object) == messenger);
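
The asserts above pin down the wait-queue shape: messengers form a circular, doubly linked list rooted at the object, with the head and tail specially marked. Schematically:

    object ~~> head <-> m1 <-> ... <-> tail
    head->wait_queue_head == 1
    head->wait_queue.prev == tail          (tail found in O(1))
    tail->wait_queue_tail == 1
    tail->wait_queue.next designates the object itself
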
diff --git a/viengoos/object.h b/viengoos/object.h
index e8855f0..0e7978d 100644
--- a/viengoos/object.h
+++ b/viengoos/object.h
@@ -46,9 +46,9 @@ extern ss_mutex_t kernel_lock;
-------
A folio is a unit of disk storage. Objects are allocated out of a
- folio. Each folio consists of exactly FOLIO_OBJECTS objects each
+ folio. Each folio consists of exactly VG_FOLIO_OBJECTS objects each
PAGESIZE bytes in size. A folio also includes a 4 KB header (thus
- a folio consists of a total of FOLIO_OBJECTS + 1 pages of storage).
+ a folio consists of a total of VG_FOLIO_OBJECTS + 1 pages of storage).
The header also describes the folio:
version
@@ -80,7 +80,7 @@ extern ss_mutex_t kernel_lock;
The implementation ensures these invariants. When a storage device
is initialized, all objects are set to have a version of 0 and a
- type of cap_void. As all objects are new, there can be no
+ type of vg_cap_void. As all objects are new, there can be no
capabilities designating them. When an object is deallocated, if
the object's type is void, nothing is done. Otherwise, the
object's version is incremented and its type is set to void. When
@@ -122,9 +122,9 @@ extern ss_mutex_t kernel_lock;
struct object_desc
{
/* The version and OID of the object. */
- oid_t oid;
- uintptr_t version : CAP_VERSION_BITS;
- uintptr_t type : CAP_TYPE_BITS;
+ vg_oid_t oid;
+ uintptr_t version : VG_CAP_VERSION_BITS;
+ uintptr_t type : VG_CAP_TYPE_BITS;
/* Whether the page is dirty. */
uintptr_t dirty: 1;
@@ -172,13 +172,13 @@ struct object_desc
union
{
/* ACTIVITY is valid, EVICTION_CANDIDATE is false, POLICY.PRIORITY
- != OBJECT_PRIORITY_DEFAULT.
+ != VG_OBJECT_PRIORITY_DEFAULT.
=> attached to ACTIVITY->PRIORITIES. */
hurd_btree_node_t priority_node;
/* ACTIVITY is valid, EVICTION_CANDIDATE is false, POLICY.PRIORITY
- == OBJECT_PRIORITY_DEFAULT,
+ == VG_OBJECT_PRIORITY_DEFAULT,
=> attached to one of ACTIVITY's LRU lists.
@@ -215,16 +215,16 @@ extern struct object_desc *object_descs;
place. The first reason is that it relies on the definition of
struct activity and struct thread and this header file includes
neither activity.h nor thread.h. */
-#define OBJECT_NAME_FMT "%s%s" OID_FMT
+#define OBJECT_NAME_FMT "%s%s" VG_OID_FMT
#define OBJECT_NAME_PRINTF(__onp) \
({ \
const char *name = ""; \
- if (object_type ((__onp)) == cap_activity_control) \
+ if (object_type ((__onp)) == vg_cap_activity_control) \
{ \
struct activity *a = (struct activity *) (__onp); \
name = a->name.name; \
} \
- else if (object_type ((__onp)) == cap_thread) \
+ else if (object_type ((__onp)) == vg_cap_thread) \
{ \
struct thread *t = (struct thread *) (__onp); \
name = t->name.name; \
@@ -233,12 +233,12 @@ extern struct object_desc *object_descs;
}), \
({ \
const char *name = ""; \
- if (object_type ((__onp)) == cap_activity_control) \
+ if (object_type ((__onp)) == vg_cap_activity_control) \
{ \
struct activity *a = (struct activity *) (__onp); \
name = a->name.name; \
} \
- else if (object_type ((__onp)) == cap_thread) \
+ else if (object_type ((__onp)) == vg_cap_thread) \
{ \
struct thread *t = (struct thread *) (__onp); \
name = t->name.name; \
@@ -247,7 +247,7 @@ extern struct object_desc *object_descs;
if (*name) \
space = " "; \
space; \
- }), OID_PRINTF (object_to_object_desc ((__onp))->oid) \
+ }), VG_OID_PRINTF (object_to_object_desc ((__onp))->oid) \
LIST_CLASS(activity, struct object_desc, activity_node, true)
LIST_CLASS(eviction, struct object_desc, activity_node, true)
@@ -281,13 +281,13 @@ extern void object_init (void);
/* Return the address of the object corresponding to object OID,
reading it from backing store if required. */
-extern struct object *object_find (struct activity *activity, oid_t oid,
+extern struct object *object_find (struct activity *activity, vg_oid_t oid,
struct object_policy policy);
/* If the object corresponding to object OID is in-memory, return it.
Otherwise, return NULL. Does not go to disk. */
extern struct object *object_find_soft (struct activity *activity,
- oid_t oid,
+ vg_oid_t oid,
struct object_policy policy);
/* Destroy the object OBJECT. Any changes must have already been
@@ -329,41 +329,41 @@ extern void memory_object_destroy (struct activity *activity,
&object_descs[((uintptr_t) (object__) - first_frame) / PAGESIZE]; \
})
-/* Return a cap referencing the object designated by OBJECT_DESC. */
-static inline struct cap
+/* Return a vg_cap referencing the object designated by OBJECT_DESC. */
+static inline struct vg_cap
object_desc_to_cap (struct object_desc *desc)
{
- struct cap cap;
+ struct vg_cap vg_cap;
- cap.type = desc->type;
- cap.oid = desc->oid;
- cap.version = desc->version;
- cap.addr_trans = CAP_ADDR_TRANS_VOID;
- cap.discardable = desc->policy.discardable;
- cap.priority = desc->policy.priority;
+ vg_cap.type = desc->type;
+ vg_cap.oid = desc->oid;
+ vg_cap.version = desc->version;
+ vg_cap.addr_trans = VG_CAP_ADDR_TRANS_VOID;
+ vg_cap.discardable = desc->policy.discardable;
+ vg_cap.priority = desc->policy.priority;
- if (cap.type == cap_cappage)
- CAP_SET_SUBPAGE (&cap, 0, 1);
- else if (cap.type == cap_folio)
- CAP_SET_SUBPAGE (&cap, 0, 1);
+ if (vg_cap.type == vg_cap_cappage)
+ VG_CAP_SET_SUBPAGE (&vg_cap, 0, 1);
+ else if (vg_cap.type == vg_cap_folio)
+ VG_CAP_SET_SUBPAGE (&vg_cap, 0, 1);
- return cap;
+ return vg_cap;
}
-/* Return a cap referencing the object OBJECT. */
-static inline struct cap
+/* Return a vg_cap referencing the object OBJECT. */
+static inline struct vg_cap
object_to_cap (struct object *object)
{
return object_desc_to_cap (object_to_object_desc (object));
}
-static inline oid_t
+static inline vg_oid_t
object_oid (struct object *object)
{
return object_to_object_desc (object)->oid;
}
-static inline enum cap_type
+static inline enum vg_cap_type
object_type (struct object *object)
{
return object_to_object_desc (object)->type;
@@ -469,13 +469,13 @@ extern void folio_free (struct activity *activity, struct folio *folio);
/* Allocate an object of type TYPE using the PAGE page from the folio
FOLIO. This implicitly destroys any existing object in that page.
If there were any waiters waiting for the destruction, they are
- woken and passed RETURN_CODE. If TYPE is cap_void, this is
+ woken and passed RETURN_CODE. If TYPE is vg_cap_void, this is
equivalent to calling folio_object_free. If OBJECTP is non-NULL,
then the in-memory location of the object is returned in
*OBJECTP. */
-extern struct cap folio_object_alloc (struct activity *activity,
+extern struct vg_cap folio_object_alloc (struct activity *activity,
struct folio *folio, int page,
- enum cap_type type,
+ enum vg_cap_type type,
struct object_policy policy,
uintptr_t return_code);
@@ -484,8 +484,8 @@ static inline void
folio_object_free (struct activity *activity,
struct folio *folio, int page)
{
- folio_object_alloc (activity, folio, page, cap_void,
- OBJECT_POLICY_VOID, 0);
+ folio_object_alloc (activity, folio, page, vg_cap_void,
+ VG_OBJECT_POLICY_VOID, 0);
}
/* Return an object's position within its folio. */
@@ -494,7 +494,7 @@ objects_folio_offset (struct object *object)
{
struct object_desc *desc = object_to_object_desc (object);
- return (desc->oid % (1 + FOLIO_OBJECTS)) - 1;
+ return (desc->oid % (1 + VG_FOLIO_OBJECTS)) - 1;
}
/* Return the folio corresponding to the object OBJECT. */
@@ -504,7 +504,7 @@ objects_folio (struct activity *activity, struct object *object)
struct object_desc *odesc = object_to_object_desc (object);
int page = objects_folio_offset (object);
- oid_t foid = odesc->oid - page - 1;
+ vg_oid_t foid = odesc->oid - page - 1;
if (odesc->maybe_folio_desc
&& odesc->maybe_folio_desc->live
@@ -512,7 +512,7 @@ objects_folio (struct activity *activity, struct object *object)
return (struct folio *) object_desc_to_object (odesc->maybe_folio_desc);
struct folio *folio = (struct folio *) object_find (activity, foid,
- OBJECT_POLICY_VOID);
+ VG_OBJECT_POLICY_VOID);
assert (folio);
odesc->maybe_folio_desc = object_to_object_desc ((struct object *) folio);
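
A worked example of the OID arithmetic above, assuming VG_FOLIO_OBJECTS is 128 (so each folio occupies 129 contiguous OIDs, header first):

    /* Folio #2:    foid = 2 * (128 + 1)    = 258.
       Object #5:   oid  = 258 + 1 + 5      = 264.
       Recovering:  page = (264 % 129) - 1  = 5;
                    foid = 264 - 5 - 1      = 258.  */
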
@@ -578,8 +578,8 @@ extern void object_wait_queue_unlink (struct activity *activity,
(folio_object_wait_queue_p (__owqfe_folio, __owqfe_idx) \
? object_find (__owqfe_activity, \
folio_object_wait_queue (__owqfe_folio, \
- __owqfe_idx), \
- OBJECT_POLICY_VOID) \
+ __owqfe_idx), \
+ VG_OBJECT_POLICY_VOID) \
: NULL); \
(__owqfe_messenger = __owqfe_next) \
&& ((__owqfe_next = object_wait_queue_next (__owqfe_activity, \
diff --git a/viengoos/pager.c b/viengoos/pager.c
index 8b3b038..28bb76a 100644
--- a/viengoos/pager.c
+++ b/viengoos/pager.c
@@ -37,8 +37,8 @@ is_clean (struct object_desc *desc)
l4_fpage_t result = l4_unmap_fpage (l4_fpage ((l4_word_t) object,
PAGESIZE));
assertx (! l4_was_written (result) && ! l4_was_referenced (result),
- "The %s " OID_FMT "(at %p) has status bits set (%s %s)",
- cap_type_string (desc->type), OID_PRINTF (desc->oid), object,
+ "The %s " VG_OID_FMT "(at %p) has status bits set (%s %s)",
+ vg_cap_type_string (desc->type), VG_OID_PRINTF (desc->oid), object,
l4_was_written (result) ? "dirty" : "",
l4_was_referenced (result) ? "refed" : "");
@@ -55,8 +55,8 @@ is_clean (struct object_desc *desc)
clean = false;
}
assertx (clean,
- "The %s " OID_FMT "(at %p) is dirty!",
- cap_type_string (desc->type), OID_PRINTF (desc->oid),
+ "The %s " VG_OID_FMT "(at %p) is dirty!",
+ vg_cap_type_string (desc->type), VG_OID_PRINTF (desc->oid),
object);
}
#endif
@@ -79,7 +79,7 @@ reclaim_from (struct activity *victim, int goal)
int active = 0;
int inactive = 0;
- for (i = OBJECT_PRIORITY_MIN; i <= OBJECT_PRIORITY_MAX; i ++)
+ for (i = VG_OBJECT_PRIORITY_MIN; i <= VG_OBJECT_PRIORITY_MAX; i ++)
{
active += activity_list_count (&victim->frames[i].active);
inactive += activity_list_count (&victim->frames[i].inactive);
@@ -98,7 +98,7 @@ reclaim_from (struct activity *victim, int goal)
victim->frames_local,
available_list_count (&available), laundry_list_count (&laundry));
- for (i = OBJECT_PRIORITY_MIN; i <= OBJECT_PRIORITY_MAX; i ++)
+ for (i = VG_OBJECT_PRIORITY_MIN; i <= VG_OBJECT_PRIORITY_MAX; i ++)
{
int s = count;
@@ -207,7 +207,7 @@ reclaim_from (struct activity *victim, int goal)
active = 0;
inactive = 0;
- for (i = OBJECT_PRIORITY_MIN; i <= OBJECT_PRIORITY_MAX; i ++)
+ for (i = VG_OBJECT_PRIORITY_MIN; i <= VG_OBJECT_PRIORITY_MAX; i ++)
{
active += activity_list_count (&victim->frames[i].active);
inactive += activity_list_count (&victim->frames[i].inactive);
diff --git a/viengoos/server.c b/viengoos/server.c
index 6c0be23..5a6c4cc 100644
--- a/viengoos/server.c
+++ b/viengoos/server.c
@@ -218,18 +218,18 @@ server_loop (void)
and then once we find the real principal, we just add the
charges to the former to the latter. */
struct activity *activity
- = (struct activity *) cap_to_object (root_activity,
+ = (struct activity *) vg_cap_to_object (root_activity,
&thread->activity);
if (! activity)
{
DEBUG (1, "Caller has no assigned activity");
continue;
}
- if (object_type ((struct object *) activity) != cap_activity_control)
+ if (object_type ((struct object *) activity) != vg_cap_activity_control)
{
DEBUG (1, "Caller's activity slot contains a %s,"
"not an activity_control",
- cap_type_string (object_type ((struct object *) activity)));
+ vg_cap_type_string (object_type ((struct object *) activity)));
continue;
}
@@ -248,17 +248,17 @@ server_loop (void)
uintptr_t page_addr = fault & ~(PAGESIZE - 1);
- struct cap cap;
+ struct vg_cap cap;
bool writable;
cap = as_object_lookup_rel (activity, &thread->aspace,
- addr_chop (PTR_TO_ADDR (page_addr),
+ vg_addr_chop (VG_PTR_TO_ADDR (page_addr),
PAGESIZE_LOG2),
- write_fault ? cap_page : cap_rpage,
+ write_fault ? vg_cap_page : vg_cap_rpage,
&writable);
- assert (cap.type == cap_void
- || cap.type == cap_page
- || cap.type == cap_rpage);
+ assert (cap.type == vg_cap_void
+ || cap.type == vg_cap_page
+ || cap.type == vg_cap_rpage);
bool discarded = false;
if (write_fault && ! writable)
@@ -270,32 +270,32 @@ server_loop (void)
if (! writable && cap.discardable)
{
DEBUG (4, "Ignoring discardable predicate for cap designating "
- OID_FMT " (%s)",
- OID_PRINTF (cap.oid), cap_type_string (cap.type));
+ VG_OID_FMT " (%s)",
+ VG_OID_PRINTF (cap.oid), vg_cap_type_string (cap.type));
cap.discardable = false;
}
- struct object *page = cap_to_object (activity, &cap);
- if (! page && cap.type != cap_void)
+ struct object *page = vg_cap_to_object (activity, &cap);
+ if (! page && cap.type != vg_cap_void)
/* It's not in-memory. See if it was discarded. If not,
- load it using cap_to_object. */
+ load it using vg_cap_to_object. */
{
- int object = (cap.oid % (FOLIO_OBJECTS + 1)) - 1;
- oid_t foid = cap.oid - object - 1;
+ int object = (cap.oid % (VG_FOLIO_OBJECTS + 1)) - 1;
+ vg_oid_t foid = cap.oid - object - 1;
struct folio *folio
= (struct folio *) object_find (activity, foid,
- OBJECT_POLICY_DEFAULT);
+ VG_OBJECT_POLICY_DEFAULT);
assert (folio);
- assert (object_type ((struct object *) folio) == cap_folio);
+ assert (object_type ((struct object *) folio) == vg_cap_folio);
if (cap.version == folio_object_version (folio, object))
{
if (folio_object_discarded (folio, object))
{
- DEBUG (4, OID_FMT " (%s) was discarded",
- OID_PRINTF (cap.oid),
- cap_type_string (folio_object_type (folio,
- object)));
+ DEBUG (4, VG_OID_FMT " (%s) was discarded",
+ VG_OID_PRINTF (cap.oid),
+ vg_cap_type_string (vg_folio_object_type (folio,
+ object)));
assert (! folio_object_content (folio, object));
@@ -320,14 +320,14 @@ server_loop (void)
_L4_exchange_registers (&targ, &c,
&sp, &dummy, &dummy, &dummy, &dummy);
- struct activation_fault_info info;
+ struct vg_activation_fault_info info;
info.access = access;
- info.type = write_fault ? cap_page : cap_rpage;
+ info.type = write_fault ? vg_cap_page : vg_cap_rpage;
info.discarded = discarded;
- activation_fault_send_marshal (reply_buffer, PTR_TO_ADDR (fault),
- sp, ip, info, ADDR_VOID);
+ activation_fault_send_marshal (reply_buffer, VG_PTR_TO_ADDR (fault),
+ sp, ip, info, VG_ADDR_VOID);
thread_raise_exception (activity, thread, reply_buffer);
continue;
@@ -363,17 +363,17 @@ server_loop (void)
page_addr += PAGESIZE;
cap = as_object_lookup_rel (activity, &thread->aspace,
- addr_chop (PTR_TO_ADDR (page_addr),
+ vg_addr_chop (VG_PTR_TO_ADDR (page_addr),
PAGESIZE_LOG2),
- cap_rpage, &writable);
+ vg_cap_rpage, &writable);
- if (cap.type != cap_page && cap.type != cap_rpage)
+ if (cap.type != vg_cap_page && cap.type != vg_cap_rpage)
break;
if (! writable && cap.discardable)
cap.discardable = false;
- struct object *page = cap_to_object (activity, &cap);
+ struct object *page = vg_cap_to_object (activity, &cap);
if (! page)
break;
@@ -392,7 +392,7 @@ server_loop (void)
DEBUG (5, "Prefaulting " DEBUG_BOLD ("%x") " <- %p (%x/%x/%x) %s",
page_addr,
page, l4_address (fpage), l4_size (fpage),
- l4_rights (fpage), cap_type_string (cap.type));
+ l4_rights (fpage), vg_cap_type_string (cap.type));
count ++;
}
@@ -427,7 +427,7 @@ server_loop (void)
/* Return the capability slot corresponding to address ADDR in
the address space rooted at ROOT. */
- error_t SLOT_ (struct cap *root, addr_t addr, struct cap **capp)
+ error_t SLOT_ (struct vg_cap *root, vg_addr_t addr, struct vg_cap **capp)
{
bool w;
if (! as_slot_lookup_rel_use (activity, root, addr,
@@ -437,14 +437,14 @@ server_loop (void)
})))
{
DEBUG (0, "No capability slot at 0x%llx/%d",
- addr_prefix (addr), addr_depth (addr));
+ vg_addr_prefix (addr), vg_addr_depth (addr));
as_dump_from (activity, root, "");
return ENOENT;
}
if (! w)
{
DEBUG (1, "Capability slot at 0x%llx/%d not writable",
- addr_prefix (addr), addr_depth (addr));
+ vg_addr_prefix (addr), vg_addr_depth (addr));
as_dump_from (activity, root, "");
return EPERM;
}
@@ -453,7 +453,7 @@ server_loop (void)
}
#define SLOT(root_, addr_) \
({ \
- struct cap *SLOT_ret; \
+ struct vg_cap *SLOT_ret; \
error_t err = SLOT_ (root_, addr_, &SLOT_ret); \
if (err) \
REPLY (err); \
@@ -463,27 +463,27 @@ server_loop (void)
/* Return a cap referencing the object at address ADDR of the
caller's capability space if it is of type TYPE (-1 = don't care).
Whether the object is writable is stored in *WRITABLEP_. */
- error_t CAP_ (struct cap *root,
- addr_t addr, int type, bool require_writable,
- struct cap *cap)
+ error_t CAP_ (struct vg_cap *root,
+ vg_addr_t addr, int type, bool require_writable,
+ struct vg_cap *cap)
{
bool writable = true;
*cap = as_cap_lookup_rel (principal, root, addr,
type, require_writable ? &writable : NULL);
- if (type != -1 && ! cap_types_compatible (cap->type, type))
+ if (type != -1 && ! vg_cap_types_compatible (cap->type, type))
{
DEBUG (1, "Addr 0x%llx/%d does not reference object of "
"type %s but %s",
- addr_prefix (addr), addr_depth (addr),
- cap_type_string (type), cap_type_string (cap->type));
+ vg_addr_prefix (addr), vg_addr_depth (addr),
+ vg_cap_type_string (type), vg_cap_type_string (cap->type));
as_dump_from (activity, root, "");
return ENOENT;
}
if (require_writable && ! writable)
{
- DEBUG (1, "Addr " ADDR_FMT " not writable",
- ADDR_PRINTF (addr));
+ DEBUG (1, "Addr " VG_ADDR_FMT " not writable",
+ VG_ADDR_PRINTF (addr));
return EPERM;
}
@@ -491,7 +491,7 @@ server_loop (void)
}
#define CAP(root_, addr_, type_, require_writable_) \
({ \
- struct cap CAP_ret; \
+ struct vg_cap CAP_ret; \
error_t err = CAP_ (root_, addr_, type_, require_writable_, \
&CAP_ret); \
if (err) \
@@ -499,19 +499,19 @@ server_loop (void)
CAP_ret; \
})
- error_t OBJECT_ (struct cap *root,
- addr_t addr, int type, bool require_writable,
+ error_t OBJECT_ (struct vg_cap *root,
+ vg_addr_t addr, int type, bool require_writable,
struct object **objectp, bool *writable)
{
bool w = true;
- struct cap cap;
+ struct vg_cap cap;
cap = as_object_lookup_rel (principal, root, addr, type, &w);
- if (type != -1 && ! cap_types_compatible (cap.type, type))
+ if (type != -1 && ! vg_cap_types_compatible (cap.type, type))
{
DEBUG (0, "Addr 0x%llx/%d does not reference object of "
"type %s but %s",
- addr_prefix (addr), addr_depth (addr),
- cap_type_string (type), cap_type_string (cap.type));
+ vg_addr_prefix (addr), vg_addr_depth (addr),
+ vg_cap_type_string (type), vg_cap_type_string (cap.type));
return ENOENT;
}
@@ -520,18 +520,18 @@ server_loop (void)
if (require_writable && ! w)
{
- DEBUG (0, "Addr " ADDR_FMT " not writable",
- ADDR_PRINTF (addr));
+ DEBUG (0, "Addr " VG_ADDR_FMT " not writable",
+ VG_ADDR_PRINTF (addr));
return EPERM;
}
- *objectp = cap_to_object (principal, &cap);
+ *objectp = vg_cap_to_object (principal, &cap);
if (! *objectp)
{
do_debug (4)
- DEBUG (0, "Addr " ADDR_FMT " contains a dangling pointer: "
- CAP_FMT,
- ADDR_PRINTF (addr), CAP_PRINTF (&cap));
+ DEBUG (0, "Addr " VG_ADDR_FMT " contains a dangling pointer: "
+ VG_CAP_FMT,
+ VG_ADDR_PRINTF (addr), VG_CAP_PRINTF (&cap));
return ENOENT;
}
@@ -547,14 +547,14 @@ server_loop (void)
OBJECT_ret; \
})
- /* Find an address space root. If ADDR_VOID, the current
+ /* Find an address space root. If VG_ADDR_VOID, the current
thread's. Otherwise, the object identified by ROOT_ADDR_ in
the caller's address space. If that is a thread object, then
its address space root is used. */
#define ROOT(root_addr_) \
({ \
- struct cap *root_; \
- if (ADDR_IS_VOID (root_addr_)) \
+ struct vg_cap *root_; \
+ if (VG_ADDR_IS_VOID (root_addr_)) \
root_ = &thread->aspace; \
else \
{ \
@@ -563,13 +563,13 @@ server_loop (void)
thread if it matches the guard exactly. */ \
struct object *t_; \
error_t err = OBJECT_ (&thread->aspace, root_addr_, \
- cap_thread, true, &t_, NULL); \
+ vg_cap_thread, true, &t_, NULL); \
if (! err) \
root_ = &((struct thread *) t_)->aspace; \
else \
root_ = SLOT (&thread->aspace, root_addr_); \
} \
- DEBUG (4, "root: " CAP_FMT, CAP_PRINTF (root_)); \
+ DEBUG (4, "root: " VG_CAP_FMT, VG_CAP_PRINTF (root_)); \
\
root_; \
})
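
On failure these macros send an error reply and bail out of the current request, so method handlers can chain them without explicit checks; the RM_cap_copy case below uses exactly this pattern. A condensed sketch (names as in that handler):

    struct vg_cap *target = SLOT (target_root, target_addr);
    target_root = ROOT (source_as_addr);
    struct vg_cap source = CAP (target_root, source_addr, -1, false);
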
@@ -592,7 +592,7 @@ server_loop (void)
#define ARG64_WORDS 1
#endif
-#define ARG_ADDR(word_) ((addr_t) { ARG64(word_) })
+#define ARG_ADDR(word_) ((vg_addr_t) { ARG64(word_) })
if (label == 2132)
/* write. */
@@ -613,30 +613,30 @@ server_loop (void)
int i = 0;
uintptr_t flags = ARG (i);
i ++;
- addr_t recv_activity = ARG_ADDR (i);
+ vg_addr_t recv_activity = ARG_ADDR (i);
i += ARG64_WORDS;
- addr_t recv_messenger = ARG_ADDR (i);
+ vg_addr_t recv_messenger = ARG_ADDR (i);
i += ARG64_WORDS;
- addr_t recv_buf = ARG_ADDR (i);
+ vg_addr_t recv_buf = ARG_ADDR (i);
i += ARG64_WORDS;
- addr_t recv_inline_cap = ARG_ADDR (i);
+ vg_addr_t recv_inline_cap = ARG_ADDR (i);
i += ARG64_WORDS;
- addr_t send_activity = ARG_ADDR (i);
+ vg_addr_t send_activity = ARG_ADDR (i);
i += ARG64_WORDS;
- addr_t target_messenger = ARG_ADDR (i);
+ vg_addr_t target_messenger = ARG_ADDR (i);
i += ARG64_WORDS;
- addr_t send_messenger = ARG_ADDR (i);
+ vg_addr_t send_messenger = ARG_ADDR (i);
i += ARG64_WORDS;
- addr_t send_buf = ARG_ADDR (i);
+ vg_addr_t send_buf = ARG_ADDR (i);
i += ARG64_WORDS;
uintptr_t inline_word1 = ARG (i);
i ++;
uintptr_t inline_word2 = ARG (i);
i ++;
- addr_t inline_cap = ARG_ADDR (i);
+ vg_addr_t inline_cap = ARG_ADDR (i);
#ifndef NDEBUG
/* Get the label early to improve debugging output in case the
@@ -649,21 +649,21 @@ server_loop (void)
{
principal = activity;
- struct cap cap = CAP_VOID;
- if (! ADDR_IS_VOID (send_buf))
+ struct vg_cap cap = VG_CAP_VOID;
+ if (! VG_ADDR_IS_VOID (send_buf))
/* Caller provided a send buffer. */
- CAP_ (&thread->aspace, send_buf, cap_page, true, &cap);
+ CAP_ (&thread->aspace, send_buf, vg_cap_page, true, &cap);
else
{
struct object *object = NULL;
OBJECT_ (&thread->aspace, send_messenger,
- cap_messenger, true, &object, NULL);
+ vg_cap_messenger, true, &object, NULL);
if (object)
cap = ((struct messenger *) object)->buffer;
}
struct vg_message *message;
- message = (struct vg_message *) cap_to_object (principal,
+ message = (struct vg_message *) vg_cap_to_object (principal,
&cap);
if (message)
label = vg_message_word (message, 0);
@@ -672,9 +672,9 @@ server_loop (void)
#endif
DEBUG (4, "flags: %s%s%s%s%s%s %s%s%s%s%s%s %s %s%s%s%s(%x),"
- "recv (" ADDR_FMT ", " ADDR_FMT ", " ADDR_FMT "), "
- "send (" ADDR_FMT ", " ADDR_FMT ", " ADDR_FMT ", " ADDR_FMT "), "
- "inline (" ADDR_FMT "; %x, %x, " ADDR_FMT ")",
+ "recv (" VG_ADDR_FMT ", " VG_ADDR_FMT ", " VG_ADDR_FMT "), "
+ "send (" VG_ADDR_FMT ", " VG_ADDR_FMT ", " VG_ADDR_FMT ", " VG_ADDR_FMT "), "
+ "inline (" VG_ADDR_FMT "; %x, %x, " VG_ADDR_FMT ")",
(flags & VG_IPC_RECEIVE) ? "R" : "-",
(flags & VG_IPC_RECEIVE_NONBLOCKING) ? "N" : "B",
(flags & VG_IPC_RECEIVE_ACTIVATE) ? "A" : "-",
@@ -693,22 +693,22 @@ server_loop (void)
(flags & VG_IPC_SEND_INLINE_WORD2) ? "2" : "-",
(flags & VG_IPC_SEND_INLINE_CAP1) ? "C" : "-",
flags,
- ADDR_PRINTF (recv_activity), ADDR_PRINTF (recv_messenger),
- ADDR_PRINTF (recv_buf),
- ADDR_PRINTF (send_activity), ADDR_PRINTF (target_messenger),
- ADDR_PRINTF (send_messenger), ADDR_PRINTF (send_buf),
- ADDR_PRINTF (recv_inline_cap),
- inline_word1, inline_word2, ADDR_PRINTF (inline_cap));
+ VG_ADDR_PRINTF (recv_activity), VG_ADDR_PRINTF (recv_messenger),
+ VG_ADDR_PRINTF (recv_buf),
+ VG_ADDR_PRINTF (send_activity), VG_ADDR_PRINTF (target_messenger),
+ VG_ADDR_PRINTF (send_messenger), VG_ADDR_PRINTF (send_buf),
+ VG_ADDR_PRINTF (recv_inline_cap),
+ inline_word1, inline_word2, VG_ADDR_PRINTF (inline_cap));
if ((flags & VG_IPC_RECEIVE))
/* IPC includes a receive phase. */
{
principal = activity;
- if (! ADDR_IS_VOID (recv_activity))
+ if (! VG_ADDR_IS_VOID (recv_activity))
{
principal = (struct activity *) OBJECT (&thread->aspace,
recv_activity,
- cap_activity, false,
+ vg_cap_activity, false,
NULL);
if (! principal)
{
@@ -719,7 +719,7 @@ server_loop (void)
struct messenger *messenger
= (struct messenger *) OBJECT (&thread->aspace,
- recv_messenger, cap_messenger,
+ recv_messenger, vg_cap_messenger,
true, NULL);
if (! messenger)
{
@@ -737,10 +737,10 @@ server_loop (void)
else
{
messenger->out_of_band = true;
- if (unlikely (! ADDR_IS_VOID (recv_buf)))
+ if (unlikely (! VG_ADDR_IS_VOID (recv_buf)))
/* Associate RECV_BUF with RECV_MESSENGER. */
messenger->buffer = CAP (&thread->aspace, recv_buf,
- cap_page, true);
+ vg_cap_page, true);
}
if (unlikely ((flags & VG_IPC_RECEIVE_SET_THREAD_TO_CALLER)))
@@ -799,19 +799,19 @@ server_loop (void)
label = inline_word1;
principal = activity;
- struct cap principal_cap;
- if (! ADDR_IS_VOID (send_activity))
+ struct vg_cap principal_cap;
+ if (! VG_ADDR_IS_VOID (send_activity))
{
/* We need the cap below, otherwise, we could just use
OBJECT. */
principal_cap = CAP (&thread->aspace,
- send_activity, cap_activity, false);
- principal = (struct activity *) cap_to_object (principal,
+ send_activity, vg_cap_activity, false);
+ principal = (struct activity *) vg_cap_to_object (principal,
&principal_cap);
if (! principal)
{
- DEBUG (4, "Dangling pointer at " ADDR_FMT,
- ADDR_PRINTF (send_activity));
+ DEBUG (4, "Dangling pointer at " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (send_activity));
REPLY (ENOENT);
}
}
@@ -823,7 +823,7 @@ server_loop (void)
struct messenger *source
= (struct messenger *) OBJECT (&thread->aspace,
- send_messenger, cap_messenger,
+ send_messenger, vg_cap_messenger,
true, NULL);
if (unlikely (! source))
{
@@ -832,8 +832,8 @@ server_loop (void)
}
if (! (flags & VG_IPC_SEND_INLINE)
- && unlikely (! ADDR_IS_VOID (send_buf)))
- source->buffer = CAP (&thread->aspace, send_buf, cap_page, true);
+ && unlikely (! VG_ADDR_IS_VOID (send_buf)))
+ source->buffer = CAP (&thread->aspace, send_buf, vg_cap_page, true);
if (unlikely ((flags & VG_IPC_SEND_SET_THREAD_TO_CALLER)))
source->thread = object_to_cap ((struct object *) thread);
@@ -846,7 +846,7 @@ server_loop (void)
bool target_writable = true;
struct object *target;
/* We special-case VOID to mean the current thread. */
- if (ADDR_IS_VOID (target_messenger))
+ if (VG_ADDR_IS_VOID (target_messenger))
target = (struct object *) thread;
else
target = OBJECT (&thread->aspace, target_messenger, -1, false,
@@ -857,13 +857,13 @@ server_loop (void)
REPLY (ENOENT);
}
- if (object_type (target) == cap_messenger && ! target_writable)
+ if (object_type (target) == vg_cap_messenger && ! target_writable)
/* TARGET is a weak reference to a messenger. Forward the
message. */
{
- DEBUG (5, "IPC: " OID_FMT " -> " OID_FMT,
- OID_PRINTF (object_oid ((struct object *) source)),
- OID_PRINTF (object_oid ((struct object *) target)));
+ DEBUG (5, "IPC: " VG_OID_FMT " -> " VG_OID_FMT,
+ VG_OID_PRINTF (object_oid ((struct object *) source)),
+ VG_OID_PRINTF (object_oid ((struct object *) target)));
if ((flags & VG_IPC_SEND_INLINE))
{
@@ -940,18 +940,18 @@ server_loop (void)
}
else
{
- if (source->buffer.type != cap_page)
+ if (source->buffer.type != vg_cap_page)
{
DEBUG (0, "Sender user-buffer has wrong type: %s",
- cap_type_string (source->buffer.type));
+ vg_cap_type_string (source->buffer.type));
REPLY (EINVAL);
}
- message = (struct vg_message *) cap_to_object (principal,
+ message = (struct vg_message *) vg_cap_to_object (principal,
&source->buffer);
if (! message)
{
DEBUG (0, "Sender user-buffer has wrong type: %s",
- cap_type_string (source->buffer.type));
+ vg_cap_type_string (source->buffer.type));
REPLY (EINVAL);
}
}
@@ -973,16 +973,16 @@ server_loop (void)
OBJECT (&thread->aspace,
vg_message_cap (message,
vg_message_cap_count (message) - 1),
- cap_rmessenger, false, NULL);
+ vg_cap_rmessenger, false, NULL);
/* There are a number of methods that look up an object relative
to the invoked object. Generate an appropriate root for
them. */
- struct cap target_root_cap;
- struct cap *target_root;
+ struct vg_cap target_root_cap;
+ struct vg_cap *target_root;
if (likely (target == (struct object *) thread))
target_root = &thread->aspace;
- else if (object_type (target) == cap_thread)
+ else if (object_type (target) == vg_cap_thread)
target_root = &((struct thread *) target)->aspace;
else
{
@@ -990,17 +990,17 @@ server_loop (void)
target_root = &target_root_cap;
}
- DEBUG (4, OID_FMT " %s(%llx) -> " OID_FMT " %s(%llx)",
- OID_PRINTF (object_oid ((struct object *) source)),
- cap_type_string (object_type ((struct object *) source)),
+ DEBUG (4, VG_OID_FMT " %s(%llx) -> " VG_OID_FMT " %s(%llx)",
+ VG_OID_PRINTF (object_oid ((struct object *) source)),
+ vg_cap_type_string (object_type ((struct object *) source)),
source->id,
- OID_PRINTF (object_oid ((struct object *) target)),
- cap_type_string (object_type (target)),
- object_type (target) == cap_messenger
+ VG_OID_PRINTF (object_oid ((struct object *) target)),
+ vg_cap_type_string (object_type (target)),
+ object_type (target) == vg_cap_messenger
? ((struct messenger *) target)->id : 0);
if (reply)
- DEBUG (4, "reply to: " OID_FMT "(%llx)",
- OID_PRINTF (object_oid ((struct object *) reply)),
+ DEBUG (4, "reply to: " VG_OID_FMT "(%llx)",
+ VG_OID_PRINTF (object_oid ((struct object *) reply)),
reply->id);
switch (label)
@@ -1066,20 +1066,20 @@ server_loop (void)
int count = 0;
for (count = 0; count < max; count ++, start += PAGESIZE)
{
- struct cap cap;
+ struct vg_cap cap;
bool writable;
cap = as_object_lookup_rel (activity, &thread->aspace,
- addr_chop (PTR_TO_ADDR (start),
+ vg_addr_chop (VG_PTR_TO_ADDR (start),
PAGESIZE_LOG2),
- cap_rpage, &writable);
+ vg_cap_rpage, &writable);
- if (cap.type != cap_page && cap.type != cap_rpage)
+ if (cap.type != vg_cap_page && cap.type != vg_cap_rpage)
break;
if (! writable && cap.discardable)
cap.discardable = false;
- struct object *page = cap_to_object (activity, &cap);
+ struct object *page = vg_cap_to_object (activity, &cap);
if (! page)
break;
@@ -1097,7 +1097,7 @@ server_loop (void)
DEBUG (4, "Prefault %d: " DEBUG_BOLD ("%x") " <- %x/%x %s",
count + 1, start,
l4_address (fpage), l4_rights (fpage),
- cap_type_string (cap.type));
+ vg_cap_type_string (cap.type));
}
if (count > 0)
@@ -1115,11 +1115,11 @@ server_loop (void)
case RM_folio_alloc:
{
- if (object_type (target) != cap_activity_control)
+ if (object_type (target) != vg_cap_activity_control)
{
- DEBUG (0, "target " ADDR_FMT " not an activity but a %s",
- ADDR_PRINTF (target_messenger),
- cap_type_string (object_type (target)));
+ DEBUG (0, "target " VG_ADDR_FMT " not an activity but a %s",
+ VG_ADDR_PRINTF (target_messenger),
+ vg_cap_type_string (object_type (target)));
REPLY (EINVAL);
}
@@ -1130,7 +1130,7 @@ server_loop (void)
if (err)
REPLY (err);
- DEBUG (4, "(" ADDR_FMT ")", ADDR_PRINTF (target_messenger));
+ DEBUG (4, "(" VG_ADDR_FMT ")", VG_ADDR_PRINTF (target_messenger));
struct folio *folio = folio_alloc (activity, policy);
if (! folio)
@@ -1143,7 +1143,7 @@ server_loop (void)
case RM_folio_free:
{
- if (object_type (target) != cap_folio)
+ if (object_type (target) != vg_cap_folio)
REPLY (EINVAL);
struct folio *folio = (struct folio *) target;
@@ -1152,7 +1152,7 @@ server_loop (void)
if (err)
REPLY (err);
- DEBUG (4, "(" ADDR_FMT ")", ADDR_PRINTF (target_messenger));
+ DEBUG (4, "(" VG_ADDR_FMT ")", VG_ADDR_PRINTF (target_messenger));
folio_free (principal, folio);
@@ -1162,7 +1162,7 @@ server_loop (void)
case RM_folio_object_alloc:
{
- if (object_type (target) != cap_folio)
+ if (object_type (target) != vg_cap_folio)
REPLY (EINVAL);
struct folio *folio = (struct folio *) target;
@@ -1178,30 +1178,30 @@ server_loop (void)
if (err)
REPLY (err);
- DEBUG (4, "(" ADDR_FMT ", %d (" ADDR_FMT "), %s, (%s, %d), %d)",
- ADDR_PRINTF (target_messenger), idx,
- addr_depth (target_messenger) + FOLIO_OBJECTS_LOG2
- <= ADDR_BITS
- ? ADDR_PRINTF (addr_extend (target_messenger,
- idx, FOLIO_OBJECTS_LOG2))
- : ADDR_PRINTF (ADDR_VOID),
- cap_type_string (type),
+ DEBUG (4, "(" VG_ADDR_FMT ", %d (" VG_ADDR_FMT "), %s, (%s, %d), %d)",
+ VG_ADDR_PRINTF (target_messenger), idx,
+ vg_addr_depth (target_messenger) + VG_FOLIO_OBJECTS_LOG2
+ <= VG_ADDR_BITS
+ ? VG_ADDR_PRINTF (vg_addr_extend (target_messenger,
+ idx, VG_FOLIO_OBJECTS_LOG2))
+ : VG_ADDR_PRINTF (VG_ADDR_VOID),
+ vg_cap_type_string (type),
policy.discardable ? "discardable" : "precious",
policy.priority,
return_code);
- if (idx >= FOLIO_OBJECTS)
+ if (idx >= VG_FOLIO_OBJECTS)
REPLY (EINVAL);
- if (! (CAP_TYPE_MIN <= type && type <= CAP_TYPE_MAX))
+ if (! (VG_CAP_TYPE_MIN <= type && type <= VG_CAP_TYPE_MAX))
REPLY (EINVAL);
- struct cap cap;
+ struct vg_cap cap;
cap = folio_object_alloc (principal,
folio, idx, type, policy, return_code);
- struct cap weak = cap;
- weak.type = cap_type_weaken (cap.type);
+ struct vg_cap weak = cap;
+ weak.type = vg_cap_type_weaken (cap.type);
rm_folio_object_alloc_reply (activity, reply, cap, weak);
break;
@@ -1209,7 +1209,7 @@ server_loop (void)
case RM_folio_policy:
{
- if (object_type (target) != cap_folio)
+ if (object_type (target) != vg_cap_folio)
REPLY (EINVAL);
struct folio *folio = (struct folio *) target;
@@ -1221,8 +1221,8 @@ server_loop (void)
if (err)
REPLY (err);
- DEBUG (4, "(" ADDR_FMT ", %d)",
- ADDR_PRINTF (target_messenger), flags);
+ DEBUG (4, "(" VG_ADDR_FMT ", %d)",
+ VG_ADDR_PRINTF (target_messenger), flags);
folio_policy (principal, folio, flags, in, &out);
@@ -1232,13 +1232,13 @@ server_loop (void)
case RM_cap_copy:
{
- addr_t source_as_addr;
- addr_t source_addr;
- struct cap source;
- addr_t target_as_addr;
- addr_t target_addr;
+ vg_addr_t source_as_addr;
+ vg_addr_t source_addr;
+ struct vg_cap source;
+ vg_addr_t target_as_addr;
+ vg_addr_t target_addr;
uint32_t flags;
- struct cap_properties properties;
+ struct vg_cap_properties properties;
err = rm_cap_copy_send_unmarshal (message,
&target_addr,
@@ -1247,67 +1247,67 @@ server_loop (void)
if (err)
REPLY (err);
- DEBUG (4, "(" ADDR_FMT "@" ADDR_FMT " <- "
- ADDR_FMT "@" ADDR_FMT ", %s%s%s%s%s%s, %s/%d %lld/%d %d/%d",
+ DEBUG (4, "(" VG_ADDR_FMT "@" VG_ADDR_FMT " <- "
+ VG_ADDR_FMT "@" VG_ADDR_FMT ", %s%s%s%s%s%s, %s/%d %lld/%d %d/%d",
- ADDR_PRINTF (target_messenger), ADDR_PRINTF (target_addr),
- ADDR_PRINTF (source_as_addr), ADDR_PRINTF (source_addr),
+ VG_ADDR_PRINTF (target_messenger), VG_ADDR_PRINTF (target_addr),
+ VG_ADDR_PRINTF (source_as_addr), VG_ADDR_PRINTF (source_addr),
- CAP_COPY_COPY_ADDR_TRANS_SUBPAGE & flags
+ VG_CAP_COPY_COPY_ADDR_TRANS_SUBPAGE & flags
? "copy subpage/" : "",
- CAP_COPY_COPY_ADDR_TRANS_GUARD & flags
+ VG_CAP_COPY_COPY_ADDR_TRANS_GUARD & flags
? "copy trans guard/" : "",
- CAP_COPY_COPY_SOURCE_GUARD & flags
+ VG_CAP_COPY_COPY_SOURCE_GUARD & flags
? "copy src guard/" : "",
- CAP_COPY_WEAKEN & flags ? "weak/" : "",
- CAP_COPY_DISCARDABLE_SET & flags ? "discardable/" : "",
- CAP_COPY_PRIORITY_SET & flags ? "priority" : "",
+ VG_CAP_COPY_WEAKEN & flags ? "weak/" : "",
+ VG_CAP_COPY_DISCARDABLE_SET & flags ? "discardable/" : "",
+ VG_CAP_COPY_PRIORITY_SET & flags ? "priority" : "",
properties.policy.discardable ? "discardable" : "precious",
properties.policy.priority,
- CAP_ADDR_TRANS_GUARD (properties.addr_trans),
- CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans),
- CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans),
- CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans));
+ VG_CAP_ADDR_TRANS_GUARD (properties.addr_trans),
+ VG_CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans),
+ VG_CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans),
+ VG_CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans));
- struct cap *target;
+ struct vg_cap *target;
target = SLOT (target_root, target_addr);
target_root = ROOT (source_as_addr);
source = CAP (target_root, source_addr, -1, false);
- if ((flags & ~(CAP_COPY_COPY_ADDR_TRANS_SUBPAGE
- | CAP_COPY_COPY_ADDR_TRANS_GUARD
- | CAP_COPY_COPY_SOURCE_GUARD
- | CAP_COPY_WEAKEN
- | CAP_COPY_DISCARDABLE_SET
- | CAP_COPY_PRIORITY_SET)))
+ if ((flags & ~(VG_CAP_COPY_COPY_ADDR_TRANS_SUBPAGE
+ | VG_CAP_COPY_COPY_ADDR_TRANS_GUARD
+ | VG_CAP_COPY_COPY_SOURCE_GUARD
+ | VG_CAP_COPY_WEAKEN
+ | VG_CAP_COPY_DISCARDABLE_SET
+ | VG_CAP_COPY_PRIORITY_SET)))
REPLY (EINVAL);
- DEBUG (4, "(target: (" ADDR_FMT ") " ADDR_FMT ", "
- "source: (" ADDR_FMT ") " ADDR_FMT ", "
+ DEBUG (4, "(target: (" VG_ADDR_FMT ") " VG_ADDR_FMT ", "
+ "source: (" VG_ADDR_FMT ") " VG_ADDR_FMT ", "
"%s|%s, %s {%llx/%d %d/%d})",
- ADDR_PRINTF (target_as_addr), ADDR_PRINTF (target_addr),
- ADDR_PRINTF (source_as_addr), ADDR_PRINTF (source_addr),
- flags & CAP_COPY_COPY_ADDR_TRANS_GUARD ? "copy trans"
- : (flags & CAP_COPY_COPY_SOURCE_GUARD ? "source"
+ VG_ADDR_PRINTF (target_as_addr), VG_ADDR_PRINTF (target_addr),
+ VG_ADDR_PRINTF (source_as_addr), VG_ADDR_PRINTF (source_addr),
+ flags & VG_CAP_COPY_COPY_ADDR_TRANS_GUARD ? "copy trans"
+ : (flags & VG_CAP_COPY_COPY_SOURCE_GUARD ? "source"
: "preserve"),
- flags & CAP_COPY_COPY_ADDR_TRANS_SUBPAGE ? "copy"
+ flags & VG_CAP_COPY_COPY_ADDR_TRANS_SUBPAGE ? "copy"
: "preserve",
- flags & CAP_COPY_WEAKEN ? "weaken" : "no weaken",
- CAP_ADDR_TRANS_GUARD (properties.addr_trans),
- CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans),
- CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans),
- CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans));
-
- bool r = cap_copy_x (principal,
- ADDR_VOID, target, ADDR_VOID,
- ADDR_VOID, source, ADDR_VOID,
+ flags & VG_CAP_COPY_WEAKEN ? "weaken" : "no weaken",
+ VG_CAP_ADDR_TRANS_GUARD (properties.addr_trans),
+ VG_CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans),
+ VG_CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans),
+ VG_CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans));
+
+ bool r = vg_cap_copy_x (principal,
+ VG_ADDR_VOID, target, VG_ADDR_VOID,
+ VG_ADDR_VOID, source, VG_ADDR_VOID,
flags, properties);
if (! r)
REPLY (EINVAL);
- if ((flags & (CAP_COPY_DISCARDABLE_SET | CAP_COPY_PRIORITY_SET)))
+ if ((flags & (VG_CAP_COPY_DISCARDABLE_SET | VG_CAP_COPY_PRIORITY_SET)))
/* The caller changed the policy. Also change it on the
object. */
{
@@ -1322,12 +1322,12 @@ server_loop (void)
/* XXX: This should only be allowed if TARGET
grants writable access to the object. */
- if ((flags & CAP_COPY_DISCARDABLE_SET))
+ if ((flags & VG_CAP_COPY_DISCARDABLE_SET))
p.discardable = properties.policy.discardable;
/* Only the current claimant can set the
priority. */
- if ((flags & CAP_COPY_PRIORITY_SET)
+ if ((flags & VG_CAP_COPY_PRIORITY_SET)
&& desc->activity == principal)
p.priority = properties.policy.priority;
@@ -1341,20 +1341,20 @@ server_loop (void)
/* XXX: Surprisingly, it appears that this may be
more expensive than just faulting the pages
normally. This needs more investigation. */
- if (ADDR_IS_VOID (target_as_addr)
- && cap_types_compatible (target->type, cap_page)
- && CAP_GUARD_BITS (target) == 0
- && addr_depth (target_addr) == ADDR_BITS - PAGESIZE_LOG2)
+ if (VG_ADDR_IS_VOID (target_as_addr)
+ && vg_cap_types_compatible (target->type, vg_cap_page)
+ && VG_CAP_GUARD_BITS (target) == 0
+ && vg_addr_depth (target_addr) == VG_ADDR_BITS - PAGESIZE_LOG2)
/* The target address space is the caller's. The target
object appears to be a page. It seems to be
installed at a point where it would appear in the
hardware address space. If this is really the case,
then we can map it now and save a fault later. */
{
- profile_region ("cap_copy-prefault");
+ profile_region ("vg_cap_copy-prefault");
- struct cap cap = *target;
- if (target->type == cap_rpage)
+ struct vg_cap cap = *target;
+ if (target->type == vg_cap_rpage)
cap.discardable = false;
struct object *page = cap_to_object_soft (principal, &cap);
@@ -1365,11 +1365,11 @@ server_loop (void)
l4_fpage_t fpage
= l4_fpage ((uintptr_t) page, PAGESIZE);
fpage = l4_fpage_add_rights (fpage, L4_FPAGE_READABLE);
- if (cap.type == cap_page)
+ if (cap.type == vg_cap_page)
fpage = l4_fpage_add_rights (fpage,
L4_FPAGE_WRITABLE);
- uintptr_t page_addr = addr_prefix (target_addr);
+ uintptr_t page_addr = vg_addr_prefix (target_addr);
l4_map_item_t map_item = l4_map_item (fpage, page_addr);
@@ -1385,19 +1385,19 @@ server_loop (void)
case RM_cap_rubout:
{
- addr_t addr;
+ vg_addr_t addr;
err = rm_cap_rubout_send_unmarshal (message, &addr, NULL);
if (err)
REPLY (err);
- DEBUG (4, ADDR_FMT "@" ADDR_FMT,
- ADDR_PRINTF (target_messenger),
- ADDR_PRINTF (addr));
+ DEBUG (4, VG_ADDR_FMT "@" VG_ADDR_FMT,
+ VG_ADDR_PRINTF (target_messenger),
+ VG_ADDR_PRINTF (addr));
/* We don't look up the argument directly as we need to
respect any subpage specification for cappages. */
- struct cap *slot = SLOT (target_root, addr);
+ struct vg_cap *slot = SLOT (target_root, addr);
cap_shootdown (principal, slot);
@@ -1409,53 +1409,53 @@ server_loop (void)
case RM_cap_read:
{
- addr_t cap_addr;
+ vg_addr_t cap_addr;
err = rm_cap_read_send_unmarshal (message, &cap_addr, NULL);
if (err)
REPLY (err);
- DEBUG (4, ADDR_FMT "@" ADDR_FMT,
- ADDR_PRINTF (target_messenger), ADDR_PRINTF (cap_addr));
+ DEBUG (4, VG_ADDR_FMT "@" VG_ADDR_FMT,
+ VG_ADDR_PRINTF (target_messenger), VG_ADDR_PRINTF (cap_addr));
- struct cap cap = CAP (target_root, cap_addr, -1, false);
+ struct vg_cap cap = CAP (target_root, cap_addr, -1, false);
/* Even if CAP.TYPE is not void, the cap may not designate
an object. Looking up the object will set CAP.TYPE to
- cap_void if this is the case. */
- if (cap.type != cap_void)
- cap_to_object (principal, &cap);
+ vg_cap_void if this is the case. */
+ if (cap.type != vg_cap_void)
+ vg_cap_to_object (principal, &cap);
rm_cap_read_reply (activity, reply, cap.type,
- CAP_PROPERTIES_GET (cap));
+ VG_CAP_PROPERTIES_GET (cap));
break;
}
case RM_object_discarded_clear:
{
- addr_t object_addr;
+ vg_addr_t object_addr;
err = rm_object_discarded_clear_send_unmarshal
(message, &object_addr, NULL);
if (err)
REPLY (err);
- DEBUG (4, ADDR_FMT, ADDR_PRINTF (object_addr));
+ DEBUG (4, VG_ADDR_FMT, VG_ADDR_PRINTF (object_addr));
	  /* We can't look up the object using OBJECT, as object_lookup
	     returns NULL if the object's discardable bit is set!
	     Instead, we look up the capability, find the object's
folio and then clear its discarded bit. */
- struct cap cap = CAP (&thread->aspace, object_addr, -1, true);
- if (cap.type == cap_void)
+ struct vg_cap cap = CAP (&thread->aspace, object_addr, -1, true);
+ if (cap.type == vg_cap_void)
REPLY (ENOENT);
- if (cap_type_weak_p (cap.type))
+ if (vg_cap_type_weak_p (cap.type))
REPLY (EPERM);
- int idx = (cap.oid % (1 + FOLIO_OBJECTS)) - 1;
- oid_t foid = cap.oid - idx - 1;
+ int idx = (cap.oid % (1 + VG_FOLIO_OBJECTS)) - 1;
+ vg_oid_t foid = cap.oid - idx - 1;
struct folio *folio = (struct folio *)
- object_find (activity, foid, OBJECT_POLICY_VOID);
+ object_find (activity, foid, VG_OBJECT_POLICY_VOID);
if (folio_object_version (folio, idx) != cap.version)
REPLY (ENOENT);
@@ -1470,9 +1470,9 @@ server_loop (void)
expensive than just faulting the pages normally. This
	     needs more investigation. */
if (was_discarded
- && cap.type == cap_page
- && CAP_GUARD_BITS (&cap) == 0
- && (addr_depth (object_addr) == ADDR_BITS - PAGESIZE_LOG2))
+ && cap.type == vg_cap_page
+ && VG_CAP_GUARD_BITS (&cap) == 0
+ && (vg_addr_depth (object_addr) == VG_ADDR_BITS - PAGESIZE_LOG2))
/* The target object was discarded, appears to be a page
and seems to be installed at a point where it would
appear in the hardware address space. If this is
@@ -1481,7 +1481,7 @@ server_loop (void)
{
profile_region ("object_discard-prefault");
- struct object *page = cap_to_object (principal, &cap);
+ struct object *page = vg_cap_to_object (principal, &cap);
if (page)
{
object_to_object_desc (page)->mapped = true;
@@ -1491,14 +1491,14 @@ server_loop (void)
L4_FPAGE_READABLE
| L4_FPAGE_WRITABLE);
- uintptr_t page_addr = addr_prefix (object_addr);
+ uintptr_t page_addr = vg_addr_prefix (object_addr);
l4_map_item_t map_item = l4_map_item (fpage, page_addr);
l4_msg_append_map_item (msg, map_item);
- DEBUG (4, "Prefaulting "ADDR_FMT"(%x) <- %p (%x/%x/%x)",
- ADDR_PRINTF (object_addr), page_addr,
+ DEBUG (4, "Prefaulting "VG_ADDR_FMT"(%x) <- %p (%x/%x/%x)",
+ VG_ADDR_PRINTF (object_addr), page_addr,
page, l4_address (fpage), l4_size (fpage),
l4_rights (fpage));
}
@@ -1516,7 +1516,7 @@ server_loop (void)
if (err)
REPLY (err);
- DEBUG (4, ADDR_FMT, ADDR_PRINTF (target_messenger));
+ DEBUG (4, VG_ADDR_FMT, VG_ADDR_PRINTF (target_messenger));
struct folio *folio = objects_folio (principal, target);
@@ -1534,8 +1534,8 @@ server_loop (void)
if (err)
REPLY (err);
- DEBUG (4, ADDR_FMT ", %sclear",
- ADDR_PRINTF (target_messenger), clear ? "" : "no ");
+ DEBUG (4, VG_ADDR_FMT ", %sclear",
+ VG_ADDR_PRINTF (target_messenger), clear ? "" : "no ");
struct object_desc *desc = object_to_object_desc (target);
uintptr_t status = (desc->user_referenced ? object_referenced : 0)
@@ -1556,14 +1556,14 @@ server_loop (void)
struct object_name name;
err = rm_object_name_send_unmarshal (message, &name, NULL);
- if (object_type (target) == cap_activity_control)
+ if (object_type (target) == vg_cap_activity_control)
{
struct activity *a = (struct activity *) target;
memcpy (a->name.name, name.name, sizeof (name));
a->name.name[sizeof (a->name.name) - 1] = 0;
}
- else if (object_type (target) == cap_thread)
+ else if (object_type (target) == vg_cap_thread)
{
struct thread *t = (struct thread *) target;
@@ -1577,16 +1577,16 @@ server_loop (void)
case RM_thread_exregs:
{
- if (object_type (target) != cap_thread)
+ if (object_type (target) != vg_cap_thread)
REPLY (EINVAL);
struct thread *t = (struct thread *) target;
struct hurd_thread_exregs_in in;
uintptr_t control;
- addr_t aspace_addr;
- addr_t activity_addr;
- addr_t utcb_addr;
- addr_t exception_messenger_addr;
+ vg_addr_t aspace_addr;
+ vg_addr_t activity_addr;
+ vg_addr_t utcb_addr;
+ vg_addr_t exception_messenger_addr;
err = rm_thread_exregs_send_unmarshal
(message, &control, &in,
&aspace_addr, &activity_addr, &utcb_addr,
@@ -1596,10 +1596,10 @@ server_loop (void)
REPLY (err);
int d = 4;
- DEBUG (d, "%s%s" ADDR_FMT "(%x): %s%s%s%s %s%s%s%s %s%s%s %s%s",
+ DEBUG (d, "%s%s" VG_ADDR_FMT "(%x): %s%s%s%s %s%s%s%s %s%s%s %s%s",
t->name.name[0] ? t->name.name : "",
t->name.name[0] ? ": " : "",
- ADDR_PRINTF (target_messenger), t->tid,
+ VG_ADDR_PRINTF (target_messenger), t->tid,
(control & HURD_EXREGS_SET_UTCB) ? "U" : "-",
(control & HURD_EXREGS_SET_EXCEPTION_MESSENGER) ? "E" : "-",
(control & HURD_EXREGS_SET_ASPACE) ? "R" : "-",
@@ -1615,14 +1615,14 @@ server_loop (void)
(control & _L4_XCHG_REGS_SET_HALT) ? "Y" : "N");
if ((control & HURD_EXREGS_SET_UTCB))
- DEBUG (d, "utcb: " ADDR_FMT, ADDR_PRINTF (utcb_addr));
+ DEBUG (d, "utcb: " VG_ADDR_FMT, VG_ADDR_PRINTF (utcb_addr));
if ((control & HURD_EXREGS_SET_EXCEPTION_MESSENGER))
- DEBUG (d, "exception messenger: " ADDR_FMT,
- ADDR_PRINTF (exception_messenger_addr));
+ DEBUG (d, "exception messenger: " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (exception_messenger_addr));
if ((control & HURD_EXREGS_SET_ASPACE))
- DEBUG (d, "aspace: " ADDR_FMT, ADDR_PRINTF (aspace_addr));
+ DEBUG (d, "aspace: " VG_ADDR_FMT, VG_ADDR_PRINTF (aspace_addr));
if ((control & HURD_EXREGS_SET_ACTIVITY))
- DEBUG (d, "activity: " ADDR_FMT, ADDR_PRINTF (activity_addr));
+ DEBUG (d, "activity: " VG_ADDR_FMT, VG_ADDR_PRINTF (activity_addr));
if ((control & HURD_EXREGS_SET_SP))
DEBUG (d, "sp: %p", (void *) in.sp);
if ((control & HURD_EXREGS_SET_IP))
@@ -1632,35 +1632,35 @@ server_loop (void)
if ((control & HURD_EXREGS_SET_USER_HANDLE))
DEBUG (d, "user_handle: %p", (void *) in.user_handle);
- struct cap aspace = CAP_VOID;
+ struct vg_cap aspace = VG_CAP_VOID;
if ((HURD_EXREGS_SET_ASPACE & control))
aspace = CAP (&thread->aspace, aspace_addr, -1, false);
- struct cap a = CAP_VOID;
+ struct vg_cap a = VG_CAP_VOID;
if ((HURD_EXREGS_SET_ACTIVITY & control))
{
/* XXX: Remove this hack... */
- if (ADDR_IS_VOID (activity_addr))
+ if (VG_ADDR_IS_VOID (activity_addr))
a = thread->activity;
else
a = CAP (&thread->aspace,
- activity_addr, cap_activity, false);
+ activity_addr, vg_cap_activity, false);
}
- struct cap utcb = CAP_VOID;
+ struct vg_cap utcb = VG_CAP_VOID;
if ((HURD_EXREGS_SET_UTCB & control))
- utcb = CAP (&thread->aspace, utcb_addr, cap_page, true);
+ utcb = CAP (&thread->aspace, utcb_addr, vg_cap_page, true);
- struct cap exception_messenger = CAP_VOID;
+ struct vg_cap exception_messenger = VG_CAP_VOID;
if ((HURD_EXREGS_SET_EXCEPTION_MESSENGER & control))
exception_messenger
= CAP (&thread->aspace, exception_messenger_addr,
- cap_rmessenger, false);
+ vg_cap_rmessenger, false);
- struct cap aspace_out = thread->aspace;
- struct cap activity_out = thread->activity;
- struct cap utcb_out = thread->utcb;
- struct cap exception_messenger_out = thread->exception_messenger;
+ struct vg_cap aspace_out = thread->aspace;
+ struct vg_cap activity_out = thread->activity;
+ struct vg_cap utcb_out = thread->utcb;
+ struct vg_cap exception_messenger_out = thread->exception_messenger;
struct hurd_thread_exregs_out out;
out.sp = in.sp;
@@ -1686,7 +1686,7 @@ server_loop (void)
case RM_thread_id:
{
- if (object_type (target) != cap_thread)
+ if (object_type (target) != vg_cap_thread)
REPLY (EINVAL);
struct thread *t = (struct thread *) target;
@@ -1705,7 +1705,7 @@ server_loop (void)
if (err)
REPLY (err);
- DEBUG (4, ADDR_FMT, ADDR_PRINTF (target_messenger));
+ DEBUG (4, VG_ADDR_FMT, VG_ADDR_PRINTF (target_messenger));
reply->wait_reason = MESSENGER_WAIT_DESTROY;
object_wait_queue_enqueue (principal, target, reply);
@@ -1715,10 +1715,10 @@ server_loop (void)
case RM_activity_policy:
{
- if (object_type (target) != cap_activity_control)
+ if (object_type (target) != vg_cap_activity_control)
{
DEBUG (0, "expects an activity, not a %s",
- cap_type_string (object_type (target)));
+ vg_cap_type_string (object_type (target)));
REPLY (EINVAL);
}
struct activity *activity = (struct activity *) target;
@@ -1734,50 +1734,50 @@ server_loop (void)
int d = 4;
DEBUG (d, "(%s) child: %s%s; sibling: %s%s; storage: %s",
target_writable ? "strong" : "weak",
- (flags & ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET) ? "P" : "-",
- (flags & ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET) ? "W" : "-",
- (flags & ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET)
+ (flags & VG_ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET) ? "P" : "-",
+ (flags & VG_ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET) ? "W" : "-",
+ (flags & VG_ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET)
? "P" : "-",
- (flags & ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET) ? "W" : "-",
- (flags & ACTIVITY_POLICY_STORAGE_SET) ? "P" : "-");
+ (flags & VG_ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET) ? "W" : "-",
+ (flags & VG_ACTIVITY_POLICY_STORAGE_SET) ? "P" : "-");
- if ((flags & ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET))
+ if ((flags & VG_ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET))
DEBUG (d, "Child priority: %d", in.child_rel.priority);
- if ((flags & ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET))
+ if ((flags & VG_ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET))
DEBUG (d, "Child weight: %d", in.child_rel.weight);
- if ((flags & ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET))
+ if ((flags & VG_ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET))
DEBUG (d, "Sibling priority: %d", in.sibling_rel.priority);
- if ((flags & ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET))
+ if ((flags & VG_ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET))
DEBUG (d, "Sibling weight: %d", in.sibling_rel.weight);
- if ((flags & ACTIVITY_POLICY_STORAGE_SET))
+ if ((flags & VG_ACTIVITY_POLICY_STORAGE_SET))
DEBUG (d, "Storage: %d", in.folios);
if (! target_writable
- && (flags & (ACTIVITY_POLICY_STORAGE_SET
- | ACTIVITY_POLICY_SIBLING_REL_SET)))
+ && (flags & (VG_ACTIVITY_POLICY_STORAGE_SET
+ | VG_ACTIVITY_POLICY_SIBLING_REL_SET)))
REPLY (EPERM);
rm_activity_policy_reply (principal, reply, activity->policy);
- if ((flags & (ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET
- | ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET
- | ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET
- | ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET
- | ACTIVITY_POLICY_STORAGE_SET)))
+ if ((flags & (VG_ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET
+ | VG_ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET
+ | VG_ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET
+ | VG_ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET
+ | VG_ACTIVITY_POLICY_STORAGE_SET)))
{
struct activity_policy p = principal->policy;
- if ((flags & ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET))
+ if ((flags & VG_ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET))
p.child_rel.priority = in.child_rel.priority;
- if ((flags & ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET))
+ if ((flags & VG_ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET))
p.child_rel.weight = in.child_rel.weight;
- if ((flags & ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET))
+ if ((flags & VG_ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET))
p.sibling_rel.priority = in.sibling_rel.priority;
- if ((flags & ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET))
+ if ((flags & VG_ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET))
p.sibling_rel.weight = in.sibling_rel.weight;
- if ((flags & ACTIVITY_POLICY_STORAGE_SET))
+ if ((flags & VG_ACTIVITY_POLICY_STORAGE_SET))
p.folios = in.folios;
activity_policy_update (activity, p);
@@ -1788,7 +1788,7 @@ server_loop (void)
case RM_activity_info:
{
- if (object_type (target) != cap_activity_control)
+ if (object_type (target) != vg_cap_activity_control)
REPLY (EINVAL);
struct activity *activity = (struct activity *) target;
@@ -1855,7 +1855,7 @@ server_loop (void)
case RM_thread_activation_collect:
{
- if (object_type (target) != cap_thread)
+ if (object_type (target) != vg_cap_thread)
REPLY (EINVAL);
err = rm_thread_activation_collect_send_unmarshal (message, NULL);
@@ -1903,7 +1903,7 @@ server_loop (void)
err = rm_futex_reply (principal, m, 0);
if (err)
- panic ("Error futex waking: %d", err);
+ panic ("Error vg_futex waking: %d", err);
count ++;
@@ -1962,8 +1962,8 @@ server_loop (void)
char *mode = "unknown";
- struct object *page = cap_to_object (principal, &thread->utcb);
- if (page && object_type (page) == cap_page)
+ struct object *page = vg_cap_to_object (principal, &thread->utcb);
+ if (page && object_type (page) == vg_cap_page)
{
struct vg_utcb *utcb = (struct vg_utcb *) page;
@@ -1989,9 +1989,9 @@ server_loop (void)
REPLY (ENOSYS);
};
- addr_t addr = addr_chop (PTR_TO_ADDR (addr1), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (addr1), PAGESIZE_LOG2);
struct object *object1 = OBJECT (&thread->aspace,
- addr, cap_page, true, NULL);
+ addr, vg_cap_page, true, NULL);
int offset1 = (uintptr_t) addr1 & (PAGESIZE - 1);
int *vaddr1 = (void *) object1 + offset1;
@@ -2028,9 +2028,9 @@ server_loop (void)
break;
case FUTEX_WAKE_OP:
- addr = addr_chop (PTR_TO_ADDR (addr2), PAGESIZE_LOG2);
+ addr = vg_addr_chop (VG_PTR_TO_ADDR (addr2), PAGESIZE_LOG2);
struct object *object2 = OBJECT (&thread->aspace,
- addr, cap_page, true, NULL);
+ addr, vg_cap_page, true, NULL);
int offset2 = (uintptr_t) addr2 & (PAGESIZE - 1);
int *vaddr2 = (void *) object2 + offset2;
@@ -2096,8 +2096,8 @@ server_loop (void)
REPLY (EAGAIN);
/* Get the second object. */
- addr = addr_chop (PTR_TO_ADDR (addr2), PAGESIZE_LOG2);
- object2 = OBJECT (&thread->aspace, addr, cap_page, true, NULL);
+ addr = vg_addr_chop (VG_PTR_TO_ADDR (addr2), PAGESIZE_LOG2);
+ object2 = OBJECT (&thread->aspace, addr, vg_cap_page, true, NULL);
offset2 = (uintptr_t) addr2 & (PAGESIZE - 1);
count = wake (val1, object1, offset1,
@@ -2111,7 +2111,7 @@ server_loop (void)
case VG_messenger_id:
{
- if (object_type (target) != cap_messenger || ! target_writable)
+ if (object_type (target) != vg_cap_messenger || ! target_writable)
REPLY (EINVAL);
struct messenger *m = (struct messenger *) target;
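
The futex cases above all translate a user pointer the same way: chop its
full-depth address down to page granularity, look up the backing page, then
index into it with the in-page offset. A minimal sketch of that translation,
using the renamed helpers from viengoos/addr.h and assuming PAGESIZE and
PAGESIZE_LOG2 come from the usual headers (enclosing_page is a hypothetical
name, not from this tree):

    #include <stdint.h>
    #include <viengoos/addr.h>

    /* Sketch: return the address of the page enclosing PTR and store
       the in-page offset in *OFFSETP, as the futex paths above do.  */
    static vg_addr_t
    enclosing_page (void *ptr, int *offsetp)
    {
      /* Dropping the low PAGESIZE_LOG2 bits of the full-depth address
         leaves the enclosing page.  */
      vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (ptr), PAGESIZE_LOG2);
      *offsetp = (uintptr_t) ptr & (PAGESIZE - 1);
      return addr;
    }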
diff --git a/viengoos/t-activity.c b/viengoos/t-activity.c
index f6a44fa..371bf81 100644
--- a/viengoos/t-activity.c
+++ b/viengoos/t-activity.c
@@ -17,21 +17,21 @@ static struct folio *folio;
static int object;
static struct as_allocate_pt_ret
-allocate_object (enum cap_type type, addr_t addr)
+allocate_object (enum vg_cap_type type, vg_addr_t addr)
{
- if (! folio || object == FOLIO_OBJECTS)
+ if (! folio || object == VG_FOLIO_OBJECTS)
{
- folio = folio_alloc (root_activity, FOLIO_POLICY_DEFAULT);
+ folio = folio_alloc (root_activity, VG_FOLIO_POLICY_DEFAULT);
object = 0;
}
struct as_allocate_pt_ret rt;
rt.cap = folio_object_alloc (root_activity, folio, object ++,
- type, OBJECT_POLICY_DEFAULT, 0);
+ type, VG_OBJECT_POLICY_DEFAULT, 0);
/* We don't need to set RT.STORAGE as as_insert doesn't require it
for the internal interface implementations. */
- rt.storage = ADDR (0, 0);
+ rt.storage = VG_ADDR (0, 0);
return rt;
}
@@ -49,12 +49,12 @@ test (void)
object_init ();
/* Create the root activity. */
- folio = folio_alloc (NULL, FOLIO_POLICY_DEFAULT);
+ folio = folio_alloc (NULL, VG_FOLIO_POLICY_DEFAULT);
if (! folio)
panic ("Failed to allocate storage for the initial task!");
- struct cap c = allocate_object (cap_activity_control, ADDR_VOID).cap;
- root_activity = (struct activity *) cap_to_object (root_activity, &c);
+ struct vg_cap c = allocate_object (vg_cap_activity_control, VG_ADDR_VOID).cap;
+ root_activity = (struct activity *) vg_cap_to_object (root_activity, &c);
folio_parent (root_activity, folio);
@@ -74,20 +74,20 @@ test (void)
for (i = 0; i < N; i ++)
{
/* Allocate a new activity. */
- struct cap cap;
+ struct vg_cap cap;
cap = folio_object_alloc (activity, folio, obj ++,
- cap_activity_control,
- OBJECT_POLICY_DEFAULT, 0);
- a[i].child = (struct activity *) cap_to_object (activity, &cap);
+ vg_cap_activity_control,
+ VG_OBJECT_POLICY_DEFAULT, 0);
+ a[i].child = (struct activity *) vg_cap_to_object (activity, &cap);
/* Allocate a folio against the activity and use it. */
- a[i].folio = folio_alloc (a[i].child, FOLIO_POLICY_DEFAULT);
+ a[i].folio = folio_alloc (a[i].child, VG_FOLIO_POLICY_DEFAULT);
assert (a[i].folio);
cap = folio_object_alloc (a[i].child, a[i].folio, 0,
- cap_page, OBJECT_POLICY_DEFAULT, 0);
- a[i].page = cap_to_object (activity, &cap);
- assert (object_type (a[i].page) == cap_page);
+ vg_cap_page, VG_OBJECT_POLICY_DEFAULT, 0);
+ a[i].page = vg_cap_to_object (activity, &cap);
+ assert (object_type (a[i].page) == vg_cap_page);
}
if (depth > 0)
@@ -99,14 +99,14 @@ test (void)
destroy the rest. */
for (i = 0; i < N / 2; i ++)
{
- struct cap cap = object_to_cap (a[i].page);
- struct object *o = cap_to_object (activity, &cap);
+ struct vg_cap cap = object_to_cap (a[i].page);
+ struct object *o = vg_cap_to_object (activity, &cap);
assert (o == a[i].page);
/* Destroy the activity. */
folio_free (activity, a[i].folio);
- o = cap_to_object (activity, &cap);
+ o = vg_cap_to_object (activity, &cap);
assert (! o);
}
}
@@ -114,7 +114,7 @@ test (void)
int i;
for (i = 0; i < 10; i ++)
{
- struct folio *f = folio_alloc (root_activity, FOLIO_POLICY_DEFAULT);
+ struct folio *f = folio_alloc (root_activity, VG_FOLIO_POLICY_DEFAULT);
assert (f);
try (root_activity, f, 4);
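
Both test harnesses share the folio-backed allocator shown above: objects are
carved out of the current folio until VG_FOLIO_OBJECTS slots are used, then a
fresh folio is allocated. A condensed sketch of one such allocation under the
new names, as it would sit inside a test body (root_activity, folio and index
as in the test; the slot-exhaustion check is elided):

    /* Allocate the object in slot INDEX of FOLIO as a page and resolve
       it.  INDEX must stay below VG_FOLIO_OBJECTS; otherwise allocate a
       fresh folio with folio_alloc (root_activity,
       VG_FOLIO_POLICY_DEFAULT) first.  */
    struct vg_cap cap = folio_object_alloc (root_activity, folio, index,
                                            vg_cap_page,
                                            VG_OBJECT_POLICY_DEFAULT, 0);
    struct object *page = vg_cap_to_object (root_activity, &cap);
    assert (object_type (page) == vg_cap_page);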
diff --git a/viengoos/t-as.c b/viengoos/t-as.c
index 75fbfaa..ffb061f 100644
--- a/viengoos/t-as.c
+++ b/viengoos/t-as.c
@@ -17,28 +17,28 @@ static struct folio *folio;
static int object;
static struct as_allocate_pt_ret
-allocate_object (enum cap_type type, addr_t addr)
+allocate_object (enum vg_cap_type type, vg_addr_t addr)
{
- if (! folio || object == FOLIO_OBJECTS)
+ if (! folio || object == VG_FOLIO_OBJECTS)
{
- folio = folio_alloc (root_activity, FOLIO_POLICY_DEFAULT);
+ folio = folio_alloc (root_activity, VG_FOLIO_POLICY_DEFAULT);
object = 0;
}
struct as_allocate_pt_ret rt;
rt.cap = folio_object_alloc (root_activity, folio, object ++,
- type, OBJECT_POLICY_DEFAULT, 0);
+ type, VG_OBJECT_POLICY_DEFAULT, 0);
/* We don't need to set RT.STORAGE as as_insert doesn't require it
for the internal interface implementations. */
- rt.storage = ADDR (0, 0);
+ rt.storage = VG_ADDR (0, 0);
return rt;
}
static struct as_allocate_pt_ret
-allocate_page_table (addr_t addr)
+allocate_page_table (vg_addr_t addr)
{
- return allocate_object (cap_cappage, addr);
+ return allocate_object (vg_cap_cappage, addr);
}
extern char _start;
@@ -46,17 +46,17 @@ extern char _end;
struct alloc
{
- addr_t addr;
+ vg_addr_t addr;
int type;
};
static void
try (struct alloc *allocs, int count, bool dump)
{
- struct cap aspace = { .type = cap_void };
- struct cap caps[count];
+ struct vg_cap aspace = { .type = vg_cap_void };
+ struct vg_cap caps[count];
- void do_check (struct cap *cap, bool writable, int i, bool present)
+ void do_check (struct vg_cap *cap, bool writable, int i, bool present)
{
if (present)
{
@@ -64,24 +64,24 @@ try (struct alloc *allocs, int count, bool dump)
assert (cap->type == caps[i].type);
- struct object *object = cap_to_object (root_activity, cap);
+ struct object *object = vg_cap_to_object (root_activity, cap);
struct object_desc *odesc = object_to_object_desc (object);
- if (caps[i].type != cap_void)
+ if (caps[i].type != vg_cap_void)
assert (odesc->oid == caps[i].oid);
- if (cap->type == cap_page)
+ if (cap->type == vg_cap_page)
assert (* (unsigned char *) object == i);
}
else
{
if (cap)
{
- struct object *object = cap_to_object (root_activity, cap);
+ struct object *object = vg_cap_to_object (root_activity, cap);
assert (! object);
/* This assertion relies on the fact that the
implementation will clear the type field on a failed
lookup. */
- assert (cap->type == cap_void);
+ assert (cap->type == vg_cap_void);
}
}
}
@@ -91,43 +91,43 @@ try (struct alloc *allocs, int count, bool dump)
{
switch (allocs[i].type)
{
- case cap_folio:
+ case vg_cap_folio:
caps[i] = object_to_cap ((struct object *)
folio_alloc (root_activity,
- FOLIO_POLICY_DEFAULT));
+ VG_FOLIO_POLICY_DEFAULT));
break;
- case cap_void:
- caps[i].type = cap_void;
+ case vg_cap_void:
+ caps[i].type = vg_cap_void;
break;
- case cap_page:
- case cap_rpage:
- case cap_cappage:
- case cap_rcappage:
+ case vg_cap_page:
+ case vg_cap_rpage:
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
caps[i] = allocate_object (allocs[i].type, allocs[i].addr).cap;
break;
default:
assert (! " Bad type");
}
- struct object *object = cap_to_object (root_activity, &caps[i]);
- if (caps[i].type == cap_page)
+ struct object *object = vg_cap_to_object (root_activity, &caps[i]);
+ if (caps[i].type == vg_cap_page)
memset (object, i, PAGESIZE);
- as_insert_full (root_activity, ADDR_VOID, &aspace, allocs[i].addr,
- ADDR_VOID, ADDR_VOID, object_to_cap (object),
+ as_insert_full (root_activity, VG_ADDR_VOID, &aspace, allocs[i].addr,
+ VG_ADDR_VOID, VG_ADDR_VOID, object_to_cap (object),
allocate_page_table);
if (dump)
{
- printf ("After inserting: " ADDR_FMT "\n",
- ADDR_PRINTF (allocs[i].addr));
+ printf ("After inserting: " VG_ADDR_FMT "\n",
+ VG_ADDR_PRINTF (allocs[i].addr));
as_dump_from (root_activity, &aspace, NULL);
}
int j;
for (j = 0; j < count; j ++)
{
- struct cap *cap = NULL;
+ struct vg_cap *cap = NULL;
bool w;
as_slot_lookup_rel_use
@@ -138,7 +138,7 @@ try (struct alloc *allocs, int count, bool dump)
}));
do_check (cap, w, j, j <= i);
- struct cap c;
+ struct vg_cap c;
c = as_object_lookup_rel (root_activity,
&aspace, allocs[j].addr, -1,
&w);
@@ -150,7 +150,7 @@ try (struct alloc *allocs, int count, bool dump)
for (i = 0; i < count; i ++)
{
/* Make sure allocs[i].addr maps to PAGES[i]. */
- struct cap *cap = NULL;
+ struct vg_cap *cap = NULL;
bool w;
as_slot_lookup_rel_use (root_activity, &aspace, allocs[i].addr,
@@ -160,7 +160,7 @@ try (struct alloc *allocs, int count, bool dump)
}));
do_check (cap, w, i, true);
- struct cap c;
+ struct vg_cap c;
c = as_object_lookup_rel (root_activity,
&aspace, allocs[i].addr, -1,
&w);
@@ -169,13 +169,13 @@ try (struct alloc *allocs, int count, bool dump)
/* Void the capability in the returned capability slot. */
as_slot_lookup_rel_use (root_activity, &aspace, allocs[i].addr,
({
- slot->type = cap_void;
+ slot->type = vg_cap_void;
}));
/* The page should no longer be found. */
c = as_object_lookup_rel (root_activity, &aspace, allocs[i].addr, -1,
NULL);
- assert (c.type == cap_void);
+ assert (c.type == vg_cap_void);
/* Restore the capability slot. */
as_slot_lookup_rel_use (root_activity, &aspace, allocs[i].addr,
@@ -201,15 +201,15 @@ try (struct alloc *allocs, int count, bool dump)
/* Finally, free the object. */
switch (caps[i].type)
{
- case cap_folio:
+ case vg_cap_folio:
folio_free (root_activity,
- (struct folio *) cap_to_object (root_activity,
+ (struct folio *) vg_cap_to_object (root_activity,
&caps[i]));
break;
- case cap_void:
+ case vg_cap_void:
break;
default:
- object_free (root_activity, cap_to_object (root_activity, &caps[i]));
+ object_free (root_activity, vg_cap_to_object (root_activity, &caps[i]));
break;
}
@@ -225,7 +225,7 @@ try (struct alloc *allocs, int count, bool dump)
}));
assert (ret);
- struct cap c;
+ struct vg_cap c;
bool writable;
c = as_object_lookup_rel (root_activity,
&aspace, allocs[j].addr, -1, &writable);
@@ -245,12 +245,12 @@ test (void)
object_init ();
/* Create the root activity. */
- folio = folio_alloc (NULL, FOLIO_POLICY_DEFAULT);
+ folio = folio_alloc (NULL, VG_FOLIO_POLICY_DEFAULT);
if (! folio)
panic ("Failed to allocate storage for the initial task!");
- struct cap c = allocate_object (cap_activity_control, ADDR_VOID).cap;
- root_activity = (struct activity *) cap_to_object (root_activity, &c);
+ struct vg_cap c = allocate_object (vg_cap_activity_control, VG_ADDR_VOID).cap;
+ root_activity = (struct activity *) vg_cap_to_object (root_activity, &c);
folio_parent (root_activity, folio);
@@ -260,20 +260,20 @@ test (void)
/* We have an empty address space. When we use slot_lookup_rel
and specify that we don't care what type of capability we get,
we should get the capability slot--if the guard is right. */
- struct cap aspace = { type: cap_void };
+ struct vg_cap aspace = { type: vg_cap_void };
l4_word_t addr = 0xFA000;
bool ret = as_slot_lookup_rel_use (root_activity,
- &aspace, ADDR (addr, ADDR_BITS),
+ &aspace, VG_ADDR (addr, VG_ADDR_BITS),
({ }));
assert (! ret);
- /* Set the root to designate ADDR. */
- bool r = CAP_SET_GUARD (&aspace, addr, ADDR_BITS);
+ /* Set the root to designate VG_ADDR. */
+ bool r = VG_CAP_SET_GUARD (&aspace, addr, VG_ADDR_BITS);
assert (r);
ret = as_slot_lookup_rel_use (root_activity,
- &aspace, ADDR (addr, ADDR_BITS),
+ &aspace, VG_ADDR (addr, VG_ADDR_BITS),
({
assert (slot == &aspace);
assert (writable);
@@ -286,30 +286,30 @@ test (void)
printf ("Checking as_insert... ");
{
struct alloc allocs[] =
- { { ADDR (1 << (FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2),
- ADDR_BITS - FOLIO_OBJECTS_LOG2 - PAGESIZE_LOG2), cap_folio },
- { ADDR (0x100000003, 63), cap_page },
- { ADDR (0x100000004, 63), cap_page },
- { ADDR (0x1000 /* 4k. */, ADDR_BITS - PAGESIZE_LOG2), cap_page },
- { ADDR (0x00100000 /* 1MB */, ADDR_BITS - PAGESIZE_LOG2), cap_page },
- { ADDR (0x01000000 /* 16MB */, ADDR_BITS - PAGESIZE_LOG2), cap_page },
- { ADDR (0x10000000 /* 256MB */, ADDR_BITS - PAGESIZE_LOG2), cap_page },
- { ADDR (0x40000000 /* 1000MB */, ADDR_BITS - PAGESIZE_LOG2),
- cap_page },
- { ADDR (0x40000000 - 0x2000 /* 1000MB - 4k */,
- ADDR_BITS - PAGESIZE_LOG2),
- cap_page },
- { ADDR (0x40001000, ADDR_BITS - PAGESIZE_LOG2), cap_page },
- { ADDR (0x40003000, ADDR_BITS - PAGESIZE_LOG2), cap_page },
- { ADDR (0x40002000, ADDR_BITS - PAGESIZE_LOG2), cap_page },
- { ADDR (0x40009000, ADDR_BITS - PAGESIZE_LOG2), cap_page },
- { ADDR (0x40008000, ADDR_BITS - PAGESIZE_LOG2), cap_page },
- { ADDR (0x40007000, ADDR_BITS - PAGESIZE_LOG2), cap_page },
- { ADDR (0x40006000, ADDR_BITS - PAGESIZE_LOG2), cap_page },
- { ADDR (0x00101000 /* 1MB + 4k. */, ADDR_BITS - PAGESIZE_LOG2),
- cap_page },
- { ADDR (0x00FF0000 /* 1MB - 4k. */, ADDR_BITS - PAGESIZE_LOG2),
- cap_page },
+ { { VG_ADDR (1 << (VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2),
+ VG_ADDR_BITS - VG_FOLIO_OBJECTS_LOG2 - PAGESIZE_LOG2), vg_cap_folio },
+ { VG_ADDR (0x100000003, 63), vg_cap_page },
+ { VG_ADDR (0x100000004, 63), vg_cap_page },
+ { VG_ADDR (0x1000 /* 4k. */, VG_ADDR_BITS - PAGESIZE_LOG2), vg_cap_page },
+ { VG_ADDR (0x00100000 /* 1MB */, VG_ADDR_BITS - PAGESIZE_LOG2), vg_cap_page },
+ { VG_ADDR (0x01000000 /* 16MB */, VG_ADDR_BITS - PAGESIZE_LOG2), vg_cap_page },
+ { VG_ADDR (0x10000000 /* 256MB */, VG_ADDR_BITS - PAGESIZE_LOG2), vg_cap_page },
+ { VG_ADDR (0x40000000 /* 1000MB */, VG_ADDR_BITS - PAGESIZE_LOG2),
+ vg_cap_page },
+ { VG_ADDR (0x40000000 - 0x2000 /* 1000MB - 4k */,
+ VG_ADDR_BITS - PAGESIZE_LOG2),
+ vg_cap_page },
+ { VG_ADDR (0x40001000, VG_ADDR_BITS - PAGESIZE_LOG2), vg_cap_page },
+ { VG_ADDR (0x40003000, VG_ADDR_BITS - PAGESIZE_LOG2), vg_cap_page },
+ { VG_ADDR (0x40002000, VG_ADDR_BITS - PAGESIZE_LOG2), vg_cap_page },
+ { VG_ADDR (0x40009000, VG_ADDR_BITS - PAGESIZE_LOG2), vg_cap_page },
+ { VG_ADDR (0x40008000, VG_ADDR_BITS - PAGESIZE_LOG2), vg_cap_page },
+ { VG_ADDR (0x40007000, VG_ADDR_BITS - PAGESIZE_LOG2), vg_cap_page },
+ { VG_ADDR (0x40006000, VG_ADDR_BITS - PAGESIZE_LOG2), vg_cap_page },
+ { VG_ADDR (0x00101000 /* 1MB + 4k. */, VG_ADDR_BITS - PAGESIZE_LOG2),
+ vg_cap_page },
+ { VG_ADDR (0x00FF0000 /* 1MB - 4k. */, VG_ADDR_BITS - PAGESIZE_LOG2),
+ vg_cap_page },
};
try (allocs, sizeof (allocs) / sizeof (allocs[0]), false);
@@ -317,14 +317,14 @@ test (void)
{
struct alloc allocs[] =
- { { ADDR (1, ADDR_BITS), cap_page },
- { ADDR (2, ADDR_BITS), cap_page },
- { ADDR (3, ADDR_BITS), cap_page },
- { ADDR (4, ADDR_BITS), cap_page },
- { ADDR (5, ADDR_BITS), cap_page },
- { ADDR (6, ADDR_BITS), cap_page },
- { ADDR (7, ADDR_BITS), cap_page },
- { ADDR (8, ADDR_BITS), cap_page }
+ { { VG_ADDR (1, VG_ADDR_BITS), vg_cap_page },
+ { VG_ADDR (2, VG_ADDR_BITS), vg_cap_page },
+ { VG_ADDR (3, VG_ADDR_BITS), vg_cap_page },
+ { VG_ADDR (4, VG_ADDR_BITS), vg_cap_page },
+ { VG_ADDR (5, VG_ADDR_BITS), vg_cap_page },
+ { VG_ADDR (6, VG_ADDR_BITS), vg_cap_page },
+ { VG_ADDR (7, VG_ADDR_BITS), vg_cap_page },
+ { VG_ADDR (8, VG_ADDR_BITS), vg_cap_page }
};
try (allocs, sizeof (allocs) / sizeof (allocs[0]), false);
@@ -333,8 +333,8 @@ test (void)
{
/* Induce a long different guard. */
struct alloc allocs[] =
- { { ADDR (0x100000000, 51), cap_cappage },
- { ADDR (0x80000, 44), cap_folio }
+ { { VG_ADDR (0x100000000, 51), vg_cap_cappage },
+ { VG_ADDR (0x80000, 44), vg_cap_folio }
};
try (allocs, sizeof (allocs) / sizeof (allocs[0]), false);
@@ -343,10 +343,10 @@ test (void)
{
/* Induce subpage allocation. */
struct alloc allocs[] =
- { { ADDR (0x80000, 44), cap_folio },
- { ADDR (0x1000, 51), cap_page },
- { ADDR (0x10000, 51), cap_page },
- { ADDR (0x2000, 51), cap_page }
+ { { VG_ADDR (0x80000, 44), vg_cap_folio },
+ { VG_ADDR (0x1000, 51), vg_cap_page },
+ { VG_ADDR (0x10000, 51), vg_cap_page },
+ { VG_ADDR (0x2000, 51), vg_cap_page }
};
try (allocs, sizeof (allocs) / sizeof (allocs[0]), false);
@@ -371,13 +371,13 @@ test (void)
would be used to point to the folio and the second to the
smaller cappage. */
struct alloc allocs[] =
- { { ADDR (0x80000, 51), cap_page },
- { ADDR (0x81000, 51), cap_page },
- { ADDR (0x82000, 51), cap_page },
- { ADDR (0x83000, 51), cap_page },
- { ADDR (0x84000, 51), cap_page },
- { ADDR (0x85000, 51), cap_page },
- { ADDR (0x0, 44), cap_folio }
+ { { VG_ADDR (0x80000, 51), vg_cap_page },
+ { VG_ADDR (0x81000, 51), vg_cap_page },
+ { VG_ADDR (0x82000, 51), vg_cap_page },
+ { VG_ADDR (0x83000, 51), vg_cap_page },
+ { VG_ADDR (0x84000, 51), vg_cap_page },
+ { VG_ADDR (0x85000, 51), vg_cap_page },
+ { VG_ADDR (0x0, 44), vg_cap_folio }
};
try (allocs, sizeof (allocs) / sizeof (allocs[0]), false);
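
The address literals in these tables follow one rule: a vg_addr_t pairs a
prefix with a depth, and an object covering 2^k bytes of the
2^VG_ADDR_BITS-byte space sits at depth VG_ADDR_BITS - k. Pages therefore use
VG_ADDR_BITS - PAGESIZE_LOG2, and folios VG_ADDR_BITS - VG_FOLIO_OBJECTS_LOG2
- PAGESIZE_LOG2. A sketch of the accessors, under the same header assumptions
as above:

    #include <assert.h>
    #include <viengoos/addr.h>

    static void
    check (void)
    {
      /* A page at byte address 0x40001000, as in the as_insert table.  */
      vg_addr_t page = VG_ADDR (0x40001000, VG_ADDR_BITS - PAGESIZE_LOG2);
      assert (vg_addr_depth (page) == VG_ADDR_BITS - PAGESIZE_LOG2);
      assert (vg_addr_prefix (page) == 0x40001000);
    }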
diff --git a/viengoos/t-guard.c b/viengoos/t-guard.c
index 8ca55dc..ea0428d 100644
--- a/viengoos/t-guard.c
+++ b/viengoos/t-guard.c
@@ -18,17 +18,17 @@ test (void)
assert (gc.gbits == 8);
assert (gc.cappage_width == 2);
- /* Inserting a folio at /ADDR_BITS-19. */
+ /* Inserting a folio at /VG_ADDR_BITS-19. */
gc = as_compute_gbits_cappage (30, 11, 10);
assert (gc.gbits == 3);
assert (gc.cappage_width == 8);
- /* Inserting a page at /ADDR_BITS-12. */
+ /* Inserting a page at /VG_ADDR_BITS-12. */
gc = as_compute_gbits_cappage (30, 18, 10);
assert (gc.gbits == 3);
assert (gc.cappage_width == 8);
- /* Inserting a page at /ADDR_BITS-12. */
+ /* Inserting a page at /VG_ADDR_BITS-12. */
gc = as_compute_gbits_cappage (30, 18, 17);
assert (gc.gbits == 11);
assert (gc.cappage_width == 7);
diff --git a/viengoos/thread.c b/viengoos/thread.c
index 1889aa8..e17df62 100644
--- a/viengoos/thread.c
+++ b/viengoos/thread.c
@@ -252,11 +252,11 @@ control_to_string (l4_word_t control, char string[33])
error_t
thread_exregs (struct activity *principal,
struct thread *thread, uintptr_t control,
- struct cap aspace,
- uintptr_t flags, struct cap_properties properties,
- struct cap activity,
- struct cap utcb,
- struct cap exception_messenger,
+ struct vg_cap aspace,
+ uintptr_t flags, struct vg_cap_properties properties,
+ struct vg_cap activity,
+ struct vg_cap utcb,
+ struct vg_cap exception_messenger,
uintptr_t *sp, uintptr_t *ip,
uintptr_t *eflags, uintptr_t *user_handle)
{
@@ -271,25 +271,25 @@ thread_exregs (struct activity *principal,
}
if ((control & HURD_EXREGS_SET_ASPACE))
- cap_copy_x (principal,
- ADDR_VOID, &thread->aspace, ADDR_VOID,
- ADDR_VOID, aspace, ADDR_VOID,
+ vg_cap_copy_x (principal,
+ VG_ADDR_VOID, &thread->aspace, VG_ADDR_VOID,
+ VG_ADDR_VOID, aspace, VG_ADDR_VOID,
flags, properties);
if ((control & HURD_EXREGS_SET_ACTIVITY))
- cap_copy (principal,
- ADDR_VOID, &thread->activity, ADDR_VOID,
- ADDR_VOID, activity, ADDR_VOID);
+ vg_cap_copy (principal,
+ VG_ADDR_VOID, &thread->activity, VG_ADDR_VOID,
+ VG_ADDR_VOID, activity, VG_ADDR_VOID);
if ((control & HURD_EXREGS_SET_UTCB))
- cap_copy (principal,
- ADDR_VOID, &thread->utcb, ADDR_VOID,
- ADDR_VOID, utcb, ADDR_VOID);
+ vg_cap_copy (principal,
+ VG_ADDR_VOID, &thread->utcb, VG_ADDR_VOID,
+ VG_ADDR_VOID, utcb, VG_ADDR_VOID);
if ((control & HURD_EXREGS_SET_EXCEPTION_MESSENGER))
- cap_copy (principal,
- ADDR_VOID, &thread->exception_messenger, ADDR_VOID,
- ADDR_VOID, exception_messenger, ADDR_VOID);
+ vg_cap_copy (principal,
+ VG_ADDR_VOID, &thread->exception_messenger, VG_ADDR_VOID,
+ VG_ADDR_VOID, exception_messenger, VG_ADDR_VOID);
if (thread->commissioned)
{
@@ -357,7 +357,7 @@ thread_exregs (struct activity *principal,
if ((control & HURD_EXREGS_START) == HURD_EXREGS_START)
{
- struct object *a = cap_to_object (principal, &thread->activity);
+ struct object *a = vg_cap_to_object (principal, &thread->activity);
if (! a)
{
debug (0, "Thread not schedulable: no activity");
@@ -365,10 +365,10 @@ thread_exregs (struct activity *principal,
}
struct object_desc *desc = object_to_object_desc (a);
- if (! cap_types_compatible (desc->type, cap_activity))
+ if (! vg_cap_types_compatible (desc->type, vg_cap_activity))
{
debug (0, "Thread not schedulable: activity slot contains a %s",
- cap_type_string (desc->type));
+ vg_cap_type_string (desc->type));
return 0;
}
@@ -436,7 +436,7 @@ thread_activate (struct activity *activity,
bool may_block)
{
assert (messenger);
- assert (object_type ((struct object *) messenger) == cap_messenger);
+ assert (object_type ((struct object *) messenger) == vg_cap_messenger);
uintptr_t ip = 0;
@@ -450,7 +450,7 @@ thread_activate (struct activity *activity,
}
struct vg_utcb *utcb
- = (struct vg_utcb *) cap_to_object (activity, &thread->utcb);
+ = (struct vg_utcb *) vg_cap_to_object (activity, &thread->utcb);
if (! utcb)
{
#ifndef NDEBUG
@@ -465,10 +465,10 @@ thread_activate (struct activity *activity,
return false;
}
- if (object_type ((struct object *) utcb) != cap_page)
+ if (object_type ((struct object *) utcb) != vg_cap_page)
{
debug (0, "Malformed thread: utcb slot contains a %s, not a page",
- cap_type_string (object_type ((struct object *) utcb)));
+ vg_cap_type_string (object_type ((struct object *) utcb)));
return false;
}
@@ -490,7 +490,7 @@ thread_activate (struct activity *activity,
}
debug (5, "Activating %x (ip: %p; sp: %p)",
- thread->tid, ip, sp);
+ thread->tid, (void *) ip, (void *) sp);
utcb->protected_payload = messenger->protected_payload;
utcb->messenger_id = messenger->id;
@@ -500,7 +500,7 @@ thread_activate (struct activity *activity,
memcpy (utcb->inline_words, messenger->inline_words,
messenger->inline_word_count * sizeof (uintptr_t));
memcpy (utcb->inline_caps, messenger->inline_caps,
- messenger->inline_cap_count * sizeof (addr_t));
+ messenger->inline_cap_count * sizeof (vg_addr_t));
utcb->inline_word_count = messenger->inline_word_count;
utcb->inline_cap_count = messenger->inline_cap_count;
}
@@ -594,16 +594,16 @@ thread_raise_exception (struct activity *activity,
struct vg_message *message)
{
struct messenger *handler
- = (struct messenger *) cap_to_object (activity,
+ = (struct messenger *) vg_cap_to_object (activity,
&thread->exception_messenger);
if (! handler)
{
backtrace_print ();
debug (0, "Thread %x has no exception handler.", thread->tid);
}
- else if (object_type ((struct object *) handler) != cap_messenger)
+ else if (object_type ((struct object *) handler) != vg_cap_messenger)
debug (0, "%s is not a valid exception handler.",
- cap_type_string (object_type ((struct object *) handler)));
+ vg_cap_type_string (object_type ((struct object *) handler)));
else
{
if (! messenger_message_load (activity, handler, message))
@@ -617,7 +617,7 @@ thread_deliver_pending (struct activity *activity,
struct thread *thread)
{
struct vg_utcb *utcb
- = (struct vg_utcb *) cap_to_object (activity, &thread->utcb);
+ = (struct vg_utcb *) vg_cap_to_object (activity, &thread->utcb);
if (! utcb)
{
debug (0, "Malformed thread (%x): no utcb",
@@ -625,10 +625,10 @@ thread_deliver_pending (struct activity *activity,
return;
}
- if (object_type ((struct object *) utcb) != cap_page)
+ if (object_type ((struct object *) utcb) != vg_cap_page)
{
debug (0, "Malformed thread: utcb slot contains a %s, not a page",
- cap_type_string (object_type ((struct object *) utcb)));
+ vg_cap_type_string (object_type ((struct object *) utcb)));
return;
}
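
thread_exregs installs each supplied capability with vg_cap_copy
(vg_cap_copy_x when guard flags and properties travel along), passing
VG_ADDR_VOID wherever an address-space root or slot address would otherwise be
named, since both ends here are kernel-internal. The call shape, reduced to a
sketch with a hypothetical SOURCE capability (the reading of the VG_ADDR_VOID
arguments is an assumption based on the hunks above):

    /* Copy SOURCE into the thread's activity slot.  The VG_ADDR_VOID
       arguments stand in for the address-space roots and addresses,
       which do not apply to kernel-internal slots.  */
    struct vg_cap source = VG_CAP_VOID;
    vg_cap_copy (principal,
                 VG_ADDR_VOID, &thread->activity, VG_ADDR_VOID,
                 VG_ADDR_VOID, source, VG_ADDR_VOID);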
diff --git a/viengoos/thread.h b/viengoos/thread.h
index f869d0b..912137a 100644
--- a/viengoos/thread.h
+++ b/viengoos/thread.h
@@ -34,18 +34,18 @@ struct thread
/* User accessible fields. */
/* Address space. */
- struct cap aspace;
+ struct vg_cap aspace;
/* The current associated activity. (Not the activity out of which
this thread's storage is allocated!) */
- struct cap activity;
+ struct vg_cap activity;
/* A capability designating a messenger to which to deliver
exceptions. */
- struct cap exception_messenger;
+ struct vg_cap exception_messenger;
   /* A capability designating the page that contains the thread's UTCB. */
- struct cap utcb;
+ struct vg_cap utcb;
/* Non-user-accessible fields. */
@@ -95,11 +95,11 @@ extern void thread_decommission (struct thread *thread);
not set the pager. */
extern error_t thread_exregs (struct activity *principal,
struct thread *thread, uintptr_t control,
- struct cap aspace,
- uintptr_t flags, struct cap_properties properties,
- struct cap activity,
- struct cap utcb,
- struct cap exception_messenger,
+ struct vg_cap aspace,
+ uintptr_t flags, struct vg_cap_properties properties,
+ struct vg_cap activity,
+ struct vg_cap utcb,
+ struct vg_cap exception_messenger,
uintptr_t *sp, uintptr_t *ip,
uintptr_t *eflags, uintptr_t *user_handle);
diff --git a/viengoos/viengoos.c b/viengoos/viengoos.c
index ccab7a5..837c820 100644
--- a/viengoos/viengoos.c
+++ b/viengoos/viengoos.c
@@ -219,7 +219,7 @@ system_task_load (void)
{
const char *const argv[] = { boot_modules[0].command_line, NULL };
struct thread *thread;
- thread = process_spawn (ADDR_VOID,
+ thread = process_spawn (VG_ADDR_VOID,
(void *) boot_modules[0].start,
(void *) boot_modules[0].end,
argv, NULL,
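
For callers outside this tree, the rename is mechanical: every public
identifier gains a vg_ prefix (VG_ for macros and constants), and types,
constructors and format helpers move together. A hypothetical before/after
fragment, not taken from any file in this patch:

    /* Before the rename.  */
    addr_t addr = ADDR (0x1000, ADDR_BITS - PAGESIZE_LOG2);
    struct cap cap = CAP_VOID;

    /* After the rename.  */
    vg_addr_t addr = VG_ADDR (0x1000, VG_ADDR_BITS - PAGESIZE_LOG2);
    struct vg_cap cap = VG_CAP_VOID;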