author    Neal H. Walfield <neal@gnu.org>  2008-12-17 18:45:14 +0100
committer Neal H. Walfield <neal@gnu.org>  2008-12-17 18:45:14 +0100
commit    4b34130b861911b2fbc62e706239d55ce817d203 (patch)
tree      0fb7f4a87b0813e7884b86b7a0a7390265083a2d /libhurd-mm
parent    a7416b7c63f4954ff78eecf31e5146cb86cda6a6 (diff)
Add a "vg_" to public viengoos identifiers.
2008-12-17  Neal H. Walfield  <neal@gnu.org>

	* viengoos/activity.h: Add a "vg_" to public viengoos identifiers.
	Update users.
	* viengoos/addr-trans.h: Likewise.
	* viengoos/addr.h: Likewise.
	* viengoos/cap.h: Likewise.
	* viengoos/folio.h: Likewise.
	* viengoos/futex.h: Likewise.
	* viengoos/rpc.h: Likewise.
	* viengoos/thread.h: Likewise.
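The rename is purely mechanical: every public identifier gains a "vg_" (functions, types) or "VG_" (macros) prefix. For example, this line from anonymous.c below:

  addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);

becomes:

  vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);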
Diffstat (limited to 'libhurd-mm')
-rw-r--r--  libhurd-mm/anonymous.c         90
-rw-r--r--  libhurd-mm/anonymous.h         10
-rw-r--r--  libhurd-mm/as-build-custom.c   14
-rw-r--r--  libhurd-mm/as-build.c          296
-rw-r--r--  libhurd-mm/as-compute-gbits.h  34
-rw-r--r--  libhurd-mm/as-dump.c           82
-rw-r--r--  libhurd-mm/as-lookup.c         190
-rw-r--r--  libhurd-mm/as.c                336
-rw-r--r--  libhurd-mm/as.h                184
-rw-r--r--  libhurd-mm/capalloc.c          66
-rw-r--r--  libhurd-mm/capalloc.h          4
-rw-r--r--  libhurd-mm/exceptions.c        122
-rw-r--r--  libhurd-mm/map.c               34
-rw-r--r--  libhurd-mm/map.h               6
-rw-r--r--  libhurd-mm/message-buffer.c    76
-rw-r--r--  libhurd-mm/message-buffer.h    6
-rw-r--r--  libhurd-mm/mm-init.c           18
-rw-r--r--  libhurd-mm/mm.h                2
-rw-r--r--  libhurd-mm/mmap.c              4
-rw-r--r--  libhurd-mm/mprotect.c          22
-rw-r--r--  libhurd-mm/pager.h             4
-rw-r--r--  libhurd-mm/storage.c           216
-rw-r--r--  libhurd-mm/storage.h           26
23 files changed, 921 insertions, 921 deletions
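For orientation, the renames that dominate the hunks below:

  addr_t, ADDR_VOID, ADDR_BITS            ->  vg_addr_t, VG_ADDR_VOID, VG_ADDR_BITS
  addr_chop, addr_extend, addr_prefix     ->  vg_addr_chop, vg_addr_extend, vg_addr_prefix
  ADDR_TO_PTR, PTR_TO_ADDR                ->  VG_ADDR_TO_PTR, VG_PTR_TO_ADDR
  struct cap, cap_copy_x, cap_page        ->  struct vg_cap, vg_cap_copy_x, vg_cap_page
  CAP_GUARD, CAP_GUARD_BITS               ->  VG_CAP_GUARD, VG_CAP_GUARD_BITS
  OBJECT_POLICY_DEFAULT                   ->  VG_OBJECT_POLICY_DEFAULT
  FOLIO_OBJECTS_LOG2, CAPPAGE_SLOTS_LOG2  ->  VG_FOLIO_OBJECTS_LOG2, VG_CAPPAGE_SLOTS_LOG2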
diff --git a/libhurd-mm/anonymous.c b/libhurd-mm/anonymous.c
index c6f0bcb..8bb91d3 100644
--- a/libhurd-mm/anonymous.c
+++ b/libhurd-mm/anonymous.c
@@ -46,7 +46,7 @@ struct storage_desc
/* Offset from start of pager. */
uintptr_t offset;
/* The allocated storage. */
- addr_t storage;
+ vg_addr_t storage;
};
static int
@@ -83,12 +83,12 @@ slab_alloc (void *hook, size_t size, void **ptr)
{
assert (size == PAGESIZE);
- struct storage storage = storage_alloc (meta_data_activity, cap_page,
+ struct storage storage = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
- *ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ *ptr = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
return 0;
}
@@ -98,7 +98,7 @@ slab_dealloc (void *hook, void *buffer, size_t size)
{
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
@@ -133,7 +133,7 @@ static struct hurd_slab_space anonymous_pager_slab
static bool
fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
- uintptr_t fault_addr, uintptr_t ip, struct activation_fault_info info)
+ uintptr_t fault_addr, uintptr_t ip, struct vg_activation_fault_info info)
{
struct anonymous_pager *anon = (struct anonymous_pager *) pager;
assert (anon->magic == ANONYMOUS_MAGIC);
@@ -141,8 +141,8 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
debug (5, "%p: fault at %p, spans %d pg (%d kb); "
"pager: %p-%p (%d pages; %d kb), offset: %x",
anon, (void *) fault_addr, count, count * PAGESIZE / 1024,
- (void *) (uintptr_t) addr_prefix (anon->map_area),
- (void *) (uintptr_t) addr_prefix (anon->map_area) + anon->pager.length,
+ (void *) (uintptr_t) vg_addr_prefix (anon->map_area),
+ (void *) (uintptr_t) vg_addr_prefix (anon->map_area) + anon->pager.length,
anon->pager.length / PAGESIZE, anon->pager.length / 1024,
offset);
@@ -216,10 +216,10 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
"%x + %d pages <= %x",
offset, count, pager->length);
- debug (5, "Faulting %p - %p (%d pages; %d kb); pager at " ADDR_FMT "+%d",
+ debug (5, "Faulting %p - %p (%d pages; %d kb); pager at " VG_ADDR_FMT "+%d",
(void *) fault_addr, (void *) fault_addr + count * PAGE_SIZE,
count, count * PAGESIZE / 1024,
- ADDR_PRINTF (anon->map_area), offset);
+ VG_ADDR_PRINTF (anon->map_area), offset);
}
pages = __builtin_alloca (sizeof (void *) * count);
@@ -253,13 +253,13 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
storage address as object_discarded_clear also
returns a mapping and we are likely to access the
data at the fault address. */
- err = rm_object_discarded_clear (ADDR_VOID, ADDR_VOID,
+ err = rm_object_discarded_clear (VG_ADDR_VOID, VG_ADDR_VOID,
storage_desc->storage);
assertx (err == 0, "%d", err);
- debug (5, "Clearing discarded bit for %p / " ADDR_FMT,
+ debug (5, "Clearing discarded bit for %p / " VG_ADDR_FMT,
(void *) fault_addr + i * PAGESIZE,
- ADDR_PRINTF (storage_desc->storage));
+ VG_ADDR_PRINTF (storage_desc->storage));
}
else if (! storage_desc)
/* Seems we have not yet allocated a page. */
@@ -271,9 +271,9 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
struct storage storage
= storage_alloc (anon->activity,
- cap_page, STORAGE_UNKNOWN, anon->policy,
- ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ vg_cap_page, STORAGE_UNKNOWN, anon->policy,
+ VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of memory.");
storage_desc->storage = storage.addr;
@@ -286,32 +286,32 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
"Fault address: %p, offset: %x",
(void *) fault_addr + i * PAGESIZE, o);
- debug (5, "Allocating storage for %p at " ADDR_FMT,
+ debug (5, "Allocating storage for %p at " VG_ADDR_FMT,
(void *) fault_addr + i * PAGESIZE,
- ADDR_PRINTF (storage_desc->storage));
+ VG_ADDR_PRINTF (storage_desc->storage));
profile_region ("install");
/* We generate a fake shadow cap for the storage as we know
its contents (It is a page that is in a folio with the
policy ANON->POLICY.) */
- struct cap page;
+ struct vg_cap page;
memset (&page, 0, sizeof (page));
- page.type = cap_page;
- CAP_POLICY_SET (&page, anon->policy);
+ page.type = vg_cap_page;
+ VG_CAP_POLICY_SET (&page, anon->policy);
- addr_t addr = addr_chop (PTR_TO_ADDR (fault_addr + i * PAGESIZE),
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (fault_addr + i * PAGESIZE),
PAGESIZE_LOG2);
as_ensure_use
(addr,
({
bool ret;
- ret = cap_copy_x (anon->activity,
- ADDR_VOID, slot, addr,
- ADDR_VOID, page, storage_desc->storage,
- read_only ? CAP_COPY_WEAKEN : 0,
- CAP_PROPERTIES_VOID);
+ ret = vg_cap_copy_x (anon->activity,
+ VG_ADDR_VOID, slot, addr,
+ VG_ADDR_VOID, page, storage_desc->storage,
+ read_only ? VG_CAP_COPY_WEAKEN : 0,
+ VG_CAP_PROPERTIES_VOID);
assert (ret);
}));
@@ -319,7 +319,7 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
}
if (! recursive || ! (anon->flags & ANONYMOUS_NO_RECURSIVE))
- pages[i] = ADDR_TO_PTR (addr_extend (storage_desc->storage,
+ pages[i] = VG_ADDR_TO_PTR (vg_addr_extend (storage_desc->storage,
0, PAGESIZE_LOG2));
}
@@ -327,7 +327,7 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
int faulted;
for (i = 0; i < count; i += faulted)
{
- error_t err = rm_fault (ADDR_VOID, fault_addr + i * PAGESIZE,
+ error_t err = rm_fault (VG_ADDR_VOID, fault_addr + i * PAGESIZE,
count - i, &faulted);
if (err || faulted == 0)
break;
@@ -471,7 +471,7 @@ mdestroy (struct map *map)
debug (5, "Freed %d pages", count);
/* Free the map area. Should we also free the staging area? */
- as_free (PTR_TO_ADDR (map->region.start), map->region.length);
+ as_free (VG_PTR_TO_ADDR (map->region.start), map->region.length);
}
static void
@@ -490,7 +490,7 @@ destroy (struct pager *pager)
/* Free the staging area. */
{
assert ((anon->flags & ANONYMOUS_STAGING_AREA));
- as_free (addr_chop (PTR_TO_ADDR (anon->staging_area), PAGESIZE_LOG2),
+ as_free (vg_addr_chop (VG_PTR_TO_ADDR (anon->staging_area), PAGESIZE_LOG2),
anon->pager.length / PAGESIZE);
}
else
@@ -556,14 +556,14 @@ advise (struct pager *pager,
case pager_advice_normal:
{
- struct activation_fault_info info;
+ struct vg_activation_fault_info info;
info.discarded = anon->policy.discardable;
- info.type = cap_page;
+ info.type = vg_cap_page;
/* XXX: What should we set info.access to? */
info.access = MAP_ACCESS_ALL;
bool r = fault (pager, start, length / PAGESIZE, false,
- addr_prefix (anon->map_area) + start, 0, info);
+ vg_addr_prefix (anon->map_area) + start, 0, info);
if (! r)
debug (5, "Did not resolve fault for anonymous pager");
@@ -577,7 +577,7 @@ advise (struct pager *pager,
}
struct anonymous_pager *
-anonymous_pager_alloc (addr_t activity,
+anonymous_pager_alloc (vg_addr_t activity,
void *hint, uintptr_t length, enum map_access access,
struct object_policy policy,
uintptr_t flags, anonymous_pager_fill_t fill,
@@ -640,7 +640,7 @@ anonymous_pager_alloc (addr_t activity,
may not cover all of the requested region if the starting
address is not aligned on a 1 << WIDTH boundary. Consider
a requested address of 12k and a size of 8k. In this case,
- WIDTH is 13 and addr_chop (hint, WIDTH) => 8k thus yielding
+ WIDTH is 13 and vg_addr_chop (hint, WIDTH) => 8k thus yielding
the region 8-16k, yet, the requested region is 12k-20k! In
such cases, we just need to double the width to cover the
whole region. */
@@ -659,7 +659,7 @@ anonymous_pager_alloc (addr_t activity,
{
/* NB: this may round HINT down if we need a power-of-2 staging
area! */
- anon->map_area = addr_chop (PTR_TO_ADDR (hint), width);
+ anon->map_area = vg_addr_chop (VG_PTR_TO_ADDR (hint), width);
bool r = as_alloc_at (anon->map_area, count);
if (! r)
@@ -667,10 +667,10 @@ anonymous_pager_alloc (addr_t activity,
{
if ((flags & ANONYMOUS_FIXED))
{
- debug (0, "(%p, %x (%p)): Specified range " ADDR_FMT "+%d "
+ debug (0, "(%p, %x (%p)): Specified range " VG_ADDR_FMT "+%d "
"in use and ANONYMOUS_FIXED specified",
hint, length, hint + length - 1,
- ADDR_PRINTF (anon->map_area), count);
+ VG_ADDR_PRINTF (anon->map_area), count);
goto error_with_buffer;
}
}
@@ -683,14 +683,14 @@ anonymous_pager_alloc (addr_t activity,
if (! alloced)
{
anon->map_area = as_alloc (width, count, true);
- if (ADDR_IS_VOID (anon->map_area))
+ if (VG_ADDR_IS_VOID (anon->map_area))
{
debug (0, "(%p, %x (%p)): No VA available",
hint, length, hint + length - 1);
goto error_with_buffer;
}
- *addr_out = ADDR_TO_PTR (addr_extend (anon->map_area, 0, width));
+ *addr_out = VG_ADDR_TO_PTR (vg_addr_extend (anon->map_area, 0, width));
}
anon->map_area_count = count;
@@ -699,11 +699,11 @@ anonymous_pager_alloc (addr_t activity,
if ((flags & ANONYMOUS_STAGING_AREA))
/* We need a staging area. */
{
- addr_t staging_area = as_alloc (PAGESIZE_LOG2, length / PAGESIZE, true);
- if (ADDR_IS_VOID (staging_area))
+ vg_addr_t staging_area = as_alloc (PAGESIZE_LOG2, length / PAGESIZE, true);
+ if (VG_ADDR_IS_VOID (staging_area))
goto error_with_map_area;
- anon->staging_area = ADDR_TO_PTR (addr_extend (staging_area,
+ anon->staging_area = VG_ADDR_TO_PTR (vg_addr_extend (staging_area,
0, PAGESIZE_LOG2));
}
diff --git a/libhurd-mm/anonymous.h b/libhurd-mm/anonymous.h
index fe5491e..aac7f7b 100644
--- a/libhurd-mm/anonymous.h
+++ b/libhurd-mm/anonymous.h
@@ -80,7 +80,7 @@ enum
typedef bool (*anonymous_pager_fill_t) (struct anonymous_pager *anon,
uintptr_t offset, uintptr_t count,
void *pages[],
- struct activation_fault_info info);
+ struct vg_activation_fault_info info);
#define ANONYMOUS_MAGIC 0xa707a707
@@ -103,7 +103,7 @@ struct anonymous_pager
/* The user's window onto the pager. */
- addr_t map_area;
+ vg_addr_t map_area;
int map_area_count;
ss_mutex_t lock;
@@ -115,7 +115,7 @@ struct anonymous_pager
/* Activity against which storage should be allocated. */
- addr_t activity;
+ vg_addr_t activity;
/* The policy to use when allocating memory. */
struct object_policy policy;
@@ -137,7 +137,7 @@ struct anonymous_pager
ADDR_HINT indicates the preferred starting address. Unless
ANONYMOUS_FIXED is included in FLAGS, the implementation may choose
another address. (The region will be allocated using as_alloc.)
- Both ADDR and LENGTH must be a multiple of the base page size. If
+ Both ADDR_HINT and LENGTH must be a multiple of the base page size. If
the specified region overlaps with an existing pager, EEXIST is
returned. The chosen start address is returned in *ADDR_OUT.
@@ -164,7 +164,7 @@ struct anonymous_pager
up. When the fill function is invoked, access to the main region
is disabled; any access is blocked until the fill function
returns. */
-extern struct anonymous_pager *anonymous_pager_alloc (addr_t activity,
+extern struct anonymous_pager *anonymous_pager_alloc (vg_addr_t activity,
void *addr_hint,
uintptr_t length,
enum map_access access,
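A minimal sketch of a fill callback matching the anonymous_pager_fill_t typedef above. zero_fill is a hypothetical name, and the meaning of the boolean return (assumed here: true on success) is not spelled out in the visible hunks:

  #include <string.h>

  /* Zero every page supplied by the pager before it is exposed.  */
  static bool
  zero_fill (struct anonymous_pager *anon,
             uintptr_t offset, uintptr_t count, void *pages[],
             struct vg_activation_fault_info info)
  {
    uintptr_t i;
    for (i = 0; i < count; i ++)
      memset (pages[i], 0, PAGESIZE);
    return true;
  }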
diff --git a/libhurd-mm/as-build-custom.c b/libhurd-mm/as-build-custom.c
index 5c200ce..0acc415 100644
--- a/libhurd-mm/as-build-custom.c
+++ b/libhurd-mm/as-build-custom.c
@@ -28,9 +28,9 @@
#include "as-build.c"
-struct cap *
+struct vg_cap *
as_ensure_full_custom (activity_t activity,
- addr_t as_root_addr, struct cap *root, addr_t addr,
+ vg_addr_t as_root_addr, struct vg_cap *root, vg_addr_t addr,
as_allocate_page_table_t as_allocate_page_table,
as_object_index_t object_index)
{
@@ -39,18 +39,18 @@ as_ensure_full_custom (activity_t activity,
true);
}
-struct cap *
+struct vg_cap *
as_insert_custom (activity_t activity,
- addr_t as_root_addr, struct cap *root, addr_t addr,
- addr_t entry_as, struct cap entry, addr_t entry_addr,
+ vg_addr_t as_root_addr, struct vg_cap *root, vg_addr_t addr,
+ vg_addr_t entry_as, struct vg_cap entry, vg_addr_t entry_addr,
as_allocate_page_table_t as_allocate_page_table,
as_object_index_t object_index)
{
- struct cap *slot = as_build_custom (activity, as_root_addr, root, addr,
+ struct vg_cap *slot = as_build_custom (activity, as_root_addr, root, addr,
as_allocate_page_table,
object_index, false);
assert (slot);
- cap_copy (activity, as_root_addr, slot, addr, entry_as, entry, entry_addr);
+ vg_cap_copy (activity, as_root_addr, slot, addr, entry_as, entry, entry_addr);
return slot;
}
diff --git a/libhurd-mm/as-build.c b/libhurd-mm/as-build.c
index b2266cb..25ba5b9 100644
--- a/libhurd-mm/as-build.c
+++ b/libhurd-mm/as-build.c
@@ -85,7 +85,7 @@ struct trace_buffer as_trace = TRACE_BUFFER_INIT ("as_trace", 0,
#ifdef RM_INTERN
# define AS_DUMP as_dump_from (activity, as_root, __func__)
#else
-# define AS_DUMP rm_as_dump (ADDR_VOID, as_root_addr)
+# define AS_DUMP rm_as_dump (VG_ADDR_VOID, as_root_addr)
#endif
/* The following macros allow providing specialized address-space
@@ -120,49 +120,49 @@ struct trace_buffer as_trace = TRACE_BUFFER_INIT ("as_trace", 0,
location of the idx'th capability slot. If the capability is
implicit (in the case of a folio), return a fabricated capability
in *FAKE_SLOT and return FAKE_SLOT. Return NULL on failure. */
-static inline struct cap *
-do_index (activity_t activity, struct cap *pte, addr_t pt_addr, int idx,
- struct cap *fake_slot)
+static inline struct vg_cap *
+do_index (activity_t activity, struct vg_cap *pte, vg_addr_t pt_addr, int idx,
+ struct vg_cap *fake_slot)
{
- assert (pte->type == cap_cappage || pte->type == cap_rcappage
- || pte->type == cap_folio
- || pte->type == cap_thread
- || pte->type == cap_messenger || pte->type == cap_rmessenger);
+ assert (pte->type == vg_cap_cappage || pte->type == vg_cap_rcappage
+ || pte->type == vg_cap_folio
+ || pte->type == vg_cap_thread
+ || pte->type == vg_cap_messenger || pte->type == vg_cap_rmessenger);
/* Load the referenced object. */
- struct object *pt = cap_to_object (activity, pte);
+ struct object *pt = vg_cap_to_object (activity, pte);
if (! pt)
/* PTE's type was not void but its designation was invalid. This
can only happen if we inserted an object and subsequently
destroyed it. */
{
- /* The type should now have been set to cap_void. */
- assert (pte->type == cap_void);
- PANIC ("No object at " ADDR_FMT, ADDR_PRINTF (pt_addr));
+ /* The type should now have been set to vg_cap_void. */
+ assert (pte->type == vg_cap_void);
+ PANIC ("No object at " VG_ADDR_FMT, VG_ADDR_PRINTF (pt_addr));
}
switch (pte->type)
{
- case cap_cappage:
- case cap_rcappage:
- return &pt->caps[CAP_SUBPAGE_OFFSET (pte) + idx];
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
+ return &pt->caps[VG_CAP_SUBPAGE_OFFSET (pte) + idx];
- case cap_folio:;
+ case vg_cap_folio:;
struct folio *folio = (struct folio *) pt;
- if (folio_object_type (folio, idx) == cap_void)
- PANIC ("Can't use void object at " ADDR_FMT " for address translation",
- ADDR_PRINTF (pt_addr));
+ if (vg_folio_object_type (folio, idx) == vg_cap_void)
+ PANIC ("Can't use void object at " VG_ADDR_FMT " for address translation",
+ VG_ADDR_PRINTF (pt_addr));
- *fake_slot = folio_object_cap (folio, idx);
+ *fake_slot = vg_folio_object_cap (folio, idx);
return fake_slot;
- case cap_thread:
- assert (idx < THREAD_SLOTS);
+ case vg_cap_thread:
+ assert (idx < VG_THREAD_SLOTS);
return &pt->caps[idx];
- case cap_messenger:
+ case vg_cap_messenger:
/* Note: rmessengers don't expose their capability slots. */
assert (idx < VG_MESSENGER_SLOTS);
return &pt->caps[idx];
@@ -186,53 +186,53 @@ do_index (activity_t activity, struct cap *pte, addr_t pt_addr, int idx,
If MAY_OVERWRITE is true, the function may overwrite an existing
capability. Otherwise, only capability slots containing a void
capability are used. */
-struct cap *
+struct vg_cap *
ID (as_build) (activity_t activity,
- addr_t as_root_addr, struct cap *as_root, addr_t addr,
+ vg_addr_t as_root_addr, struct vg_cap *as_root, vg_addr_t addr,
as_allocate_page_table_t allocate_page_table
OBJECT_INDEX_PARAM,
bool may_overwrite)
{
- struct cap *pte = as_root;
+ struct vg_cap *pte = as_root;
- DEBUG (5, DEBUG_BOLD ("Ensuring slot at " ADDR_FMT) " may overwrite: %d",
- ADDR_PRINTF (addr), may_overwrite);
- assert (! ADDR_IS_VOID (addr));
+ DEBUG (5, DEBUG_BOLD ("Ensuring slot at " VG_ADDR_FMT) " may overwrite: %d",
+ VG_ADDR_PRINTF (addr), may_overwrite);
+ assert (! VG_ADDR_IS_VOID (addr));
/* The number of bits to translate. */
- int remaining = addr_depth (addr);
+ int remaining = vg_addr_depth (addr);
/* The REMAINING bits to translates are in the REMAINING most significant
bits of PREFIX. Here it is more convenient to have them in the
lower bits. */
- uint64_t prefix = addr_prefix (addr) >> (ADDR_BITS - remaining);
+ uint64_t prefix = vg_addr_prefix (addr) >> (VG_ADDR_BITS - remaining);
/* Folios are not made up of capability slots and cannot be written
to. When traversing a folio, we manufacture a capability to used
object in FAKE_SLOT. If ADDR ends up designating such a
capability, we fail. */
- struct cap fake_slot;
+ struct vg_cap fake_slot;
do
{
- addr_t pte_addr = addr_chop (addr, remaining);
+ vg_addr_t pte_addr = vg_addr_chop (addr, remaining);
- DEBUG (5, "Cap at " ADDR_FMT ": " CAP_FMT " -> " ADDR_FMT " (%p); "
+ DEBUG (5, "Cap at " VG_ADDR_FMT ": " VG_CAP_FMT " -> " VG_ADDR_FMT " (%p); "
"remaining: %d",
- ADDR_PRINTF (pte_addr),
- CAP_PRINTF (pte),
- ADDR_PRINTF (addr_chop (addr,
- remaining - CAP_GUARD_BITS (pte))),
+ VG_ADDR_PRINTF (pte_addr),
+ VG_CAP_PRINTF (pte),
+ VG_ADDR_PRINTF (vg_addr_chop (addr,
+ remaining - VG_CAP_GUARD_BITS (pte))),
#ifdef RM_INTERN
NULL,
#else
- cap_get_shadow (pte),
+ vg_cap_get_shadow (pte),
#endif
remaining);
AS_CHECK_SHADOW (as_root_addr, pte_addr, pte, {});
- uint64_t pte_guard = CAP_GUARD (pte);
- int pte_gbits = CAP_GUARD_BITS (pte);
+ uint64_t pte_guard = VG_CAP_GUARD (pte);
+ int pte_gbits = VG_CAP_GUARD_BITS (pte);
uint64_t addr_guard;
if (remaining >= pte_gbits)
@@ -254,14 +254,14 @@ ID (as_build) (activity_t activity,
the other context may only use a slot if it owns the
area. */
break;
- else if ((pte->type == cap_cappage || pte->type == cap_rcappage
- || pte->type == cap_folio
- || pte->type == cap_thread
- || pte->type == cap_messenger)
+ else if ((pte->type == vg_cap_cappage || pte->type == vg_cap_rcappage
+ || pte->type == vg_cap_folio
+ || pte->type == vg_cap_thread
+ || pte->type == vg_cap_messenger)
&& remaining >= pte_gbits
&& pte_guard == addr_guard)
/* PTE's (possibly zero-width) guard matches and the
- designated object translates ADDR. We index the object
+ designated object translates VG_ADDR. We index the object
below. */
{
remaining -= pte_gbits;
@@ -352,18 +352,18 @@ ID (as_build) (activity_t activity,
length of the pte in the new cappage. */
int gbits;
- bool need_pivot = ! (pte->type == cap_void && pte_gbits == 0);
+ bool need_pivot = ! (pte->type == vg_cap_void && pte_gbits == 0);
if (! need_pivot)
/* The slot is available. */
{
int space = vg_msb64 (extract_bits64 (prefix, 0, remaining));
- if (space <= CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
+ if (space <= VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
/* The remaining bits to translate fit in the
guard, we are done. */
break;
/* The guard value requires more than
- CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS bits. We need to
+ VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS bits. We need to
insert a page table. */
gbits = tilobject = remaining;
}
@@ -386,23 +386,23 @@ ID (as_build) (activity_t activity,
area. */
int firstset = vg_msb64 (extract_bits64_inv (prefix,
remaining - 1, gbits));
- if (firstset > CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
+ if (firstset > VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
/* FIRSTSET is the first (most significant) non-zero guard
bit. GBITS - FIRSTSET are the number of zero bits
before the most significant non-zero bit. We can
include all of the initial zero bits plus up to the
- next CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS bits. */
- gbits -= firstset - CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS;
+ next VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS bits. */
+ gbits -= firstset - VG_CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS;
/* We want to choose the guard length such that the cappage
that we insert occurs at certain positions so as minimize
small partial cappages and painful rearrangements of the
tree. In particular, we want the total remaining bits to
translate after accounting the guard to be equal to
- FOLIO_OBJECTS_LOG2 + i * CAPPAGE_SLOTS_LOG2 where i >= 0.
+ VG_FOLIO_OBJECTS_LOG2 + i * VG_CAPPAGE_SLOTS_LOG2 where i >= 0.
As GBITS is maximal, we may have to remove guard bits to
achieve this. */
- int untranslated_bits = remaining + ADDR_BITS - addr_depth (addr);
+ int untranslated_bits = remaining + VG_ADDR_BITS - vg_addr_depth (addr);
if (! (untranslated_bits > 0 && tilobject > 0 && gbits >= 0
&& untranslated_bits >= tilobject
@@ -422,25 +422,25 @@ ID (as_build) (activity_t activity,
remaining -= gbits;
int pt_width = gc.cappage_width;
- if (! (pt_width > 0 && pt_width <= CAPPAGE_SLOTS_LOG2))
+ if (! (pt_width > 0 && pt_width <= VG_CAPPAGE_SLOTS_LOG2))
PANIC ("pt_width: %d", pt_width);
/* Allocate a new page table. */
/* XXX: If we use a subpage, we just ignore the rest of the
page. This is a bit of a waste but makes the code
simpler. */
- addr_t pt_addr = addr_chop (addr, remaining);
+ vg_addr_t pt_addr = vg_addr_chop (addr, remaining);
struct as_allocate_pt_ret rt = allocate_page_table (pt_addr);
- if (rt.cap.type == cap_void)
+ if (rt.cap.type == vg_cap_void)
/* No memory. */
return NULL;
- struct cap pt_cap = rt.cap;
- addr_t pt_phys_addr = rt.storage;
+ struct vg_cap pt_cap = rt.cap;
+ vg_addr_t pt_phys_addr = rt.storage;
/* do_index requires that the subpage specification be
correct. */
- CAP_SET_SUBPAGE (&pt_cap,
- 0, 1 << (CAPPAGE_SLOTS_LOG2 - pt_width));
+ VG_CAP_SET_SUBPAGE (&pt_cap,
+ 0, 1 << (VG_CAPPAGE_SLOTS_LOG2 - pt_width));
@@ -471,50 +471,50 @@ ID (as_build) (activity_t activity,
int pivot_idx = extract_bits_inv (pte_guard,
pte_gbits - gbits - 1,
pt_width);
- addr_t pivot_addr = addr_extend (pt_addr,
+ vg_addr_t pivot_addr = vg_addr_extend (pt_addr,
pivot_idx, pt_width);
- addr_t pivot_phys_addr = addr_extend (pt_phys_addr,
+ vg_addr_t pivot_phys_addr = vg_addr_extend (pt_phys_addr,
pivot_idx,
- CAPPAGE_SLOTS_LOG2);
+ VG_CAPPAGE_SLOTS_LOG2);
int pivot_gbits = pte_gbits - gbits - pt_width;
int pivot_guard = extract_bits64 (pte_guard, 0, pivot_gbits);
- if (! ADDR_EQ (addr_extend (pivot_addr, pivot_guard, pivot_gbits),
- addr_extend (pte_addr, pte_guard, pte_gbits)))
+ if (! VG_ADDR_EQ (vg_addr_extend (pivot_addr, pivot_guard, pivot_gbits),
+ vg_addr_extend (pte_addr, pte_guard, pte_gbits)))
{
- PANIC ("old pte target: " ADDR_FMT " != pivot target: " ADDR_FMT,
- ADDR_PRINTF (addr_extend (pte_addr,
+ PANIC ("old pte target: " VG_ADDR_FMT " != pivot target: " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (vg_addr_extend (pte_addr,
pte_guard, pte_gbits)),
- ADDR_PRINTF (addr_extend (pivot_addr,
+ VG_ADDR_PRINTF (vg_addr_extend (pivot_addr,
pivot_guard, pivot_gbits)));
}
- DEBUG (5, ADDR_FMT ": indirecting pte at " ADDR_FMT
- " -> " ADDR_FMT " " CAP_FMT " with page table/%d at "
- ADDR_FMT "(%p) " "common guard: %d, remaining: %d; "
+ DEBUG (5, VG_ADDR_FMT ": indirecting pte at " VG_ADDR_FMT
+ " -> " VG_ADDR_FMT " " VG_CAP_FMT " with page table/%d at "
+ VG_ADDR_FMT "(%p) " "common guard: %d, remaining: %d; "
"old target (need pivot: %d) now via pt[%d] "
- "(" ADDR_FMT "-> " DEBUG_BOLD (ADDR_FMT) ")",
- ADDR_PRINTF (addr),
- ADDR_PRINTF (pte_addr),
- ADDR_PRINTF (addr_extend (pte_addr, CAP_GUARD (pte),
- CAP_GUARD_BITS (pte))),
- CAP_PRINTF (pte),
- pt_width, ADDR_PRINTF (pt_addr),
+ "(" VG_ADDR_FMT "-> " DEBUG_BOLD (VG_ADDR_FMT) ")",
+ VG_ADDR_PRINTF (addr),
+ VG_ADDR_PRINTF (pte_addr),
+ VG_ADDR_PRINTF (vg_addr_extend (pte_addr, VG_CAP_GUARD (pte),
+ VG_CAP_GUARD_BITS (pte))),
+ VG_CAP_PRINTF (pte),
+ pt_width, VG_ADDR_PRINTF (pt_addr),
#ifdef RM_INTERN
NULL,
#else
- cap_get_shadow (&pt_cap),
+ vg_cap_get_shadow (&pt_cap),
#endif
gbits, remaining,
- need_pivot, pivot_idx, ADDR_PRINTF (pivot_addr),
- ADDR_PRINTF (addr_extend (pivot_addr,
+ need_pivot, pivot_idx, VG_ADDR_PRINTF (pivot_addr),
+ VG_ADDR_PRINTF (vg_addr_extend (pivot_addr,
pivot_guard, pivot_gbits)));
/* 1.) Copy the PTE into the new page table. Adjust the
guard in the process. This is only necessary if PTE
actually designates something. */
- struct cap *pivot_cap = NULL;
+ struct vg_cap *pivot_cap = NULL;
if (need_pivot)
{
/* 1.a) Get the pivot PTE. */
@@ -526,18 +526,18 @@ ID (as_build) (activity_t activity,
/* 1.b) Make the pivot designate the object the PTE
currently designates. */
- struct cap_addr_trans addr_trans = CAP_ADDR_TRANS_VOID;
+ struct vg_cap_addr_trans addr_trans = VG_CAP_ADDR_TRANS_VOID;
bool r;
- r = CAP_ADDR_TRANS_SET_GUARD (&addr_trans,
+ r = VG_CAP_ADDR_TRANS_SET_GUARD (&addr_trans,
pivot_guard, pivot_gbits);
assert (r);
- r = cap_copy_x (activity,
- ADDR_VOID, pivot_cap, pivot_phys_addr,
+ r = vg_cap_copy_x (activity,
+ VG_ADDR_VOID, pivot_cap, pivot_phys_addr,
as_root_addr, *pte, pte_addr,
- CAP_COPY_COPY_ADDR_TRANS_GUARD,
- CAP_PROPERTIES (OBJECT_POLICY_DEFAULT,
+ VG_CAP_COPY_COPY_ADDR_TRANS_GUARD,
+ VG_CAP_PROPERTIES (VG_OBJECT_POLICY_DEFAULT,
addr_trans));
assert (r);
}
@@ -547,22 +547,22 @@ ID (as_build) (activity_t activity,
pte_gbits - 1, gbits);
pte_gbits = gbits;
- struct cap_addr_trans addr_trans = CAP_ADDR_TRANS_VOID;
+ struct vg_cap_addr_trans addr_trans = VG_CAP_ADDR_TRANS_VOID;
bool r;
- r = CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&addr_trans,
+ r = VG_CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&addr_trans,
pte_guard, pte_gbits,
0 /* We always use the
first subpage in
a page. */,
- 1 << (CAPPAGE_SLOTS_LOG2
+ 1 << (VG_CAPPAGE_SLOTS_LOG2
- pt_width));
assert (r);
- r = cap_copy_x (activity, as_root_addr, pte, pte_addr,
- ADDR_VOID, pt_cap, rt.storage,
- CAP_COPY_COPY_ADDR_TRANS_SUBPAGE
- | CAP_COPY_COPY_ADDR_TRANS_GUARD,
- CAP_PROPERTIES (OBJECT_POLICY_DEFAULT, addr_trans));
+ r = vg_cap_copy_x (activity, as_root_addr, pte, pte_addr,
+ VG_ADDR_VOID, pt_cap, rt.storage,
+ VG_CAP_COPY_COPY_ADDR_TRANS_SUBPAGE
+ | VG_CAP_COPY_COPY_ADDR_TRANS_GUARD,
+ VG_CAP_PROPERTIES (VG_OBJECT_POLICY_DEFAULT, addr_trans));
assert (r);
#ifndef NDEBUG
@@ -579,8 +579,8 @@ ID (as_build) (activity_t activity,
if (! (ret && rt.capp == pivot_cap))
as_dump_from (activity, as_root, "");
assertx (ret && rt.capp == pivot_cap,
- ADDR_FMT ": %sfound, got %p, expected %p",
- ADDR_PRINTF (pivot_addr),
+ VG_ADDR_FMT ": %sfound, got %p, expected %p",
+ VG_ADDR_PRINTF (pivot_addr),
ret ? "" : "not ", ret ? rt.capp : 0, pivot_cap);
AS_CHECK_SHADOW (as_root_addr, pivot_addr, pivot_cap, { });
@@ -595,52 +595,52 @@ ID (as_build) (activity_t activity,
int width;
switch (pte->type)
{
- case cap_cappage:
- case cap_rcappage:
- width = CAP_SUBPAGE_SIZE_LOG2 (pte);
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
+ width = VG_CAP_SUBPAGE_SIZE_LOG2 (pte);
break;
- case cap_folio:
- width = FOLIO_OBJECTS_LOG2;
+ case vg_cap_folio:
+ width = VG_FOLIO_OBJECTS_LOG2;
break;
- case cap_thread:
- width = THREAD_SLOTS_LOG2;
+ case vg_cap_thread:
+ width = VG_THREAD_SLOTS_LOG2;
break;
- case cap_messenger:
+ case vg_cap_messenger:
/* Note: rmessengers don't expose their capability slots. */
width = VG_MESSENGER_SLOTS_LOG2;
break;
default:
AS_DUMP;
- PANIC ("Can't insert object at " ADDR_FMT ": "
- CAP_FMT " does translate address bits",
- ADDR_PRINTF (addr),
- CAP_PRINTF (pte));
+ PANIC ("Can't insert object at " VG_ADDR_FMT ": "
+ VG_CAP_FMT " does translate address bits",
+ VG_ADDR_PRINTF (addr),
+ VG_CAP_PRINTF (pte));
}
/* That should not be more than we have left to translate. */
if (width > remaining)
{
AS_DUMP;
- PANIC ("Translating " ADDR_FMT ": can't index %d-bit %s at "
- ADDR_FMT "; not enough bits (%d)",
- ADDR_PRINTF (addr), width, cap_type_string (pte->type),
- ADDR_PRINTF (addr_chop (addr, remaining)), remaining);
+ PANIC ("Translating " VG_ADDR_FMT ": can't index %d-bit %s at "
+ VG_ADDR_FMT "; not enough bits (%d)",
+ VG_ADDR_PRINTF (addr), width, vg_cap_type_string (pte->type),
+ VG_ADDR_PRINTF (vg_addr_chop (addr, remaining)), remaining);
}
int idx = extract_bits64_inv (prefix, remaining - 1, width);
- enum cap_type type = pte->type;
- pte = do_index (activity, pte, addr_chop (addr, remaining), idx,
+ enum vg_cap_type type = pte->type;
+ pte = do_index (activity, pte, vg_addr_chop (addr, remaining), idx,
&fake_slot);
if (! pte)
- PANIC ("Failed to index object at " ADDR_FMT,
- ADDR_PRINTF (addr_chop (addr, remaining)));
+ PANIC ("Failed to index object at " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (vg_addr_chop (addr, remaining)));
- if (type == cap_folio)
+ if (type == vg_cap_folio)
assert (pte == &fake_slot);
else
assert (pte != &fake_slot);
@@ -648,41 +648,41 @@ ID (as_build) (activity_t activity,
remaining -= width;
DEBUG (5, "Indexing %s/%d[%d]; remaining: %d",
- cap_type_string (type), width, idx, remaining);
+ vg_cap_type_string (type), width, idx, remaining);
if (remaining == 0)
AS_CHECK_SHADOW (as_root_addr, addr, pte, {});
}
while (remaining > 0);
- if (! (pte->type == cap_void && CAP_GUARD_BITS (pte) == 0))
+ if (! (pte->type == vg_cap_void && VG_CAP_GUARD_BITS (pte) == 0))
/* PTE in use. */
{
- if (remaining != CAP_GUARD_BITS (pte)
- && extract_bits64 (prefix, 0, remaining) != CAP_GUARD (pte))
- DEBUG (0, "Overwriting " CAP_FMT " at " ADDR_FMT " -> " ADDR_FMT,
- CAP_PRINTF (pte),
- ADDR_PRINTF (addr),
- ADDR_PRINTF (addr_extend (addr, CAP_GUARD (pte),
- CAP_GUARD_BITS (pte))));
+ if (remaining != VG_CAP_GUARD_BITS (pte)
+ && extract_bits64 (prefix, 0, remaining) != VG_CAP_GUARD (pte))
+ DEBUG (0, "Overwriting " VG_CAP_FMT " at " VG_ADDR_FMT " -> " VG_ADDR_FMT,
+ VG_CAP_PRINTF (pte),
+ VG_ADDR_PRINTF (addr),
+ VG_ADDR_PRINTF (vg_addr_extend (addr, VG_CAP_GUARD (pte),
+ VG_CAP_GUARD_BITS (pte))));
if (may_overwrite)
{
- DEBUG (5, "Overwriting " CAP_FMT " at " ADDR_FMT " -> " ADDR_FMT,
- CAP_PRINTF (pte),
- ADDR_PRINTF (addr),
- ADDR_PRINTF (addr_extend (addr, CAP_GUARD (pte),
- CAP_GUARD_BITS (pte))));
+ DEBUG (5, "Overwriting " VG_CAP_FMT " at " VG_ADDR_FMT " -> " VG_ADDR_FMT,
+ VG_CAP_PRINTF (pte),
+ VG_ADDR_PRINTF (addr),
+ VG_ADDR_PRINTF (vg_addr_extend (addr, VG_CAP_GUARD (pte),
+ VG_CAP_GUARD_BITS (pte))));
/* XXX: Free any data associated with the capability
(e.g., shadow pages). */
}
else
{
AS_DUMP;
- PANIC ("There is already an object at " ADDR_FMT
- " (" CAP_FMT ") but may not overwrite.",
- ADDR_PRINTF (addr),
- CAP_PRINTF (pte));
+ PANIC ("There is already an object at " VG_ADDR_FMT
+ " (" VG_CAP_FMT ") but may not overwrite.",
+ VG_ADDR_PRINTF (addr),
+ VG_CAP_PRINTF (pte));
}
}
@@ -691,19 +691,19 @@ ID (as_build) (activity_t activity,
/* It is safe to use an int as a guard has a most 22 significant
bits. */
int guard = extract_bits64 (prefix, 0, gbits);
- if (gbits != CAP_GUARD_BITS (pte) || guard != CAP_GUARD (pte))
+ if (gbits != VG_CAP_GUARD_BITS (pte) || guard != VG_CAP_GUARD (pte))
{
- struct cap_addr_trans addr_trans = CAP_ADDR_TRANS_VOID;
- bool r = CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&addr_trans, guard, gbits,
+ struct vg_cap_addr_trans addr_trans = VG_CAP_ADDR_TRANS_VOID;
+ bool r = VG_CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&addr_trans, guard, gbits,
0, 1);
assert (r);
- r = cap_copy_x (activity, as_root_addr, pte, addr_chop (addr, gbits),
- as_root_addr, *pte, addr_chop (addr, gbits),
- CAP_COPY_COPY_ADDR_TRANS_GUARD,
- CAP_PROPERTIES (OBJECT_POLICY_DEFAULT, addr_trans));
+ r = vg_cap_copy_x (activity, as_root_addr, pte, vg_addr_chop (addr, gbits),
+ as_root_addr, *pte, vg_addr_chop (addr, gbits),
+ VG_CAP_COPY_COPY_ADDR_TRANS_GUARD,
+ VG_CAP_PROPERTIES (VG_OBJECT_POLICY_DEFAULT, addr_trans));
assert (r);
- AS_CHECK_SHADOW (as_root_addr, addr_chop (addr, gbits), pte, { });
+ AS_CHECK_SHADOW (as_root_addr, vg_addr_chop (addr, gbits), pte, { });
}
#ifndef NDEBUG
@@ -719,8 +719,8 @@ ID (as_build) (activity_t activity,
if (! (ret && rt.capp == pte))
as_dump_from (activity, as_root, "");
assertx (ret && rt.capp == pte,
- ADDR_FMT ": %sfound, got %p, expected %p",
- ADDR_PRINTF (addr),
+ VG_ADDR_FMT ": %sfound, got %p, expected %p",
+ VG_ADDR_PRINTF (addr),
ret ? "" : "not ", ret ? rt.capp : 0, pte);
}
# endif
diff --git a/libhurd-mm/as-compute-gbits.h b/libhurd-mm/as-compute-gbits.h
index 4fe1d42..4236ee7 100644
--- a/libhurd-mm/as-compute-gbits.h
+++ b/libhurd-mm/as-compute-gbits.h
@@ -35,9 +35,9 @@ as_compute_gbits_cappage (int untranslated_bits, int to_translate,
/* Our strategy is as follows: we want to avoid 1) having to move
page tables around, and 2) small cappages. We know that folios
will be mapped such that their data pages are visible in the data
- address space of the process, i.e., at /ADDR_BITS-7-12. Thus, we
- try to ensure that we have 7-bit cappages at /ADDR_BITS-7-12 and
- then 8-bit cappage at /ADDR_BITS-7-12-i*8, i > 0, i.e., /44, /36,
+ address space of the process, i.e., at /VG_ADDR_BITS-7-12. Thus, we
+ try to ensure that we have 7-bit cappages at /VG_ADDR_BITS-7-12 and
+ then 8-bit cappage at /VG_ADDR_BITS-7-12-i*8, i > 0, i.e., /44, /36,
etc. */
assertx (untranslated_bits > 0 && to_translate > 0 && gbits >= 0
@@ -51,7 +51,7 @@ as_compute_gbits_cappage (int untranslated_bits, int to_translate,
;
/* There could be less than PAGESIZE_LOG2 untranslated bits. Place
- a cappage at /ADDR_BITS-PAGESIZE_LOG2.
+ a cappage at /VG_ADDR_BITS-PAGESIZE_LOG2.
UNTRANSLATED_BITS
|--------------------|
@@ -65,9 +65,9 @@ as_compute_gbits_cappage (int untranslated_bits, int to_translate,
else if (untranslated_bits - gbits <= PAGESIZE_LOG2)
gbits = untranslated_bits - PAGESIZE_LOG2;
- /* There could be less than FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2
+ /* There could be less than VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2
untranslated bits. Place a cappage at
- /ADDR_BITS-FOLIO_OBJECTS_LOG2-PAGESIZE_LOG2.
+ /VG_ADDR_BITS-VG_FOLIO_OBJECTS_LOG2-PAGESIZE_LOG2.
UNTRANSLATED_BITS
|--------------------|
@@ -75,12 +75,12 @@ as_compute_gbits_cappage (int untranslated_bits, int to_translate,
GBITS
|------|-------|
| PAGESIZE_LOG2
- `FOLIO_OBJECTS_LOG2
+ `VG_FOLIO_OBJECTS_LOG2
^
*/
- else if (untranslated_bits - gbits <= FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2)
- gbits = untranslated_bits - FOLIO_OBJECTS_LOG2 - PAGESIZE_LOG2;
+ else if (untranslated_bits - gbits <= VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2)
+ gbits = untranslated_bits - VG_FOLIO_OBJECTS_LOG2 - PAGESIZE_LOG2;
/*
UNTRANSLATED_BITS
@@ -88,20 +88,20 @@ as_compute_gbits_cappage (int untranslated_bits, int to_translate,
|----------|-------|----|-----|
| | | PAGESIZE_LOG2
- | | `FOLIO_OBJECTS_LOG2
+ | | `VG_FOLIO_OBJECTS_LOG2
`GBITS `REMAINDER
Shrink GBITS such that REMAINDER becomes a multiple of
- CAPPAGE_SLOTS_LOG2.
+ VG_CAPPAGE_SLOTS_LOG2.
*/
else
{
int remainder = untranslated_bits - gbits
- - FOLIO_OBJECTS_LOG2 - PAGESIZE_LOG2;
+ - VG_FOLIO_OBJECTS_LOG2 - PAGESIZE_LOG2;
/* Amount to remove from GBITS such that REMAINDER + TO_REMOVE is a
- multiple of CAPPAGE_SLOTS_LOG2. */
- int to_remove = CAPPAGE_SLOTS_LOG2 - (remainder % CAPPAGE_SLOTS_LOG2);
+ multiple of VG_CAPPAGE_SLOTS_LOG2. */
+ int to_remove = VG_CAPPAGE_SLOTS_LOG2 - (remainder % VG_CAPPAGE_SLOTS_LOG2);
if (to_remove < gbits)
gbits -= to_remove;
@@ -112,10 +112,10 @@ as_compute_gbits_cappage (int untranslated_bits, int to_translate,
assert (gbits >= 0);
struct as_guard_cappage gc;
- if (untranslated_bits - gbits == FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2)
- gc.cappage_width = FOLIO_OBJECTS_LOG2;
+ if (untranslated_bits - gbits == VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2)
+ gc.cappage_width = VG_FOLIO_OBJECTS_LOG2;
else
- gc.cappage_width = CAPPAGE_SLOTS_LOG2;
+ gc.cappage_width = VG_CAPPAGE_SLOTS_LOG2;
if (gbits + gc.cappage_width > to_translate)
gc.cappage_width = to_translate - gbits;
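A worked instance of the strategy above, assuming the constants the comments imply (PAGESIZE_LOG2 = 12, VG_FOLIO_OBJECTS_LOG2 = 7, VG_CAPPAGE_SLOTS_LOG2 = 8): with untranslated_bits = 33 and a maximal guard of gbits = 10, the remainder is 33 - 10 - 19 = 4, so to_remove = 8 - 4 = 4 and the guard shrinks to 6 bits. That leaves 33 - 6 = 27 = 8 + 7 + 12 bits to translate: one 8-bit cappage index, the 7-bit folio index, and the 12-bit page offset, i.e., the cappage lands on the /VG_ADDR_BITS-19-i*8 boundary the comment aims for.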
diff --git a/libhurd-mm/as-dump.c b/libhurd-mm/as-dump.c
index 4eb4168..628b5d3 100644
--- a/libhurd-mm/as-dump.c
+++ b/libhurd-mm/as-dump.c
@@ -65,16 +65,16 @@ print_nr (int width, int64_t nr, bool hex)
static void
do_walk (activity_t activity, int index,
- struct cap *root, addr_t addr,
+ struct vg_cap *root, vg_addr_t addr,
int indent, bool descend, const char *output_prefix)
{
int i;
- struct cap cap = as_cap_lookup_rel (activity, root, addr, -1, NULL);
- if (cap.type == cap_void)
+ struct vg_cap vg_cap = as_cap_lookup_rel (activity, root, addr, -1, NULL);
+ if (vg_cap.type == vg_cap_void)
return;
- if (! cap_to_object (activity, &cap))
+ if (! vg_cap_to_object (activity, &vg_cap))
/* Cap is there but the object has been deallocated. */
return;
@@ -90,30 +90,30 @@ do_walk (activity_t activity, int index,
S_PRINTF ("root");
S_PRINTF (" ] ");
- print_nr (12, addr_prefix (addr), true);
- S_PRINTF ("/%d ", addr_depth (addr));
- if (CAP_GUARD_BITS (&cap))
- S_PRINTF ("| 0x%llx/%d ", CAP_GUARD (&cap), CAP_GUARD_BITS (&cap));
- if (CAP_SUBPAGES (&cap) != 1)
- S_PRINTF ("(%d/%d) ", CAP_SUBPAGE (&cap), CAP_SUBPAGES (&cap));
+ print_nr (12, vg_addr_prefix (addr), true);
+ S_PRINTF ("/%d ", vg_addr_depth (addr));
+ if (VG_CAP_GUARD_BITS (&vg_cap))
+ S_PRINTF ("| 0x%llx/%d ", VG_CAP_GUARD (&vg_cap), VG_CAP_GUARD_BITS (&vg_cap));
+ if (VG_CAP_SUBPAGES (&vg_cap) != 1)
+ S_PRINTF ("(%d/%d) ", VG_CAP_SUBPAGE (&vg_cap), VG_CAP_SUBPAGES (&vg_cap));
- if (CAP_GUARD_BITS (&cap)
- && ADDR_BITS - addr_depth (addr) >= CAP_GUARD_BITS (&cap))
+ if (VG_CAP_GUARD_BITS (&vg_cap)
+ && VG_ADDR_BITS - vg_addr_depth (addr) >= VG_CAP_GUARD_BITS (&vg_cap))
S_PRINTF ("=> 0x%llx/%d ",
- addr_prefix (addr_extend (addr,
- CAP_GUARD (&cap),
- CAP_GUARD_BITS (&cap))),
- addr_depth (addr) + CAP_GUARD_BITS (&cap));
+ vg_addr_prefix (vg_addr_extend (addr,
+ VG_CAP_GUARD (&vg_cap),
+ VG_CAP_GUARD_BITS (&vg_cap))),
+ vg_addr_depth (addr) + VG_CAP_GUARD_BITS (&vg_cap));
#ifdef RM_INTERN
- S_PRINTF ("@" OID_FMT " ", OID_PRINTF (cap.oid));
+ S_PRINTF ("@" VG_OID_FMT " ", VG_OID_PRINTF (vg_cap.oid));
#endif
- S_PRINTF ("%s", cap_type_string (cap.type));
+ S_PRINTF ("%s", vg_cap_type_string (vg_cap.type));
#ifdef RM_INTERN
- if (cap.type == cap_page || cap.type == cap_rpage)
+ if (vg_cap.type == vg_cap_page || vg_cap.type == vg_cap_rpage)
{
- struct object *object = cap_to_object_soft (root_activity, &cap);
+ struct object *object = cap_to_object_soft (root_activity, &vg_cap);
if (object)
{
struct md5_ctx ctx;
@@ -145,55 +145,55 @@ do_walk (activity_t activity, int index,
if (! descend)
return;
- if (addr_depth (addr) + CAP_GUARD_BITS (&cap) > ADDR_BITS)
+ if (vg_addr_depth (addr) + VG_CAP_GUARD_BITS (&vg_cap) > VG_ADDR_BITS)
return;
- addr = addr_extend (addr, CAP_GUARD (&cap), CAP_GUARD_BITS (&cap));
+ addr = vg_addr_extend (addr, VG_CAP_GUARD (&vg_cap), VG_CAP_GUARD_BITS (&vg_cap));
- switch (cap.type)
+ switch (vg_cap.type)
{
- case cap_cappage:
- case cap_rcappage:
- if (addr_depth (addr) + CAP_SUBPAGE_SIZE_LOG2 (&cap) > ADDR_BITS)
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
+ if (vg_addr_depth (addr) + VG_CAP_SUBPAGE_SIZE_LOG2 (&vg_cap) > VG_ADDR_BITS)
return;
- for (i = 0; i < CAP_SUBPAGE_SIZE (&cap); i ++)
+ for (i = 0; i < VG_CAP_SUBPAGE_SIZE (&vg_cap); i ++)
do_walk (activity, i, root,
- addr_extend (addr, i, CAP_SUBPAGE_SIZE_LOG2 (&cap)),
+ vg_addr_extend (addr, i, VG_CAP_SUBPAGE_SIZE_LOG2 (&vg_cap)),
indent + 1, true, output_prefix);
return;
- case cap_folio:
- if (addr_depth (addr) + FOLIO_OBJECTS_LOG2 > ADDR_BITS)
+ case vg_cap_folio:
+ if (vg_addr_depth (addr) + VG_FOLIO_OBJECTS_LOG2 > VG_ADDR_BITS)
return;
- for (i = 0; i < FOLIO_OBJECTS; i ++)
+ for (i = 0; i < VG_FOLIO_OBJECTS; i ++)
do_walk (activity, i, root,
- addr_extend (addr, i, FOLIO_OBJECTS_LOG2),
+ vg_addr_extend (addr, i, VG_FOLIO_OBJECTS_LOG2),
indent + 1, false, output_prefix);
return;
- case cap_thread:
- if (addr_depth (addr) + THREAD_SLOTS_LOG2 > ADDR_BITS)
+ case vg_cap_thread:
+ if (vg_addr_depth (addr) + VG_THREAD_SLOTS_LOG2 > VG_ADDR_BITS)
return;
- for (i = 0; i < THREAD_SLOTS; i ++)
+ for (i = 0; i < VG_THREAD_SLOTS; i ++)
do_walk (activity, i, root,
- addr_extend (addr, i, THREAD_SLOTS_LOG2),
+ vg_addr_extend (addr, i, VG_THREAD_SLOTS_LOG2),
indent + 1, true, output_prefix);
return;
- case cap_messenger:
+ case vg_cap_messenger:
/* rmessenger's don't expose their capability slots. */
- if (addr_depth (addr) + VG_MESSENGER_SLOTS_LOG2 > ADDR_BITS)
+ if (vg_addr_depth (addr) + VG_MESSENGER_SLOTS_LOG2 > VG_ADDR_BITS)
return;
for (i = 0; i < VG_MESSENGER_SLOTS; i ++)
do_walk (activity, i, root,
- addr_extend (addr, i, VG_MESSENGER_SLOTS_LOG2),
+ vg_addr_extend (addr, i, VG_MESSENGER_SLOTS_LOG2),
indent + 1, true, output_prefix);
return;
@@ -205,11 +205,11 @@ do_walk (activity_t activity, int index,
/* AS_LOCK must not be held. */
void
-as_dump_from (activity_t activity, struct cap *root, const char *prefix)
+as_dump_from (activity_t activity, struct vg_cap *root, const char *prefix)
{
debug (0, "Dumping address space.");
backtrace_print ();
if (0)
- do_walk (activity, -1, root, ADDR (0, 0), 0, true, prefix);
+ do_walk (activity, -1, root, VG_ADDR (0, 0), 0, true, prefix);
}
diff --git a/libhurd-mm/as-lookup.c b/libhurd-mm/as-lookup.c
index 7639fd1..0de3c03 100644
--- a/libhurd-mm/as-lookup.c
+++ b/libhurd-mm/as-lookup.c
@@ -57,14 +57,14 @@
static bool
as_lookup_rel_internal (activity_t activity,
- struct cap *root, addr_t address,
- enum cap_type type, bool *writable,
+ struct vg_cap *root, vg_addr_t address,
+ enum vg_cap_type type, bool *writable,
enum as_lookup_mode mode, union as_lookup_ret *rt,
bool dump)
{
assert (root);
- struct cap *start = root;
+ struct vg_cap *start = root;
#ifndef NDEBUG
bool dump_path = dump;
@@ -74,49 +74,49 @@ as_lookup_rel_internal (activity_t activity,
#endif
root = start;
- uint64_t addr = addr_prefix (address);
- uintptr_t remaining = addr_depth (address);
+ uint64_t addr = vg_addr_prefix (address);
+ uintptr_t remaining = vg_addr_depth (address);
/* The code below assumes that the REMAINING significant bits are in the
lower bits, not upper. */
- addr >>= (ADDR_BITS - remaining);
+ addr >>= (VG_ADDR_BITS - remaining);
- struct cap fake_slot;
+ struct vg_cap fake_slot;
/* Assume the object is writable until proven otherwise. */
int w = true;
if (dump_path)
- debug (0, "Looking up %s at " ADDR_FMT,
- mode == as_lookup_want_cap ? "cap"
+ debug (0, "Looking up %s at " VG_ADDR_FMT,
+ mode == as_lookup_want_cap ? "vg_cap"
: (mode == as_lookup_want_slot ? "slot" : "object"),
- ADDR_PRINTF (address));
+ VG_ADDR_PRINTF (address));
while (remaining > 0)
{
if (dump_path)
- debug (0, "Cap at " ADDR_FMT ": " CAP_FMT " -> " ADDR_FMT " (%d)",
- ADDR_PRINTF (addr_chop (address, remaining)),
- CAP_PRINTF (root),
- ADDR_PRINTF (addr_chop (address,
- remaining - CAP_GUARD_BITS (root))),
+ debug (0, "Cap at " VG_ADDR_FMT ": " VG_CAP_FMT " -> " VG_ADDR_FMT " (%d)",
+ VG_ADDR_PRINTF (vg_addr_chop (address, remaining)),
+ VG_CAP_PRINTF (root),
+ VG_ADDR_PRINTF (vg_addr_chop (address,
+ remaining - VG_CAP_GUARD_BITS (root))),
remaining);
- assertx (CAP_TYPE_MIN <= root->type && root->type <= CAP_TYPE_MAX,
- "Cap at " ADDR_FMT " has type %d?! (" ADDR_FMT ")",
- ADDR_PRINTF (addr_chop (address, remaining)), root->type,
- ADDR_PRINTF (address));
+ assertx (VG_CAP_TYPE_MIN <= root->type && root->type <= VG_CAP_TYPE_MAX,
+ "Cap at " VG_ADDR_FMT " has type %d?! (" VG_ADDR_FMT ")",
+ VG_ADDR_PRINTF (vg_addr_chop (address, remaining)), root->type,
+ VG_ADDR_PRINTF (address));
- if (root->type == cap_rcappage)
+ if (root->type == vg_cap_rcappage)
/* The page directory is read-only. Note the weakened access
appropriately. */
{
- if (type != -1 && ! cap_type_weak_p (type))
+ if (type != -1 && ! vg_cap_type_weak_p (type))
{
debug (1, "Read-only cappage at %llx/%d but %s requires "
"write access",
- addr_prefix (addr_chop (address, remaining)),
- addr_depth (address) - remaining,
- cap_type_string (type));
+ vg_addr_prefix (vg_addr_chop (address, remaining)),
+ vg_addr_depth (address) - remaining,
+ vg_cap_type_string (type));
/* Translating this capability does not provide write
access. The requested type is strong, bail. */
@@ -126,29 +126,29 @@ as_lookup_rel_internal (activity_t activity,
w = false;
}
- if (CAP_GUARD_BITS (root))
- /* Check that ADDR contains the guard. */
+ if (VG_CAP_GUARD_BITS (root))
+ /* Check that VG_ADDR contains the guard. */
{
- int gdepth = CAP_GUARD_BITS (root);
+ int gdepth = VG_CAP_GUARD_BITS (root);
if (gdepth > remaining)
{
debug (1, "Translating %llx/%d; not enough bits (%d) to "
"translate %d-bit guard at /%d",
- addr_prefix (address), addr_depth (address),
- remaining, gdepth, ADDR_BITS - remaining);
+ vg_addr_prefix (address), vg_addr_depth (address),
+ remaining, gdepth, VG_ADDR_BITS - remaining);
DUMP_OR_RET (false);
}
int guard = extract_bits64_inv (addr, remaining - 1, gdepth);
- if (CAP_GUARD (root) != guard)
+ if (VG_CAP_GUARD (root) != guard)
{
debug (dump_path ? 0 : 5,
- "Translating " ADDR_FMT ": guard 0x%llx/%d does "
+ "Translating " VG_ADDR_FMT ": guard 0x%llx/%d does "
"not match 0x%llx's bits %d-%d => 0x%x",
- ADDR_PRINTF (address),
- CAP_GUARD (root), CAP_GUARD_BITS (root), addr,
+ VG_ADDR_PRINTF (address),
+ VG_CAP_GUARD (root), VG_CAP_GUARD_BITS (root), addr,
remaining - gdepth, remaining - 1, guard);
return false;
}
@@ -169,34 +169,34 @@ as_lookup_rel_internal (activity_t activity,
switch (root->type)
{
- case cap_cappage:
- case cap_rcappage:
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
{
/* Index the page table. */
- int bits = CAP_SUBPAGE_SIZE_LOG2 (root);
+ int bits = VG_CAP_SUBPAGE_SIZE_LOG2 (root);
if (remaining < bits)
{
- debug (1, "Translating " ADDR_FMT "; not enough bits (%d) "
- "to index %d-bit cappage at " ADDR_FMT,
- ADDR_PRINTF (address), remaining, bits,
- ADDR_PRINTF (addr_chop (address, remaining)));
+ debug (1, "Translating " VG_ADDR_FMT "; not enough bits (%d) "
+ "to index %d-bit cappage at " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (address), remaining, bits,
+ VG_ADDR_PRINTF (vg_addr_chop (address, remaining)));
DUMP_OR_RET (false);
}
- struct object *object = cap_to_object (activity, root);
+ struct object *object = vg_cap_to_object (activity, root);
if (! object)
{
#ifdef RM_INTERN
- debug (1, "Failed to get object with OID " OID_FMT,
- OID_PRINTF (root->oid));
+ debug (1, "Failed to get object with OID " VG_OID_FMT,
+ VG_OID_PRINTF (root->oid));
DUMP_OR_RET (false);
#endif
return false;
}
- int offset = CAP_SUBPAGE_OFFSET (root)
+ int offset = VG_CAP_SUBPAGE_OFFSET (root)
+ extract_bits64_inv (addr, remaining - 1, bits);
- assert (0 <= offset && offset < CAPPAGE_SLOTS);
+ assert (0 <= offset && offset < VG_CAPPAGE_SLOTS);
remaining -= bits;
if (dump_path)
@@ -207,77 +207,77 @@ as_lookup_rel_internal (activity_t activity,
break;
}
- case cap_folio:
- if (remaining < FOLIO_OBJECTS_LOG2)
+ case vg_cap_folio:
+ if (remaining < VG_FOLIO_OBJECTS_LOG2)
{
- debug (1, "Translating " ADDR_FMT "; not enough bits (%d) "
- "to index folio at " ADDR_FMT,
- ADDR_PRINTF (address), remaining,
- ADDR_PRINTF (addr_chop (address, remaining)));
+ debug (1, "Translating " VG_ADDR_FMT "; not enough bits (%d) "
+ "to index folio at " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (address), remaining,
+ VG_ADDR_PRINTF (vg_addr_chop (address, remaining)));
DUMP_OR_RET (false);
}
- struct object *object = cap_to_object (activity, root);
+ struct object *object = vg_cap_to_object (activity, root);
if (! object)
{
#ifdef RM_INTERN
- debug (1, "Failed to get object with OID " OID_FMT,
- OID_PRINTF (root->oid));
+ debug (1, "Failed to get object with OID " VG_OID_FMT,
+ VG_OID_PRINTF (root->oid));
#endif
DUMP_OR_RET (false);
}
struct folio *folio = (struct folio *) object;
- int i = extract_bits64_inv (addr, remaining - 1, FOLIO_OBJECTS_LOG2);
+ int i = extract_bits64_inv (addr, remaining - 1, VG_FOLIO_OBJECTS_LOG2);
#ifdef RM_INTERN
root = &fake_slot;
- *root = folio_object_cap (folio, i);
+ *root = vg_folio_object_cap (folio, i);
#else
root = &folio->objects[i];
#endif
- remaining -= FOLIO_OBJECTS_LOG2;
+ remaining -= VG_FOLIO_OBJECTS_LOG2;
if (dump_path)
debug (0, "Indexing folio: %d/%d (%d)",
- i, FOLIO_OBJECTS_LOG2, remaining);
+ i, VG_FOLIO_OBJECTS_LOG2, remaining);
break;
- case cap_thread:
- case cap_messenger:
+ case vg_cap_thread:
+ case vg_cap_messenger:
/* Note: rmessengers don't expose their capability slots. */
{
/* Index the object. */
int bits;
switch (root->type)
{
- case cap_thread:
- bits = THREAD_SLOTS_LOG2;
+ case vg_cap_thread:
+ bits = VG_THREAD_SLOTS_LOG2;
break;
- case cap_messenger:
+ case vg_cap_messenger:
bits = VG_MESSENGER_SLOTS_LOG2;
break;
}
if (remaining < bits)
{
- debug (1, "Translating " ADDR_FMT "; not enough bits (%d) "
- "to index %d-bit %s at " ADDR_FMT,
- ADDR_PRINTF (address), remaining, bits,
- cap_type_string (root->type),
- ADDR_PRINTF (addr_chop (address, remaining)));
+ debug (1, "Translating " VG_ADDR_FMT "; not enough bits (%d) "
+ "to index %d-bit %s at " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (address), remaining, bits,
+ vg_cap_type_string (root->type),
+ VG_ADDR_PRINTF (vg_addr_chop (address, remaining)));
DUMP_OR_RET (false);
}
- struct object *object = cap_to_object (activity, root);
+ struct object *object = vg_cap_to_object (activity, root);
if (! object)
{
#ifdef RM_INTERN
- debug (1, "Failed to get object with OID " OID_FMT,
- OID_PRINTF (root->oid));
+ debug (1, "Failed to get object with OID " VG_OID_FMT,
+ VG_OID_PRINTF (root->oid));
DUMP_OR_RET (false);
#endif
return false;
@@ -292,7 +292,7 @@ as_lookup_rel_internal (activity_t activity,
if (dump_path)
debug (0, "Indexing %s: %d/%d (%d)",
- cap_type_string (root->type), offset, bits, remaining);
+ vg_cap_type_string (root->type), offset, bits, remaining);
root = &object->caps[offset];
break;
@@ -306,11 +306,11 @@ as_lookup_rel_internal (activity_t activity,
do_debug (4)
as_dump_from (activity, start, NULL);
debug (dump_path ? 0 : 5,
- "Translating " ADDR_FMT ", encountered a %s at "
- ADDR_FMT " but expected a cappage or a folio",
- ADDR_PRINTF (address),
- cap_type_string (root->type),
- ADDR_PRINTF (addr_chop (address, remaining)));
+ "Translating " VG_ADDR_FMT ", encountered a %s at "
+ VG_ADDR_FMT " but expected a cappage or a folio",
+ VG_ADDR_PRINTF (address),
+ vg_cap_type_string (root->type),
+ VG_ADDR_PRINTF (vg_addr_chop (address, remaining)));
return false;
}
@@ -318,16 +318,16 @@ as_lookup_rel_internal (activity_t activity,
/* We've indexed the object and have no bits remaining to
translate. */
{
- if (CAP_GUARD_BITS (root) && mode == as_lookup_want_object)
+ if (VG_CAP_GUARD_BITS (root) && mode == as_lookup_want_object)
/* The caller wants an object but we haven't translated
the slot's guard. */
{
debug (dump_path ? 0 : 4,
"Found slot at %llx/%d but referenced object "
"(%s) has an untranslated guard of %lld/%d!",
- addr_prefix (address), addr_depth (address),
- cap_type_string (root->type), CAP_GUARD (root),
- CAP_GUARD_BITS (root));
+ vg_addr_prefix (address), vg_addr_depth (address),
+ vg_cap_type_string (root->type), VG_CAP_GUARD (root),
+ VG_CAP_GUARD_BITS (root));
return false;
}
@@ -337,17 +337,17 @@ as_lookup_rel_internal (activity_t activity,
assert (remaining == 0);
if (dump_path)
- debug (0, "Cap at " ADDR_FMT ": " CAP_FMT " -> " ADDR_FMT " (%d)",
- ADDR_PRINTF (addr_chop (address, remaining)),
- CAP_PRINTF (root),
- ADDR_PRINTF (addr_chop (address,
- remaining - CAP_GUARD_BITS (root))),
+ debug (0, "Cap at " VG_ADDR_FMT ": " VG_CAP_FMT " -> " VG_ADDR_FMT " (%d)",
+ VG_ADDR_PRINTF (vg_addr_chop (address, remaining)),
+ VG_CAP_PRINTF (root),
+ VG_ADDR_PRINTF (vg_addr_chop (address,
+ remaining - VG_CAP_GUARD_BITS (root))),
remaining);
if (type != -1 && type != root->type)
/* Types don't match. */
{
- if (cap_type_strengthen (type) == root->type)
+ if (vg_cap_type_strengthen (type) == root->type)
/* The capability just provides more strength than
requested. That's fine. */
;
@@ -357,14 +357,14 @@ as_lookup_rel_internal (activity_t activity,
do_debug (4)
as_dump_from (activity, start, __func__);
debug (dump_path ? 0 : 4,
- "cap at " ADDR_FMT " designates a %s but want a %s",
- ADDR_PRINTF (address), cap_type_string (root->type),
- cap_type_string (type));
+ "vg_cap at " VG_ADDR_FMT " designates a %s but want a %s",
+ VG_ADDR_PRINTF (address), vg_cap_type_string (root->type),
+ vg_cap_type_string (type));
return false;
}
}
- if (mode == as_lookup_want_object && cap_type_weak_p (root->type))
+ if (mode == as_lookup_want_object && vg_cap_type_weak_p (root->type))
w = false;
if (writable)
@@ -375,7 +375,7 @@ as_lookup_rel_internal (activity_t activity,
if (root == &fake_slot)
{
debug (1, "%llx/%d resolves to a folio object but want a slot",
- addr_prefix (address), addr_depth (address));
+ vg_addr_prefix (address), vg_addr_depth (address));
DUMP_OR_RET (false);
}
rt->capp = root;
@@ -390,8 +390,8 @@ as_lookup_rel_internal (activity_t activity,
bool
as_lookup_rel (activity_t activity,
- struct cap *root, addr_t address,
- enum cap_type type, bool *writable,
+ struct vg_cap *root, vg_addr_t address,
+ enum vg_cap_type type, bool *writable,
enum as_lookup_mode mode, union as_lookup_ret *rt)
{
bool r;
@@ -410,7 +410,7 @@ as_lookup_rel (activity_t activity,
}
void
-as_dump_path_rel (activity_t activity, struct cap *root, addr_t addr)
+as_dump_path_rel (activity_t activity, struct vg_cap *root, vg_addr_t addr)
{
union as_lookup_ret rt;
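A hedged sketch of the renamed lookup interface as a caller might use it; rt.capp and the -1 "any type" convention appear in the hunks above, while the surrounding locals are assumed:

  /* Find the capability slot designating ADDR, whatever its type.  */
  union as_lookup_ret rt;
  struct vg_cap *slot = NULL;
  bool writable;
  if (as_lookup_rel (activity, as_root, addr, -1 /* any type */,
                     &writable, as_lookup_want_slot, &rt))
    slot = rt.capp;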
diff --git a/libhurd-mm/as.c b/libhurd-mm/as.c
index be9dc09..7e47727 100644
--- a/libhurd-mm/as.c
+++ b/libhurd-mm/as.c
@@ -97,9 +97,9 @@ free_space_desc_slab_alloc (void *hook, size_t size, void **ptr)
assert (size == PAGESIZE);
struct storage storage = storage_alloc (meta_data_activity,
- cap_page, STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- *ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ vg_cap_page, STORAGE_LONG_LIVED,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ *ptr = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
return 0;
}
@@ -109,7 +109,7 @@ free_space_desc_slab_dealloc (void *hook, void *buffer, size_t size)
{
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
@@ -171,7 +171,7 @@ free_space_split (struct free_space *f, uint64_t start, uint64_t end)
}
}
-addr_t
+vg_addr_t
as_alloc (int width, uint64_t count, bool data_mappable)
{
assert (as_init_done);
@@ -189,10 +189,10 @@ as_alloc (int width, uint64_t count, bool data_mappable)
else if (w <= PAGESIZE_LOG2)
w = PAGESIZE_LOG2;
else
- /* Make W - PAGESIZE_LOG2 a multiple of CAPPAGE_SLOTS_LOG2;
+ /* Make W - PAGESIZE_LOG2 a multiple of VG_CAPPAGE_SLOTS_LOG2;
this greatly simplifies page table construction. */
- w += (CAPPAGE_SLOTS_LOG2
- - ((w - PAGESIZE_LOG2) % CAPPAGE_SLOTS_LOG2));
+ w += (VG_CAPPAGE_SLOTS_LOG2
+ - ((w - PAGESIZE_LOG2) % VG_CAPPAGE_SLOTS_LOG2));
}
uint64_t align = 1ULL << w;
@@ -200,7 +200,7 @@ as_alloc (int width, uint64_t count, bool data_mappable)
ss_mutex_lock (&free_spaces_lock);
- addr_t addr = ADDR_VOID;
+ vg_addr_t addr = VG_ADDR_VOID;
struct free_space *free_space;
for (free_space = hurd_btree_free_space_first (&free_spaces);
@@ -220,24 +220,24 @@ as_alloc (int width, uint64_t count, bool data_mappable)
break;
free_space_split (free_space, start, start + length - 1);
- addr = ADDR (start, ADDR_BITS - (w - shift));
+ addr = VG_ADDR (start, VG_ADDR_BITS - (w - shift));
break;
}
}
ss_mutex_unlock (&free_spaces_lock);
- if (ADDR_IS_VOID (addr))
+ if (VG_ADDR_IS_VOID (addr))
debug (0, "No space for object of size 0x%x", 1 << (width - 1));
return addr;
}
bool
-as_alloc_at (addr_t addr, uint64_t count)
+as_alloc_at (vg_addr_t addr, uint64_t count)
{
- uint64_t start = addr_prefix (addr);
- uint64_t length = (1ULL << (ADDR_BITS - addr_depth (addr))) * count;
+ uint64_t start = vg_addr_prefix (addr);
+ uint64_t length = (1ULL << (VG_ADDR_BITS - vg_addr_depth (addr))) * count;
uint64_t end = start + length - 1;
struct region region = { start, end };
@@ -259,10 +259,10 @@ as_alloc_at (addr_t addr, uint64_t count)
}
void
-as_free (addr_t addr, uint64_t count)
+as_free (vg_addr_t addr, uint64_t count)
{
- uint64_t start = addr_prefix (addr);
- uint64_t length = (1ULL << (ADDR_BITS - addr_depth (addr))) * count;
+ uint64_t start = vg_addr_prefix (addr);
+ uint64_t length = (1ULL << (VG_ADDR_BITS - vg_addr_depth (addr))) * count;
uint64_t end = start + length - 1;
struct free_space *space = free_space_desc_alloc ();
@@ -325,29 +325,29 @@ as_free (addr_t addr, uint64_t count)
}
struct as_allocate_pt_ret
-as_allocate_page_table (addr_t addr)
+as_allocate_page_table (vg_addr_t addr)
{
struct as_allocate_pt_ret ret;
memset (&ret, 0, sizeof (ret));
- ret.cap.type = cap_void;
+ ret.cap.type = vg_cap_void;
/* First allocate the real object. */
- struct storage storage = storage_alloc (meta_data_activity, cap_cappage,
+ struct storage storage = storage_alloc (meta_data_activity, vg_cap_cappage,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
return ret;
- debug (4, ADDR_FMT " -> " ADDR_FMT,
- ADDR_PRINTF (addr), ADDR_PRINTF (storage.addr));
+ debug (4, VG_ADDR_FMT " -> " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (addr), VG_ADDR_PRINTF (storage.addr));
/* Then, allocate the shadow object. */
- struct storage shadow = storage_alloc (meta_data_activity, cap_page,
+ struct storage shadow = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT,
- ADDR_VOID);
- if (ADDR_IS_VOID (shadow.addr))
+ VG_OBJECT_POLICY_DEFAULT,
+ VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (shadow.addr))
{
storage_free (storage.addr, false);
return ret;
@@ -355,8 +355,8 @@ as_allocate_page_table (addr_t addr)
ret.storage = storage.addr;
ret.cap = *storage.cap;
- cap_set_shadow (&ret.cap,
- ADDR_TO_PTR (addr_extend (shadow.addr,
+ vg_cap_set_shadow (&ret.cap,
+ VG_ADDR_TO_PTR (vg_addr_extend (shadow.addr,
0, PAGESIZE_LOG2)));
return ret;
@@ -374,26 +374,26 @@ as_alloc_slow (int width)
{
assert (! as_init_done);
- addr_t slot = ADDR_VOID;
+ vg_addr_t slot = VG_ADDR_VOID;
- int find_free_slot (addr_t addr,
- uintptr_t type, struct cap_properties properties,
+ int find_free_slot (vg_addr_t addr,
+ uintptr_t type, struct vg_cap_properties properties,
bool writable,
void *cookie)
{
- if (type == cap_folio)
+ if (type == vg_cap_folio)
/* We avoid allocating out of folios. */
return -1;
- assert (type == cap_void);
+ assert (type == vg_cap_void);
- if (ADDR_BITS - addr_depth (addr) < width)
+ if (VG_ADDR_BITS - vg_addr_depth (addr) < width)
return -1;
if (! writable)
return 0;
- uint64_t start = addr_prefix (addr);
+ uint64_t start = vg_addr_prefix (addr);
uint64_t end = start + (1 << width) - 1;
if (end >= DATA_ADDR_MAX)
@@ -414,8 +414,8 @@ as_alloc_slow (int width)
for (i = 0; i < desc_additional_count; i ++)
{
struct hurd_object_desc *desc = &desc_additional[i];
- if (ADDR_EQ (addr, addr_chop (desc->object,
- CAP_ADDR_TRANS_GUARD_BITS
+ if (VG_ADDR_EQ (addr, vg_addr_chop (desc->object,
+ VG_CAP_ADDR_TRANS_GUARD_BITS
(properties.addr_trans))))
return 0;
}
@@ -426,31 +426,31 @@ as_alloc_slow (int width)
error_t err;
- if (! as_walk (find_free_slot, 1 << cap_void | 1 << cap_folio,
+ if (! as_walk (find_free_slot, 1 << vg_cap_void | 1 << vg_cap_folio,
(void *) &slot))
panic ("Failed to find a free slot!");
- assert (! ADDR_IS_VOID (slot));
+ assert (! VG_ADDR_IS_VOID (slot));
/* Set the guard on the slot. */
- int gbits = ADDR_BITS - addr_depth (slot) - width;
+ int gbits = VG_ADDR_BITS - vg_addr_depth (slot) - width;
assert (gbits >= 0);
- struct cap_properties properties = CAP_PROPERTIES_DEFAULT;
- CAP_ADDR_TRANS_SET_GUARD (&properties.addr_trans, 0, gbits);
- err = rm_cap_copy (meta_data_activity, ADDR_VOID, slot, ADDR_VOID, slot,
- CAP_COPY_COPY_ADDR_TRANS_GUARD, properties);
+ struct vg_cap_properties properties = VG_CAP_PROPERTIES_DEFAULT;
+ VG_CAP_ADDR_TRANS_SET_GUARD (&properties.addr_trans, 0, gbits);
+ err = rm_cap_copy (meta_data_activity, VG_ADDR_VOID, slot, VG_ADDR_VOID, slot,
+ VG_CAP_COPY_COPY_ADDR_TRANS_GUARD, properties);
if (err)
panic ("failed to copy capability: %d", err);
- slot = addr_extend (slot, 0, gbits);
+ slot = vg_addr_extend (slot, 0, gbits);
/* Fill in a descriptor. */
assertx ((((uintptr_t) &desc_additional[0]) & (PAGESIZE - 1)) == 0,
"%p", &desc_additional[0]);
- debug (5, "Allocating space for " ADDR_FMT
+ debug (5, "Allocating space for " VG_ADDR_FMT
"; using additional descriptor %d",
- ADDR_PRINTF (slot), desc_additional_count);
+ VG_ADDR_PRINTF (slot), desc_additional_count);
struct hurd_object_desc *desc = &desc_additional[desc_additional_count ++];
if (desc_additional_count > DESC_ADDITIONAL)
@@ -460,7 +460,7 @@ as_alloc_slow (int width)
return desc;
}
-struct cap shadow_root;
+struct vg_cap shadow_root;
void
as_init (void)
@@ -471,10 +471,10 @@ as_init (void)
debug (0, "%d descriptors", __hurd_startup_data->desc_count);
for (i = 0; i < __hurd_startup_data->desc_count; i ++)
{
- debug (0, ADDR_FMT " (" ADDR_FMT "): %s",
- ADDR_PRINTF (__hurd_startup_data->descs[i].object),
- ADDR_PRINTF (__hurd_startup_data->descs[i].storage),
- cap_type_string (__hurd_startup_data->descs[i].type));
+ debug (0, VG_ADDR_FMT " (" VG_ADDR_FMT "): %s",
+ VG_ADDR_PRINTF (__hurd_startup_data->descs[i].object),
+ VG_ADDR_PRINTF (__hurd_startup_data->descs[i].storage),
+ vg_cap_type_string (__hurd_startup_data->descs[i].type));
}
}
@@ -490,37 +490,37 @@ as_init (void)
/* We start with a tabula rasa and then "allocate" the regions that
are actually in use. */
- as_free (ADDR (0, 0), 1);
+ as_free (VG_ADDR (0, 0), 1);
/* Then, we create the shadow page tables and mark the allocation
regions appropriately. */
- void add (struct hurd_object_desc *desc, addr_t addr)
+ void add (struct hurd_object_desc *desc, vg_addr_t addr)
{
error_t err;
- debug (5, "Adding object " ADDR_FMT " (%s)",
- ADDR_PRINTF (addr), cap_type_string (desc->type));
+ debug (5, "Adding object " VG_ADDR_FMT " (%s)",
+ VG_ADDR_PRINTF (addr), vg_cap_type_string (desc->type));
uintptr_t type;
- struct cap_properties properties;
- err = rm_cap_read (meta_data_activity, ADDR_VOID, addr,
+ struct vg_cap_properties properties;
+ err = rm_cap_read (meta_data_activity, VG_ADDR_VOID, addr,
&type, &properties);
assert (! err);
- if (! cap_types_compatible (type, desc->type))
- rm_as_dump (ADDR_VOID, ADDR_VOID);
- assertx (cap_types_compatible (type, desc->type),
- "Object at " ADDR_FMT ": %s != %s",
- ADDR_PRINTF (addr),
- cap_type_string (type), cap_type_string (desc->type));
+ if (! vg_cap_types_compatible (type, desc->type))
+ rm_as_dump (VG_ADDR_VOID, VG_ADDR_VOID);
+ assertx (vg_cap_types_compatible (type, desc->type),
+ "Object at " VG_ADDR_FMT ": %s != %s",
+ VG_ADDR_PRINTF (addr),
+ vg_cap_type_string (type), vg_cap_type_string (desc->type));
- int gbits = CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans);
- addr_t slot_addr = addr_chop (addr, gbits);
+ int gbits = VG_CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans);
+ vg_addr_t slot_addr = vg_addr_chop (addr, gbits);
as_slot_lookup_use (slot_addr,
({
slot->type = type;
- CAP_PROPERTIES_SET (slot, properties);
+ VG_CAP_PROPERTIES_SET (slot, properties);
}));
switch (desc->type)
@@ -528,42 +528,42 @@ as_init (void)
default:
/* Don't allocate the AS associated with the storage. It is
dominated by its containing folio. */
- if (! ADDR_EQ (addr, desc->storage))
+ if (! VG_ADDR_EQ (addr, desc->storage))
as_alloc_at (addr, 1);
break;
- case cap_void:
+ case vg_cap_void:
assert (! "void descriptor?");
return;
- case cap_cappage:
- case cap_rcappage:
- if (ADDR_BITS - addr_depth (addr)
- < CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (properties.addr_trans))
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
+ if (VG_ADDR_BITS - vg_addr_depth (addr)
+ < VG_CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (properties.addr_trans))
/* The cappage is unusable for addressing, assuming it is
in-use. */
{
- if (! ADDR_EQ (addr, desc->storage))
+ if (! VG_ADDR_EQ (addr, desc->storage))
as_alloc_at (addr, 1);
return;
}
struct storage shadow_storage
= storage_alloc (meta_data_activity,
- cap_page, STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (shadow_storage.addr))
+ vg_cap_page, STORAGE_LONG_LIVED,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (shadow_storage.addr))
panic ("Out of space.");
struct object *shadow
- = ADDR_TO_PTR (addr_extend (shadow_storage.addr,
+ = VG_ADDR_TO_PTR (vg_addr_extend (shadow_storage.addr,
0, PAGESIZE_LOG2));
as_slot_lookup_use (addr,
({
- cap_set_shadow (slot, shadow);
+ vg_cap_set_shadow (slot, shadow);
}));
break;
- case cap_folio:
+ case vg_cap_folio:
/* Folios are not available for use. */
as_alloc_at (addr, 1);
as_slot_lookup_use (addr,
@@ -593,8 +593,8 @@ as_init (void)
i < __hurd_startup_data->desc_count;
i ++, desc ++)
{
- depths |= 1ULL << addr_depth (desc->object);
- depths |= 1ULL << addr_depth (desc->storage);
+ depths |= 1ULL << vg_addr_depth (desc->object);
+ depths |= 1ULL << vg_addr_depth (desc->storage);
}
while (depths)
@@ -606,20 +606,20 @@ as_init (void)
i < __hurd_startup_data->desc_count;
i ++, desc ++)
{
- if (addr_depth (desc->object) == depth)
+ if (vg_addr_depth (desc->object) == depth)
add (desc, desc->object);
- if (! ADDR_EQ (desc->object, desc->storage)
- && addr_depth (desc->storage) == depth)
+ if (! VG_ADDR_EQ (desc->object, desc->storage)
+ && vg_addr_depth (desc->storage) == depth)
add (desc, desc->storage);
}
}
/* Reserve the kip and the utcb. */
- as_alloc_at (ADDR ((uintptr_t) l4_kip (), ADDR_BITS), l4_kip_area_size ());
- as_alloc_at (ADDR ((uintptr_t) _L4_utcb (), ADDR_BITS), l4_utcb_size ());
+ as_alloc_at (VG_ADDR ((uintptr_t) l4_kip (), VG_ADDR_BITS), l4_kip_area_size ());
+ as_alloc_at (VG_ADDR ((uintptr_t) _L4_utcb (), VG_ADDR_BITS), l4_utcb_size ());
/* And the page at 0. */
- as_alloc_at (addr_chop (PTR_TO_ADDR (0), PAGESIZE_LOG2), 1);
+ as_alloc_at (vg_addr_chop (VG_PTR_TO_ADDR (0), PAGESIZE_LOG2), 1);
/* Now we add any additional descriptors that describe memory that
we have allocated in the meantime. */
@@ -628,14 +628,14 @@ as_init (void)
desc = &desc_additional[i];
debug (5, "Considering additional descriptor (%d): "
- ADDR_FMT "(" ADDR_FMT "), a %s",
- i, ADDR_PRINTF (desc->object), ADDR_PRINTF (desc->storage),
- cap_type_string (desc->type));
+ VG_ADDR_FMT "(" VG_ADDR_FMT "), a %s",
+ i, VG_ADDR_PRINTF (desc->object), VG_ADDR_PRINTF (desc->storage),
+ vg_cap_type_string (desc->type));
- assert (desc->type != cap_void);
- assert (! ADDR_IS_VOID (desc->storage));
+ assert (desc->type != vg_cap_void);
+ assert (! VG_ADDR_IS_VOID (desc->storage));
- if (! ADDR_EQ (desc->object, desc->storage))
+ if (! VG_ADDR_EQ (desc->object, desc->storage))
add (desc, desc->storage);
add (desc, desc->object);
}
@@ -645,30 +645,30 @@ as_init (void)
/* Walk the address space the hard way and make sure that we've got
everything. */
- int visit (addr_t addr,
- uintptr_t type, struct cap_properties properties,
+ int visit (vg_addr_t addr,
+ uintptr_t type, struct vg_cap_properties properties,
bool writable, void *cookie)
{
- debug (5, "Checking that " ADDR_FMT " is a %s",
- ADDR_PRINTF (addr), cap_type_string (type));
+ debug (5, "Checking that " VG_ADDR_FMT " is a %s",
+ VG_ADDR_PRINTF (addr), vg_cap_type_string (type));
- struct cap cap = as_cap_lookup (addr, -1, NULL);
+ struct vg_cap cap = as_cap_lookup (addr, -1, NULL);
assertx (cap.type == type,
"user: %s != kernel: %s",
- cap_type_string (cap.type), cap_type_string (type));
+ vg_cap_type_string (cap.type), vg_cap_type_string (type));
- struct cap_properties properties2 = CAP_PROPERTIES_GET (cap);
+ struct vg_cap_properties properties2 = VG_CAP_PROPERTIES_GET (cap);
assert (properties.policy.discardable == properties2.policy.discardable);
assertx (properties.policy.priority == properties2.policy.priority,
- ADDR_FMT "(%s) %d != %d",
- ADDR_PRINTF (addr), cap_type_string (type),
+ VG_ADDR_FMT "(%s) %d != %d",
+ VG_ADDR_PRINTF (addr), vg_cap_type_string (type),
properties.policy.priority, properties2.policy.priority);
assert (properties.addr_trans.raw == properties2.addr_trans.raw);
- if (type == cap_folio)
+ if (type == vg_cap_folio)
{
- processing_folio = FOLIO_OBJECTS;
+ processing_folio = VG_FOLIO_OBJECTS;
return 0;
}
@@ -690,11 +690,11 @@ as_init (void)
for (i = 0, desc = &__hurd_startup_data->descs[0];
i < __hurd_startup_data->desc_count;
i ++, desc ++)
- if (ADDR_EQ (desc->object,
- addr_chop (PTR_TO_ADDR (desc_additional), PAGESIZE_LOG2)))
+ if (VG_ADDR_EQ (desc->object,
+ vg_addr_chop (VG_PTR_TO_ADDR (desc_additional), PAGESIZE_LOG2)))
{
storage_free (desc->storage, false);
- as_free (addr_chop (PTR_TO_ADDR (desc_additional), PAGESIZE_LOG2), 1);
+ as_free (vg_addr_chop (VG_PTR_TO_ADDR (desc_additional), PAGESIZE_LOG2), 1);
break;
}
assert (i != __hurd_startup_data->desc_count);
@@ -726,8 +726,8 @@ as_alloced_dump (const char *prefix)
exited. For other non-zero values, the walk is aborted and that
value is returned. If the walk is not aborted, 0 is returned. */
int
-as_walk (int (*visit) (addr_t addr,
- uintptr_t type, struct cap_properties properties,
+as_walk (int (*visit) (vg_addr_t addr,
+ uintptr_t type, struct vg_cap_properties properties,
bool writable,
void *cookie),
int types,
@@ -740,8 +740,8 @@ as_walk (int (*visit) (addr_t addr,
/* We keep track of the child that we should visit at a
particular depth. If child[0] is 2, that means traverse the
root's object's child #2. */
- unsigned short child[1 + ADDR_BITS];
- assert (CAPPAGE_SLOTS_LOG2 < sizeof (child[0]) * 8);
+ unsigned short child[1 + VG_ADDR_BITS];
+ assert (VG_CAPPAGE_SLOTS_LOG2 < sizeof (child[0]) * 8);
/* Depth is the current level that we are visiting. If depth is
1, we are visiting the root object's children. */
@@ -749,16 +749,16 @@ as_walk (int (*visit) (addr_t addr,
child[0] = 0;
error_t err;
- struct cap_properties properties;
+ struct vg_cap_properties properties;
uintptr_t type;
/* Just caching the root capability cuts the number of RPCs by
about 25%. */
- struct cap_properties root_properties;
+ struct vg_cap_properties root_properties;
uintptr_t root_type;
- err = rm_cap_read (meta_data_activity, ADDR_VOID,
- ADDR (0, 0), &root_type, &root_properties);
+ err = rm_cap_read (meta_data_activity, VG_ADDR_VOID,
+ VG_ADDR (0, 0), &root_type, &root_properties);
assert (err == 0);
restart:
@@ -766,7 +766,7 @@ as_walk (int (*visit) (addr_t addr,
int slots_log2;
- addr_t addr = ADDR (0, 0);
+ vg_addr_t addr = VG_ADDR (0, 0);
bool writable = true;
int d;
@@ -779,31 +779,31 @@ as_walk (int (*visit) (addr_t addr,
}
else
{
- err = rm_cap_read (meta_data_activity, ADDR_VOID,
+ err = rm_cap_read (meta_data_activity, VG_ADDR_VOID,
addr, &type, &properties);
assert (err == 0);
}
addr
- = addr_extend (addr, CAP_ADDR_TRANS_GUARD (properties.addr_trans),
- CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans));
+ = vg_addr_extend (addr, VG_CAP_ADDR_TRANS_GUARD (properties.addr_trans),
+ VG_CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans));
switch (type)
{
- case cap_rcappage:
+ case vg_cap_rcappage:
writable = false;
/* Fall through. */
- case cap_cappage:
+ case vg_cap_cappage:
slots_log2
- = CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (properties.addr_trans);
+ = VG_CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (properties.addr_trans);
break;
- case cap_folio:
- slots_log2 = FOLIO_OBJECTS_LOG2;
+ case vg_cap_folio:
+ slots_log2 = VG_FOLIO_OBJECTS_LOG2;
break;
- case cap_thread:
- slots_log2 = THREAD_SLOTS_LOG2;
+ case vg_cap_thread:
+ slots_log2 = VG_THREAD_SLOTS_LOG2;
break;
- case cap_messenger:
+ case vg_cap_messenger:
slots_log2 = VG_MESSENGER_SLOTS_LOG2;
break;
default:
@@ -831,15 +831,15 @@ as_walk (int (*visit) (addr_t addr,
goto restart;
}
- addr = addr_extend (addr, child[d], slots_log2);
- err = rm_cap_read (meta_data_activity, ADDR_VOID,
+ addr = vg_addr_extend (addr, child[d], slots_log2);
+ err = rm_cap_read (meta_data_activity, VG_ADDR_VOID,
addr, &type, &properties);
assert (err == 0);
}
for (;;)
{
- err = rm_cap_read (meta_data_activity, ADDR_VOID,
+ err = rm_cap_read (meta_data_activity, VG_ADDR_VOID,
addr, &type, &properties);
if (err)
/* Dangling pointer. */
@@ -854,8 +854,8 @@ as_walk (int (*visit) (addr_t addr,
do_debug (5)
{
- s_printf ("Considering " ADDR_FMT "(%s): ",
- ADDR_PRINTF (addr), cap_type_string (type));
+ s_printf ("Considering " VG_ADDR_FMT "(%s): ",
+ VG_ADDR_PRINTF (addr), vg_cap_type_string (type));
int i;
for (i = 0; i < depth; i ++)
s_printf ("%s%d", i == 0 ? "" : " -> ", child[i]);
@@ -883,27 +883,27 @@ as_walk (int (*visit) (addr_t addr,
return r;
}
- if (addr_depth (addr)
- + CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans)
- > ADDR_BITS)
+ if (vg_addr_depth (addr)
+ + VG_CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans)
+ > VG_ADDR_BITS)
{
child[depth - 1] ++;
goto restart;
}
addr
- = addr_extend (addr, CAP_ADDR_TRANS_GUARD (properties.addr_trans),
- CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans));
+ = vg_addr_extend (addr, VG_CAP_ADDR_TRANS_GUARD (properties.addr_trans),
+ VG_CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans));
switch (type)
{
- case cap_rcappage:
- case cap_cappage:
+ case vg_cap_rcappage:
+ case vg_cap_cappage:
slots_log2
- = CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (properties.addr_trans);
+ = VG_CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (properties.addr_trans);
break;
- case cap_folio:
- slots_log2 = FOLIO_OBJECTS_LOG2;
+ case vg_cap_folio:
+ slots_log2 = VG_FOLIO_OBJECTS_LOG2;
break;
default:
if (depth == 0)
@@ -914,34 +914,34 @@ as_walk (int (*visit) (addr_t addr,
goto restart;
}
- if (addr_depth (addr) + slots_log2 > ADDR_BITS)
+ if (vg_addr_depth (addr) + slots_log2 > VG_ADDR_BITS)
{
child[depth - 1] ++;
goto restart;
}
/* Visit the first child. */
- addr = addr_extend (addr, 0, slots_log2);
+ addr = vg_addr_extend (addr, 0, slots_log2);
child[depth] = 0;
depth ++;
}
}
/* We have the shadow page tables and presumably a normal stack. */
- int do_walk (struct cap *cap, addr_t addr, bool writable)
+ int do_walk (struct vg_cap *cap, vg_addr_t addr, bool writable)
{
uintptr_t type;
- struct cap_properties cap_properties;
+ struct vg_cap_properties vg_cap_properties;
type = cap->type;
- cap_properties = CAP_PROPERTIES_GET (*cap);
+ vg_cap_properties = VG_CAP_PROPERTIES_GET (*cap);
- debug (5, ADDR_FMT " (%s)", ADDR_PRINTF (addr), cap_type_string (type));
+ debug (5, VG_ADDR_FMT " (%s)", VG_ADDR_PRINTF (addr), vg_cap_type_string (type));
int r;
if (((1 << type) & types))
{
- r = visit (addr, type, cap_properties, writable, cookie);
+ r = visit (addr, type, vg_cap_properties, writable, cookie);
if (r == -1)
/* Don't go deeper. */
return 0;
@@ -949,49 +949,49 @@ as_walk (int (*visit) (addr_t addr,
return r;
}
- if (addr_depth (addr)
- + CAP_ADDR_TRANS_GUARD_BITS (cap_properties.addr_trans)
- > ADDR_BITS)
+ if (vg_addr_depth (addr)
+ + VG_CAP_ADDR_TRANS_GUARD_BITS (vg_cap_properties.addr_trans)
+ > VG_ADDR_BITS)
return 0;
addr
- = addr_extend (addr, CAP_ADDR_TRANS_GUARD (cap_properties.addr_trans),
- CAP_ADDR_TRANS_GUARD_BITS (cap_properties.addr_trans));
+ = vg_addr_extend (addr, VG_CAP_ADDR_TRANS_GUARD (vg_cap_properties.addr_trans),
+ VG_CAP_ADDR_TRANS_GUARD_BITS (vg_cap_properties.addr_trans));
int slots_log2 = 0;
switch (type)
{
- case cap_cappage:
- case cap_rcappage:
- if (type == cap_rcappage)
+ case vg_cap_cappage:
+ case vg_cap_rcappage:
+ if (type == vg_cap_rcappage)
writable = false;
slots_log2
- = CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (cap_properties.addr_trans);
+ = VG_CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (vg_cap_properties.addr_trans);
break;
- case cap_folio:
- slots_log2 = FOLIO_OBJECTS_LOG2;
+ case vg_cap_folio:
+ slots_log2 = VG_FOLIO_OBJECTS_LOG2;
break;
default:
return 0;
}
- if (addr_depth (addr) + slots_log2 > ADDR_BITS)
+ if (vg_addr_depth (addr) + slots_log2 > VG_ADDR_BITS)
return 0;
struct object *shadow = NULL;
if (as_init_done)
- shadow = cap_to_object (meta_data_activity, cap);
+ shadow = vg_cap_to_object (meta_data_activity, cap);
int i;
for (i = 0; i < (1 << slots_log2); i ++)
{
- struct cap *object = NULL;
+ struct vg_cap *object = NULL;
if (as_init_done)
object = &shadow->caps[i];
- r = do_walk (object, addr_extend (addr, i, slots_log2), writable);
+ r = do_walk (object, vg_addr_extend (addr, i, slots_log2), writable);
if (r)
return r;
}
@@ -999,5 +999,5 @@ as_walk (int (*visit) (addr_t addr,
return 0;
}
- return do_walk (&shadow_root, ADDR (0, 0), true);
+ return do_walk (&shadow_root, VG_ADDR (0, 0), true);
}
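As an illustration of the renamed as_walk interface — a sketch, not part of this patch — a visitor that counts cappage slots could look like this (the int-counter cookie is an assumption):

static int
count_cappages (vg_addr_t addr, uintptr_t type,
                struct vg_cap_properties properties,
                bool writable, void *cookie)
{
  /* COOKIE is assumed to point at an int counter.  */
  int *count = cookie;
  (*count) ++;
  /* Return 0 to continue the walk; -1 would mean "don't descend".  */
  return 0;
}

/* Usage: restrict the walk to cappages via the TYPES bitmask:
   as_walk (count_cappages,
            (1 << vg_cap_cappage) | (1 << vg_cap_rcappage),
            &count);  */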
diff --git a/libhurd-mm/as.h b/libhurd-mm/as.h
index 8343fdd..b771fe7 100644
--- a/libhurd-mm/as.h
+++ b/libhurd-mm/as.h
@@ -35,11 +35,11 @@
are allocated as well. */
/* Allocate COUNT contiguous subtrees such that the root's depth of
- each is at least ADDR_BITS - WIDTH. If DATA_MAPPABLE is true, then
+ each is at least VG_ADDR_BITS - WIDTH. If DATA_MAPPABLE is true, then
ensures that the leaves of each subtree are mappable in the region
accessible to data instructions. On success returns the address of
- the first subtree. Otherwise, returns ADDR_VOID. */
-extern addr_t as_alloc (int width, uint64_t count,
+ the first subtree. Otherwise, returns VG_ADDR_VOID. */
+extern vg_addr_t as_alloc (int width, uint64_t count,
bool data_mappable);
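A usage sketch of the renamed allocator — editorial, with an assumed four-page window size: reserve a data-mappable region, then release it with as_free.

  vg_addr_t window = as_alloc (PAGESIZE_LOG2 + 2, 1, true);
  if (VG_ADDR_IS_VOID (window))
    panic ("Out of address space.");
  /* ... install storage under WINDOW ... */
  as_free (window, 1);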
/* Like as_alloc but may be called before as_init is called. Address
@@ -49,14 +49,14 @@ extern struct hurd_object_desc *as_alloc_slow (int width);
/* Allocate the COUNT contiguous addresses starting at address ADDR.
Returns true on success, false otherwise. */
-extern bool as_alloc_at (addr_t addr, uint64_t count);
+extern bool as_alloc_at (vg_addr_t addr, uint64_t count);
-/* Free the COUNT contiguous addresses starting at ADDR. Each ADDR
+/* Free the COUNT contiguous addresses starting at VG_ADDR. Each ADDR
must have been previously returned by a call to as_chunk_alloc or
as_region_alloc. Addresses returned by a single call to as_chunk_alloc
or as_region_alloc need not all be freed by a single call to
as_free. */
-extern void as_free (addr_t addr, uint64_t count);
+extern void as_free (vg_addr_t addr, uint64_t count);
/* Whether as_init has completed. */
extern bool as_init_done;
@@ -187,7 +187,7 @@ as_unlock (void)
extern activity_t meta_data_activity;
/* The root of the shadow page tables. */
-extern struct cap shadow_root;
+extern struct vg_cap shadow_root;
#endif
#if defined (RM_INTERN) || defined (NDEBUG)
@@ -201,7 +201,7 @@ extern struct cap shadow_root;
do \
{ \
uintptr_t __acs_type = -1; \
- struct cap_properties __acs_p; \
+ struct vg_cap_properties __acs_p; \
error_t __acs_err; \
\
__acs_err = rm_cap_read (meta_data_activity, \
@@ -211,7 +211,7 @@ extern struct cap shadow_root;
bool die = false; \
if (__acs_err) \
die = true; \
- else if (__acs_type == cap_void) \
+ else if (__acs_type == vg_cap_void) \
/* The kernel's type is void. Either the shadow has not yet \
been updated or the object is dead. */ \
; \
@@ -221,26 +221,26 @@ extern struct cap shadow_root;
&& (!!__acs_p.policy.discardable \
== !!(__acs_cap)->discardable))) \
die = true; \
- else if ((__acs_type == cap_cappage || __acs_type == cap_rcappage) \
+ else if ((__acs_type == vg_cap_cappage || __acs_type == vg_cap_rcappage) \
&& __acs_p.addr_trans.raw != (__acs_cap)->addr_trans.raw) \
die = true; \
\
if (die) \
{ \
debug (0, \
- ADDR_FMT "@" ADDR_FMT ": err: %d; type: %s =? %s; " \
+ VG_ADDR_FMT "@" VG_ADDR_FMT ": err: %d; type: %s =? %s; " \
"guard: %lld/%d =? %lld/%d; subpage: %d/%d =? %d/%d; " \
"priority: %d =? %d; discardable: %d =? %d", \
- ADDR_PRINTF ((__acs_root_addr)), ADDR_PRINTF ((__acs_addr)), \
+ VG_ADDR_PRINTF ((__acs_root_addr)), VG_ADDR_PRINTF ((__acs_addr)), \
__acs_err, \
- cap_type_string ((__acs_cap)->type), \
- cap_type_string (__acs_type), \
- CAP_GUARD ((__acs_cap)), CAP_GUARD_BITS ((__acs_cap)), \
- CAP_ADDR_TRANS_GUARD (__acs_p.addr_trans), \
- CAP_ADDR_TRANS_GUARD_BITS (__acs_p.addr_trans), \
- CAP_SUBPAGE ((__acs_cap)), CAP_SUBPAGES_LOG2 ((__acs_cap)), \
- CAP_ADDR_TRANS_SUBPAGE (__acs_p.addr_trans), \
- CAP_ADDR_TRANS_SUBPAGES_LOG2 (__acs_p.addr_trans), \
+ vg_cap_type_string ((__acs_cap)->type), \
+ vg_cap_type_string (__acs_type), \
+ VG_CAP_GUARD ((__acs_cap)), VG_CAP_GUARD_BITS ((__acs_cap)), \
+ VG_CAP_ADDR_TRANS_GUARD (__acs_p.addr_trans), \
+ VG_CAP_ADDR_TRANS_GUARD_BITS (__acs_p.addr_trans), \
+ VG_CAP_SUBPAGE ((__acs_cap)), VG_CAP_SUBPAGES_LOG2 ((__acs_cap)), \
+ VG_CAP_ADDR_TRANS_SUBPAGE (__acs_p.addr_trans), \
+ VG_CAP_ADDR_TRANS_SUBPAGES_LOG2 (__acs_p.addr_trans), \
(__acs_cap)->priority, __acs_p.policy.priority, \
!!(__acs_cap)->discardable, !!__acs_p.policy.discardable); \
{ \
@@ -258,7 +258,7 @@ extern struct cap shadow_root;
do \
{ \
if ((__acs_root_cap) == &shadow_root) \
- AS_CHECK_SHADOW(ADDR_VOID, (__acs_addr), (__acs_cap), \
+ AS_CHECK_SHADOW(VG_ADDR_VOID, (__acs_addr), (__acs_cap), \
(__acs_code)); \
} \
while (0)
@@ -266,21 +266,21 @@ extern struct cap shadow_root;
struct as_allocate_pt_ret
{
- struct cap cap;
- addr_t storage;
+ struct vg_cap cap;
+ vg_addr_t storage;
};
/* Page table allocator used by as_build. */
-typedef struct as_allocate_pt_ret (*as_allocate_page_table_t) (addr_t addr);
+typedef struct as_allocate_pt_ret (*as_allocate_page_table_t) (vg_addr_t addr);
-/* Default page table allocator. Allocates a cap_cappage and the
+/* Default page table allocator. Allocates a vg_cap_cappage and the
accompanying shadow page table. */
-extern struct as_allocate_pt_ret as_allocate_page_table (addr_t addr);
+extern struct as_allocate_pt_ret as_allocate_page_table (vg_addr_t addr);
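To show the renamed hook type in use — a hypothetical wrapper, not part of this patch — a custom as_allocate_page_table_t can simply defer to the default allocator:

static struct as_allocate_pt_ret
my_allocate_page_table (vg_addr_t addr)
{
  /* Log the request, then fall back to the default cappage + shadow
     allocation.  */
  debug (5, "page table for " VG_ADDR_FMT, VG_ADDR_PRINTF (addr));
  return as_allocate_page_table (addr);
}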
/* Build up the address space, which is root at AS_ROOT_ADDR (and
shadowed by AS_ROOT_CAP), such that there is a capability slot at
- address ADDR. Return the shadow capability.
+ address VG_ADDR. Return the shadow capability.
If MAY_OVERWRITE is true, the function is permitted to overwrite an
existing capability. Otherwise, only capability slots containing a
@@ -294,9 +294,9 @@ extern struct as_allocate_pt_ret as_allocate_page_table (addr_t addr);
Must be called with a write lock on AS_LOCK. Must be called with
8kb of stack that will not fault. */
-struct cap *as_build (activity_t activity,
- addr_t as_root_addr, struct cap *as_root_cap,
- addr_t addr,
+struct vg_cap *as_build (activity_t activity,
+ vg_addr_t as_root_addr, struct vg_cap *as_root_cap,
+ vg_addr_t addr,
as_allocate_page_table_t allocate_page_table,
bool may_overwrite);
@@ -306,21 +306,21 @@ struct cap *as_build (activity_t activity,
is implicit (in the case of a folio), return a fabricated
capability in *FAKE_SLOT and return FAKE_SLOT. Return NULL on
failure. */
-typedef struct cap *(*as_object_index_t) (activity_t activity,
- struct cap *pt,
- addr_t pt_addr, int idx,
- struct cap *fake_slot);
+typedef struct vg_cap *(*as_object_index_t) (activity_t activity,
+ struct vg_cap *pt,
+ vg_addr_t pt_addr, int idx,
+ struct vg_cap *fake_slot);
/* Like as_build, but using a custom shadow page table
implementation. */
-struct cap *as_build_custom (activity_t activity,
- addr_t as_root_addr, struct cap *as_root_cap,
- addr_t addr,
+struct vg_cap *as_build_custom (activity_t activity,
+ vg_addr_t as_root_addr, struct vg_cap *as_root_cap,
+ vg_addr_t addr,
as_allocate_page_table_t allocate_page_table,
as_object_index_t object_index,
bool may_overwrite);
-/* Ensure that the slot designated by ADDR in the address space rooted
+/* Ensure that the slot designated by VG_ADDR in the address space rooted
at AS_ROOT_ADDR (which is shadowed by AS_ROOT_CAP) is accessible by
allocating any required page tables and rearranging the address
space as necessary. Execute CODE (with AS_LOCK held) with the
@@ -334,15 +334,15 @@ struct cap *as_build_custom (activity_t activity,
do \
{ \
activity_t __asef_activity = (__asef_activity_); \
- addr_t __asef_as_root_addr = (__asef_as_root_addr_); \
- struct cap *__asef_as_root_cap = (__asef_as_root_cap_); \
- addr_t __asef_addr = (__asef_addr_); \
+ vg_addr_t __asef_as_root_addr = (__asef_as_root_addr_); \
+ struct vg_cap *__asef_as_root_cap = (__asef_as_root_cap_); \
+ vg_addr_t __asef_addr = (__asef_addr_); \
as_allocate_page_table_t __asef_allocate_page_table \
= (__asef_allocate_page_table_); \
\
as_lock (); \
\
- struct cap *slot = as_build (__asef_activity, \
+ struct vg_cap *slot = as_build (__asef_activity, \
__asef_as_root_addr, \
__asef_as_root_cap, \
__asef_addr, \
@@ -365,10 +365,10 @@ struct cap *as_build_custom (activity_t activity,
{ \
assert (as_init_done); \
\
- addr_t __ase_as_addr = (__ase_as_addr_); \
+ vg_addr_t __ase_as_addr = (__ase_as_addr_); \
\
as_ensure_full (meta_data_activity, \
- ADDR_VOID, &shadow_root, \
+ VG_ADDR_VOID, &shadow_root, \
__ase_as_addr, \
as_allocate_page_table, \
(__ase_code)); \
@@ -378,7 +378,7 @@ struct cap *as_build_custom (activity_t activity,
/* Like as_ensure_use, but does not execute any code. */
#define as_ensure(__ae_addr) \
as_ensure_full (meta_data_activity, \
- ADDR_VOID, &shadow_root, __ae_addr, \
+ VG_ADDR_VOID, &shadow_root, __ae_addr, \
as_allocate_page_table, \
({;}))
#endif
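The typical pairing of these renamed calls — ensure a slot exists, then allocate storage directly into it — mirrors the alloc helper in exceptions.c below. A sketch (the helper name and panic message are assumptions):

static void
back_with_page (void *page)
{
  vg_addr_t slot = vg_addr_chop (VG_PTR_TO_ADDR (page), PAGESIZE_LOG2);
  as_ensure (slot);

  struct storage storage
    = storage_alloc (VG_ADDR_VOID, vg_cap_page, STORAGE_LONG_LIVED,
                     VG_OBJECT_POLICY_DEFAULT, slot);
  if (VG_ADDR_IS_VOID (storage.addr))
    panic ("Failed to allocate backing page.");
}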
@@ -397,10 +397,10 @@ struct cap *as_build_custom (activity_t activity,
accompanying shadow page tables. See as_build for details. */
static inline void
as_insert_full (activity_t activity,
- addr_t target_as_root_addr, struct cap *target_as_root_cap,
- addr_t target_addr,
- addr_t source_as_root_addr,
- addr_t source_addr, struct cap source_cap,
+ vg_addr_t target_as_root_addr, struct vg_cap *target_as_root_cap,
+ vg_addr_t target_addr,
+ vg_addr_t source_as_root_addr,
+ vg_addr_t source_addr, struct vg_cap source_cap,
as_allocate_page_table_t allocate_page_table)
{
AS_CHECK_SHADOW (source_as_root_addr, source_addr, &source_cap, {});
@@ -411,7 +411,7 @@ as_insert_full (activity_t activity,
allocate_page_table,
({
bool ret;
- ret = cap_copy (activity,
+ ret = vg_cap_copy (activity,
target_as_root_addr,
slot,
target_addr,
@@ -419,24 +419,24 @@ as_insert_full (activity_t activity,
source_cap,
source_addr);
assertx (ret,
- ADDR_FMT "@" ADDR_FMT
- " <- " ADDR_FMT "@" ADDR_FMT " (" CAP_FMT ")",
- ADDR_PRINTF (target_as_root_addr),
- ADDR_PRINTF (target_addr),
- ADDR_PRINTF (source_as_root_addr),
- ADDR_PRINTF (source_addr),
- CAP_PRINTF (&source_cap));
+ VG_ADDR_FMT "@" VG_ADDR_FMT
+ " <- " VG_ADDR_FMT "@" VG_ADDR_FMT " (" VG_CAP_FMT ")",
+ VG_ADDR_PRINTF (target_as_root_addr),
+ VG_ADDR_PRINTF (target_addr),
+ VG_ADDR_PRINTF (source_as_root_addr),
+ VG_ADDR_PRINTF (source_addr),
+ VG_CAP_PRINTF (&source_cap));
}));
}
#ifndef RM_INTERN
static inline void
-as_insert (addr_t target_addr,
- addr_t source_addr, struct cap source_cap)
+as_insert (vg_addr_t target_addr,
+ vg_addr_t source_addr, struct vg_cap source_cap)
{
as_insert_full (meta_data_activity,
- ADDR_VOID, &shadow_root, target_addr,
- ADDR_VOID, source_addr, source_cap,
+ VG_ADDR_VOID, &shadow_root, target_addr,
+ VG_ADDR_VOID, source_addr, source_cap,
as_allocate_page_table);
}
#endif
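A sketch of the renamed as_insert (editorial; install_page is a hypothetical helper): allocate anonymous storage, then copy its capability into a target slot, letting as_insert build any intermediate page tables.

static void
install_page (vg_addr_t target)
{
  struct storage storage
    = storage_alloc (meta_data_activity, vg_cap_page, STORAGE_LONG_LIVED,
                     VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
  if (VG_ADDR_IS_VOID (storage.addr))
    panic ("Out of space.");

  as_insert (target, storage.addr, *storage.cap);
}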
@@ -445,26 +445,26 @@ as_insert (addr_t target_addr,
#ifndef RM_INTERN
/* Variant of as_ensure_full that doesn't assume the default shadow
page table format but calls OBJECT_INDEX to index objects. */
-extern struct cap *as_ensure_full_custom
+extern struct vg_cap *as_ensure_full_custom
(activity_t activity,
- addr_t as, struct cap *root, addr_t addr,
+ vg_addr_t as, struct vg_cap *root, vg_addr_t addr,
as_allocate_page_table_t allocate_page_table,
as_object_index_t object_index);
/* Variant of as_insert that doesn't assume the default shadow page
table format but calls OBJECT_INDEX to index objects. */
-extern struct cap *as_insert_custom
+extern struct vg_cap *as_insert_custom
(activity_t activity,
- addr_t target_as, struct cap *t_as_cap, addr_t target,
- addr_t source_as, struct cap c_cap, addr_t source,
+ vg_addr_t target_as, struct vg_cap *t_as_cap, vg_addr_t target,
+ vg_addr_t source_as, struct vg_cap c_cap, vg_addr_t source,
as_allocate_page_table_t allocate_page_table,
as_object_index_t object_index);
#endif
union as_lookup_ret
{
- struct cap cap;
- struct cap *capp;
+ struct vg_cap cap;
+ struct vg_cap *capp;
};
enum as_lookup_mode
@@ -497,8 +497,8 @@ enum as_lookup_mode
On success, whether the slot or the object is writable is returned
in *WRITABLE. */
extern bool as_lookup_rel (activity_t activity,
- struct cap *as_root_cap, addr_t addr,
- enum cap_type type, bool *writable,
+ struct vg_cap *as_root_cap, vg_addr_t addr,
+ enum vg_cap_type type, bool *writable,
enum as_lookup_mode mode,
union as_lookup_ret *ret);
@@ -513,8 +513,8 @@ extern bool as_lookup_rel (activity_t activity,
__alru_code) \
({ \
activity_t __alru_activity = (__alru_activity_); \
- struct cap *__alru_root = (__alru_root_); \
- addr_t __alru_addr = (__alru_addr_); \
+ struct vg_cap *__alru_root = (__alru_root_); \
+ vg_addr_t __alru_addr = (__alru_addr_); \
\
union as_lookup_ret __alru_ret_val; \
\
@@ -527,7 +527,7 @@ extern bool as_lookup_rel (activity_t activity,
&__alru_ret_val); \
if (__alru_ret) \
{ \
- struct cap *slot __attribute__ ((unused)) = __alru_ret_val.capp; \
+ struct vg_cap *slot __attribute__ ((unused)) = __alru_ret_val.capp; \
(__alru_code); \
\
AS_CHECK_SHADOW2(__alru_root, __alru_addr, slot, {}); \
@@ -553,15 +553,15 @@ extern bool as_lookup_rel (activity_t activity,
space rooted by ROOT.
TYPE is the required type. If the type is incompatible
- (cap_rcappage => cap_cappage and cap_rpage => cap_page), bails. If
+ (vg_cap_rcappage => vg_cap_cappage and vg_cap_rpage => vg_cap_page), bails. If
TYPE is -1, then any type is acceptable. May cause paging. If
non-NULL, returns whether the slot is writable in *WRITABLE.
This function locks (and unlocks) as_lock. */
-static inline struct cap
+static inline struct vg_cap
as_cap_lookup_rel (activity_t activity,
- struct cap *root, addr_t addr,
- enum cap_type type, bool *writable)
+ struct vg_cap *root, vg_addr_t addr,
+ enum vg_cap_type type, bool *writable)
{
union as_lookup_ret ret_val;
@@ -576,15 +576,15 @@ as_cap_lookup_rel (activity_t activity,
as_unlock ();
if (! ret)
- return (struct cap) { .type = cap_void };
+ return (struct vg_cap) { .type = vg_cap_void };
return ret_val.cap;
}
#ifndef RM_INTERN
-static inline struct cap
-as_cap_lookup (addr_t addr, enum cap_type type, bool *writable)
+static inline struct vg_cap
+as_cap_lookup (vg_addr_t addr, enum vg_cap_type type, bool *writable)
{
return as_cap_lookup_rel (meta_data_activity,
&shadow_root, addr, -1, writable);
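For example — a sketch under the renamed interface, with is_thread a hypothetical helper — a caller can inspect what an address designates:

static bool
is_thread (vg_addr_t addr)
{
  /* -1: accept any type; the returned capability's type tells us
     what the address actually designates.  */
  struct vg_cap cap = as_cap_lookup (addr, -1, NULL);
  return cap.type == vg_cap_thread;
}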
@@ -598,15 +598,15 @@ as_cap_lookup (addr_t addr, enum cap_type type, bool *writable)
than the object itself.
TYPE is the required type. If the type is incompatible
- (cap_rcappage => cap_cappage and cap_rpage => cap_page), bails. If
+ (vg_cap_rcappage => vg_cap_cappage and vg_cap_rpage => vg_cap_page), bails. If
TYPE is -1, then any type is acceptable. May cause paging. If
non-NULL, returns whether the object is writable in *WRITABLE.
This function locks (and unlocks) as_lock. */
-static inline struct cap
+static inline struct vg_cap
as_object_lookup_rel (activity_t activity,
- struct cap *root, addr_t addr,
- enum cap_type type, bool *writable)
+ struct vg_cap *root, vg_addr_t addr,
+ enum vg_cap_type type, bool *writable)
{
union as_lookup_ret ret_val;
@@ -621,15 +621,15 @@ as_object_lookup_rel (activity_t activity,
as_unlock ();
if (! ret)
- return (struct cap) { .type = cap_void };
+ return (struct vg_cap) { .type = vg_cap_void };
return ret_val.cap;
}
#ifndef RM_INTERN
-static inline struct cap
-as_object_lookup (addr_t addr, enum cap_type type, bool *writable)
+static inline struct vg_cap
+as_object_lookup (vg_addr_t addr, enum vg_cap_type type, bool *writable)
{
return as_object_lookup_rel (meta_data_activity,
&shadow_root, addr, -1, writable);
@@ -638,11 +638,11 @@ as_object_lookup (addr_t addr, enum cap_type type, bool *writable)
/* Print the path taken to get to the slot at address ADDRESS. */
extern void as_dump_path_rel (activity_t activity,
- struct cap *root, addr_t addr);
+ struct vg_cap *root, vg_addr_t addr);
#ifndef RM_INTERN
static inline void
-as_dump_path (addr_t addr)
+as_dump_path (vg_addr_t addr)
{
as_dump_path_rel (meta_data_activity, &shadow_root, addr);
}
@@ -655,16 +655,16 @@ as_dump_path (addr_t addr)
properties. WRITABLE is whether the slot is writable. If VISIT
returns a non-zero value, the walk is aborted and that value is
returned. If the walk is not aborted, 0 is returned. */
-extern int as_walk (int (*visit) (addr_t cap,
+extern int as_walk (int (*visit) (vg_addr_t cap,
uintptr_t type,
- struct cap_properties properties,
+ struct vg_cap_properties properties,
bool writable,
void *cookie),
int types,
void *cookie);
/* AS_LOCK must not be held. */
-extern void as_dump_from (activity_t activity, struct cap *root,
+extern void as_dump_from (activity_t activity, struct vg_cap *root,
const char *prefix);
#ifndef RM_INTERN
diff --git a/libhurd-mm/capalloc.c b/libhurd-mm/capalloc.c
index 998b123..56715e4 100644
--- a/libhurd-mm/capalloc.c
+++ b/libhurd-mm/capalloc.c
@@ -36,10 +36,10 @@
struct cappage_desc
{
- addr_t cappage;
- struct cap *cap;
+ vg_addr_t cappage;
+ struct vg_cap *cap;
- unsigned char alloced[CAPPAGE_SLOTS / 8];
+ unsigned char alloced[VG_CAPPAGE_SLOTS / 8];
unsigned short free;
pthread_mutex_t lock;
@@ -71,15 +71,15 @@ list_unlink (struct cappage_desc *e)
}
static int
-addr_compare (const addr_t *a, const addr_t *b)
+addr_compare (const vg_addr_t *a, const vg_addr_t *b)
{
- if (addr_prefix (*a) < addr_prefix (*b))
+ if (vg_addr_prefix (*a) < vg_addr_prefix (*b))
return -1;
- return addr_prefix (*a) != addr_prefix (*b);
+ return vg_addr_prefix (*a) != vg_addr_prefix (*b);
}
BTREE_CLASS (cappage_desc, struct cappage_desc,
- addr_t, cappage, node, addr_compare, false)
+ vg_addr_t, cappage, node, addr_compare, false)
static pthread_mutex_t cappage_descs_lock = PTHREAD_MUTEX_INITIALIZER;
static hurd_btree_cappage_desc_t cappage_descs;
@@ -91,11 +91,11 @@ cappage_desc_slab_alloc (void *hook, size_t size, void **ptr)
assert (size == PAGESIZE);
struct storage storage = storage_alloc (meta_data_activity,
- cap_page, STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ vg_cap_page, STORAGE_LONG_LIVED,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of storage");
- *ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ *ptr = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
return 0;
}
@@ -105,7 +105,7 @@ cappage_desc_slab_dealloc (void *hook, void *buffer, size_t size)
{
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
@@ -135,7 +135,7 @@ cappage_desc_free (struct cappage_desc *storage)
struct cappage_desc *nonempty;
-addr_t
+vg_addr_t
capalloc (void)
{
/* Find an appropriate storage area. */
@@ -170,12 +170,12 @@ capalloc (void)
/* As there is such a large number of caps per cappage, we
expect that the page will be long lived. */
struct storage storage = storage_alloc (meta_data_activity,
- cap_cappage, STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ vg_cap_cappage, STORAGE_LONG_LIVED,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
{
cappage_desc_free (area);
- return ADDR_VOID;
+ return VG_ADDR_VOID;
}
area->lock = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER;
@@ -186,29 +186,29 @@ capalloc (void)
/* Then, allocate the shadow object. */
struct storage shadow_storage
- = storage_alloc (meta_data_activity, cap_page,
- STORAGE_LONG_LIVED, OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (shadow_storage.addr))
+ = storage_alloc (meta_data_activity, vg_cap_page,
+ STORAGE_LONG_LIVED, VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (shadow_storage.addr))
{
/* No memory. */
storage_free (area->cappage, false);
cappage_desc_free (area);
- return ADDR_VOID;
+ return VG_ADDR_VOID;
}
- struct object *shadow = ADDR_TO_PTR (addr_extend (shadow_storage.addr,
+ struct object *shadow = VG_ADDR_TO_PTR (vg_addr_extend (shadow_storage.addr,
0, PAGESIZE_LOG2));
memset (shadow, 0, PAGESIZE);
- cap_set_shadow (area->cap, shadow);
+ vg_cap_set_shadow (area->cap, shadow);
memset (&area->alloced, 0, sizeof (area->alloced));
- area->free = CAPPAGE_SLOTS;
+ area->free = VG_CAPPAGE_SLOTS;
}
int idx = bit_alloc (area->alloced, sizeof (area->alloced), 0);
assert (idx != -1);
- addr_t addr = addr_extend (area->cappage, idx, CAPPAGE_SLOTS_LOG2);
+ vg_addr_t addr = vg_addr_extend (area->cappage, idx, VG_CAPPAGE_SLOTS_LOG2);
area->free --;
if (area->free == 0)
@@ -241,9 +241,9 @@ capalloc (void)
}
void
-capfree (addr_t cap)
+capfree (vg_addr_t cap)
{
- addr_t cappage = addr_chop (cap, CAPPAGE_SLOTS_LOG2);
+ vg_addr_t cappage = vg_addr_chop (cap, VG_CAPPAGE_SLOTS_LOG2);
struct cappage_desc *desc;
@@ -252,14 +252,14 @@ capfree (addr_t cap)
assert (desc);
pthread_mutex_lock (&desc->lock);
- bit_dealloc (desc->alloced, addr_extract (cap, CAPPAGE_SLOTS_LOG2));
+ bit_dealloc (desc->alloced, vg_addr_extract (cap, VG_CAPPAGE_SLOTS_LOG2));
desc->free ++;
if (desc->free == 1)
/* The cappage is no longer full. Add it back to the list of
nonempty cappages. */
list_link (&nonempty, desc);
- else if (desc->free == CAPPAGE_SLOTS)
+ else if (desc->free == VG_CAPPAGE_SLOTS)
/* No slots in the cappage are allocated. Free it if there is at
least one cappage on NONEMPTY. */
{
@@ -270,12 +270,12 @@ capfree (addr_t cap)
list_unlink (desc);
pthread_mutex_unlock (&cappage_descs_lock);
- struct object *shadow = cap_get_shadow (desc->cap);
- storage_free (addr_chop (PTR_TO_ADDR (shadow), PAGESIZE_LOG2),
+ struct object *shadow = vg_cap_get_shadow (desc->cap);
+ storage_free (vg_addr_chop (VG_PTR_TO_ADDR (shadow), PAGESIZE_LOG2),
false);
- cap_set_shadow (desc->cap, NULL);
+ vg_cap_set_shadow (desc->cap, NULL);
- desc->cap->type = cap_void;
+ desc->cap->type = vg_cap_void;
cappage_desc_free (desc);
diff --git a/libhurd-mm/capalloc.h b/libhurd-mm/capalloc.h
index 5583f3f..f71e032 100644
--- a/libhurd-mm/capalloc.h
+++ b/libhurd-mm/capalloc.h
@@ -25,9 +25,9 @@
#include <viengoos/addr.h>
/* Allocate a capability slot. */
-extern addr_t capalloc (void);
+extern vg_addr_t capalloc (void);
/* Free a capability previously allocated by capalloc. */
-extern void capfree (addr_t cap);
+extern void capfree (vg_addr_t cap);
#endif /* _HURD_CAP_ALLOC_H */
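A hedged usage sketch of the renamed slot allocator (copy_into_fresh_slot is hypothetical; the rm_cap_copy argument pattern follows the calls elsewhere in this patch):

static vg_addr_t
copy_into_fresh_slot (vg_addr_t source)
{
  vg_addr_t slot = capalloc ();
  if (VG_ADDR_IS_VOID (slot))
    panic ("Out of capability slots.");

  error_t err = rm_cap_copy (meta_data_activity, VG_ADDR_VOID, slot,
                             VG_ADDR_VOID, source,
                             0, VG_CAP_PROPERTIES_DEFAULT);
  if (err)
    panic ("failed to copy capability: %d", err);

  return slot;
}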
diff --git a/libhurd-mm/exceptions.c b/libhurd-mm/exceptions.c
index 73d9a9f..42dfacb 100644
--- a/libhurd-mm/exceptions.c
+++ b/libhurd-mm/exceptions.c
@@ -141,8 +141,8 @@ hurd_activation_fetch (void)
/* Any reply will come in the form of a pending activation being
delivered. This RPC does not generate a response. */
- error_t err = rm_thread_activation_collect_send (ADDR_VOID, ADDR_VOID,
- ADDR_VOID);
+ error_t err = rm_thread_activation_collect_send (VG_ADDR_VOID, VG_ADDR_VOID,
+ VG_ADDR_VOID);
if (err)
panic ("Sending thread_activation_collect failed: %d", err);
}
@@ -199,9 +199,9 @@ activation_frame_slab_alloc (void *hook, size_t size, void **ptr)
assert (size == PAGESIZE);
struct storage storage = storage_alloc (meta_data_activity,
- cap_page, STORAGE_EPHEMERAL,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- *ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ vg_cap_page, STORAGE_EPHEMERAL,
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ *ptr = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
return 0;
}
@@ -211,7 +211,7 @@ activation_frame_slab_dealloc (void *hook, void *buffer, size_t size)
{
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
@@ -342,10 +342,10 @@ hurd_activation_handler_normal (struct activation_frame *activation_frame,
{
case ACTIVATION_fault:
{
- addr_t fault;
+ vg_addr_t fault;
uintptr_t ip;
uintptr_t sp;
- struct activation_fault_info info;
+ struct vg_activation_fault_info info;
error_t err;
err = activation_fault_send_unmarshal (mb->reply,
@@ -354,10 +354,10 @@ hurd_activation_handler_normal (struct activation_frame *activation_frame,
if (err)
panic ("Failed to unmarshal exception: %d", err);
- debug (5, "Fault at " ADDR_FMT " (ip: %p, sp: %p, eax: %p, "
+ debug (5, "Fault at " VG_ADDR_FMT " (ip: %p, sp: %p, eax: %p, "
"ebx: %p, ecx: %p, edx: %p, edi: %p, esi: %p, ebp: %p, "
"eflags: %p)",
- ADDR_PRINTF (fault),
+ VG_ADDR_PRINTF (fault),
(void *) ip, (void *) sp,
(void *) activation_frame->eax,
(void *) activation_frame->ebx,
@@ -375,7 +375,7 @@ hurd_activation_handler_normal (struct activation_frame *activation_frame,
r = map_fault (fault, ip, info);
if (! r)
{
- uintptr_t f = (uintptr_t) ADDR_TO_PTR (fault);
+ uintptr_t f = (uintptr_t) VG_ADDR_TO_PTR (fault);
struct hurd_fault_catcher *catcher;
for (catcher = utcb->catchers; catcher; catcher = catcher->next)
{
@@ -404,10 +404,10 @@ hurd_activation_handler_normal (struct activation_frame *activation_frame,
if (as_rwlock_owner == l4_myself ())
debug (0, "I hold as_rwlock!");
- debug (0, "SIGSEGV at " ADDR_FMT " "
+ debug (0, "SIGSEGV at " VG_ADDR_FMT " "
"(ip: %p, sp: %p, eax: %p, ebx: %p, ecx: %p, "
"edx: %p, edi: %p, esi: %p, ebp: %p, eflags: %p)",
- ADDR_PRINTF (fault),
+ VG_ADDR_PRINTF (fault),
(void *) ip, (void *) sp,
(void *) activation_frame->eax,
(void *) activation_frame->ebx,
@@ -423,7 +423,7 @@ hurd_activation_handler_normal (struct activation_frame *activation_frame,
siginfo_t si;
memset (&si, 0, sizeof (si));
si.si_signo = SIGSEGV;
- si.si_addr = ADDR_TO_PTR (fault);
+ si.si_addr = VG_ADDR_TO_PTR (fault);
/* XXX: Should set si.si_code to SEGV_MAPERR or
SEGV_ACCERR. */
@@ -545,10 +545,10 @@ hurd_activation_handler_activated (struct hurd_utcb *utcb)
{
case ACTIVATION_fault:
{
- addr_t fault;
+ vg_addr_t fault;
uintptr_t ip;
uintptr_t sp;
- struct activation_fault_info info;
+ struct vg_activation_fault_info info;
error_t err;
err = activation_fault_send_unmarshal (mb->reply,
@@ -557,10 +557,10 @@ hurd_activation_handler_activated (struct hurd_utcb *utcb)
if (err)
panic ("Failed to unmarshal exception: %d", err);
- debug (4, "Fault at " ADDR_FMT "(ip: %x, sp: %x).",
- ADDR_PRINTF (fault), ip, sp);
+ debug (4, "Fault at " VG_ADDR_FMT "(ip: %x, sp: %x).",
+ VG_ADDR_PRINTF (fault), ip, sp);
- uintptr_t f = (uintptr_t) ADDR_TO_PTR (fault);
+ uintptr_t f = (uintptr_t) VG_ADDR_TO_PTR (fault);
uintptr_t stack_page = (sp & ~(PAGESIZE - 1));
uintptr_t fault_page = (f & ~(PAGESIZE - 1));
if (stack_page == fault_page
@@ -569,8 +569,8 @@ hurd_activation_handler_activated (struct hurd_utcb *utcb)
the following page. It is likely a stack fault.
Handle it using the alternate stack. */
{
- debug (5, "Stack fault at " ADDR_FMT "(ip: %x, sp: %x).",
- ADDR_PRINTF (fault), ip, sp);
+ debug (5, "Stack fault at " VG_ADDR_FMT "(ip: %x, sp: %x).",
+ VG_ADDR_PRINTF (fault), ip, sp);
assert (! utcb->alternate_stack_inuse);
utcb->alternate_stack_inuse = true;
@@ -580,9 +580,9 @@ hurd_activation_handler_activated (struct hurd_utcb *utcb)
activation_frame->normal_mode_stack = utcb->alternate_stack;
}
- debug (5, "Handling fault at " ADDR_FMT " in normal mode "
+ debug (5, "Handling fault at " VG_ADDR_FMT " in normal mode "
"(ip: %x, sp: %x).",
- ADDR_PRINTF (fault), ip, sp);
+ VG_ADDR_PRINTF (fault), ip, sp);
break;
}
@@ -594,9 +594,9 @@ hurd_activation_handler_activated (struct hurd_utcb *utcb)
/* Unblock the exception handler messenger. */
error_t err = vg_ipc (VG_IPC_RECEIVE | VG_IPC_RECEIVE_ACTIVATE
| VG_IPC_RETURN,
- ADDR_VOID, utcb->exception_buffer->receiver,
- ADDR_VOID,
- ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID);
+ VG_ADDR_VOID, utcb->exception_buffer->receiver,
+ VG_ADDR_VOID,
+ VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID);
assert (! err);
}
else if (mb->just_free)
@@ -685,8 +685,8 @@ hurd_activation_handler_init_early (void)
struct vg_message *msg = (void *) &activation_handler_msg[0];
rm_thread_exregs_send_marshal (msg, HURD_EXREGS_SET_UTCB, in,
- ADDR_VOID, ADDR_VOID,
- PTR_TO_PAGE (utcb), ADDR_VOID,
+ VG_ADDR_VOID, VG_ADDR_VOID,
+ VG_PTR_TO_PAGE (utcb), VG_ADDR_VOID,
__hurd_startup_data->messengers[1]);
error_t err;
@@ -696,11 +696,11 @@ hurd_activation_handler_init_early (void)
| VG_IPC_RECEIVE_INLINE
| VG_IPC_SEND_SET_THREAD_TO_CALLER
| VG_IPC_SEND_SET_ASROOT_TO_CALLERS,
- ADDR_VOID,
- __hurd_startup_data->messengers[1], ADDR_VOID, ADDR_VOID,
- ADDR_VOID, __hurd_startup_data->thread,
- __hurd_startup_data->messengers[0], PTR_TO_PAGE (msg),
- 0, 0, ADDR_VOID);
+ VG_ADDR_VOID,
+ __hurd_startup_data->messengers[1], VG_ADDR_VOID, VG_ADDR_VOID,
+ VG_ADDR_VOID, __hurd_startup_data->thread,
+ __hurd_startup_data->messengers[0], VG_PTR_TO_PAGE (msg),
+ 0, 0, VG_ADDR_VOID);
if (err)
panic ("Failed to send IPC: %d", err);
if (utcb->vg.inline_words[0])
@@ -730,14 +730,14 @@ hurd_activation_handler_init (void)
#define ACTIVATION_AREA_SIZE (1 << ACTIVATION_AREA_SIZE_LOG2)
error_t
-hurd_activation_state_alloc (addr_t thread, struct hurd_utcb **utcbp)
+hurd_activation_state_alloc (vg_addr_t thread, struct hurd_utcb **utcbp)
{
- debug (5, DEBUG_BOLD ("allocating activation state for " ADDR_FMT),
- ADDR_PRINTF (thread));
+ debug (5, DEBUG_BOLD ("allocating activation state for " VG_ADDR_FMT),
+ VG_ADDR_PRINTF (thread));
- addr_t activation_area = as_alloc (ACTIVATION_AREA_SIZE_LOG2, 1, true);
+ vg_addr_t activation_area = as_alloc (ACTIVATION_AREA_SIZE_LOG2, 1, true);
void *activation_area_base
- = ADDR_TO_PTR (addr_extend (activation_area,
+ = VG_ADDR_TO_PTR (vg_addr_extend (activation_area,
0, ACTIVATION_AREA_SIZE_LOG2));
debug (0, "Activation area: %p-%p",
@@ -746,22 +746,22 @@ hurd_activation_state_alloc (addr_t thread, struct hurd_utcb **utcbp)
int page_count = 0;
/* Be careful! We assume that pages is properly set up after at
most 2 allocations! */
- addr_t pages_[2];
- addr_t *pages = pages_;
+ vg_addr_t pages_[2];
+ vg_addr_t *pages = pages_;
void alloc (void *addr)
{
- addr_t slot = addr_chop (PTR_TO_ADDR (addr), PAGESIZE_LOG2);
+ vg_addr_t slot = vg_addr_chop (VG_PTR_TO_ADDR (addr), PAGESIZE_LOG2);
as_ensure (slot);
struct storage storage;
- storage = storage_alloc (ADDR_VOID, cap_page,
+ storage = storage_alloc (VG_ADDR_VOID, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT,
+ VG_OBJECT_POLICY_DEFAULT,
slot);
- if (ADDR_IS_VOID (storage.addr))
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Failed to allocate page for exception state");
if (pages == pages_)
@@ -804,12 +804,12 @@ hurd_activation_state_alloc (addr_t thread, struct hurd_utcb **utcbp)
/* At the top of the stack page, we use some space to remember the
storage we allocate so that we can free it later. */
utcb->vg.activation_handler_sp
- -= sizeof (addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE;
+ -= sizeof (vg_addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE;
memset ((void *) utcb->vg.activation_handler_sp, 0,
- sizeof (addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE);
+ sizeof (vg_addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE);
memcpy ((void *) utcb->vg.activation_handler_sp, pages,
- sizeof (addr_t) * page_count);
- pages = (addr_t *) utcb->vg.activation_handler_sp;
+ sizeof (vg_addr_t) * page_count);
+ pages = (vg_addr_t *) utcb->vg.activation_handler_sp;
/* The word beyond the base of the stack is a pointer to the
exception page. */
@@ -848,8 +848,8 @@ hurd_activation_state_alloc (addr_t thread, struct hurd_utcb **utcbp)
/* Unblock the exception handler messenger. */
error_t err = vg_ipc (VG_IPC_RECEIVE | VG_IPC_RECEIVE_ACTIVATE
| VG_IPC_RETURN,
- ADDR_VOID, utcb->exception_buffer->receiver, ADDR_VOID,
- ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID);
+ VG_ADDR_VOID, utcb->exception_buffer->receiver, VG_ADDR_VOID,
+ VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID, VG_ADDR_VOID);
assert (! err);
@@ -858,20 +858,20 @@ hurd_activation_state_alloc (addr_t thread, struct hurd_utcb **utcbp)
struct hurd_thread_exregs_in in;
struct hurd_thread_exregs_out out;
- err = rm_thread_exregs (ADDR_VOID, thread,
+ err = rm_thread_exregs (VG_ADDR_VOID, thread,
HURD_EXREGS_SET_UTCB
| HURD_EXREGS_SET_EXCEPTION_MESSENGER,
- in, ADDR_VOID, ADDR_VOID,
- PTR_TO_PAGE (utcb), utcb->exception_buffer->receiver,
+ in, VG_ADDR_VOID, VG_ADDR_VOID,
+ VG_PTR_TO_PAGE (utcb), utcb->exception_buffer->receiver,
&out, NULL, NULL, NULL, NULL);
if (err)
panic ("Failed to install utcb");
- err = rm_cap_copy (ADDR_VOID,
+ err = rm_cap_copy (VG_ADDR_VOID,
utcb->exception_buffer->receiver,
- ADDR (VG_MESSENGER_THREAD_SLOT, VG_MESSENGER_SLOTS_LOG2),
- ADDR_VOID, thread,
- 0, CAP_PROPERTIES_DEFAULT);
+ VG_ADDR (VG_MESSENGER_THREAD_SLOT, VG_MESSENGER_SLOTS_LOG2),
+ VG_ADDR_VOID, thread,
+ 0, VG_CAP_PROPERTIES_DEFAULT);
if (err)
panic ("Failed to set messenger's thread");
@@ -901,20 +901,20 @@ hurd_activation_state_free (struct hurd_utcb *utcb)
/* Free the allocated storage. */
/* Copy the array as we're going to free the storage that it is
in. */
- addr_t pages[ACTIVATION_AREA_SIZE / PAGESIZE];
+ vg_addr_t pages[ACTIVATION_AREA_SIZE / PAGESIZE];
memcpy (pages,
(void *) utcb->vg.activation_handler_sp + sizeof (uintptr_t),
- sizeof (addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE);
+ sizeof (vg_addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE);
int i;
for (i = 0; i < sizeof (pages) / sizeof (pages[0]); i ++)
- if (! ADDR_IS_VOID (pages[i]))
+ if (! VG_ADDR_IS_VOID (pages[i]))
storage_free (pages[i], false);
/* Finally, free the address space. */
int page = SKIP;
void *activation_area_base = (void *) utcb - page * PAGESIZE;
- as_free (addr_chop (PTR_TO_ADDR (activation_area_base),
+ as_free (vg_addr_chop (VG_PTR_TO_ADDR (activation_area_base),
ACTIVATION_AREA_SIZE_LOG2),
false);
}
diff --git a/libhurd-mm/map.c b/libhurd-mm/map.c
index 001b97d..2147d6b 100644
--- a/libhurd-mm/map.c
+++ b/libhurd-mm/map.c
@@ -35,12 +35,12 @@
static error_t
slab_alloc (void *hook, size_t size, void **ptr)
{
- struct storage storage = storage_alloc (meta_data_activity, cap_page,
+ struct storage storage = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
- *ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ *ptr = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
return 0;
}
@@ -50,7 +50,7 @@ slab_dealloc (void *hook, void *buffer, size_t size)
{
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
@@ -314,15 +314,15 @@ map_join (struct map *first, struct map *second)
}
bool
-map_fault (addr_t fault_addr, uintptr_t ip, struct activation_fault_info info)
+map_fault (vg_addr_t fault_addr, uintptr_t ip, struct vg_activation_fault_info info)
{
/* Find the map. */
struct region region;
- if (addr_depth (fault_addr) == ADDR_BITS - PAGESIZE_LOG2)
- fault_addr = addr_extend (fault_addr, 0, PAGESIZE_LOG2);
+ if (vg_addr_depth (fault_addr) == VG_ADDR_BITS - PAGESIZE_LOG2)
+ fault_addr = vg_addr_extend (fault_addr, 0, PAGESIZE_LOG2);
- region.start = (uintptr_t) ADDR_TO_PTR (fault_addr);
+ region.start = (uintptr_t) VG_ADDR_TO_PTR (fault_addr);
region.length = 1;
maps_lock_lock ();
@@ -332,9 +332,9 @@ map_fault (addr_t fault_addr, uintptr_t ip, struct activation_fault_info info)
{
do_debug (5)
{
- debug (0, "No map covers " ADDR_FMT "(" ACTIVATION_FAULT_INFO_FMT ")",
- ADDR_PRINTF (fault_addr),
- ACTIVATION_FAULT_INFO_PRINTF (info));
+ debug (0, "No map covers " VG_ADDR_FMT "(" VG_ACTIVATION_FAULT_INFO_FMT ")",
+ VG_ADDR_PRINTF (fault_addr),
+ VG_ACTIVATION_FAULT_INFO_PRINTF (info));
for (map = hurd_btree_map_first (&maps);
map;
map = hurd_btree_map_next (map))
@@ -349,9 +349,9 @@ map_fault (addr_t fault_addr, uintptr_t ip, struct activation_fault_info info)
if (((info.access & L4_FPAGE_WRITABLE) && ! (map->access & MAP_ACCESS_WRITE))
|| ! map->access)
{
- debug (0, "Invalid %s access at " ADDR_FMT ": " MAP_FMT,
+ debug (0, "Invalid %s access at " VG_ADDR_FMT ": " MAP_FMT,
info.access & L4_FPAGE_WRITABLE ? "write" : "read",
- ADDR_PRINTF (fault_addr), MAP_PRINTF (map));
+ VG_ADDR_PRINTF (fault_addr), MAP_PRINTF (map));
maps_lock_unlock ();
return false;
@@ -365,10 +365,10 @@ map_fault (addr_t fault_addr, uintptr_t ip, struct activation_fault_info info)
/* Propagate the fault. */
bool r = pager->fault (pager, offset, 1, ro,
- (uintptr_t) ADDR_TO_PTR (fault_addr), ip, info);
+ (uintptr_t) VG_ADDR_TO_PTR (fault_addr), ip, info);
if (! r)
- debug (5, "Map did not resolve fault at " ADDR_FMT,
- ADDR_PRINTF (fault_addr));
+ debug (5, "Map did not resolve fault at " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (fault_addr));
return r;
}
diff --git a/libhurd-mm/map.h b/libhurd-mm/map.h
index 206b392..d9aedef 100644
--- a/libhurd-mm/map.h
+++ b/libhurd-mm/map.h
@@ -228,9 +228,9 @@ extern struct map *map_split (struct map *map, uintptr_t offset);
This function takes and releases MAP->PAGER->LOCK. */
extern bool map_join (struct map *first, struct map *second);
-/* Raise a fault at address ADDR. Returns true if the fault was
+/* Raise a fault at address VG_ADDR. Returns true if the fault was
handled, false otherwise. */
-extern bool map_fault (addr_t addr,
- uintptr_t ip, struct activation_fault_info info);
+extern bool map_fault (vg_addr_t addr,
+ uintptr_t ip, struct vg_activation_fault_info info);
#endif
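
For orientation, a minimal sketch of a map_fault caller; the entry convention of the activation handler is an assumption, not something this diff establishes:

  static void
  handle_fault (vg_addr_t fault_addr, uintptr_t ip,
                struct vg_activation_fault_info info)
  {
    /* map_fault returns false if no map covers FAULT_ADDR or the
       attempted access is not permitted by the covering map.  */
    if (! map_fault (fault_addr, ip, info))
      debug (0, "Unhandled fault at " VG_ADDR_FMT,
             VG_ADDR_PRINTF (fault_addr));
  }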
diff --git a/libhurd-mm/message-buffer.c b/libhurd-mm/message-buffer.c
index c1326ab..dfa87c8 100644
--- a/libhurd-mm/message-buffer.c
+++ b/libhurd-mm/message-buffer.c
@@ -47,12 +47,12 @@ slab_alloc (void *hook, size_t size, void **ptr)
return 0;
}
- struct storage storage = storage_alloc (meta_data_activity, cap_page,
+ struct storage storage = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
- *ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ *ptr = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
return 0;
}
@@ -62,7 +62,7 @@ slab_dealloc (void *hook, void *buffer, size_t size)
{
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
@@ -91,10 +91,10 @@ slab_destructor (void *hook, void *object)
}
storage_free (mb->sender, false);
- storage_free (addr_chop (PTR_TO_ADDR (mb->request), PAGESIZE_LOG2),
+ storage_free (vg_addr_chop (VG_PTR_TO_ADDR (mb->request), PAGESIZE_LOG2),
false);
storage_free (mb->receiver, false);
- storage_free (addr_chop (PTR_TO_ADDR (mb->reply), PAGESIZE_LOG2),
+ storage_free (vg_addr_chop (VG_PTR_TO_ADDR (mb->reply), PAGESIZE_LOG2),
false);
}
@@ -129,10 +129,10 @@ hurd_message_buffer_alloc_hard (void)
mb->sender = __hurd_startup_data->messengers[initial_messenger ++];
else
{
- storage = storage_alloc (meta_data_activity, cap_messenger,
+ storage = storage_alloc (meta_data_activity, vg_cap_messenger,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
mb->sender = storage.addr;
@@ -143,10 +143,10 @@ hurd_message_buffer_alloc_hard (void)
mb->receiver_strong = __hurd_startup_data->messengers[initial_messenger ++];
else
{
- storage = storage_alloc (meta_data_activity, cap_messenger,
+ storage = storage_alloc (meta_data_activity, vg_cap_messenger,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
mb->receiver_strong = storage.addr;
@@ -155,17 +155,17 @@ hurd_message_buffer_alloc_hard (void)
/* Weaken it. */
#if 0
mb->receiver = capalloc ();
- struct cap receiver_cap = as_cap_lookup (mb->receiver_strong, cap_messenger,
+ struct vg_cap receiver_cap = as_cap_lookup (mb->receiver_strong, vg_cap_messenger,
NULL);
- assert (receiver_cap.type == cap_messenger);
+ assert (receiver_cap.type == vg_cap_messenger);
as_slot_lookup_use
(mb->receiver,
({
- bool ret = cap_copy_x (ADDR_VOID,
- ADDR_VOID, slot, mb->receiver,
- ADDR_VOID, receiver_cap, mb->receiver_strong,
- CAP_COPY_WEAKEN,
- CAP_PROPERTIES_VOID);
+ bool ret = vg_cap_copy_x (VG_ADDR_VOID,
+ VG_ADDR_VOID, slot, mb->receiver,
+ VG_ADDR_VOID, receiver_cap, mb->receiver_strong,
+ VG_CAP_COPY_WEAKEN,
+ VG_CAP_PROPERTIES_VOID);
assert (ret);
}));
#endif
@@ -176,13 +176,13 @@ hurd_message_buffer_alloc_hard (void)
mb->request = (void *) &initial_pages[initial_page ++][0];
else
{
- storage = storage_alloc (meta_data_activity, cap_page,
+ storage = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
- mb->request = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ mb->request = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
}
/* And the receive buffer. */
@@ -190,13 +190,13 @@ hurd_message_buffer_alloc_hard (void)
mb->reply = (void *) &initial_pages[initial_page ++][0];
else
{
- storage = storage_alloc (meta_data_activity, cap_page,
+ storage = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of space.");
- mb->reply = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ mb->reply = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
}
@@ -213,11 +213,11 @@ hurd_message_buffer_alloc_hard (void)
err = vg_ipc_full (VG_IPC_RECEIVE | VG_IPC_SEND | VG_IPC_RECEIVE_ACTIVATE
| VG_IPC_RECEIVE_SET_THREAD_TO_CALLER
| VG_IPC_SEND_SET_THREAD_TO_CALLER,
- ADDR_VOID, mb->receiver, PTR_TO_PAGE (mb->reply),
- ADDR_VOID,
- ADDR_VOID, mb->receiver,
- mb->sender, PTR_TO_PAGE (mb->request),
- 0, 0, ADDR_VOID);
+ VG_ADDR_VOID, mb->receiver, VG_PTR_TO_PAGE (mb->reply),
+ VG_ADDR_VOID,
+ VG_ADDR_VOID, mb->receiver,
+ mb->sender, VG_PTR_TO_PAGE (mb->request),
+ 0, 0, VG_ADDR_VOID);
if (err)
panic ("Failed to set receiver's id");
@@ -227,11 +227,11 @@ hurd_message_buffer_alloc_hard (void)
hurd_activation_message_register (mb);
err = vg_ipc_full (VG_IPC_RECEIVE | VG_IPC_SEND | VG_IPC_RECEIVE_ACTIVATE,
- ADDR_VOID, mb->receiver, PTR_TO_PAGE (mb->reply),
- ADDR_VOID,
- ADDR_VOID, mb->sender,
- mb->sender, PTR_TO_PAGE (mb->request),
- 0, 0, ADDR_VOID);
+ VG_ADDR_VOID, mb->receiver, VG_PTR_TO_PAGE (mb->reply),
+ VG_ADDR_VOID,
+ VG_ADDR_VOID, mb->sender,
+ mb->sender, VG_PTR_TO_PAGE (mb->request),
+ 0, 0, VG_ADDR_VOID);
if (err)
panic ("Failed to set sender's id");
diff --git a/libhurd-mm/message-buffer.h b/libhurd-mm/message-buffer.h
index 0c35b27..37eb2a0 100644
--- a/libhurd-mm/message-buffer.h
+++ b/libhurd-mm/message-buffer.h
@@ -38,13 +38,13 @@ struct hurd_message_buffer
/* A messenger associated with REQUEST. The messenger's identifier is
set to the data structure's address. */
- addr_t sender;
+ vg_addr_t sender;
struct vg_message *request;
/* A messenger associated with REPLY. The messenger's identifier is
set to the data structure's address. */
- addr_t receiver_strong;
+ vg_addr_t receiver_strong;
/* A weakened version. */
- addr_t receiver;
+ vg_addr_t receiver;
struct vg_message *reply;
/* If not NULL, then this routine is called. */
diff --git a/libhurd-mm/mm-init.c b/libhurd-mm/mm-init.c
index caf362e..896b69f 100644
--- a/libhurd-mm/mm-init.c
+++ b/libhurd-mm/mm-init.c
@@ -38,19 +38,19 @@
extern struct hurd_startup_data *__hurd_startup_data;
-addr_t meta_data_activity;
+vg_addr_t meta_data_activity;
int mm_init_done;
void
-mm_init (addr_t activity)
+mm_init (vg_addr_t activity)
{
assert (! mm_init_done);
extern int output_debug;
output_debug = 1;
- if (ADDR_IS_VOID (activity))
+ if (VG_ADDR_IS_VOID (activity))
meta_data_activity = __hurd_startup_data->activity;
else
meta_data_activity = activity;
@@ -74,8 +74,8 @@ mm_init (addr_t activity)
#ifdef i386
void test (int nesting)
{
- addr_t addr = as_alloc (PAGESIZE_LOG2, 1, true);
- void *a = ADDR_TO_PTR (addr_extend (addr, 0, PAGESIZE_LOG2));
+ vg_addr_t addr = as_alloc (PAGESIZE_LOG2, 1, true);
+ void *a = VG_ADDR_TO_PTR (vg_addr_extend (addr, 0, PAGESIZE_LOG2));
int recursed = false;
@@ -83,7 +83,7 @@ mm_init (addr_t activity)
bool fault (struct pager *pager,
uintptr_t offset, int count, bool ro,
uintptr_t fault_addr, uintptr_t ip,
- struct activation_fault_info info)
+ struct vg_activation_fault_info info)
{
assert (a == (void *) (fault_addr & ~(PAGESIZE - 1)));
assert (count == 1);
@@ -115,9 +115,9 @@ mm_init (addr_t activity)
/* We cannot easily check esp and eip here. */
as_ensure (addr);
- storage = storage_alloc (ADDR_VOID,
- cap_page, STORAGE_UNKNOWN,
- OBJECT_POLICY_DEFAULT,
+ storage = storage_alloc (VG_ADDR_VOID,
+ vg_cap_page, STORAGE_UNKNOWN,
+ VG_OBJECT_POLICY_DEFAULT,
addr);
if (nesting > 1 && ! recursed)
diff --git a/libhurd-mm/mm.h b/libhurd-mm/mm.h
index 8baace8..2ba0262 100644
--- a/libhurd-mm/mm.h
+++ b/libhurd-mm/mm.h
@@ -29,6 +29,6 @@ extern int mm_init_done;
/* Initialize the memory management sub-system. ACTIVITY is the
activity to use to account meta-data resources. */
-extern void mm_init (addr_t activity);
+extern void mm_init (vg_addr_t activity);
#endif /* HURD_MM_MM_H */
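
A minimal initialization sketch; passing VG_ADDR_VOID makes mm_init fall back to __hurd_startup_data->activity, as mm-init.c above shows:

  /* Account all memory-management meta-data to the startup activity.
     (That mm_init sets mm_init_done on completion is an inference
     from the declarations above.)  */
  mm_init (VG_ADDR_VOID);
  assert (mm_init_done);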
diff --git a/libhurd-mm/mmap.c b/libhurd-mm/mmap.c
index bda87cf..425f6b4 100644
--- a/libhurd-mm/mmap.c
+++ b/libhurd-mm/mmap.c
@@ -77,8 +77,8 @@ mmap (void *addr, size_t length, int protect, int flags,
debug (5, "Trying to allocate memory %p-%p", addr, addr + length);
struct anonymous_pager *pager;
- pager = anonymous_pager_alloc (ADDR_VOID, addr, length, access,
- OBJECT_POLICY_DEFAULT,
+ pager = anonymous_pager_alloc (VG_ADDR_VOID, addr, length, access,
+ VG_OBJECT_POLICY_DEFAULT,
(flags & MAP_FIXED) ? ANONYMOUS_FIXED: 0,
NULL, &addr);
if (! pager)
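
Since mmap reduces to anonymous-pager allocation here, a standard POSIX usage exercises the whole path above:

  /* Request four anonymous, writable pages anywhere in the address
     space; subsequent faults are resolved on demand by the pager.  */
  void *buffer = mmap (NULL, 4 * PAGESIZE, PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (buffer == MAP_FAILED)
    panic ("Out of memory");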
diff --git a/libhurd-mm/mprotect.c b/libhurd-mm/mprotect.c
index e80d08c..5c70427 100644
--- a/libhurd-mm/mprotect.c
+++ b/libhurd-mm/mprotect.c
@@ -124,10 +124,10 @@ mprotect (void *addr, size_t length, int prot)
{
map->access = access;
- addr_t addr;
- for (addr = ADDR (map_start, ADDR_BITS - PAGESIZE_LOG2);
- addr_prefix (addr) < map_end;
- addr = addr_add (addr, 1))
+ vg_addr_t addr;
+ for (addr = VG_ADDR (map_start, VG_ADDR_BITS - PAGESIZE_LOG2);
+ vg_addr_prefix (addr) < map_end;
+ addr = vg_addr_add (addr, 1))
{
/* This may fail if the page has not yet been faulted
in. That's okay: it will get the right
@@ -139,18 +139,18 @@ mprotect (void *addr, size_t length, int prot)
{
error_t err;
err = rm_cap_rubout (meta_data_activity,
- ADDR_VOID, addr);
+ VG_ADDR_VOID, addr);
assert (! err);
- slot->type = cap_void;
+ slot->type = vg_cap_void;
}
else
{
bool ret;
- ret = cap_copy_x (meta_data_activity,
- ADDR_VOID, slot, addr,
- ADDR_VOID, *slot, addr,
- CAP_COPY_WEAKEN,
- CAP_PROPERTIES_VOID);
+ ret = vg_cap_copy_x (meta_data_activity,
+ VG_ADDR_VOID, slot, addr,
+ VG_ADDR_VOID, *slot, addr,
+ VG_CAP_COPY_WEAKEN,
+ VG_CAP_PROPERTIES_VOID);
assert (ret);
}
}));
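
The loop above advances one page at a time because ADDR is kept at depth VG_ADDR_BITS - PAGESIZE_LOG2: at that depth, consecutive address values name consecutive pages. An illustrative check, assuming the arithmetic the loop itself relies on:

  vg_addr_t a = VG_ADDR (map_start, VG_ADDR_BITS - PAGESIZE_LOG2);
  vg_addr_t b = vg_addr_add (a, 1);
  /* The prefix advances by exactly one page.  */
  assert (vg_addr_prefix (b) == vg_addr_prefix (a) + PAGESIZE);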
diff --git a/libhurd-mm/pager.h b/libhurd-mm/pager.h
index 66b75ef..b647573 100644
--- a/libhurd-mm/pager.h
+++ b/libhurd-mm/pager.h
@@ -36,12 +36,12 @@ struct pager;
typedef bool (*pager_fault_t) (struct pager *pager,
uintptr_t offset, int count, bool ro,
uintptr_t fault_addr, uintptr_t ip,
- struct activation_fault_info info);
+ struct vg_activation_fault_info info);
/* The COUNT sub-trees starting at ADDR are no longer referenced and
their associated storage may be reclaimed. */
typedef void (*pager_reclaim_t) (struct pager *pager,
- addr_t addr, int count);
+ vg_addr_t addr, int count);
/* Called when the last map to a pager has been destroyed. (This
function should not call pager_deinit!) Called with PAGER->LOCK
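
A minimal pager_fault_t sketch, modeled on the test handler in mm-init.c above; everything beyond the storage installation pattern is an assumption:

  static bool
  my_fault (struct pager *pager, uintptr_t offset, int count, bool ro,
            uintptr_t fault_addr, uintptr_t ip,
            struct vg_activation_fault_info info)
  {
    /* Name the faulting page and make sure a capability slot exists.  */
    vg_addr_t page = vg_addr_chop (VG_PTR_TO_ADDR ((void *) fault_addr),
                                   PAGESIZE_LOG2);
    as_ensure (page);

    /* Back the page with fresh storage, installed directly at PAGE.  */
    struct storage storage = storage_alloc (VG_ADDR_VOID, vg_cap_page,
                                            STORAGE_UNKNOWN,
                                            VG_OBJECT_POLICY_DEFAULT, page);
    return ! VG_ADDR_IS_VOID (storage.addr);
  }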
diff --git a/libhurd-mm/storage.c b/libhurd-mm/storage.c
index 8079036..bdb6ce0 100644
--- a/libhurd-mm/storage.c
+++ b/libhurd-mm/storage.c
@@ -56,12 +56,12 @@ static uatomic32_t free_count;
struct storage_desc
{
/* The address of the folio. */
- addr_t folio;
- /* The location of the shadow cap designating this folio. */
+ vg_addr_t folio;
+ /* The location of the shadow vg_cap designating this folio. */
struct object *shadow;
/* Which objects are allocated. */
- unsigned char alloced[FOLIO_OBJECTS / 8];
+ unsigned char alloced[VG_FOLIO_OBJECTS / 8];
/* The number of free objects. */
unsigned char free;
@@ -119,7 +119,7 @@ list_unlink (struct storage_desc *e)
}
static int
-addr_compare (const addr_t *a, const addr_t *b)
+addr_compare (const vg_addr_t *a, const vg_addr_t *b)
{
if (a->raw < b->raw)
return -1;
@@ -127,7 +127,7 @@ addr_compare (const addr_t *a, const addr_t *b)
}
BTREE_CLASS (storage_desc, struct storage_desc,
- addr_t, folio, node, addr_compare, false)
+ vg_addr_t, folio, node, addr_compare, false)
static hurd_btree_storage_desc_t storage_descs;
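
Descriptors are keyed on the folio address, so freeing an object first chops the object address down to its folio and then searches this tree, exactly as storage_free_ below does:

  vg_addr_t folio = vg_addr_chop (object, VG_FOLIO_OBJECTS_LOG2);
  struct storage_desc *sd
    = hurd_btree_storage_desc_find (&storage_descs, &folio);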
@@ -154,16 +154,16 @@ check_slab_space_reserve (void)
return;
/* We don't have a reserve. Allocate one now. */
- struct storage storage = storage_alloc (meta_data_activity, cap_page,
+ struct storage storage = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT, ADDR_VOID);
- void *buffer = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
+ void *buffer = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
buffer = (void *) atomic_exchange_acq (&slab_space_reserve, buffer);
if (buffer)
/* Someone else allocated a buffer. We don't need two, so
deallocate it. */
- storage_free (addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2), false);
+ storage_free (vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2), false);
}
static error_t
@@ -187,7 +187,7 @@ storage_desc_slab_dealloc (void *hook, void *buffer, size_t size)
assert (size == PAGESIZE);
- addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ vg_addr_t addr = vg_addr_chop (VG_PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
storage_free (addr, false);
return 0;
@@ -235,10 +235,10 @@ static struct storage_desc *short_lived;
/* Once there are more free objects in a LONG_LIVED_STABLE folio than
FREEING_THRESHOLD, we change the folios state from stable to
freeing. */
-#define FREEING_THRESHOLD (FOLIO_OBJECTS / 2)
+#define FREEING_THRESHOLD (VG_FOLIO_OBJECTS / 2)
static void
-shadow_setup (struct cap *cap, struct storage_desc *desc)
+shadow_setup (struct vg_cap *cap, struct storage_desc *desc)
{
/* We do not need to hold DESC->LOCK here as either we are in the
init phase and thus single threaded or we are initializing a new
@@ -254,12 +254,12 @@ shadow_setup (struct cap *cap, struct storage_desc *desc)
atomic_decrement (&free_count);
error_t err = rm_folio_object_alloc (meta_data_activity,
- desc->folio, idx, cap_page,
- OBJECT_POLICY_DEFAULT, 0,
+ desc->folio, idx, vg_cap_page,
+ VG_OBJECT_POLICY_DEFAULT, 0,
NULL, NULL);
assert (err == 0);
- shadow = ADDR_TO_PTR (addr_extend (addr_extend (desc->folio,
- idx, FOLIO_OBJECTS_LOG2),
+ shadow = VG_ADDR_TO_PTR (vg_addr_extend (vg_addr_extend (desc->folio,
+ idx, VG_FOLIO_OBJECTS_LOG2),
0, PAGESIZE_LOG2));
if (desc->free == 0)
@@ -285,32 +285,32 @@ shadow_setup (struct cap *cap, struct storage_desc *desc)
{
assert (! as_init_done);
- struct storage storage = storage_alloc (meta_data_activity, cap_page,
+ struct storage storage = storage_alloc (meta_data_activity, vg_cap_page,
STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT,
- ADDR_VOID);
- if (ADDR_IS_VOID (storage.addr))
+ VG_OBJECT_POLICY_DEFAULT,
+ VG_ADDR_VOID);
+ if (VG_ADDR_IS_VOID (storage.addr))
panic ("Out of storage.");
- shadow = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ shadow = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr, 0, PAGESIZE_LOG2));
}
desc->shadow = shadow;
- cap->type = cap_folio;
- CAP_SET_SUBPAGE (cap, 0, 1);
- cap_set_shadow (cap, shadow);
+ cap->type = vg_cap_folio;
+ VG_CAP_SET_SUBPAGE (cap, 0, 1);
+ vg_cap_set_shadow (cap, shadow);
if (idx != -1)
{
- shadow->caps[idx].type = cap_page;
- CAP_PROPERTIES_SET (&shadow->caps[idx],
- CAP_PROPERTIES (OBJECT_POLICY_DEFAULT,
- CAP_ADDR_TRANS_VOID));
+ shadow->caps[idx].type = vg_cap_page;
+ VG_CAP_PROPERTIES_SET (&shadow->caps[idx],
+ VG_CAP_PROPERTIES (VG_OBJECT_POLICY_DEFAULT,
+ VG_CAP_ADDR_TRANS_VOID));
}
}
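
The first branch is the bootstrap case: before the address-space machinery is up, storage_alloc cannot be used, so the folio donates one of its own objects to shadow itself. The donated page's address is the folio address extended twice, first by the object index, then down to byte granularity:

  /* Equivalent to the nested vg_addr_extend above; storage_free_
     later recognizes this self-hosted case when tearing down.  */
  vg_addr_t shadow_addr
    = vg_addr_extend (vg_addr_extend (desc->folio, idx,
                                      VG_FOLIO_OBJECTS_LOG2),
                      0, PAGESIZE_LOG2);
  struct object *shadow = VG_ADDR_TO_PTR (shadow_addr);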
void
-storage_shadow_setup (struct cap *cap, addr_t folio)
+storage_shadow_setup (struct vg_cap *cap, vg_addr_t folio)
{
/* This code is only called from the initialization code. When this
code runs, there is exactly one thread. Thus, there is no need
@@ -357,7 +357,7 @@ static bool do_serialize;
static void
storage_check_reserve_internal (bool force_allocate,
- addr_t activity,
+ vg_addr_t activity,
enum storage_expectancy expectancy,
bool i_may_have_lock)
{
@@ -435,18 +435,18 @@ storage_check_reserve_internal (bool force_allocate,
/* Although we have not yet allocated the objects, allocating
support structures for the folio may require memory causing
us to recurse. Thus, we add them first. */
- atomic_add (&free_count, FOLIO_OBJECTS);
+ atomic_add (&free_count, VG_FOLIO_OBJECTS);
/* Here is the big recursive dependency! Using the address that
as_alloc returns might require allocating one (or more) page
tables to make a slot available. Moreover, each of those
page tables requires not only a cappage but also a shadow
page table. */
- addr_t addr;
+ vg_addr_t addr;
if (likely (as_init_done))
{
- addr = as_alloc (FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2, 1, true);
- if (ADDR_IS_VOID (addr))
+ addr = as_alloc (VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2, 1, true);
+ if (VG_ADDR_IS_VOID (addr))
panic ("Failed to allocate address space!");
as_ensure (addr);
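
The ordering is the essential trick: FREE_COUNT is credited before anything that might itself allocate, so a re-entrant storage_alloc sees a non-empty reserve rather than recursing without bound. Restated in isolation (illustrative ordering only):

  atomic_add (&free_count, VG_FOLIO_OBJECTS);  /* Credit first...  */
  vg_addr_t addr = as_alloc (VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2, 1, true);
  if (VG_ADDR_IS_VOID (addr))
    panic ("Failed to allocate address space!");
  as_ensure (addr);                            /* ...then allocate.  */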
@@ -454,21 +454,21 @@ storage_check_reserve_internal (bool force_allocate,
else
{
struct hurd_object_desc *desc;
- desc = as_alloc_slow (FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2);
- if (! desc || ADDR_IS_VOID (desc->object))
+ desc = as_alloc_slow (VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2);
+ if (! desc || VG_ADDR_IS_VOID (desc->object))
panic ("Failed to allocate address space!");
addr = desc->object;
desc->storage = addr;
- desc->type = cap_folio;
+ desc->type = vg_cap_folio;
}
/* And then the folio. */
- addr_t a = addr;
- error_t err = rm_folio_alloc (activity, activity, FOLIO_POLICY_DEFAULT,
+ vg_addr_t a = addr;
+ error_t err = rm_folio_alloc (activity, activity, VG_FOLIO_POLICY_DEFAULT,
&a);
assert (! err);
- assert (ADDR_EQ (addr, a));
+ assert (VG_ADDR_EQ (addr, a));
/* Allocate and fill a descriptor. */
struct storage_desc *s = storage_desc_alloc ();
@@ -476,7 +476,7 @@ storage_check_reserve_internal (bool force_allocate,
s->lock = (ss_mutex_t) 0;
s->folio = addr;
memset (&s->alloced, 0, sizeof (s->alloced));
- s->free = FOLIO_OBJECTS;
+ s->free = VG_FOLIO_OBJECTS;
if (likely (as_init_done))
{
@@ -529,10 +529,10 @@ storage_check_reserve (bool i_may_have_lock)
#undef storage_alloc
struct storage
-storage_alloc (addr_t activity,
- enum cap_type type, enum storage_expectancy expectancy,
+storage_alloc (vg_addr_t activity,
+ enum vg_cap_type type, enum storage_expectancy expectancy,
struct object_policy policy,
- addr_t addr)
+ vg_addr_t addr)
{
assert (storage_init_done);
@@ -610,17 +610,17 @@ storage_alloc (addr_t activity,
int idx = bit_alloc (desc->alloced, sizeof (desc->alloced), 0);
assertx (idx != -1,
- "Folio (" ADDR_FMT ") full (free: %d) but on a list!",
- ADDR_PRINTF (desc->folio), desc->free);
+ "Folio (" VG_ADDR_FMT ") full (free: %d) but on a list!",
+ VG_ADDR_PRINTF (desc->folio), desc->free);
- addr_t folio = desc->folio;
- addr_t object = addr_extend (folio, idx, FOLIO_OBJECTS_LOG2);
+ vg_addr_t folio = desc->folio;
+ vg_addr_t object = vg_addr_extend (folio, idx, VG_FOLIO_OBJECTS_LOG2);
- debug (5, "Allocating object %d as %s from " ADDR_FMT " (" ADDR_FMT ") "
- "(%d left), installing at " ADDR_FMT,
- idx, cap_type_string (type),
- ADDR_PRINTF (folio), ADDR_PRINTF (object),
- desc->free, ADDR_PRINTF (addr));
+ debug (5, "Allocating object %d as %s from " VG_ADDR_FMT " (" VG_ADDR_FMT ") "
+ "(%d left), installing at " VG_ADDR_FMT,
+ idx, vg_cap_type_string (type),
+ VG_ADDR_PRINTF (folio), VG_ADDR_PRINTF (object),
+ desc->free, VG_ADDR_PRINTF (addr));
atomic_decrement (&free_count);
desc->free --;
@@ -632,7 +632,7 @@ storage_alloc (addr_t activity,
{
assert (bit_alloc (desc->alloced, sizeof (desc->alloced), 0) == -1);
- debug (3, "Folio at " ADDR_FMT " full", ADDR_PRINTF (folio));
+ debug (3, "Folio at " VG_ADDR_FMT " full", VG_ADDR_PRINTF (folio));
list_unlink (desc);
@@ -644,20 +644,20 @@ storage_alloc (addr_t activity,
ss_mutex_unlock (&storage_descs_lock);
}
- addr_t a = addr;
+ vg_addr_t a = addr;
error_t err = rm_folio_object_alloc (activity, folio, idx, type, policy, 0,
&a, NULL);
assertx (! err,
- "Allocating object %d from " ADDR_FMT " at " ADDR_FMT ": %d!",
- idx, ADDR_PRINTF (folio), ADDR_PRINTF (addr), err);
- assert (ADDR_EQ (a, addr));
+ "Allocating object %d from " VG_ADDR_FMT " at " VG_ADDR_FMT ": %d!",
+ idx, VG_ADDR_PRINTF (folio), VG_ADDR_PRINTF (addr), err);
+ assert (VG_ADDR_EQ (a, addr));
struct object *shadow = desc->shadow;
- struct cap *cap = NULL;
+ struct vg_cap *cap = NULL;
if (likely (!! shadow))
{
cap = &shadow->caps[idx];
- CAP_PROPERTIES_SET (cap, CAP_PROPERTIES (policy, CAP_ADDR_TRANS_VOID));
+ VG_CAP_PROPERTIES_SET (cap, VG_CAP_PROPERTIES (policy, VG_CAP_ADDR_TRANS_VOID));
cap->type = type;
}
else
@@ -666,16 +666,16 @@ storage_alloc (addr_t activity,
/* We drop DESC->LOCK. */
ss_mutex_unlock (&desc->lock);
- if (! ADDR_IS_VOID (addr))
- /* We also have to update the shadow for ADDR. Unfortunately, we
+ if (! VG_ADDR_IS_VOID (addr))
+ /* We also have to update the shadow for ADDR. Unfortunately, we
don't have the cap although the caller might. */
{
bool ret = as_slot_lookup_use
(addr,
({
slot->type = type;
- cap_set_shadow (slot, NULL);
- CAP_POLICY_SET (slot, policy);
+ vg_cap_set_shadow (slot, NULL);
+ VG_CAP_POLICY_SET (slot, policy);
}));
if (! ret)
{
@@ -689,29 +689,29 @@ storage_alloc (addr_t activity,
storage.addr = object;
#ifndef NDEBUG
- if (type == cap_page)
+ if (type == vg_cap_page)
{
- unsigned int *p = ADDR_TO_PTR (addr_extend (storage.addr,
+ unsigned int *p = VG_ADDR_TO_PTR (vg_addr_extend (storage.addr,
0, PAGESIZE_LOG2));
int c;
for (c = 0; c < PAGESIZE / sizeof (int); c ++)
assertx (p[c] == 0,
- ADDR_FMT "(%p)[%d] = %x",
- ADDR_PRINTF (storage.addr), p, c * sizeof (int), p[c]);
+ VG_ADDR_FMT "(%p)[%d] = %x",
+ VG_ADDR_PRINTF (storage.addr), p, c * sizeof (int), p[c]);
}
#endif
- debug (5, "Allocated " ADDR_FMT "; " ADDR_FMT,
- ADDR_PRINTF (storage.addr), ADDR_PRINTF (addr));
+ debug (5, "Allocated " VG_ADDR_FMT "; " VG_ADDR_FMT,
+ VG_ADDR_PRINTF (storage.addr), VG_ADDR_PRINTF (addr));
return storage;
}
void
-storage_free_ (addr_t object, bool unmap_now)
+storage_free_ (vg_addr_t object, bool unmap_now)
{
- debug (5, DEBUG_BOLD ("Freeing " ADDR_FMT), ADDR_PRINTF (object));
+ debug (5, DEBUG_BOLD ("Freeing " VG_ADDR_FMT), VG_ADDR_PRINTF (object));
- addr_t folio = addr_chop (object, FOLIO_OBJECTS_LOG2);
+ vg_addr_t folio = vg_addr_chop (object, VG_FOLIO_OBJECTS_LOG2);
atomic_increment (&free_count);
@@ -721,9 +721,9 @@ storage_free_ (addr_t object, bool unmap_now)
struct storage_desc *storage;
storage = hurd_btree_storage_desc_find (&storage_descs, &folio);
assertx (storage,
- "No storage associated with " ADDR_FMT " "
+ "No storage associated with " VG_ADDR_FMT " "
"(did you pass the storage address?)",
- ADDR_PRINTF (object));
+ VG_ADDR_PRINTF (object));
ss_mutex_lock (&storage->lock);
@@ -731,20 +731,20 @@ storage_free_ (addr_t object, bool unmap_now)
struct object *shadow = storage->shadow;
- if (storage->free == FOLIO_OBJECTS
- || ((storage->free == FOLIO_OBJECTS - 1)
+ if (storage->free == VG_FOLIO_OBJECTS
+ || ((storage->free == VG_FOLIO_OBJECTS - 1)
&& shadow
- && ADDR_EQ (folio, addr_chop (PTR_TO_ADDR (shadow),
- FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2))))
+ && VG_ADDR_EQ (folio, vg_addr_chop (VG_PTR_TO_ADDR (shadow),
+ VG_FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2))))
/* The folio is now empty. */
{
- debug (1, "Folio at " ADDR_FMT " now empty", ADDR_PRINTF (folio));
+ debug (1, "Folio at " VG_ADDR_FMT " now empty", VG_ADDR_PRINTF (folio));
- if (free_count - FOLIO_OBJECTS > FREE_PAGES_LOW_WATER)
+ if (free_count - VG_FOLIO_OBJECTS > FREE_PAGES_LOW_WATER)
/* There are sufficient reserve pages not including this
folio. Thus, we free STORAGE. */
{
- atomic_add (&free_count, - FOLIO_OBJECTS);
+ atomic_add (&free_count, - VG_FOLIO_OBJECTS);
list_unlink (storage);
hurd_btree_storage_desc_detach (&storage_descs, storage);
@@ -757,18 +757,18 @@ storage_free_ (addr_t object, bool unmap_now)
as_slot_lookup_use (folio,
({
- cap_set_shadow (slot, NULL);
- slot->type = cap_void;
+ vg_cap_set_shadow (slot, NULL);
+ slot->type = vg_cap_void;
}));
storage_desc_free (storage);
if (shadow)
{
- addr_t shadow_addr = addr_chop (PTR_TO_ADDR (shadow),
+ vg_addr_t shadow_addr = vg_addr_chop (VG_PTR_TO_ADDR (shadow),
PAGESIZE_LOG2);
- if (ADDR_EQ (addr_chop (shadow_addr, FOLIO_OBJECTS_LOG2), folio))
+ if (VG_ADDR_EQ (vg_addr_chop (shadow_addr, VG_FOLIO_OBJECTS_LOG2), folio))
{
/* The shadow was allocated from ourself, which we
already freed. */
@@ -803,22 +803,22 @@ storage_free_ (addr_t object, bool unmap_now)
ss_mutex_unlock (&storage_descs_lock);
- int idx = addr_extract (object, FOLIO_OBJECTS_LOG2);
+ int idx = vg_addr_extract (object, VG_FOLIO_OBJECTS_LOG2);
bit_dealloc (storage->alloced, idx);
error_t err = rm_folio_object_alloc (meta_data_activity,
- folio, idx, cap_void,
- OBJECT_POLICY_DEFAULT, 0,
+ folio, idx, vg_cap_void,
+ VG_OBJECT_POLICY_DEFAULT, 0,
NULL, NULL);
assert (err == 0);
if (likely (!! shadow))
{
- shadow->caps[idx].type = cap_void;
- cap_set_shadow (&shadow->caps[idx], NULL);
- CAP_PROPERTIES_SET (&shadow->caps[idx],
- CAP_PROPERTIES (OBJECT_POLICY_DEFAULT,
- CAP_ADDR_TRANS_VOID));
+ shadow->caps[idx].type = vg_cap_void;
+ vg_cap_set_shadow (&shadow->caps[idx], NULL);
+ VG_CAP_PROPERTIES_SET (&shadow->caps[idx],
+ VG_CAP_PROPERTIES (VG_OBJECT_POLICY_DEFAULT,
+ VG_CAP_ADDR_TRANS_VOID));
}
else
assert (! as_init_done);
@@ -851,28 +851,28 @@ storage_init (void)
i < __hurd_startup_data->desc_count;
i ++, odesc ++)
{
- if (ADDR_IS_VOID (odesc->storage))
+ if (VG_ADDR_IS_VOID (odesc->storage))
continue;
- addr_t folio;
- if (odesc->type == cap_folio)
+ vg_addr_t folio;
+ if (odesc->type == vg_cap_folio)
folio = odesc->object;
else
- folio = addr_chop (odesc->storage, FOLIO_OBJECTS_LOG2);
+ folio = vg_addr_chop (odesc->storage, VG_FOLIO_OBJECTS_LOG2);
struct storage_desc *sdesc;
sdesc = hurd_btree_storage_desc_find (&storage_descs, &folio);
if (! sdesc)
/* Haven't seen this folio yet. */
{
- debug (5, "Adding folio " ADDR_FMT, ADDR_PRINTF (folio));
+ debug (5, "Adding folio " VG_ADDR_FMT, VG_ADDR_PRINTF (folio));
folio_count ++;
sdesc = storage_desc_alloc ();
sdesc->lock = (ss_mutex_t) 0;
sdesc->folio = folio;
- sdesc->free = FOLIO_OBJECTS;
+ sdesc->free = VG_FOLIO_OBJECTS;
sdesc->mode = LONG_LIVED_ALLOCING;
list_link (&long_lived_allocing, sdesc);
@@ -881,20 +881,20 @@ storage_init (void)
/* Assume that the folio is free. As we encounter objects,
we will mark them as allocated. */
- free_count += FOLIO_OBJECTS;
+ free_count += VG_FOLIO_OBJECTS;
}
- if (odesc->type != cap_folio)
+ if (odesc->type != vg_cap_folio)
{
- int idx = addr_extract (odesc->storage, FOLIO_OBJECTS_LOG2);
+ int idx = vg_addr_extract (odesc->storage, VG_FOLIO_OBJECTS_LOG2);
debug (5, "%llx/%d, %d -> %llx/%d (%s)",
- addr_prefix (folio),
- addr_depth (folio),
+ vg_addr_prefix (folio),
+ vg_addr_depth (folio),
idx,
- addr_prefix (odesc->storage),
- addr_depth (odesc->storage),
- cap_type_string (odesc->type));
+ vg_addr_prefix (odesc->storage),
+ vg_addr_depth (odesc->storage),
+ vg_cap_type_string (odesc->type));
bit_set (sdesc->alloced, sizeof (sdesc->alloced), idx);
diff --git a/libhurd-mm/storage.h b/libhurd-mm/storage.h
index c7b4f66..e454b83 100644
--- a/libhurd-mm/storage.h
+++ b/libhurd-mm/storage.h
@@ -42,16 +42,16 @@ enum storage_expectancy
struct storage
{
- struct cap *cap;
- addr_t addr;
+ struct vg_cap *cap;
+ vg_addr_t addr;
};
/* Allocate an object of type TYPE. The object has a life expectancy
- of EXPECTANCY. If ADDR is not ADDR_VOID, a capability to the
+ of EXPECTANCY. If ADDR is not VG_ADDR_VOID, a capability to the
storage will be saved at ADDR (and the shadow object updated
appropriately). On success, the shadow capability slot for the
storage is returned (useful for setting up a shadow object) and the
- address of the storage object. Otherwise, NULL and ADDR_VOID,
+ address of the storage object. Otherwise, NULL and VG_ADDR_VOID,
respectively, are returned. ACTIVITY is the activity to use to
account the storage.
@@ -61,11 +61,11 @@ struct storage
caller wants to use the allocated object for address translation,
the caller must allocate the shadow object. If not, functions
including the cap_lookup family will fail. */
-extern struct storage storage_alloc (addr_t activity,
- enum cap_type type,
+extern struct storage storage_alloc (vg_addr_t activity,
+ enum vg_cap_type type,
enum storage_expectancy expectancy,
struct object_policy policy,
- addr_t addr);
+ vg_addr_t addr);
#define storage_alloc(__sa_activity, __sa_type, __sa_expectancy, \
__sa_policy, __sa_addr) \
({ \
@@ -73,9 +73,9 @@ extern struct storage storage_alloc (addr_t activity,
__sa_storage = storage_alloc (__sa_activity, __sa_type, \
__sa_expectancy, __sa_policy, \
__sa_addr); \
- debug (5, "storage_alloc (%s, " ADDR_FMT ") -> " ADDR_FMT, \
- cap_type_string (__sa_type), ADDR_PRINTF (__sa_addr), \
- ADDR_PRINTF (__sa_storage.addr)); \
+ debug (5, "storage_alloc (%s, " VG_ADDR_FMT ") -> " VG_ADDR_FMT, \
+ vg_cap_type_string (__sa_type), VG_ADDR_PRINTF (__sa_addr), \
+ VG_ADDR_PRINTF (__sa_storage.addr)); \
__sa_storage; \
})
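
Hedged usage of the wrapper: allocate one page accounted to the meta-data activity without installing it anywhere (VG_ADDR_VOID leaves installation to the caller), then release it:

  struct storage s = storage_alloc (meta_data_activity, vg_cap_page,
                                    STORAGE_LONG_LIVED,
                                    VG_OBJECT_POLICY_DEFAULT, VG_ADDR_VOID);
  if (VG_ADDR_IS_VOID (s.addr))
    panic ("Out of space.");
  storage_free (s.addr, false);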
@@ -83,10 +83,10 @@ extern struct storage storage_alloc (addr_t activity,
/* Frees the storage at STORAGE. STORAGE must be the address returned
by storage_alloc (NOT the address provided to storage_alloc). If
UNMAP_NOW is not true, revoking the storage may be delayed. */
-extern void storage_free_ (addr_t storage, bool unmap_now);
+extern void storage_free_ (vg_addr_t storage, bool unmap_now);
#define storage_free(__sf_storage, __sf_unmap_now) \
({ \
- debug (5, "storage_free (" ADDR_FMT ")", ADDR_PRINTF (__sf_storage)); \
+ debug (5, "storage_free (" VG_ADDR_FMT ")", VG_ADDR_PRINTF (__sf_storage)); \
storage_free_ (__sf_storage, __sf_unmap_now); \
})
@@ -94,7 +94,7 @@ extern void storage_free_ (addr_t storage, bool unmap_now);
extern void storage_init (void);
/* Used by as_init to initialize a folio's shadow object. */
-extern void storage_shadow_setup (struct cap *cap, addr_t folio);
+extern void storage_shadow_setup (struct vg_cap *cap, vg_addr_t folio);
/* Return whether there is sufficient reserve storage. */
extern bool storage_have_reserve (void);
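
A caller that must not block in a critical section can check the reserve first and top it up; this assumes storage_check_reserve (defined in storage.c above with an I_MAY_HAVE_LOCK flag) is exported alongside:

  if (! storage_have_reserve ())
    /* TRUE: we may already hold a storage lock.  */
    storage_check_reserve (true);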