summaryrefslogtreecommitdiff
path: root/viengoos/as.c
diff options
context:
space:
mode:
authorneal <neal>2008-02-06 13:35:44 +0000
committerneal <neal>2008-02-06 13:35:44 +0000
commit3355d3b116a2ce1d1e3220bbcca42dc0ced68aae (patch)
tree126f3e247fcdbff111052a269d0fc3fe07d8d794 /viengoos/as.c
parentdf17395bb34ed6f5a8f77bfb950f93040df9da90 (diff)
2008-02-06 Neal H. Walfield <neal@gnu.org>
* as.c (as_build_internal): Improve comments. Do some variable renaming.
Diffstat (limited to 'viengoos/as.c')
-rw-r--r--viengoos/as.c302
1 files changed, 162 insertions, 140 deletions
diff --git a/viengoos/as.c b/viengoos/as.c
index 19bcf5c..3c856a4 100644
--- a/viengoos/as.c
+++ b/viengoos/as.c
@@ -65,102 +65,114 @@ ensure_stack(int i)
# define AS_LOCK do { } while (0)
# define AS_UNLOCK do { } while (0)
-# define AS_DUMP as_dump_from (activity, start, __func__);
+# define AS_DUMP as_dump_from (activity, as_root, __func__);
#endif
-/* Build the address space such that A designates a capability slot.
- If MAY_OVERWRITE is true, may overwrite an existing capability.
- Otherwise, the capability slot is expected to contain a void
- capability. */
+/* Build up the address space at AS_ROOT_ADDR such that there is a
+ capability slot at address ADDR. Returns the address of the
+ capability slot.
+
+ ALLOCATE_OBJECT is a callback function that is expected to allocate
+ a cappage to be used as a page table at address ADDR. The callback
+ function should allocate any necessary shadow page-tables. The
+ callback function may not call address space manipulation
+ functions.
+
+ If MAY_OVERWRITE is true, the function may overwrite an existing
+ capability. Otherwise, only capability slots containing a void
+ capability are used. */
static struct cap *
as_build_internal (activity_t activity,
- addr_t as_root_addr, struct cap *root, addr_t a,
+ addr_t as_root_addr, struct cap *as_root, addr_t addr,
struct as_insert_rt (*allocate_object) (enum cap_type type,
addr_t addr),
bool may_overwrite)
{
-#ifdef RM_INTERN
- struct cap *start = root;
-#endif
+ struct cap *pte = as_root;
- assert (! ADDR_IS_VOID (a));
-
- l4_uint64_t addr = addr_prefix (a);
- l4_word_t remaining = addr_depth (a);
-
- debug (4, "Ensuring slot at 0x%llx/%d", addr, remaining);
+ debug (4, "Ensuring slot at " ADDR_FMT, ADDR_PRINTF (addr));
+ assert (! ADDR_IS_VOID (addr));
+ /* The number of bits to translate. */
+ int remaining = addr_depth (addr);
/* The REMAINING bits to translate are in the REMAINING most significant
- bits of ADDR. Here it is more convenient to have them in the
+ bits of PREFIX. Here it is more convenient to have them in the
lower bits. */
- addr >>= (ADDR_BITS - remaining);
+ uint64_t prefix = addr_prefix (addr) >> (ADDR_BITS - remaining);
+ /* Folios are not made up of capability slots and cannot be written
+ to. When traversing a folio, we manufacture a capability to the
+ used object in FAKE_SLOT. If ADDR ends up designating such a
+ capability, we fail. */
struct cap fake_slot;
do
{
- struct object *cappage = NULL;
+ struct object *pt = NULL;
- l4_uint64_t root_guard = CAP_GUARD (root);
- int root_gbits = CAP_GUARD_BITS (root);
+ uint64_t pte_guard = CAP_GUARD (pte);
+ int pte_gbits = CAP_GUARD_BITS (pte);
- if (root->type != cap_void
- && remaining >= root_gbits
- && root_guard == extract_bits64_inv (addr,
- remaining - 1, root_gbits))
- /* ROOT's (possibly zero-width) guard matches and thus
- translates part of the address. */
+ /* If PTE's guard matches, the designated page table translates
+ our address. Otherwise, we need to insert a page table and
+ indirect access to the object designated by PTE via it. */
+
+ if (pte->type != cap_void
+ && remaining >= pte_gbits
+ && pte_guard == extract_bits64_inv (prefix, remaining - 1, pte_gbits))
+ /* PTE's (possibly zero-width) guard matches and the
+ designated object translates ADDR. */
{
- if (remaining == root_gbits && may_overwrite)
+ if (remaining == pte_gbits && may_overwrite)
{
debug (4, "Overwriting " ADDR_FMT " with " ADDR_FMT
" (at " ADDR_FMT ")",
- ADDR_PRINTF (addr_extend (addr_chop (a, remaining),
- root_guard, root_gbits)),
- ADDR_PRINTF (a),
- ADDR_PRINTF (addr_chop (a, remaining)));
+ ADDR_PRINTF (addr_extend (addr_chop (addr, remaining),
+ pte_guard, pte_gbits)),
+ ADDR_PRINTF (addr),
+ ADDR_PRINTF (addr_chop (addr, remaining)));
/* XXX: Free any data associated with the capability
(e.g., shadow pages). */
break;
}
/* Subtract the number of bits the guard translates. */
- remaining -= root_gbits;
+ remaining -= pte_gbits;
assert (remaining >= 0);
if (remaining == 0)
- /* ROOT is not a void capability yet the guard translates
+ /* PTE is not a void capability yet the guard translates
all of the bits and we may not overwrite the
- capability. This means that ROOT references an object
- at ADDR. This is a problem: we want to insert a
- capability at ADDR. */
+ capability. This means that PTE references an object
+ at PREFIX. This is a problem: we want to insert a
+ capability at PREFIX. */
{
AS_DUMP;
panic ("There is already a %s object at %llx/%d!",
- cap_type_string (root->type),
- addr_prefix (a), addr_depth (a));
+ cap_type_string (pte->type),
+ addr_prefix (addr), addr_depth (addr));
}
- switch (root->type)
+ switch (pte->type)
{
case cap_cappage:
case cap_rcappage:
/* Load the referenced object. */
- cappage = cap_to_object (activity, root);
- if (! cappage)
- /* ROOT's type was not void but its designation was
+ pt = cap_to_object (activity, pte);
+ if (! pt)
+ /* PTE's type was not void but its designation was
invalid. This can only happen if we inserted an object
and subsequently destroyed it. */
{
/* The type should now have been set to cap_void. */
- assert (root->type == cap_void);
+ assert (pte->type == cap_void);
AS_DUMP;
panic ("Lost object at %llx/%d",
- addr_prefix (a), addr_depth (a) - remaining);
+ addr_prefix (addr), addr_depth (addr) - remaining);
}
- /* We index CAPPAGE below. */
+ /* We index PT below. */
break;
case cap_folio:
@@ -168,15 +180,15 @@ as_build_internal (activity_t activity,
if (remaining < FOLIO_OBJECTS_LOG2)
panic ("Translating " ADDR_FMT "; not enough bits (%d) "
"to index folio at " ADDR_FMT,
- ADDR_PRINTF (a), remaining,
- ADDR_PRINTF (addr_chop (a, remaining)));
+ ADDR_PRINTF (addr), remaining,
+ ADDR_PRINTF (addr_chop (addr, remaining)));
- struct object *object = cap_to_object (activity, root);
+ struct object *object = cap_to_object (activity, pte);
#ifdef RM_INTERN
if (! object)
{
debug (1, "Failed to get object with OID " OID_FMT,
- OID_PRINTF (root->oid));
+ OID_PRINTF (pte->oid));
return false;
}
#else
@@ -185,14 +197,15 @@ as_build_internal (activity_t activity,
struct folio *folio = (struct folio *) object;
- int i = extract_bits64_inv (addr,
+ int i = extract_bits64_inv (prefix,
remaining - 1, FOLIO_OBJECTS_LOG2);
if (folio_object_type (folio, i) == cap_void)
- panic ("Translating %llx/%d; indexed folio /%d object void",
- addr_prefix (a), addr_depth (a),
- ADDR_BITS - remaining);
+ panic ("Translating " ADDR_FMT "; indexed folio at "
+ ADDR_FMT ": void object",
+ ADDR_PRINTF (addr),
+ ADDR_PRINTF (addr_chop (addr, remaining)));
- root = &fake_slot;
+ pte = &fake_slot;
#ifdef RM_INTERN
struct object_desc *fdesc;
@@ -201,87 +214,95 @@ as_build_internal (activity_t activity,
object = object_find (activity, fdesc->oid + 1 + i,
folio_object_policy (folio, i));
assert (object);
- *root = object_to_cap (object);
+ *pte = object_to_cap (object);
#else
/* We don't use cap_copy as we just need a byte
copy. */
- *root = folio->objects[i];
+ *pte = folio->objects[i];
#endif
remaining -= FOLIO_OBJECTS_LOG2;
+
+ /* Fall through means we index PT. But we just did
+ that. Continue at the start of the loop. */
continue;
}
default:
AS_DUMP;
- panic ("Can't insert object at %llx/%d: "
- "%s at 0x%llx/%d does not translate address bits",
- addr_prefix (a), addr_depth (a),
- cap_type_string (root->type),
- addr_prefix (a), addr_depth (a) - remaining);
+ panic ("Can't insert object at " ADDR_FMT ": "
+ "%s at " ADDR_FMT " does not translate address bits",
+ ADDR_PRINTF (addr), cap_type_string (pte->type),
+ ADDR_PRINTF (addr_chop (addr, remaining)));
}
}
else
- /* We can get here due to two scenarios: ROOT is void or the
- the addresses at which we want to insert the object does
- not match the guard at ROOT. Perhaps in the former and
- definately in the latter, we need to introduce a level of
- indirection.
-
- R - ROOT
- E - ENTRY
- C - new cappage
-
- | <-root_depth-> | mismatch -> | <- gbits
- | | <- match C <- new page table
- R R / \ <- common guard,
- | | R \ index and
- o o | \ remaining guard
- / \ / | \ o E
- o o o E o / \
- ^ o o
- just insert */
+ /* There are two scenarios that lead us here: (1) the pte is
+ void or (2) the address at which we want to insert the
+ object does not match the guard at PTE. Perhaps in the
+ former (as we only have 22 guard bits) and definitely in
+ the latter, we need to introduce a new page table.
+
+ Consider the second scenario:
+
+ E - PTE
+ T - PTE target
+ * - New PTE
+
+ Scenario:
+
+ [ |E| | | ] [ |E| | | ]
+ | \ pte's | <- (1) common guard
+ | / guard [ | | |*| ] <- (2) new page table
+ T | |
+ T ... <- (3) pivot T
+ */
{
- /* For convenience, we prefer that cappages occur at /44,
+ /* For convenience, we prefer that page tables occur at /44,
/36, /28, etc. This is useful as when we insert another
page that conflicts with the guard, we can trivially make
use of either 7- or 8-bit cappages rather than smaller
subpages. Moreover, it ensures that as paths are
decompressed, the tree remains relatively shallow. The
reason we don't choose /43 is that folios are 19-bits
- wide, while cappages are 8-bits and data pages 12.
+ wide while cappages are 8-bits and data pages 12
+ (= 20-bits).
- Consider an AS with a single page, the root having a
- 20-bit guard:
+ Consider an AS with a single page, the pte (*)
+ designating the object has a 20-bit guard:
- o
- | <- 20 bit guard
- o <- page
+ [ | | |*| | | ] <- page table
+ | <- 20 bit guard
+ o <- page
If we insert another page and there is a common guard of
1-bit, we could reuse this bit:
- o
- | <--- 1 bit guard
- o <--- 8-bit cappage
- / \ <-- 11-bit guards
- o o <- pages
+ [ | | | | | | ] <- page table
+ | <--- 1 bit guard
+ [ | | | | | | ] <- page table
+ | | <-- 11-bit guards
+ o o <- pages
- The problem with this is when we insert a third page that
- does not share the guard:
+ The problem with this is that if we want to avoid
+ shuffling (which we do), then when we insert a third page
+ that does not share the guard, we end up with small page
+ tables:
- o
+ [ | | | | | | ] <- page table
|
- o <- 1-bit subpage
- / \
- o o <- 8-bit cappage
- / \ | <- 11-bit guards
- o o o
+ [ | ] <- 1-bit subpage
+ / \
+ o o <- 8-bit cappage
+ / \ | <- 11-bit guards
+ o o o
- In this case, we would prefer a guard of 4 at the top.
+ In this scenario, a larger guard (4 bits wide) would have
+ been better.
- Managing the tree would also become a pain when removing
- entries. */
+ An additional reason to prefer larger guards at specific
+ depths is that it makes removing entries from the tree
+ easier. */
/* The number of bits until the next object. */
int tilobject;
@@ -292,9 +313,9 @@ as_build_internal (activity_t activity,
REMAINING - GBITS - log2 (sizeof (cappage)) is the guard
length of each entry in the new page. */
int gbits;
- if (root->type == cap_void)
+ if (pte->type == cap_void)
{
- int space = l4_msb64 (extract_bits64 (addr, 0, remaining));
+ int space = l4_msb64 (extract_bits64 (prefix, 0, remaining));
if (space <= CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
/* The slot is available and the remaining bits to
translate fit in the guard. */
@@ -305,20 +326,20 @@ as_build_internal (activity_t activity,
else
/* Find the size of the common prefix. */
{
- l4_uint64_t a = root_guard;
- int max = root_gbits > remaining ? remaining : root_gbits;
- l4_uint64_t b = extract_bits64_inv (addr, remaining - 1, max);
- if (remaining < root_gbits)
- a >>= root_gbits - remaining;
+ uint64_t a = pte_guard;
+ int max = pte_gbits > remaining ? remaining : pte_gbits;
+ uint64_t b = extract_bits64_inv (prefix, remaining - 1, max);
+ if (remaining < pte_gbits)
+ a >>= pte_gbits - remaining;
gbits = max - l4_msb64 (a ^ b);
- tilobject = root_gbits;
+ tilobject = pte_gbits;
}
/* Make sure that the guard to use fits in the guard
area. */
- int firstset = l4_msb64 (extract_bits64_inv (addr,
+ int firstset = l4_msb64 (extract_bits64_inv (prefix,
remaining - 1, gbits));
if (firstset > CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
/* FIRSTSET is the first (most significant) non-zero guard
@@ -336,7 +357,7 @@ as_build_internal (activity_t activity,
FOLIO_OBJECTS_LOG2 + i * CAPPAGE_SLOTS_LOG2 where i >= 0.
As GBITS is maximal, we may have to remove guard bits to
achieve this. */
- int untranslated_bits = remaining + ADDR_BITS - addr_depth (a);
+ int untranslated_bits = remaining + ADDR_BITS - addr_depth (addr);
struct as_guard_cappage gc
= as_compute_gbits_cappage (untranslated_bits,
@@ -359,43 +380,43 @@ as_build_internal (activity_t activity,
/* ALLOCATE_OBJECT wants the number of significant bits
translated to this object; REMAINING is number of bits
remaining to translate. */
- addr_t cappage_addr = addr_chop (a, remaining);
+ addr_t cappage_addr = addr_chop (addr, remaining);
struct as_insert_rt rt = allocate_object (cap_cappage, cappage_addr);
if (rt.cap.type == cap_void)
/* No memory. */
return NULL;
- cappage = cap_to_object (activity, &rt.cap);
+ pt = cap_to_object (activity, &rt.cap);
- /* Indirect access to the object designated by ROOT via the
+ /* Indirect access to the object designated by PTE via the
appropriate slot in new cappage (the pivot). */
- int pivot_idx = extract_bits_inv (root_guard,
- root_gbits - gbits - 1,
+ int pivot_idx = extract_bits_inv (pte_guard,
+ pte_gbits - gbits - 1,
subpage_bits);
addr_t pivot_addr = addr_extend (rt.storage,
pivot_idx,
CAPPAGE_SLOTS_LOG2);
- addr_t root_addr = addr_chop (cappage_addr, gbits);
+ addr_t pte_addr = addr_chop (cappage_addr, gbits);
- struct cap_addr_trans addr_trans = root->addr_trans;
+ struct cap_addr_trans addr_trans = pte->addr_trans;
int d = tilobject - gbits - subpage_bits;
CAP_ADDR_TRANS_SET_GUARD (&addr_trans,
- extract_bits64 (root_guard, 0, d), d);
+ extract_bits64 (pte_guard, 0, d), d);
bool r = cap_copy_x (activity,
- ADDR_VOID, &cappage->caps[pivot_idx], pivot_addr,
- as_root_addr, *root, root_addr,
+ ADDR_VOID, &pt->caps[pivot_idx], pivot_addr,
+ as_root_addr, *pte, pte_addr,
CAP_COPY_COPY_ADDR_TRANS_GUARD,
CAP_PROPERTIES (OBJECT_POLICY_DEFAULT,
addr_trans));
assert (r);
- /* Finally, set the slot at ROOT to point to CAPPAGE. */
- root_guard = extract_bits64_inv (root_guard,
- root_gbits - 1, gbits);
+ /* Finally, set the slot at PTE to point to PT. */
+ pte_guard = extract_bits64_inv (pte_guard,
+ pte_gbits - 1, gbits);
r = CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&addr_trans,
- root_guard, gbits,
+ pte_guard, gbits,
0 /* We always use the
first subpage in
a page. */,
@@ -403,7 +424,7 @@ as_build_internal (activity_t activity,
- subpage_bits));
assert (r);
- r = cap_copy_x (activity, as_root_addr, root, root_addr,
+ r = cap_copy_x (activity, as_root_addr, pte, pte_addr,
ADDR_VOID, rt.cap, rt.storage,
CAP_COPY_COPY_ADDR_TRANS_SUBPAGE
| CAP_COPY_COPY_ADDR_TRANS_GUARD,
@@ -412,44 +433,45 @@ as_build_internal (activity_t activity,
assert (r);
}
- /* Index CAPPAGE finding the next PTE. */
+ /* Index PT finding the next PTE. */
- /* The cappage referenced by ROOT translates WIDTH bits. */
- int width = CAP_SUBPAGE_SIZE_LOG2 (root);
+ /* The cappage referenced by PTE translates WIDTH bits. */
+ int width = CAP_SUBPAGE_SIZE_LOG2 (pte);
/* That should not be more than we have left to translate. */
if (width > remaining)
{
AS_DUMP;
panic ("Translating " ADDR_FMT ": can't index %d-bit cappage; "
"not enough bits (%d)",
- ADDR_PRINTF (a), width, remaining);
+ ADDR_PRINTF (addr), width, remaining);
}
- int idx = extract_bits64_inv (addr, remaining - 1, width);
- root = &cappage->caps[CAP_SUBPAGE_OFFSET (root) + idx];
+ int idx = extract_bits64_inv (prefix, remaining - 1, width);
+ pte = &pt->caps[CAP_SUBPAGE_OFFSET (pte) + idx];
remaining -= width;
}
while (remaining > 0);
if (! may_overwrite)
- assert (root->type == cap_void);
+ assert (pte->type == cap_void);
int gbits = remaining;
- l4_word_t guard = extract_bits64 (addr, 0, gbits);
- if (gbits != CAP_GUARD_BITS (root) || guard != CAP_GUARD (root))
+ /* It is safe to use an int as a guard has at most 22 bits. */
+ int guard = extract_bits64 (prefix, 0, gbits);
+ if (gbits != CAP_GUARD_BITS (pte) || guard != CAP_GUARD (pte))
{
struct cap_addr_trans addr_trans = CAP_ADDR_TRANS_VOID;
bool r = CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&addr_trans, guard, gbits,
0, 1);
assert (r);
- r = cap_copy_x (activity, as_root_addr, root, addr_chop (a, gbits),
- as_root_addr, *root, addr_chop (a, gbits),
+ r = cap_copy_x (activity, as_root_addr, pte, addr_chop (addr, gbits),
+ as_root_addr, *pte, addr_chop (addr, gbits),
CAP_COPY_COPY_ADDR_TRANS_GUARD,
CAP_PROPERTIES (OBJECT_POLICY_DEFAULT, addr_trans));
assert (r);
}
- return root;
+ return pte;
}
/* Ensure that the slot designated by A is accessible. */