authorneal <neal>2008-06-05 13:20:56 +0000
committerneal <neal>2008-06-05 13:20:56 +0000
commit7aeb80b54a28b9b87622debe47a69fd033923383 (patch)
tree00100da227e0ccf3694cd11f34324782d7ce5ef5 /libhurd-mm/as-build.c
parent029f94a04e9fc49dfb8b562d403b451c17fe1587 (diff)
viengoos/
2008-06-05  Neal H. Walfield  <neal@gnu.org>

        * cap-lookup.c: Move from here...
        * ../libhurd-mm/as-lookup.c: ... to here.
        * as.c: Move from here...
        * ../libhurd-mm/as-build.c: ... to here.
        * as-custom.c: Move from here...
        * ../libhurd-mm/as-build-custom.c: ... to here.
        * as.h: Move contents...
        * ../libhurd-mm/as.h: ... to here...
        * ../libhurd-mm/as-compute-gbits.h: ... and to here.
        * bits.h: Move from here...
        * ../libhurd-mm/bits.h: ... to here.
        * Makefile.am (viengoos_SOURCES): Remove cap-lookup.c, as.h
        and as.c.
        (t_as_SOURCES): Likewise.
        (t_activity_SOURCES): Likewise.
        (viengoos_LDADD): Add ../libhurd-mm/libas-kernel.a.
        (t_as_LDADD): Add ../libhurd-mm/libas-check.a.
        (t_activity_LDADD): Likewise.
        (lib_LIBRARIES): Remove libhurd-cap.a.
        (libhurd_cap_a_CPPFLAGS): Remove variable.
        (libhurd_cap_a_CFLAGS): Likewise.
        (libhurd_cap_a_SOURCES): Likewise.
        * server.c (server_loop): Replace use of object_lookup_rel
        with as_object_lookup_rel.  Replace use of slot_lookup_rel
        with as_slot_lookup_rel_use.  Replace use of cap_lookup_rel
        with as_cap_lookup_rel.
        * viengoos.c: Don't include "as.h", include <hurd/as.h>.
        * t-activity.c: Don't include "as.h", include <hurd/as.h>.
        (allocate_object): Change return type to struct
        as_allocate_pt_ret.
        * t-as.c: Don't include "as.h", include <hurd/as.h>.
        (allocate_object): Change return type to struct
        as_allocate_pt_ret.
        (allocate_page_table): New function.
        (try): Replace use of as_insert with as_insert_full.  Replace
        use of slot_lookup_rel with as_slot_lookup_rel_use.  Replace
        use of object_lookup_rel with as_object_lookup_rel.
        (test): Likewise.
        * t-guard.c: Don't include "as.h", include <hurd/as.h>.
        Include "../libhurd-mm/as-compute-gbits.h".

libhurd-mm/
2008-06-05  Neal H. Walfield  <neal@gnu.org>

        * as.h: Include <hurd/exceptions.h>.
        [! RM_INTERN]: Include <hurd/storage.h> and <pthread.h>.
        (as_lock_ensure_stack) [! RM_INTERN]: New function.
        (as_lock): New function.
        (as_lock_readonly): Likewise.
        (as_unlock): Likewise.
        (meta_data_activity) [RM_INTERN]: Don't declare.
        (shadow_root) [RM_INTERN]: Don't declare.
        (struct as_insert_rt): Rename from this...
        (struct as_allocate_pt_ret): ... to this.  Update users.
        (as_allocate_page_table_t): New typedef.
        (as_allocate_page_table): New declaration.
        (as_build): New declaration.
        (as_build_custom): Likewise.
        (as_slot_ensure): Remove declaration.
        (as_ensure_full): New macro.
        (as_ensure_use) [! RM_INTERN]: Likewise.
        (as_ensure) [! RM_INTERN]: New function.
        (as_insert): Rename from this...
        (as_insert_full): ... to this.  Don't return the capability.
        Reverse the order of the source address and cap.  Replace
        allocate_object parameter with an allocate_page_table
        parameter.  Update users.
        (as_insert) [! RM_INTERN]: New function.
        (as_slot_ensure_full_custom): Rename from this...
        (as_ensure_full_custom): ... to this.  Replace allocate_object
        parameter with an allocate_page_table parameter.
        (as_insert_custom): Likewise.
        (union as_lookup_ret): New definition.
        (as_lookup_want_cap): New definition.
        (as_lookup_want_slot): Likewise.
        (as_lookup_want_object): Likewise.
        (as_lookup_rel): New declaration.
        (slot_lookup): Remove declaration.
        (as_slot_lookup_rel_use): Replace it with this macro.
        (as_slot_lookup_use) [! RM_INTERN]: New macro.
        (as_cap_lookup_rel): New function.
        (cap_lookup): Rename from this...
        (as_cap_lookup) [! RM_INTERN]: ... to this.  Remove activity
        parameter.  Implement here as a static inline function.
        (as_object_lookup_rel): New function.
        (object_lookup): Rename from this...
        (as_object_lookup) [! RM_INTERN]: ... to this.  Remove
        activity parameter.  Implement here as a static inline
        function.
        (as_dump_from): New declaration.
        * as-compute-gbits.h: Include <hurd/folio.h>.
        * as.c (allocate_object): Rename from this...
        (as_allocate_page_table): ... to this.  Remove static
        qualifier.  Don't take parameter type, just allocate a
        cap_cappage.
        (as_slot_ensure): Remove function.
        (as_init): Replace use of slot_lookup_rel with
        as_slot_lookup_use or as_cap_lookup as appropriate.
        (cap_lookup): Remove function.
        (object_lookup): Likewise.
        (slot_lookup): Likewise.
        (as_dump): Likewise.
        * as-build.c: Don't include "as.h", but <hurd/as.h>.  Include
        <hurd/rm.h>, "as-compute-gbits.h".
        [RM_INTERN]: Don't include "object.h" but
        "../viengoos/object.h".
        (CUSTOM) [ID_SUFFIX]: Define.
        (as_build_internal): Rename from this...
        (as_build): ... to this.  Remove static qualifier.  Replace
        parameter allocate_object with allocate_page_table.  Use it
        instead.
        (as_slot_ensure_full): Remove function.
        (as_insert): Likewise.
        * as-build-custom.c (as_object_index_t): Remove definition.
        (AS_LOCK): Don't define.
        (AS_UNLOCK): Don't define.
        (as_ensure_full_custom): New function.
        (as_insert_custom): Likewise.
        * as-lookup.c: Include <hurd/as.h>.
        [RM_INTERN]: Don't include "object.h" but
        "../viengoos/object.h".
        [! RM_INTERN]: Include <pthread.h>.
        (as_lock) [! RM_INTERN]: Rename from this...
        (as_rwlock) [! RM_INTERN]: ... to this.
        (ensure_stack) [! RM_INTERN]: Remove function.
        (AS_LOCK): Don't define.
        (AS_UNLOCK): Don't define.
        (lookup): Rename from this...
        (as_lookup_rel): ... to this.  Change mode's type to an enum
        as_lookup_mode.  Change rt's type to a union as_lookup_ret.
        Don't use want_object but as_lookup_want_object.  Don't use
        want_slot but as_lookup_want_slot.  Don't use want_cap but
        as_lookup_want_cap.
        (cap_lookup_rel): Remove function.
        (object_lookup_rel): Likewise.
        (slot_lookup_rel): Likewise.
        (print_nr): Move from here...
        * as-lookup.c (do_walk): Move from here...
        * as-dump.c (do_walk): ... to here.
        * as-lookup.c (as_dump_from): Move from here...
        * as-dump.c (as_dump_from): ... to here.
        * Makefile.am (lib_LIBRARIES) [ENABLE_TESTS]: Set to
        libas-check.a.
        (lib_LIBRARIES) [! ENABLE_TESTS]: Add libas-kernel.a.
        (libhurd_mm_a_SOURCES): Add bits.h, as-build.c,
        as-build-custom.c, as-lookup.c and as-dump.c.
        (libas_kernel_a_CPPFLAGS): New variable.
        (libas_kernel_a_CCASFLAGS): New variable.
        (libas_kernel_a_CFLAGS): New variable.
        (libas_kernel_a_SOURCES): New variable.
        (libas_check_a_CPPFLAGS): New variable.
        (libas_check_a_CCASFLAGS): New variable.
        (libas_check_a_CFLAGS): New variable.
        (libas_check_a_SOURCES): New variable.
        * anonymous.c (fault): Replace use of as_slot_ensure with
        as_ensure.
        * exceptions.c (exception_handler_init): Replace use of
        as_slot_ensure with as_ensure.
        * storage.c (storage_check_reserve_internal): Replace use of
        as_lock with as_rwlock.  Replace use of as_slot_ensure and
        slot_lookup with as_ensure and as_slot_lookup_use.
        (storage_alloc): Replace use of slot_lookup with
        as_slot_lookup_use.

hurd/
2008-06-05  Neal H. Walfield  <neal@gnu.org>

        * cap.h: Don't include <pthread.h>.
        (as_lock): Remove declaration.
        (cap_lookup_rel): Likewise.
        (object_lookup_rel): Likewise.
        (slot_lookup_rel): Likewise.

/
2008-06-05  Neal H. Walfield  <neal@gnu.org>

        * libc.a.in: Remove -lhurd-cap.
        * Makefile.am (libc-stmp): Remove dependency on
        viengoos/libhurd-cap.a.

2008-06-05  Neal H. Walfield  <neal@gnu.org>

        * process-spawn.c (as_insert_custom): Replace use of as_insert
        with as_insert_full.
        (allocate_object): Change return type to struct
        as_allocate_pt_ret.  Replace use of as_slot_ensure with
        as_ensure_use.
        (allocate_page_table): New function.
        (process_spawn): Update use of as_insert_custom to be
        consistent with new API, in particular, pass
        allocate_page_table instead of allocate_object.  Replace use
        of object_lookup_rel with as_object_lookup_rel.

libpthread/
2008-06-05  Neal H. Walfield  <neal@gnu.org>

        * sysdeps/l4/hurd/pt-thread-alloc.c (__pthread_thread_alloc):
        Replace use of as_slot_ensure with as_ensure.

ruth/
2008-06-05  Neal H. Walfield  <neal@gnu.org>

        * ruth.c (main): Replace use of slot_lookup with
        as_cap_lookup.  Replace use of as_slot_ensure with
        as_ensure_use.  Replace use of slot_lookup with
        as_slot_lookup_use.
Diffstat (limited to 'libhurd-mm/as-build.c')
-rw-r--r--  libhurd-mm/as-build.c  558
1 file changed, 558 insertions, 0 deletions
diff --git a/libhurd-mm/as-build.c b/libhurd-mm/as-build.c
new file mode 100644
index 0000000..1a64306
--- /dev/null
+++ b/libhurd-mm/as-build.c
@@ -0,0 +1,558 @@
+/* as-build.c - Address space composition helper functions.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <l4.h>
+#include <stddef.h>
+#include <assert.h>
+
+#include <hurd/cap.h>
+#include <hurd/stddef.h>
+#include <hurd/folio.h>
+#include <hurd/exceptions.h>
+#include <hurd/as.h>
+#include <hurd/rm.h>
+
+#ifndef RM_INTERN
+# include <hurd/storage.h>
+#endif
+
+#include "as-compute-gbits.h"
+#include "bits.h"
+
+#ifdef RM_INTERN
+#include "../viengoos/object.h"
+#endif
+
+#ifdef ID_SUFFIX
+# define CUSTOM
+#endif
+
+#ifndef RM_INTERN
+# include <hurd/trace.h>
+
+# ifdef CUSTOM
+extern struct trace_buffer as_trace;
+# else
+/* The buffer is protected by the as_lock lock. */
+struct trace_buffer as_trace = TRACE_BUFFER_INIT ("as_trace", 0,
+ true, true, false);
+# endif
+
+# define DEBUG(level, fmt, ...) \
+ do \
+ { \
+ debug (level, fmt, ##__VA_ARGS__); \
+ trace_buffer_add (&as_trace, fmt, ##__VA_ARGS__); \
+ } \
+ while (0)
+
+# define PANIC(fmt, ...) \
+ do \
+ { \
+ trace_buffer_dump (&as_trace, 0); \
+ panic (fmt, ##__VA_ARGS__); \
+ } \
+ while (0)
+
+#else
+
+# define DEBUG(level, fmt, ...) \
+ debug (level, fmt, ##__VA_ARGS__)
+
+# define PANIC(fmt, ...) \
+ panic (fmt, ##__VA_ARGS__)
+
+#endif
+
+
+#ifdef RM_INTERN
+# define AS_DUMP as_dump_from (activity, as_root, __func__)
+#else
+# define AS_DUMP rm_as_dump (ADDR_VOID, as_root_addr)
+#endif
+
+/* The following macros allow providing specialized address-space
+ construction functions. */
+
+/* The suffix to append to as_build's name.  */
+#ifdef ID_SUFFIX
+# define ID__(a, b) a ## _ ## b
+# define ID_(a, b) ID__(a, b)
+# define ID(a) ID_(a, ID_SUFFIX)
+#else
+# define ID(a) a
+#endif
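+
+/* For example, with ID_SUFFIX defined as `custom', ID (as_build)
+   below expands to as_build_custom; without ID_SUFFIX, it is simply
+   as_build.  The two-level ID_/ID__ expansion ensures that ID_SUFFIX
+   is itself macro-expanded before the tokens are pasted.  */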
+
+/* The callback signature. For instance:
+
+ #define OBJECT_INDEX_ARG_TYPE index_callback_t
+ */
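+/* If OBJECT_INDEX_ARG_TYPE is defined, the built function takes the
+   object-index callback as an additional trailing parameter and the
+   default do_index below is not compiled.  */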
+#ifdef OBJECT_INDEX_ARG_TYPE
+# define OBJECT_INDEX_PARAM , OBJECT_INDEX_ARG_TYPE do_index
+# define OBJECT_INDEX_ARG do_index,
+#else
+
+/* When there is no user-supplied callback, we default to traversing
+ kernel objects/shadow objects. */
+
+# define OBJECT_INDEX_PARAM
+# define OBJECT_INDEX_ARG
+
+/* PTE designates a cappage or a folio.  The referenced object is at
+   address PT_ADDR.  Index the object designated by PTE, returning the
+   location of the IDX'th capability slot.  If the capability is
+   implicit (in the case of a folio), return a fabricated capability
+   in *FAKE_SLOT and return FAKE_SLOT.  Return NULL on failure.  */
+static inline struct cap *
+do_index (activity_t activity, struct cap *pte, addr_t pt_addr, int idx,
+ struct cap *fake_slot)
+{
+ assert (pte->type == cap_cappage || pte->type == cap_rcappage
+ || pte->type == cap_folio);
+
+ /* Load the referenced object. */
+ struct object *pt = cap_to_object (activity, pte);
+ if (! pt)
+ /* PTE's type was not void but its designation was invalid. This
+ can only happen if we inserted an object and subsequently
+ destroyed it. */
+ {
+ /* The type should now have been set to cap_void. */
+ assert (pte->type == cap_void);
+ PANIC ("No object at " ADDR_FMT, ADDR_PRINTF (pt_addr));
+ }
+
+ switch (pte->type)
+ {
+ case cap_cappage:
+ case cap_rcappage:
+ return &pt->caps[CAP_SUBPAGE_OFFSET (pte) + idx];
+
+ case cap_folio:;
+ struct folio *folio = (struct folio *) pt;
+
+ if (folio_object_type (folio, idx) == cap_void)
+ PANIC ("Can't use void object at " ADDR_FMT " for address translation",
+ ADDR_PRINTF (pt_addr));
+
+ *fake_slot = folio_object_cap (folio, idx);
+
+ return fake_slot;
+
+ default:
+ return NULL;
+ }
+}
+#endif
+
+/* Build up the address space at AS_ROOT_ADDR such that there is a
+   capability slot at address ADDR.  Returns a pointer to the
+   capability slot, or NULL if a page table could not be allocated.
+
+   ALLOCATE_PAGE_TABLE is a callback that is expected to allocate a
+   cappage to be used as a page table at the given address.  The
+   callback should also allocate any necessary shadow page tables.
+   It may not call address-space manipulation functions.
+
+   If MAY_OVERWRITE is true, the function may overwrite an existing
+   capability.  Otherwise, only capability slots containing a void
+   capability are used.  */
+struct cap *
+ID (as_build) (activity_t activity,
+ addr_t as_root_addr, struct cap *as_root, addr_t addr,
+ as_allocate_page_table_t allocate_page_table
+ OBJECT_INDEX_PARAM,
+ bool may_overwrite)
+{
+ struct cap *pte = as_root;
+
+ DEBUG (4, "Ensuring slot at " ADDR_FMT, ADDR_PRINTF (addr));
+ assert (! ADDR_IS_VOID (addr));
+
+ /* The number of bits to translate. */
+ int remaining = addr_depth (addr);
+ /* The REMAINING bits to translate are in the REMAINING most
+    significant bits of ADDR's prefix.  Here it is more convenient to
+    have them in the lower bits of PREFIX.  */
+ uint64_t prefix = addr_prefix (addr) >> (ADDR_BITS - remaining);
+
+ /* Folios are not made up of capability slots and cannot be written
+    to.  When traversing a folio, we manufacture a capability to the
+    designated object in FAKE_SLOT.  If ADDR ends up designating such
+    a capability, we fail.  */
+ struct cap fake_slot;
+
+ do
+ {
+ uint64_t pte_guard = CAP_GUARD (pte);
+ int pte_gbits = CAP_GUARD_BITS (pte);
+
+ /* If PTE's guard matches, the designated page table translates
+    our address.  Otherwise, we need to insert a page table and
+    redirect access to the object designated by PTE through it.  */
+
+ if (pte->type != cap_void
+ && remaining >= pte_gbits
+ && pte_guard == extract_bits64_inv (prefix, remaining - 1, pte_gbits))
+ /* PTE's (possibly zero-width) guard matches and the
+ designated object translates ADDR. */
+ {
+ if (remaining == pte_gbits && may_overwrite)
+ {
+ DEBUG (4, "Overwriting " ADDR_FMT " with " ADDR_FMT
+ " (at " ADDR_FMT ")",
+ ADDR_PRINTF (addr_extend (addr_chop (addr,
+ remaining),
+ pte_guard,
+ pte_gbits)),
+ ADDR_PRINTF (addr),
+ ADDR_PRINTF (addr_chop (addr, remaining)));
+ /* XXX: Free any data associated with the capability
+ (e.g., shadow pages). */
+ break;
+ }
+
+ /* Subtract the number of bits the guard translates. */
+ remaining -= pte_gbits;
+ assert (remaining >= 0);
+
+ if (remaining == 0)
+ /* PTE is not a void capability yet the guard translates
+ all of the bits and we may not overwrite the
+ capability. This means that PTE references an object
+ at PREFIX. This is a problem: we want to insert a
+ capability at PREFIX. */
+ {
+ AS_DUMP;
+ PANIC ("There is already a %s object at %llx/%d!",
+ cap_type_string (pte->type),
+ addr_prefix (addr), addr_depth (addr));
+ }
+
+ /* We index the object designated by PTE below. */
+ }
+ else
+ /* There are two scenarios that lead us here: (1) the PTE is
+    void, or (2) the address at which we want to insert the
+    object does not match the guard at PTE.  Perhaps in the
+    former (as we only have 22 guard bits) and definitely in
+    the latter, we need to introduce a new page table.
+
+ Consider the second scenario:
+
+ E - PTE
+ T - PTE target
+ * - New PTE
+
+ Scenario:
+
+      [ |E| | | ]        [ |E| | | ]
+          | \ pte's          |  <- (1) common guard
+          | / guard      [ | | |*| ]  <- (2) new page table
+          T                  |  |
+                             T  ...  <- (3) pivot T
+ */
+ {
+ /* For convenience, we prefer that page tables occur at /44,
+    /36, /28, etc.  This is useful: when we insert another
+    page that conflicts with the guard, we can trivially make
+    use of either 7- or 8-bit cappages rather than smaller
+    subpages.  Moreover, it ensures that as paths are
+    decompressed, the tree remains relatively shallow.  The
+    reason we don't choose /43 is that folios are 19 bits
+    wide while cappages are 8 bits and data pages 12
+    (8 + 12 = 20 bits).
+
+    Consider an AS with a single page; the PTE (*)
+    designating the object has a 20-bit guard:
+
+      [ | | |*| | | ]  <- page table
+             |         <- 20 bit guard
+             o         <- page
+
+ If we insert another page and there is a common guard of
+ 1-bit, we could reuse this bit:
+
+      [ | | | | | | ]  <- page table
+             |         <--- 1 bit guard
+      [ | | | | | | ]  <- page table
+          |   |        <-- 11-bit guards
+          o   o        <- pages
+
+ The problem with this is that if we want to avoid
+ shuffling (which we do), then when we insert a third page
+ that does not share the guard, we end up with small page
+ tables:
+
+      [ | | | | | | ]  <- page table
+             |
+           [ | ]       <- 1-bit subpage
+           /   \
+          o     o      <- 8-bit cappage
+         / \    |      <- 11-bit guards
+        o   o   o
+
+ In this scenario, a larger guard (4 bits wide) would have
+ been better.
+
+ An additional reason to prefer larger guards at specific
+ depths is that it makes removing entries from the tree
+ easier. */
+
+ /* The number of bits until the next object. */
+ int tilobject;
+
+ /* GBITS is the amount of guard that we use to point to the
+ cappage we will allocate.
+
+    REMAINING - GBITS - log2 (sizeof (cappage)) is the guard
+    length of each entry in the new page table.  */
+ int gbits;
+ if (pte->type == cap_void)
+ {
+ int space = l4_msb64 (extract_bits64 (prefix, 0, remaining));
+ if (space <= CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
+ /* The slot is available and the remaining bits to
+ translate fit in the guard. */
+ break;
+
+ gbits = tilobject = remaining;
+ }
+ else
+ /* Find the size of the common prefix. */
+ {
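+   /* A holds the old guard and B the corresponding bits of our
+      prefix, aligned to the same width; the most significant set
+      bit of A ^ B marks the first disagreement, so GBITS leading
+      bits are common to both.  */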
+ uint64_t a = pte_guard;
+ int max = pte_gbits > remaining ? remaining : pte_gbits;
+ uint64_t b = extract_bits64_inv (prefix, remaining - 1, max);
+ if (remaining < pte_gbits)
+ a >>= pte_gbits - remaining;
+
+ gbits = max - l4_msb64 (a ^ b);
+
+ tilobject = pte_gbits;
+ }
+
+ /* Make sure that the guard to use fits in the guard
+ area. */
+ int firstset = l4_msb64 (extract_bits64_inv (prefix,
+ remaining - 1, gbits));
+ if (firstset > CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS)
+ /* FIRSTSET is the first (most significant) non-zero guard
+    bit.  GBITS - FIRSTSET is the number of zero bits
+    before the most significant non-zero bit.  We can
+    include all of the initial zero bits plus up to the
+    next CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS bits.  */
+ gbits = (gbits - firstset) + CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS;
+
+ /* We want to choose the guard length such that the cappage
+    that we insert occurs at certain positions so as to
+    minimize small partial cappages and painful rearrangements
+    of the tree.  In particular, we want the total remaining
+    bits to translate after accounting for the guard to be
+    equal to FOLIO_OBJECTS_LOG2 + i * CAPPAGE_SLOTS_LOG2 where
+    i >= 0.  As GBITS is maximal, we may have to remove guard
+    bits to achieve this.  */
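+ /* For instance, assuming 63-bit addresses, 12-bit data pages,
+    19-bit folios (FOLIO_OBJECTS_LOG2 = 7) and
+    CAPPAGE_SLOTS_LOG2 = 8, as described above: a page table at
+    /44 leaves 19 bits, the 7-bit folio index plus the page
+    offset (i = 0); at /36, 27 bits remain (i = 1); at /28, 35
+    bits (i = 2).  This reproduces the preferred depths
+    discussed above.  */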
+ int untranslated_bits = remaining + ADDR_BITS - addr_depth (addr);
+
+ struct as_guard_cappage gc
+ = as_compute_gbits_cappage (untranslated_bits,
+ tilobject, gbits);
+ assert (gc.gbits <= gbits);
+ assert (gc.gbits + gc.cappage_width <= tilobject);
+ gbits = gc.gbits;
+
+ /* Account for the bits translated by the guard.  */
+ remaining -= gbits;
+
+ int subpage_bits = gc.cappage_width;
+ assert (subpage_bits >= 0);
+ assert (subpage_bits <= CAPPAGE_SLOTS_LOG2);
+
+ /* Allocate a new page table. */
+ /* XXX: If we use a subpage, we just ignore the rest of the
+ page. This is a bit of a waste but makes the code
+ simpler. */
+ /* ALLOCATE_PAGE_TABLE wants the address at which the page
+    table will be inserted, i.e., ADDR with the REMAINING
+    as-yet-untranslated bits chopped off.  */
+ addr_t pt_addr = addr_chop (addr, remaining);
+ struct as_allocate_pt_ret rt = allocate_page_table (pt_addr);
+ if (rt.cap.type == cap_void)
+ /* No memory. */
+ return NULL;
+
+ /* We've now allocated a new page table.
+
+ * - PTE
+ & - pivot
+ $ - new PTE
+
+      Before:                After:
+
+      [ |*| | | | ]          [ |*| | | | ]
+          |                      |  <- shortened guard
+          |  <- orig. guard      v
+          v                  [ |&| | |$| ]  <- new page table
+      [ | | | | | ]              |
+                                 v
+                             [ | | | | | ]
+
+ Algorithm:
+
+ 1) Copy contents of PTE to pivot.
+ 2) Set PTE to point to new page table.
+ 3) Index new page table to continue address translation
+ (note: the new PTE may be the same as the pivot).
+ */
+
+ /* 1.a) Get the pivot PTE. */
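+      /* The pivot's index consists of the SUBPAGE_BITS bits of the
+	 old guard that immediately follow the GBITS bits now consumed
+	 by the shortened guard: those bits index the new page
+	 table.  */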
+ int pivot_idx = extract_bits_inv (pte_guard,
+ pte_gbits - gbits - 1,
+ subpage_bits);
+
+ /* do_index requires that the subpage specification be
+ correct. */
+ struct cap pt_cap = rt.cap;
+ CAP_SET_SUBPAGE (&pt_cap,
+ 0, 1 << (CAPPAGE_SLOTS_LOG2 - subpage_bits));
+
+ struct cap *pivot_cap = do_index (activity,
+ &pt_cap, pt_addr,
+ pivot_idx, &fake_slot);
+ assert (pivot_cap != &fake_slot);
+
+ addr_t pivot_addr = addr_extend (rt.storage,
+ pivot_idx,
+ CAPPAGE_SLOTS_LOG2);
+
+ /* 1.b) Make the pivot designate the object the PTE
+ currently designates. */
+ addr_t pte_addr = addr_chop (pt_addr, gbits);
+
+ struct cap_addr_trans addr_trans = pte->addr_trans;
+ int d = tilobject - gbits - subpage_bits;
+ CAP_ADDR_TRANS_SET_GUARD (&addr_trans,
+ extract_bits64 (pte_guard, 0, d), d);
+
+ bool r = cap_copy_x (activity,
+ ADDR_VOID, pivot_cap, pivot_addr,
+ as_root_addr, *pte, pte_addr,
+ CAP_COPY_COPY_ADDR_TRANS_GUARD,
+ CAP_PROPERTIES (OBJECT_POLICY_DEFAULT,
+ addr_trans));
+ assert (r);
+
+ /* 2) Set PTE to point to PT. */
+ pte_guard = extract_bits64_inv (pte_guard,
+ pte_gbits - 1, gbits);
+ r = CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&addr_trans,
+ pte_guard, gbits,
+ 0 /* We always use the
+ first subpage in
+ a page. */,
+ 1 << (CAPPAGE_SLOTS_LOG2
+ - subpage_bits));
+ assert (r);
+
+ r = cap_copy_x (activity, as_root_addr, pte, pte_addr,
+ ADDR_VOID, pt_cap, rt.storage,
+ CAP_COPY_COPY_ADDR_TRANS_SUBPAGE
+ | CAP_COPY_COPY_ADDR_TRANS_GUARD,
+ CAP_PROPERTIES (OBJECT_POLICY_DEFAULT, addr_trans));
+ assert (r);
+ }
+
+ /* Index the object designated by PTE to find the next PTE. The
+ guard has already been translated. */
+ int width;
+ switch (pte->type)
+ {
+ case cap_cappage:
+ case cap_rcappage:
+ width = CAP_SUBPAGE_SIZE_LOG2 (pte);
+ break;
+
+ case cap_folio:
+ width = FOLIO_OBJECTS_LOG2;
+ break;
+
+ default:
+ AS_DUMP;
+ PANIC ("Can't insert object at " ADDR_FMT ": "
+ "%s at " ADDR_FMT " does not translate address bits "
+ "(remaining: %d, gbits: %d, pte guard: %d, my guard: %d)",
+ ADDR_PRINTF (addr), cap_type_string (pte->type),
+ ADDR_PRINTF (addr_chop (addr, remaining)),
+ remaining, pte_gbits, pte_guard,
+ extract_bits64_inv (prefix, remaining - 1, pte_gbits));
+ }
+
+ /* That should not be more than we have left to translate. */
+ if (width > remaining)
+ {
+ AS_DUMP;
+ PANIC ("Translating " ADDR_FMT ": can't index %d-bit %s at "
+ ADDR_FMT "; not enough bits (%d)",
+ ADDR_PRINTF (addr), width, cap_type_string (pte->type),
+ ADDR_PRINTF (addr_chop (addr, remaining)), remaining);
+ }
+
+ int idx = extract_bits64_inv (prefix, remaining - 1, width);
+
+ enum cap_type type = pte->type;
+ pte = do_index (activity, pte, addr_chop (addr, remaining), idx,
+ &fake_slot);
+ if (! pte)
+ PANIC ("Failed to index object at " ADDR_FMT,
+ ADDR_PRINTF (addr_chop (addr, remaining)));
+
+ if (type == cap_folio)
+ assert (pte == &fake_slot);
+ else
+ assert (pte != &fake_slot);
+
+ remaining -= width;
+ }
+ while (remaining > 0);
+
+ if (! may_overwrite)
+ assertx (pte->type == cap_void,
+ ADDR_FMT " contains a %s but may not overwrite",
+ ADDR_PRINTF (addr), cap_type_string (pte->type));
+
+ int gbits = remaining;
+ /* It is safe to use an int as a guard has at most 22 bits.  */
+ int guard = extract_bits64 (prefix, 0, gbits);
+ if (gbits != CAP_GUARD_BITS (pte) || guard != CAP_GUARD (pte))
+ {
+ struct cap_addr_trans addr_trans = CAP_ADDR_TRANS_VOID;
+ bool r = CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&addr_trans, guard, gbits,
+ 0, 1);
+ assert (r);
+ r = cap_copy_x (activity, as_root_addr, pte, addr_chop (addr, gbits),
+ as_root_addr, *pte, addr_chop (addr, gbits),
+ CAP_COPY_COPY_ADDR_TRANS_GUARD,
+ CAP_PROPERTIES (OBJECT_POLICY_DEFAULT, addr_trans));
+ assert (r);
+ }
+
+ return pte;
+}
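+
+/* A minimal usage sketch (illustrative only: MY_ALLOCATE_PAGE_TABLE
+   is a hypothetical callback, not something defined in this file).
+   In the default, non-CUSTOM build, a caller might write:
+
+     static struct as_allocate_pt_ret
+     my_allocate_page_table (addr_t addr)
+     {
+       struct as_allocate_pt_ret rt;
+       ... allocate a cappage (and any shadow page table) to serve
+       ... as a page table at ADDR, filling RT.CAP and RT.STORAGE;
+       ... return a void capability in RT.CAP to signal failure.
+       return rt;
+     }
+
+     struct cap *slot = as_build (activity, as_root_addr, as_root,
+                                  addr, my_allocate_page_table, false);
+     if (! slot)
+       panic ("Out of memory allocating a page table.");
+
+   On success, SLOT points to a void capability slot at ADDR (since
+   MAY_OVERWRITE is false here) which the caller can then fill, for
+   example with cap_copy_x.  */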