-rw-r--r--  benchmarks/ChangeLog | 11
-rw-r--r--  benchmarks/activity-distribution.c | 10
-rw-r--r--  benchmarks/cache.c | 11
-rw-r--r--  benchmarks/shared-memory-distribution.c | 7
-rw-r--r--  hieronymus/ChangeLog | 8
-rw-r--r--  hieronymus/hieronymus.c | 12
-rw-r--r--  hurd/ChangeLog | 236
-rw-r--r--  hurd/RPC | 7
-rw-r--r--  hurd/activity.h | 12
-rw-r--r--  hurd/cap.h | 134
-rw-r--r--  hurd/exceptions.h | 181
-rw-r--r--  hurd/folio.h | 42
-rw-r--r--  hurd/futex.h | 81
-rw-r--r--  hurd/headers.m4 | 3
-rw-r--r--  hurd/ipc.h | 296
-rw-r--r--  hurd/message.h | 229
-rw-r--r--  hurd/messenger.h | 87
-rw-r--r--  hurd/rpc.h | 1041
-rw-r--r--  hurd/startup.h | 21
-rw-r--r--  hurd/t-rpc.c | 123
-rw-r--r--  hurd/thread.h | 346
-rw-r--r--  libc-parts/ChangeLog | 23
-rw-r--r--  libc-parts/_exit.c | 2
-rw-r--r--  libc-parts/backtrace.c | 112
-rw-r--r--  libc-parts/ia32-crt0.S | 4
-rw-r--r--  libc-parts/process-spawn.c | 59
-rw-r--r--  libc-parts/s_printf.c | 25
-rw-r--r--  libhurd-mm/ChangeLog | 117
-rw-r--r--  libhurd-mm/Makefile.am | 1
-rw-r--r--  libhurd-mm/anonymous.c | 43
-rw-r--r--  libhurd-mm/anonymous.h | 6
-rw-r--r--  libhurd-mm/as-build.c | 26
-rw-r--r--  libhurd-mm/as-dump.c | 24
-rw-r--r--  libhurd-mm/as-lookup.c | 60
-rw-r--r--  libhurd-mm/as.c | 15
-rw-r--r--  libhurd-mm/as.h | 12
-rw-r--r--  libhurd-mm/exceptions.c | 1035
-rw-r--r--  libhurd-mm/headers.m4 | 1
-rw-r--r--  libhurd-mm/ia32-exception-entry.S | 366
-rw-r--r--  libhurd-mm/map.c | 8
-rw-r--r--  libhurd-mm/map.h | 6
-rw-r--r--  libhurd-mm/message-buffer.c | 315
-rw-r--r--  libhurd-mm/message-buffer.h | 80
-rw-r--r--  libhurd-mm/mm-init.c | 197
-rw-r--r--  libhurd-mm/pager.h | 9
-rw-r--r--  libhurd-mm/storage.c | 45
-rw-r--r--  libpthread/ChangeLog | 53
-rw-r--r--  libpthread/Makefile.am | 1
-rw-r--r--  libpthread/sysdeps/viengoos/bits/pthread-np.h | 4
-rw-r--r--  libpthread/sysdeps/viengoos/ia32/pt-setup.c | 26
-rw-r--r--  libpthread/sysdeps/viengoos/ia32/signal-dispatch-lowlevel.c | 11
-rw-r--r--  libpthread/sysdeps/viengoos/pt-block.c | 14
-rw-r--r--  libpthread/sysdeps/viengoos/pt-hurd-utcb-np.c | 28
-rw-r--r--  libpthread/sysdeps/viengoos/pt-setactivity-np.c | 5
-rw-r--r--  libpthread/sysdeps/viengoos/pt-sysdep.h | 24
-rw-r--r--  libpthread/sysdeps/viengoos/pt-thread-alloc.c | 67
-rw-r--r--  libpthread/sysdeps/viengoos/pt-thread-dealloc.c | 22
-rw-r--r--  libpthread/sysdeps/viengoos/pt-thread-halt.c | 21
-rw-r--r--  libpthread/sysdeps/viengoos/pt-thread-start.c | 16
-rw-r--r--  libpthread/sysdeps/viengoos/pt-wakeup.c | 7
-rw-r--r--  ruth/ChangeLog | 12
-rw-r--r--  ruth/ruth.c | 124
-rw-r--r--  viengoos/ChangeLog | 112
-rw-r--r--  viengoos/Makefile.am | 1
-rw-r--r--  viengoos/ager.c | 28
-rw-r--r--  viengoos/cap.c | 37
-rw-r--r--  viengoos/messenger.c | 347
-rw-r--r--  viengoos/messenger.h | 214
-rw-r--r--  viengoos/object.c | 318
-rw-r--r--  viengoos/object.h | 57
-rw-r--r--  viengoos/pager.c | 24
-rw-r--r--  viengoos/rm.h | 48
-rw-r--r--  viengoos/server.c | 1472
-rw-r--r--  viengoos/thread.c | 220
-rw-r--r--  viengoos/thread.h | 114
75 files changed, 6473 insertions, 2443 deletions
diff --git a/benchmarks/ChangeLog b/benchmarks/ChangeLog
index a3508cf..118609d 100644
--- a/benchmarks/ChangeLog
+++ b/benchmarks/ChangeLog
@@ -1,3 +1,14 @@
+2008-12-12 Neal H. Walfield <neal@gnu.org>
+
+ Update according to new RPC interfaces.
+ * activity-distribution.c (main): Update use of rm_activity_policy
+ and rm_activity_info to be consistent with the new interface.
+ Replace use of `struct exception_info' with `struct
+ activation_fault_info'.
+ * cache.c (helper): Update use of rm_activity_policy and
+ rm_activity_info to be consistent with the new interface.
+ * shared-memory-distribution.c (main): Likewise.
+
2008-11-18 Neal H. Walfield <neal@gnu.org>
* cache.c: New file.
diff --git a/benchmarks/activity-distribution.c b/benchmarks/activity-distribution.c
index 966d8af..46724b6 100644
--- a/benchmarks/activity-distribution.c
+++ b/benchmarks/activity-distribution.c
@@ -47,7 +47,7 @@ main (int argc, char *argv[])
in.sibling_rel.priority = i == 0 ? 2 : 1;
in.sibling_rel.weight = i + 1;
struct activity_policy out;
- err = rm_activity_policy (activities[i],
+ err = rm_activity_policy (activity, activities[i],
ACTIVITY_POLICY_SIBLING_REL_SET, in,
&out);
assert (err == 0);
@@ -62,7 +62,7 @@ main (int argc, char *argv[])
{
struct activity_info info;
- err = rm_activity_info (activity, activity_info_stats, 1, &info);
+ err = rm_activity_info (activity, activity, activity_info_stats, 1, &info);
assert (err == 0);
assert (info.event == activity_info_stats);
assert (info.stats.count >= 1);
@@ -74,7 +74,7 @@ main (int argc, char *argv[])
bool my_fill (struct anonymous_pager *anon,
uintptr_t offset, uintptr_t count,
void *pages[],
- struct exception_info info)
+ struct activation_fault_info info)
{
uintptr_t *p = pages[0];
p[0] = offset;
@@ -159,7 +159,7 @@ main (int argc, char *argv[])
struct activity_info info;
- rm_activity_info (activity, activity_info_stats,
+ rm_activity_info (activity, activity, activity_info_stats,
next_period, &info);
assert (info.event == activity_info_stats);
assert (info.stats.count > 0);
@@ -173,7 +173,7 @@ main (int argc, char *argv[])
int j;
for (j = 0; j < THREADS; j ++)
{
- rm_activity_info (activity, activity_info_stats,
+ rm_activity_info (activity, activity, activity_info_stats,
next_period, &info);
assert (info.event == activity_info_stats);
assert (info.stats.count > 0);
diff --git a/benchmarks/cache.c b/benchmarks/cache.c
index 536dc9b..33842af 100644
--- a/benchmarks/cache.c
+++ b/benchmarks/cache.c
@@ -185,6 +185,7 @@ static struct hurd_ihash cache;
#include <hurd/activity.h>
#include <pthread.h>
#include <hurd/anonymous.h>
+#include <hurd/as.h>
#include <string.h>
addr_t main_activity;
@@ -210,7 +211,7 @@ helper (void *arg)
/* First the main thread. */
error_t err;
- err = rm_activity_info (main_activity, activity_info_stats,
+ err = rm_activity_info (ADDR_VOID, main_activity, activity_info_stats,
stat_count == 0
? 0 : stats[stat_count - 1].period + 1,
&info);
@@ -226,7 +227,7 @@ helper (void *arg)
stats[stat_count].period = info.stats.stats[0].period;
/* Then, the hog. */
- err = rm_activity_info (hog_activity, activity_info_stats,
+ err = rm_activity_info (ADDR_VOID, hog_activity, activity_info_stats,
stat_count == 0
? 0 : stats[stat_count - 1].period + 1,
&info);
@@ -418,15 +419,15 @@ helper_fork (void)
in.child_rel.priority = 2;
in.child_rel.weight = 20;
- err = rm_activity_policy (ADDR_VOID,
+ err = rm_activity_policy (ADDR_VOID, meta_data_activity,
ACTIVITY_POLICY_CHILD_REL_SET, in, &out);
assert (err == 0);
- err = rm_activity_policy (hog_activity,
+ err = rm_activity_policy (ADDR_VOID, hog_activity,
ACTIVITY_POLICY_SIBLING_REL_SET, in, &out);
assert (err == 0);
- err = rm_activity_policy (main_activity,
+ err = rm_activity_policy (ADDR_VOID, main_activity,
ACTIVITY_POLICY_SIBLING_REL_SET, in, &out);
assert (err == 0);
diff --git a/benchmarks/shared-memory-distribution.c b/benchmarks/shared-memory-distribution.c
index 04b37c2..482f0c1 100644
--- a/benchmarks/shared-memory-distribution.c
+++ b/benchmarks/shared-memory-distribution.c
@@ -110,7 +110,8 @@ main (int argc, char *argv[])
struct activity_info info;
- rm_activity_info (activity, activity_info_stats, next_period, &info);
+ rm_activity_info (activity, activity,
+ activity_info_stats, next_period, &info);
assert (info.event == activity_info_stats);
assert (info.stats.count > 0);
if (i != 0)
@@ -123,8 +124,8 @@ main (int argc, char *argv[])
int j;
for (j = 0; j < THREADS; j ++)
{
- rm_activity_info (activity, activity_info_stats, next_period,
- &info);
+ rm_activity_info (activity, activity,
+ activity_info_stats, next_period, &info);
assert (info.event == activity_info_stats);
assert (info.stats.count > 0);
stats[i][1 + j] = info.stats.stats[0];
diff --git a/hieronymus/ChangeLog b/hieronymus/ChangeLog
index 11944f0..4952cdc 100644
--- a/hieronymus/ChangeLog
+++ b/hieronymus/ChangeLog
@@ -1,3 +1,11 @@
+2008-12-12 Neal H. Walfield <neal@gnu.org>
+
+ Update according to new RPC interfaces.
+ * hieronymus.c (activity_alloc): Update use of rm_activity_policy,
+ rm_activity_info and rm_folio_object_alloc to be consistent with
+ new interface. Replace use of rm_thread_wait_object_destroyed
+ with rm_object_reply_on_destruction.
+
2008-07-08 Neal H. Walfield <neal@gnu.org>
* Makefile.am (modules.h): Generate an array of arguments from
diff --git a/hieronymus/hieronymus.c b/hieronymus/hieronymus.c
index 0727d45..0d99c94 100644
--- a/hieronymus/hieronymus.c
+++ b/hieronymus/hieronymus.c
@@ -72,7 +72,7 @@ activity_alloc (struct activity_policy policy)
panic ("Failed to allocate storage.");
struct activity_policy out;
- error_t err = rm_activity_policy (storage.addr,
+ error_t err = rm_activity_policy (ADDR_VOID, storage.addr,
ACTIVITY_POLICY_STORAGE_SET
| ACTIVITY_POLICY_CHILD_REL_SET
| ACTIVITY_POLICY_SIBLING_REL_SET,
@@ -137,8 +137,8 @@ do_gather_stats (void *arg)
for (i = 0; i < module_count; i ++, stat ++)
{
error_t err;
- err = rm_activity_info (activities[i], activity_info_stats,
- period, &info);
+ err = rm_activity_info (ADDR_VOID, activities[i], activity_info_stats,
+ period, &info);
assert_perror (err);
assert (info.event == activity_info_stats);
assert (info.stats.count > 0);
@@ -336,8 +336,8 @@ main (int argc, char *argv[])
for (i = 0; i < module_count; i ++)
{
uintptr_t rt = -1;
- rm_thread_wait_object_destroyed (root_activity,
- thread[i], &rt);
+ rm_object_reply_on_destruction (root_activity,
+ thread[i], &rt);
addr_t folio = addr_chop (activities[i], FOLIO_OBJECTS_LOG2);
int index = addr_extract (activities[i], FOLIO_OBJECTS_LOG2);
@@ -346,7 +346,7 @@ main (int argc, char *argv[])
err = rm_folio_object_alloc (ADDR_VOID, folio, index,
cap_void, OBJECT_POLICY_VOID,
(uintptr_t) rt,
- ADDR_VOID, ADDR_VOID);
+ NULL, NULL);
if (err)
debug (0, "deallocating object: %d", err);
diff --git a/hurd/ChangeLog b/hurd/ChangeLog
index 7d15920..3c74c1c 100644
--- a/hurd/ChangeLog
+++ b/hurd/ChangeLog
@@ -1,3 +1,239 @@
+2008-12-11 Neal H. Walfield <neal@gnu.org>
+
+ Adapt RPC interfaces according to changes in IPC semantics.
+
+ * messenger.h: New file.
+ * message.h: New file.
+ * ipc.h: New file.
+ * headers.m4: Link sysroot/include/hurd/message.h to
+ hurd/message.h, sysroot/include/hurd/messenger.h to
+ hurd/messenger.h, and sysroot/include/hurd/ipc.h to hurd/ipc.h.
+ * cap.h: Include <hurd/addr.h> and <stdbool.h>.
+ (enum cap_type): Define cap_messenger, cap_rmessenger and
+ cap_type_count.
+ (cap_type_string): Handle cap_messenger and cap_rmessenger.
+ (cap_types_compatible): Likewise.
+ (cap_type_weak_p): Likewise.
+ (cap_type_weaken): Likewise.
+ (cap_type_strengthen): Likewise.
+ (oid_t): Replace L4 type with standard type.
+ (CAP_VOID): Define.
+
+ * rpc.h [! RPC_TARGET]: Don't error out if not defined.
+ [RPC_TARGET_ARG_]: Don't define or undefine.
+ [RPC_TARGET_]: Likewise.
+ [RPC_TARGET_NEED_ARG]: Ignore.
+ Don't include <l4/ipc.h> or <l4/space.h>. Include
+ <hurd/message.h> and <hurd/ipc.h>.
+ (reply_buffer) [RM_INTERN]: Declare.
+ (messenger_message_load) [RM_INTERN]: Likewise.
+ [! RM_INTERN] Include <hurd/message-buffer.h>.
+ (cap_t): Define.
+ (CPP_FOREACH): Define.
+ (CPP_SAFE_DEREF): Likewise.
+ (RPC_ARGUMENTS): Take additional argument prefix. Use it. Update
+ users.
+ (RPC_CHOP): Rename from this...
+ (RPC_CHOP2): ... to this. Update users.
+ (RPC_TYPE_SHIFT): New define.
+ (RPCLOADARG): Rewrite according to new marshalling semantics.
+ (RPCSTOREARG): Likewise.
+ (RPC_SEND_MARSHAL): Likewise.
+ (RPC_SEND_UNMARSHAL): Likewise.
+ (RPC_REPLY_MARSHAL): Likewise.
+ (RPC_REPLY_UNMARSHAL): Likewise.
+ (RPC_RECEIVE_MARSHAL): New define.
+ (RPC_MARSHAL_GEN_): Break this into...
+ (RPC_SEND_MARSHAL_GEN_): ... this...
+ (RPC_RECEIVE_MARSHAL_GEN_): ... this...
+ (RPC_REPLY_MARSHAL_GEN_): ... and this. Update users.
+ (RPC_MARSHAL_GEN_): Redefine in terms of the new macros.
+ (RPC_SEND_): Rewrite according to new marshalling and IPC
+ semantics.
+ (RPC_SEND_NONBLOCKING_): Define.
+ (RPC_): Rewrite according to new marshalling and IPC
+ semantics.
+ (RPC_REPLY_): Likewise.
+ (RPC_SIMPLE_): Don't define.
+ (RPC_SIMPLE): Don't define.
+ (RPC): Take additional argument ret_cap_count. Update users.
+ (rpc_error_reply_marshal): Rewrite according to new marshalling
+ and IPC semantics.
+ (rpc_error_reply): Likewise.
+ * t-rpc.c (RPC_TARGET_NEED_ARG): Don't define.
+ (RPC_TARGET): Define.
+	(RPC_noargs): Set to a large integer.
+ (RPC_caps): New define.
+ (noargs): Update interface specification according to new IDL
+ interface. Update users.
+ (onein): Likewise.
+ (oneout): Likewise.
+ (onlyin): Likewise.
+ (onlyout): Likewise.
+ (mix): Likewise.
+ (noargs): Likewise.
+ (onein): Likewise.
+ (oneout): Likewise.
+ (onlyin): Likewise.
+ (onlyout): Likewise.
+ (mix): New interface.
+ (RPC_TARGET): Don't undefine.
+ (main): Update to use the new RPC marshalling interface. Write a
+ test using the new `mix' interface.
+ * activity.h (RPC_TARGET_NEED_ARG): Don't undefine.
+ (RPC_TARGET): Don't define.
+ (activity_policy): Update interface specification according to new
+ IDL interface. Update users.
+ (activity_info): Likewise.
+ * cap.h: (RPC_TARGET_NEED_ARG): Don't undefine.
+ (RPC_TARGET): Don't define.
+ (RM_object_slot_copy_out): Don't define.
+ (RM_object_slot_copy_in): Likewise.
+ (RM_object_slot_read): Likewise.
+ (RM_object_reply_on_destruction): Define.
+ (cap_copy): Update interface specification according to new
+ IDL interface. Update users.
+ (cap_rubout): Likewise.
+ (cap_read): Likewise.
+ (object_discarded_clear): Likewise.
+ (object_discard): Likewise.
+ (object_status): Likewise.
+ (object_name): Likewise.
+ (object_reply_on_destruction): New interface replacing
+ thread_wait_destroy.
+ (object_slot_copy_out): Remove interface.
+ (object_slot_copy_in): Likewise.
+ (object_slot_read): Likewise.
+ (RPC_TARGET): Don't undefine.
+ * exceptions.h: Don't include <l4/thread.h>. Include
+ <l4/space.h>.
+ (RPC_STUB_PREFIX): Redefine to `activation'.
+ (RPC_ID_PREFIX EXCEPTION): Redefine to `ACTIVATION'.
+ (RPC_TARGET_NEED_ARG): Don't define.
+ (RPC_TARGET_ARG_TYPE): Likewise.
+ (RPC_TARGET): Likewise.
+ (EXCEPTION_fault): Rename from this...
+ (ACTIVATION_fault): ... to this. Update users.
+ (exception_method_id_string): Rename from this...
+ (activation_method_id_string): ... to this.
+ (struct exception_info): Rename from this...
+ (struct activation_fault_info): ... to this. Update users.
+ (EXCEPTION_INFO_FMT): Rename from this...
+ (ACTIVATION_FAULT_INFO_FMT): ... to this. Update users.
+ (EXCEPTION_INFO_PRINTF): Rename from this...
+ (ACTIVATION_FAULT_INFO_PRINTF): ... to this. Update users.
+ (fault): Update interface specification according to new IDL
+ interface. Update users.
+ * folio.h (RPC_TARGET_NEED_ARG): Don't undefine.
+ (RPC_TARGET): Don't define.
+ (folio_alloc): Update interface specification according to new IDL
+ interface. Update users.
+ (folio_free): Likewise.
+ (folio_object_alloc): Likewise.
+ (folio_policy): Likewise.
+ (RPC_TARGET): Don't undefine.
+ * futex.h (RPC_TARGET_NEED_ARG): Don't undefine.
+ (RPC_TARGET): Don't define.
+ (futex): Update interface specification according to new IDL
+ interface. Update users.
+ (RPC_TARGET): Don't undefine.
+ (futex_using): New function.
+ (futex): Implement in terms of it.
+ (futex_wait_using): New function.
+ (futex_wait): Implement in terms of it.
+ (futex_wake_using): New function.
+ (futex_wake): Implement in terms of it.
+ * thread.h (RM_thread_wait_object_destroyed): Don't define.
+ (RM_thread_raise_exception): Rename from this...
+ (RM_thread_activation_collect): ... to this.
+ (RM_thread_id): Define.
+ (RPC_TARGET_NEED_ARG): Don't undefine.
+ (RPC_TARGET): Don't define.
+ (struct hurd_thread_exregs_in): Remove fields aspace, activity,
+ exception_page, aspace_out, activity_out and exception_page_out.
+ (thread_exregs): Update interface specification according to new
+ IDL interface. Add additional parameters exception_messenger and
+ exception_messenger_out. Update users.
+ (thread_wait_object_destroyed): Remove interface.
+ (struct exception_buffer): Don't define.
+ (thread_raise_exception): Remove interface.
+ (thread_id): New interface.
+ (thread_activation_collect): Likewise.
+ (RPC_TARGET): Don't undefine.
+ * RPC: Update.
+
+ * exceptions.h (hurd_activation_handler_init_early): New
+ declaration.
+ (hurd_activation_handler_init): Likewise.
+ (hurd_utcb): Likewise.
+ (EXCEPTION_STACK_SIZE_LOG2): Don't define.
+ (EXCEPTION_STACK_SIZE): Likewise.
+ (hurd_activation_state_alloc): New declaration.
+ (exception_page_cleanup): Rename from this...
+ (hurd_activation_state_free): ... to this. Update users.
+ (exception_handler_activated): Rename from this...
+ (hurd_activation_handler_activated): ... to this.
+ (exception_handler_normal): Rename from this...
+ (hurd_activation_handler_normal): ... to this. Update users.
+ Take additional parameter utcb.
+ (exception_handler_entry): Rename from this...
+ (hurd_activation_handler_entry): ... to this.
+ (exception_handler_end): Rename from this...
+ (hurd_activation_handler_end): ... to this.
+ (hurd_activation_message_register): New declaration.
+ (hurd_activation_message_unregister): Likewise.
+ (hurd_activation_stack_dump): Likewise.
+ * thread.h [! __have_vg_thread_id_t] (__have_vg_thread_id_t):
+ Define.
+ [! __have_vg_thread_id_t && USE_L4]: Include <l4.h>.
+ [! __have_vg_thread_id_t && !USE_L4]: Include <stdint.h>.
+ [! __have_vg_thread_id_t] (vg_thread_id_t): Define.
+ [! __have_vg_thread_id_t] (vg_niltid): Define.
+ [! __have_vg_thread_id_t] (VG_THREAD_ID_FMT): Define.
+ [! __have_activation_frame] (__have_activation_frame): Define.
+ [! __have_activation_frame && USE_L4]: Include <l4/ipc.h>.
+ [! __have_activation_frame] (struct hurd_message_buffer): Declare.
+ [! __have_activation_frame] (struct activation_frame): Define in
+ this case. Add fields normal_mode_stack and canary.
+ [! __have_activation_frame && i386] (struct activation_frame):
+ Change regs to have 10 elements. Add fields eax, ecx, edx,
+ eflags, eip, ebx, edi, esi, ebp and esp.
+ [! __have_activation_frame && !USE_L4] (struct activation_frame):
+ Remove fields saved_sender, saved_receiver, saved_timeout,
+ saved_error_code, saved_flags, and saved_br0 in this case.
+ [__need_vg_thread_id_t || __need_activation_frame]
+ (__need_vg_thread_id_t): Undefine.
+ [__need_vg_thread_id_t || __need_activation_frame]
+ (__need_activation_frame): Likewise.
+ [!__need_vg_thread_id_t && !__need_activation_frame]: Include the
+ rest of the file in this case. Include <stdint.h>,
+ <hurd/types.h>, <hurd/addr.h>, <hurd/addr-trans.h>, <hurd/cap.h>,
+ <hurd/messenger.h> and <setjmp.h>.
+ (hurd_activation_frame_longjmp): New declaration.
+ (struct hurd_fault_catcher): New definition.
+ (hurd_fault_catcher_register): New declaration.
+ (hurd_fault_catcher_unregister): Likewise.
+ (struct exception_page): Rename from this...
+ (struct vg_utcb): ... to this. Update users. Remove field
+ exception. Add fields protected_payload, messenger_id,
+ inline_words, inline_caps, inline_word_count, inline_cap_count,
+ inline_data, exception_buffer, extant_messages, catchers,
+ alternate_stack, alternate_stack_inuse, canary0, canary1.
+ (UTCB_CANARY0): Define.
+ (UTCB_CANARY1): Likewise.
+ (THREAD_EXCEPTION_PAGE_SLOT): Rename from this...
+ (THREAD_UTCB): ... to this.
+ (THREAD_EXCEPTION_MESSENGER): Define.
+ (THREAD_SLOTS): Likewise.
+ (THREAD_SLOTS_LOG2): Likewise.
+ (HURD_EXREGS_SET_EXCEPTION_PAGE): Rename from this...
+ (HURD_EXREGS_SET_UTCB): ... to this. Update users.
+ (HURD_EXREGS_SET_EXCEPTION_MESSENGER): Define.
+ (HURD_EXREGS_SET_REGS): Add HURD_EXREGS_SET_EXCEPTION_MESSENGER.
+ (vg_myself): New function.
+
+ * startup.h (struct hurd_startup_data): Add field messengers.
+
2008-12-10 Neal H. Walfield <neal@gnu.org>
* stddef.h: When checking if compiling for i386, check if i386 is
diff --git a/hurd/RPC b/hurd/RPC
index 61969c5..2268bda 100644
--- a/hurd/RPC
+++ b/hurd/RPC
@@ -4,8 +4,9 @@ RPC Method id Assignments
100: miscellaneous (putchar, etc.)
200: folio
300: cap
-400: object
-500: exceptions
+400: generic object
+500: activation
600: thread
700: activity
-800: futex
\ No newline at end of file
+800: futex
+900: messenger
diff --git a/hurd/activity.h b/hurd/activity.h
index 8a4902d..dc77fc0 100644
--- a/hurd/activity.h
+++ b/hurd/activity.h
@@ -169,12 +169,6 @@ struct activity_stats
#define RPC_STUB_PREFIX rm
#define RPC_ID_PREFIX RM
-#undef RPC_TARGET_NEED_ARG
-#define RPC_TARGET \
- ({ \
- extern struct hurd_startup_data *__hurd_startup_data; \
- __hurd_startup_data->rm; \
- })
#include <hurd/rpc.h>
@@ -194,7 +188,8 @@ enum
};
/* Get ACTIVITY's policy and set according to FLAGS and IN. */
-RPC (activity_policy, 3, 1, addr_t, activity,
+RPC (activity_policy, 2, 1, 0,
+ /* cap_t principal, cap_t activity */
uintptr_t, flags, struct activity_policy, in,
/* Out: */
struct activity_policy, out);
@@ -248,7 +243,8 @@ struct activity_info
   indicating that the activity must free some memory or will be
subject to paging. In this case, the activity should try to free
at least the indicated number of pages as quickly as possible. */
-RPC (activity_info, 3, 1, addr_t, activity,
+RPC (activity_info, 2, 1, 0,
+ /* cap_t principal, cap_t activity, */
uintptr_t, flags, uintptr_t, until_period,
/* Out: */
struct activity_info, info)
diff --git a/hurd/cap.h b/hurd/cap.h
index eae17db..0dd3e82 100644
--- a/hurd/cap.h
+++ b/hurd/cap.h
@@ -23,10 +23,12 @@
#include <hurd/types.h>
#include <hurd/stddef.h>
+#include <hurd/addr.h>
#include <hurd/addr-trans.h>
#include <hurd/startup.h>
#include <hurd/error.h>
#include <stdint.h>
+#include <stdbool.h>
/* Capabilities.
@@ -47,7 +49,10 @@ enum cap_type
cap_activity,
cap_activity_control,
cap_thread,
-#define CAP_TYPE_MAX cap_thread
+ cap_messenger,
+ cap_rmessenger,
+ cap_type_count,
+#define CAP_TYPE_MAX (cap_type_count - 1)
};
static inline const char *
@@ -73,6 +78,10 @@ cap_type_string (enum cap_type type)
return "activity_control";
case cap_thread:
return "thread";
+ case cap_messenger:
+ return "messenger";
+ case cap_rmessenger:
+ return "rmessenger";
default:
return "unknown cap type";
};
@@ -101,6 +110,11 @@ cap_types_compatible (enum cap_type a, enum cap_type b)
if (a == cap_activity_control && b == cap_activity)
return true;
+ if (a == cap_messenger && b == cap_rmessenger)
+ return true;
+ if (a == cap_rmessenger && b == cap_messenger)
+ return true;
+
return false;
}
@@ -113,6 +127,7 @@ cap_type_weak_p (enum cap_type type)
case cap_rpage:
case cap_rcappage:
case cap_activity:
+ case cap_rmessenger:
return true;
default:
@@ -139,6 +154,10 @@ cap_type_weaken (enum cap_type type)
case cap_activity:
return cap_activity;
+ case cap_messenger:
+ case cap_rmessenger:
+ return cap_rmessenger;
+
default:
return cap_void;
}
@@ -163,6 +182,10 @@ cap_type_strengthen (enum cap_type type)
case cap_activity:
return cap_activity_control;
+ case cap_messenger:
+ case cap_rmessenger:
+ return cap_messenger;
+
default:
return type;
}
@@ -229,7 +252,7 @@ struct cap_properties
#ifdef RM_INTERN
/* An OID corresponds to a page on a volume. Only the least 54 bits
are significant. */
-typedef l4_uint64_t oid_t;
+typedef uint64_t oid_t;
#define OID_FMT "0x%llx"
#define OID_PRINTF(__op_oid) ((oid_t) (__op_oid))
#endif
@@ -273,6 +296,8 @@ struct cap
#endif
};
+#define CAP_VOID ((struct cap) { .type = cap_void })
+
/* Return CAP's policy. */
#define CAP_POLICY_GET(__cpg_cap) \
OBJECT_POLICY ((__cpg_cap).discardable, (__cpg_cap).priority)
@@ -356,12 +381,6 @@ struct cap
#define RPC_STUB_PREFIX rm
#define RPC_ID_PREFIX RM
-#undef RPC_TARGET_NEED_ARG
-#define RPC_TARGET \
- ({ \
- extern struct hurd_startup_data *__hurd_startup_data; \
- __hurd_startup_data->rm; \
- })
#include <hurd/rpc.h>
@@ -371,12 +390,10 @@ enum
RM_cap_rubout,
RM_cap_read,
- RM_object_slot_copy_out = 400,
- RM_object_slot_copy_in,
- RM_object_slot_read,
- RM_object_discarded_clear,
+ RM_object_discarded_clear = 400,
RM_object_discard,
RM_object_status,
+ RM_object_reply_on_destruction,
RM_object_name,
};
@@ -400,14 +417,9 @@ enum
CAP_COPY_PRIORITY_SET = 1 << 5,
};
-/* Copy the capability in capability slot SOURCE in the address space
- rooted at SOURCE_ADDRESS_SPACE to the slot TARGET in the address
- space rooted at TARGET_ADDRESS_SPACE. The address space is
- resolved in the context of the caller. If the address space
- identifies a thread, its address space root is used. If it is
- ADDR_VOID, then the calling thread's address space route is used.
- (PRINCIPAL and the address spaces are looked up in the context of
- the caller.)
+/* Copy the capability in capability slot SOURCE to the slot at ADDR
+ in the object OBJECT. If OBJECT is ADDR_VOID, then the calling
+ thread's address space root is used.
By default, preserves SOURCE's subpage specification and copies
TARGET's guard and policy.
@@ -419,9 +431,8 @@ enum
If CAP_COPY_COPY_SOURCE_GUARD is set, uses the guard description in
source. Otherwise, preserves the guard in TARGET.
- If CAP_COPY_WEAKEN is set, saves a weakened version of SOURCE in
- *TARGET (e.g., if SOURCE's type is cap_page, *TARGET's type is set
- to cap_rpage).
+ If CAP_COPY_WEAKEN is set, saves a weakened version of SOURCE
+ (e.g., if SOURCE's type is cap_page, a cap_rpage is saved).
If CAP_COPY_DISCARDABLE_SET is set, then sets the discardable bit
based on the value in PROPERTIES. Otherwise, copies SOURCE's
@@ -429,55 +440,33 @@ enum
If CAP_COPY_PRIORITY_SET is set, then sets the priority based on
the value in properties. Otherwise, copies SOURCE's value. */
-RPC(cap_copy, 7, 0, addr_t, principal,
- addr_t, target_address_space, addr_t, target,
- addr_t, source_address_space, addr_t, source,
- l4_word_t, flags, struct cap_properties, properties)
-
-/* Overwrite the capability slot TARGET in address space
- TARGET_ADDRESS_SPACE with a void capability. */
-RPC(cap_rubout, 3, 0, addr_t, principal,
- addr_t, target_address_space, addr_t, target)
-
-/* Returns the public bits of the capability CAP in TYPE and
- CAP_PROPERTIES. */
-RPC(cap_read, 3, 2, addr_t, principal, addr_t, address_space, addr_t, cap,
- /* Out: */
- l4_word_t, type, struct cap_properties, properties)
-
-/* Copy the capability from slot SLOT of the object OBJECT (relative
- to the start of the object's subpage) to slot TARGET. PROPERTIES
- are interpreted as per cap_copy. */
-RPC(object_slot_copy_out, 8, 0, addr_t, principal,
- addr_t, object_address_space, addr_t, object, l4_word_t, slot,
- addr_t, target_address_space, addr_t, target,
- l4_word_t, flags, struct cap_properties, properties)
-
-/* Copy the capability from slot SOURCE to slot INDEX of the object
- OBJECT (relative to the start of the object's subpage). PROPERTIES
- are interpreted as per cap_copy. */
-RPC(object_slot_copy_in, 8, 0, addr_t, principal,
- addr_t, object_address_space, addr_t, object, l4_word_t, index,
- addr_t, source_address_space, addr_t, source,
- l4_word_t, flags, struct cap_properties, properties)
-
-/* Store the public bits of the capability slot SLOT of object OBJECT
+RPC(cap_copy, 5, 0, 0,
+ /* cap_t activity, cap_t object, */ addr_t, addr,
+ cap_t, source_object, addr_t, source_addr,
+ uintptr_t, flags, struct cap_properties, properties)
+
+/* Overwrite the capability slot at ADDR in the object OBJECT with a
+ void capability. */
+RPC(cap_rubout, 1, 0, 0,
+ /* cap_t activity, cap_t object, */ addr_t, addr)
+
+/* Returns the public bits of the capability at address ADDR in OBJECT
in TYPE and CAP_PROPERTIES. */
-RPC(object_slot_read, 4, 2, addr_t, principal, addr_t, address_space,
- addr_t, object, l4_word_t, slot,
+RPC(cap_read, 1, 2, 0,
+ /* cap_t activity, cap_t object, */ addr_t, addr,
/* Out: */
- l4_word_t, type, struct cap_properties, properties)
+ uintptr_t, type, struct cap_properties, properties)
-/* Clear the discarded bit. */
-RPC(object_discarded_clear, 2, 0,
- addr_t, principal, addr_t, object)
+/* Clear the discarded bit of the object at ADDR in object OBJECT. */
+RPC(object_discarded_clear, 1, 0, 0,
+ /* cap_t activity, cap_t object, */ addr_t, addr)
/* If the object designated by OBJECT is in memory, discard it.
OBJECT must have write authority. This does not set the object's
discarded bit and thus does not result in a fault. Instead, the
- next access will see zero-filled memory. */
-RPC(object_discard, 2, 0,
- addr_t, principal, addr_t, object)
+ next access will see, e.g., zero-filled memory. */
+RPC(object_discard, 0, 0, 0
+ /* cap_t activity, cap_t object, */)
enum
{
@@ -490,9 +479,17 @@ enum
(Note: this is not the state of a frame but an indication of
   whether the object has been modified since the last time the
dirty bit was cleared.) */
-RPC (object_status, 3, 1, addr_t, principal, addr_t, object, bool, clear,
+RPC (object_status, 1, 1, 0,
+ /* addr_t activity, addr_t object, */ bool, clear,
uintptr_t, status)
+/* Returns the object's return code in RETURN_CODE on object
+ destruction. */
+RPC (object_reply_on_destruction, 0, 1, 0,
+ /* cap_t principal, cap_t object, */
+ /* Out: */
+ uintptr_t, return_code);
+
struct object_name
{
char name[12];
@@ -501,13 +498,12 @@ struct object_name
/* Give object OBJECT a name. This is only used for debugging
purposes and is only supported by some objects, in particular,
activities and threads. */
-RPC (object_name, 3, 0, addr_t, principal,
- addr_t, object, struct object_name, name);
+RPC (object_name, 1, 0, 0,
+ /* cap_t activity, cap_t object, */ struct object_name, name);
#undef RPC_STUB_PREFIX
#undef RPC_ID_PREFIX
-#undef RPC_TARGET
/* An object. */
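
The new messenger capability types slot into the existing strong/weak type helpers patched above. A minimal illustration of the behaviour those helpers now give (editor's sketch, not part of the change):

    #include <hurd/cap.h>
    #include <assert.h>

    static void
    check_messenger_types (void)
    {
      /* Weakening a messenger yields the weak form, cap_rmessenger;
         strengthening reverses it.  */
      assert (cap_type_weaken (cap_messenger) == cap_rmessenger);
      assert (cap_type_strengthen (cap_rmessenger) == cap_messenger);

      /* The two forms compare as compatible, and only the weak form
         is reported as weak.  */
      assert (cap_types_compatible (cap_messenger, cap_rmessenger));
      assert (cap_type_weak_p (cap_rmessenger));
      assert (! cap_type_weak_p (cap_messenger));
    }
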
diff --git a/hurd/exceptions.h b/hurd/exceptions.h
index 5771210..765fc5f 100644
--- a/hurd/exceptions.h
+++ b/hurd/exceptions.h
@@ -1,26 +1,25 @@
-/* exceptions.h - Exception handling definitions.
+/* activations.h - Activation handling definitions.
Copyright (C) 2007, 2008 Free Software Foundation, Inc.
Written by Neal H. Walfield <neal@gnu.org>.
This file is part of the GNU Hurd.
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
- The GNU Hurd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
+ License along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
-#ifndef _HURD_EXCEPTIONS_H
-#define _HURD_EXCEPTIONS_H 1
+#ifndef _HURD_ACTIVATIONS_H
+#define _HURD_ACTIVATIONS_H 1
#include <hurd/stddef.h>
@@ -29,36 +28,33 @@
#include <stdint.h>
#include <hurd/cap.h>
#include <hurd/thread.h>
-#include <l4/thread.h>
#include <hurd/error.h>
+#include <l4/space.h>
-#define RPC_STUB_PREFIX exception
-#define RPC_ID_PREFIX EXCEPTION
-#define RPC_TARGET_NEED_ARG
-#define RPC_TARGET_ARG_TYPE l4_thread_id_t
-#define RPC_TARGET(x) (x)
+#define RPC_STUB_PREFIX activation
+#define RPC_ID_PREFIX ACTIVATION
#include <hurd/rpc.h>
-/* Exception message ids. */
+/* Activation message ids. */
enum
{
- EXCEPTION_fault = 10,
+ ACTIVATION_fault = 10,
};
/* Return a string corresponding to a message id. */
static inline const char *
-exception_method_id_string (l4_word_t id)
+activation_method_id_string (uintptr_t id)
{
switch (id)
{
- case EXCEPTION_fault:
+ case ACTIVATION_fault:
return "fault";
default:
return "unknown";
}
}
-struct exception_info
+struct activation_fault_info
{
union
{
@@ -75,93 +71,98 @@ struct exception_info
};
};
-#define EXCEPTION_INFO_FMT "%c%c%c %s%s"
-#define EXCEPTION_INFO_PRINTF(info) \
+#define ACTIVATION_FAULT_INFO_FMT "%c%c%c %s%s"
+#define ACTIVATION_FAULT_INFO_PRINTF(info) \
((info).access & L4_FPAGE_READABLE ? 'r' : '~'), \
- ((info).access & L4_FPAGE_WRITABLE ? 'w' : '~'), \
- ((info).access & L4_FPAGE_EXECUTABLE ? 'x' : '~'), \
- cap_type_string ((info).type), \
- (info.discarded) ? " discarded" : ""
+ ((info).access & L4_FPAGE_WRITABLE ? 'w' : '~'), \
+ ((info).access & L4_FPAGE_EXECUTABLE ? 'x' : '~'), \
+ cap_type_string ((info).type), \
+ (info.discarded) ? " discarded" : ""
/* Raise a fault at address FAULT_ADDRESS. If IP is not 0, then IP is
the value of the IP of the faulting thread at the time of the fault
and SP the value of the stack pointer at the time of the fault. */
-RPC (fault, 4, 0, addr_t, fault_address, uintptr_t, sp, uintptr_t, ip,
- struct exception_info, exception_info)
+RPC (fault, 4, 0, 0,
+ addr_t, fault_address, uintptr_t, sp, uintptr_t, ip,
+ struct activation_fault_info, activation_fault_info)
#undef RPC_STUB_PREFIX
#undef RPC_ID_PREFIX
-#undef RPC_TARGET_NEED_ARG
-#undef RPC_TARGET_ARG_TYPE
-#undef RPC_TARGET
-#define RPC_STUB_PREFIX rm
-#define RPC_ID_PREFIX RM
-#undef RPC_TARGET_NEED_ARG
-#define RPC_TARGET \
- ({ \
- extern struct hurd_startup_data *__hurd_startup_data; \
- __hurd_startup_data->rm; \
- })
+/* Initializes the activation handler to allow receiving IPCs (but
+ does not handle other faults). This must be called exactly once
+ before any IPCs are sent. */
+extern void hurd_activation_handler_init_early (void);
-#include <hurd/rpc.h>
+/* Initialize the activation handler. This must be called after the
+ storage sub-system has been initialized. At this point, the
+ activation handler is able to handle exceptions. */
+extern void hurd_activation_handler_init (void);
-/* Exception message ids. */
-enum
- {
- RM_exception_collect = 500,
- };
-/* Cause the delivery of a pending event, if any. */
-RPC(exception_collect, 1, 0, addr_t, principal)
+/* Return the calling thread's UTCB. Threading libraries should set
+ this to their own implementation once they are up and running. */
+extern struct vg_utcb *(*hurd_utcb) (void);
-#undef RPC_STUB_PREFIX
-#undef RPC_ID_PREFIX
-#undef RPC_TARGET
-#endif /* !ASM */
-
-/* The exception stack is 4 pages large. The word beyond the base of
- the stack is a pointer to the exception page, which is usually the
- last page of the stack. */
-#define EXCEPTION_STACK_SIZE_LOG2 (PAGESIZE_LOG2 + 2)
-#define EXCEPTION_STACK_SIZE (1 << EXCEPTION_STACK_SIZE_LOG2)
+/* Allocate a utcb buffer and associated data structures (including an
+   exception messenger) for the thread THREAD (which must already exist
+ but should not be running). Installs the UTCB and exception
+ messenger in the thread object. Returns the new UTCB in *UTCB.
+ Returns 0 on success, otherwise an error code. */
+extern error_t hurd_activation_state_alloc (addr_t thread,
+ struct vg_utcb **utcb);
-#ifndef ASM
-
-/* Initialize the exception handler. */
-extern void exception_handler_init (void);
+/* Release the state allocated by hurd_activation_state_alloc. May
+ not be called by a thread on its own UTCB! */
+extern void hurd_activation_state_free (struct vg_utcb *utcb);
-/* When a thread causes an exception, the kernel invokes the thread's
- exception handler. This points to the low-level exception handler,
- which invokes exception_handler_activated. (It is passed a pointer
- to the exception page.)
+/* When a thread causes an activation, the kernel invokes the thread's
+ activation handler. This points to the low-level activation handler,
+ which invokes activation_handler_activated. (It is passed a pointer
+ to the utcb.)
This function must determine how to continue. It may, but need
- not, immediately handle the fault. The problem with handling the
- fault immediately is that this function runs on the exception
- handler's tiny stack (~3kb) and it runs in activated mode. The
+ not, immediately handle the activation. The problem with handling
+ an activation immediately is that this function runs on the
+ activation handler's tiny stack and it runs in activated mode. The
latter means that it may not fault (which generally precludes
- accessing any dynamically allocated storage). To allow an easy
- transition to another function in normal-mode, if the function
- returns an exception_frame, then the exception handler will call
- exception_handler_normal passing it that argument. This function
- runs in normal mode and on the normal stack. When this function
- returns, the interrupted state is restored. */
-extern struct exception_frame *
- exception_handler_activated (struct exception_page *exception_page);
-
-extern void exception_handler_normal (struct exception_frame *exception_frame);
-
-/* Should be called before destroyed the exception page associated
- with a thread. */
-extern void exception_page_cleanup (struct exception_page *exception_page);
-
-/* The first instruction of exception handler dispatcher. */
-extern char exception_handler_entry;
+ accessing any dynamically allocated storage) or even properly send
+ IPC (as it has no easy way to determine when the IPC has been
+ received and when a reply is available--this information is
+ delivered by activations!).
+
+ To allow an easy transition to another function in normal-mode, if
+ the function returns an activation_frame, then the activation
+ handler will call hurd_activation_handler_normal passing it that
+ argument. This function runs in normal mode and on the normal
+ stack. When this function returns, the interrupted state is
+ restored. */
+extern struct activation_frame *hurd_activation_handler_activated
+ (struct vg_utcb *utcb);
+
+extern void hurd_activation_handler_normal
+ (struct activation_frame *activation_frame, struct vg_utcb *utcb);
+
+
+/* The first instruction of activation handler dispatcher. */
+extern char hurd_activation_handler_entry;
/* The instruction immediately following the last instruction of the
- exception handler dispatcher. */
-extern char exception_handler_end;
+ activation handler dispatcher. */
+extern char hurd_activation_handler_end;
+
+
+/* Register the current extant IPC. */
+extern void hurd_activation_message_register (struct hurd_message_buffer *mb);
+
+/* Unregister the current extant IPC. This is normally done
+   automatically when a reply is received.  However, if the IPC is
+ aborted, then this function must be called before the next IPC may
+ be sent. */
+extern void hurd_activation_message_unregister (struct hurd_message_buffer *mb);
+
+/* Dump the activation stack to stdout. */
+extern void hurd_activation_stack_dump (void);
#endif /* !ASM */
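
A note on the new hurd_utcb hook declared above: it is a plain function pointer, so a threading library can install its own per-thread lookup once it is running. A hypothetical sketch, where my_thread_self and its utcb member are illustrative assumptions rather than anything defined by this patch:

    #include <hurd/exceptions.h>

    /* Assumed per-thread bookkeeping, purely for illustration.  */
    struct my_thread { struct vg_utcb *utcb; };
    extern struct my_thread *my_thread_self (void);

    static struct vg_utcb *
    my_utcb_lookup (void)
    {
      return my_thread_self ()->utcb;
    }

    void
    my_threads_init (void)
    {
      /* From here on, the activation machinery asks the threading
         library for the current thread's UTCB.  */
      hurd_utcb = my_utcb_lookup;
    }
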
diff --git a/hurd/folio.h b/hurd/folio.h
index 9245ef6..9ad36c3 100644
--- a/hurd/folio.h
+++ b/hurd/folio.h
@@ -373,12 +373,6 @@ folio_object_cap (struct folio *folio, int object)
#define RPC_STUB_PREFIX rm
#define RPC_ID_PREFIX RM
-#undef RPC_TARGET_NEED_ARG
-#define RPC_TARGET \
- ({ \
- extern struct hurd_startup_data *__hurd_startup_data; \
- __hurd_startup_data->rm; \
- })
#include <hurd/rpc.h>
@@ -390,14 +384,16 @@ enum
RM_folio_policy
};
-/* Allocate a folio against PRINCIPAL. Store a capability in the
+/* Allocate a folio against ACTIVITY. Return a capability in the
caller's cspace in slot FOLIO. POLICY specifies the storage
policy. */
-RPC(folio_alloc, 3, 0, addr_t, principal, addr_t, folio,
- struct folio_policy, policy)
+RPC(folio_alloc, 1, 0, 1,
+ /* cap_t, principal, cap_t, activity, */
+ struct folio_policy, policy, cap_t, folio)
-/* Free the folio designated by FOLIO. PRINCIPAL pays. */
-RPC(folio_free, 2, 0, addr_t, principal, addr_t, folio)
+/* Free the folio designated by FOLIO. */
+RPC(folio_free, 0, 0, 0
+ /* cap_t, principal, cap_t, folio */)
/* Destroys the INDEXth object in folio FOLIO and allocate in its
   place an object of type TYPE.  If TYPE is CAP_VOID, any existing
@@ -406,15 +402,14 @@ RPC(folio_free, 2, 0, addr_t, principal, addr_t, folio)
folio. If an object is destroyed and there are waiters, they are
passed the return code RETURN_CODE.
- If OBJECT_SLOT is not ADDR_VOID, then stores a capability to the
- allocated object in OBJECT_SLOT. If OBJECT_WEAK_SLOT is not
- ADDR_VOID, stores a weaken reference to the created object. If an
- object is destroyed and there are waiters, they are passed the
- return code RETURN_CODE. */
-RPC(folio_object_alloc, 8, 0, addr_t, principal,
- addr_t, folio, uintptr_t, index, uintptr_t, type,
+ Returns a capability to the allocated object in OBJECT. Returns a
+ weak capability to the object in OBJECT_WEAK. */
+RPC(folio_object_alloc, 4, 0, 2,
+ /* cap_t, principal, cap_t, folio, */
+ uintptr_t, index, uintptr_t, type,
struct object_policy, policy, uintptr_t, return_code,
- addr_t, object_slot, addr_t, object_weak_slot)
+ /* Out: */
+ cap_t, object, cap_t, object_weak)
/* Flags for folio_policy. */
enum
@@ -433,16 +428,15 @@ enum
/* Get and set the management policy for folio FOLIO.
If FOLIO_POLICY_DELIVER is set in FLAGS, then return FOLIO's
- current paging policy in VALUE. Then, if any of the set flags are
+ current paging policy in OLD. Then, if any of the set flags are
set, set the corresponding values based on the value of POLICY. */
-RPC(folio_policy, 4, 1,
- addr_t, principal, addr_t, folio,
+RPC(folio_policy, 2, 1, 0,
+ /* cap_t, principal, cap_t, folio, */
uintptr_t, flags, struct folio_policy, policy,
/* Out: */
- struct folio_policy, value)
+ struct folio_policy, old)
#undef RPC_STUB_PREFIX
#undef RPC_ID_PREFIX
-#undef RPC_TARGET
#endif
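
As the comment above notes, allocating cap_void over an occupied slot destroys the object living there and hands RETURN_CODE to any waiters (for instance a thread blocked in object_reply_on_destruction). A sketch of that idiom, following the stub call pattern used by hieronymus.c earlier in this patch; folio and index are placeholders for a previously obtained folio address and slot, and NULL means no capability is wanted back:

    error_t err;
    err = rm_folio_object_alloc (ADDR_VOID /* principal */,
                                 folio, index,
                                 cap_void, OBJECT_POLICY_VOID,
                                 0 /* return code passed to waiters */,
                                 NULL, NULL);
    if (err)
      debug (0, "destroying object: %d", err);
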
diff --git a/hurd/futex.h b/hurd/futex.h
index 8c4507c..e1e71a3 100644
--- a/hurd/futex.h
+++ b/hurd/futex.h
@@ -3,17 +3,17 @@
Written by Neal H. Walfield <neal@gnu.org>.
GNU Hurd is free software: you can redistribute it and/or modify it
- under the terms of the GNU Lesser General Public License as
- published by the Free Software Foundation, either version 3 of the
- License, or (at your option) any later version.
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
GNU Hurd is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
+ General Public License for more details.
- You should have received a copy of the GNU Lesser General Public
- License along with GNU Hurd. If not, see
+ You should have received a copy of the GNU General Public License
+ along with GNU Hurd. If not, see
<http://www.gnu.org/licenses/>. */
#ifndef _HURD_FUTEX_H
@@ -38,12 +38,6 @@ enum
#define RPC_STUB_PREFIX rm
#define RPC_ID_PREFIX RM
-#undef RPC_TARGET_NEED_ARG
-#define RPC_TARGET \
- ({ \
- extern struct hurd_startup_data *__hurd_startup_data; \
- __hurd_startup_data->rm; \
- })
#include <hurd/rpc.h>
@@ -103,8 +97,8 @@ union futex_val3
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE \
(union futex_val3) { { 1, 0, FUTEX_OP_CMP_GT, FUTEX_OP_SET } }
-RPC (futex, 8, 1,
- addr_t, principal,
+RPC (futex, 7, 1, 0,
+ /* cap_t principal, cap_t thread, */
void *, addr1, int, op, int, val1,
bool, timeout, union futex_val2, val2,
void *, addr2, union futex_val3, val3,
@@ -113,7 +107,6 @@ RPC (futex, 8, 1,
#undef RPC_STUB_PREFIX
#undef RPC_ID_PREFIX
-#undef RPC_TARGET
#ifndef RM_INTERN
#include <errno.h>
@@ -124,11 +117,11 @@ struct futex_return
long ret;
};
-/* Standard futex signatures. See futex documentation, e.g., Futexes
- are Tricky by Ulrich Drepper. */
static inline struct futex_return
-futex (void *addr1, int op, int val1, struct timespec *timespec,
- void *addr2, int val3)
+__attribute__((always_inline))
+futex_using (struct hurd_message_buffer *mb,
+ void *addr1, int op, int val1, struct timespec *timespec,
+ void *addr2, int val3)
{
union futex_val2 val2;
if (timespec)
@@ -138,18 +131,36 @@ futex (void *addr1, int op, int val1, struct timespec *timespec,
error_t err;
long ret = 0; /* Elide gcc warning. */
- err = rm_futex (ADDR_VOID,
- addr1, op, val1, !! timespec, val2, addr2,
- (union futex_val3) val3, &ret);
+ if (mb)
+ err = rm_futex_using (mb,
+ ADDR_VOID, ADDR_VOID,
+ addr1, op, val1, !! timespec, val2, addr2,
+ (union futex_val3) val3, &ret);
+ else
+ err = rm_futex (ADDR_VOID, ADDR_VOID,
+ addr1, op, val1, !! timespec, val2, addr2,
+ (union futex_val3) val3, &ret);
return (struct futex_return) { err, ret };
}
+/* Standard futex signatures. See futex documentation, e.g., Futexes
+ are Tricky by Ulrich Drepper. */
+static inline struct futex_return
+__attribute__((always_inline))
+futex (void *addr1, int op, int val1, struct timespec *timespec,
+ void *addr2, int val3)
+{
+ return futex_using (NULL, addr1, op, val1, timespec, addr2, val3);
+}
+
+
/* If *F is VAL, wait until woken. */
static inline long
-futex_wait (int *f, int val)
+__attribute__((always_inline))
+futex_wait_using (struct hurd_message_buffer *mb, int *f, int val)
{
struct futex_return ret;
- ret = futex (f, FUTEX_WAIT, val, NULL, 0, 0);
+ ret = futex_using (mb, f, FUTEX_WAIT, val, NULL, 0, 0);
if (ret.err)
{
errno = ret.err;
@@ -158,8 +169,17 @@ futex_wait (int *f, int val)
return ret.ret;
}
+static inline long
+__attribute__((always_inline))
+futex_wait (int *f, int val)
+{
+ return futex_wait_using (NULL, f, val);
+}
+
+
/* If *F is VAL, wait until woken. */
static inline long
+__attribute__((always_inline))
futex_timed_wait (int *f, int val, struct timespec *timespec)
{
struct futex_return ret;
@@ -172,12 +192,14 @@ futex_timed_wait (int *f, int val, struct timespec *timespec)
return ret.ret;
}
+
/* Signal NWAKE waiters waiting on futex F. */
static inline long
-futex_wake (int *f, int nwake)
+__attribute__((always_inline))
+futex_wake_using (struct hurd_message_buffer *mb, int *f, int nwake)
{
struct futex_return ret;
- ret = futex (f, FUTEX_WAKE, nwake, NULL, 0, 0);
+ ret = futex_using (mb, f, FUTEX_WAKE, nwake, NULL, 0, 0);
if (ret.err)
{
errno = ret.err;
@@ -185,6 +207,13 @@ futex_wake (int *f, int nwake)
}
return ret.ret;
}
+
+static inline long
+__attribute__((always_inline))
+futex_wake (int *f, int nwake)
+{
+ return futex_wake_using (NULL, f, nwake);
+}
#endif /* !RM_INTERN */
#endif
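
To make the relationship between the plain wrappers and the new *_using variants concrete, here is a minimal wait/wake sketch built on the functions defined above (editor's illustration, not part of the patch):

    #include <hurd/futex.h>

    static int flag;

    static void
    consumer (void)
    {
      /* Sleep while FLAG is still 0; per the comment above, the wait
         only blocks if *f equals the expected value.  */
      while (flag == 0)
        futex_wait (&flag, 0);
    }

    static void
    producer (void)
    {
      flag = 1;
      futex_wake (&flag, 1);   /* wake at most one waiter */
    }

The *_using variants simply take an explicit struct hurd_message_buffer; passing NULL, as futex_wait and futex_wake do, falls back to the ordinary stubs, as the definitions above show.
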
diff --git a/hurd/headers.m4 b/hurd/headers.m4
index 7708aea..51b19fd 100644
--- a/hurd/headers.m4
+++ b/hurd/headers.m4
@@ -27,6 +27,9 @@ AC_CONFIG_LINKS([sysroot/include/hurd/stddef.h:hurd/stddef.h
sysroot/include/hurd/rmutex.h:hurd/rmutex.h
sysroot/include/hurd/futex.h:hurd/futex.h
sysroot/include/hurd/error.h:hurd/error.h
+ sysroot/include/hurd/message.h:hurd/message.h
+ sysroot/include/hurd/messenger.h:hurd/messenger.h
+ sysroot/include/hurd/ipc.h:hurd/ipc.h
sysroot/include/hurd/math.h:hurd/math.h
sysroot/include/hurd/bits/math.h:hurd/bits/${arch}/math.h
])
diff --git a/hurd/ipc.h b/hurd/ipc.h
new file mode 100644
index 0000000..95588a0
--- /dev/null
+++ b/hurd/ipc.h
@@ -0,0 +1,296 @@
+/* ipc.h - Interprocess communication interface.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _VIENGOOS_IPC_H
+#define _VIENGOOS_IPC_H 1
+
+#include <stdint.h>
+#include <errno.h>
+#include <hurd/addr.h>
+#include <hurd/stddef.h>
+#include <hurd/message.h>
+#include <assert.h>
+
+#ifdef USE_L4
+#include <l4.h>
+#endif
+
+/* IPC flags. */
+enum
+ {
+ /* IPC includes a receive phase. */
+ VG_IPC_RECEIVE = 1 << 0,
+ /* Don't unblock the receive buffer if there is no message queued
+ for delivery. */
+ VG_IPC_RECEIVE_NONBLOCKING = 1 << 1,
+ /* Activate the thread on message receipt. */
+ VG_IPC_RECEIVE_ACTIVATE = 1 << 2,
+ /* Set the receive messenger's thread to the caller. */
+ VG_IPC_RECEIVE_SET_THREAD_TO_CALLER = 1 << 3,
+    /* Set the receive messenger's address space root to the
+ caller's. */
+ VG_IPC_RECEIVE_SET_ASROOT_TO_CALLERS = 1 << 4,
+ /* Whether to receive the message inline. */
+ VG_IPC_RECEIVE_INLINE = 1 << 5,
+ /* Whether to receive any capabilities inline when receiving a
+ message inline (i.e., when VG_IPC_RECEIVE_INLINE is set). */
+ VG_IPC_RECEIVE_INLINE_CAP1 = 1 << 6,
+
+ /* IPC includes a send phase. */
+ VG_IPC_SEND = 1 << 7,
+ /* If the object is blocked, return EWOULDBLOCK. */
+ VG_IPC_SEND_NONBLOCKING = 1 << 8,
+ /* Activate the thread on message transfer. */
+ VG_IPC_SEND_ACTIVATE = 1 << 9,
+ /* Set the send messenger's thread to the caller. */
+ VG_IPC_SEND_SET_THREAD_TO_CALLER = 1 << 10,
+    /* Set the send messenger's address space root to the
+ caller's. */
+ VG_IPC_SEND_SET_ASROOT_TO_CALLERS = 1 << 11,
+ /* Whether to send the message inline. */
+ VG_IPC_SEND_INLINE = 1 << 12,
+
+ /* Which inline data to transfer when sending a message. Inline
+ data is ignored if the send buffer is not ADDR_VOID. */
+ VG_IPC_SEND_INLINE_WORD1 = 1 << 13,
+ VG_IPC_SEND_INLINE_WORD2 = 1 << 14,
+ VG_IPC_SEND_INLINE_CAP1 = 1 << 15,
+
+
+ /* The IPC includes a return phase. */
+ VG_IPC_RETURN = 1 << 16,
+
+ };
+
+#ifndef RM_INTERN
+/* An IPC consists of three phases: the receive phase, the send phase
+ and the return phase. All three phases are optional. Each phase
+ is executed after the previous phase has completed. If a phase
+ does not complete successfully, the phase is aborted and the
+ remaining phases are not executed.
+
+
+ RECEIVE PHASE
+
+ If FLAGS contains VG_IPC_RECEIVE, the IPC includes a receive phase.
+
+ If RECV_BUF is not ADDR_VOID, associates RECV_BUF with
+ RECV_MESSENGER.
+
+ If FLAGS contains VG_IPC_RECEIVE_NONBLOCKING:
+
+ Unblocks RECV_MESSENGER if RECV_MESSENGER has a messenger waiting
+     to deliver a message.  Otherwise, returns EWOULDBLOCK.
+
+ Otherwise:
+
+ Unblocks RECV_MESSENGER.
+
+ Resources are charged to RECV_ACTIVITY.
+
+ If VG_IPC_RECEIVE_ACTIVATE is set, an activation is sent to the
+ thread associated with RECV_MESSENGER when RECV_MESSENGER receives
+ a message.
+
+
+ SEND PHASE
+
+ If FLAGS contains VG_IPC_SEND, the IPC includes a send phase.
+
+ If SEND_MESSENGER is ADDR_VOID, an implicit messenger is allocated
+ and VG_IPC_SEND_NONBLOCKING is assumed to be on.
+
+   If SEND_BUF is not ADDR_VOID, associates SEND_BUF with
+ SEND_MESSENGER. Otherwise, associates inline data (INLINE_WORD1,
+ INLINE_WORD2 and INLINE_CAP) according to the inline flags with
+ SEND_MESSENGER.
+
+ If FLAGS contains VG_IPC_SEND_NONBLOCKING:
+
+ If TARGET_MESSENGER is blocked, returns ETIMEDOUT.
+
+ Otherwise:
+
+ Blocks SEND_MESSENGER and enqueues it on TARGET_MESSENGER.
+
+ When TARGET_MESSENGER becomes unblocked, SEND_MESSENGER delivers
+ its message to TARGET_MESSENGER.
+
+ Resources are charged to SEND_ACTIVITY.
+
+ If VG_IPC_SEND_ACTIVATE is set, an activation is sent to the thread
+ associated with SEND_MESSENGER when SEND_MESSENGER's message is
+ transferred to TARGET_MESSENGER (or, when TARGET_MESSENGER is
+ destroyed).
+
+
+ RETURN PHASE
+
+ If FLAGS contains VG_IPC_RETURN, the IPC returns. Otherwise, the
+ calling thread is suspended until it is next activated. */
+static inline error_t
+vg_ipc_full (uintptr_t flags,
+ addr_t recv_activity, addr_t recv_messenger, addr_t recv_buf,
+ addr_t recv_inline_cap,
+ addr_t send_activity, addr_t target_messenger,
+ addr_t send_messenger, addr_t send_buf,
+ uintptr_t send_inline_word1, uintptr_t send_inline_word2,
+ addr_t send_inline_cap)
+{
+ error_t err = 0;
+
+#ifdef USE_L4
+ l4_msg_tag_t tag = l4_niltag;
+ l4_msg_tag_set_label (&tag, 8194);
+
+ l4_msg_t msg;
+ l4_msg_clear (msg);
+ l4_msg_set_msg_tag (msg, tag);
+
+ void msg_append_addr (addr_t addr)
+ {
+ int i;
+ for (i = 0; i < sizeof (addr_t) / sizeof (uintptr_t); i ++)
+ l4_msg_append_word (msg, ((uintptr_t *) &addr)[i]);
+ }
+
+ l4_msg_append_word (msg, flags);
+
+ msg_append_addr (recv_activity);
+ msg_append_addr (recv_messenger);
+ msg_append_addr (recv_buf);
+ msg_append_addr (recv_inline_cap);
+
+ msg_append_addr (send_activity);
+ msg_append_addr (target_messenger);
+
+ msg_append_addr (send_messenger);
+ msg_append_addr (send_buf);
+
+ l4_msg_append_word (msg, send_inline_word1);
+ l4_msg_append_word (msg, send_inline_word2);
+ msg_append_addr (send_inline_cap);
+
+ l4_msg_load (msg);
+ l4_accept (l4_map_grant_items (L4_COMPLETE_ADDRESS_SPACE));
+
+ bool call = true;
+
+ while (1)
+ {
+ extern struct hurd_startup_data *__hurd_startup_data;
+
+ if (call)
+ tag = l4_call (__hurd_startup_data->rm);
+ else
+ tag = l4_receive (__hurd_startup_data->rm);
+
+ if (likely (l4_ipc_failed (tag)))
+ {
+ if (((l4_error_code () >> 1) & 0x7) == 3)
+ {
+ if (l4_error_code () & 1)
+ /* IPC was interrupted in the receive phase, i.e., we
+ got a response. */
+ break;
+ else
+ call = false;
+ }
+ else
+ return EHOSTDOWN;
+ }
+ else
+ {
+ assert (l4_untyped_words (tag) == 1);
+ l4_msg_store (tag, msg);
+ /* Potential error performing IPC (or VG_RETURN specified). */
+ err = l4_msg_word (msg, 1);
+ break;
+ }
+ }
+#else
+# warning vg_ipc not ported to this architecture.
+#endif
+
+ return err;
+}
+
+static inline error_t
+vg_ipc (uintptr_t flags,
+ addr_t recv_activity, addr_t recv_messenger, addr_t recv_buf,
+ addr_t send_activity, addr_t target_messenger,
+ addr_t send_messenger, addr_t send_buf)
+{
+ return vg_ipc_full (flags,
+ recv_activity, recv_messenger, recv_buf, ADDR_VOID,
+ send_activity, target_messenger,
+ send_messenger, send_buf,
+ 0, 0, ADDR_VOID);
+}
+
+static inline error_t
+vg_ipc_short (uintptr_t flags,
+ addr_t recv_activity, addr_t recv_messenger, addr_t recv_cap,
+ addr_t send_activity, addr_t target_messenger,
+ addr_t send_messenger,
+ uintptr_t inline_word1, uintptr_t inline_word2,
+ addr_t inline_cap)
+{
+ return vg_ipc_full (flags,
+ recv_activity, recv_messenger, ADDR_VOID, recv_cap,
+ send_activity, target_messenger,
+ send_messenger, ADDR_VOID,
+ inline_word1, inline_word2, inline_cap);
+}
+
+static inline error_t
+vg_send (uintptr_t flags, addr_t send_activity, addr_t target_messenger,
+ addr_t send_messenger, addr_t send_buf)
+{
+ return vg_ipc_full (flags | VG_IPC_SEND | VG_IPC_SEND_ACTIVATE,
+ ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ send_activity, target_messenger,
+ send_messenger, send_buf,
+ 0, 0, ADDR_VOID);
+}
+
+static inline error_t
+vg_reply (uintptr_t flags, addr_t send_activity, addr_t target_messenger,
+ addr_t send_messenger, addr_t send_buf)
+{
+ return vg_ipc_full (flags | VG_IPC_SEND | VG_IPC_SEND_NONBLOCKING,
+ ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ send_activity, target_messenger, send_messenger, send_buf,
+ 0, 0, ADDR_VOID);
+}
+
+/* Suspend the caller until the next activation. */
+static inline error_t
+vg_suspend (void)
+{
+ return vg_ipc_full (0,
+ ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ 0, 0, ADDR_VOID);
+}
+
+#endif
+
+#endif
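
Putting the pieces together, a hypothetical call-style use of the vg_ipc primitive above, paired with the hurd/message.h helpers introduced below. Every name declared extern here (the messengers, the buffers and their capability addresses) is a placeholder standing in for state the caller already has, not something defined by this patch:

    #include <hurd/ipc.h>
    #include <hurd/message.h>

    extern struct vg_message *request;        /* mapped, page-aligned request buffer */
    extern addr_t request_buf, reply_buf;     /* capability addresses of the buffers */
    extern addr_t my_send_msgr, my_recv_msgr; /* the caller's messengers */
    extern addr_t server;                     /* the target messenger */

    static error_t
    call_server (void)
    {
      /* Marshal two data words into the request page.  */
      vg_message_clear (request);
      vg_message_append_word (request, 1);
      vg_message_append_word (request, 2);

      /* Unblock the receive messenger (bound to the reply buffer),
         enqueue the send messenger on the server's messenger, and
         return rather than block; activations announce both the
         transfer and the reply.  */
      return vg_ipc (VG_IPC_SEND | VG_IPC_SEND_ACTIVATE
                     | VG_IPC_RECEIVE | VG_IPC_RECEIVE_ACTIVATE
                     | VG_IPC_RETURN,
                     ADDR_VOID, my_recv_msgr, reply_buf,
                     ADDR_VOID, server,
                     my_send_msgr, request_buf);
    }
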
diff --git a/hurd/message.h b/hurd/message.h
new file mode 100644
index 0000000..d59e41d
--- /dev/null
+++ b/hurd/message.h
@@ -0,0 +1,229 @@
+/* message.h - Message buffer definitions.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _VIENGOOS_MESSAGE_H
+#define _VIENGOOS_MESSAGE_H 1
+
+#include <stdint.h>
+#include <assert.h>
+#include <hurd/addr.h>
+#include <hurd/stddef.h>
+
+/* A message.
+
+ When handing a message structure to a messenger, it must start at
+ the beginning of a page and it cannot extend past the end of that
+ page. */
+struct vg_message
+{
+ union
+ {
+ struct
+ {
+ /* The number of capability addresses in the message. */
+ uint16_t cap_count;
+ /* The number of bytes of data transferred in this message. */
+ uint16_t data_count;
+
+ addr_t caps[/* cap_count */];
+ // char data[data_count];
+ };
+
+ char raw[PAGESIZE];
+ };
+};
+
+
+/* Clear the msg so that it references no capabilities and
+ contains no data. */
+static inline void
+vg_message_clear (struct vg_message *msg)
+{
+ msg->cap_count = 0;
+ msg->data_count = 0;
+}
+
+
+/* Return the number of capabilities referenced by MSG. */
+static inline int
+vg_message_cap_count (struct vg_message *msg)
+{
+ int max = (PAGESIZE - __builtin_offsetof (struct vg_message, caps))
+ / sizeof (addr_t);
+
+ int count = msg->cap_count;
+ if (count > max)
+ count = max;
+
+ return count;
+}
+
+/* Return the number of bytes of data in MSG. */
+static inline int
+vg_message_data_count (struct vg_message *msg)
+{
+ int max = PAGESIZE
+ - vg_message_cap_count (msg) * sizeof (addr_t)
+ - __builtin_offsetof (struct vg_message, caps);
+
+ int count = msg->data_count;
+ if (count > max)
+ count = max;
+
+ return count;
+}
+
+
+/* Return the start of the capability address array in msg MSG. */
+static inline addr_t *
+vg_message_caps (struct vg_message *msg)
+{
+ return msg->caps;
+}
+
+/* Return capability IDX in msg MSG. */
+static inline addr_t
+vg_message_cap (struct vg_message *msg, int idx)
+{
+ assert (idx < msg->cap_count);
+
+ return msg->caps[idx];
+}
+
+
+/* Return the start of the data in msg MSG. */
+static inline char *
+vg_message_data (struct vg_message *msg)
+{
+ return (void *) msg
+ + __builtin_offsetof (struct vg_message, caps)
+ + msg->cap_count * sizeof (addr_t);
+}
+
+/* Return data word WORD in msg MSG. */
+static inline uintptr_t
+vg_message_word (struct vg_message *msg, int word)
+{
+ assert (word < msg->data_count / sizeof (uintptr_t));
+
+ return ((uintptr_t *) vg_message_data (msg))[word];
+}
+
+
+/* Append the array of capability addresses CAPS to the msg MSG.
+ There must be sufficient room in the message buffer. */
+static inline void
+vg_message_append_caps (struct vg_message *msg, int cap_count, addr_t *caps)
+{
+ assert ((void *) vg_message_data (msg) - (void *) msg
+ + vg_message_data_count (msg) + cap_count * sizeof (*caps)
+ <= PAGESIZE);
+
+ __builtin_memmove (&msg->caps[msg->cap_count + cap_count],
+ &msg->caps[msg->cap_count],
+ msg->data_count);
+
+ __builtin_memcpy (&msg->caps[msg->cap_count],
+ caps,
+ cap_count * sizeof (addr_t));
+
+ msg->cap_count += cap_count;
+}
+
+/* Append the capability address CAP to the msg MSG. There must be
+ sufficient room in the message buffer. */
+static inline void
+vg_message_append_cap (struct vg_message *msg, addr_t cap)
+{
+ vg_message_append_caps (msg, 1, &cap);
+}
+
+
+/* Append DATA to the msg MSG. There must be sufficient room in the
+ message buffer. */
+static inline void
+vg_message_append_data (struct vg_message *msg, int bytes, char *data)
+{
+ int dstart = __builtin_offsetof (struct vg_message, caps)
+ + msg->cap_count * sizeof (addr_t);
+ int dend = dstart + msg->data_count;
+
+ int new_dend = dend + bytes;
+ assert (new_dend <= PAGESIZE);
+
+ msg->data_count += bytes;
+ __builtin_memcpy ((void *) msg + dend, data, bytes);
+}
+
+/* Append the word WORD to the msg MSG. There must be
+ sufficient room in the message buffer. */
+static inline void
+vg_message_append_word (struct vg_message *msg, uintptr_t word)
+{
+ vg_message_append_data (msg, sizeof (word), (char *) &word);
+}
+
+/* Set data word POS in msg MSG to WORD, extending the message's data
+   area if necessary. */
+static inline void
+vg_message_word_set (struct vg_message *msg, int pos, uintptr_t word)
+{
+  if (msg->data_count < (pos + 1) * sizeof (uintptr_t))
+    msg->data_count = (pos + 1) * sizeof (uintptr_t);
+
+ ((uintptr_t *) vg_message_data (msg))[pos] = word;
+}
+
+#include <s-printf.h>
+
+static inline void
+vg_message_dump (struct vg_message *message)
+{
+ s_printf ("%d bytes, %d caps\n",
+ vg_message_data_count (message),
+ vg_message_cap_count (message));
+
+ char d2h[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'A', 'B', 'C', 'D', 'E', 'F' };
+ unsigned char *data = vg_message_data (message);
+
+ int i = 0;
+ while (i < vg_message_data_count (message))
+ {
+ s_printf ("%d: ", i);
+
+ int j, k;
+ for (j = 0, k = 0;
+ i < vg_message_data_count (message) && j < 4 * 8;
+ j ++, i ++)
+ {
+ s_printf ("%c%c", d2h[data[i] >> 4], d2h[data[i] & 0xf]);
+ if (j % 4 == 3)
+ s_printf (" ");
+ }
+ s_printf ("\n");
+ }
+
+ for (i = 0; i < vg_message_cap_count (message); i ++)
+ s_printf ("cap %d: " ADDR_FMT "\n",
+ i, ADDR_PRINTF (vg_message_cap (message, i)));
+}
+
+
+#endif /* _VIENGOOS_MESSAGE_H */
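
As a usage illustration of the interface above (a minimal sketch only; the page-aligned buffer and the capability address are placeholders, not part of this change), filling and reading back a message might look like:

#include <hurd/message.h>

/* MSG must point to a page-aligned, PAGESIZE-byte buffer; SLOT is a
   hypothetical capability address to be sent along with the data.  */
static void
example_message (struct vg_message *msg, addr_t slot)
{
  vg_message_clear (msg);

  vg_message_append_word (msg, 42);   /* One data word...        */
  vg_message_append_cap (msg, slot);  /* ...and one capability.  */

  assert (vg_message_data_count (msg) == sizeof (uintptr_t));
  assert (vg_message_cap_count (msg) == 1);
  assert (vg_message_word (msg, 0) == 42);
}

Appending the capability after the data word is fine: vg_message_append_caps moves the data area out of the way before inserting the new address.
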
diff --git a/hurd/messenger.h b/hurd/messenger.h
new file mode 100644
index 0000000..78d42f1
--- /dev/null
+++ b/hurd/messenger.h
@@ -0,0 +1,87 @@
+/* messenger.h - Messenger buffer definitions.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _VIENGOOS_MESSENGER_H
+#define _VIENGOOS_MESSENGER_H 1
+
+#include <stdint.h>
+#include <hurd/addr.h>
+
+/* A messenger references a message buffer. It can transfer a message
+ (contained in its message buffer) to another messenger. It can
+ also receive a message from another messenger. A messenger can
+ block waiting to deliver a message to or receive a message from
+ another messenger.
+
+ To send a message, a payload is loaded into a message buffer and
+ associated with a messenger. The messenger is then enqueued on
+ another messenger. When the latter messenger is unblocked, the
+ message is delivered.
+
+   To prevent messages from being overwritten, messengers are blocked on
+ message delivery and must be explicitly unblocked before another
+ message is sent. */
+#ifdef RM_INTERN
+struct messenger;
+typedef struct messenger *vg_messenger_t;
+#else
+typedef addr_t vg_messenger_t;
+#endif
+
+#define VG_MESSENGER_INLINE_WORDS 2
+#define VG_MESSENGER_INLINE_CAPS 1
+
+/* The user-settable capability slots at the start of the messenger
+   structure. */
+enum
+ {
+ /* The thread to activate. */
+ VG_MESSENGER_THREAD_SLOT = 0,
+ /* The address space root relative to which all capability
+ addresses in the message buffer will be resolved. */
+ VG_MESSENGER_ASROOT_SLOT,
+    /* The associated message buffer. */
+ VG_MESSENGER_BUFFER_SLOT,
+ /* The activity that was delivered with the last message. */
+ VG_MESSENGER_ACTIVITY_SLOT,
+
+ VG_MESSENGER_SLOTS = 4,
+ };
+#define VG_MESSENGER_SLOTS_LOG2 2
+
+enum
+ {
+ VG_messenger_id = 900,
+ };
+
+#define RPC_STUB_PREFIX vg
+#define RPC_ID_PREFIX VG
+
+#include <hurd/rpc.h>
+
+/* Set MESSENGER's ID to ID and return the old ID in OLD. */
+RPC(messenger_id, 1, 1, 0,
+ /* cap_t activity, cap_t messenger, */
+ uint64_t, id, uint64_t, old)
+
+#undef RPC_STUB_PREFIX
+#undef RPC_ID_PREFIX
+
+#endif /* _VIENGOOS_MESSENGER_H */
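
Per the RPC template documented in hurd/rpc.h below, the RPC invocation above yields, among other stubs, a vg_messenger_id call function taking the activity, the messenger, the new id and a pointer for the old one. A caller-side sketch (the capability addresses and the chosen id are illustrative only):

#include <hurd/messenger.h>

/* ACTIVITY and MESSENGER are hypothetical capability addresses held
   by the caller.  Assign an arbitrary id and return the previous one
   in *OLD.  */
static error_t
example_set_messenger_id (cap_t activity, cap_t messenger, uint64_t *old)
{
  return vg_messenger_id (activity, messenger, 1, old);
}
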
diff --git a/hurd/rpc.h b/hurd/rpc.h
index e363509..55fe8be 100644
--- a/hurd/rpc.h
+++ b/hurd/rpc.h
@@ -4,20 +4,19 @@
This file is part of the GNU Hurd.
- The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
- The GNU Hurd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
+ License along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
#define RPC_CONCAT2(a,b) a##b
#define RPC_CONCAT(a,b) RPC_CONCAT2(a,b)
@@ -41,107 +40,66 @@
#else
#define RPC_ID_PREFIX_(name) RPC_CONCAT(RPC_ID_PREFIX,_##name)
#endif
-
-/* We need to know where to send the IPC. Either the caller can
- supply it or it can be implicit.
-
- If the caller should supply the target, then define
- RPC_TARGET_NEED_ARG, RPC_TARGET_ARG_TYPE to the type of the
- argument, and RPC_TARGET to a be a macro that takes a single
- argument and returns an l4_thread_id_t.
-
- #define RPC_STUB_PREFIX prefix
- #define RPC_ID_PREFIX PREFIX
- #define RPC_TARGET_NEED_ARG
- #define RPC_TARGET_ARG_TYPE object_t
- #define RPC_TARGET(x) ((x)->thread_id)
-
- If the caller need not supply the argument, then the includer
- should not define RPC_TARGET_NEED_ARG and should define RPC_TARGET
- to be a macro that takes no arguments and returns an
- l4_thread_id_t.
-
- #define RPC_STUB_PREFIX prefix
- #define RPC_ID_PREFIX PREFIX
- #undef RPC_TARGET_NEED_ARG
- #define RPC_TARGET ({ extern l4_thread_id_t foo_server; foo_server; })
-
- At the end of the include file, be sure to #undef the used
- preprocessor variables to avoid problems when multiple headers
- make use of this file.
-
- #undef RPC_STUB_PREFIX
- #undef RPC_ID_PREFIX
- #undef RPC_TARGET_NEED_ARG
- #undef RPC_TARGET_ARG_TYPE
- #undef RPC_TARGET
- */
-#ifndef RPC_TARGET
-#error Did not define RPC_TARGET
-#endif
-
-#undef RPC_TARGET_ARG_
-#undef RPC_TARGET_
-
-#ifdef RPC_TARGET_NEED_ARG
-# ifndef RPC_TARGET_ARG_TYPE
-# error RPC_TARGET_NEED_ARG define but RPC_TARGET_ARG_TYPE not defined.
-# endif
-
-# define RPC_TARGET_ARG_ RPC_TARGET_ARG_TYPE arg_,
-# define RPC_TARGET_ RPC_TARGET(arg_)
-#else
-
-# define RPC_TARGET_ARG_
-# define RPC_TARGET_ RPC_TARGET
-#endif
-
-#undef RPC_TARGET_NEED_ARG
#ifndef _HURD_RPC_H
#define _HURD_RPC_H
#include <hurd/stddef.h>
-#include <l4/ipc.h>
-#include <l4/space.h>
+#include <hurd/message.h>
+#include <hurd/ipc.h>
#include <errno.h>
+
+#ifdef RM_INTERN
+extern struct vg_message *reply_buffer;
+
+/* We can't include messenger.h as it includes hurd/cap.h which in turn
+ includes this file. */
+struct messenger;
+struct activity;
+extern bool messenger_message_load (struct activity *activity,
+ struct messenger *target,
+ struct vg_message *message);
+#else
+# include <hurd/message-buffer.h>
+#endif
+typedef addr_t cap_t;
/* First we define some cpp help macros. */
-#define CPP_IFTHEN_0(when, whennot) whennot
-#define CPP_IFTHEN_1(when, whennot) when
-#define CPP_IFTHEN_2(when, whennot) when
-#define CPP_IFTHEN_3(when, whennot) when
-#define CPP_IFTHEN_4(when, whennot) when
-#define CPP_IFTHEN_5(when, whennot) when
-#define CPP_IFTHEN_6(when, whennot) when
-#define CPP_IFTHEN_7(when, whennot) when
-#define CPP_IFTHEN_8(when, whennot) when
-#define CPP_IFTHEN_9(when, whennot) when
-#define CPP_IFTHEN_10(when, whennot) when
-#define CPP_IFTHEN_11(when, whennot) when
-#define CPP_IFTHEN_12(when, whennot) when
-#define CPP_IFTHEN_13(when, whennot) when
-#define CPP_IFTHEN_14(when, whennot) when
-#define CPP_IFTHEN_15(when, whennot) when
-#define CPP_IFTHEN_16(when, whennot) when
-#define CPP_IFTHEN_17(when, whennot) when
-#define CPP_IFTHEN_18(when, whennot) when
-#define CPP_IFTHEN_19(when, whennot) when
-#define CPP_IFTHEN_20(when, whennot) when
-#define CPP_IFTHEN_21(when, whennot) when
-#define CPP_IFTHEN_22(when, whennot) when
-#define CPP_IFTHEN_23(when, whennot) when
-#define CPP_IFTHEN_24(when, whennot) when
-#define CPP_IFTHEN_25(when, whennot) when
-
-#define CPP_IFTHEN_(expr, when, whennot) \
- CPP_IFTHEN_##expr(when, whennot)
-#define CPP_IFTHEN(expr, when, whennot) \
- CPP_IFTHEN_(expr, when, whennot)
-#define CPP_IF(expr, when) \
- CPP_IFTHEN(expr, when,)
-#define CPP_IFNOT(expr, whennot) \
- CPP_IFTHEN(expr, , whennot)
+#define CPP_IFELSE_0(when, whennot) whennot
+#define CPP_IFELSE_1(when, whennot) when
+#define CPP_IFELSE_2(when, whennot) when
+#define CPP_IFELSE_3(when, whennot) when
+#define CPP_IFELSE_4(when, whennot) when
+#define CPP_IFELSE_5(when, whennot) when
+#define CPP_IFELSE_6(when, whennot) when
+#define CPP_IFELSE_7(when, whennot) when
+#define CPP_IFELSE_8(when, whennot) when
+#define CPP_IFELSE_9(when, whennot) when
+#define CPP_IFELSE_10(when, whennot) when
+#define CPP_IFELSE_11(when, whennot) when
+#define CPP_IFELSE_12(when, whennot) when
+#define CPP_IFELSE_13(when, whennot) when
+#define CPP_IFELSE_14(when, whennot) when
+#define CPP_IFELSE_15(when, whennot) when
+#define CPP_IFELSE_16(when, whennot) when
+#define CPP_IFELSE_17(when, whennot) when
+#define CPP_IFELSE_18(when, whennot) when
+#define CPP_IFELSE_19(when, whennot) when
+#define CPP_IFELSE_20(when, whennot) when
+#define CPP_IFELSE_21(when, whennot) when
+#define CPP_IFELSE_22(when, whennot) when
+#define CPP_IFELSE_23(when, whennot) when
+#define CPP_IFELSE_24(when, whennot) when
+#define CPP_IFELSE_25(when, whennot) when
+
+#define CPP_IFELSE_(expr, when, whennot) \
+ CPP_IFELSE_##expr(when, whennot)
+#define CPP_IFELSE(expr, when, whennot) \
+ CPP_IFELSE_(expr, when, whennot)
+#define CPP_IF(expr, when) \
+ CPP_IFELSE(expr, when,)
+#define CPP_IFNOT(expr, whennot) \
+ CPP_IFELSE(expr, , whennot)
#define CPP_SUCC_0 1
#define CPP_SUCC_1 2
@@ -176,11 +134,11 @@
/* We'd like to define CPP_ADD as:
#define CPP_ADD(x, y) \
- CPP_IFTHEN(y, CPP_ADD(SUCC(x), SUCC(y)), y)
+ CPP_IFELSE(y, CPP_ADD(SUCC(x), SUCC(y)), y)
This does not work as while a macro is being expanded, it becomes
ineligible for expansion. Thus, any references (including indirect
- references) are not expanded. Nested applications of a macro are,
+ references) are not expanded. Repeated applications of a macro are,
however, allowed, and this is what the CPP_APPLY macro does. */
#define CPP_APPLY1(x, y) x(y)
#define CPP_APPLY2(x, y) x(CPP_APPLY1(x, y))
@@ -209,13 +167,44 @@
#define CPP_APPLY25(x, y) x(CPP_APPLY24(x, y))
#define CPP_ADD(x, y) \
- CPP_IFTHEN(y, CPP_APPLY##y(CPP_SUCC, x), x)
+ CPP_IFELSE(y, CPP_APPLY##y(CPP_SUCC, x), x)
+
+/* Apply a function to each of the first n arguments.
+
+
+ CPP_FOREACH(2, CPP_SAFE_DEREF, NULL, a, b)
+
+ =>
+
+ ((a) ? *(a) : NULL), ((b) ? *(b) : NULL)
+ */
+#define CPP_FOREACH_0(func, cookie, ...)
+#define CPP_FOREACH_1(func, cookie, element, ...) func(cookie, element)
+#define CPP_FOREACH_2(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_1(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH_3(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_2(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH_4(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_3(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH_5(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_4(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH_6(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_5(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH_7(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_6(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH_8(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_7(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH_9(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_8(func, cookie, __VA_ARGS__)
+
+#define CPP_FOREACH_(n, func, cookie, ...) \
+ CPP_FOREACH_##n(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH(n, func, cookie, ...) \
+ CPP_FOREACH_(n, func, cookie, __VA_ARGS__)
+
+/* Used in conjunction with CPP_FOREACH. Generates C code that
+ dereferences ELEMENT if it is not NULL, otherwise, returns
+ COOKIE. */
+#define CPP_SAFE_DEREF(cookie, element) ((element) ? *(element) : (cookie))
+
/* CPP treats commas specially so we have to be smart about how we
insert them algorithmically. For instance, this won't work:
#define COMMA ,
- CPP_IFTHEN(x, COMMA, )
+ CPP_IFELSE(x, COMMA, )
   To optionally insert a comma, use this function instead.  When the
   result is needed, invoke the result.  For instance:
@@ -224,22 +213,36 @@
*/
#define RPC_COMMA() ,
#define RPC_NOCOMMA()
-#define RPC_IF_COMMA(x) CPP_IFTHEN(x, RPC_COMMA, RPC_NOCOMMA)
+#define RPC_IF_COMMA(x) CPP_IFELSE(x, RPC_COMMA, RPC_NOCOMMA)
-/* Load the argument ARG, which is of type TYPE into MR IDX. */
+/* Append the argument __RLA_ARG, whose type is __RLA_TYPE, to the
+ message buffer MSG. */
#define RPCLOADARG(__rla_type, __rla_arg) \
{ \
- union \
- { \
- __rla_type __rla_a; \
- l4_word_t __rla_raw[(sizeof (__rla_type) + sizeof (l4_word_t) - 1) \
- / sizeof (l4_word_t)]; \
- } __rla_arg2 = { (__rla_arg) }; \
- int __rla_i; \
- for (__rla_i = 0; \
- __rla_i < sizeof (__rla_arg2) / sizeof (l4_word_t); \
- __rla_i ++) \
- l4_msg_append_word (*msg, __rla_arg2.__rla_raw[__rla_i]); \
+ if (__builtin_strcmp (#__rla_type, "cap_t") == 0) \
+ { \
+ union \
+ { \
+ __rla_type __rla_a; \
+ RPC_GRAB2 (, 1, RPC_TYPE_SHIFT (1, struct cap *, cap_t, __rla_foo)); \
+ cap_t __rla_cap; \
+ } __rla_arg2 = { (__rla_arg) }; \
+ vg_message_append_cap (msg, __rla_arg2.__rla_cap); \
+ } \
+ else \
+ { \
+ union \
+ { \
+ __rla_type __rla_a; \
+ uintptr_t __rla_raw[(sizeof (__rla_type) + sizeof (uintptr_t) - 1) \
+ / sizeof (uintptr_t)]; \
+ } __rla_arg2 = { (__rla_arg) }; \
+ int __rla_i; \
+ for (__rla_i = 0; \
+ __rla_i < sizeof (__rla_arg2) / sizeof (uintptr_t); \
+ __rla_i ++) \
+ vg_message_append_word (msg, __rla_arg2.__rla_raw[__rla_i]); \
+ } \
}
#define RPCLOAD0(...)
@@ -271,23 +274,48 @@
#define RPCLOAD_(__rl_count, ...) RPCLOAD##__rl_count (__VA_ARGS__)
#define RPCLOAD(__rl_count, ...) RPCLOAD_ (__rl_count, __VA_ARGS__)
-/* Store the contents of MR __RSU_IDX+1 into *ARG, which is of type TYPE.
- NB: __RSU_IDX is thus the return parameter number, not the message
- register number; MR0 contains the error code. */
+/* Store the next argument in the message MSG whose type is __RSA_TYPE
+ in *__RSA_ARG. */
#define RPCSTOREARG(__rsa_type, __rsa_arg) \
{ \
- union \
- { \
- __rsa_type __rsa_a; \
- l4_word_t __rsa_raw[(sizeof (__rsa_type) + sizeof (l4_word_t) - 1) \
- / sizeof (l4_word_t)]; \
- } __rsa_arg2; \
- int __rsa_i; \
- for (__rsa_i = 0; \
- __rsa_i < sizeof (__rsa_arg2) / sizeof (l4_word_t); \
- __rsa_i ++) \
- __rsa_arg2.__rsa_raw[__rsa_i] = l4_msg_word (*msg, __rsu_idx ++); \
- *(__rsa_arg) = __rsa_arg2.__rsa_a; \
+ if (__builtin_strcmp (#__rsa_type, "cap_t") == 0) \
+ { \
+ union \
+ { \
+ __rsa_type *__rsa_a; \
+ cap_t *__rsa_cap; \
+ } __rsa_arg2; \
+ __rsa_arg2.__rsa_a = __rsa_arg; \
+ if (vg_message_cap_count (msg) > __rsu_cap_idx) \
+ { \
+ if (__rsa_arg) \
+ *__rsa_arg2.__rsa_cap = vg_message_cap (msg, __rsu_cap_idx); \
+ __rsu_cap_idx ++; \
+ } \
+ else \
+ __rsu_err = EINVAL; \
+ } \
+ else \
+ { \
+ union \
+ { \
+ __rsa_type __rsa_a; \
+ uintptr_t __rsa_raw[(sizeof (__rsa_type) + sizeof (uintptr_t) - 1) \
+ / sizeof (uintptr_t)]; \
+ } __rsa_arg2; \
+ int __rsa_i; \
+ for (__rsa_i = 0; \
+ __rsa_i < sizeof (__rsa_arg2) / sizeof (uintptr_t); \
+ __rsa_i ++) \
+ if (vg_message_data_count (msg) / sizeof (uintptr_t) \
+ > __rsu_data_idx) \
+ __rsa_arg2.__rsa_raw[__rsa_i] \
+ = vg_message_word (msg, __rsu_data_idx ++); \
+ else \
+ __rsu_err = EINVAL; \
+ if (! __rsu_err && __rsa_arg) \
+ *(__rsa_arg) = __rsa_arg2.__rsa_a; \
+ } \
}
#define RPCSTORE0(...)
@@ -345,35 +373,36 @@
#define RPCSTORE_(__rs_count, ...) RPCSTORE##__rs_count (__VA_ARGS__)
#define RPCSTORE(__rs_count, ...) RPCSTORE_ (__rs_count, __VA_ARGS__)
-/* Marshal the in-arguments into the provided message buffer. */
+/* Marshal a request. */
#define RPC_SEND_MARSHAL(id, icount, ...) \
static inline void \
+ __attribute__((always_inline)) \
RPC_CONCAT (RPC_STUB_PREFIX_(id), _send_marshal) \
- (l4_msg_t *msg RPC_IF_COMMA (icount) () \
- RPC_GRAB2 (, icount, ##__VA_ARGS__)) \
+ (struct vg_message *msg, \
+ RPC_GRAB2 (, icount, ##__VA_ARGS__) RPC_IF_COMMA(icount) () \
+ cap_t reply_messenger) \
{ \
- l4_msg_tag_t tag; \
- \
- tag = l4_niltag; \
- l4_msg_tag_set_label (&tag, RPC_ID_PREFIX_(id)); \
- \
- l4_msg_clear (*msg); \
- l4_msg_set_msg_tag (*msg, tag); \
- \
+ vg_message_clear (msg); \
+ /* Add the label. */ \
+ vg_message_append_word (msg, RPC_ID_PREFIX_(id)); \
+ /* Then load the arguments. */ \
RPCLOAD (icount, ##__VA_ARGS__); \
+ /* Finally, add the reply messenger. */ \
+ vg_message_append_cap (msg, reply_messenger); \
}
-/* Unmarshal the in-arguments from the provided message buffer. */
+/* Unmarshal a request. */
#define RPC_SEND_UNMARSHAL(id, icount, ...) \
static inline error_t \
+ __attribute__((always_inline)) \
RPC_CONCAT (RPC_STUB_PREFIX_(id), _send_unmarshal) \
- (l4_msg_t *msg RPC_IF_COMMA(icount) () \
- RPC_GRAB2 (*, icount, ##__VA_ARGS__)) \
+ (struct vg_message *msg, \
+ RPC_GRAB2 (*, icount, ##__VA_ARGS__) RPC_IF_COMMA(icount) () \
+ cap_t *reply_messenger) \
{ \
- l4_msg_tag_t tag = l4_msg_msg_tag (*msg); \
- \
- l4_word_t label; \
- label = l4_label (tag); \
+ uintptr_t label = 0; \
+ if (likely (vg_message_data_count (msg) >= sizeof (uintptr_t))) \
+ label = vg_message_word (msg, 0); \
if (label != RPC_ID_PREFIX_(id)) \
{ \
      debug (1, #id " has bad method id, %d, expected %d", \
@@ -381,130 +410,175 @@
return EINVAL; \
} \
\
- error_t err = 0; \
- int __rsu_idx __attribute__ ((unused)); \
- __rsu_idx = 0; \
+ int __rsu_data_idx __attribute__ ((unused)) = 1; \
+ int __rsu_cap_idx __attribute__ ((unused)) = 0; \
+ error_t __rsu_err = 0; \
RPCSTORE (icount, ##__VA_ARGS__); \
- if (err == 0 && __rsu_idx != l4_untyped_words (tag)) \
+ if (unlikely (__rsu_err \
+ || (__rsu_data_idx * sizeof (uintptr_t) \
+ != vg_message_data_count (msg) \
+ && __rsu_cap_idx + 1 != vg_message_cap_count (msg)))) \
{ \
- debug (1, #id " has wrong number of arguments: %d, expected %d words", \
- l4_untyped_words (tag), __rsu_idx); \
+ debug (1, #id " has wrong number of arguments: " \
+ "got %d bytes and %d caps; expected %d/%d", \
+ __rsu_data_idx * sizeof (uintptr_t), __rsu_cap_idx + 1, \
+ vg_message_data_count (msg), \
+ vg_message_cap_count (msg)); \
return EINVAL; \
} \
+ \
+ if (reply_messenger) \
+ *reply_messenger = vg_message_cap (msg, __rsu_cap_idx); \
return 0; \
}
-/* Marshal the reply. */
-#define RPC_REPLY_MARSHAL(id, ocount, ...) \
+/* Prepare a receive buffer. */
+#ifdef RM_INTERN
+#define RPC_RECEIVE_MARSHAL(id, ret_cap_count, ...)
+#else
+#define RPC_RECEIVE_MARSHAL(id, ret_cap_count, ...) \
+ static inline void \
+ __attribute__((always_inline)) \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _receive_marshal) \
+ (struct vg_message *msg RPC_IF_COMMA(ret_cap_count) () \
+ RPC_GRAB2 (, ret_cap_count, ##__VA_ARGS__)) \
+ { \
+ vg_message_clear (msg); \
+ /* Load the arguments. */ \
+ RPCLOAD (ret_cap_count, ##__VA_ARGS__); \
+ assert (vg_message_data_count (msg) == 0); \
+ assert (vg_message_cap_count (msg) == ret_cap_count); \
+ }
+#endif
+
+/* Marshal a reply. */
+#define RPC_REPLY_MARSHAL(id, out_count, ret_cap_count, ...) \
static inline void \
+ __attribute__((always_inline)) \
RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply_marshal) \
- (l4_msg_t *msg RPC_IF_COMMA (ocount) () \
- RPC_GRAB2 (, ocount, ##__VA_ARGS__)) \
+ (struct vg_message *msg \
+ RPC_IF_COMMA (out_count) () \
+ RPC_GRAB2 (, out_count, ##__VA_ARGS__) \
+ RPC_IF_COMMA (ret_cap_count) () \
+ RPC_GRAB2 (, ret_cap_count, \
+ RPC_TYPE_SHIFT (ret_cap_count, struct cap *, \
+ RPC_CHOP2 (out_count, __VA_ARGS__)))) \
{ \
- l4_msg_tag_t tag; \
- \
- tag = l4_niltag; \
- l4_msg_tag_set_label (&tag, RPC_ID_PREFIX_(id)); \
+ vg_message_clear (msg); \
\
- l4_msg_clear (*msg); \
- l4_msg_set_msg_tag (*msg, tag); \
+ /* The error code. */ \
+ vg_message_append_word (msg, 0); \
+ RPCLOAD (CPP_ADD (out_count, ret_cap_count), ##__VA_ARGS__); \
\
- /* No error. */ \
- l4_msg_append_word (*msg, 0); \
- \
- RPCLOAD (ocount, ##__VA_ARGS__); \
+ assert (vg_message_cap_count (msg) == ret_cap_count); \
}
-/* Unmarshal the reply. */
-#define RPC_REPLY_UNMARSHAL(id, ocount, ...) \
+/* Unmarshal a reply. */
+#define RPC_REPLY_UNMARSHAL(id, out_count, ret_cap_count, ...) \
static inline error_t \
+ __attribute__((always_inline)) \
RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply_unmarshal) \
- (l4_msg_t *msg RPC_IF_COMMA (ocount) () \
- RPC_GRAB2(*, ocount, ##__VA_ARGS__)) \
+ (struct vg_message *msg \
+ RPC_IF_COMMA (CPP_ADD (out_count, ret_cap_count)) () \
+ RPC_GRAB2(*, CPP_ADD (out_count, ret_cap_count), ##__VA_ARGS__)) \
{ \
- l4_msg_tag_t tag = l4_msg_msg_tag (*msg); \
- l4_word_t err = l4_msg_word (*msg, 0); \
+ /* The server error code. */ \
+ error_t __rsu_err = EINVAL; \
+ if (likely (vg_message_data_count (msg) >= sizeof (uintptr_t))) \
+ __rsu_err = vg_message_word (msg, 0); \
+ if (unlikely (__rsu_err)) \
+ return __rsu_err; \
\
- int __rsu_idx __attribute__ ((unused)); \
- __rsu_idx = 1; \
- RPCSTORE (ocount, ##__VA_ARGS__); \
- if (err == 0) \
+ int __rsu_data_idx __attribute__ ((unused)) = 1; \
+ int __rsu_cap_idx __attribute__ ((unused)) = 0; \
+ RPCSTORE (CPP_ADD (out_count, ret_cap_count), ##__VA_ARGS__); \
+ if (unlikely (__rsu_err \
+ || (__rsu_data_idx * sizeof (uintptr_t) \
+ != vg_message_data_count (msg) \
+ || __rsu_cap_idx != vg_message_cap_count (msg)))) \
{ \
- if (__rsu_idx != l4_untyped_words (tag)) \
- { \
- debug (1, "Got %d words, expected %d words", \
- __rsu_idx, l4_untyped_words (tag)); \
- return EINVAL; \
- } \
+ debug (1, #id " has wrong number of arguments: " \
+ "got %d bytes and %d caps; expected %d/%d", \
+ __rsu_data_idx * sizeof (uintptr_t), __rsu_cap_idx, \
+ vg_message_data_count (msg), \
+ vg_message_cap_count (msg)); \
+ return EINVAL; \
} \
- return err; \
+ return 0; \
}
/* RPC_ARGUMENTS takes a list of types and arguments and returns the first
COUNT arguments. (NB: the list may contain more than COUNT
- arguments!). */
+ arguments!).
+
+ RPC_ARGUMENTS(2, &, int, i, int, j, double, d)
+
+ =>
+
+ &i, &j
+*/
#define RPC_ARGUMENTS0(...)
-#define RPC_ARGUMENTS1(__ra_type, __ra_arg, ...) __ra_arg RPC_ARGUMENTS0(__VA_ARGS__)
-#define RPC_ARGUMENTS2(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS1(__VA_ARGS__)
-#define RPC_ARGUMENTS3(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS2(__VA_ARGS__)
-#define RPC_ARGUMENTS4(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS3(__VA_ARGS__)
-#define RPC_ARGUMENTS5(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS4(__VA_ARGS__)
-#define RPC_ARGUMENTS6(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS5(__VA_ARGS__)
-#define RPC_ARGUMENTS7(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS6(__VA_ARGS__)
-#define RPC_ARGUMENTS8(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS7(__VA_ARGS__)
-#define RPC_ARGUMENTS9(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS8(__VA_ARGS__)
-#define RPC_ARGUMENTS10(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS9(__VA_ARGS__)
-#define RPC_ARGUMENTS11(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS10(__VA_ARGS__)
-#define RPC_ARGUMENTS12(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS11(__VA_ARGS__)
-#define RPC_ARGUMENTS13(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS12(__VA_ARGS__)
-#define RPC_ARGUMENTS14(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS13(__VA_ARGS__)
-#define RPC_ARGUMENTS15(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS14(__VA_ARGS__)
-#define RPC_ARGUMENTS16(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS15(__VA_ARGS__)
-#define RPC_ARGUMENTS17(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS16(__VA_ARGS__)
-#define RPC_ARGUMENTS18(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS17(__VA_ARGS__)
-#define RPC_ARGUMENTS19(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS18(__VA_ARGS__)
-#define RPC_ARGUMENTS20(__ra_type, __ra_arg, ...) __ra_arg, RPC_ARGUMENTS19(__VA_ARGS__)
-#define RPC_ARGUMENTS_(__ra_count, ...) RPC_ARGUMENTS##__ra_count(__VA_ARGS__)
-#define RPC_ARGUMENTS(__ra_count, ...) RPC_ARGUMENTS_(__ra_count, __VA_ARGS__)
+#define RPC_ARGUMENTS1(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg RPC_ARGUMENTS0(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS2(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS1(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS3(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS2(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS4(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS3(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS5(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS4(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS6(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS5(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS7(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS6(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS8(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS7(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS9(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS8(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS10(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS9(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS11(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS10(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS12(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS11(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS13(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS12(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS14(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS13(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS15(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS14(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS16(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS15(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS17(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS16(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS18(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS17(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS19(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS18(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS20(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS19(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS_(__ra_count, __ra_prefix, ...) RPC_ARGUMENTS##__ra_count(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS(__ra_count, __ra_prefix, ...) RPC_ARGUMENTS_(__ra_count, __ra_prefix, __VA_ARGS__)
/* Given a list of arguments, returns the arguments minus the first
COUNT **pairs** of arguments. For example:
- RPC_CHOP(1, int, i, int, j, double, d)
+ RPC_CHOP2(1, int, i, int, j, double, d)
=>
int, j, double, d
*/
-#define RPC_CHOP0(...) __VA_ARGS__
-#define RPC_CHOP1(__rc_a, __rc_b, ...) RPC_CHOP0(__VA_ARGS__)
-#define RPC_CHOP2(__rc_a, __rc_b, ...) RPC_CHOP1(__VA_ARGS__)
-#define RPC_CHOP3(__rc_a, __rc_b, ...) RPC_CHOP2(__VA_ARGS__)
-#define RPC_CHOP4(__rc_a, __rc_b, ...) RPC_CHOP3(__VA_ARGS__)
-#define RPC_CHOP5(__rc_a, __rc_b, ...) RPC_CHOP4(__VA_ARGS__)
-#define RPC_CHOP6(__rc_a, __rc_b, ...) RPC_CHOP5(__VA_ARGS__)
-#define RPC_CHOP7(__rc_a, __rc_b, ...) RPC_CHOP6(__VA_ARGS__)
-#define RPC_CHOP8(__rc_a, __rc_b, ...) RPC_CHOP7(__VA_ARGS__)
-#define RPC_CHOP9(__rc_a, __rc_b, ...) RPC_CHOP8(__VA_ARGS__)
-#define RPC_CHOP10(__rc_a, __rc_b, ...) RPC_CHOP9(__VA_ARGS__)
-#define RPC_CHOP11(__rc_a, __rc_b, ...) RPC_CHOP10(__VA_ARGS__)
-#define RPC_CHOP12(__rc_a, __rc_b, ...) RPC_CHOP11(__VA_ARGS__)
-#define RPC_CHOP13(__rc_a, __rc_b, ...) RPC_CHOP12(__VA_ARGS__)
-#define RPC_CHOP14(__rc_a, __rc_b, ...) RPC_CHOP13(__VA_ARGS__)
-#define RPC_CHOP15(__rc_a, __rc_b, ...) RPC_CHOP14(__VA_ARGS__)
-#define RPC_CHOP16(__rc_a, __rc_b, ...) RPC_CHOP15(__VA_ARGS__)
-#define RPC_CHOP17(__rc_a, __rc_b, ...) RPC_CHOP16(__VA_ARGS__)
-#define RPC_CHOP18(__rc_a, __rc_b, ...) RPC_CHOP17(__VA_ARGS__)
-#define RPC_CHOP19(__rc_a, __rc_b, ...) RPC_CHOP18(__VA_ARGS__)
-#define RPC_CHOP20(__rc_a, __rc_b, ...) RPC_CHOP19(__VA_ARGS__)
-#define RPC_CHOP21(__rc_a, __rc_b, ...) RPC_CHOP20(__VA_ARGS__)
-#define RPC_CHOP22(__rc_a, __rc_b, ...) RPC_CHOP21(__VA_ARGS__)
-#define RPC_CHOP23(__rc_a, __rc_b, ...) RPC_CHOP22(__VA_ARGS__)
-#define RPC_CHOP24(__rc_a, __rc_b, ...) RPC_CHOP23(__VA_ARGS__)
-#define RPC_CHOP25(__rc_a, __rc_b, ...) RPC_CHOP24(__VA_ARGS__)
-#define RPC_CHOP_(__rc_count, ...) RPC_CHOP##__rc_count (__VA_ARGS__)
-#define RPC_CHOP(__rc_count, ...) RPC_CHOP_(__rc_count, __VA_ARGS__)
+#define RPC_CHOP2_0(...) __VA_ARGS__
+#define RPC_CHOP2_1(__rc_a, __rc_b, ...) RPC_CHOP2_0(__VA_ARGS__)
+#define RPC_CHOP2_2(__rc_a, __rc_b, ...) RPC_CHOP2_1(__VA_ARGS__)
+#define RPC_CHOP2_3(__rc_a, __rc_b, ...) RPC_CHOP2_2(__VA_ARGS__)
+#define RPC_CHOP2_4(__rc_a, __rc_b, ...) RPC_CHOP2_3(__VA_ARGS__)
+#define RPC_CHOP2_5(__rc_a, __rc_b, ...) RPC_CHOP2_4(__VA_ARGS__)
+#define RPC_CHOP2_6(__rc_a, __rc_b, ...) RPC_CHOP2_5(__VA_ARGS__)
+#define RPC_CHOP2_7(__rc_a, __rc_b, ...) RPC_CHOP2_6(__VA_ARGS__)
+#define RPC_CHOP2_8(__rc_a, __rc_b, ...) RPC_CHOP2_7(__VA_ARGS__)
+#define RPC_CHOP2_9(__rc_a, __rc_b, ...) RPC_CHOP2_8(__VA_ARGS__)
+#define RPC_CHOP2_10(__rc_a, __rc_b, ...) RPC_CHOP2_9(__VA_ARGS__)
+#define RPC_CHOP2_11(__rc_a, __rc_b, ...) RPC_CHOP2_10(__VA_ARGS__)
+#define RPC_CHOP2_12(__rc_a, __rc_b, ...) RPC_CHOP2_11(__VA_ARGS__)
+#define RPC_CHOP2_13(__rc_a, __rc_b, ...) RPC_CHOP2_12(__VA_ARGS__)
+#define RPC_CHOP2_14(__rc_a, __rc_b, ...) RPC_CHOP2_13(__VA_ARGS__)
+#define RPC_CHOP2_15(__rc_a, __rc_b, ...) RPC_CHOP2_14(__VA_ARGS__)
+#define RPC_CHOP2_16(__rc_a, __rc_b, ...) RPC_CHOP2_15(__VA_ARGS__)
+#define RPC_CHOP2_17(__rc_a, __rc_b, ...) RPC_CHOP2_16(__VA_ARGS__)
+#define RPC_CHOP2_18(__rc_a, __rc_b, ...) RPC_CHOP2_17(__VA_ARGS__)
+#define RPC_CHOP2_19(__rc_a, __rc_b, ...) RPC_CHOP2_18(__VA_ARGS__)
+#define RPC_CHOP2_20(__rc_a, __rc_b, ...) RPC_CHOP2_19(__VA_ARGS__)
+#define RPC_CHOP2_21(__rc_a, __rc_b, ...) RPC_CHOP2_20(__VA_ARGS__)
+#define RPC_CHOP2_22(__rc_a, __rc_b, ...) RPC_CHOP2_21(__VA_ARGS__)
+#define RPC_CHOP2_23(__rc_a, __rc_b, ...) RPC_CHOP2_22(__VA_ARGS__)
+#define RPC_CHOP2_24(__rc_a, __rc_b, ...) RPC_CHOP2_23(__VA_ARGS__)
+#define RPC_CHOP2_25(__rc_a, __rc_b, ...) RPC_CHOP2_24(__VA_ARGS__)
+#define RPC_CHOP2_(__rc_count, ...) RPC_CHOP2_##__rc_count (__VA_ARGS__)
+#define RPC_CHOP2(__rc_count, ...) RPC_CHOP2_(__rc_count, __VA_ARGS__)
/* Given a list of arguments, returns the first COUNT **pairs** of
arguments, the elements of each pair separated by SEP and each pair
@@ -576,182 +650,405 @@
#define RPC_GRAB_(__rg_count, ...) RPC_GRAB_##__rg_count (__VA_ARGS__)
#define RPC_GRAB(__rg_count, ...) RPC_GRAB_(__rg_count, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_0(...)
+#define RPC_TYPE_SHIFT_1(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg RPC_TYPE_SHIFT_0(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_2(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_1(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_3(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_2(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_4(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_3(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_5(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_4(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_6(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_5(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_7(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_6(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_8(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_7(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_9(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_8(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_10(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_9(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_11(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_10(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_12(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_11(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_13(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_12(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_14(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_13(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_15(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_14(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_16(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_15(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_17(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_16(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_18(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_17(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_19(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_18(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_20(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_19(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_(__ra_count, __ra_new_type, ...) RPC_TYPE_SHIFT_##__ra_count(__ra_new_type, __VA_ARGS__)
+#ifdef RM_INTERN
+# define RPC_TYPE_SHIFT(__ra_count, __ra_new_type, ...) RPC_TYPE_SHIFT_(__ra_count, __ra_new_type, __VA_ARGS__)
+#else
+# define RPC_TYPE_SHIFT(__ra_count, __ra_new_type, ...) __VA_ARGS__
+#endif
+
/* Ensure that there are X pairs of arguments. */
#define RPC_INVALID_NUMBER_OF_ARGUMENTS_
#define RPC_EMPTY_LIST_(x) RPC_INVALID_NUMBER_OF_ARGUMENTS_##x
#define RPC_EMPTY_LIST(x) RPC_EMPTY_LIST_(x)
#define RPC_ENSURE_ARGS(count, ...) \
- RPC_EMPTY_LIST (RPC_CHOP (count, __VA_ARGS__))
-
-#define RPC_MARSHAL_GEN_(id, icount, ocount, ...) \
- RPC_ENSURE_ARGS(CPP_ADD (icount, ocount), ##__VA_ARGS__) \
+ RPC_EMPTY_LIST (RPC_CHOP2 (count, __VA_ARGS__))
+
+#define RPC_SEND_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, ...) \
+ RPC_SEND_MARSHAL(id, in_count, ##__VA_ARGS__) \
+ RPC_SEND_UNMARSHAL(id, in_count, ##__VA_ARGS__)
+
+#define RPC_RECEIVE_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, ...) \
+ RPC_RECEIVE_MARSHAL(id, ret_cap_count, \
+ RPC_CHOP2 (CPP_ADD (in_count, out_count), \
+ ##__VA_ARGS__))
+
+#define RPC_REPLY_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, ...) \
+ RPC_REPLY_MARSHAL(id, out_count, ret_cap_count, \
+ RPC_CHOP2 (in_count, ##__VA_ARGS__)) \
+ RPC_REPLY_UNMARSHAL(id, out_count, ret_cap_count, \
+ RPC_CHOP2 (in_count, ##__VA_ARGS__))
+
+#define RPC_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, ...) \
+ RPC_ENSURE_ARGS(CPP_ADD (CPP_ADD (in_count, out_count), \
+ ret_cap_count), \
+ ##__VA_ARGS__) \
+ RPC_SEND_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, \
+ ##__VA_ARGS__) \
+ RPC_RECEIVE_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, \
+ ##__VA_ARGS__) \
+ RPC_REPLY_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, \
+ ##__VA_ARGS__)
+
+/* Send a message. __RPC_REPLY_MESSENGER designates the messenger that
+ should receive the reply. (Its buffer should have already been
+ prepared using, e.g., the corresponding receive_marshal
+ function.) */
+#ifndef RM_INTERN
+#define RPC_SEND_(postfix, id, in_count, out_count, ret_cap_count, ...) \
+ static inline error_t \
+ __attribute__((always_inline)) \
+ RPC_CONCAT(RPC_STUB_PREFIX_(id), postfix) \
+ (cap_t __rpc_activity, cap_t __rpc_object \
+ RPC_IF_COMMA (in_count) () \
+ RPC_GRAB2 (, in_count, __VA_ARGS__), \
+ cap_t __rpc_reply_messenger) \
+ { \
+ struct hurd_message_buffer *mb = hurd_message_buffer_alloc (); \
+ mb->just_free = true; \
\
- RPC_SEND_MARSHAL(id, icount, ##__VA_ARGS__) \
- RPC_SEND_UNMARSHAL(id, icount, ##__VA_ARGS__) \
- RPC_REPLY_MARSHAL(id, ocount, RPC_CHOP (icount, ##__VA_ARGS__)) \
- RPC_REPLY_UNMARSHAL(id, ocount, RPC_CHOP (icount, ##__VA_ARGS__))
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _send_marshal) \
+ (mb->request \
+ RPC_IF_COMMA (in_count) () RPC_ARGUMENTS(in_count,, __VA_ARGS__), \
+ __rpc_reply_messenger); \
+ \
+ error_t err = vg_send (VG_IPC_SEND_SET_THREAD_TO_CALLER \
+ | VG_IPC_SEND_SET_ASROOT_TO_CALLERS, \
+ __rpc_activity, __rpc_object, \
+ mb->sender, ADDR_VOID); \
+ \
+ return err; \
+ }
+#else
+#define RPC_SEND_(postfix, id, in_count, out_count, ret_cap_count, ...)
+#endif
-#define RPC_SIMPLE_(postfix, id, icount, ocount, ...) \
- /* Send, but do not wait for a reply. */ \
+/* Send a message. Abort if the target is not ready. */
+#ifndef RM_INTERN
+#define RPC_SEND_NONBLOCKING_(postfix, id, in_count, out_count, ret_cap_count, ...) \
static inline error_t \
__attribute__((always_inline)) \
RPC_CONCAT(RPC_STUB_PREFIX_(id), postfix) \
- (RPC_TARGET_ARG_ RPC_GRAB2 (, icount, __VA_ARGS__)) \
+ (cap_t __rpc_activity, cap_t __rpc_object \
+ RPC_IF_COMMA (in_count) () \
+ RPC_GRAB2 (, in_count, __VA_ARGS__), \
+ cap_t __rpc_reply_messenger) \
{ \
- l4_msg_tag_t tag; \
- l4_msg_t msg; \
+ struct hurd_message_buffer *mb = hurd_message_buffer_alloc (); \
\
RPC_CONCAT (RPC_STUB_PREFIX_(id), _send_marshal) \
- CPP_IFTHEN (icount, \
- (&msg, RPC_ARGUMENTS(icount, __VA_ARGS__)), \
- (&msg)); \
+ (mb->request \
+ RPC_IF_COMMA (in_count) () RPC_ARGUMENTS(in_count,, __VA_ARGS__), \
+ __rpc_reply_messenger); \
\
- l4_msg_load (msg); \
- l4_accept (L4_UNTYPED_WORDS_ACCEPTOR); \
+ error_t err = vg_reply (VG_IPC_SEND_SET_THREAD_TO_CALLER \
+ | VG_IPC_SEND_SET_ASROOT_TO_CALLERS, \
+ __rpc_activity, __rpc_object, \
+ mb->sender, ADDR_VOID); \
\
- for (;;) \
- { \
- tag = l4_send (RPC_TARGET_); \
- if (unlikely (l4_ipc_failed (tag))) \
- { \
- if (((l4_error_code () >> 1) & 0x7) == 3) \
- /* IPC was interrupted. */ \
- /* XXX: We need to somehow consider the signal state and \
- return EINTR if appropriate. */ \
- continue; \
- return EHOSTDOWN; \
- } \
- break; \
- } \
+ hurd_message_buffer_free (mb); \
\
- return 0; \
+ return err; \
}
+#else
+#define RPC_SEND_NONBLOCKING_(postfix, id, in_count, out_count, ret_cap_count, ...)
+#endif
-#define RPC_(postfix, id, icount, ocount, ...) \
+/* Send a message and wait for a reply. */
+#ifndef RM_INTERN
+#define RPC_(postfix, id, in_count, out_count, ret_cap_count, ...) \
static inline error_t \
__attribute__((always_inline)) \
- RPC_CONCAT (RPC_STUB_PREFIX_(id), postfix) \
- (RPC_TARGET_ARG_ \
- RPC_GRAB (CPP_ADD(icount, ocount), \
- RPC_GRAB2 (, icount, __VA_ARGS__) RPC_IF_COMMA(icount) () \
- RPC_GRAB2 (*, ocount, RPC_CHOP (icount, __VA_ARGS__)))) \
+ RPC_CONCAT (RPC_CONCAT (RPC_STUB_PREFIX_(id), _using), postfix) \
+ (struct hurd_message_buffer *mb, \
+ addr_t __rpc_activity, \
+ addr_t __rpc_object \
+ /* In arguments. */ \
+ RPC_IF_COMMA (in_count) () \
+ RPC_GRAB2 (, in_count, __VA_ARGS__) \
+ /* Out arguments (data and caps). */ \
+ RPC_IF_COMMA (CPP_ADD (out_count, ret_cap_count)) () \
+ RPC_GRAB2 (*, CPP_ADD (out_count, ret_cap_count), \
+ RPC_CHOP2 (in_count, __VA_ARGS__))) \
{ \
- l4_msg_tag_t tag; \
- l4_msg_t msg; \
+ /* Prepare the reply buffer. */ \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _receive_marshal) \
+ (mb->reply \
+ RPC_IF_COMMA (ret_cap_count) () \
+ CPP_FOREACH(ret_cap_count, CPP_SAFE_DEREF, ADDR_VOID, \
+ RPC_ARGUMENTS (ret_cap_count, , \
+ RPC_CHOP2 (CPP_ADD (in_count, out_count), \
+ __VA_ARGS__)))); \
\
+ /* Then the send buffer. */ \
RPC_CONCAT (RPC_STUB_PREFIX_(id), _send_marshal) \
- CPP_IFTHEN (icount, \
- (&msg, RPC_ARGUMENTS(icount, __VA_ARGS__)), \
- (&msg)); \
+ (mb->request \
+ RPC_IF_COMMA (in_count) () \
+ RPC_ARGUMENTS (in_count,, __VA_ARGS__), \
+ mb->receiver); \
\
- l4_msg_load (msg); \
- l4_accept (l4_map_grant_items (L4_COMPLETE_ADDRESS_SPACE)); \
+ hurd_activation_message_register (mb); \
\
- bool call = true; \
- for (;;) \
- { \
- if (call) \
- tag = l4_call (RPC_TARGET_); \
- else \
- tag = l4_receive (RPC_TARGET_); \
- if (unlikely (l4_ipc_failed (tag))) \
- { \
- if (((l4_error_code () >> 1) & 0x7) == 3) \
- /* IPC was interrupted. */ \
- /* XXX: We need to somehow consider the signal state and \
- return EINTR if appropriate. */ \
- { \
- if ((l4_error_code () & 1)) \
- /* Error occurred during receive phase. */ \
- call = false; \
+ /* We will be resumed via an activation. */ \
+ error_t err = vg_ipc (VG_IPC_RECEIVE | VG_IPC_SEND \
+ | VG_IPC_RECEIVE_ACTIVATE \
+ | VG_IPC_SEND_SET_THREAD_TO_CALLER \
+ | VG_IPC_SEND_SET_ASROOT_TO_CALLERS \
+ | VG_IPC_RECEIVE_SET_THREAD_TO_CALLER \
+ | VG_IPC_RECEIVE_SET_ASROOT_TO_CALLERS, \
+ __rpc_activity, \
+ mb->receiver_strong, ADDR_VOID, \
+ __rpc_activity, __rpc_object, \
+ mb->sender, ADDR_VOID); \
+ if (err) \
+ /* Error sending the IPC. */ \
+ hurd_activation_message_unregister (mb); \
+ else \
+ err = RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply_unmarshal) \
+ (mb->reply \
+ RPC_IF_COMMA (CPP_ADD (out_count, ret_cap_count)) () \
+ RPC_ARGUMENTS (CPP_ADD (out_count, ret_cap_count),, \
+ RPC_CHOP2 (in_count, ##__VA_ARGS__))); \
\
- continue; \
- } \
- return EHOSTDOWN; \
- } \
- break; \
- } \
- \
- l4_msg_store (tag, msg); \
- return RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply_unmarshal) \
- CPP_IFTHEN (ocount, \
- (&msg, RPC_ARGUMENTS (ocount, \
- RPC_CHOP (icount, ##__VA_ARGS__))), \
- (&msg)); \
+ return err; \
} \
+ \
+ static inline error_t \
+ __attribute__((always_inline)) \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), postfix) \
+ (addr_t __rpc_activity, \
+ addr_t __rpc_object \
+ /* In arguments. */ \
+ RPC_IF_COMMA (in_count) () \
+ RPC_GRAB2 (, in_count, __VA_ARGS__) \
+ /* Out arguments (data and caps). */ \
+ RPC_IF_COMMA (CPP_ADD (out_count, ret_cap_count)) () \
+ RPC_GRAB2 (*, CPP_ADD (out_count, ret_cap_count), \
+ RPC_CHOP2 (in_count, __VA_ARGS__))) \
+ { \
+ struct hurd_message_buffer *mb = hurd_message_buffer_alloc (); \
+ \
+ error_t err; \
+ err = RPC_CONCAT (RPC_CONCAT (RPC_STUB_PREFIX_(id), _using), postfix) \
+ (mb, __rpc_activity, __rpc_object \
+ RPC_IF_COMMA (CPP_ADD (CPP_ADD (in_count, out_count), \
+ ret_cap_count)) () \
+ RPC_ARGUMENTS (CPP_ADD (CPP_ADD (in_count, out_count), \
+ ret_cap_count),, __VA_ARGS__)); \
+ \
+ hurd_message_buffer_free (mb); \
+ \
+ return err; \
+ }
+#else
+# define RPC_(postfix, id, in_count, out_count, ret_cap_count, ...)
+#endif
-/* Generate stubs for marshalling a reply and sending it (without
- blocking). */
-#define RPC_REPLY_(id, icount, ocount, ...) \
+/* Send a reply to __RPC_TARGET. If __RPC_TARGET does not accept the
+ message immediately, abort sending. */
+#ifndef RM_INTERN
+#define RPC_REPLY_(id, in_count, out_count, ret_cap_count, ...) \
static inline error_t \
RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply) \
- (l4_thread_id_t tid RPC_IF_COMMA (ocount) () \
- RPC_GRAB2 (, ocount, RPC_CHOP (icount, ##__VA_ARGS__))) \
+ (addr_t __rpc_activity, \
+ addr_t __rpc_target \
+ /* Out data. */ \
+ RPC_IF_COMMA (out_count) () \
+ RPC_GRAB2 (, out_count, RPC_CHOP2 (in_count, ##__VA_ARGS__)) \
+ /* Return capabilities. */ \
+ RPC_IF_COMMA (ret_cap_count) () \
+ RPC_GRAB2 (, ret_cap_count, \
+ RPC_CHOP2 (CPP_ADD (in_count, out_count), \
+ ##__VA_ARGS__))) \
{ \
- l4_msg_tag_t tag; \
- l4_msg_t msg; \
+ struct hurd_message_buffer *mb = hurd_message_buffer_alloc (); \
\
RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply_marshal) \
- (&msg RPC_IF_COMMA (ocount) () \
- RPC_ARGUMENTS(ocount, RPC_CHOP (icount, __VA_ARGS__))); \
+ (mb->request \
+ /* Out data. */ \
+ RPC_IF_COMMA (out_count) () \
+ RPC_ARGUMENTS(out_count,, RPC_CHOP2 (in_count, __VA_ARGS__)) \
+ /* Out capabilities. */ \
+ RPC_IF_COMMA (ret_cap_count) () \
+ RPC_ARGUMENTS(ret_cap_count,, \
+ RPC_CHOP2 (CPP_ADD (in_count, out_count), \
+ __VA_ARGS__))); \
\
- l4_msg_load (msg); \
- l4_accept (L4_UNTYPED_WORDS_ACCEPTOR); \
+ error_t err = vg_reply (VG_IPC_SEND_SET_THREAD_TO_CALLER \
+ | VG_IPC_SEND_SET_ASROOT_TO_CALLERS, \
+ __rpc_activity, __rpc_target, \
+ mb->sender, ADDR_VOID); \
\
- tag = l4_reply (tid); \
- if (l4_ipc_failed (tag)) \
- return EHOSTDOWN; \
- return 0; \
+ hurd_message_buffer_free (mb); \
+ \
+ return err; \
}
+#else
+#define RPC_REPLY_(id, in_count, out_count, ret_cap_count, ...) \
+ static inline error_t \
+ __attribute__((always_inline)) \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply) \
+ (struct activity *__rpc_activity, \
+ struct messenger *__rpc_target \
+ /* Out data. */ \
+ RPC_IF_COMMA (out_count) () \
+ RPC_GRAB2 (, out_count, RPC_CHOP2 (in_count, ##__VA_ARGS__)) \
+ /* Return capabilities. */ \
+ RPC_IF_COMMA (ret_cap_count) () \
+ RPC_GRAB2 (, ret_cap_count, \
+ RPC_TYPE_SHIFT (ret_cap_count, struct cap, \
+ RPC_CHOP2 (CPP_ADD (in_count, out_count), \
+ ##__VA_ARGS__)))) \
+ { \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply_marshal) \
+ (reply_buffer \
+ /* Out data. */ \
+ RPC_IF_COMMA (out_count) () \
+ RPC_ARGUMENTS(out_count,, RPC_CHOP2 (in_count, ##__VA_ARGS__)) \
+ /* Out capabilities. */ \
+ RPC_IF_COMMA (ret_cap_count) () \
+ RPC_ARGUMENTS (ret_cap_count, &, \
+ RPC_CHOP2 (CPP_ADD (in_count, out_count), \
+ ##__VA_ARGS__))); \
+ \
+ bool ret = messenger_message_load (__rpc_activity, \
+ __rpc_target, reply_buffer); \
+ \
+ return ret ? 0 : EWOULDBLOCK; \
+ }
+#endif
+
+/* RPC template. ID is the method name. IN_COUNT is the number of
+   in (data and capability) arguments. OUT_COUNT is the number of out
+   data arguments.
+ RET_CAP_COUNT is the number of capabilities that are returned. The
+ remaining arguments correspond to pairs of types and argument
+ names.
+
+ Consider:
+
+     RPC(method, 2, 1, 1,
+         // In (data and capability) parameters
+         int, foo, cap_t, bar,
+         // Out data parameters
+         int, bam,
+         // Out capabilities
+         cap_t, xyzzy)
+
+ This will generate marshalling and unmarshalling functions as well
+ as send, reply and call functions. For instance, the signature for
+   the corresponding send marshal function is:
+
+ error_t method_send_marshal (struct vg_message *message,
+ int foo, cap_t bar, cap_t reply)
+
+ that of the send unmarshal function is:
+
+ error_t method_send_unmarshal (struct vg_message *message,
+ int *foo, cap_t *bar, cap_t *reply)
+
+ that of the receive marshal function is:
+
+ error_t method_receive_marshal (struct vg_message *message,
+ cap_t xyzzy)
+
+
+ that of the reply marshal function is:
+
+ error_t method_reply_marshal (struct vg_message *message,
+ int bam, cap_t xyzzy)
-/* RPC template. ID is the method name, ARGS is the list of arguments
- as normally passed to a function, LOADER is code to load the in
- parameters, and STORER is code to load the out parameters. The
- code assumes that the first MR contains the error code and returns
- this as the function return value. If the IPC fails, EHOSTDOWN is
- returned. */
-
-#define RPC_SIMPLE(id, icount, ocount, ...) \
- RPC_MARSHAL_GEN_(id, icount, ocount, ##__VA_ARGS__) \
- \
- RPC_SIMPLE_(, id, icount, ocount, ##__VA_ARGS__) \
- RPC_SIMPLE_(_send, id, icount, ocount, ##__VA_ARGS__) \
- RPC_(_call, id, icount, ocount, ##__VA_ARGS__) \
- \
- RPC_REPLY_(id, icount, ocount, ##__VA_ARGS__)
-
-#define RPC(id, icount, ocount, ...) \
- RPC_MARSHAL_GEN_(id, icount, ocount, ##__VA_ARGS__) \
- \
- RPC_(, id, icount, ocount, ##__VA_ARGS__) \
- RPC_SIMPLE_(_send, id, icount, ocount, ##__VA_ARGS__) \
- RPC_(_call, id, icount, ocount, ##__VA_ARGS__) \
- \
- RPC_REPLY_(id, icount, ocount, ##__VA_ARGS__)
+ that of the reply unmarshal function is:
+
+ error_t method_reply_unmarshal (struct vg_message *message,
+ int *bam, cap_t *xyzzy)
+
+ Functions to send requests and replies as well as to produce calls
+ are also generated.
+
+     error_t method (cap_t activity, cap_t object,
+                     int foo, cap_t bar, int *bam, cap_t *xyzzy)
+
+   Note that *XYZZY must be initialized with the location of a
+ capability slot to store the returned capability. *XYZZY is set to
+ ADDR_VOID if the sender did not provide a capability.
+
+ To send a message and not wait for a reply, a function with the
+ following prototype is generated:
+
+ error_t method_send (cap_t activity, cap_t object,
+ int foo, cap_t bar,
+ cap_t reply_messenger)
+
+ To reply to a request, a function with the following prototype is
+ generated:
+
+ error_t method_reply (cap_t activity, cap_t reply_messenger,
+ int bam, cap_t xyzzy)
+*/
+
+#define RPC(id, in_count, out_count, ret_cap_count, ...) \
+ RPC_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, ##__VA_ARGS__) \
+ \
+ RPC_(, id, in_count, out_count, ret_cap_count, ##__VA_ARGS__) \
+ RPC_SEND_(_send, id, in_count, out_count, ret_cap_count, \
+ ##__VA_ARGS__) \
+ RPC_SEND_NONBLOCKING_(_send_nonblocking, \
+ id, in_count, out_count, ret_cap_count, \
+ ##__VA_ARGS__) \
+ RPC_REPLY_(id, in_count, out_count, ret_cap_count, ##__VA_ARGS__)
/* Marshal a reply consisting of the error code ERR in *MSG. */
static inline void
-rpc_error_reply_marshal (l4_msg_t *msg, error_t err)
+__attribute__((always_inline))
+rpc_error_reply_marshal (struct vg_message *msg, error_t err)
{
- l4_msg_clear (*msg);
- l4_msg_put_word (*msg, 0, err);
- l4_msg_set_untyped_words (*msg, 1);
+ vg_message_clear (msg);
+ vg_message_append_word (msg, err);
}
-/* Reply to the thread THREAD with error code ERROR. */
+/* Reply to the target TARGET with error code ERROR. */
+#ifdef RM_INTERN
static inline error_t
-rpc_error_reply (l4_thread_id_t thread, error_t err)
+__attribute__((always_inline))
+rpc_error_reply (struct activity *activity, struct messenger *target,
+ error_t err)
{
- l4_msg_t msg;
-
- rpc_error_reply_marshal (&msg, err);
- l4_msg_load (msg);
-
- l4_msg_tag_t tag;
- tag = l4_reply (thread);
- if (l4_ipc_failed (tag))
- return EHOSTDOWN;
- return 0;
+ rpc_error_reply_marshal (reply_buffer, err);
+ bool ret = messenger_message_load (activity, target, reply_buffer);
+ return ret ? 0 : EWOULDBLOCK;
}
+#else
+static inline error_t
+__attribute__((always_inline))
+rpc_error_reply (cap_t activity, cap_t target, error_t err)
+{
+ return vg_ipc_short (VG_IPC_SEND_NONBLOCKING | VG_IPC_SEND_INLINE
+ | VG_IPC_SEND_INLINE_WORD1,
+ ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ ADDR_VOID, target,
+ ADDR_VOID, err, 0, ADDR_VOID);
+}
+#endif
#endif
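
For reference, a new interface header built on this template follows the same pattern as hurd/messenger.h above. The sketch below is purely illustrative: the method name, its id value and its arguments are hypothetical and not part of this change.

#include <stdint.h>
#include <hurd/addr.h>

enum
  {
    VG_example_lookup = 1000,	/* Hypothetical method id.  */
  };

#define RPC_STUB_PREFIX vg
#define RPC_ID_PREFIX VG

#include <hurd/rpc.h>

/* Look up NAME; return its LENGTH and a capability VALUE.  */
RPC(example_lookup, 1, 1, 1,
    /* cap_t activity, cap_t object, */
    uintptr_t, name,
    /* Out data: */
    uintptr_t, length,
    /* Out capabilities: */
    cap_t, value)

#undef RPC_STUB_PREFIX
#undef RPC_ID_PREFIX

Per the template, this expands to vg_example_lookup, vg_example_lookup_send, vg_example_lookup_send_nonblocking and vg_example_lookup_reply, plus the corresponding marshal and unmarshal helpers.
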
diff --git a/hurd/startup.h b/hurd/startup.h
index 7883dfb..0c31d9f 100644
--- a/hurd/startup.h
+++ b/hurd/startup.h
@@ -23,10 +23,12 @@
#include <stdint.h>
#include <stddef.h>
-#include <l4/types.h>
-
#include <hurd/types.h>
#include <hurd/addr.h>
+
+#ifdef USE_L4
+# include <l4/types.h>
+#endif
/* The version of the startup data defined by this header file. */
#define HURD_STARTUP_VERSION_MAJOR UINT16_C (0)
@@ -59,11 +61,16 @@ struct hurd_startup_data
unsigned short version_minor;
/* Startup flags. */
- l4_word_t flags;
+ uintptr_t flags;
+#ifdef USE_L4
/* The UTCB area of this task. */
l4_fpage_t utcb_area;
+ /* Thread id of Viengoos. */
+ l4_thread_id_t rm;
+#endif
+
/* The argument vector. */
char *argz;
/* Absolute address in the data space. */
@@ -74,9 +81,6 @@ struct hurd_startup_data
/* Absolute address in the data space. */
size_t envz_len;
- /* Thread id of the resource manager. */
- l4_thread_id_t rm;
-
/* Slot in which a capability designating the task's primary
activity is stored. */
addr_t activity;
@@ -85,6 +89,11 @@ struct hurd_startup_data
stored. */
addr_t thread;
+  /* To allow a task to bootstrap itself, it needs a couple of
+ messengers (one to send and another to receive). Here they
+ are. */
+ addr_t messengers[2];
+
struct hurd_object_desc *descs;
int desc_count;
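
A sketch of how early start-up code might pick up these bootstrap messengers. The __hurd_startup_data pointer and the send/receive roles assigned to the two slots are assumptions of this sketch, not something the header defines:

#include <hurd/startup.h>

extern struct hurd_startup_data *__hurd_startup_data;

/* Return the two bootstrap messengers provided at start up.  Assumed
   convention: slot 0 is used for sending, slot 1 for receiving.  */
static void
example_bootstrap_messengers (addr_t *sender, addr_t *receiver)
{
  *sender = __hurd_startup_data->messengers[0];
  *receiver = __hurd_startup_data->messengers[1];
}
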
diff --git a/hurd/t-rpc.c b/hurd/t-rpc.c
index 9483d39..b286296 100644
--- a/hurd/t-rpc.c
+++ b/hurd/t-rpc.c
@@ -7,20 +7,19 @@ int output_debug = 1;
#define RPC_STUB_PREFIX rpc
#define RPC_ID_PREFIX RPC
-#undef RPC_TARGET_NEED_ARG
-#define RPC_TARGET 1
#include <hurd/rpc.h>
/* Exception message ids. */
enum
{
- RPC_noargs = 500,
+ RPC_noargs = 0x1ABE100,
RPC_onein,
RPC_oneout,
RPC_onlyin,
RPC_onlyout,
RPC_mix,
+ RPC_caps,
};
struct foo
@@ -29,58 +28,81 @@ struct foo
char b;
};
-RPC(noargs, 0, 0)
-RPC(onein, 1, 0, uint32_t, arg)
-RPC(oneout, 0, 1, uint32_t, arg)
-RPC(onlyin, 4, 0, uint32_t, arg, uint32_t, idx, struct foo, foo, bool, p)
-RPC(onlyout, 0, 4, uint32_t, arg, uint32_t, idx, struct foo, foo, bool, p)
-RPC(mix, 2, 3, uint32_t, arg, uint32_t, idx,
+RPC(noargs, 0, 0, 0)
+RPC(onein, 1, 0, 0, uint32_t, arg)
+RPC(oneout, 0, 1, 0, uint32_t, arg)
+RPC(onlyin, 4, 0, 0, uint32_t, arg, uint32_t, idx, struct foo, foo, bool, p)
+RPC(onlyout, 0, 4, 0, uint32_t, arg, uint32_t, idx, struct foo, foo, bool, p)
+RPC(mix, 2, 3, 0, uint32_t, arg, uint32_t, idx,
struct foo, foo, bool, p, int, i)
+RPC(caps, 3, 2, 2,
+ /* In: */
+ int, i, cap_t, c, struct foo, foo,
+ /* Out: */
+ int, a, int, b, cap_t, x, cap_t, y)
#undef RPC_STUB_PREFIX
#undef RPC_ID_PREFIX
-#undef RPC_TARGET
int
main (int argc, char *argv[])
{
printf ("Checking RPC... ");
- l4_msg_t msg;
error_t err;
+ struct vg_message *msg;
- rpc_noargs_send_marshal (&msg);
- err = rpc_noargs_send_unmarshal (&msg);
+
+#define REPLY ADDR (0x1000, ADDR_BITS - 12)
+ addr_t reply = REPLY;
+
+ msg = malloc (sizeof (*msg));
+ rpc_noargs_send_marshal (msg, REPLY);
+ err = rpc_noargs_send_unmarshal (msg, &reply);
assert (! err);
+ assert (ADDR_EQ (reply, REPLY));
+ free (msg);
- rpc_noargs_reply_marshal (&msg);
- err = rpc_noargs_reply_unmarshal (&msg);
+ msg = malloc (sizeof (*msg));
+ rpc_noargs_reply_marshal (msg);
+ err = rpc_noargs_reply_unmarshal (msg);
assert (err == 0);
+ free (msg);
+ msg = malloc (sizeof (*msg));
#define VALUE 0xfde8963a
uint32_t arg = VALUE;
uint32_t arg_out;
- rpc_onein_send_marshal (&msg, arg);
- err = rpc_onein_send_unmarshal (&msg, &arg_out);
+ rpc_onein_send_marshal (msg, arg, REPLY);
+ err = rpc_onein_send_unmarshal (msg, &arg_out, &reply);
assert (! err);
assert (arg_out == VALUE);
+ assert (ADDR_EQ (reply, REPLY));
+ free (msg);
- rpc_onein_reply_marshal (&msg);
- err = rpc_onein_reply_unmarshal (&msg);
+ msg = malloc (sizeof (*msg));
+ rpc_onein_reply_marshal (msg);
+ err = rpc_onein_reply_unmarshal (msg);
assert (! err);
+ free (msg);
-
- rpc_oneout_send_marshal (&msg);
- err = rpc_oneout_send_unmarshal (&msg);
+ msg = malloc (sizeof (*msg));
+ rpc_oneout_send_marshal (msg, REPLY);
+ err = rpc_oneout_send_unmarshal (msg, &reply);
assert (! err);
+ assert (ADDR_EQ (reply, REPLY));
+ free (msg);
- rpc_oneout_reply_marshal (&msg, arg);
- err = rpc_oneout_reply_unmarshal (&msg, &arg_out);
+ msg = malloc (sizeof (*msg));
+ rpc_oneout_reply_marshal (msg, arg);
+ err = rpc_oneout_reply_unmarshal (msg, &arg_out);
assert (! err);
assert (arg_out == VALUE);
+ free (msg);
+ msg = malloc (sizeof (*msg));
struct foo foo;
foo.a = 1 << 31;
@@ -89,26 +111,34 @@ main (int argc, char *argv[])
struct foo foo_out;
bool p_out;
- rpc_onlyin_send_marshal (&msg, 0x1234567, 321, foo, true);
- err = rpc_onlyin_send_unmarshal (&msg, &arg_out, &idx_out, &foo_out, &p_out);
+ rpc_onlyin_send_marshal (msg, 0x1234567, 0xABC, foo, true, REPLY);
+ err = rpc_onlyin_send_unmarshal (msg, &arg_out, &idx_out, &foo_out, &p_out,
+ &reply);
assert (! err);
assert (arg_out == 0x1234567);
- assert (idx_out == 321);
+ assert (idx_out == 0xABC);
assert (foo_out.a == foo.a);
assert (foo_out.b == foo.b);
assert (p_out == true);
+ assert (ADDR_EQ (reply, REPLY));
+ free (msg);
- rpc_onlyin_reply_marshal (&msg);
- err = rpc_onlyin_reply_unmarshal (&msg);
+ msg = malloc (sizeof (*msg));
+ rpc_onlyin_reply_marshal (msg);
+ err = rpc_onlyin_reply_unmarshal (msg);
assert (! err);
+ free (msg);
-
- rpc_onlyout_send_marshal (&msg);
- err = rpc_onlyout_send_unmarshal (&msg);
+ msg = malloc (sizeof (*msg));
+ rpc_onlyout_send_marshal (msg, REPLY);
+ err = rpc_onlyout_send_unmarshal (msg, &reply);
assert (! err);
+ assert (ADDR_EQ (reply, REPLY));
+ free (msg);
- rpc_onlyout_reply_marshal (&msg, 0x1234567, 321, foo, true);
- err = rpc_onlyout_reply_unmarshal (&msg, &arg_out, &idx_out,
+ msg = malloc (sizeof (*msg));
+ rpc_onlyout_reply_marshal (msg, 0x1234567, 321, foo, true);
+ err = rpc_onlyout_reply_unmarshal (msg, &arg_out, &idx_out,
&foo_out, &p_out);
assert (! err);
assert (arg_out == 0x1234567);
@@ -116,22 +146,39 @@ main (int argc, char *argv[])
assert (foo_out.a == foo.a);
assert (foo_out.b == foo.b);
assert (p_out == true);
+ free (msg);
- rpc_mix_send_marshal (&msg, arg, 456789);
- err = rpc_mix_send_unmarshal (&msg, &arg_out, &idx_out);
+ msg = malloc (sizeof (*msg));
+ rpc_mix_send_marshal (msg, arg, 456789, REPLY);
+ err = rpc_mix_send_unmarshal (msg, &arg_out, &idx_out, &reply);
assert (! err);
assert (arg_out == arg);
assert (idx_out == 456789);
+ assert (ADDR_EQ (reply, REPLY));
+ free (msg);
+ msg = malloc (sizeof (*msg));
int i_out = 0;
- rpc_mix_reply_marshal (&msg, foo, false, 4200042);
- err = rpc_mix_reply_unmarshal (&msg, &foo_out, &p_out, &i_out);
+ rpc_mix_reply_marshal (msg, foo, false, 4200042);
+ err = rpc_mix_reply_unmarshal (msg, &foo_out, &p_out, &i_out);
assert (! err);
assert (foo_out.a == foo.a);
assert (foo_out.b == foo.b);
assert (p_out == false);
assert (i_out == 4200042);
+ free (msg);
+
+ msg = malloc (sizeof (*msg));
+ rpc_caps_send_marshal (msg, 54, ADDR (1, ADDR_BITS), foo, REPLY);
+ addr_t addr;
+ err = rpc_caps_send_unmarshal (msg, &i_out, &addr, &foo_out, &reply);
+ assert (! err);
+ assert (i_out == 54);
+ assert (ADDR_EQ (addr, ADDR (1, ADDR_BITS)));
+ assert (foo_out.a == foo.a);
+ assert (foo_out.b == foo.b);
+ free (msg);
printf ("ok\n");
return 0;
diff --git a/hurd/thread.h b/hurd/thread.h
index c583723..81a4925 100644
--- a/hurd/thread.h
+++ b/hurd/thread.h
@@ -16,42 +16,70 @@
License along with GNU Hurd. If not, see
<http://www.gnu.org/licenses/>. */
-#ifndef _HURD_THREAD_H
-#define _HURD_THREAD_H 1
+#ifndef __have_vg_thread_id_t
+# define __have_vg_thread_id_t
-#include <hurd/types.h>
-#include <hurd/addr-trans.h>
-#include <hurd/cap.h>
-#include <hurd/startup.h>
-#include <l4/syscall.h>
-#include <l4/ipc.h>
+# ifdef USE_L4
+# include <l4.h>
+typedef l4_thread_id_t vg_thread_id_t;
+# define vg_niltid l4_nilthread
+# define VG_THREAD_ID_FMT "%x"
+# else
+# include <stdint.h>
+typedef uint64_t vg_thread_id_t;
+# define vg_niltid -1
+# define VG_THREAD_ID_FMT "%llx"
+# endif
-enum
- {
- RM_thread_exregs = 600,
- RM_thread_wait_object_destroyed,
- RM_thread_raise_exception,
- };
+#endif /* !__have_vg_thread_id_t */
-#ifdef RM_INTERN
-struct thread;
-typedef struct thread *thread_t;
-#else
-typedef addr_t thread_t;
-#endif
+#ifndef __have_activation_frame
+# define __have_activation_frame
-struct exception_frame
+# include <stdint.h>
+
+# ifdef USE_L4
+# include <l4/ipc.h>
+# endif
+
+struct hurd_message_buffer;
+
+struct activation_frame
{
-#if i386
- /* eax, ecx, edx, eflags, eip, ebx, edi, esi. */
- l4_word_t regs[8];
+ /* **** ia32-exception-entry.S silently depends on the layout of
+ this structure up to and including the next field **** */
+#ifdef i386
+ union
+ {
+ uintptr_t regs[10];
+ struct
+ {
+ uintptr_t eax;
+ uintptr_t ecx;
+ uintptr_t edx;
+ uintptr_t eflags;
+ uintptr_t eip;
+ uintptr_t ebx;
+ uintptr_t edi;
+ uintptr_t esi;
+ uintptr_t ebp;
+ uintptr_t esp;
+ };
+ };
#else
# error Not ported to this architecture!
#endif
- struct exception_frame *next;
- struct exception_frame *prev;
- l4_msg_t exception;
+ /* The base of the stack to use when running
+ hurd_activation_handler_normal. If NULL, then the interrupted
+ stack is used. */
+ void *normal_mode_stack;
+
+ struct activation_frame *next;
+ struct activation_frame *prev;
+ struct hurd_message_buffer *message_buffer;
+
+#ifdef USE_L4
/* We need to save parts of the UTCB. */
l4_word_t saved_sender;
l4_word_t saved_receiver;
@@ -60,16 +88,69 @@ struct exception_frame
l4_word_t saved_flags;
l4_word_t saved_br0;
l4_msg_t saved_message;
+#endif
+
+#define ACTIVATION_FRAME_CANARY 0x10ADAB1E
+ uintptr_t canary;
+};
+#endif
+
+#if defined(__need_vg_thread_id_t) || defined (__need_activation_frame)
+# undef __need_vg_thread_id_t
+# undef __need_activation_frame
+#else
+
+#ifndef _HURD_THREAD_H
+#define _HURD_THREAD_H 1
+
+#include <stdint.h>
+#include <hurd/types.h>
+#include <hurd/addr.h>
+#include <hurd/addr-trans.h>
+#include <hurd/cap.h>
+#include <hurd/messenger.h>
+#include <setjmp.h>
+
+/* Cause the activation frame to assume the state of the long jump
+ buffer BUF. If SET_RET is true, the normal function return value
+ is set to RET. */
+extern void hurd_activation_frame_longjmp (struct activation_frame *af,
+ jmp_buf buf,
+ bool set_ret, int ret);
+
+struct hurd_fault_catcher
+{
+#define HURD_FAULT_CATCHER_MAGIC 0xb01dface
+ uintptr_t magic;
+
+ /* Start of the region to watch. */
+ uintptr_t start;
+ /* Length of the region in bytes. */
+ uintptr_t len;
+ /* The callback. Return true to continue execution; false to
+ raise SIGSEGV. */
+ bool (*callback) (struct activation_frame *activation_frame,
+ uintptr_t fault);
+
+ struct hurd_fault_catcher *next;
+ struct hurd_fault_catcher **prevp;
};
-struct exception_page
+/* Register a fault catch handler. */
+extern void hurd_fault_catcher_register (struct hurd_fault_catcher *catcher);
+
+/* Unregister a fault catch handler. */
+extern void hurd_fault_catcher_unregister (struct hurd_fault_catcher *catcher);
+
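For illustration, a sketch of the intended use of the catcher API; the backtrace() changes in libc-parts/backtrace.c below follow the same pattern. The probe_* names are hypothetical, and <setjmp.h>, <stdbool.h>, and <stdint.h> are assumed:

    static jmp_buf probe_env;
    static struct hurd_fault_catcher probe_catcher;

    /* Runs in activation context if the watched region faults.  */
    static bool
    probe_recover (struct activation_frame *af, uintptr_t fault)
    {
      hurd_fault_catcher_unregister (&probe_catcher);
      /* Resume at the setjmp in probe_word with a return value of 1.  */
      hurd_activation_frame_longjmp (af, probe_env, true, 1);
      return true;
    }

    /* Read *ADDR into *VALUE; return false instead of taking a SIGSEGV
       if ADDR is not mapped.  */
    static bool
    probe_word (uintptr_t *addr, uintptr_t *value)
    {
      if (setjmp (probe_env))
        return false;  /* Faulted.  */

      probe_catcher.start = (uintptr_t) addr;
      probe_catcher.len = sizeof (uintptr_t);
      probe_catcher.callback = probe_recover;
      hurd_fault_catcher_register (&probe_catcher);

      *value = *addr;

      hurd_fault_catcher_unregister (&probe_catcher);
      return true;
    }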
+/* The user thread control block. */
+struct vg_utcb
{
union
{
- /* Whether the thread is in activation mode or not. If so, no
- exception will be delivered.
+ /* The following structures are examined or modified by the
+ kernel. */
- **** ia32-exception-entry.S silently depends on the layout of
+ /* **** ia32-exception-entry.S silently depends on the layout of
this structure **** */
struct
{
@@ -77,56 +158,108 @@ struct exception_page
{
struct
{
- /* Whether the thread is in activated mode. */
- l4_word_t activated_mode : 1;
+ /* Whether the thread is in activated mode. If so, any
+ activations that arrive during this time will be queued
+ or dropped. */
+ uintptr_t activated_mode : 1;
/* Set by the kernel to indicated that there is a pending
message. */
- l4_word_t pending_message : 1;
+ uintptr_t pending_message : 1;
/* Set by the kernel to indicate whether the thread was
interrupted while the EIP is in the transition range. */
- l4_word_t interrupt_in_transition : 1;
+ uintptr_t interrupt_in_transition : 1;
};
- l4_word_t mode;
+ uintptr_t mode;
};
/* The value of the IP and SP when the thread was running. */
- l4_word_t saved_ip;
- l4_word_t saved_sp;
+ uintptr_t saved_ip;
+ uintptr_t saved_sp;
/* The state of the thread (as returned by _L4_exchange_regs) */
- l4_word_t saved_thread_state;
+ uintptr_t saved_thread_state;
+
+ /* Top of the activation frame stack (i.e., the active
+ activation). */
+ struct activation_frame *activation_stack;
+ /* The bottom of the activation stack. */
+ struct activation_frame *activation_stack_bottom;
+
+ uintptr_t activation_handler_sp;
+ uintptr_t activation_handler_ip;
+ uintptr_t activation_handler_end;
+
+ /* The protected payload of the capability that invoked the
+ messenger that caused this activation. */
+ uint64_t protected_payload;
+ /* The messenger's id. */
+ uint64_t messenger_id;
+
+ uintptr_t inline_words[VG_MESSENGER_INLINE_WORDS];
+ addr_t inline_caps[VG_MESSENGER_INLINE_CAPS];
+
+ union
+ {
+ struct
+ {
+ int inline_word_count : 2;
+ int inline_cap_count : 1;
+ };
+ int inline_data : 3;
+ };
+
+ /* The following fields are not examined or modified by the
+ kernel. */
+
+ /* The CRC protects the above fields by checking for
+ modification, which can happen if a callback function uses
+ too much stack. The fields following crc are not protected
+ by the crc as they are expected to be changed by the
+ activation handler. */
- /* Top of the exception frame stack. */
- struct exception_frame *exception_stack;
- /* Bottom of the exception frame stack. */
- struct exception_frame *exception_stack_bottom;
+ uintptr_t crc;
- l4_word_t exception_handler_sp;
- l4_word_t exception_handler_ip;
- l4_word_t exception_handler_end;
+ /* The exception buffer. */
+ struct hurd_message_buffer *exception_buffer;
+ /* The current extant IPC. */
+ struct hurd_message_buffer *extant_message;
- /* The exception. */
- l4_msg_t exception;
+ struct hurd_fault_catcher *catchers;
- l4_word_t crc;
+ /* The alternate activation stack. */
+ void *alternate_stack;
+ bool alternate_stack_inuse;
+
+#define UTCB_CANARY0 0xCA17A1
+#define UTCB_CANARY1 0xDEADB15D
+ uintptr_t canary0;
+ uintptr_t canary1;
};
char data[PAGESIZE];
};
};
+/* A thread object's user accessible capability slots. */
enum
{
/* Root of the address space. */
THREAD_ASPACE_SLOT = 0,
/* The activity the thread is bound to. */
THREAD_ACTIVITY_SLOT = 1,
- /* Where exceptions are saved. Must be a cap_page. */
- THREAD_EXCEPTION_PAGE_SLOT = 2,
+ /* The messenger to post exceptions to. */
+ THREAD_EXCEPTION_MESSENGER = 2,
+ /* The user thread control block. Must be a cap_page. */
+ THREAD_UTCB = 3,
+
+ /* Total number of capability slots in a thread object. This must
+ be a power of 2. */
+ THREAD_SLOTS = 4,
};
+#define THREAD_SLOTS_LOG2 2
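For illustration, a thread object's slots are addressed by extending the thread's address by THREAD_SLOTS_LOG2 bits; this is the same idiom process_spawn (libc-parts/process-spawn.c below) uses to pass the new address-space root to rm_thread_exregs. The variable names here are hypothetical:

    /* Sketch: the capability slots of the thread object at THREAD.  */
    addr_t aspace_slot = addr_extend (thread, THREAD_ASPACE_SLOT,
                                      THREAD_SLOTS_LOG2);
    addr_t utcb_slot = addr_extend (thread, THREAD_UTCB,
                                    THREAD_SLOTS_LOG2);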
enum
{
- HURD_EXREGS_SET_EXCEPTION_PAGE = 0x1000,
-
+ HURD_EXREGS_SET_UTCB = 0x2000,
+ HURD_EXREGS_SET_EXCEPTION_MESSENGER = 0x1000,
HURD_EXREGS_SET_ASPACE = 0x800,
HURD_EXREGS_SET_ACTIVITY = 0x400,
HURD_EXREGS_SET_SP = _L4_XCHG_REGS_SET_SP,
@@ -134,7 +267,8 @@ enum
HURD_EXREGS_SET_SP_IP = _L4_XCHG_REGS_SET_SP | _L4_XCHG_REGS_SET_IP,
HURD_EXREGS_SET_EFLAGS = _L4_XCHG_REGS_SET_FLAGS,
HURD_EXREGS_SET_USER_HANDLE = _L4_XCHG_REGS_SET_USER_HANDLE,
- HURD_EXREGS_SET_REGS = (HURD_EXREGS_SET_EXCEPTION_PAGE
+ HURD_EXREGS_SET_REGS = (HURD_EXREGS_SET_UTCB
+ | HURD_EXREGS_SET_EXCEPTION_MESSENGER
| HURD_EXREGS_SET_ASPACE
| HURD_EXREGS_SET_ACTIVITY
| HURD_EXREGS_SET_SP
@@ -152,53 +286,53 @@ enum
HURD_EXREGS_ABORT_IPC = HURD_EXREGS_ABORT_SEND | _L4_XCHG_REGS_CANCEL_RECV,
};
+enum
+ {
+ RM_thread_exregs = 600,
+ RM_thread_id,
+ RM_thread_activation_collect,
+ };
+
+#ifdef RM_INTERN
+struct thread;
+typedef struct thread *thread_t;
+#else
+typedef addr_t thread_t;
+#endif
+
#define RPC_STUB_PREFIX rm
#define RPC_ID_PREFIX RM
-#undef RPC_TARGET_NEED_ARG
-#define RPC_TARGET \
- ({ \
- extern struct hurd_startup_data *__hurd_startup_data; \
- __hurd_startup_data->rm; \
- })
#include <hurd/rpc.h>
struct hurd_thread_exregs_in
{
- addr_t aspace;
uintptr_t aspace_cap_properties_flags;
struct cap_properties aspace_cap_properties;
- addr_t activity;
-
- addr_t exception_page;
-
- l4_word_t sp;
- l4_word_t ip;
- l4_word_t eflags;
- l4_word_t user_handle;
-
- addr_t aspace_out;
- addr_t activity_out;
- addr_t exception_page_out;
+ uintptr_t sp;
+ uintptr_t ip;
+ uintptr_t eflags;
+ uintptr_t user_handle;
};
struct hurd_thread_exregs_out
{
- l4_word_t sp;
- l4_word_t ip;
- l4_word_t eflags;
- l4_word_t user_handle;
+ uintptr_t sp;
+ uintptr_t ip;
+ uintptr_t eflags;
+ uintptr_t user_handle;
};
/* l4_exregs wrapper. */
-RPC (thread_exregs, 4, 1,
- addr_t, principal,
- addr_t, thread,
- l4_word_t, control,
- struct hurd_thread_exregs_in, in,
+RPC (thread_exregs, 6, 1, 4,
+ /* cap_t principal, cap_t thread, */
+ uintptr_t, control, struct hurd_thread_exregs_in, in,
+ cap_t, aspace, cap_t, activity, cap_t, utcb, cap_t, exception_messenger,
/* Out: */
- struct hurd_thread_exregs_out, out)
+ struct hurd_thread_exregs_out, out,
+ cap_t, aspace_out, cap_t, activity_out, cap_t, utcb_out,
+ cap_t, exception_messenger_out)
static inline error_t
thread_start (addr_t thread)
@@ -208,7 +342,8 @@ thread_start (addr_t thread)
return rm_thread_exregs (ADDR_VOID, thread,
HURD_EXREGS_START | HURD_EXREGS_ABORT_IPC,
- in, &out);
+ in, ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ &out, NULL, NULL, NULL, NULL);
}
static inline error_t
@@ -223,7 +358,8 @@ thread_start_sp_ip (addr_t thread, uintptr_t sp, uintptr_t ip)
return rm_thread_exregs (ADDR_VOID, thread,
HURD_EXREGS_START | HURD_EXREGS_ABORT_IPC
| HURD_EXREGS_SET_SP_IP,
- in, &out);
+ in, ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ &out, NULL, NULL, NULL, NULL);
}
static inline error_t
@@ -234,29 +370,31 @@ thread_stop (addr_t thread)
return rm_thread_exregs (ADDR_VOID, thread,
HURD_EXREGS_STOP | HURD_EXREGS_ABORT_IPC,
- in, &out);
+ in, ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ &out, NULL, NULL, NULL, NULL);
}
-/* Cause the caller to wait until OBJECT is destroyed. Returns the
- object's return code in RETURN_CODE. */
-RPC(thread_wait_object_destroyed, 2, 1,
- addr_t, principal, addr_t, object,
- /* Out: */
- uintptr_t, return_code);
-
-/* The kernel interprets the payload as a short l4_msg_t buffer: that
- is one that does not exceed 128 bytes. */
-struct exception_buffer
-{
- char payload[128];
-};
+/* Return the unique integer associated with thread THREAD. */
+RPC(thread_id, 0, 1, 0,
+ /* cap_t, principal, cap_t, thread, */
+ vg_thread_id_t, tid)
-RPC(thread_raise_exception, 3, 0,
- addr_t, principal, addr_t, thread,
- struct exception_buffer, exception_buffer)
+/* Cause the delivery of a pending message, if any. */
+RPC(thread_activation_collect, 0, 0, 0
+ /* cap_t principal, cap_t thread */)
#undef RPC_STUB_PREFIX
#undef RPC_ID_PREFIX
-#undef RPC_TARGET
-#endif
+static inline vg_thread_id_t
+vg_myself (void)
+{
+ vg_thread_id_t tid;
+ error_t err = rm_thread_id (ADDR_VOID, ADDR_VOID, &tid);
+ if (err)
+ return vg_niltid;
+ return tid;
+}
+
+#endif /* _HURD_THREAD_H */
+#endif /* __need_vg_thread_id_t */
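For illustration, a minimal (hypothetical) use of the new vg_myself helper together with VG_THREAD_ID_FMT; the debug call assumes <hurd/stddef.h>:

    vg_thread_id_t tid = vg_myself ();
    if (tid == vg_niltid)
      debug (0, "Could not determine own thread id");
    else
      debug (5, "Running as thread " VG_THREAD_ID_FMT, tid);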
diff --git a/libc-parts/ChangeLog b/libc-parts/ChangeLog
index c1a37f0..caf0dae 100644
--- a/libc-parts/ChangeLog
+++ b/libc-parts/ChangeLog
@@ -1,5 +1,28 @@
2008-12-11 Neal H. Walfield <neal@gnu.org>
+ Update to new RPC interfaces.
+
+ * _exit.c (_exit): Update use of rm_folio_object_alloc to be
+ consistent with the new interface.
+
+ * backtrace.c (RA) [!RM_INTERN]: Set up a fault catch handler to
+ avoid gratuitously faulting.
+ (backtrace) [!RM_INTERN]: Set up a jump buffer. Jump to it on a
+ fault.
+ (backtrace_print): Use s_printf, not printf.
+
+ * ia32-crt0.S (STACK_SIZE): Increase to 128 kb.
+
+ * process-spawn.c (process_spawn): Don't use a capability slot to
+ identify the root of the new thread's address space, allocate a
+ thread object. Allocate messengers for the new thread and save
+ them in STARTUP_DATA->MESSENGERS.
+
+ * s_printf.c (io_buffer_flush): Use the debug output interface.
+ (s_putchar): Don't call rm_write but use io_buffer_flush.
+
+2008-12-11 Neal H. Walfield <neal@gnu.org>
+
* ia32-cmain.c: Include <hurd/storage.h>.
(finish): If the thread is not using the initial stack, free it.
(_pthread_init_routine): Add the weak attribute to the
diff --git a/libc-parts/_exit.c b/libc-parts/_exit.c
index 2bec189..fc9630d 100644
--- a/libc-parts/_exit.c
+++ b/libc-parts/_exit.c
@@ -72,7 +72,7 @@ _exit (int ret)
err = rm_folio_object_alloc (ADDR_VOID, folio, index,
cap_void, OBJECT_POLICY_VOID,
(uintptr_t) ret,
- ADDR_VOID, ADDR_VOID);
+ NULL, NULL);
if (err)
debug (0, "deallocating object: %d", err);
}
diff --git a/libc-parts/backtrace.c b/libc-parts/backtrace.c
index 3f225ff..5439413 100644
--- a/libc-parts/backtrace.c
+++ b/libc-parts/backtrace.c
@@ -2,36 +2,109 @@
Copyright (C) 2008 Free Software Foundation, Inc.
Written by Neal H. Walfield <neal@gnu.org>.
- This file is part of the GNU Hurd.
-
The GNU Hurd is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
- The GNU Hurd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
+#include <s-printf.h>
+#include <l4.h>
-#define RA(level) \
+#ifdef RM_INTERN
+# define RA(level) \
if (level < size && __builtin_frame_address ((level) + 1)) \
{ \
array[level] = __builtin_return_address ((level) + 1); \
if (array[level] == 0) \
- return (level) + 1; \
+ return count; \
+ count ++; \
} \
else \
- return level;
+ return count;
+
+#else
+# include <hurd/exceptions.h>
+# include <setjmp.h>
+
+# define RA(level) \
+ if (count >= size) \
+ return count; \
+ \
+ { \
+ void *fa = __builtin_frame_address ((level) + 1); \
+ if (fa) \
+ { \
+ if (utcb) \
+ { \
+ catcher.start = (uintptr_t) fa + displacement; \
+ catcher.len = sizeof (uintptr_t); \
+ catcher.callback = get_me_outda_here; \
+ \
+ hurd_fault_catcher_register (&catcher); \
+ } \
+ \
+ array[count] = __builtin_return_address ((level) + 1); \
+ \
+ if (utcb) \
+ hurd_fault_catcher_unregister (&catcher); \
+ \
+ if (array[count] == 0) \
+ return count; \
+ count ++; \
+ } \
+ else \
+ return count; \
+ }
+#endif
+
int
backtrace (void **array, int size)
{
+ /* Without the volatile, count ends up either optimized away or in a
+ caller saved register before the setjmp. In either case, if we
+ fault, we'll end up returning 0 even if we get some of the
+ backtrace. volatile seems to prevent this. */
+ volatile int count = 0;
+
+#ifndef RM_INTERN
+ /* The location of the return address relative to the start of a
+ frame. */
+ intptr_t displacement = sizeof (uintptr_t);
+# ifndef i386
+# warning Not ported to this architecture... guessing
+# endif
+
+ jmp_buf jmpbuf;
+
+ /* If we don't yet have a utcb then don't set up a fault catcher. */
+ struct vg_utcb *utcb = hurd_utcb ();
+
+ if (utcb)
+ {
+ if (setjmp (jmpbuf))
+ return count;
+ }
+
+ struct hurd_fault_catcher catcher;
+
+ bool get_me_outda_here (struct activation_frame *af, uintptr_t fault)
+ {
+ hurd_fault_catcher_unregister (&catcher);
+ hurd_activation_frame_longjmp (af, jmpbuf, true, 1);
+ return true;
+ }
+#endif
+
RA(0);
RA(1);
RA(2);
@@ -52,17 +125,18 @@ backtrace (void **array, int size)
RA(18);
RA(19);
RA(20);
- return 21;
+ return count;
}
-int
+void
backtrace_print (void)
{
void *bt[20];
int count = backtrace (bt, sizeof (bt) / sizeof (bt[0]));
+ s_printf ("Backtrace for %x: ", l4_myself ());
int i;
for (i = 0; i < count; i ++)
- printf ("%p ", bt[i]);
- printf ("\n");
+ s_printf ("%p ", bt[i]);
+ s_printf ("\n");
}
diff --git a/libc-parts/ia32-crt0.S b/libc-parts/ia32-crt0.S
index 0a6e88e..49e35a2 100644
--- a/libc-parts/ia32-crt0.S
+++ b/libc-parts/ia32-crt0.S
@@ -18,8 +18,8 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
-/* The size of our stack (4 pages). */
-#define STACK_SIZE 0x4000
+/* The size of our initial stack (0x32000 bytes). */
+#define STACK_SIZE 0x32000
.text
diff --git a/libc-parts/process-spawn.c b/libc-parts/process-spawn.c
index 299040c..4211951 100644
--- a/libc-parts/process-spawn.c
+++ b/libc-parts/process-spawn.c
@@ -40,6 +40,7 @@
#ifndef RM_INTERN
#include <hurd/ihash.h>
#include <hurd/capalloc.h>
+#include <hurd/storage.h>
#endif
#ifdef RM_INTERN
@@ -290,6 +291,14 @@ process_spawn (addr_t activity,
#else
addr_t as_root = capalloc ();
struct cap *as_root_cap = add_shadow (ADDR (0, 0));
+
+ /* This is sort of a hack. To copy a capability, we need to invoke
+ the source object that contains the capability. A capability
+ slot is not an object. Finding the object corresponding to
+ AS_ROOT is possible, but interposing a thread is just easier. */
+ struct storage thread_root
+ = storage_alloc (ADDR_VOID, cap_thread, STORAGE_EPHEMERAL,
+ OBJECT_POLICY_DEFAULT, as_root);
#endif
/* Allocation support. */
@@ -336,7 +345,7 @@ process_spawn (addr_t activity,
folio_task_addr = ADDR (addr_prefix (folio_task_addr) + (1ULL << w),
ADDR_BITS - w);
- debug (5, "allocating folio at " ADDR_FMT,
+ debug (5, "Allocating folio at " ADDR_FMT,
ADDR_PRINTF (folio_task_addr));
#ifdef RM_INTERN
@@ -358,10 +367,12 @@ process_spawn (addr_t activity,
as_ensure (folio_local_addr);
- error_t err = rm_folio_alloc (activity, folio_local_addr,
- FOLIO_POLICY_DEFAULT);
+ error_t err = rm_folio_alloc (activity, activity,
+ FOLIO_POLICY_DEFAULT,
+ &folio_local_addr);
if (err)
panic ("Failed to allocate folio");
+ assert (! ADDR_IS_VOID (folio_local_addr));
as_slot_lookup_use (folio_local_addr,
({
@@ -398,6 +409,12 @@ process_spawn (addr_t activity,
memset (&rt, 0, sizeof (rt));
int index = folio_index ++;
+
+ debug (5, "Allocating " ADDR_FMT " (%s)",
+ ADDR_PRINTF (addr_extend (folio_task_addr,
+ index, FOLIO_OBJECTS_LOG2)),
+ cap_type_string (type));
+
#ifdef RM_INTERN
rt.cap = folio_object_alloc (root_activity,
folio_local_addr, index,
@@ -407,7 +424,7 @@ process_spawn (addr_t activity,
rm_folio_object_alloc (ADDR_VOID,
folio_local_addr, index,
cap_type_strengthen (type),
- OBJECT_POLICY_VOID, 0, ADDR_VOID, ADDR_VOID);
+ OBJECT_POLICY_VOID, 0, NULL, NULL);
rt.cap.type = cap_type_strengthen (type);
CAP_PROPERTIES_SET (&rt.cap, CAP_PROPERTIES_VOID);
@@ -617,6 +634,17 @@ process_spawn (addr_t activity,
#endif
}
+ /* Allocate some messengers. */
+ int i;
+ for (i = 0; i < 2; i ++)
+ {
+ rt = allocate_object (cap_messenger, ADDR_VOID);
+ assert (descs[startup_data->desc_count - 1].type == cap_messenger);
+ startup_data->messengers[i] = descs[startup_data->desc_count - 1].object;
+ debug (5, "Messenger %d: " ADDR_FMT,
+ i, ADDR_PRINTF (startup_data->messengers[i]));
+ }
+
/* We need to 1) insert the folios in the address space, 2) fix up
their descriptors (recall: we are abusing desc->storage to hold
the local name for the storge), and 3) copy the startup data. We
@@ -677,7 +705,6 @@ process_spawn (addr_t activity,
}
/* Copy the staging area in place. */
- int i;
for (i = 0; i < page; i ++)
memcpy (pages[i], (void *) startup_data + i * PAGESIZE, PAGESIZE);
@@ -726,31 +753,31 @@ process_spawn (addr_t activity,
HURD_EXREGS_SET_SP_IP
| (make_runnable ? HURD_EXREGS_START : 0)
| HURD_EXREGS_ABORT_IPC,
- NULL, 0, CAP_PROPERTIES_VOID,
- NULL, NULL,
- &sp, &ip,
- NULL, NULL, NULL, NULL, NULL);
+ CAP_VOID, 0, CAP_PROPERTIES_VOID,
+ CAP_VOID, CAP_VOID, CAP_VOID,
+ &sp, &ip, NULL, NULL);
#else
/* Start thread. */
struct hurd_thread_exregs_in in;
/* Per the API (cf. <hurd/startup.h>). */
in.sp = STARTUP_DATA_ADDR;
in.ip = ip;
- in.aspace = as_root;
in.aspace_cap_properties = CAP_PROPERTIES_VOID;
in.aspace_cap_properties_flags = CAP_COPY_COPY_SOURCE_GUARD;
- /* XXX: Weaken. */
- in.activity = activity;
error_t err;
struct hurd_thread_exregs_out out;
+ /* XXX: Use a weakened activity. */
err = rm_thread_exregs (ADDR_VOID, thread,
HURD_EXREGS_SET_SP_IP
| HURD_EXREGS_SET_ASPACE
| HURD_EXREGS_SET_ACTIVITY
| (make_runnable ? HURD_EXREGS_START : 0)
| HURD_EXREGS_ABORT_IPC,
- in, &out);
+ in, addr_extend (as_root, THREAD_ASPACE_SLOT,
+ THREAD_SLOTS_LOG2),
+ activity, ADDR_VOID, ADDR_VOID,
+ &out, NULL, NULL, NULL, NULL);
#endif
if (err)
panic ("Failed to start thread: %d", err);
@@ -758,11 +785,7 @@ process_spawn (addr_t activity,
/* Free the remaining locally allocated resources. */
#ifndef RM_INTERN
- as_slot_lookup_use (as_root,
- ({
- rm_cap_rubout (ADDR_VOID, ADDR_VOID, as_root);
- memset (slot, 0, sizeof (*slot));
- }));
+ storage_free (thread_root.addr, false);
capfree (as_root);
#endif
diff --git a/libc-parts/s_printf.c b/libc-parts/s_printf.c
index 2f57f67..7940af8 100644
--- a/libc-parts/s_printf.c
+++ b/libc-parts/s_printf.c
@@ -40,7 +40,28 @@ io_buffer_flush (struct io_buffer *buffer)
if (buffer->len == 0)
return;
- rm_write (*buffer);
+ // rm_write_send_nonblocking (ADDR_VOID, ADDR_VOID, *buffer, ADDR_VOID);
+ l4_msg_tag_t tag = l4_niltag;
+ l4_msg_tag_set_label (&tag, 2132);
+
+ l4_msg_t msg;
+ l4_msg_clear (msg);
+ l4_msg_set_msg_tag (msg, tag);
+
+ l4_msg_append_word (msg, buffer->len);
+
+ assert (buffer->len <= sizeof (buffer->data));
+
+ uintptr_t *data = (uintptr_t *) &buffer->data[0];
+ int remaining;
+ for (remaining = buffer->len; remaining > 0; remaining -= sizeof (uintptr_t))
+ l4_msg_append_word (msg, *(data ++));
+
+ l4_msg_load (msg);
+
+ extern struct hurd_startup_data *__hurd_startup_data;
+ l4_send (__hurd_startup_data->rm);
+
buffer->len = 0;
}
@@ -68,7 +89,7 @@ s_putchar (int chr)
struct io_buffer buffer;
buffer.len = 1;
buffer.data[0] = chr;
- rm_write (buffer);
+ io_buffer_flush (&buffer);
return 0;
#endif
}
diff --git a/libhurd-mm/ChangeLog b/libhurd-mm/ChangeLog
index 703e145..56d2842 100644
--- a/libhurd-mm/ChangeLog
+++ b/libhurd-mm/ChangeLog
@@ -1,3 +1,120 @@
+2008-12-12 Neal H. Walfield <neal@gnu.org>
+
+ Update to new RPC interface and IPC semantics. Support messengers.
+
+ * message-buffer.h: New file.
+ * message-buffer.c: Likewise.
+ * Makefile.am (libhurd_mm_a_SOURCES): Add message-buffer.h and
+ message-buffer.c.
+ * headers.m4: Link sysroot/include/hurd/message-buffer.h to
+ libhurd-mm/message-buffer.h.
+ * exceptions.c: Include <hurd/mm.h>, <hurd/rm.h> and
+ <backtrace.h>.
+ (hurd_fault_catcher_register): New function.
+ (hurd_fault_catcher_unregister): Likewise.
+ (hurd_activation_frame_longjmp): Likewise.
+ (utcb_state_save): Rename from this...
+ (l4_utcb_state_save): ... to this. Take a `struct
+ activation_frame *', not a `struct exception_frame *'.
+ (utcb_state_restore): Rename from this...
+ (l4_utcb_state_restore): ... to this. Take a `struct
+ activation_frame *', not a `struct exception_frame *'.
+ (exception_fetch_exception): Rename from this...
+ (hurd_activation_fetch): ... to this.
+ (hurd_activation_message_register): New function.
+ (hurd_activation_frame_longjmp): Likewise.
+ (exception_frame_slab): Rename from this...
+ (activation_frame_slab): ... to this. Use a static initializer.
+ (exception_frame_slab_alloc): Rename from this...
+ (activation_frame_slab_alloc): ... to this. Don't preserve the L4
+ utcb.
+ (exception_frame_slab_dealloc): Rename from this...
+ (activation_frame_slab_dealloc): ... to this.
+ (exception_frame_alloc): Rename from this...
+ (activation_frame_alloc): ... to this. If there are no
+ preallocated frames, panic. Move the hard allocation code to...
+ (check_activation_frame_reserve): ... this new function.
+ (hurd_activation_stack_dump): New function.
+ (hurd_activation_handler_normal): Take an additional parameter,
+ the utcb. Add consistency checks. Handle IPC and closures.
+ Update fault handling code to use the new fault interface. If
+ unable to resolve the fault via the pager mechanism, see if a
+ fault catcher in installed. Check the UTCB's canary. If running
+ on the alternate stack, clear UTCB->ALTERNATE_STACK_INUSE on exit.
+ (hurd_activation_handler_activated): Take a `struct vg_utcb *',
+ not a `struct exception_page *'. Handle IPC and closures.
+ Improve test to determine if the fault was a stack fault. If so,
+ return to normal mode to handle the fault and use an alternate
+ stack.
+ (activation_handler_area0): New local variable.
+ (activation_handler_msg): Likewise.
+ (initial_utcb): Likewise.
+ (simple_utcb_fetcher): New function.
+ (hurd_utcb): New variable.
+ (hurd_activation_handler_init_early): New function.
+ (hurd_activation_handler_init): Likewise.
+ (exception_handler_init): Remove function.
+ (ACTIVATION_AREA_SIZE_LOG2): Define.
+ (ACTIVATION_AREA_SIZE): Likewise.
+ (hurd_activation_state_alloc): New function.
+ (exception_page_cleanup): Rename from this...
+ (hurd_activation_state_free): ... to this. Rewrite.
+ * ia32-exception-entry.S (_hurd_activation_handler_entry): Save
+ the eflags before executing a sub instruction. Don't try to
+ smartly calculate the location of the UTCB. Instead, just reload
+ it.
+ (activation_frame_run): Use an alternate stack, if requested.
+ Save ebx and ebi. Pass the utcb to the callback.
+ * mm-init.c [i386]: Include <hurd/pager.h>.
+ Include <backtrace.h>.
+ (mm_init): Call hurd_activation_handler_init_early and
+ hurd_activation_handler_init. Don't call exception_handler_init.
+ (mm_init) [! NDEBUG && i386]: Test the activation code.
+
+ * as-build.c (do_index): Handle indexing a cap_thread or a
+ cap_messenger.
+ (as_build): Likewise.
+ * as-dump.c (do_walk): Handle indexing a cap_thread or a
+ cap_messenger.
+ * as-lookup.c (as_lookup_rel_internal): Likewise.
+ * as.c (as_walk): Likewise.
+
+ * storage.c: Include <backtrace.h>.
+ (shadow_setup): Update use of rm_folio_object_alloc according to
+ its new interface.
+ (storage_check_reserve_internal): Likewise.
+ (storage_free_): Likewise.
+ (FREE_PAGES_SERIALIZE): Bump to 32.
+ (storage_alloc): If we try to get storage more than 5 lives, print
+ a warning that we may be experiencing live lock.
+
+ * pager.h (pager_fault_t): Change info's type from `struct
+ exception_info' to `struct activation_fault_info'.
+ (PAGER_VOID): Define.
+ * map.h: Don't include <hurd/exceptions.h>. Include <hurd/as.h>.
+ (maps_lock_lock): Don't use EXCEPTION_STACK_SIZE but
+ AS_STACK_SPACE.
+ (map_fault): Change info's type from `struct exception_info' to
+ `struct activation_fault_info'.
+ * map.c (map_fault): Change info's type from `struct
+ exception_info' to `struct activation_fault_info'.
+
+ * as.h (AS_STACK_SPACE): Define.
+ (as_lock): Use AS_STACK_SPACE instead of EXCEPTION_STACK_SIZE.
+ (as_lock_readonly): Likewise.
+
+ * as.h (AS_CHECK_SHADOW): Only check the address translator for
+ capabilities that designate cappages.
+
+ * anonymous.h (ANONYMOUS_MAGIC): Define.
+ (struct anonymous_pager): Add field magic.
+ * anonymous.c (fault): Assert that ANON->MAGIC has the expected
+ value. Correctly size PAGES.
+ (mdestroy): Assert that ANON->MAGIC has the expected value.
+ (destroy): Likewise.
+ (advise): Likewise.
+ (anonymous_pager_alloc): Initialize ANON->MAGIC.
+
2008-12-04 Neal H. Walfield <neal@gnu.org>
* mmap.c (mmap): Use correct format conversions.
diff --git a/libhurd-mm/Makefile.am b/libhurd-mm/Makefile.am
index e0f08b8..64dce7c 100644
--- a/libhurd-mm/Makefile.am
+++ b/libhurd-mm/Makefile.am
@@ -50,6 +50,7 @@ libhurd_mm_a_SOURCES = mm.h \
mmap.c sbrk.c \
mprotect.c \
madvise.c \
+ message-buffer.h message-buffer.c \
$(ARCH_SOURCES)
libas_kernel_a_CPPFLAGS = $(KERNEL_CPPFLAGS)
diff --git a/libhurd-mm/anonymous.c b/libhurd-mm/anonymous.c
index 656711b..c679507 100644
--- a/libhurd-mm/anonymous.c
+++ b/libhurd-mm/anonymous.c
@@ -31,6 +31,7 @@
#include <hurd/rm.h>
#include <profile.h>
+#include <backtrace.h>
#include "anonymous.h"
#include "pager.h"
@@ -132,18 +133,23 @@ static struct hurd_slab_space anonymous_pager_slab
static bool
fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
- uintptr_t fault_addr, uintptr_t ip, struct exception_info info)
+ uintptr_t fault_addr, uintptr_t ip, struct activation_fault_info info)
{
struct anonymous_pager *anon = (struct anonymous_pager *) pager;
+ assert (anon->magic == ANONYMOUS_MAGIC);
- debug (5, "Fault at %p, %d pages (%d kb); pager at " ADDR_FMT "+%d",
- fault_addr, count, count * PAGESIZE / 1024,
- ADDR_PRINTF (anon->map_area), offset);
+ debug (5, "%p: fault at %p, spans %d pg (%d kb); "
+ "pager: %p-%p (%d pages; %d kb), offset: %x",
+ anon, (void *) fault_addr, count, count * PAGESIZE / 1024,
+ (void *) (uintptr_t) addr_prefix (anon->map_area),
+ (void *) (uintptr_t) addr_prefix (anon->map_area) + anon->pager.length,
+ anon->pager.length / PAGESIZE, anon->pager.length / 1024,
+ offset);
ss_mutex_lock (&anon->lock);
bool recursive = false;
- void *pages[count];
+ void **pages;
profile_region (count > 1 ? ">1" : "=1");
@@ -204,17 +210,20 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
fault_addr -= left;
offset -= left;
- count += (left + right) / PAGESIZE;
+ count = (left + PAGESIZE + right) / PAGESIZE;
assertx (offset + count * PAGESIZE <= pager->length,
"%x + %d pages <= %x",
offset, count, pager->length);
- debug (5, "Fault at %p, %d pages (%d kb); pager at " ADDR_FMT "+%d",
- fault_addr, count, count * PAGESIZE / 1024,
+ debug (5, "Faulting %p - %p (%d pages; %d kb); pager at " ADDR_FMT "+%d",
+ (void *) fault_addr, (void *) fault_addr + count * PAGESIZE,
+ count, count * PAGESIZE / 1024,
ADDR_PRINTF (anon->map_area), offset);
}
+ pages = __builtin_alloca (sizeof (void *) * count);
+
if (! (anon->flags & ANONYMOUS_NO_ALLOC))
{
hurd_btree_storage_desc_t *storage_descs;
@@ -244,7 +253,7 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
storage address as object_discarded_clear also
returns a mapping and we are likely to access the
data at the fault address. */
- err = rm_object_discarded_clear (ADDR_VOID,
+ err = rm_object_discarded_clear (ADDR_VOID, ADDR_VOID,
storage_desc->storage);
assertx (err == 0, "%d", err);
@@ -326,6 +335,7 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
#endif
}
+ assert (anon->magic == ANONYMOUS_MAGIC);
ss_mutex_unlock (&anon->lock);
profile_region_end ();
@@ -367,6 +377,8 @@ fault (struct pager *pager, uintptr_t offset, int count, bool read_only,
debug (5, "Fault at %x resolved", fault_addr);
+ assert (anon->magic == ANONYMOUS_MAGIC);
+
return r;
}
@@ -374,6 +386,7 @@ static void
mdestroy (struct map *map)
{
struct anonymous_pager *anon = (struct anonymous_pager *) map->pager;
+ assert (anon->magic == ANONYMOUS_MAGIC);
/* XXX: We assume that every byte is mapped by at most one mapping.
We may have to reexamine this assumption if we allow multiple
@@ -468,6 +481,7 @@ destroy (struct pager *pager)
assert (! pager->maps);
struct anonymous_pager *anon = (struct anonymous_pager *) pager;
+ assert (anon->magic == ANONYMOUS_MAGIC);
/* Wait any fill function returns. */
ss_mutex_lock (&anon->fill_lock);
@@ -498,6 +512,7 @@ advise (struct pager *pager,
uintptr_t start, uintptr_t length, uintptr_t advice)
{
struct anonymous_pager *anon = (struct anonymous_pager *) pager;
+ assert (anon->magic == ANONYMOUS_MAGIC);
switch (advice)
{
@@ -541,7 +556,7 @@ advise (struct pager *pager,
case pager_advice_normal:
{
- struct exception_info info;
+ struct activation_fault_info info;
info.discarded = anon->policy.discardable;
info.type = cap_page;
/* XXX: What should we set info.access to? */
@@ -585,6 +600,8 @@ anonymous_pager_alloc (addr_t activity,
struct anonymous_pager *anon = buffer;
memset (anon, 0, sizeof (*anon));
+ anon->magic = ANONYMOUS_MAGIC;
+
anon->pager.length = length;
anon->pager.fault = fault;
anon->pager.no_refs = destroy;
@@ -650,7 +667,7 @@ anonymous_pager_alloc (addr_t activity,
{
if ((flags & ANONYMOUS_FIXED))
{
- debug (0, "(%x, %x (%x)): Specified range " ADDR_FMT "+%d "
+ debug (0, "(%p, %x (%p)): Specified range " ADDR_FMT "+%d "
"in use and ANONYMOUS_FIXED specified",
hint, length, hint + length - 1,
ADDR_PRINTF (anon->map_area), count);
@@ -668,7 +685,7 @@ anonymous_pager_alloc (addr_t activity,
anon->map_area = as_alloc (width, count, true);
if (ADDR_IS_VOID (anon->map_area))
{
- debug (0, "(%x, %x (%x)): No VA available",
+ debug (0, "(%p, %x (%p)): No VA available",
hint, length, hint + length - 1);
goto error_with_buffer;
}
@@ -699,7 +716,7 @@ anonymous_pager_alloc (addr_t activity,
panic ("Memory exhausted.");
- debug (5, "Installed pager at %x spanning %d pages",
+ debug (5, "Installed pager at %p spanning %d pages",
*addr_out, length / PAGESIZE);
return anon;
diff --git a/libhurd-mm/anonymous.h b/libhurd-mm/anonymous.h
index fd1bedb..e946dc2 100644
--- a/libhurd-mm/anonymous.h
+++ b/libhurd-mm/anonymous.h
@@ -80,12 +80,16 @@ enum
typedef bool (*anonymous_pager_fill_t) (struct anonymous_pager *anon,
uintptr_t offset, uintptr_t count,
void *pages[],
- struct exception_info info);
+ struct activation_fault_info info);
+
+#define ANONYMOUS_MAGIC 0xa707a707
struct anonymous_pager
{
struct pager pager;
+ uintptr_t magic;
+
/* The staging area. Only valid if ANONYMOUS_STAGING_AREA is
set. */
void *staging_area;
diff --git a/libhurd-mm/as-build.c b/libhurd-mm/as-build.c
index 589799d..a44f172 100644
--- a/libhurd-mm/as-build.c
+++ b/libhurd-mm/as-build.c
@@ -125,7 +125,9 @@ do_index (activity_t activity, struct cap *pte, addr_t pt_addr, int idx,
struct cap *fake_slot)
{
assert (pte->type == cap_cappage || pte->type == cap_rcappage
- || pte->type == cap_folio);
+ || pte->type == cap_folio
+ || pte->type == cap_thread
+ || pte->type == cap_messenger || pte->type == cap_rmessenger);
/* Load the referenced object. */
struct object *pt = cap_to_object (activity, pte);
@@ -156,6 +158,15 @@ do_index (activity_t activity, struct cap *pte, addr_t pt_addr, int idx,
return fake_slot;
+ case cap_thread:
+ assert (idx < THREAD_SLOTS);
+ return &pt->caps[idx];
+
+ case cap_messenger:
+ /* Note: rmessengers don't expose their capability slots. */
+ assert (idx < VG_MESSENGER_SLOTS);
+ return &pt->caps[idx];
+
default:
return NULL;
}
@@ -244,7 +255,9 @@ ID (as_build) (activity_t activity,
area. */
break;
else if ((pte->type == cap_cappage || pte->type == cap_rcappage
- || pte->type == cap_folio)
+ || pte->type == cap_folio
+ || pte->type == cap_thread
+ || pte->type == cap_messenger)
&& remaining >= pte_gbits
&& pte_guard == addr_guard)
/* PTE's (possibly zero-width) guard matches and the
@@ -591,6 +604,15 @@ ID (as_build) (activity_t activity,
width = FOLIO_OBJECTS_LOG2;
break;
+ case cap_thread:
+ width = THREAD_SLOTS_LOG2;
+ break;
+
+ case cap_messenger:
+ /* Note: rmessengers don't expose their capability slots. */
+ width = VG_MESSENGER_SLOTS_LOG2;
+ break;
+
default:
AS_DUMP;
PANIC ("Can't insert object at " ADDR_FMT ": "
diff --git a/libhurd-mm/as-dump.c b/libhurd-mm/as-dump.c
index 839013e..27dfc6d 100644
--- a/libhurd-mm/as-dump.c
+++ b/libhurd-mm/as-dump.c
@@ -23,6 +23,7 @@
#include <hurd/as.h>
#include <hurd/stddef.h>
#include <assert.h>
+#include <backtrace.h>
#ifdef RM_INTERN
#include <md5.h>
@@ -174,6 +175,29 @@ do_walk (activity_t activity, int index,
return;
+ case cap_thread:
+ if (addr_depth (addr) + THREAD_SLOTS_LOG2 > ADDR_BITS)
+ return;
+
+ for (i = 0; i < THREAD_SLOTS; i ++)
+ do_walk (activity, i, root,
+ addr_extend (addr, i, THREAD_SLOTS_LOG2),
+ indent + 1, true, output_prefix);
+
+ return;
+
+ case cap_messenger:
+ /* rmessengers don't expose their capability slots. */
+ if (addr_depth (addr) + VG_MESSENGER_SLOTS_LOG2 > ADDR_BITS)
+ return;
+
+ for (i = 0; i < VG_MESSENGER_SLOTS; i ++)
+ do_walk (activity, i, root,
+ addr_extend (addr, i, VG_MESSENGER_SLOTS_LOG2),
+ indent + 1, true, output_prefix);
+
+ return;
+
default:
return;
}
diff --git a/libhurd-mm/as-lookup.c b/libhurd-mm/as-lookup.c
index 0a270ab..62343dd 100644
--- a/libhurd-mm/as-lookup.c
+++ b/libhurd-mm/as-lookup.c
@@ -62,6 +62,8 @@ as_lookup_rel_internal (activity_t activity,
enum as_lookup_mode mode, union as_lookup_ret *rt,
bool dump)
{
+ assert (root);
+
struct cap *start = root;
#ifndef NDEBUG
@@ -99,7 +101,10 @@ as_lookup_rel_internal (activity_t activity,
remaining - CAP_GUARD_BITS (root))),
remaining);
- assert (CAP_TYPE_MIN <= root->type && root->type <= CAP_TYPE_MAX);
+ assertx (CAP_TYPE_MIN <= root->type && root->type <= CAP_TYPE_MAX,
+ "Cap at " ADDR_FMT " has type %d?! (" ADDR_FMT ")",
+ ADDR_PRINTF (addr_chop (address, remaining)), root->type,
+ ADDR_PRINTF (address));
if (root->type == cap_rcappage)
/* The page directory is read-only. Note the weakened access
@@ -240,6 +245,59 @@ as_lookup_rel_internal (activity_t activity,
break;
+ case cap_thread:
+ case cap_messenger:
+ /* Note: rmessengers don't expose their capability slots. */
+ {
+ /* Index the object. */
+ int bits;
+ switch (root->type)
+ {
+ case cap_thread:
+ bits = THREAD_SLOTS_LOG2;
+ break;
+
+ case cap_messenger:
+ bits = VG_MESSENGER_SLOTS_LOG2;
+ break;
+ }
+
+ if (remaining < bits)
+ {
+ debug (1, "Translating " ADDR_FMT "; not enough bits (%d) "
+ "to index %d-bit %s at " ADDR_FMT,
+ ADDR_PRINTF (address), remaining, bits,
+ cap_type_string (root->type),
+ ADDR_PRINTF (addr_chop (address, remaining)));
+ DUMP_OR_RET (false);
+ }
+
+ struct object *object = cap_to_object (activity, root);
+ if (! object)
+ {
+#ifdef RM_INTERN
+ debug (1, "Failed to get object with OID " OID_FMT,
+ OID_PRINTF (root->oid));
+ DUMP_OR_RET (false);
+#endif
+ return false;
+ }
+#ifdef RM_INTERN
+ assert (object_type (object) == root->type);
+#endif
+
+ int offset = extract_bits64_inv (addr, remaining - 1, bits);
+ assert (0 <= offset && offset < (1 << bits));
+ remaining -= bits;
+
+ if (dump_path)
+ debug (0, "Indexing %s: %d/%d (%d)",
+ cap_type_string (root->type), offset, bits, remaining);
+
+ root = &object->caps[offset];
+ break;
+ }
+
default:
/* We designate a non-address bit translating object but we
have no bits left to translate. This is not an unusual
diff --git a/libhurd-mm/as.c b/libhurd-mm/as.c
index f8b5d25..b7643ab 100644
--- a/libhurd-mm/as.c
+++ b/libhurd-mm/as.c
@@ -20,6 +20,7 @@
#include "as.h"
#include "storage.h"
+#include <hurd/rm.h>
#include <pthread.h>
#include <hurd/folio.h>
@@ -506,10 +507,12 @@ as_init (void)
err = rm_cap_read (meta_data_activity, ADDR_VOID, addr,
&type, &properties);
assert (! err);
+ if (! cap_types_compatible (type, desc->type))
+ rm_as_dump (ADDR_VOID, ADDR_VOID);
assertx (cap_types_compatible (type, desc->type),
- "%s != %s",
- cap_type_string (type),
- cap_type_string (desc->type));
+ "Object at " ADDR_FMT ": %s != %s",
+ ADDR_PRINTF (addr),
+ cap_type_string (type), cap_type_string (desc->type));
int gbits = CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans);
addr_t slot_addr = addr_chop (addr, gbits);
@@ -797,6 +800,12 @@ as_walk (int (*visit) (addr_t addr,
case cap_folio:
slots_log2 = FOLIO_OBJECTS_LOG2;
break;
+ case cap_thread:
+ slots_log2 = THREAD_SLOTS_LOG2;
+ break;
+ case cap_messenger:
+ slots_log2 = VG_MESSENGER_SLOTS_LOG2;
+ break;
default:
assert (0 == 1);
break;
diff --git a/libhurd-mm/as.h b/libhurd-mm/as.h
index 0d4d48b..4b0a448 100644
--- a/libhurd-mm/as.h
+++ b/libhurd-mm/as.h
@@ -92,6 +92,9 @@ as_lock_ensure_stack (int amount)
space[i] = 0;
}
+/* The amount of stack space that needs to be available to avoid
+ faulting. */
+#define AS_STACK_SPACE (8 * PAGESIZE)
/* Address space lock. Should hold a read lock when accessing the
address space. Must hold a write lock when modifying the address
@@ -104,7 +107,7 @@ as_lock (void)
extern pthread_rwlock_t as_rwlock;
extern l4_thread_id_t as_rwlock_owner;
- as_lock_ensure_stack (EXCEPTION_STACK_SIZE - PAGESIZE);
+ as_lock_ensure_stack (AS_STACK_SPACE);
storage_check_reserve (false);
@@ -133,7 +136,7 @@ as_lock_readonly (void)
extern pthread_rwlock_t as_rwlock;
extern l4_thread_id_t as_rwlock_owner;
- as_lock_ensure_stack (EXCEPTION_STACK_SIZE - PAGESIZE);
+ as_lock_ensure_stack (AS_STACK_SPACE);
storage_check_reserve (false);
@@ -218,7 +221,8 @@ extern struct cap shadow_root;
&& (!!__acs_p.policy.discardable \
== !!(__acs_cap)->discardable))) \
die = true; \
- else if (__acs_p.addr_trans.raw != (__acs_cap)->addr_trans.raw) \
+ else if ((__acs_type == cap_cappage || __acs_type == cap_rcappage) \
+ && __acs_p.addr_trans.raw != (__acs_cap)->addr_trans.raw) \
die = true; \
\
if (die) \
@@ -596,7 +600,7 @@ as_cap_lookup (addr_t addr, enum cap_type type, bool *writable)
TYPE is the required type. If the type is incompatible
(cap_rcappage => cap_cappage and cap_rpage => cap_page), bails. If
TYPE is -1, then any type is acceptable. May cause paging. If
- non-NULL, returns whether the slot is writable in *WRITABLE.
+ non-NULL, returns whether the object is writable in *WRITABLE.
This function locks (and unlocks) as_lock. */
static inline struct cap
diff --git a/libhurd-mm/exceptions.c b/libhurd-mm/exceptions.c
index 6827f71..26ad7c7 100644
--- a/libhurd-mm/exceptions.c
+++ b/libhurd-mm/exceptions.c
@@ -22,70 +22,192 @@
#include <hurd/stddef.h>
#include <hurd/exceptions.h>
#include <hurd/storage.h>
-#include <hurd/slab.h>
#include <hurd/thread.h>
+#include <hurd/mm.h>
+#include <hurd/rm.h>
+#include <hurd/slab.h>
#include <l4/thread.h>
#include <signal.h>
#include <string.h>
+#include <backtrace.h>
#include "map.h"
#include "as.h"
+void
+hurd_fault_catcher_register (struct hurd_fault_catcher *catcher)
+{
+ struct vg_utcb *utcb = hurd_utcb ();
+ assert (utcb);
+ assert (catcher);
+
+ catcher->magic = HURD_FAULT_CATCHER_MAGIC;
+
+ catcher->next = utcb->catchers;
+ catcher->prevp = &utcb->catchers;
+
+ utcb->catchers = catcher;
+ if (catcher->next)
+ catcher->next->prevp = &catcher->next;
+}
+
+void
+hurd_fault_catcher_unregister (struct hurd_fault_catcher *catcher)
+{
+ assertx (catcher->magic == HURD_FAULT_CATCHER_MAGIC,
+ "%p", (void *) catcher->magic);
+ catcher->magic = ~HURD_FAULT_CATCHER_MAGIC;
+
+ *catcher->prevp = catcher->next;
+ if (catcher->next)
+ catcher->next->prevp = catcher->prevp;
+}
+
extern struct hurd_startup_data *__hurd_startup_data;
+void
+hurd_activation_frame_longjmp (struct activation_frame *activation_frame,
+ jmp_buf buf, bool set_ret, int ret)
+{
+#ifdef i386
+ /* XXX: Hack! Hack! This is customized for the newlib version!!!
+
+ From newlib/newlib/libc/machine/i386/setjmp.S
+
+ jmp_buf:
+ eax ebx ecx edx esi edi ebp esp eip
+ 0 4 8 12 16 20 24 28 32
+ */
+ /* A cheap check to try and ensure we are using a newlib data
+ structure. */
+ assert (sizeof (jmp_buf) == sizeof (uintptr_t) * 9);
+
+ uintptr_t *regs = (uintptr_t *) buf;
+ activation_frame->eax = *(regs ++);
+ activation_frame->ebx = *(regs ++);
+ activation_frame->ecx = *(regs ++);
+ activation_frame->edx = *(regs ++);
+ activation_frame->esi = *(regs ++);
+ activation_frame->edi = *(regs ++);
+ activation_frame->ebp = *(regs ++);
+ activation_frame->esp = *(regs ++);
+ activation_frame->eip = *(regs ++);
+
+ /* The return value is stored in eax. */
+ if (set_ret)
+ activation_frame->eax = ret;
+
+#else
+# warning Not ported to this architecture
+#endif
+}
+
static void
-utcb_state_save (struct exception_frame *exception_frame)
+l4_utcb_state_save (struct activation_frame *activation_frame)
{
- l4_word_t *utcb = _L4_utcb ();
-
- exception_frame->saved_sender = utcb[_L4_UTCB_SENDER];
- exception_frame->saved_receiver = utcb[_L4_UTCB_RECEIVER];
- exception_frame->saved_timeout = utcb[_L4_UTCB_TIMEOUT];
- exception_frame->saved_error_code = utcb[_L4_UTCB_ERROR_CODE];
- exception_frame->saved_flags = utcb[_L4_UTCB_FLAGS];
- exception_frame->saved_br0 = utcb[_L4_UTCB_BR0];
- memcpy (&exception_frame->saved_message,
- utcb, L4_NUM_MRS * sizeof (l4_word_t));
+ uintptr_t *utcb = _L4_utcb ();
+
+ activation_frame->saved_sender = utcb[_L4_UTCB_SENDER];
+ activation_frame->saved_receiver = utcb[_L4_UTCB_RECEIVER];
+ activation_frame->saved_timeout = utcb[_L4_UTCB_TIMEOUT];
+ activation_frame->saved_error_code = utcb[_L4_UTCB_ERROR_CODE];
+ activation_frame->saved_flags = utcb[_L4_UTCB_FLAGS];
+ activation_frame->saved_br0 = utcb[_L4_UTCB_BR0];
+ memcpy (&activation_frame->saved_message,
+ &utcb[_L4_UTCB_MR0], L4_NUM_MRS * sizeof (uintptr_t));
}
static void
-utcb_state_restore (struct exception_frame *exception_frame)
+l4_utcb_state_restore (struct activation_frame *activation_frame)
+{
+ uintptr_t *utcb = _L4_utcb ();
+
+ utcb[_L4_UTCB_SENDER] = activation_frame->saved_sender;
+ utcb[_L4_UTCB_RECEIVER] = activation_frame->saved_receiver;
+ utcb[_L4_UTCB_TIMEOUT] = activation_frame->saved_timeout;
+ utcb[_L4_UTCB_ERROR_CODE] = activation_frame->saved_error_code;
+ utcb[_L4_UTCB_FLAGS] = activation_frame->saved_flags;
+ utcb[_L4_UTCB_BR0] = activation_frame->saved_br0;
+ memcpy (&utcb[_L4_UTCB_MR0], &activation_frame->saved_message,
+ L4_NUM_MRS * sizeof (uintptr_t));
+}
+
+/* Fetch any pending activation. */
+void
+hurd_activation_fetch (void)
{
- l4_word_t *utcb = _L4_utcb ();
-
- utcb[_L4_UTCB_SENDER] = exception_frame->saved_sender;
- utcb[_L4_UTCB_RECEIVER] = exception_frame->saved_receiver;
- utcb[_L4_UTCB_TIMEOUT] = exception_frame->saved_timeout;
- utcb[_L4_UTCB_ERROR_CODE] = exception_frame->saved_error_code;
- utcb[_L4_UTCB_FLAGS] = exception_frame->saved_flags;
- utcb[_L4_UTCB_BR0] = exception_frame->saved_br0;
- memcpy (utcb, &exception_frame->saved_message,
- L4_NUM_MRS * sizeof (l4_word_t));
+ debug (0, DEBUG_BOLD ("XXX"));
+
+ /* Any reply will come in the form of a pending activation being
+ delivered. This RPC does not generate a response. */
+ error_t err = rm_thread_activation_collect_send (ADDR_VOID, ADDR_VOID,
+ ADDR_VOID);
+ if (err)
+ panic ("Sending thread_activation_collect failed: %d", err);
}
-static struct hurd_slab_space exception_frame_slab;
+void
+hurd_activation_message_register (struct hurd_message_buffer *message_buffer)
+{
+ if (unlikely (! mm_init_done))
+ return;
+
+ struct vg_utcb *utcb = hurd_utcb ();
+ assert (utcb);
+ assert (message_buffer);
+
+ debug (5, "Registering %p (utcb: %p)", message_buffer, utcb);
+
+ if (utcb->extant_message)
+ panic ("Already have an extant message buffer!");
+
+ utcb->extant_message = message_buffer;
+ message_buffer->just_free = false;
+ message_buffer->closure = NULL;
+}
+
+void
+hurd_activation_message_unregister (struct hurd_message_buffer *message_buffer)
+{
+ if (unlikely (! mm_init_done))
+ return;
+
+ struct vg_utcb *utcb = hurd_utcb ();
+ assert (utcb);
+ assert (message_buffer);
+ assert (utcb->extant_message == message_buffer);
+ utcb->extant_message = NULL;
+}
+
+/* Message buffers contain an activation frame. Exceptions reuse
+ message buffers and can be nested. To avoid squashing the
+ activation frame, we need to allocate activation frames
+ separately, from their own slab. */
+
+static error_t activation_frame_slab_alloc (void *, size_t, void **);
+static error_t activation_frame_slab_dealloc (void *, void *, size_t);
+
+static struct hurd_slab_space activation_frame_slab
+ = HURD_SLAB_SPACE_INITIALIZER (struct activation_frame,
+ activation_frame_slab_alloc,
+ activation_frame_slab_dealloc,
+ NULL, NULL, NULL);
static error_t
-exception_frame_slab_alloc (void *hook, size_t size, void **ptr)
+activation_frame_slab_alloc (void *hook, size_t size, void **ptr)
{
assert (size == PAGESIZE);
- struct exception_frame frame;
- utcb_state_save (&frame);
-
struct storage storage = storage_alloc (meta_data_activity,
cap_page, STORAGE_EPHEMERAL,
OBJECT_POLICY_DEFAULT, ADDR_VOID);
*ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
- utcb_state_restore (&frame);
-
return 0;
}
static error_t
-exception_frame_slab_dealloc (void *hook, void *buffer, size_t size)
+activation_frame_slab_dealloc (void *hook, void *buffer, size_t size)
{
assert (size == PAGESIZE);
@@ -94,345 +216,704 @@ exception_frame_slab_dealloc (void *hook, void *buffer, size_t size)
return 0;
}
-
-static struct exception_frame *
-exception_frame_alloc (struct exception_page *exception_page)
+
+static void
+check_activation_frame_reserve (struct vg_utcb *utcb)
{
- struct exception_frame *exception_frame;
+ if (unlikely (! utcb->activation_stack
+ || ! utcb->activation_stack->prev))
+ /* There are no activation frames in reserve. Allocate one. */
+ {
+ void *buffer;
+ error_t err = hurd_slab_alloc (&activation_frame_slab, &buffer);
+ if (err)
+ panic ("Out of memory!");
+
+ struct activation_frame *activation_frame = buffer;
+ activation_frame->canary = ACTIVATION_FRAME_CANARY;
+
+ activation_frame->prev = NULL;
+ activation_frame->next = utcb->activation_stack;
+ if (activation_frame->next)
+ activation_frame->next->prev = activation_frame;
+
+ if (! utcb->activation_stack_bottom)
+ /* This is the first frame we've allocated. */
+ utcb->activation_stack_bottom = activation_frame;
+ }
+}
+
+static struct activation_frame *
+activation_frame_alloc (struct vg_utcb *utcb)
+{
+ struct activation_frame *activation_frame;
- if (! exception_page->exception_stack
- && exception_page->exception_stack_bottom)
+ if (! utcb->activation_stack
+ && utcb->activation_stack_bottom)
/* The stack is empty but we have an available frame. */
{
- exception_frame = exception_page->exception_stack_bottom;
- exception_page->exception_stack = exception_frame;
+ activation_frame = utcb->activation_stack_bottom;
+ utcb->activation_stack = activation_frame;
}
- else if (exception_page->exception_stack
- && exception_page->exception_stack->prev)
+ else if (utcb->activation_stack
+ && utcb->activation_stack->prev)
/* The stack is not empty and we have an available frame. */
{
- exception_frame = exception_page->exception_stack->prev;
- exception_page->exception_stack = exception_frame;
+ activation_frame = utcb->activation_stack->prev;
+ utcb->activation_stack = activation_frame;
}
else
/* We do not have an available frame. */
- {
- void *buffer;
- error_t err = hurd_slab_alloc (&exception_frame_slab, &buffer);
- if (err)
- panic ("Out of memory!");
-
- exception_frame = buffer;
+ panic ("Activation frame reserve is empty.");
- exception_frame->prev = NULL;
- exception_frame->next = exception_page->exception_stack;
- if (exception_frame->next)
- exception_frame->next->prev = exception_frame;
+ return activation_frame;
+}
+
+void
+hurd_activation_stack_dump (void)
+{
+ struct vg_utcb *utcb = hurd_utcb ();
- exception_page->exception_stack = exception_frame;
+ int depth = 0;
+ struct activation_frame *activation_frame;
+ for (activation_frame = utcb->activation_stack;
+ activation_frame;
+ activation_frame = activation_frame->next)
+ {
+ depth ++;
+ debug (0, "%d (%p): ip: %p, sp: %p, eax: %p, ebx: %p, ecx: %p, "
+ "edx: %p, edi: %p, esi: %p, ebp: %p, eflags: %p",
+ depth, activation_frame,
+ (void *) activation_frame->eip,
+ (void *) activation_frame->esp,
+ (void *) activation_frame->eax,
+ (void *) activation_frame->ebx,
+ (void *) activation_frame->ecx,
+ (void *) activation_frame->edx,
+ (void *) activation_frame->edi,
+ (void *) activation_frame->esi,
+ (void *) activation_frame->ebp,
+ (void *) activation_frame->eflags);
- if (! exception_page->exception_stack_bottom)
- /* This is the first frame we've allocated. */
- exception_page->exception_stack_bottom = exception_frame;
}
-
- return exception_frame;
}
-/* Fetch an exception. */
void
-exception_fetch_exception (void)
+hurd_activation_handler_normal (struct activation_frame *activation_frame,
+ struct vg_utcb *utcb)
{
- l4_msg_t msg;
- rm_exception_collect_send_marshal (&msg, ADDR_VOID);
- l4_msg_load (msg);
-
- l4_thread_id_t from;
- l4_msg_tag_t msg_tag = l4_reply_wait (__hurd_startup_data->rm, &from);
- if (l4_ipc_failed (msg_tag))
- panic ("Receiving message failed: %u", (l4_error_code () >> 1) & 0x7);
-}
+ assert (utcb == hurd_utcb ());
+ assert (activation_frame->canary == ACTIVATION_FRAME_CANARY);
+ assert (utcb->activation_stack == activation_frame);
-/* XXX: Before returning from either exception_handler_normal or
- exception_handler_activated, we need to examine the thread's
- control state and if the IPC was interrupt, set the error code
- appropriately. This also requires changing all invocations of IPCs
- to loop on interrupt. Currently, this is not a problem as the only
- exception that we get is a page fault, which can only occur when
- the thread is not in an IPC. (Sure, there are string buffers, but
- we don't use them.) */
+ do_debug (4)
+ {
+ static int calls;
+ int call = ++ calls;
-void
-exception_handler_normal (struct exception_frame *exception_frame)
-{
- debug (5, "Exception handler called (0x%x.%x, exception_frame: %p, "
- "next: %p)",
- l4_thread_no (l4_myself ()), l4_version (l4_myself ()),
- exception_frame, exception_frame->next);
+ int depth = 0;
+ struct activation_frame *af;
+ for (af = utcb->activation_stack; af; af = af->next)
+ depth ++;
- l4_msg_t *msg = &exception_frame->exception;
+ debug (0, "Activation (%d; %d nested) (frame: %p, next: %p)",
+ call, depth, activation_frame, activation_frame->next);
+ hurd_activation_stack_dump ();
+ }
+
+ struct hurd_message_buffer *mb = activation_frame->message_buffer;
+ assert (mb->magic == HURD_MESSAGE_BUFFER_MAGIC);
- l4_msg_tag_t msg_tag = l4_msg_msg_tag (*msg);
- l4_word_t label;
- label = l4_label (msg_tag);
+ check_activation_frame_reserve (utcb);
- switch (label)
+ if (mb->closure)
{
- case EXCEPTION_fault:
- {
- addr_t fault;
- uintptr_t ip;
- uintptr_t sp;
- struct exception_info info;
-
- error_t err;
- err = exception_fault_send_unmarshal (msg, &fault, &sp, &ip, &info);
- if (err)
- panic ("Failed to unmarshal exception: %d", err);
-
- bool r = map_fault (fault, ip, info);
- if (! r)
+ debug (5, "Executing closure %p", mb->closure);
+ mb->closure (mb);
+ }
+ else
+ {
+ debug (5, "Exception");
+
+ assert (mb == utcb->exception_buffer);
+
+ uintptr_t label = vg_message_word (mb->reply, 0);
+ switch (label)
+ {
+ case ACTIVATION_fault:
{
- debug (0, "SIGSEGV at " ADDR_FMT " (ip: %p, sp: %p, eax: %p, "
- "ebx: %p, ecx: %p, edx: %p, edi: %p, esi: %p, "
+ addr_t fault;
+ uintptr_t ip;
+ uintptr_t sp;
+ struct activation_fault_info info;
+
+ error_t err;
+ err = activation_fault_send_unmarshal (mb->reply,
+ &fault, &sp, &ip, &info,
+ NULL);
+ if (err)
+ panic ("Failed to unmarshal exception: %d", err);
+
+ debug (5, "Fault at " ADDR_FMT " (ip: %p, sp: %p, eax: %p, "
+ "ebx: %p, ecx: %p, edx: %p, edi: %p, esi: %p, ebp: %p, "
"eflags: %p)",
- ADDR_PRINTF (fault), ip, sp,
- exception_frame->regs[0],
- exception_frame->regs[5],
- exception_frame->regs[1],
- exception_frame->regs[2],
- exception_frame->regs[6],
- exception_frame->regs[7],
- exception_frame->regs[3]);
-
- extern int backtrace (void **array, int size);
-
- void *a[20];
- int count = backtrace (a, sizeof (a) / sizeof (a[0]));
- int i;
- s_printf ("Backtrace: ");
- for (i = 0; i < count; i ++)
- s_printf ("%p ", a[i]);
- s_printf ("\n");
-
- siginfo_t si;
- memset (&si, 0, sizeof (si));
- si.si_signo = SIGSEGV;
- si.si_addr = ADDR_TO_PTR (fault);
-
- /* XXX: Should set si.si_code to SEGV_MAPERR or
- SEGV_ACCERR. */
-
- pthread_kill_siginfo_np (pthread_self (), si);
- }
+ ADDR_PRINTF (fault),
+ (void *) ip, (void *) sp,
+ (void *) activation_frame->eax,
+ (void *) activation_frame->ebx,
+ (void *) activation_frame->ecx,
+ (void *) activation_frame->edx,
+ (void *) activation_frame->edi,
+ (void *) activation_frame->esi,
+ (void *) activation_frame->ebp,
+ (void *) activation_frame->eflags);
+
+ extern l4_thread_id_t as_rwlock_owner;
+
+ bool r = false;
+ if (likely (as_rwlock_owner != l4_myself ()))
+ r = map_fault (fault, ip, info);
+ if (! r)
+ {
+ uintptr_t f = (uintptr_t) ADDR_TO_PTR (fault);
+ struct hurd_fault_catcher *catcher;
+ for (catcher = utcb->catchers; catcher; catcher = catcher->next)
+ {
+ assertx (catcher->magic == HURD_FAULT_CATCHER_MAGIC,
+ "Catcher %p has bad magic: %p",
+ catcher, (void *) catcher->magic);
+
+ if (catcher->start <= f
+ && f <= catcher->start + catcher->len - 1)
+ {
+ debug (5, "Catcher caught fault at %p! (callback: %p)",
+ (void *) f, catcher->callback);
+ if (catcher->callback (activation_frame, f))
+ /* The callback claims that we can continue. */
+ break;
+ }
+ else
+ debug (5, "Catcher %p-%p does not cover fault %p",
+ (void *) catcher->start,
+ (void *) catcher->start + catcher->len - 1,
+ (void *) f);
+ }
+
+ if (! catcher)
+ {
+ if (as_rwlock_owner == l4_myself ())
+ debug (0, "I hold as_rwlock!");
+
+ debug (0, "SIGSEGV at " ADDR_FMT " "
+ "(ip: %p, sp: %p, eax: %p, ebx: %p, ecx: %p, "
+ "edx: %p, edi: %p, esi: %p, ebp: %p, eflags: %p)",
+ ADDR_PRINTF (fault),
+ (void *) ip, (void *) sp,
+ (void *) activation_frame->eax,
+ (void *) activation_frame->ebx,
+ (void *) activation_frame->ecx,
+ (void *) activation_frame->edx,
+ (void *) activation_frame->edi,
+ (void *) activation_frame->esi,
+ (void *) activation_frame->ebp,
+ (void *) activation_frame->eflags);
+
+ backtrace_print ();
+
+ siginfo_t si;
+ memset (&si, 0, sizeof (si));
+ si.si_signo = SIGSEGV;
+ si.si_addr = ADDR_TO_PTR (fault);
+
+ /* XXX: Should set si.si_code to SEGV_MAPERR or
+ SEGV_ACCERR. */
+
+ pthread_kill_siginfo_np (pthread_self (), si);
+ }
+ }
- break;
- }
+ break;
+ }
- default:
- panic ("Unknown message id: %d", label);
+ default:
+ panic ("Unknown message id: %d", label);
+ }
}
- utcb_state_restore (exception_frame);
+ if (activation_frame->normal_mode_stack == utcb->alternate_stack)
+ utcb->alternate_stack_inuse = false;
+
+ assert (utcb->canary0 == UTCB_CANARY0);
+ assert (utcb->canary1 == UTCB_CANARY1);
+
+ l4_utcb_state_restore (activation_frame);
}
#ifndef NDEBUG
-static l4_word_t
-crc (struct exception_page *exception_page)
+static uintptr_t
+crc (struct vg_utcb *utcb)
{
- l4_word_t crc = 0;
- l4_word_t *p;
- for (p = (l4_word_t *) exception_page; p < &exception_page->crc; p ++)
+ uintptr_t crc = 0;
+ uintptr_t *p;
+ for (p = (uintptr_t *) utcb; p < &utcb->crc; p ++)
crc += *p;
return crc;
}
#endif
-struct exception_frame *
-exception_handler_activated (struct exception_page *exception_page)
+struct activation_frame *
+hurd_activation_handler_activated (struct vg_utcb *utcb)
{
- /* We expect EXCEPTION_PAGE to be page aligned. */
- assert (((uintptr_t) exception_page & (PAGESIZE - 1)) == 0);
- assert (exception_page->activated_mode);
+ assert (((uintptr_t) utcb & (PAGESIZE - 1)) == 0);
+ assert (utcb->canary0 == UTCB_CANARY0);
+ assert (utcb->canary1 == UTCB_CANARY1);
+ assert (utcb->activated_mode);
+ /* XXX: Assumption that stack grows down... */
+ assert (utcb->activation_handler_sp - PAGESIZE <= (uintptr_t) &utcb);
+ assert ((uintptr_t) &utcb <= utcb->activation_handler_sp);
+
+ if (unlikely (! mm_init_done))
+ /* Just return: during initialization, we don't expect any faults or
+ asynchronous IPC. We do expect that IPC will be made but it will
+ always be made with VG_IPC_RETURN and as such just returning will
+ do the right thing. */
+ return NULL;
+
+ /* This comes after the mm_init_done check as when switching utcbs,
+ this may not be true. */
+ assertx (utcb == hurd_utcb (),
+ "%p != %p (func: %p; ip: %p, sp: %p)",
+ utcb, hurd_utcb (), hurd_utcb,
+ (void *) utcb->saved_ip, (void *) utcb->saved_sp);
+
+ debug (5, "Activation handler called (utcb: %p)", utcb);
+
+ struct hurd_message_buffer *mb
+ = (struct hurd_message_buffer *) (uintptr_t) utcb->messenger_id;
+
+ debug (5, "Got message %llx (utcb: %p)", utcb->messenger_id, utcb);
- /* Allocate an exception frame. */
- struct exception_frame *exception_frame
- = exception_frame_alloc (exception_page);
- utcb_state_save (exception_frame);
+ assert (mb->magic == HURD_MESSAGE_BUFFER_MAGIC);
- debug (5, "Exception handler called (exception_page: %p)",
- exception_page);
+ struct activation_frame *activation_frame = activation_frame_alloc (utcb);
+ assert (activation_frame->canary == ACTIVATION_FRAME_CANARY);
+
+ l4_utcb_state_save (activation_frame);
+
+ activation_frame->message_buffer = mb;
#ifndef NDEBUG
- exception_page->crc = crc (exception_page);
+ utcb->crc = crc (utcb);
#endif
- l4_msg_t *msg = &exception_page->exception;
+ /* Whether we need to process the activation in normal mode. */
+ bool trampoline = true;
- l4_msg_tag_t msg_tag = l4_msg_msg_tag (*msg);
- l4_word_t label;
- label = l4_label (msg_tag);
+ if (mb == utcb->extant_message)
+ /* The extant IPC reply. Just return, everything is in place. */
+ {
+#ifndef NDEBUG
+ do_debug (0)
+ {
+ int label = 0;
+ if (vg_message_data_count (mb->request) >= sizeof (uintptr_t))
+ label = vg_message_word (mb->request, 0);
+ error_t err = -1;
+ if (vg_message_data_count (mb->reply) >= sizeof (uintptr_t))
+ err = vg_message_word (mb->reply, 0);
+
+ debug (5, "Extant RPC: %s (%d) -> %d",
+ rm_method_id_string (label), label, err);
+ }
+#endif
- switch (label)
+ utcb->extant_message = NULL;
+ trampoline = false;
+ }
+ else if (mb->closure)
{
- case EXCEPTION_fault:
- {
- addr_t fault;
- uintptr_t ip;
- uintptr_t sp;
- struct exception_info info;
-
- error_t err;
- err = exception_fault_send_unmarshal (msg, &fault, &sp, &ip, &info);
- if (err)
- panic ("Failed to unmarshal exception: %d", err);
-
- /* XXX: We assume that the stack grows down here. */
- uintptr_t f = (uintptr_t) ADDR_TO_PTR (fault);
- if (sp - PAGESIZE <= f && f <= sp + PAGESIZE * 4)
- /* The fault occurs within four pages of the stack pointer.
- It has got to be a stack fault. Handle it here. */
+ debug (5, "Closure");
+ }
+ else if (mb == utcb->exception_buffer)
+ /* It's an exception. Process it. */
+ {
+ debug (5, "Exception");
+
+ uintptr_t label = vg_message_word (mb->reply, 0);
+ switch (label)
+ {
+ case ACTIVATION_fault:
{
- debug (5, "Handling fault at " ADDR_FMT " in activated mode "
- "(ip: %x, sp: %x).",
+ addr_t fault;
+ uintptr_t ip;
+ uintptr_t sp;
+ struct activation_fault_info info;
+
+ error_t err;
+ err = activation_fault_send_unmarshal (mb->reply,
+ &fault, &sp, &ip, &info,
+ NULL);
+ if (err)
+ panic ("Failed to unmarshal exception: %d", err);
+
+ debug (4, "Fault at " ADDR_FMT "(ip: %x, sp: %x).",
ADDR_PRINTF (fault), ip, sp);
- bool r = map_fault (fault, ip, info);
- if (! r)
+ uintptr_t f = (uintptr_t) ADDR_TO_PTR (fault);
+ uintptr_t stack_page = (sp & ~(PAGESIZE - 1));
+ uintptr_t fault_page = (f & ~(PAGESIZE - 1));
+ if (stack_page == fault_page
+ || stack_page - PAGESIZE == fault_page)
+ /* The fault is on the same page as the stack pointer or
+ the following page. It is likely a stack fault.
+ Handle it using the alternate stack. */
{
- debug (0, "SIGSEGV at " ADDR_FMT " (ip: %p, sp: %p, eax: %p, "
- "ebx: %p, ecx: %p, edx: %p, edi: %p, esi: %p, "
- "eflags: %p)",
- ADDR_PRINTF (fault), ip, sp,
- exception_frame->regs[0],
- exception_frame->regs[5],
- exception_frame->regs[1],
- exception_frame->regs[2],
- exception_frame->regs[6],
- exception_frame->regs[7],
- exception_frame->regs[3]);
-
- siginfo_t si;
- memset (&si, 0, sizeof (si));
- si.si_signo = SIGSEGV;
- si.si_addr = ADDR_TO_PTR (fault);
-
- /* XXX: Should set si.si_code to SEGV_MAPERR or
- SEGV_ACCERR. */
-
- pthread_kill_siginfo_np (pthread_self (), si);
- }
- assert (exception_page->crc == crc (exception_page));
+ debug (5, "Stack fault at " ADDR_FMT "(ip: %x, sp: %x).",
+ ADDR_PRINTF (fault), ip, sp);
- utcb_state_restore (exception_frame);
+ assert (! utcb->alternate_stack_inuse);
+ utcb->alternate_stack_inuse = true;
- assert (exception_page->crc == crc (exception_page));
- assertx (exception_page->exception_stack == exception_frame,
- "%p != %p",
- exception_page->exception_stack, exception_frame);
+ assert (utcb->alternate_stack);
- exception_page->exception_stack
- = exception_page->exception_stack->next;
- return NULL;
+ activation_frame->normal_mode_stack = utcb->alternate_stack;
+ }
+
+ debug (5, "Handling fault at " ADDR_FMT " in normal mode "
+ "(ip: %x, sp: %x).",
+ ADDR_PRINTF (fault), ip, sp);
+
+ break;
}
- debug (5, "Handling fault at " ADDR_FMT " in normal mode "
- "(ip: %x, sp: %x).",
- ADDR_PRINTF (fault), ip, sp);
+ default:
+ panic ("Unknown message id: %d", label);
+ }
+
+ /* Unblock the exception handler messenger. */
+ error_t err = vg_ipc (VG_IPC_RECEIVE | VG_IPC_RECEIVE_ACTIVATE
+ | VG_IPC_RETURN,
+ ADDR_VOID, utcb->exception_buffer->receiver,
+ ADDR_VOID,
+ ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID);
+ assert (! err);
+ }
+ else if (mb->just_free)
+ {
+ debug (5, "Just freeing");
+ hurd_message_buffer_free (mb);
+ trampoline = false;
+ }
+ else
+ {
+ panic ("Unknown messenger %llx (extant: %p; exception: %p) (label: %d)",
+ utcb->messenger_id,
+ utcb->extant_message, utcb->exception_buffer,
+ vg_message_word (mb->reply, 0));
+ }
+
+ /* Assert that the utcb has not been modified. */
+ assert (utcb->crc == crc (utcb));
+
+ if (! trampoline)
+ {
+ debug (5, "Direct return");
+
+ assert (utcb->activation_stack == activation_frame);
+ utcb->activation_stack = utcb->activation_stack->next;
- break;
- }
+ l4_utcb_state_restore (activation_frame);
- default:
- panic ("Unknown message id: %d", label);
+ activation_frame = NULL;
+ }
+ else
+ {
+ debug (5, "Continuing in normal mode");
+ l4_utcb_state_restore (activation_frame);
}
- /* Handle the fault in normal mode. */
+ assert (utcb->canary0 == UTCB_CANARY0);
+ assert (utcb->canary1 == UTCB_CANARY1);
- /* Copy the relevant bits. */
- memcpy (&exception_frame->exception, msg,
- (1 + l4_untyped_words (msg_tag)) * sizeof (l4_word_t));
+ return activation_frame;
+}
+
+static char activation_handler_area0[PAGESIZE]
+ __attribute__ ((aligned (PAGESIZE)));
+static char activation_handler_msg[PAGESIZE]
+ __attribute__ ((aligned (PAGESIZE)));
+static struct vg_utcb *initial_utcb = (void *) &activation_handler_area0[0];
+
+static struct vg_utcb *
+simple_utcb_fetcher (void)
+{
+ assert (initial_utcb->canary0 == UTCB_CANARY0);
+ assert (initial_utcb->canary1 == UTCB_CANARY1);
- assert (exception_page->crc == crc (exception_page));
- return exception_frame;
+ return initial_utcb;
}
+struct vg_utcb *(*hurd_utcb) (void);
+
void
-exception_handler_init (void)
+hurd_activation_handler_init_early (void)
{
- error_t err = hurd_slab_init (&exception_frame_slab,
- sizeof (struct exception_frame), 0,
- exception_frame_slab_alloc,
- exception_frame_slab_dealloc,
- NULL, NULL, NULL);
- assert (! err);
+ initial_utcb->canary0 = UTCB_CANARY0;
+ initial_utcb->canary1 = UTCB_CANARY1;
- extern struct hurd_startup_data *__hurd_startup_data;
+ hurd_utcb = simple_utcb_fetcher;
- /* We use the start of the area (lowest address) as the exception page. */
- addr_t stack_area = as_alloc (EXCEPTION_STACK_SIZE_LOG2, 1, true);
- void *stack_area_base
- = ADDR_TO_PTR (addr_extend (stack_area, 0, EXCEPTION_STACK_SIZE_LOG2));
+ struct vg_utcb *utcb = hurd_utcb ();
+ assert (utcb == initial_utcb);
- debug (5, "Exception area: %x-%x",
- stack_area_base, stack_area_base + EXCEPTION_STACK_SIZE - 1);
+ /* XXX: We assume the stack grows down! SP is set to the end of the
+ utcb page. */
+ utcb->activation_handler_sp
+ = (uintptr_t) activation_handler_area0 + sizeof (activation_handler_area0);
- void *page;
- for (page = stack_area_base;
- page < stack_area_base + EXCEPTION_STACK_SIZE;
- page += PAGESIZE)
- {
- addr_t slot = addr_chop (PTR_TO_ADDR (page), PAGESIZE_LOG2);
+ /* The word beyond the base of the stack is interpreted as a pointer
+ to the utcb. Make it so. */
+ utcb->activation_handler_sp -= sizeof (void *);
+ * (void **) utcb->activation_handler_sp = utcb;
- as_ensure (slot);
+ utcb->activation_handler_ip = (uintptr_t) &hurd_activation_handler_entry;
+ utcb->activation_handler_end = (uintptr_t) &hurd_activation_handler_end;
- struct storage storage;
- storage = storage_alloc (ADDR_VOID, cap_page,
- STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT,
- slot);
+ struct hurd_thread_exregs_in in;
+ memset (&in, 0, sizeof (in));
+
+ struct vg_message *msg = (void *) &activation_handler_msg[0];
+ rm_thread_exregs_send_marshal (msg, HURD_EXREGS_SET_UTCB, in,
+ ADDR_VOID, ADDR_VOID,
+ PTR_TO_PAGE (utcb), ADDR_VOID,
+ __hurd_startup_data->messengers[1]);
+
+ error_t err;
+ err = vg_ipc_full (VG_IPC_RECEIVE | VG_IPC_SEND | VG_IPC_RECEIVE_ACTIVATE
+ | VG_IPC_RECEIVE_SET_THREAD_TO_CALLER
+ | VG_IPC_RECEIVE_SET_ASROOT_TO_CALLERS
+ | VG_IPC_RECEIVE_INLINE
+ | VG_IPC_SEND_SET_THREAD_TO_CALLER
+ | VG_IPC_SEND_SET_ASROOT_TO_CALLERS,
+ ADDR_VOID,
+ __hurd_startup_data->messengers[1], ADDR_VOID, ADDR_VOID,
+ ADDR_VOID, __hurd_startup_data->thread,
+ __hurd_startup_data->messengers[0], PTR_TO_PAGE (msg),
+ 0, 0, ADDR_VOID);
+ if (err)
+ panic ("Failed to send IPC: %d", err);
+ if (utcb->inline_words[0])
+ panic ("Failed to install utcb page: %d", utcb->inline_words[0]);
+}
- if (ADDR_IS_VOID (storage.addr))
- panic ("Failed to allocate page for exception state");
- }
+void
+hurd_activation_handler_init (void)
+{
+ struct vg_utcb *utcb;
+ error_t err = hurd_activation_state_alloc (__hurd_startup_data->thread,
+ &utcb);
+ if (err)
+ panic ("Failed to allocate activation state: %d", err);
- struct exception_page *exception_page = stack_area_base;
+ assert (! initial_utcb->activation_stack);
- /* XXX: We assume the stack grows down! SP is set to the end of the
- exception page. */
- exception_page->exception_handler_sp
- = (uintptr_t) stack_area_base + EXCEPTION_STACK_SIZE;
+ initial_utcb = utcb;
+
+ debug (4, "initial_utcb (%p) is now: %p", &initial_utcb, initial_utcb);
+}
+
+/* The activation area is 16 pages large. It consists of the utcb,
+ the activation stack and an alternate stack (which is needed to
+ handle stack faults). */
+#define ACTIVATION_AREA_SIZE_LOG2 (PAGESIZE_LOG2 + 4)
+#define ACTIVATION_AREA_SIZE (1 << ACTIVATION_AREA_SIZE_LOG2)
+
+error_t
+hurd_activation_state_alloc (addr_t thread, struct vg_utcb **utcbp)
+{
+ debug (5, DEBUG_BOLD ("allocating activation state for " ADDR_FMT),
+ ADDR_PRINTF (thread));
+
+ addr_t activation_area = as_alloc (ACTIVATION_AREA_SIZE_LOG2, 1, true);
+ void *activation_area_base
+ = ADDR_TO_PTR (addr_extend (activation_area,
+ 0, ACTIVATION_AREA_SIZE_LOG2));
+
+ debug (0, "Activation area: %p-%p",
+ activation_area_base, activation_area_base + ACTIVATION_AREA_SIZE);
+
+ int page_count = 0;
+ /* Be careful! We assume that pages is properly set up after at
+ most 2 allocations! */
+ addr_t pages_[2];
+ addr_t *pages = pages_;
+
+ void alloc (void *addr)
+ {
+ addr_t slot = addr_chop (PTR_TO_ADDR (addr), PAGESIZE_LOG2);
+
+ as_ensure (slot);
+
+ struct storage storage;
+ storage = storage_alloc (ADDR_VOID, cap_page,
+ STORAGE_LONG_LIVED,
+ OBJECT_POLICY_DEFAULT,
+ slot);
+
+ if (ADDR_IS_VOID (storage.addr))
+ panic ("Failed to allocate page for exception state");
+
+ if (pages == pages_)
+ assert (page_count < sizeof (pages_) / sizeof (pages_[0]));
+ pages[page_count ++] = storage.addr;
+ }
+
+ /* When NDEBUG is not defined, we leave some pages empty so that
+ should something overrun, we'll fault. */
+#ifndef NDEBUG
+#define SKIP 1
+#else
+#define SKIP 0
+#endif
+
+ int page = SKIP;
+
+ /* Allocate the utcb. */
+ struct vg_utcb *utcb = activation_area_base + page * PAGESIZE;
+ alloc (utcb);
+ page += 1 + SKIP;
+
+ /* And set up the small activation stack.
+ UTCB->ACTIVATION_HANDLER_SP is the base of the stack.
+
+ XXX: We assume the stack grows down! */
+#ifndef NDEBUG
+ /* Use a dedicated page. */
+ utcb->activation_handler_sp
+ = (uintptr_t) activation_area_base + page * PAGESIZE;
+ alloc ((void *) utcb->activation_handler_sp);
+
+ utcb->activation_handler_sp += PAGESIZE;
+ page += 1 + SKIP;
+#else
+ /* Use the end of the UTCB. */
+ utcb->activation_handler_sp = utcb + PAGESIZE;
+#endif
+
+ /* At the top of the stack page, we use some space to remember the
+ storage we allocate so that we can free it later. */
+ utcb->activation_handler_sp
+ -= sizeof (addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE;
+ memset (utcb->activation_handler_sp, 0,
+ sizeof (addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE);
+ memcpy (utcb->activation_handler_sp, pages, sizeof (addr_t) * page_count);
+ pages = (addr_t *) utcb->activation_handler_sp;
/* The word beyond the base of the stack is a pointer to the
exception page. */
- exception_page->exception_handler_sp -= sizeof (void *);
- * (void **) exception_page->exception_handler_sp = exception_page;
+ utcb->activation_handler_sp -= sizeof (void *);
+ * (void **) utcb->activation_handler_sp = utcb;
- exception_page->exception_handler_ip = (l4_word_t) &exception_handler_entry;
- exception_page->exception_handler_end = (l4_word_t) &exception_handler_end;
- struct hurd_thread_exregs_in in;
- in.exception_page = addr_chop (PTR_TO_ADDR (exception_page), PAGESIZE_LOG2);
+ /* And a medium-sized alternate stack. */
+ void *a;
+ for (a = activation_area_base + page * PAGESIZE;
+ a < activation_area_base + ACTIVATION_AREA_SIZE - SKIP * PAGESIZE;
+ a += PAGESIZE)
+ alloc (a);
+
+ assert (a - activation_area_base + page * PAGESIZE >= AS_STACK_SPACE);
+
+ /* XXX: We again assume that the stack grows down. */
+ utcb->alternate_stack = a;
+
+
+ utcb->activation_handler_ip = (uintptr_t) &hurd_activation_handler_entry;
+ utcb->activation_handler_end = (uintptr_t) &hurd_activation_handler_end;
+
+ utcb->exception_buffer = hurd_message_buffer_alloc_long ();
+ utcb->extant_message = NULL;
+
+ utcb->canary0 = UTCB_CANARY0;
+ utcb->canary1 = UTCB_CANARY1;
+
+ debug (5, "Activation area: %p-%p; utcb: %p; stack: %p; alt stack: %p",
+ (void *) activation_area_base,
+ (void *) activation_area_base + ACTIVATION_AREA_SIZE - 1,
+ utcb, (void *) utcb->activation_handler_sp, utcb->alternate_stack);
+
+
+ /* Unblock the exception handler messenger. */
+ error_t err = vg_ipc (VG_IPC_RECEIVE | VG_IPC_RECEIVE_ACTIVATE
+ | VG_IPC_RETURN,
+ ADDR_VOID, utcb->exception_buffer->receiver, ADDR_VOID,
+ ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID);
+ assert (! err);
+
+ *utcbp = utcb;
+
+ struct hurd_thread_exregs_in in;
struct hurd_thread_exregs_out out;
- err = rm_thread_exregs (ADDR_VOID, __hurd_startup_data->thread,
- HURD_EXREGS_SET_EXCEPTION_PAGE,
- in, &out);
+
+ err = rm_thread_exregs (ADDR_VOID, thread,
+ HURD_EXREGS_SET_UTCB
+ | HURD_EXREGS_SET_EXCEPTION_MESSENGER,
+ in, ADDR_VOID, ADDR_VOID,
+ PTR_TO_PAGE (utcb), utcb->exception_buffer->receiver,
+ &out, NULL, NULL, NULL, NULL);
+ if (err)
+ panic ("Failed to install utcb");
+
+ err = rm_cap_copy (ADDR_VOID,
+ utcb->exception_buffer->receiver,
+ ADDR (VG_MESSENGER_THREAD_SLOT, VG_MESSENGER_SLOTS_LOG2),
+ ADDR_VOID, thread,
+ 0, CAP_PROPERTIES_DEFAULT);
if (err)
- panic ("Failed to install exception page");
+ panic ("Failed to set messenger's thread");
+
+ check_activation_frame_reserve (utcb);
+
+ return 0;
}
void
-exception_page_cleanup (struct exception_page *exception_page)
+hurd_activation_state_free (struct vg_utcb *utcb)
{
- struct exception_frame *f;
- struct exception_frame *prev = exception_page->exception_stack_bottom;
+ assert (utcb->canary0 == UTCB_CANARY0);
+ assert (utcb->canary1 == UTCB_CANARY1);
+ assert (! utcb->activation_stack);
+ /* Free any activation frames. */
+ struct activation_frame *f;
+ struct activation_frame *prev = utcb->activation_stack_bottom;
while ((f = prev))
{
prev = f->prev;
- hurd_slab_dealloc (&exception_frame_slab, f);
+ hurd_slab_dealloc (&activation_frame_slab, f);
}
-}
+ hurd_message_buffer_free (utcb->exception_buffer);
+
+ /* Free the allocated storage. */
+ /* Copy the array as we're going to free the storage that it is
+ in. */
+ addr_t pages[ACTIVATION_AREA_SIZE / PAGESIZE];
+ memcpy (pages,
+ utcb->activation_handler_sp + sizeof (uintptr_t),
+ sizeof (addr_t) * ACTIVATION_AREA_SIZE / PAGESIZE);
+
+ int i;
+ for (i = 0; i < sizeof (pages) / sizeof (pages[0]); i ++)
+ if (! ADDR_IS_VOID (pages[i]))
+ storage_free (pages[i], false);
+
+ /* Finally, free the address space. */
+ int page = SKIP;
+ void *activation_area_base = (void *) utcb - page * PAGESIZE;
+ as_free (addr_chop (PTR_TO_ADDR (activation_area_base),
+ ACTIVATION_AREA_SIZE_LOG2),
+ false);
+}
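
For orientation, hurd_activation_state_alloc above lays the 16-page activation area out as follows in debug builds (SKIP == 1). This summary is derived from the allocation arithmetic in the hunk and is not itself part of the patch; in NDEBUG builds the guard pages are omitted and the handler stack shares the utcb page.

    page  0      guard (never allocated)
    page  1      struct vg_utcb
    page  2      guard
    page  3      activation handler stack (ACTIVATION_HANDLER_SP starts at
                 its top, minus the saved storage addresses and the utcb
                 pointer)
    page  4      guard
    pages 5-14   alternate stack (ALTERNATE_STACK points just past page 14;
                 the stack grows down)
    page 15      guard
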
diff --git a/libhurd-mm/headers.m4 b/libhurd-mm/headers.m4
index d0157e4..8686d6f 100644
--- a/libhurd-mm/headers.m4
+++ b/libhurd-mm/headers.m4
@@ -18,6 +18,7 @@ AC_CONFIG_LINKS([
sysroot/include/hurd/map.h:libhurd-mm/map.h
sysroot/include/hurd/pager.h:libhurd-mm/pager.h
sysroot/include/hurd/anonymous.h:libhurd-mm/anonymous.h
+ sysroot/include/hurd/message-buffer.h:libhurd-mm/message-buffer.h
])
AC_CONFIG_COMMANDS_POST([
diff --git a/libhurd-mm/ia32-exception-entry.S b/libhurd-mm/ia32-exception-entry.S
index 47fbe83..cc9ac78 100644
--- a/libhurd-mm/ia32-exception-entry.S
+++ b/libhurd-mm/ia32-exception-entry.S
@@ -1,4 +1,4 @@
-/* ia32-exception-entry.S - Exception handler dispatcher.
+/* ia32-activation-entry.S - Activation handler dispatcher.
Copyright (C) 2007, 2008 Free Software Foundation, Inc.
Written by Neal H. Walfield <neal@gnu.org>.
@@ -22,124 +22,125 @@
.text
-/* Offsets into a struct exception_page. */
-#define MODE (0*4)
-#define SAVED_IP (1*4)
-#define SAVED_SP (2*4)
-#define SAVED_THREAD_STATE (3*4)
-
-#define EXCEPTION_STACK (4*4)
-
-/* Relative to one word beyond the bottom of the stack. */
-#define EXCEPTION_PAGE_PTR (-1*4)
-#define SAVED_EAX (-2*4)
-#define SAVED_ECX (-3*4)
-#define SAVED_FLAGS (-4*4)
-#define SAVED_EDX (-5*4)
-
-/* Offsets into a struct exception_fault. */
-#define EF_SAVED_EAX 0
-#define EF_SAVED_ECX 4
-#define EF_SAVED_EDX 8
-#define EF_SAVED_FLAGS 12
-#define EF_SAVED_IP 16
-#define EF_SAVED_EBX 20
-#define EF_SAVED_EDI 24
-#define EF_SAVED_ESI 28
-#define EF_NEXT 32
+/* Offsets into a struct vg_utcb. */
+#define UTCB_MODE (0*4)
+#define UTCB_SAVED_IP (1*4)
+#define UTCB_SAVED_SP (2*4)
+#define UTCB_SAVED_THREAD_STATE (3*4)
+#define UTCB_ACTIVATION_FRAME_STACK (4*4)
+/* The bits of the mode word. */
#define ACTIVATED_MODE_BIT 0
#define PENDING_MESSAGE_BIT 1
#define INTERRUPT_IN_TRANSITION_BIT 2
- /* Handle an exception. */
- .globl exception_handler_entry, _exception_handler_entry
-exception_handler_entry:
-_exception_handler_entry:
+/* Offsets into a struct activation_frame. */
+#define AF_SAVED_EAX 0
+#define AF_SAVED_ECX 4
+#define AF_SAVED_EDX 8
+#define AF_SAVED_FLAGS 12
+#define AF_SAVED_IP 16
+#define AF_SAVED_EBX 20
+#define AF_SAVED_EDI 24
+#define AF_SAVED_ESI 28
+#define AF_SAVED_EBP 32
+#define AF_SAVED_ESP 36
+#define AF_NORMAL_MODE_STACK 40
+#define AF_NEXT 44
+
+ /* Handle an activation. */
+ .globl hurd_activation_handler_entry, _hurd_activation_handler_entry
+hurd_activation_handler_entry:
+_hurd_activation_handler_entry:
/* How we will use the stack:
relative to entry sp
- | relative to sp after saving edx
+ | relative to sp after saving the register file.
| |
v v
- +0 +24 pointer to exception_page
- -4 +20 saved eax \
- -8 +16 saved ecx \ save
- -12 +12 saved flags / area
- -16 +8 saved edx /
- -20 +4 entry edx
- -24 +0 entry eflags
+ +0 +24 pointer to utcb
+ -4 +20 entry eflags
+ -8 +16 entry edx
+ -12 +12 saved eax \
+ -16 +8 saved ecx \ save
+ -20 +4 saved flags / area
+ -24 +0 saved edx /
*/
- /* Adjust the stack: our saved EAX, ECX, EDX and FLAGS may be
- there. */
-
- sub $16, %esp
-
- /* %ESP points to the top of the exception page. If the
- interrupt in transition flag is not set, then we need to save
- the caller-saved registers. Otherwise, we were interrupted
- while returning to normal mode and the the saved state, not
- our registers, reflects the real user state (see big comment
- below for more information). */
+ /* %ESP points to the top of the UTCB. If the interrupt in
+ transition flag is not set, then we need to save the
+ caller-saved registers. Otherwise, we were interrupted while
+ returning to normal mode and the saved state, not our
+ registers, reflects the real user state (see big comment below
+ for more information). */
- pushl %edx
- /* Save the eflags before we do anything serious. */
+ /* Save the eflags before we do *anything*. */
pushf
- /* %EDX is now the only register which we can touch. Make it
- a pointer to the exception page. Recall: we stashed a pointer
- to the exception page at the word following the botton of the
- stack. */
- mov 24(%esp), %edx
-
- /* Now check if the interrupt in transition flag is set. */
- bt $INTERRUPT_IN_TRANSITION_BIT, MODE(%edx)
- jc after_save
-
- /* Nope; we need to save the current EAX, ECX and eflags. */
- mov %eax, 20(%esp)
- mov %ecx, 16(%esp)
+ /* We need to check if the interrupt in transition flag is
+ set. Free up a register and make it a pointer to the UTCB.
+ Recall: we stashed a pointer to the UTCB at the word following
+ the bottom of the stack. */
+ pushl %edx
+ mov 8(%esp), %edx
+
+ bt $INTERRUPT_IN_TRANSITION_BIT, UTCB_MODE(%edx)
+ jc skip_save
+
+ /* Nope; we need to save the current EAX and ECX and copy the
+ entry eflags and EDX into the save area. */
+
+ pushl %eax
+ pushl %ecx
/* entry eflags. */
- popl %ecx
- mov %ecx, (12-4)(%esp)
+ mov 12(%esp), %eax
+ pushl %eax
/* entry edx. */
- popl %ecx
- mov %ecx, (8-4-4)(%esp)
+ mov 12(%esp), %eax
+ pushl %eax
+
jmp after_adjust
-after_save:
-
- /* Adjust the stack: we don't need our entry flags or entry edx. */
- add $8, %esp
+skip_save:
+ /* We don't save the entry registers but use the saved values.
+ Adjust the stack pointer to point to the start of the save area. */
+ sub $16, %esp
+
+after_adjust:
-after_adjust:
+ /* We are going to call the activation handler. According to
+ the i386 ABI:
- /* We are going to call the exception handler. But first save
- our pointer to the exception page on the stack. */
- pushl %edx
+ - caller saved registers are: eax, ecx, edx
+ - callee saved registers are: ebp, ebx, esi, edi
+
+ We've already saved the original eax, ecx and edx. The
+ function will preserve the rest.
- /* The exception handler function takes a single argument:
- the exception page. */
+ The only value we care about is our pointer to the UTCB (which
+ is in edx) and which we can save on the stack. */
+ pushl %edx
- /* Push the exception page. */
+ /* The activation handler function takes a single argument:
+ the UTCB. */
pushl %edx
/* Clear the direction flag as per the calling conventions. */
cld
- call exception_handler_activated
- /* The exception frame, if any, is in EAX. */
+ call hurd_activation_handler_activated
+ /* The activation frame, if any, is in EAX. */
/* Clear the arguments. */
add $4, %esp
- /* Restore exception page pointer. */
+ /* Restore UTCB pointer. */
popl %edx
- /* Check if there is an exception frame. */
+ /* Check if hurd_activation_handler_activated returned an
+ activation frame. */
test %eax, %eax
- jnz exception_frame_run
+ jnz activation_frame_run
- /* There is no exception frame, transition immediately back to
+ /* There is no activation frame, transition immediately back to
normal mode.
To return to normal mode, we need to restore the saved
@@ -160,156 +161,165 @@ after_adjust:
But this raises another problem: the IP and SP that the kernel
sees are not those that return us to user code. As this code
- relies on the exception stack, a nested stack will leave us in
+ relies on the activation stack, a nested stack will leave us in
an inconsistent state. (This can also happen if we receive a
message before returning to user code.) To avoid this, we
register our restore to normal mode function with the kernel.
If the kernel transitions us back to activated mode while the
EIP is in this range, then it does not save the EIP and ESP
- and invokes the exception handler with the
+ and invokes the activation handler with the
interrupt_in_transition flag set. */
/* Reset the activation bit. */
- and $(~1), MODE(%edx)
-
- /* Set EAX to one word beyond the bottom of the stack (i.e.,
- pointing at the pointer to the exception page. */
- add $PAGESIZE, %esp
- and $(~(PAGESIZE-1)), %esp
- mov %esp, %eax
+ and $(~1), UTCB_MODE(%edx)
/* Check for pending messages. This does not need to be
atomic as if we get interrupted here, we automatically
transition back to activated mode. */
- bt $PENDING_MESSAGE_BIT, MODE(%edx)
- jc process_pending
+ bt $PENDING_MESSAGE_BIT, UTCB_MODE(%edx)
+ jnc no_pending
+
+ /* There is a pending activation. Force its delivery. As we
+ are no longer in activated mode, either we'll be activated
+ with the interrupt-in-transition bit set (and thus never
+ return here) or we'll return. In the latter case, we just
+ resume execution. */
+
+ /* Save the UTCB. */
+ pushl %edx
+
+ cld
+ call hurd_activation_fetch
+
+ popl %edx
+
+no_pending:
+
+ /* Set EAX to the start of the save area. */
+ mov %esp, %eax
/* Restore the user stack. */
- mov SAVED_SP(%edx), %esp
+ mov UTCB_SAVED_SP(%edx), %esp
/* Copy the saved EIP and saved flags to the user stack. */
- mov SAVED_IP(%edx), %ecx
+ mov UTCB_SAVED_IP(%edx), %ecx
pushl %ecx
- mov SAVED_FLAGS(%eax), %ecx
+ mov 4(%eax), %ecx
pushl %ecx
/* Restore the general-purpose registers. */
- mov SAVED_EDX(%eax), %edx
- mov SAVED_ECX(%eax), %ecx
- mov SAVED_EAX(%eax), %eax
+ mov 0(%eax), %edx
+ mov 8(%eax), %ecx
+ mov 12(%eax), %eax
/* Restore the saved eflags. */
popf
/* And finally, the saved EIP and in doing so the saved ESP. */
ret
-process_pending:
- /* This code is called if after leaving activated mode, we
- detect a pending message. %EDX points to the exception page
- and eax one word beyond the bottom of the exception stack. */
-
- /* Set activated mode and interrupt in transition. */
- or $(1 | 4), MODE(%edx)
-
- /* Set the ESP to the top of the stack. */
- mov %eax, %esp
- add $4, %esp
-
- /* Get the pending exception. */
- call exception_fetch_exception
-
- jmp exception_handler_entry
-
-
-exception_frame_run:
- /* EAX contains the exception frame, EDX the exception page,
- and ESP points after the saved edx. */
+
+activation_frame_run:
+ /* EAX contains the activation frame, EDX the UTCB, and ESP
+ points to the save area. ECX has been saved in the save area. */
- /* Copy all relevant register state from the exception page
- and save area to the exception frame. We use edx as the
- intermediate. We can restore it from the exception stack
- (it's the word following the base). */
+ /* Copy all relevant register state from the UTCB
+ and save area to the activation frame. We use ecx as the
+ intermediate. */
- mov SAVED_IP(%edx), %edx
- mov %edx, EF_SAVED_IP(%eax)
+ mov UTCB_SAVED_IP(%edx), %ecx
+ mov %ecx, AF_SAVED_IP(%eax)
+ mov UTCB_SAVED_SP(%edx), %ecx
+ mov %ecx, AF_SAVED_ESP(%eax)
/* edx. */
- mov 0(%esp), %edx
- mov %edx, EF_SAVED_EDX(%eax)
+ mov 0(%esp), %ecx
+ mov %ecx, AF_SAVED_EDX(%eax)
/* flags. */
- mov 4(%esp), %edx
- mov %edx, EF_SAVED_FLAGS(%eax)
+ mov 4(%esp), %ecx
+ mov %ecx, AF_SAVED_FLAGS(%eax)
/* ecx. */
- mov 8(%esp), %edx
- mov %edx, EF_SAVED_ECX(%eax)
+ mov 8(%esp), %ecx
+ mov %ecx, AF_SAVED_ECX(%eax)
/* eax. */
- mov 12(%esp), %edx
- mov %edx, EF_SAVED_EAX(%eax)
-
- mov %ebx, EF_SAVED_EBX(%eax)
- mov %edi, EF_SAVED_EDI(%eax)
- mov %esi, EF_SAVED_ESI(%eax)
-
- /* Restore the exception page pointer (edx). */
- mov 16(%esp), %edx
+ mov 12(%esp), %ecx
+ mov %ecx, AF_SAVED_EAX(%eax)
+
+ /* We save the rest for debugging purposes. */
+ mov %ebx, AF_SAVED_EBX(%eax)
+ mov %edi, AF_SAVED_EDI(%eax)
+ mov %esi, AF_SAVED_ESI(%eax)
+ mov %ebp, AF_SAVED_EBP(%eax)
+
+ /* Abandon the activation stack. If AF->NORMAL_MODE_STACK is
+ 0, we use the top of the normal user stack. Otherwise, we use
+ the stack indicated by AF->NORMAL_MODE_STACK. */
- /* Restore the user ESP. */
- mov SAVED_SP(%edx), %esp
+ mov AF_NORMAL_MODE_STACK(%eax), %esp
+ test %esp, %esp
+ jnz stack_setup
+ mov UTCB_SAVED_SP(%edx), %esp
+stack_setup:
- /* We've now stashed away all the state we need to restore to
- the interrupted state. */
+ /* We've now stashed away all the state that was in the UTCB
+ or on the activation stack that we need to restore the
+ interrupted state. */
- /* Reset the activation bit. */
- and $(~1), MODE(%edx)
+ /* Clear the activation bit. */
+ and $(~1), UTCB_MODE(%edx)
- /* XXX: Check for pending message. */
+ .global hurd_activation_handler_end, _hurd_activation_handler_end
+hurd_activation_handler_end:
+_hurd_activation_handler_end:
- .global exception_handler_end, _exception_handler_end
-exception_handler_end:
-_exception_handler_end:
+ /* We have now left activated mode. We've saved all the state
+ we need to return to the interrupted state in the activation
+ frame and ESP points to another stack (i.e., not the activation
+ stack). If a fault now occurs, nothing bad can happen. */
- /* We have now left activated mode. We've saved all the
- state we need to return to the interrupted state in the
- exception frame and ESP points to the normal stack. If a
- fault now occurs, nothing bad can happend. */
-
- /* Save the exception page pointer. */
+ /* Save the UTCB pointer. */
pushl %edx
- /* Save the exception frame pointer. */
+ /* Save the activation frame pointer. */
pushl %eax
- /* Call the continuation (single argument: exception frame
- pointer). */
+ /* Call the continuation (two arguments: activation frame
+ pointer and the utcb). */
+ pushl %edx
pushl %eax
cld
- call exception_handler_normal
+ call hurd_activation_handler_normal
- /* Remove the argument. */
- add $4, %esp
+ /* Remove the arguments. */
+ add $8, %esp
- /* Restore the exception frame pointer. */
+ /* Restore the activation frame pointer. */
popl %eax
- /* And restore the exception page pointer. */
+ /* And restore the UTCB pointer. */
popl %edx
- /* Restore the user state. */
- mov EF_SAVED_IP(%eax), %ecx
+ /* Restore the interrupted state. */
+
+ /* First, the interrupted stack. */
+ mov AF_SAVED_ESP(%eax), %esp
+
+ /* Then, push the state onto that stack. */
+ mov AF_SAVED_IP(%eax), %ecx
pushl %ecx
- mov EF_SAVED_FLAGS(%eax), %ecx
+ mov AF_SAVED_FLAGS(%eax), %ecx
pushl %ecx
- mov EF_SAVED_EDX(%eax), %ecx
+ mov AF_SAVED_EDX(%eax), %ecx
pushl %ecx
- mov EF_SAVED_ECX(%eax), %ecx
+ mov AF_SAVED_ECX(%eax), %ecx
pushl %ecx
- mov EF_SAVED_EAX(%eax), %ecx
+ mov AF_SAVED_EAX(%eax), %ecx
pushl %ecx
- /* Remove our exception frame, which is at the top
- of the exception frame stack. */
- mov EF_NEXT(%eax), %ecx
- mov %ecx, EXCEPTION_STACK(%edx)
+ /* Remove our activation frame, which is at the top
+ of the activation frame stack. */
+ mov AF_NEXT(%eax), %ecx
+ mov %ecx, UTCB_ACTIVATION_FRAME_STACK(%edx)
+ /* Finally, restore the register file. */
popl %eax
popl %ecx
popl %edx
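
The mode word the trampoline tests and clears is just a small bit set. The standalone C fragment below is only an illustration, not part of the patch; it spells out the two operations performed on that word: clearing ACTIVATED_MODE_BIT on the way back to normal mode, and testing PENDING_MESSAGE_BIT to decide whether hurd_activation_fetch must be called.

    #include <assert.h>
    #include <stdint.h>

    #define ACTIVATED_MODE_BIT          0
    #define PENDING_MESSAGE_BIT         1
    #define INTERRUPT_IN_TRANSITION_BIT 2

    int
    main (void)
    {
      uintptr_t mode = (1 << ACTIVATED_MODE_BIT) | (1 << PENDING_MESSAGE_BIT);

      /* "and $(~1), UTCB_MODE(%edx)": leave activated mode.  */
      mode &= ~(uintptr_t) (1 << ACTIVATED_MODE_BIT);
      assert (! (mode & (1 << ACTIVATED_MODE_BIT)));

      /* "bt $PENDING_MESSAGE_BIT, UTCB_MODE(%edx); jnc no_pending": if a
         message arrived while we were activated, the trampoline calls
         hurd_activation_fetch to force its delivery.  */
      int must_fetch = (mode & (1 << PENDING_MESSAGE_BIT)) != 0;
      assert (must_fetch);

      return 0;
    }
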
diff --git a/libhurd-mm/map.c b/libhurd-mm/map.c
index 4c3ebb2..001b97d 100644
--- a/libhurd-mm/map.c
+++ b/libhurd-mm/map.c
@@ -126,7 +126,7 @@ map_install (struct map *map)
assert ((map->access & ~MAP_ACCESS_ALL) == 0);
- debug (5, "Installing %c%c map at %x+%x(%x) referencing %x starting at %x",
+ debug (5, "Installing %c%c map at %x+%x(%x) referencing %p starting at %x",
map->access & MAP_ACCESS_READ ? 'r' : '~',
map->access & MAP_ACCESS_WRITE ? 'w' : '~',
map->region.start, map->region.start + map->region.length,
@@ -314,7 +314,7 @@ map_join (struct map *first, struct map *second)
}
bool
-map_fault (addr_t fault_addr, uintptr_t ip, struct exception_info info)
+map_fault (addr_t fault_addr, uintptr_t ip, struct activation_fault_info info)
{
/* Find the map. */
struct region region;
@@ -332,9 +332,9 @@ map_fault (addr_t fault_addr, uintptr_t ip, struct exception_info info)
{
do_debug (5)
{
- debug (0, "No map covers " ADDR_FMT "(" EXCEPTION_INFO_FMT ")",
+ debug (0, "No map covers " ADDR_FMT "(" ACTIVATION_FAULT_INFO_FMT ")",
ADDR_PRINTF (fault_addr),
- EXCEPTION_INFO_PRINTF (info));
+ ACTIVATION_FAULT_INFO_PRINTF (info));
for (map = hurd_btree_map_first (&maps);
map;
map = hurd_btree_map_next (map))
diff --git a/libhurd-mm/map.h b/libhurd-mm/map.h
index dba2389..febc3ea 100644
--- a/libhurd-mm/map.h
+++ b/libhurd-mm/map.h
@@ -23,8 +23,8 @@
#include <hurd/btree.h>
#include <hurd/addr.h>
-#include <hurd/exceptions.h>
#include <hurd/mutex.h>
+#include <hurd/as.h>
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
@@ -161,7 +161,7 @@ maps_lock_lock (void)
{
extern ss_mutex_t maps_lock;
- map_lock_ensure_stack (EXCEPTION_STACK_SIZE - PAGESIZE);
+ map_lock_ensure_stack (AS_STACK_SPACE);
ss_mutex_lock (&maps_lock);
}
@@ -231,6 +231,6 @@ extern bool map_join (struct map *first, struct map *second);
/* Raise a fault at address ADDR. Returns true if the fault was
handled, false otherwise. */
extern bool map_fault (addr_t addr,
- uintptr_t ip, struct exception_info info);
+ uintptr_t ip, struct activation_fault_info info);
#endif
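
maps_lock_lock now grows the stack by AS_STACK_SPACE before taking maps_lock. The point, presumably, is that a stack fault taken while maps_lock is held could not be serviced, since servicing it leads back into map_fault and this very lock; pre-faulting the stack first avoids that. A rough, self-contained illustration of such a pre-fault follows. It assumes the stack grows down and hard-codes a 4096-byte page; the real map_lock_ensure_stack is not shown in this patch and may work differently.

    /* Touch SIZE bytes of stack below the current frame so that no stack
       fault can occur within the next SIZE bytes of call depth.  Sketch
       only; assumes a downward-growing stack and 4096-byte pages.  */
    static void
    prefault_stack (unsigned long size)
    {
      volatile char probe[size];
      unsigned long i;
      for (i = 0; i < size; i += 4096)
        probe[i] = 0;
    }
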
diff --git a/libhurd-mm/message-buffer.c b/libhurd-mm/message-buffer.c
new file mode 100644
index 0000000..c1326ab
--- /dev/null
+++ b/libhurd-mm/message-buffer.c
@@ -0,0 +1,315 @@
+/* message-buffer.c - Implementation of messaging data structure management.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <hurd/stddef.h>
+#include <hurd/slab.h>
+#include <hurd/storage.h>
+#include <hurd/as.h>
+#include <hurd/startup.h>
+#include <hurd/capalloc.h>
+#include <hurd/exceptions.h>
+
+extern struct hurd_startup_data *__hurd_startup_data;
+
+static char initial_pages[4][PAGESIZE] __attribute__ ((aligned (PAGESIZE)));
+static int initial_page;
+#define INITIAL_PAGE_COUNT (sizeof (initial_pages) / sizeof (initial_pages[0]))
+static int initial_messenger;
+#define INITIAL_MESSENGER_COUNT \
+ (sizeof (__hurd_startup_data->messengers) \
+ / sizeof (__hurd_startup_data->messengers[0]))
+
+static error_t
+slab_alloc (void *hook, size_t size, void **ptr)
+{
+ assert (size == PAGESIZE);
+
+ if (unlikely (initial_page < INITIAL_PAGE_COUNT))
+ {
+ *ptr = initial_pages[initial_page ++];
+ return 0;
+ }
+
+ struct storage storage = storage_alloc (meta_data_activity, cap_page,
+ STORAGE_LONG_LIVED,
+ OBJECT_POLICY_DEFAULT, ADDR_VOID);
+ if (ADDR_IS_VOID (storage.addr))
+ panic ("Out of space.");
+ *ptr = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+
+ return 0;
+}
+
+static error_t
+slab_dealloc (void *hook, void *buffer, size_t size)
+{
+ assert (size == PAGESIZE);
+
+ addr_t addr = addr_chop (PTR_TO_ADDR (buffer), PAGESIZE_LOG2);
+ storage_free (addr, false);
+
+ return 0;
+}
+
+static error_t
+slab_constructor (void *hook, void *object)
+{
+ struct hurd_message_buffer *mb = object;
+ assert (mb->magic == 0);
+ mb->magic = ~HURD_MESSAGE_BUFFER_MAGIC;
+
+ return 0;
+}
+
+static void
+slab_destructor (void *hook, void *object)
+{
+ struct hurd_message_buffer *mb = object;
+
+ if (mb->magic != HURD_MESSAGE_BUFFER_MAGIC)
+ /* It was never initialized. */
+ {
+ assert (mb->magic == ~HURD_MESSAGE_BUFFER_MAGIC);
+ return;
+ }
+
+ storage_free (mb->sender, false);
+ storage_free (addr_chop (PTR_TO_ADDR (mb->request), PAGESIZE_LOG2),
+ false);
+ storage_free (mb->receiver, false);
+ storage_free (addr_chop (PTR_TO_ADDR (mb->reply), PAGESIZE_LOG2),
+ false);
+}
+
+/* Message buffers are allocated from a slab. */
+static struct hurd_slab_space message_buffer_slab
+ = HURD_SLAB_SPACE_INITIALIZER (struct hurd_message_buffer,
+ slab_alloc, slab_dealloc,
+ slab_constructor, slab_destructor, NULL);
+
+
+static struct hurd_message_buffer *
+hurd_message_buffer_alloc_hard (void)
+{
+ void *buffer;
+ error_t err = hurd_slab_alloc (&message_buffer_slab, &buffer);
+ if (err)
+ panic ("Out of memory!");
+
+ struct hurd_message_buffer *mb = buffer;
+
+ if (mb->magic == HURD_MESSAGE_BUFFER_MAGIC)
+ /* It's already initialized. */
+ return mb;
+
+ assert (mb->magic == ~HURD_MESSAGE_BUFFER_MAGIC);
+ mb->magic = HURD_MESSAGE_BUFFER_MAGIC;
+
+ struct storage storage;
+
+ /* The send messenger. */
+ if (unlikely (initial_messenger < INITIAL_MESSENGER_COUNT))
+ mb->sender = __hurd_startup_data->messengers[initial_messenger ++];
+ else
+ {
+ storage = storage_alloc (meta_data_activity, cap_messenger,
+ STORAGE_LONG_LIVED,
+ OBJECT_POLICY_DEFAULT, ADDR_VOID);
+ if (ADDR_IS_VOID (storage.addr))
+ panic ("Out of space.");
+
+ mb->sender = storage.addr;
+ }
+
+ /* The receive messenger. */
+ if (unlikely (initial_messenger < INITIAL_MESSENGER_COUNT))
+ mb->receiver_strong = __hurd_startup_data->messengers[initial_messenger ++];
+ else
+ {
+ storage = storage_alloc (meta_data_activity, cap_messenger,
+ STORAGE_LONG_LIVED,
+ OBJECT_POLICY_DEFAULT, ADDR_VOID);
+ if (ADDR_IS_VOID (storage.addr))
+ panic ("Out of space.");
+
+ mb->receiver_strong = storage.addr;
+ }
+
+ /* Weaken it. */
+#if 0
+ mb->receiver = capalloc ();
+ struct cap receiver_cap = as_cap_lookup (mb->receiver_strong, cap_messenger,
+ NULL);
+ assert (receiver_cap.type == cap_messenger);
+ as_slot_lookup_use
+ (mb->receiver,
+ ({
+ bool ret = cap_copy_x (ADDR_VOID,
+ ADDR_VOID, slot, mb->receiver,
+ ADDR_VOID, receiver_cap, mb->receiver_strong,
+ CAP_COPY_WEAKEN,
+ CAP_PROPERTIES_VOID);
+ assert (ret);
+ }));
+#endif
+ mb->receiver = mb->receiver_strong;
+
+ /* The send buffer. */
+ if (unlikely (initial_page < INITIAL_PAGE_COUNT))
+ mb->request = (void *) &initial_pages[initial_page ++][0];
+ else
+ {
+ storage = storage_alloc (meta_data_activity, cap_page,
+ STORAGE_LONG_LIVED,
+ OBJECT_POLICY_DEFAULT, ADDR_VOID);
+ if (ADDR_IS_VOID (storage.addr))
+ panic ("Out of space.");
+
+ mb->request = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ }
+
+ /* And the receive buffer. */
+ if (unlikely (initial_page < INITIAL_PAGE_COUNT))
+ mb->reply = (void *) &initial_pages[initial_page ++][0];
+ else
+ {
+ storage = storage_alloc (meta_data_activity, cap_page,
+ STORAGE_LONG_LIVED,
+ OBJECT_POLICY_DEFAULT, ADDR_VOID);
+ if (ADDR_IS_VOID (storage.addr))
+ panic ("Out of space.");
+
+ mb->reply = ADDR_TO_PTR (addr_extend (storage.addr, 0, PAGESIZE_LOG2));
+ }
+
+
+ /* Now set the messengers' id. */
+ vg_messenger_id_receive_marshal (mb->reply);
+ vg_messenger_id_send_marshal (mb->request,
+ (uint64_t) (uintptr_t) mb,
+ mb->receiver);
+
+ /* Set the reply messenger's id first as the activation handler
+ requires that it be set correctly. This will do that just before
+ the reply is sent. */
+ hurd_activation_message_register (mb);
+ err = vg_ipc_full (VG_IPC_RECEIVE | VG_IPC_SEND | VG_IPC_RECEIVE_ACTIVATE
+ | VG_IPC_RECEIVE_SET_THREAD_TO_CALLER
+ | VG_IPC_SEND_SET_THREAD_TO_CALLER,
+ ADDR_VOID, mb->receiver, PTR_TO_PAGE (mb->reply),
+ ADDR_VOID,
+ ADDR_VOID, mb->receiver,
+ mb->sender, PTR_TO_PAGE (mb->request),
+ 0, 0, ADDR_VOID);
+ if (err)
+ panic ("Failed to set receiver's id");
+
+ err = vg_messenger_id_reply_unmarshal (mb->reply, NULL);
+ if (err)
+ panic ("Setting receiver's id: %d", err);
+
+ hurd_activation_message_register (mb);
+ err = vg_ipc_full (VG_IPC_RECEIVE | VG_IPC_SEND | VG_IPC_RECEIVE_ACTIVATE,
+ ADDR_VOID, mb->receiver, PTR_TO_PAGE (mb->reply),
+ ADDR_VOID,
+ ADDR_VOID, mb->sender,
+ mb->sender, PTR_TO_PAGE (mb->request),
+ 0, 0, ADDR_VOID);
+ if (err)
+ panic ("Failed to set sender's id");
+
+ err = vg_messenger_id_reply_unmarshal (mb->reply, NULL);
+ if (err)
+ panic ("Setting sender's id: %d", err);
+
+ return mb;
+}
+
+static struct hurd_message_buffer *buffers;
+static int buffers_count;
+
+static void
+hurd_message_buffer_free_internal (struct hurd_message_buffer *buffer,
+ bool already_accounted)
+{
+ /* XXX We should perhaps free some buffers if we go over a high
+ water mark. */
+ // hurd_slab_dealloc (&message_buffer_slab, buffer);
+
+ /* Add BUFFER to the free list. */
+ for (;;)
+ {
+ buffer->next = buffers;
+ if (__sync_val_compare_and_swap (&buffers, buffer->next, buffer)
+ == buffer->next)
+ {
+ if (! already_accounted)
+ __sync_fetch_and_add (&buffers_count, 1);
+ return;
+ }
+ }
+}
+
+void
+hurd_message_buffer_free (struct hurd_message_buffer *buffer)
+{
+ hurd_message_buffer_free_internal (buffer, false);
+}
+
+#define BUFFERS_LOW_WATER 4
+
+struct hurd_message_buffer *
+hurd_message_buffer_alloc (void)
+{
+ struct hurd_message_buffer *mb;
+ do
+ {
+#if 0
+ if (likely (mm_init_done)
+ && unlikely (buffers_count <= BUFFERS_LOW_WATER))
+ {
+ int i = BUFFERS_LOW_WATER;
+ mb = hurd_message_buffer_alloc_hard ();
+
+ if (buffers_count == BUFFERS_LOW_WATER)
+ return mb;
+
+ hurd_message_buffer_free_internal (buffer, true);
+ }
+#endif
+
+ mb = buffers;
+ if (! mb)
+ {
+ mb = hurd_message_buffer_alloc_hard ();
+ return mb;
+ }
+ }
+ while (__sync_val_compare_and_swap (&buffers, mb, mb->next) != mb);
+ __sync_fetch_and_add (&buffers_count, -1);
+
+ return mb;
+}
+
+struct hurd_message_buffer *
+hurd_message_buffer_alloc_long (void)
+{
+ return hurd_message_buffer_alloc_hard ();
+}
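
hurd_message_buffer_free_internal and hurd_message_buffer_alloc above keep the free buffers on a lock-free stack built with GCC's __sync_val_compare_and_swap. Stripped of the buffer-specific details, the push/pop pattern is the following; this standalone sketch is only meant to make the retry loops easier to read and is not part of the patch.

    #include <stddef.h>

    struct node { struct node *next; };
    static struct node *free_list;

    static void
    push (struct node *n)
    {
      for (;;)
        {
          n->next = free_list;
          /* Succeeds only if no other thread changed the head meanwhile.  */
          if (__sync_val_compare_and_swap (&free_list, n->next, n) == n->next)
            return;
        }
    }

    static struct node *
    pop (void)
    {
      struct node *n;
      do
        {
          n = free_list;
          if (! n)
            return NULL;
        }
      while (__sync_val_compare_and_swap (&free_list, n, n->next) != n);
      return n;
    }
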
diff --git a/libhurd-mm/message-buffer.h b/libhurd-mm/message-buffer.h
new file mode 100644
index 0000000..21e44b4
--- /dev/null
+++ b/libhurd-mm/message-buffer.h
@@ -0,0 +1,80 @@
+/* message-buffer.h - Interface for managing messaging data structures.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef __have_hurd_message_buffer
+# define __have_hurd_message_buffer
+
+# include <stdint.h>
+# include <stdbool.h>
+# include <hurd/addr.h>
+
+/* Forward. */
+struct vg_message;
+
+#define HURD_MESSAGE_BUFFER_MAGIC 0x111A61C
+
+struct hurd_message_buffer
+{
+ uintptr_t magic;
+
+ struct hurd_message_buffer *next;
+
+ /* A messenger associated with REQUEST. The messenger's identifier is
+ set to the data structure's address. */
+ addr_t sender;
+ struct vg_message *request;
+ /* A messenger associated with REPLY. The messenger's identifier is
+ set to the data structure's address. */
+ addr_t receiver_strong;
+ /* A weakened version. */
+ addr_t receiver;
+ struct vg_message *reply;
+
+ /* If not NULL, then this routine is called. */
+ void (*closure) (struct hurd_message_buffer *mb);
+
+ /* XXX: Whether the activation should resume the thread or simply
+ free the buffer. Ignored if CLOSURE is not NULL. */
+ bool just_free;
+
+ void *cookie;
+};
+
+#endif /* __have_hurd_message_buffer */
+
+#ifdef __need_hurd_message_buffer
+# undef __need_hurd_message_buffer
+#else
+
+# ifndef _HURD_MESSAGE_BUFFER
+# define _HURD_MESSAGE_BUFFER
+
+/* Allocate a message buffer. */
+extern struct hurd_message_buffer *hurd_message_buffer_alloc (void);
+
+/* Allocate a message buffer, which is unlikely to be freed soon. */
+extern struct hurd_message_buffer *hurd_message_buffer_alloc_long (void);
+
+/* Free a message buffer. */
+extern void hurd_message_buffer_free (struct hurd_message_buffer *buf);
+
+# endif /* _HURD_MESSAGE_BUFFER */
+
+#endif /* !__need_hurd_message_buffer */
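
As a usage sketch, based only on the declarations above and the closure dispatch visible in hurd_activation_handler_normal earlier in this patch: a caller that wants a reply handled asynchronously can allocate a buffer, point CLOSURE at a completion routine and stash context in COOKIE. The marshalling of REQUEST and the vg_ipc call that would actually send it are deliberately omitted, as their details are not given here.

    #include <hurd/message-buffer.h>

    static void
    on_reply (struct hurd_message_buffer *mb)
    {
      /* Called from hurd_activation_handler_normal when the reply
         arrives.  Examine mb->reply, use mb->cookie, then release.  */
      hurd_message_buffer_free (mb);
    }

    static struct hurd_message_buffer *
    example_async_request (void *context)
    {
      struct hurd_message_buffer *mb = hurd_message_buffer_alloc ();
      mb->closure = on_reply;
      mb->cookie = context;
      /* ... marshal into mb->request and send it via mb->sender ... */
      return mb;
    }
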
diff --git a/libhurd-mm/mm-init.c b/libhurd-mm/mm-init.c
index 8604568..ea12898 100644
--- a/libhurd-mm/mm-init.c
+++ b/libhurd-mm/mm-init.c
@@ -26,6 +26,12 @@
#include <hurd/startup.h>
#include <hurd/exceptions.h>
+#ifdef i386
+#include <hurd/pager.h>
+#endif
+
+#include <backtrace.h>
+
#include "storage.h"
#include "as.h"
@@ -48,9 +54,198 @@ mm_init (addr_t activity)
else
meta_data_activity = activity;
+ hurd_activation_handler_init_early ();
storage_init ();
as_init ();
- exception_handler_init ();
+ hurd_activation_handler_init ();
mm_init_done = 1;
+
+#ifndef NDEBUG
+ /* The following test checks the activation trampoline. In
+ particular, it checks that the register file before a fault
+ matches the register file after a fault. This is interesting
+ because such an activation is handled in normal mode. That
+ means, when the fault occurs, we enter the activation handler,
+ return an activation frame, enter normal mode, execute the normal
+ mode activation handler, call the callback functions, and then
+ return to the interrupted code. */
+#ifdef i386
+ void test (int nesting)
+ {
+ addr_t addr = as_alloc (PAGESIZE_LOG2, 1, true);
+ void *a = ADDR_TO_PTR (addr_extend (addr, 0, PAGESIZE_LOG2));
+
+ int recursed = false;
+
+ struct storage storage;
+ bool fault (struct pager *pager,
+ uintptr_t offset, int count, bool ro,
+ uintptr_t fault_addr, uintptr_t ip,
+ struct activation_fault_info info)
+ {
+ assert (a == (void *) (fault_addr & ~(PAGESIZE - 1)));
+ assert (count == 1);
+
+ struct vg_utcb *utcb = hurd_utcb ();
+ struct activation_frame *activation_frame = utcb->activation_stack;
+ debug (4, "Fault at %p (ip: %p, sp: %p, eax: %p, "
+ "ebx: %p, ecx: %p, edx: %p, edi: %p, esi: %p, ebp: %p, "
+ "eflags: %p)",
+ (void *) fault_addr,
+ (void *) activation_frame->eip,
+ (void *) activation_frame->esp,
+ (void *) activation_frame->eax,
+ (void *) activation_frame->ebx,
+ (void *) activation_frame->ecx,
+ (void *) activation_frame->edx,
+ (void *) activation_frame->edi,
+ (void *) activation_frame->esi,
+ (void *) activation_frame->ebp,
+ (void *) activation_frame->eflags);
+
+ assert (activation_frame->eax == 0xa);
+ assert (activation_frame->ebx == 0xb);
+ assert (activation_frame->ecx == 0xc);
+ assert (activation_frame->edx == 0xd);
+ assert (activation_frame->edi == 0xd1);
+ assert (activation_frame->esi == 0x21);
+ assert (activation_frame->ebp == (uintptr_t) a);
+ /* We cannot easily check esp and eip here. */
+
+ as_ensure (addr);
+ storage = storage_alloc (ADDR_VOID,
+ cap_page, STORAGE_UNKNOWN,
+ OBJECT_POLICY_DEFAULT,
+ addr);
+
+ if (nesting > 1 && ! recursed)
+ {
+ recursed = true;
+
+ int i;
+ for (i = 0; i < 3; i ++)
+ {
+ debug (5, "Depth: %d; iter: %d", nesting - 1, i);
+ test (nesting - 1);
+ debug (5, "Depth: %d; iter: %d done", nesting - 1, i);
+ }
+ }
+
+ return true;
+ }
+
+ struct pager pager = PAGER_VOID;
+ pager.length = PAGESIZE;
+ pager.fault = fault;
+ pager_init (&pager);
+
+ struct region region = { (uintptr_t) a, PAGESIZE };
+ struct map *map = map_create (region, MAP_ACCESS_ALL, &pager, 0, NULL);
+
+ uintptr_t pre_flags, pre_esp;
+ uintptr_t eax, ebx, ecx, edx, edi, esi, ebp, esp, flags;
+ uintptr_t canary;
+
+ /* Check that the trampoline works. */
+ __asm__ __volatile__
+ (
+ "mov %%esp, %[pre_esp]\n\t"
+ "pushf\n\t"
+ "pop %%eax\n\t"
+ "mov %%eax, %[pre_flags]\n\t"
+
+ /* Canary. */
+ "pushl $0xcab00d1e\n\t"
+
+ "pushl %%ebp\n\t"
+
+ "mov $0xa, %%eax\n\t"
+ "mov $0xb, %%ebx\n\t"
+ "mov $0xc, %%ecx\n\t"
+ "mov $0xd, %%edx\n\t"
+ "mov $0xd1, %%edi\n\t"
+ "mov $0x21, %%esi\n\t"
+ "mov %[addr], %%ebp\n\t"
+ /* Fault! */
+ "mov %%eax, 0(%%ebp)\n\t"
+
+ /* Save the current ebp. */
+ "pushl %%ebp\n\t"
+ /* Restore the old ebp. */
+ "mov 4(%%esp), %%ebp\n\t"
+
+ /* Save the rest of the GP registers. */
+ "mov %%eax, %[eax]\n\t"
+ "mov %%ebx, %[ebx]\n\t"
+ "mov %%ecx, %[ecx]\n\t"
+ "mov %%edx, %[edx]\n\t"
+ "mov %%edi, %[edi]\n\t"
+ "mov %%esi, %[esi]\n\t"
+
+ /* Save the new flags. */
+ "pushf\n\t"
+ "pop %%eax\n\t"
+ "mov %%eax, %[flags]\n\t"
+
+ /* Save the new ebp. */
+ "mov 0(%%esp), %%eax\n\t"
+ "mov %%eax, %[ebp]\n\t"
+
+ /* Fix up the stack. */
+ "add $8, %%esp\n\t"
+
+ /* Grab the canary. */
+ "popl %%eax\n\t"
+ "mov %%eax, %[canary]\n\t"
+
+ /* And don't forget to save the new esp. */
+ "mov %%esp, %[esp]\n\t"
+
+ : [eax] "=m" (eax), [ebx] "=m" (ebx),
+ [ecx] "=m" (ecx), [edx] "=m" (edx),
+ [edi] "=m" (edi), [esi] "=m" (esi), [ebp] "=m" (ebp),
+ [pre_esp] "=m" (pre_esp), [esp] "=m" (esp),
+ [pre_flags] "=m" (pre_flags), [flags] "=m" (flags),
+ [canary] "=m" (canary)
+ : [addr] "m" (a)
+ : "%eax", "%ebx", "%ecx", "%edx", "%edi", "%esi");
+
+ debug (4, "Regsiter file: "
+ "eax: %p, ebx: %p, ecx: %p, edx: %p, "
+ "edi: %p, esi: %p, ebp: %p -> %p, esp: %p -> %p, flags: %p -> %p",
+ (void *) eax, (void *) ebx, (void *) ecx, (void *) edx,
+ (void *) edi, (void *) esi, (void *) a, (void *) ebp,
+ (void *) pre_esp, (void *) esp,
+ (void *) pre_flags, (void *) flags);
+
+ assert (eax == 0xa);
+ assert (ebx == 0xb);
+ assert (ecx == 0xc);
+ assert (edx == 0xd);
+ assert (edi == 0xd1);
+ assert (esi == 0x21);
+ assert (ebp == (uintptr_t) a);
+ assert (esp == pre_esp);
+ assert (flags == pre_flags);
+ assert (canary == 0xcab00d1e);
+
+ maps_lock_lock ();
+ map_disconnect (map);
+ maps_lock_unlock ();
+ map_destroy (map);
+
+ storage_free (storage.addr, false);
+ as_free (addr, 1);
+ }
+
+ int i;
+ for (i = 0; i < 3; i ++)
+ {
+ debug (5, "Depth: %d; iter: %d", 3, i + 1);
+ test (3);
+ debug (5, "Depth: %d; iter: %d done", 3, i + 1);
+ }
+#endif
+#endif
}
diff --git a/libhurd-mm/pager.h b/libhurd-mm/pager.h
index 6d4c517..66b75ef 100644
--- a/libhurd-mm/pager.h
+++ b/libhurd-mm/pager.h
@@ -36,7 +36,7 @@ struct pager;
typedef bool (*pager_fault_t) (struct pager *pager,
uintptr_t offset, int count, bool ro,
uintptr_t fault_addr, uintptr_t ip,
- struct exception_info info);
+ struct activation_fault_info info);
/* The count sub-trees starting at ADDR are no longer referenced and
their associated storage may be reclaimed. */
@@ -81,8 +81,11 @@ struct pager
pager_advise_t advise;
};
-/* Initialize the pager. LENGTH and FAULT must be set
- appropriately. */
+#define PAGER_VOID { NULL, 0, 0, NULL, NULL, NULL }
+
+/* Initialize the pager. All fields must be set appropriately. After
+ calling this function, LENGTH and FAULT may no longer be
+ changed. */
extern bool pager_init (struct pager *pager);
/* Deinitialize the pager PAGER, destroying all the mappings in the
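
As a usage sketch (distilled from the mm-init.c test hunk above; my_fault and
map_one_page are placeholder names, the fault callback body and the teardown
via map_disconnect/map_destroy are omitted, and VA stands for the virtual
address the caller obtained from as_alloc):

    static bool
    my_fault (struct pager *pager, uintptr_t offset, int count, bool ro,
              uintptr_t fault_addr, uintptr_t ip,
              struct activation_fault_info info)
    {
      /* Provide backing storage for the faulting page, then resume.  */
      return true;
    }

    static struct pager pager = PAGER_VOID;   /* All fields start out clear.  */

    static void
    map_one_page (void *va)
    {
      pager.length = PAGESIZE;   /* Size of the paged region.  */
      pager.fault = my_fault;    /* LENGTH and FAULT are fixed once pager_init runs.  */
      pager_init (&pager);

      struct region region = { (uintptr_t) va, PAGESIZE };
      struct map *map = map_create (region, MAP_ACCESS_ALL, &pager, 0, NULL);
      (void) map;
    }
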
diff --git a/libhurd-mm/storage.c b/libhurd-mm/storage.c
index d6d21a1..cfdb6ed 100644
--- a/libhurd-mm/storage.c
+++ b/libhurd-mm/storage.c
@@ -28,6 +28,7 @@
#include <hurd/startup.h>
#include <hurd/rm.h>
#include <hurd/mutex.h>
+#include <backtrace.h>
#ifndef NDEBUG
struct ss_lock_trace ss_lock_trace[SS_LOCK_TRACE_COUNT];
@@ -255,7 +256,7 @@ shadow_setup (struct cap *cap, struct storage_desc *desc)
error_t err = rm_folio_object_alloc (meta_data_activity,
desc->folio, idx, cap_page,
OBJECT_POLICY_DEFAULT, 0,
- ADDR_VOID, ADDR_VOID);
+ NULL, NULL);
assert (err == 0);
shadow = ADDR_TO_PTR (addr_extend (addr_extend (desc->folio,
idx, FOLIO_OBJECTS_LOG2),
@@ -331,7 +332,7 @@ static bool storage_init_done;
soon have a problem. In this case, we serialize access to the pool
of available pages to allow some thread that is able to allocate
more pages the chance to do so. */
-#define FREE_PAGES_SERIALIZE 16
+#define FREE_PAGES_SERIALIZE 32
static pthread_mutex_t storage_low_mutex
= PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
@@ -453,8 +454,11 @@ storage_check_reserve_internal (bool force_allocate,
}
/* And then the folio. */
- error_t err = rm_folio_alloc (activity, addr, FOLIO_POLICY_DEFAULT);
+ addr_t a = addr;
+ error_t err = rm_folio_alloc (activity, activity, FOLIO_POLICY_DEFAULT,
+ &a);
assert (! err);
+ assert (ADDR_EQ (addr, a));
/* Allocate and fill a descriptor. */
struct storage_desc *s = storage_desc_alloc ();
@@ -524,10 +528,18 @@ storage_alloc (addr_t activity,
struct storage_desc *desc;
bool do_allocate = false;
+ int tries = 0;
do
{
- storage_check_reserve_internal (do_allocate, activity, expectancy,
- true);
+ if (tries ++ == 5)
+ {
+ backtrace_print ();
+ debug (0, "Failing to get storage (free count: %d). Live lock?",
+ free_count);
+ }
+
+ storage_check_reserve_internal (do_allocate, meta_data_activity,
+ expectancy, true);
/* Find an appropriate storage area. */
struct storage_desc *pluck (struct storage_desc *list)
@@ -594,9 +606,10 @@ storage_alloc (addr_t activity,
addr_t folio = desc->folio;
addr_t object = addr_extend (folio, idx, FOLIO_OBJECTS_LOG2);
- debug (5, "Allocating object %d from " ADDR_FMT " (" ADDR_FMT ") "
- "(%d left), copying to " ADDR_FMT,
- idx, ADDR_PRINTF (folio), ADDR_PRINTF (object),
+ debug (5, "Allocating object %d as %s from " ADDR_FMT " (" ADDR_FMT ") "
+ "(%d left), installing at " ADDR_FMT,
+ idx, cap_type_string (type),
+ ADDR_PRINTF (folio), ADDR_PRINTF (object),
desc->free, ADDR_PRINTF (addr));
atomic_decrement (&free_count);
@@ -621,13 +634,13 @@ storage_alloc (addr_t activity,
ss_mutex_unlock (&storage_descs_lock);
}
- error_t err = rm_folio_object_alloc (activity,
- folio, idx, type,
- policy, 0,
- addr, ADDR_VOID);
+ addr_t a = addr;
+ error_t err = rm_folio_object_alloc (activity, folio, idx, type, policy, 0,
+ &a, NULL);
assertx (! err,
"Allocating object %d from " ADDR_FMT " at " ADDR_FMT ": %d!",
idx, ADDR_PRINTF (folio), ADDR_PRINTF (addr), err);
+ assert (ADDR_EQ (a, addr));
struct object *shadow = desc->shadow;
struct cap *cap = NULL;
@@ -686,6 +699,8 @@ storage_alloc (addr_t activity,
void
storage_free_ (addr_t object, bool unmap_now)
{
+ debug (5, DEBUG_BOLD ("Freeing " ADDR_FMT), ADDR_PRINTF (object));
+
addr_t folio = addr_chop (object, FOLIO_OBJECTS_LOG2);
atomic_increment (&free_count);
@@ -697,7 +712,7 @@ storage_free_ (addr_t object, bool unmap_now)
storage = hurd_btree_storage_desc_find (&storage_descs, &folio);
assertx (storage,
"No storage associated with " ADDR_FMT " "
- "(did you pass the storage address?",
+ "(did you pass the storage address?)",
ADDR_PRINTF (object));
ss_mutex_lock (&storage->lock);
@@ -784,7 +799,7 @@ storage_free_ (addr_t object, bool unmap_now)
error_t err = rm_folio_object_alloc (meta_data_activity,
folio, idx, cap_void,
OBJECT_POLICY_DEFAULT, 0,
- ADDR_VOID, ADDR_VOID);
+ NULL, NULL);
assert (err == 0);
if (likely (!! shadow))
@@ -884,7 +899,7 @@ storage_init (void)
ss_mutex_unlock (&storage_descs_lock);
debug (1, "%d folios, %d objects used, %d free objects",
- folio_count, __hurd_startup_data->desc_count, free_count);
+ folio_count, __hurd_startup_data->desc_count, (int) free_count);
storage_init_done = true;
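
For reference, the new folio RPC calling convention used throughout this
commit passes the target address by pointer and receives the address actually
used back in the same variable; a sketch (ACTIVITY, FOLIO, IDX and ADDR are
placeholders for values the caller already has):

    addr_t a = addr;
    error_t err = rm_folio_alloc (activity, activity, FOLIO_POLICY_DEFAULT, &a);
    assert (! err);
    assert (ADDR_EQ (a, addr));          /* The same address came back.  */

    a = addr;
    err = rm_folio_object_alloc (activity, folio, idx, cap_page,
                                 OBJECT_POLICY_DEFAULT, 0, &a, NULL);
    assert (! err);
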
diff --git a/libpthread/ChangeLog b/libpthread/ChangeLog
index 0c7540c..cd6013f 100644
--- a/libpthread/ChangeLog
+++ b/libpthread/ChangeLog
@@ -1,5 +1,58 @@
2008-12-11 Neal H. Walfield <neal@gnu.org>
+ Update to new RPC interfaces, IPC semantics.
+
+ * sysdeps/viengoos/bits/pthread-np.h: Include <hurd/exceptions.h>.
+ (pthread_hurd_utcb_np): New declaration.
+ * sysdeps/viengoos/pt-hurd-utcb-np.c: New file.
+ * Makefile.am (libpthread_a_SOURCES): Add pt-hurd-utcb-np.c.
+
+ * sysdeps/viengoos/pt-sysdep.h (EXCEPTION_AREA_SIZE): Don't
+ define.
+ (EXCEPTION_AREA_SIZE_LOG2): Likewise.
+ (EXCEPTION_PAGE): Likewise.
+ (PTHREAD_SYSDEP_MEMBERS): Remove fields exception_area and
+ exception_area_va. Add fields utcb and lock_message_buffer.
+ * sysdeps/viengoos/pt-thread-alloc.c: Include
+ <hurd/message-buffer.h>.
+ (__pthread_thread_alloc): Initialize thread->lock_message_buffer.
+ When executed the first time, set the thread's L4 user-defined
+ handler. Initialize THREAD->UTCB with the thread's current utcb.
+ Set HURD_UTCB to PTHREAD_HURD_UTCB_NP. For subsequent threads,
+ don't manually set up the activation area. Instead, call
+ hurd_activation_state_alloc.
+ * sysdeps/viengoos/pt-thread-dealloc.c: Include
+ <hurd/message-buffer.h>.
+ (__pthread_thread_dealloc): Call __pthread_thread_halt. Don't
+ manually clean up the activation area. Instead, call
+ hurd_activation_state_free. Free THREAD->LOCK_MESSAGE_BUFFER.
+ * sysdeps/viengoos/ia32/pt-setup.c (stack_setup): Pre-fault the
+ first four pages of the new stack.
+ (__pthread_setup): Don't set up the activation area.
+
+ * sysdeps/viengoos/pt-wakeup.c (__pthread_wakeup): Use
+ futex_wake_using with the calling thread's lock messenger.
+ * sysdeps/viengoos/pt-block.c (__pthread_block): Use
+ futex_wait_using and provide THREAD->LOCK_MESSAGE_BUFFER as the
+ message buffer.
+
+ * sysdeps/viengoos/pt-thread-start.c (__pthread_thread_start):
+ Don't set the first thread's L4 user-defined handler here.
+ (__pthread_thread_start): Update use of rm_thread_exregs to be
+ consistent with the new interface.
+ * sysdeps/viengoos/pt-thread-halt.c (__pthread_thread_halt): If
+ THREAD is the current thread, call vg_suspend.
+
+ * sysdeps/viengoos/pt-setactivity-np.c (pthread_setactivity_np):
+ Update use of rm_thread_exregs to be consistent with the new
+ interface.
+ * sysdeps/viengoos/ia32/signal-dispatch-lowlevel.c
+ (signal_dispatch_lowlevel): Use __builtin_frame_address to get the
+ current stack frame's start. Update use of rm_thread_exregs to be
+ consistent with the new interface.
+
+2008-12-11 Neal H. Walfield <neal@gnu.org>
+
* sysdeps/generic/bits/spin-lock-inline.h: New file.
2008-12-11 Neal H. Walfield <neal@gnu.org>
diff --git a/libpthread/Makefile.am b/libpthread/Makefile.am
index a4d226d..4e820de 100644
--- a/libpthread/Makefile.am
+++ b/libpthread/Makefile.am
@@ -135,6 +135,7 @@ libpthread_a_SOURCES = pt-attr.c pt-attr-destroy.c pt-attr-getdetachstate.c \
sem-post.c sem-unlink.c \
\
pt-setactivity-np.c \
+ pt-hurd-utcb-np.c \
\
kill.c \
killpg.c \
diff --git a/libpthread/sysdeps/viengoos/bits/pthread-np.h b/libpthread/sysdeps/viengoos/bits/pthread-np.h
index a90793d..aea3bab 100644
--- a/libpthread/sysdeps/viengoos/bits/pthread-np.h
+++ b/libpthread/sysdeps/viengoos/bits/pthread-np.h
@@ -25,7 +25,11 @@
#define _BITS_PTHREAD_NP_H 1
#include <hurd/addr.h>
+#include <hurd/exceptions.h>
int pthread_setactivity_np (addr_t activity);
+/* Returns the caller's activation state block. */
+struct vg_utcb *pthread_hurd_utcb_np (void) __attribute__ ((pure));
+
#endif /* bits/pthread-np.h */
diff --git a/libpthread/sysdeps/viengoos/ia32/pt-setup.c b/libpthread/sysdeps/viengoos/ia32/pt-setup.c
index 579905c..88a60e8 100644
--- a/libpthread/sysdeps/viengoos/ia32/pt-setup.c
+++ b/libpthread/sysdeps/viengoos/ia32/pt-setup.c
@@ -73,7 +73,7 @@ stack_setup (struct __pthread *thread,
top = (uintptr_t *) ((uintptr_t) thread->stackaddr + thread->stacksize);
/* Align on 0x10 for MMX operations. */
- top = (uintptr_t) top & ~0xf;
+ top = (uintptr_t *) ((uintptr_t) top & ~0xf);
if (start_routine)
{
@@ -84,6 +84,12 @@ stack_setup (struct __pthread *thread,
*--top = (uintptr_t) entry_point;
}
+ /* We need 4 pages of stack to avoid faulting before we have set up
+ the activation area. Make it so. */
+ int i;
+ for (i = 1; i < 4; i ++)
+ ((char *) top)[-i * PAGESIZE] = 0;
+
return top;
}
@@ -95,23 +101,5 @@ __pthread_setup (struct __pthread *thread,
thread->mcontext.pc = (void *) &_pthread_entry_point;
thread->mcontext.sp = (void *) stack_setup (thread, start_routine, arg,
entry_point);
-
- if (__pthread_num_threads == 1)
- return 0;
-
- assert (! ADDR_IS_VOID (thread->exception_area[0]));
-
- struct exception_page *exception_page = thread->exception_area_va;
-
- /* SP is set to the end of the exception area minus one word, which
- is the location of the exception page. */
- exception_page->exception_handler_sp
- = (uintptr_t) thread->exception_area_va + EXCEPTION_AREA_SIZE;
- exception_page->exception_handler_sp -= sizeof (void *);
- * (void **) exception_page->exception_handler_sp = thread->exception_area_va;
-
- exception_page->exception_handler_ip = (uintptr_t) &exception_handler_entry;
- exception_page->exception_handler_end = (uintptr_t) &exception_handler_end;
-
return 0;
}
diff --git a/libpthread/sysdeps/viengoos/ia32/signal-dispatch-lowlevel.c b/libpthread/sysdeps/viengoos/ia32/signal-dispatch-lowlevel.c
index 37ef821..8dd9be7 100644
--- a/libpthread/sysdeps/viengoos/ia32/signal-dispatch-lowlevel.c
+++ b/libpthread/sysdeps/viengoos/ia32/signal-dispatch-lowlevel.c
@@ -113,8 +113,9 @@ signal_dispatch_lowlevel (struct signal_state *ss, pthread_t tid,
if (self)
{
- /* The return address is just before the first argument. */
- intr_sp = (uintptr_t) &ss - 4;
+ /* The return address is stored 4 bytes from the start of the
+ stack frame. */
+ intr_sp = (uintptr_t) __builtin_frame_address (0) + 4;
assert (* (void **) intr_sp == __builtin_return_address (0));
}
else
@@ -127,7 +128,8 @@ signal_dispatch_lowlevel (struct signal_state *ss, pthread_t tid,
err = rm_thread_exregs (ADDR_VOID, thread->object,
HURD_EXREGS_STOP | HURD_EXREGS_ABORT_IPC
| HURD_EXREGS_GET_REGS,
- in, &out);
+ in, ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ &out, NULL, NULL, NULL, NULL);
if (err)
panic ("Failed to modify thread " ADDR_FMT,
ADDR_PRINTF (thread->object));
@@ -208,6 +210,7 @@ signal_dispatch_lowlevel (struct signal_state *ss, pthread_t tid,
rm_thread_exregs (ADDR_VOID, thread->object,
HURD_EXREGS_SET_SP_IP
| HURD_EXREGS_START | HURD_EXREGS_ABORT_IPC,
- in, &out);
+ in, ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ &out, NULL, NULL, NULL, NULL);
}
}
diff --git a/libpthread/sysdeps/viengoos/pt-block.c b/libpthread/sysdeps/viengoos/pt-block.c
index 2315b1c..548cc3f 100644
--- a/libpthread/sysdeps/viengoos/pt-block.c
+++ b/libpthread/sysdeps/viengoos/pt-block.c
@@ -26,5 +26,17 @@
void
__pthread_block (struct __pthread *thread)
{
- futex_wait (&thread->threadid, thread->threadid);
+ assert (thread->lock_message_buffer);
+
+ struct hurd_message_buffer *mb = thread->lock_message_buffer;
+#ifndef NDEBUG
+ /* Try to detect recursive locks, which we don't handle. */
+ thread->lock_message_buffer = NULL;
+#endif
+
+ futex_wait_using (mb, &thread->threadid, thread->threadid);
+
+#ifndef NDEBUG
+ thread->lock_message_buffer = mb;
+#endif
}
diff --git a/libpthread/sysdeps/viengoos/pt-hurd-utcb-np.c b/libpthread/sysdeps/viengoos/pt-hurd-utcb-np.c
new file mode 100644
index 0000000..b3a76f4
--- /dev/null
+++ b/libpthread/sysdeps/viengoos/pt-hurd-utcb-np.c
@@ -0,0 +1,28 @@
+/* pt-hurd-utcb-np.c: Return the calling thread's utcb.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <pt-internal.h>
+
+struct vg_utcb *
+pthread_hurd_utcb_np (void)
+{
+ struct __pthread *thread = _pthread_self ();
+ return thread->utcb;
+}
+
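
A sketch of how this hook ends up being used (mirroring the pt-thread-alloc.c
hunk below, which suggests that hurd_utcb is an overridable function
pointer):

    thread->utcb = hurd_utcb ();        /* Stash the current UTCB.  */
    hurd_utcb = pthread_hurd_utcb_np;   /* Later lookups go through pthread.  */
    assert (hurd_utcb () == thread->utcb);
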
diff --git a/libpthread/sysdeps/viengoos/pt-setactivity-np.c b/libpthread/sysdeps/viengoos/pt-setactivity-np.c
index f2f0723..efcc9b7 100644
--- a/libpthread/sysdeps/viengoos/pt-setactivity-np.c
+++ b/libpthread/sysdeps/viengoos/pt-setactivity-np.c
@@ -28,12 +28,11 @@ pthread_setactivity_np (addr_t activity)
struct __pthread *self = _pthread_self ();
struct hurd_thread_exregs_in in;
- in.activity = activity;
-
struct hurd_thread_exregs_out out;
int err = rm_thread_exregs (ADDR_VOID, self->object,
HURD_EXREGS_SET_ACTIVITY,
- in, &out);
+ in, ADDR_VOID, activity, ADDR_VOID, ADDR_VOID,
+ &out, NULL, NULL, NULL, NULL);
return err;
}
diff --git a/libpthread/sysdeps/viengoos/pt-sysdep.h b/libpthread/sysdeps/viengoos/pt-sysdep.h
index 5e9fabd..0733640 100644
--- a/libpthread/sysdeps/viengoos/pt-sysdep.h
+++ b/libpthread/sysdeps/viengoos/pt-sysdep.h
@@ -31,20 +31,16 @@
#define PTHREAD_STACK_DEFAULT (2 * 1024 * 1024)
#include <hurd/exceptions.h>
-
-#define EXCEPTION_AREA_SIZE EXCEPTION_STACK_SIZE
-#define EXCEPTION_AREA_SIZE_LOG2 EXCEPTION_STACK_SIZE_LOG2
-/* The exception page is the first object. */
-#define EXCEPTION_PAGE 0
-
-#define PTHREAD_SYSDEP_MEMBERS \
- addr_t object; \
- l4_thread_id_t threadid; \
- addr_t exception_area[EXCEPTION_AREA_SIZE / PAGESIZE]; \
- void *exception_area_va; \
- /* If the above four fields are valid. */ \
- bool have_kernel_resources; \
- l4_word_t my_errno;
+#include <hurd/message-buffer.h>
+
+#define PTHREAD_SYSDEP_MEMBERS \
+ addr_t object; \
+ vg_thread_id_t threadid; \
+ struct vg_utcb *utcb; \
+ struct hurd_message_buffer *lock_message_buffer; \
+ /* If the above fields are valid. */ \
+ bool have_kernel_resources; \
+ uintptr_t my_errno;
extern inline struct __pthread *
__attribute__((__always_inline__))
diff --git a/libpthread/sysdeps/viengoos/pt-thread-alloc.c b/libpthread/sysdeps/viengoos/pt-thread-alloc.c
index 11af1d0..3cc02be 100644
--- a/libpthread/sysdeps/viengoos/pt-thread-alloc.c
+++ b/libpthread/sysdeps/viengoos/pt-thread-alloc.c
@@ -25,6 +25,7 @@
#include <hurd/storage.h>
#include <hurd/as.h>
#include <hurd/addr.h>
+#include <hurd/message-buffer.h>
#include <pt-internal.h>
@@ -38,45 +39,24 @@ __pthread_thread_alloc (struct __pthread *thread)
if (thread->have_kernel_resources)
return 0;
+ thread->lock_message_buffer = hurd_message_buffer_alloc_long ();
/* The main thread is already running of course. */
if (__pthread_num_threads == 1)
{
thread->object = __hurd_startup_data->thread;
thread->threadid = l4_myself ();
+
+ l4_set_user_defined_handle ((l4_word_t) thread);
+
+ /* Get the thread's UTCB and stash it. */
+ thread->utcb = hurd_utcb ();
+ /* Override the utcb fetch function. */
+ hurd_utcb = pthread_hurd_utcb_np;
+ assert (thread->utcb == hurd_utcb ());
}
else
{
- addr_t exception_area = as_alloc (EXCEPTION_AREA_SIZE_LOG2, 1, true);
-
- thread->exception_area_va
- = ADDR_TO_PTR (addr_extend (exception_area,
- 0, EXCEPTION_AREA_SIZE_LOG2));
-
- int i;
- for (i = 0; i < EXCEPTION_AREA_SIZE / PAGESIZE; i ++)
- {
- addr_t slot = addr_chop (PTR_TO_ADDR (thread->exception_area_va
- + i * PAGESIZE),
- PAGESIZE_LOG2);
- as_ensure (slot);
-
- struct storage storage = storage_alloc (ADDR_VOID, cap_page,
- STORAGE_LONG_LIVED,
- OBJECT_POLICY_DEFAULT,
- slot);
- if (ADDR_IS_VOID (storage.addr))
- {
- int j;
- for (j = 0; j < i; j ++)
- storage_free (thread->exception_area[j], false);
- as_free (exception_area, false);
- return EAGAIN;
- }
-
- thread->exception_area[i] = storage.addr;
- }
-
struct storage storage;
storage = storage_alloc (meta_data_activity, cap_thread,
/* Threads are rarely shortly lived. */
@@ -84,14 +64,33 @@ __pthread_thread_alloc (struct __pthread *thread)
ADDR_VOID);
if (ADDR_IS_VOID (storage.addr))
{
- int j;
- for (j = 0; j < EXCEPTION_AREA_SIZE / PAGESIZE; j ++)
- storage_free (thread->exception_area[j], false);
- as_free (exception_area, false);
+ debug (0, DEBUG_BOLD ("Out of memory"));
return EAGAIN;
}
thread->object = storage.addr;
+
+ error_t err;
+ err = hurd_activation_state_alloc (thread->object, &thread->utcb);
+ if (unlikely (err))
+ panic ("Failed to initialize thread's activation state: %d", err);
+
+ err = rm_cap_copy (ADDR_VOID,
+ thread->lock_message_buffer->receiver,
+ ADDR (VG_MESSENGER_THREAD_SLOT,
+ VG_MESSENGER_SLOTS_LOG2),
+ ADDR_VOID, thread->object,
+ 0, CAP_PROPERTIES_DEFAULT);
+ if (err)
+ panic ("Failed to set lock messenger's thread");
+
+ /* Unblock the lock messenger. */
+ err = vg_ipc (VG_IPC_RECEIVE | VG_IPC_RECEIVE_ACTIVATE
+ | VG_IPC_RETURN,
+ ADDR_VOID, thread->lock_message_buffer->receiver, ADDR_VOID,
+ ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID);
+ if (err)
+ panic ("Failed to unblock messenger's thread");
}
thread->have_kernel_resources = true;
diff --git a/libpthread/sysdeps/viengoos/pt-thread-dealloc.c b/libpthread/sysdeps/viengoos/pt-thread-dealloc.c
index 71b3d96..9e174a9 100644
--- a/libpthread/sysdeps/viengoos/pt-thread-dealloc.c
+++ b/libpthread/sysdeps/viengoos/pt-thread-dealloc.c
@@ -26,30 +26,20 @@
#include <hurd/mutex.h>
#include <hurd/as.h>
#include <hurd/addr.h>
+#include <hurd/message-buffer.h>
void
__pthread_thread_dealloc (struct __pthread *thread)
{
assert (thread != _pthread_self ());
- /* Clean up the exception page. */
- exception_page_cleanup
- (ADDR_TO_PTR (addr_extend (thread->exception_area[EXCEPTION_PAGE],
- 0, PAGESIZE_LOG2)));
+ __pthread_thread_halt (thread);
- /* Free the storage. */
- int i;
- for (i = 0; i < EXCEPTION_AREA_SIZE / PAGESIZE; i ++)
- {
- assert (! ADDR_IS_VOID (thread->exception_area[i]));
- storage_free (thread->exception_area[i], false);
- }
+ /* Clean up the activation state. */
+ hurd_activation_state_free (thread->utcb);
- /* And the address space. */
- as_free (addr_chop (PTR_TO_ADDR (thread->exception_area_va),
- EXCEPTION_AREA_SIZE_LOG2), false);
-
- storage_free (thread->object, false);
+ assert (thread->lock_message_buffer);
+ hurd_message_buffer_free (thread->lock_message_buffer);
thread->have_kernel_resources = 0;
}
diff --git a/libpthread/sysdeps/viengoos/pt-thread-halt.c b/libpthread/sysdeps/viengoos/pt-thread-halt.c
index aef1395..cfa861b 100644
--- a/libpthread/sysdeps/viengoos/pt-thread-halt.c
+++ b/libpthread/sysdeps/viengoos/pt-thread-halt.c
@@ -27,6 +27,25 @@
void
__pthread_thread_halt (struct __pthread *thread)
{
+ /* We need to be careful. This function is called in three
+ situations: by the thread itself when it is about to exit, by a
+ thread joining it, and when reusing an existing thread. Hence,
+ it must be kosher to interrupt this function's execution at any
+ point: synchronization is difficult as, in the first case, there is
+ no way to indicate completion. */
if (thread->have_kernel_resources)
- thread_stop (thread->object);
+ {
+ if (thread == _pthread_self ())
+ {
+ while (1)
+ vg_suspend ();
+ }
+ else
+ {
+ error_t err = thread_stop (thread->object);
+ if (err)
+ panic ("Failed to halt " ADDR_FMT ": %d",
+ ADDR_PRINTF (thread->object), err);
+ }
+ }
}
diff --git a/libpthread/sysdeps/viengoos/pt-thread-start.c b/libpthread/sysdeps/viengoos/pt-thread-start.c
index 9db399c..f03f4c2 100644
--- a/libpthread/sysdeps/viengoos/pt-thread-start.c
+++ b/libpthread/sysdeps/viengoos/pt-thread-start.c
@@ -1,5 +1,5 @@
-/* Start thread. L4 version.
- Copyright (C) 2007 Free Software Foundation, Inc.
+/* Start thread. Viengoos version.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -35,21 +35,17 @@ __pthread_thread_start (struct __pthread *thread)
{
assert (__pthread_total == 1);
assert (l4_is_thread_equal (l4_myself (), thread->threadid));
- l4_set_user_defined_handle ((l4_word_t) thread);
}
else
{
struct hurd_thread_exregs_in in;
struct hurd_thread_exregs_out out;
- in.aspace = ADDR (0, 0);
+ addr_t aspace = ADDR (0, 0);
in.aspace_cap_properties = CAP_PROPERTIES_VOID;
in.aspace_cap_properties_flags = CAP_COPY_COPY_SOURCE_GUARD;
- in.activity = ADDR_VOID;
-
- in.exception_page = addr_chop (PTR_TO_ADDR (thread->exception_area_va),
- PAGESIZE_LOG2);
+ addr_t activity = ADDR_VOID;
in.sp = (l4_word_t) thread->mcontext.sp;
in.ip = (l4_word_t) thread->mcontext.pc;
@@ -58,12 +54,12 @@ __pthread_thread_start (struct __pthread *thread)
err = rm_thread_exregs (ADDR_VOID, thread->object,
HURD_EXREGS_SET_ASPACE
| HURD_EXREGS_SET_ACTIVITY
- | HURD_EXREGS_SET_EXCEPTION_PAGE
| HURD_EXREGS_SET_SP_IP
| HURD_EXREGS_SET_USER_HANDLE
| HURD_EXREGS_START
| HURD_EXREGS_ABORT_IPC,
- in, &out);
+ in, aspace, activity, ADDR_VOID, ADDR_VOID,
+ &out, NULL, NULL, NULL, NULL);
assert (err == 0);
}
return 0;
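
The rm_thread_exregs calls in this commit all follow the same new shape: the
capability addresses (address space, activity, and — judging from the
viengoos ChangeLog below — UTCB and exception messenger) are passed as
explicit arguments, with ADDR_VOID meaning "leave unchanged", and the outputs
come back through pointers that may be NULL.  A sketch of the common
start-a-thread case (THREAD_CAP, SP and IP are placeholders):

    struct hurd_thread_exregs_in in;
    struct hurd_thread_exregs_out out;

    in.sp = (uintptr_t) sp;
    in.ip = (uintptr_t) ip;

    error_t err = rm_thread_exregs (ADDR_VOID, thread_cap,
                                    HURD_EXREGS_SET_SP_IP
                                    | HURD_EXREGS_START | HURD_EXREGS_ABORT_IPC,
                                    in, ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
                                    &out, NULL, NULL, NULL, NULL);
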
diff --git a/libpthread/sysdeps/viengoos/pt-wakeup.c b/libpthread/sysdeps/viengoos/pt-wakeup.c
index e568a6f..60804ea 100644
--- a/libpthread/sysdeps/viengoos/pt-wakeup.c
+++ b/libpthread/sysdeps/viengoos/pt-wakeup.c
@@ -27,6 +27,10 @@
void
__pthread_wakeup (struct __pthread *thread)
{
+ struct __pthread *self = _pthread_self ();
+ assert (self != thread);
+ assert (self->lock_message_buffer);
+
/* We need to loop here as the blocked thread may not yet be
blocked! Here's what happens when a thread blocks: it registers
itself as blocked, drops the relevant lock and then actually
@@ -36,7 +40,8 @@ __pthread_wakeup (struct __pthread *thread)
long ret;
do
{
- ret = futex_wake (&thread->threadid, INT_MAX);
+ ret = futex_wake_using (self->lock_message_buffer,
+ &thread->threadid, INT_MAX);
assertx (ret <= 1, "tid: %x, ret: %d", thread->threadid, ret);
if (ret == 0)
diff --git a/ruth/ChangeLog b/ruth/ChangeLog
index 0712359..2b3f432 100644
--- a/ruth/ChangeLog
+++ b/ruth/ChangeLog
@@ -1,3 +1,15 @@
+2008-12-12 Neal H. Walfield <neal@gnu.org>
+
+ Update to new RPC interfaces.
+ * ruth.c (main): Update use of rm_folio_alloc,
+ rm_folio_object_alloc, rm_thread_exregs, rm_activity_policy,
+ rm_activity_info. Replace use of rm_thread_wait_object_destroyed
+ with rm_object_reply_on_destruction. Replace use of `struct
+ exception_info' with `struct activation_fault_info'. Fix signal
+ test's use of condition variables to not rely on the scheduler.
+ When checking deallocation code, set up a fault handler to
+ programmatically determine success.
+
2008-11-04 Neal H. Walfield <neal@gnu.org>
* ruth.c (main): Use OBJECT_PRIORITY_DEFAULT, not
diff --git a/ruth/ruth.c b/ruth/ruth.c
index f97bd1b..8f25a59 100644
--- a/ruth/ruth.c
+++ b/ruth/ruth.c
@@ -1,4 +1,4 @@
-/* ruth.c - Test server.
+/* ruth.c - Test suite.
Copyright (C) 2007, 2008 Free Software Foundation, Inc.
Written by Neal H. Walfield <neal@gnu.org>.
@@ -43,6 +43,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
+#include <setjmp.h>
#include <l4.h>
@@ -117,8 +118,10 @@ main (int argc, char *argv[])
addr_t folio = capalloc ();
assert (! ADDR_IS_VOID (folio));
- error_t err = rm_folio_alloc (activity, folio, FOLIO_POLICY_DEFAULT);
+ error_t err = rm_folio_alloc (activity, activity, FOLIO_POLICY_DEFAULT,
+ &folio);
assert (! err);
+ assert (! ADDR_IS_VOID (folio));
int i;
for (i = -10; i < 129; i ++)
@@ -129,8 +132,9 @@ main (int argc, char *argv[])
err = rm_folio_object_alloc (activity, folio, i, cap_page,
OBJECT_POLICY_DEFAULT, 0,
- addr, ADDR_VOID);
+ &addr, NULL);
assert ((err == 0) == (0 <= i && i < FOLIO_OBJECTS));
+ assert (! ADDR_IS_VOID (addr));
if (0 <= i && i < FOLIO_OBJECTS)
{
@@ -176,8 +180,10 @@ main (int argc, char *argv[])
cap_set_shadow (slot, shadow);
}));
- error_t err = rm_folio_alloc (activity, f, FOLIO_POLICY_DEFAULT);
+ error_t err = rm_folio_alloc (activity, activity,
+ FOLIO_POLICY_DEFAULT, &f);
assert (! err);
+ assert (! ADDR_IS_VOID (f));
int j;
for (j = 0; j <= i; j ++)
@@ -303,6 +309,9 @@ main (int argc, char *argv[])
{
static volatile int done;
char stack[0x1000];
+ /* Fault it in. */
+ stack[0] = 0;
+ stack[sizeof (stack) - 1] = 0;
void start (void)
{
@@ -328,12 +337,9 @@ main (int argc, char *argv[])
struct hurd_thread_exregs_in in;
- in.aspace = ADDR (0, 0);
in.aspace_cap_properties = CAP_PROPERTIES_DEFAULT;
in.aspace_cap_properties_flags = CAP_COPY_COPY_SOURCE_GUARD;
- in.activity = activity;
-
in.sp = (l4_word_t) ((void *) stack + sizeof (stack));
in.ip = (l4_word_t) &start;
@@ -343,7 +349,8 @@ main (int argc, char *argv[])
HURD_EXREGS_SET_ASPACE | HURD_EXREGS_SET_ACTIVITY
| HURD_EXREGS_SET_SP_IP | HURD_EXREGS_START
| HURD_EXREGS_ABORT_IPC,
- in, &out);
+ in, ADDR (0, 0), activity, ADDR_VOID, ADDR_VOID,
+ &out, NULL, NULL, NULL, NULL);
debug (5, "Waiting for thread");
while (done == 0)
@@ -431,6 +438,7 @@ main (int argc, char *argv[])
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+ bool ready_to_go;
const int count = 18;
@@ -472,17 +480,20 @@ main (int argc, char *argv[])
if (sigaction (SIGUSR1, &act, NULL) < 0)
panic ("Failed to install signal handler: %s", strerror (errno));
- debug (5, "Installed signal handler, waking main thread");
+ debug (5, "Installed signal handler, waking main thread (%d)", j);
/* Wait until the main thread unlocks MUTEX. */
pthread_mutex_lock (&mutex);
pthread_mutex_unlock (&mutex);
- debug (5, "Signaling main thread");
+ debug (5, "Signaling main thread (%d)", j);
+
/* Signal the main thread that we are ready. */
+ ready_to_go = true;
pthread_cond_signal (&cond);
+
/* Block. */
while (i != 1)
l4_yield ();
@@ -605,7 +616,9 @@ main (int argc, char *argv[])
for (i = 0; i < count; i ++)
{
/* Wait for the thread to install the signal handler. */
- pthread_cond_wait (&cond, &mutex);
+ while (!ready_to_go)
+ pthread_cond_wait (&cond, &mutex);
+ ready_to_go = false;
pthread_mutex_unlock (&mutex);
err = pthread_kill (thread, SIGUSR1);
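
The change above replaces a bare pthread_cond_wait with the standard
predicate-guarded form, so the hand-shake no longer depends on scheduling
order; schematically (READY_TO_GO is the flag introduced above):

    /* Waiter (main thread), with MUTEX held:  */
    while (! ready_to_go)
      pthread_cond_wait (&cond, &mutex);
    ready_to_go = false;
    pthread_mutex_unlock (&mutex);

    /* Signaller (signal thread):  */
    ready_to_go = true;
    pthread_cond_signal (&cond);
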
@@ -647,19 +660,23 @@ main (int argc, char *argv[])
err = rm_folio_object_alloc (activity, folio, obj ++,
cap_activity_control,
OBJECT_POLICY_DEFAULT, 0,
- a[i].child, ADDR_VOID);
+ &a[i].child, NULL);
assert (err == 0);
+ assert (! ADDR_IS_VOID (a[i].child));
/* Allocate a folio against the activity and use it. */
a[i].folio = capalloc ();
- err = rm_folio_alloc (a[i].child, a[i].folio, FOLIO_POLICY_DEFAULT);
+ err = rm_folio_alloc (activity, a[i].child, FOLIO_POLICY_DEFAULT,
+ &a[i].folio);
assert (err == 0);
+ assert (! ADDR_IS_VOID (a[i].folio));
a[i].page = capalloc ();
err = rm_folio_object_alloc (a[i].child, a[i].folio, 0, cap_page,
OBJECT_POLICY_DEFAULT, 0,
- a[i].page, ADDR_VOID);
+ &a[i].page, NULL);
assert (err == 0);
+ assert (! ADDR_IS_VOID (a[i].page));
l4_word_t type;
struct cap_properties properties;
@@ -676,11 +693,12 @@ main (int argc, char *argv[])
test (a[i].child, a[i].folio, depth - 1);
/* We destroy the first N / 2 activities. The caller will
- destroy the rest. */
+ implicitly destroy the rest. */
for (i = 0; i < N / 2; i ++)
{
/* Destroy the activity. */
- rm_folio_free (activity, a[i].folio);
+ err = rm_folio_free (activity, a[i].folio);
+ assert (! err);
/* To determine if the folio has been destroyed, we cannot simply
read the capability: this returns the type stored in the
@@ -690,7 +708,7 @@ main (int argc, char *argv[])
destroyed. */
err = rm_folio_object_alloc (a[i].child, a[i].folio, 1, cap_page,
OBJECT_POLICY_DEFAULT, 0,
- a[i].page, ADDR_VOID);
+ &a[i].page, NULL);
assert (err);
capfree (a[i].page);
@@ -701,8 +719,9 @@ main (int argc, char *argv[])
error_t err;
addr_t folio = capalloc ();
- err = rm_folio_alloc (activity, folio, FOLIO_POLICY_DEFAULT);
+ err = rm_folio_alloc (activity, activity, FOLIO_POLICY_DEFAULT, &folio);
assert (err == 0);
+ assert (! ADDR_IS_VOID (folio));
test (activity, folio, 2);
@@ -733,14 +752,14 @@ main (int argc, char *argv[])
in.child_rel = ACTIVITY_MEMORY_POLICY_VOID;
in.folios = 10000;
- err = rm_activity_policy (a,
+ err = rm_activity_policy (a, a,
ACTIVITY_POLICY_SIBLING_REL_SET
| ACTIVITY_POLICY_STORAGE_SET,
in,
&out);
assert (err == 0);
- err = rm_activity_policy (a,
+ err = rm_activity_policy (a, a,
0, ACTIVITY_POLICY_VOID,
&out);
assert (err == 0);
@@ -752,7 +771,7 @@ main (int argc, char *argv[])
in.sibling_rel.priority = 4;
in.sibling_rel.weight = 5;
in.folios = 10001;
- err = rm_activity_policy (a,
+ err = rm_activity_policy (a, a,
ACTIVITY_POLICY_SIBLING_REL_SET
| ACTIVITY_POLICY_STORAGE_SET,
in, &out);
@@ -763,13 +782,13 @@ main (int argc, char *argv[])
assert (out.sibling_rel.weight == 3);
assert (out.folios == 10000);
- err = rm_activity_policy (weak,
+ err = rm_activity_policy (a, weak,
ACTIVITY_POLICY_SIBLING_REL_SET
| ACTIVITY_POLICY_STORAGE_SET,
in, &out);
- assert (err == EPERM);
+ assertx (err == EPERM, "%d", err);
- err = rm_activity_policy (weak, 0, in, &out);
+ err = rm_activity_policy (a, weak, 0, in, &out);
assert (err == 0);
assert (out.sibling_rel.priority == 4);
@@ -860,7 +879,7 @@ main (int argc, char *argv[])
}
{
- printf ("Checking thread_wait_object_destroy... ");
+ printf ("Checking object_reply_on_destruction... ");
struct storage storage = storage_alloc (activity, cap_page,
STORAGE_MEDIUM_LIVED,
@@ -872,8 +891,8 @@ main (int argc, char *argv[])
{
uintptr_t ret = 0;
error_t err;
- err = rm_thread_wait_object_destroyed (ADDR_VOID, storage.addr, &ret);
- debug (5, "object destroy returned: err: %d, ret: %d", err, ret);
+ err = rm_object_reply_on_destruction (ADDR_VOID, storage.addr, &ret);
+ debug (5, "object_reply_on_destruction: err: %d, ret: %d", err, ret);
assert (err == 0);
assert (ret == 10);
return 0;
@@ -893,7 +912,7 @@ main (int argc, char *argv[])
addr_chop (storage.addr, FOLIO_OBJECTS_LOG2),
addr_extract (storage.addr, FOLIO_OBJECTS_LOG2),
cap_void,
- OBJECT_POLICY_VOID, 10, ADDR_VOID, ADDR_VOID);
+ OBJECT_POLICY_VOID, 10, NULL, NULL);
/* Release the memory. */
storage_free (storage.addr, true);
@@ -913,7 +932,7 @@ main (int argc, char *argv[])
bool fill (struct anonymous_pager *anon,
uintptr_t offset, uintptr_t count,
void *pages[],
- struct exception_info info)
+ struct activation_fault_info info)
{
assert (count == 1);
@@ -946,7 +965,7 @@ main (int argc, char *argv[])
bool fill (struct anonymous_pager *anon,
uintptr_t offset, uintptr_t count,
void *pages[],
- struct exception_info info)
+ struct activation_fault_info info)
{
assert (count == 1);
@@ -962,8 +981,8 @@ main (int argc, char *argv[])
do
{
struct activity_info info;
- error_t err = rm_activity_info (ADDR_VOID, activity_info_stats, 1,
- &info);
+ error_t err = rm_activity_info (ADDR_VOID, activity,
+ activity_info_stats, 1, &info);
assert_perror (err);
assert (info.stats.count >= 1);
@@ -971,12 +990,12 @@ main (int argc, char *argv[])
}
while (frames == 0);
- debug (0, "%d frames available", frames);
+ debug (0, "%d frames available", (int) frames);
uint32_t goal = frames * 2;
/* Limit to at most 1GB of memory. */
if (goal > ((uint32_t) -1) / PAGESIZE / 4)
goal = ((uint32_t) -1) / PAGESIZE / 4;
- debug (0, "Allocating %d frames", goal);
+ debug (0, "Allocating %d frames", (int) goal);
void *addr;
struct anonymous_pager *pager
@@ -1002,7 +1021,7 @@ main (int argc, char *argv[])
}
{
- printf ("Checking read-only pages... ");
+ printf ("Checking deallocation... ");
addr_t addr = as_alloc (PAGESIZE_LOG2, 1, true);
assert (! ADDR_IS_VOID (addr));
@@ -1015,17 +1034,44 @@ main (int argc, char *argv[])
addr).addr;
assert (! ADDR_IS_VOID (storage));
-
- debug (1, "Writing before dealloc...");
int *buffer = ADDR_TO_PTR (addr_extend (addr, 0, PAGESIZE_LOG2));
+
+ debug (5, "Writing before dealloc...");
*buffer = 0;
storage_free (storage, true);
- debug (1, "Writing after dealloc (should sigsegv)...");
- *buffer = 0;
+ debug (5, "Writing after dealloc...");
+
+ jmp_buf jmpbuf;
+ struct hurd_fault_catcher catcher;
+
+ bool faulted = false;
+ bool callback (struct activation_frame *activation_frame,
+ uintptr_t fault)
+ {
+ faulted = true;
+
+ hurd_fault_catcher_unregister (&catcher);
+ hurd_activation_frame_longjmp (activation_frame, jmpbuf, true, 1);
+ return true;
+ }
+
+ catcher.start = (uintptr_t) buffer;
+ catcher.len = PAGESIZE;
+ catcher.callback = callback;
+ hurd_fault_catcher_register (&catcher);
+
+ if (setjmp (jmpbuf) == 0)
+ {
+ *buffer = 0;
+ assert (! "Didn't fault!?");
+ }
+ assert (faulted);
}
+ debug (1, DEBUG_BOLD ("\n\nAll tests ran successfully to completion!\n\n"));
+
debug (1, "Shutting down...");
while (1)
l4_sleep (L4_NEVER);
diff --git a/viengoos/ChangeLog b/viengoos/ChangeLog
index 0c9817b..783bbfe 100644
--- a/viengoos/ChangeLog
+++ b/viengoos/ChangeLog
@@ -1,3 +1,115 @@
+2008-12-12 Neal H. Walfield <neal@gnu.org>
+
+ Implement messengers and convert to new IPC semantics.
+ * messenger.h: New file.
+ * messenger.c: New file.
+ * Makefile.am (viengoos_SOURCES): Add messenger.h and messenger.c.
+ * ager.c: Include "messenger.h".
+ (update_stats): Update notification code to use messengers.
+ * cap.c: Include <hurd/messenger.h>.
+ (cap_shootdown): Follow thread and messenger objects.
+ * object.h (object_wait_queue_head): Use and return struct
+ messenger *'s, not struct thread *'s. Update users.
+ (object_wait_queue_tail): Likewise.
+ (object_wait_queue_next): Likewise.
+ (object_wait_queue_prev): Likewise.
+ (object_wait_queue_enqueue): Likewise.
+ (object_wait_queue_dequeue): Likewise. Rename from this...
+ (object_wait_queue_unlink): ... to this.
+ (object_wait_queue_push): New declaration.
+ (folio_object_wait_queue_for_each): Use and return struct
+ messenger *'s, not struct thread *'s. Update users.
+ (object_wait_queue_for_each): Likewise.
+ * object.c: Include <hurd/messenger.h> and "messenger.h".
+ (folio_object_alloc): When destroying a messenger, call
+ messenger_destroy.
+ (folio_object_alloc): Send notifications using messengers.
+ (object_wait_queue_head): Use and return struct messenger *'s, not
+ struct thread *'s.
+ (object_wait_queue_tail): Likewise.
+ (object_wait_queue_next): Likewise.
+ (object_wait_queue_prev): Likewise.
+ (object_wait_queue_check): Likewise.
+ (object_wait_queue_enqueue): Likewise. Add MESSENGER to end of
+ the queue, not the beginning.
+ (object_wait_queue_push): New function.
+ (object_wait_queue_dequeue): Use and return struct messenger *'s,
+ not struct thread *'s. Rename from this...
+ (object_wait_queue_unlink): ... to this.
+ * pager.c: Include "messenger.h".
+ * thread.h: Don't include "list.h". Include <hurd/cap.h> and
+ <hurd/thread.h>.
+ (struct folio): Remove declaration.
+ (THREAD_SLOTS): Don't define.
+ (THREAD_WAIT_FUTEX): Move from here...
+ * messenger.h (MESSENGER_WAIT_FUTEX): ... to here.
+ * thread.h (THREAD_WAIT_DESTROY): Move from here...
+ * messenger.h (MESSENGER_WAIT_DESTROY): ... to here.
+ * thread.h (THREAD_WAIT_ACTIVITY_INFO): Move from here...
+ * messenger.h (MESSENGER_WAIT_ACTIVITY_INFO): ... to here.
+ * thread.h (struct thread): Rename field exception_page to utcb.
+ Add field exception_messenger. Remove fields wait_queue_p,
+ wait_queue_head, wait_queue_tail, wait_reason, wait_reason_arg,
+ wait_reason_arg2, wait_queue and futex_waiter_node.
+ (futex_waiters): Don't declare.
+ (thread_exregs): Change input capabilities to not be pointers to
+ capabilities but just capability structures. Add argument
+ exception_messenger. Remove arguments aspace_out, activity_out
+ and exception_page_out. Update users.
+ (thread_activate): New declaration.
+ (thread_raise_exception): Change MSG's type to be struct
+ vg_message *. Update users.
+ (thread_deliver_pending): New declaration.
+ * thread.c (thread_deinit): Remove code to remove THREAD from a
+ wait queue.
+ (thread_exregs): Change input capabilities to not be pointers to
+ capabilities but just capability structures. Update code. Add
+ argument exception_messenger. Set THREAD's exception messenger
+ according to it and CONTROL. Remove arguments aspace_out,
+ activity_out and exception_page_out. Don't save the old
+ capabilities.
+ (thread_raise_exception): Move body of function...
+ (thread_activate): ... to this new function. Update to use
+ messengers.
+ (thread_raise_exception): Implement in terms of it.
+ (thread_deliver_pending): New function.
+ * server.c: Include <hurd/ipc.h> and "messenger.h".
+ (DEBUG): If label is the IPC label, use "IPC" as the function.
+ (OBJECT_): Take additional parameter WRITABLE. Save whether the
+ object is writable in *WRITABLE. Update users.
+ (OBJECT): Likewise.
+ (server_loop): Update to use messengers and the new IPC interface.
+ Update method implementations appropriately. Don't marshal faults
+ using exception_fault_send_marshal but the new
+ activation_fault_send_marshal. Remove implementations of
+ object_slot_copy_out, object_slot_copy_in and object_slot_read.
+ Reimplement object_discard. In the thread_exregs implementation,
+ handle the exception messenger. Implement thread_id. Remove
+ thread_wait_object_destroyed. Implement
+ object_reply_on_destruction. In activity_info and
+ activity_policy, don't operate on PRINCIPAL but on the invoked
+ activity. Implement thread_activation_collect. When blocking on
+ a futex, don't enqueue the calling thread but the reply messenger.
+ Implement the messenger_id method.
+ (REPLY): Redefine before processing an object invocation to reply
+ using the reply messenger included in the request.
+
+ * rm.h: Include <l4/message.h>.
+ (rm_method_id_string): Don't handle object_slot_copy_out,
+ object_slot_copy_in, object_slot_read, exception_collect or
+ thread_wait_object_destroyed. Handle object_reply_on_destruction,
+ thread_id, thread_activation_collect.
+ (RPC_TARGET_NEED_ARG): Don't undefine.
+ (RPC_TARGET): Don't define.
+ (struct io_buffer): Redefine in terms of L4_NUM_BRS.
+ (write): Update interface specification according to new IDL
+ interface. Update users.
+ (read): Likewise.
+ (as_dump): Likewise.
+ (fault): Likewise.
+ (RPC_STUB_PREFIX): Don't undefine.
+ (RPC_ID_PREFIX): Likewise.
+
2008-12-11 Neal H. Walfield <neal@gnu.org>
* viengoos.c (bootstrap): Add code to configure the memory to
diff --git a/viengoos/Makefile.am b/viengoos/Makefile.am
index bb97920..c3d37df 100644
--- a/viengoos/Makefile.am
+++ b/viengoos/Makefile.am
@@ -49,6 +49,7 @@ viengoos_SOURCES = $(ARCH_SOURCES) \
cap.h cap.c \
activity.h activity.c \
thread.h thread.c \
+ messenger.h messenger.c \
ager.h ager.c \
bits.h \
server.h server.c \
diff --git a/viengoos/ager.c b/viengoos/ager.c
index afd2168..d078252 100644
--- a/viengoos/ager.c
+++ b/viengoos/ager.c
@@ -32,6 +32,7 @@
#include "zalloc.h"
#include "thread.h"
#include "pager.h"
+#include "messenger.h"
#define MIN(x,y) ((x) < (y) ? (x) : (y))
#define MAX(x,y) ((x) > (y) ? (x) : (y))
@@ -401,14 +402,14 @@ update_stats (void)
0, sizeof (*ACTIVITY_STATS (activity)));
/* Wake anyone waiting for this statistic. */
- struct thread *thread;
+ struct messenger *messenger;
object_wait_queue_for_each (activity, (struct object *) activity,
- thread)
- if (thread->wait_reason == THREAD_WAIT_ACTIVITY_INFO
- && (thread->wait_reason_arg & activity_info_stats)
- && thread->wait_reason_arg2 <= period / FREQ)
+ messenger)
+ if (messenger->wait_reason == MESSENGER_WAIT_ACTIVITY_INFO
+ && (messenger->wait_reason_arg & activity_info_stats)
+ && messenger->wait_reason_arg2 <= period / FREQ)
{
- object_wait_queue_dequeue (activity, thread);
+ object_wait_queue_unlink (activity, messenger);
/* XXX: Only return valid stat buffers. */
struct activity_info info;
@@ -426,20 +427,7 @@ update_stats (void)
info.stats.count = ACTIVITY_STATS_PERIODS;
- l4_msg_t msg;
- rm_activity_info_reply_marshal (&msg, info);
- l4_msg_tag_t msg_tag = l4_msg_msg_tag (msg);
- l4_set_propagation (&msg_tag);
- l4_msg_set_msg_tag (msg, msg_tag);
- l4_set_virtual_sender (viengoos_tid);
- l4_msg_load (msg);
- msg_tag = l4_reply (thread->tid);
-
- if (l4_ipc_failed (msg_tag))
- debug (0, "%s %x failed: %u",
- l4_error_code () & 1 ? "Receiving from" : "Sending to",
- l4_error_code () & 1 ? l4_myself () : thread->tid,
- (l4_error_code () >> 1) & 0x7);
+ rm_activity_info_reply (root_activity, messenger, info);
}
}
diff --git a/viengoos/cap.c b/viengoos/cap.c
index 3f020fe..2a9ae66 100644
--- a/viengoos/cap.c
+++ b/viengoos/cap.c
@@ -20,6 +20,7 @@
#include <assert.h>
#include <hurd/stddef.h>
+#include <hurd/messenger.h>
#include "cap.h"
#include "object.h"
@@ -142,6 +143,42 @@ cap_shootdown (struct activity *activity, struct cap *root)
return;
+ case cap_messenger:
+ case cap_rmessenger:
+ if (remaining < VG_MESSENGER_SLOTS_LOG2 + PAGESIZE_LOG2)
+ return;
+
+ object = cap_to_object (activity, cap);
+ if (! object)
+ return;
+
+ remaining -= VG_MESSENGER_SLOTS_LOG2;
+
+ for (i = 0; i < VG_MESSENGER_SLOTS_LOG2; i ++)
+ if (root->oid != object->caps[i].oid)
+ doit (&object->caps[i], remaining);
+
+ return;
+
+ case cap_thread:
+ if (remaining < THREAD_SLOTS_LOG2 + PAGESIZE_LOG2)
+ return;
+
+ object = cap_to_object (activity, cap);
+ if (! object)
+ return;
+
+ remaining -= THREAD_SLOTS_LOG2;
+
+ for (i = 0; i < THREAD_SLOTS_LOG2; i ++)
+ if (root->oid != object->caps[i].oid)
+ doit (&object->caps[i],
+ remaining
+ + (i == THREAD_ASPACE_SLOT ? THREAD_SLOTS_LOG2 : 0));
+
+ return;
+
+
case cap_folio:
if (remaining < FOLIO_OBJECTS_LOG2 + PAGESIZE_LOG2)
return;
diff --git a/viengoos/messenger.c b/viengoos/messenger.c
new file mode 100644
index 0000000..fbcd58f
--- /dev/null
+++ b/viengoos/messenger.c
@@ -0,0 +1,347 @@
+/* messenger.c - Messenger object implementation.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <stdbool.h>
+#include <errno.h>
+#include <assert.h>
+#include <hurd/cap.h>
+#include <hurd/as.h>
+
+#include "messenger.h"
+#include "object.h"
+#include "thread.h"
+
+/* When the kernel formulates replies, it does so in this buffer. */
+static char reply_message_data[PAGESIZE] __attribute__ ((aligned (PAGESIZE)));
+struct vg_message *reply_buffer = (struct vg_message *) &reply_message_data[0];
+
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+#define MAX(x, y) ((x) > (y) ? (x) : (y))
+
+#include <backtrace.h>
+
+static bool
+messenger_load_internal (struct activity *activity,
+ struct messenger *target,
+ struct messenger *source,
+ struct vg_message *smessage,
+ bool may_block)
+{
+ assert (object_type ((struct object *) target) == cap_messenger);
+ if (source)
+ assert (object_type ((struct object *) source) == cap_messenger);
+
+ if (source)
+ assert (! smessage);
+ else
+ assert (smessage);
+
+ /* SOURCE should not already be blocked on another messenger. */
+ if (source)
+ {
+ assert (! source->wait_queue.next);
+ assert (! source->wait_queue.prev);
+ }
+
+ if (unlikely (target->blocked))
+ /* TARGET is blocked. */
+ {
+ if (! may_block)
+ {
+ debug (0, "Not enqueuing messenger: "
+ "target blocked and delivery marked as non-blocking.");
+ backtrace_print ();
+ return false;
+ }
+
+ /* Enqueue SOURCE on TARGET's wait queue. */
+
+ debug (0, "Target blocked. Enqueuing sender.");
+
+ assert (source);
+ source->wait_reason = MESSENGER_WAIT_TRANSFER_MESSAGE;
+ object_wait_queue_enqueue (activity, (struct object *) target, source);
+
+ return true;
+ }
+
+ /* TARGET is not blocked. Deliver the message. */
+ debug (5, "Delivering sender's message to target.");
+
+ target->blocked = true;
+
+ /* There are four combinations: the source can either have inline
+ data or out-of-line data and the target can either have inline
+ data or out-of-line data. */
+
+ struct vg_message *tmessage = NULL;
+
+ void *sdata;
+ void *tdata;
+ int data_count;
+
+ addr_t *saddrs;
+ int saddr_count;
+ addr_t *taddrs;
+ int taddr_count;
+
+ if (! source || source->out_of_band)
+ /* Source data is in a buffer. */
+ {
+ if (source)
+ smessage = (struct vg_message *) cap_to_object (activity,
+ &source->buffer);
+ else
+ assert (smessage);
+
+ if (smessage)
+ {
+ sdata = vg_message_data (smessage);
+ data_count = vg_message_data_count (smessage);
+
+ saddrs = vg_message_caps (smessage);
+ saddr_count = vg_message_cap_count (smessage);
+ }
+ else
+ {
+ sdata = NULL;
+ data_count = 0;
+ saddrs = NULL;
+ saddr_count = 0;
+ }
+ }
+ else
+ /* Source data is inline. */
+ {
+ assert (source);
+
+ sdata = source->inline_words;
+ data_count
+ = sizeof (source->inline_words[0]) * source->inline_word_count;
+
+ saddrs = source->inline_caps;
+ saddr_count = source->inline_cap_count;
+ }
+
+ if (target->out_of_band)
+ /* Target data is in a buffer. */
+ {
+ tmessage = (struct vg_message *) cap_to_object (activity,
+ &target->buffer);
+ if (tmessage)
+ {
+ taddrs = vg_message_caps (tmessage);
+ taddr_count = vg_message_cap_count (tmessage);
+
+ /* Set the number of capabilities to the number in the
+ source message. */
+ tmessage->cap_count = saddr_count;
+ tdata = vg_message_data (tmessage);
+ tmessage->data_count = data_count;
+ }
+ else
+ {
+ tdata = NULL;
+ data_count = 0;
+
+ taddrs = NULL;
+ taddr_count = 0;
+ }
+ }
+ else
+ /* Target data is inline. */
+ {
+ tdata = target->inline_words;
+ data_count = MIN (data_count,
+ sizeof (uintptr_t) * VG_MESSENGER_INLINE_WORDS);
+ target->inline_word_count
+ = (data_count + sizeof (uintptr_t) - 1) / sizeof (uintptr_t);
+
+ taddrs = target->inline_caps;
+ taddr_count = target->inline_cap_count;
+ }
+
+ do_debug (5)
+ {
+ if (smessage)
+ {
+ debug (0, "Source: ");
+ vg_message_dump (smessage);
+ }
+ if (tmessage)
+ {
+ debug (0, "Target: ");
+ vg_message_dump (tmessage);
+ }
+ }
+
+ /* Copy the caps. */
+ int i;
+ for (i = 0; i < MIN (saddr_count, taddr_count); i ++)
+ {
+ /* First get the target capability slot. */
+ bool twritable = true;
+
+ struct cap *tcap = NULL;
+ if (! ADDR_IS_VOID (taddrs[i]))
+ {
+ as_slot_lookup_rel_use (activity, &target->as_root, taddrs[i],
+ ({
+ twritable = writable;
+ tcap = slot;
+ }));
+ if (! tcap || ! twritable)
+ debug (0, DEBUG_BOLD ("Target " ADDR_FMT " does not designate "
+ "a %svalid slot!"),
+ ADDR_PRINTF (taddrs[i]), twritable ? "writable " : "");
+ }
+
+ if (likely (tcap && twritable))
+ /* We have a slot and it is writable. Look up the source
+ capability. */
+ {
+ struct cap scap = CAP_VOID;
+ bool swritable = true;
+ if (source)
+ {
+ if (! ADDR_IS_VOID (saddrs[i]))
+ scap = as_cap_lookup_rel (activity,
+ &source->as_root, saddrs[i],
+ -1, &swritable);
+ }
+ else
+ /* This is a kernel provided buffer. In this case the
+ address is really a pointer to a capability. */
+ if ((uintptr_t) saddrs[i].raw)
+ scap = * (struct cap *) (uintptr_t) saddrs[i].raw;
+
+ if (! swritable)
+ scap.type = cap_type_weaken (scap.type);
+
+ /* Shoot down the capability. */
+ cap_shootdown (activity, tcap);
+
+ /* Preserve the address translator and policy. */
+ struct cap_properties props = CAP_PROPERTIES_GET (*tcap);
+ *tcap = scap;
+ CAP_PROPERTIES_SET (tcap, props);
+
+ debug (5, ADDR_FMT " <- " CAP_FMT,
+ ADDR_PRINTF (taddrs[i]), CAP_PRINTF (tcap));
+ }
+ else
+ taddrs[i] = ADDR_VOID;
+ }
+ if (i < MAX (taddr_count, saddr_count) && target->out_of_band && taddrs)
+ /* Set the address of any non-transferred caps in the target to
+ ADDR_VOID. */
+ memset (&taddrs[i], 0,
+ sizeof (taddrs[0]) * (MAX (taddr_count, saddr_count) - i));
+
+ /* Copy the data. */
+ memcpy (tdata, sdata, data_count);
+
+ do_debug (5)
+ if (tmessage)
+ {
+ debug (0, "Delivery: ");
+ vg_message_dump (tmessage);
+ }
+
+ if (target->activate_on_receive)
+ messenger_message_deliver (activity, target);
+ else
+ debug (0, "Not activing target.");
+
+ if (source && source->activate_on_send)
+ messenger_message_deliver (activity, source);
+
+ return true;
+}
+
+bool
+messenger_message_transfer (struct activity *activity,
+ struct messenger *target,
+ struct messenger *source,
+ bool may_block)
+{
+ return messenger_load_internal (activity, target, source, NULL, may_block);
+}
+
+bool
+messenger_message_load (struct activity *activity,
+ struct messenger *target,
+ struct vg_message *message)
+{
+ return messenger_load_internal (activity, target, NULL, message, false);
+}
+
+bool
+messenger_message_deliver (struct activity *activity,
+ struct messenger *messenger)
+{
+ assert (messenger->blocked);
+ assert (! messenger->wait_queue_p);
+
+ struct thread *thread
+ = (struct thread *) cap_to_object (activity, &messenger->thread);
+ if (! thread)
+ {
+ debug (0, "Messenger has no thread to activate!");
+ return false;
+ }
+
+ if (object_type ((struct object *) thread) != cap_thread)
+ {
+ debug (0, "Messenger's thread cap does not designate a thread but a %s",
+ cap_type_string (object_type ((struct object *) thread)));
+ return false;
+ }
+
+ return thread_activate (activity, thread, messenger, true);
+}
+
+void
+messenger_unblock (struct activity *activity, struct messenger *messenger)
+{
+ if (! messenger->blocked)
+ return;
+
+ messenger->blocked = 0;
+
+ struct messenger *m;
+ object_wait_queue_for_each (activity, (struct object *) messenger, m)
+ if (m->wait_reason == MESSENGER_WAIT_TRANSFER_MESSAGE)
+ {
+ object_wait_queue_unlink (activity, m);
+ bool ret = messenger_message_transfer (activity, messenger, m, true);
+ assert (ret);
+
+ break;
+ }
+}
+
+void
+messenger_destroy (struct activity *activity, struct messenger *messenger)
+{
+ if (messenger->wait_queue_p)
+ /* MESSENGER is attached to a wait queue. Detach it. */
+ object_wait_queue_unlink (activity, messenger);
+}
diff --git a/viengoos/messenger.h b/viengoos/messenger.h
new file mode 100644
index 0000000..abbf8ff
--- /dev/null
+++ b/viengoos/messenger.h
@@ -0,0 +1,214 @@
+/* messenger.h - Messenger buffer definitions.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _MESSENGER_H
+#define _MESSENGER_H 1
+
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+#include <hurd/cap.h>
+#include <hurd/messenger.h>
+#include <hurd/message.h>
+
+#ifndef NDEBUG
+#include "../viengoos/list.h"
+#endif
+
+/* A messenger may be enqueued on any object and for different reasons.
+ The reason a messenger is enqueued is stored in its WAIT_REASON field.
+ These are the reasons. */
+enum
+ {
+ /* The messenger is blocked on an object waiting for a futex.
+ WAIT_REASON_ARG holds the byte offset in the object on which it
+ is waiting. */
+ MESSENGER_WAIT_FUTEX,
+
+ /* The messenger is blocked on an object waiting for the object to
+ be destroyed. */
+ MESSENGER_WAIT_DESTROY,
+
+ /* The messenger is blocked on an activity waiting for
+ information. The type of information is stored in
+ wait_reason_arg. The period in wait_reason_arg2. */
+ MESSENGER_WAIT_ACTIVITY_INFO,
+
+ /* The messenger is trying to transfer a message to another
+ messenger or to a thread. */
+ MESSENGER_WAIT_TRANSFER_MESSAGE,
+ };
+
+/* Messenger object. */
+struct messenger
+{
+ /* When this messenger is activated (that is, its contents are
+ delivered or it receives a message), THREAD is activated. This
+ is settable from user space. */
+ struct cap thread;
+
+ /* The root of the address space in which capability addresses
+ referenced in the message are resolved. */
+ struct cap as_root;
+
+ /* The message buffer. */
+ struct cap buffer;
+
+ /* The activity supplied by the sender of the message. */
+ struct cap sender_activity;
+
+
+ /* Whether the data is inline or out of line. */
+ bool out_of_band;
+
+ /* The inline data. */
+ int inline_word_count;
+ int inline_cap_count;
+
+ /* Inline data. */
+ uintptr_t inline_words[VG_MESSENGER_INLINE_WORDS];
+ addr_t inline_caps[VG_MESSENGER_INLINE_CAPS];
+
+
+ /* The buffer's version. If USER_VERSION_MATCHING is true, a
+ message can only be delivered if the user version in the
+ capability used to designate the buffer matches the buffer's user
+ version. */
+ uint64_t user_version;
+
+ /* If the user version in the capability must match
+ USER_VERSION. */
+ bool user_version_matching;
+ bool user_version_increment_on_delivery;
+
+
+ /* If the buffer is blocked, no messages will be delivered.
+ When a message is delivered to this buffer, this is set to
+ true. */
+ bool blocked;
+
+ /* Activate thread when this messenger receives a message. */
+ bool activate_on_receive;
+ /* Activate thread when this messenger sends a message. */
+ bool activate_on_send;
+
+ /* The payload in the capability that was used to deliver the
+ message. This is only valid if this buffer contains an
+ (undelivered) message. */
+ uint64_t protected_payload;
+
+ /* The messenger's identifier. */
+ uint64_t id;
+
+
+ /* The object the messenger is waiting on. Only meaningful if
+ WAIT_QUEUE_P is true.
+
+ The list node used to connect a messenger to its target's
+ sender's wait queue.
+
+ Senders are arranged in a doubly-linked list. The head points to
+ the second element and the last element. The last element points
+ to the root and the second to last object.
+
+
+ H ----> 1
+ ^ //\
+ | / ||
+ ||/_ \/
+ 3 <===> 2
+
+ Next pointers: H -> 1 -> 2 -> 3 -> H
+ Previous pointers: 1 -> 3 -> 2 -> 1
+ */
+ struct
+ {
+ /* We don't need versioning as we automatically collect on object
+ destruction. */
+ oid_t next;
+ oid_t prev;
+ } wait_queue;
+
+  /* Whether the messenger is attached to a wait queue.  (This is
+     different from the value of folio_object_wait_queue_p, which
+     specifies whether there are objects on this messenger's wait
+     queue.) */
+ uint32_t wait_queue_p : 1;
+
+ /* Whether this messenger is the head of the wait queue. If so,
+     WAIT_QUEUE.PREV designates the tail of the queue. */
+ uint32_t wait_queue_head : 1;
+
+ /* Whether this messenger is the tail of the wait queue. If so,
+ WAIT_QUEUE.NEXT designates the object. */
+ uint32_t wait_queue_tail : 1;
+
+
+ /* Why the messenger is on a wait queue. */
+ uint32_t wait_reason : 27;
+ /* Additional information about the reason. */
+ uint32_t wait_reason_arg;
+ uint32_t wait_reason_arg2;
+
+#ifndef NDEBUG
+ /* Used for debugging futexes. */
+ struct list_node futex_waiter_node;
+#endif
+};
+
+#ifndef NDEBUG
+LIST_CLASS(futex_waiter, struct messenger, futex_waiter_node, true)
+/* List of threads waiting on a futex. */
+extern struct futex_waiter_list futex_waiters;
+#endif
+
+/* When the kernel formulates replies, it does so in this buffer. */
+extern struct vg_message *reply_buffer;
+
+/* Transfer SOURCE's message contents to TARGET. If TARGET is blocked
+ and MAY_BLOCK is true, enqueue SOURCE on TARGET. Returns whether
+   the message was delivered or SOURCE was enqueued on
+   TARGET. */
+extern bool messenger_message_transfer (struct activity *activity,
+ struct messenger *target,
+ struct messenger *source,
+ bool may_block);
+
+/* If TARGET is not blocked, load the message MESSAGE into TARGET.
+   Returns whether the message was loaded.  NB: ANY CAPABILITY
+   ADDRESSES ARE INTERPRETED AS POINTERS TO STRUCT CAP!!! */
+extern bool messenger_message_load (struct activity *activity,
+ struct messenger *target,
+ struct vg_message *message);
+
+/* Attempt to deliver the message stored in TARGET to its thread.  If
+   the thread is currently activated, TARGET is instead enqueued on
+   it. */
+extern bool messenger_message_deliver (struct activity *activity,
+ struct messenger *target);
+
+/* Unblock messenger MESSENGER. If any messengers are waiting to
+ deliver a message attempt delivery. */
+extern void messenger_unblock (struct activity *activity,
+ struct messenger *messenger);
+
+/* Destroy the messenger MESSENGER: it is about to be deallocated. */
+extern void messenger_destroy (struct activity *activity,
+ struct messenger *messenger);
+
+#endif
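For orientation, the wait-queue and reply interfaces introduced above can be combined as in the following sketch.  It is illustrative only and not part of the patch: it wakes every messenger parked on a futex at byte OFFSET within OBJECT, using object_wait_queue_for_each and object_wait_queue_unlink from viengoos/object.h together with the rpc_error_reply helper used elsewhere in this patch.  Treating an error code of 0 as an acceptable wake-up notification is the sketch's assumption; the real futex code may use a dedicated reply routine.

#include "messenger.h"
#include "object.h"

/* Illustrative sketch: wake all messengers waiting on the futex at
   byte OFFSET in OBJECT.  Returns the number of messengers woken.  */
static int
futex_wake_all_sketch (struct activity *activity,
                       struct object *object, uintptr_t offset)
{
  int woken = 0;

  struct messenger *m;
  object_wait_queue_for_each (activity, object, m)
    if (m->wait_reason == MESSENGER_WAIT_FUTEX
        && m->wait_reason_arg == offset)
      {
        /* The iterator fetches the next waiter before the body runs,
           so unlinking M here is safe.  */
        object_wait_queue_unlink (activity, m);
        /* Hypothetical wake-up reply (error code 0).  */
        rpc_error_reply (activity, m, 0);
        woken ++;
      }

  return woken;
}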
diff --git a/viengoos/object.c b/viengoos/object.c
index 9dbc663..c91ff45 100644
--- a/viengoos/object.c
+++ b/viengoos/object.c
@@ -24,6 +24,7 @@
#include <hurd/ihash.h>
#include <hurd/folio.h>
#include <hurd/thread.h>
+#include <hurd/messenger.h>
#include <bit-array.h>
#include <assert.h>
@@ -31,6 +32,7 @@
#include "activity.h"
#include "thread.h"
#include "zalloc.h"
+#include "messenger.h"
/* For lack of a better place. */
ss_mutex_t kernel_lock;
@@ -527,7 +529,8 @@ folio_object_alloc (struct activity *activity,
/* Deallocate any existing object. */
if (folio_object_type (folio, idx) == cap_activity_control
- || folio_object_type (folio, idx) == cap_thread)
+ || folio_object_type (folio, idx) == cap_thread
+ || folio_object_type (folio, idx) == cap_messenger)
/* These object types have state that needs to be explicitly
destroyed. */
{
@@ -547,6 +550,10 @@ folio_object_alloc (struct activity *activity,
debug (4, "Destroying thread object at %llx", oid);
thread_deinit (activity, (struct thread *) object);
break;
+ case cap_messenger:
+ debug (4, "Destroying messenger object at %llx", oid);
+ messenger_destroy (activity, (struct messenger *) object);
+ break;
default:
assert (!"Object desc type does not match folio type.");
break;
@@ -555,14 +562,15 @@ folio_object_alloc (struct activity *activity,
/* Wake any threads waiting on this object. We wake them even if
they are not waiting for this object's death. */
- struct thread *thread;
- folio_object_wait_queue_for_each (activity, folio, idx, thread)
+ struct messenger *messenger;
+ folio_object_wait_queue_for_each (activity, folio, idx, messenger)
{
- object_wait_queue_dequeue (activity, thread);
- if (thread->wait_reason == THREAD_WAIT_DESTROY)
- rm_thread_wait_object_destroyed_reply (thread->tid, return_code);
+ object_wait_queue_unlink (activity, messenger);
+ if (messenger->wait_reason == MESSENGER_WAIT_DESTROY)
+ rm_object_reply_on_destruction_reply (activity,
+ messenger, return_code);
else
- rpc_error_reply (thread->tid, EFAULT);
+ rpc_error_reply (activity, messenger, EFAULT);
}
struct object_desc *odesc;
@@ -959,7 +967,7 @@ object_desc_claim (struct activity *activity, struct object_desc *desc,
}
/* Return the first waiter queued on object OBJECT. */
-struct thread *
+struct messenger *
object_wait_queue_head (struct activity *activity, struct object *object)
{
struct folio *folio = objects_folio (activity, object);
@@ -971,18 +979,18 @@ object_wait_queue_head (struct activity *activity, struct object *object)
oid_t h = folio_object_wait_queue (folio, i);
struct object *head = object_find (activity, h, OBJECT_POLICY_DEFAULT);
assert (head);
- assert (object_type (head) == cap_thread);
- assert (((struct thread *) head)->wait_queue_p);
- assert (((struct thread *) head)->wait_queue_head);
+ assert (object_type (head) == cap_messenger);
+ assert (((struct messenger *) head)->wait_queue_p);
+ assert (((struct messenger *) head)->wait_queue_head);
- return (struct thread *) head;
+ return (struct messenger *) head;
}
/* Return the last waiter queued on object OBJECT. */
-struct thread *
+struct messenger *
object_wait_queue_tail (struct activity *activity, struct object *object)
{
- struct thread *head = object_wait_queue_head (activity, object);
+ struct messenger *head = object_wait_queue_head (activity, object);
if (! head)
return NULL;
@@ -990,47 +998,47 @@ object_wait_queue_tail (struct activity *activity, struct object *object)
/* HEAD is also the list's tail. */
return head;
- struct thread *tail;
- tail = (struct thread *) object_find (activity, head->wait_queue.prev,
- OBJECT_POLICY_DEFAULT);
+ struct messenger *tail;
+ tail = (struct messenger *) object_find (activity, head->wait_queue.prev,
+ OBJECT_POLICY_DEFAULT);
assert (tail);
- assert (object_type ((struct object *) tail) == cap_thread);
+ assert (object_type ((struct object *) tail) == cap_messenger);
assert (tail->wait_queue_p);
assert (tail->wait_queue_tail);
return tail;
}
-/* Return the waiter following THREAD. */
-struct thread *
-object_wait_queue_next (struct activity *activity, struct thread *t)
+/* Return the waiter following M. */
+struct messenger *
+object_wait_queue_next (struct activity *activity, struct messenger *m)
{
- if (t->wait_queue_tail)
+ if (m->wait_queue_tail)
return NULL;
- struct thread *next;
- next = (struct thread *) object_find (activity, t->wait_queue.next,
- OBJECT_POLICY_DEFAULT);
+ struct messenger *next;
+ next = (struct messenger *) object_find (activity, m->wait_queue.next,
+ OBJECT_POLICY_DEFAULT);
assert (next);
- assert (object_type ((struct object *) next) == cap_thread);
+ assert (object_type ((struct object *) next) == cap_messenger);
assert (next->wait_queue_p);
assert (! next->wait_queue_head);
return next;
}
-/* Return the waiter preceding THREAD. */
-struct thread *
-object_wait_queue_prev (struct activity *activity, struct thread *t)
+/* Return the waiter preceding M. */
+struct messenger *
+object_wait_queue_prev (struct activity *activity, struct messenger *m)
{
- if (t->wait_queue_head)
+ if (m->wait_queue_head)
return NULL;
- struct thread *prev;
- prev = (struct thread *) object_find (activity, t->wait_queue.prev,
- OBJECT_POLICY_DEFAULT);
+ struct messenger *prev;
+ prev = (struct messenger *) object_find (activity, m->wait_queue.prev,
+ OBJECT_POLICY_DEFAULT);
assert (prev);
- assert (object_type ((struct object *) prev) == cap_thread);
+ assert (object_type ((struct object *) prev) == cap_messenger);
assert (prev->wait_queue_p);
assert (! prev->wait_queue_tail);
@@ -1038,29 +1046,29 @@ object_wait_queue_prev (struct activity *activity, struct thread *t)
}
static void
-object_wait_queue_check (struct activity *activity, struct thread *thread)
+object_wait_queue_check (struct activity *activity, struct messenger *messenger)
{
#ifndef NDEBUG
- if (! thread->wait_queue_p)
+ if (! messenger->wait_queue_p)
return;
- struct thread *last = thread;
- struct thread *t;
+ struct messenger *last = messenger;
+ struct messenger *m;
for (;;)
{
if (last->wait_queue_tail)
break;
- t = (struct thread *) object_find (activity, last->wait_queue.next,
- OBJECT_POLICY_DEFAULT);
- assert (t);
- assert (t->wait_queue_p);
- assert (! t->wait_queue_head);
- struct object *p = object_find (activity, t->wait_queue.prev,
+ m = (struct messenger *) object_find (activity, last->wait_queue.next,
+ OBJECT_POLICY_DEFAULT);
+ assert (m);
+ assert (m->wait_queue_p);
+ assert (! m->wait_queue_head);
+ struct object *p = object_find (activity, m->wait_queue.prev,
OBJECT_POLICY_DEFAULT);
assert (p == (struct object *) last);
- last = t;
+ last = m;
}
assert (last->wait_queue_tail);
@@ -1071,68 +1079,67 @@ object_wait_queue_check (struct activity *activity, struct thread *thread)
assert (folio_object_wait_queue_p (objects_folio (activity, o),
objects_folio_offset (o)));
- struct thread *head = object_wait_queue_head (activity, o);
+ struct messenger *head = object_wait_queue_head (activity, o);
if (! head)
return;
assert (head->wait_queue_head);
- struct thread *tail;
- tail = (struct thread *) object_find (activity, head->wait_queue.prev,
- OBJECT_POLICY_DEFAULT);
+ struct messenger *tail;
+ tail = (struct messenger *) object_find (activity, head->wait_queue.prev,
+ OBJECT_POLICY_DEFAULT);
assert (tail);
assert (tail->wait_queue_tail);
assert (last == tail);
last = head;
- while (last != thread)
+ while (last != messenger)
{
assert (! last->wait_queue_tail);
- t = (struct thread *) object_find (activity, last->wait_queue.next,
- OBJECT_POLICY_DEFAULT);
- assert (t);
- assert (t->wait_queue_p);
- assert (! t->wait_queue_head);
+ m = (struct messenger *) object_find (activity, last->wait_queue.next,
+ OBJECT_POLICY_DEFAULT);
+ assert (m);
+ assert (m->wait_queue_p);
+ assert (! m->wait_queue_head);
- struct object *p = object_find (activity, t->wait_queue.prev,
+ struct object *p = object_find (activity, m->wait_queue.prev,
OBJECT_POLICY_DEFAULT);
assert (p == (struct object *) last);
- last = t;
+ last = m;
}
#endif /* !NDEBUG */
}
-/* Enqueue the thread THREAD on object OBJECT's wait queue. */
void
-object_wait_queue_enqueue (struct activity *activity,
- struct object *object, struct thread *thread)
+object_wait_queue_push (struct activity *activity,
+ struct object *object, struct messenger *messenger)
{
- debug (5, "Adding " OID_FMT " to %p",
- OID_PRINTF (object_to_object_desc ((struct object *) thread)->oid),
+ debug (5, "Pushing " OID_FMT " onto %p",
+ OID_PRINTF (object_to_object_desc ((struct object *) messenger)->oid),
object);
- object_wait_queue_check (activity, thread);
+ object_wait_queue_check (activity, messenger);
- assert (! thread->wait_queue_p);
+ assert (! messenger->wait_queue_p);
- struct thread *oldhead = object_wait_queue_head (activity, object);
+ struct messenger *oldhead = object_wait_queue_head (activity, object);
if (oldhead)
{
assert (oldhead->wait_queue_head);
- /* THREAD->PREV = TAIL. */
- thread->wait_queue.prev = oldhead->wait_queue.prev;
+ /* MESSENGER->PREV = TAIL. */
+ messenger->wait_queue.prev = oldhead->wait_queue.prev;
- /* OLDHEAD->PREV = THREAD. */
+ /* OLDHEAD->PREV = MESSENGER. */
oldhead->wait_queue_head = 0;
- oldhead->wait_queue.prev = object_oid ((struct object *) thread);
+ oldhead->wait_queue.prev = object_oid ((struct object *) messenger);
- /* THREAD->NEXT = OLDHEAD. */
- thread->wait_queue.next = object_oid ((struct object *) oldhead);
+ /* MESSENGER->NEXT = OLDHEAD. */
+ messenger->wait_queue.next = object_oid ((struct object *) oldhead);
- thread->wait_queue_tail = 0;
+ messenger->wait_queue_tail = 0;
}
else
/* Empty list. */
@@ -1141,133 +1148,194 @@ object_wait_queue_enqueue (struct activity *activity,
objects_folio_offset (object),
true);
- /* THREAD->PREV = THREAD. */
- thread->wait_queue.prev = object_oid ((struct object *) thread);
+ /* MESSENGER->PREV = MESSENGER. */
+ messenger->wait_queue.prev = object_oid ((struct object *) messenger);
- /* THREAD->NEXT = OBJECT. */
- thread->wait_queue_tail = 1;
- thread->wait_queue.next = object_oid (object);
+ /* MESSENGER->NEXT = OBJECT. */
+ messenger->wait_queue_tail = 1;
+ messenger->wait_queue.next = object_oid (object);
}
- thread->wait_queue_p = true;
+ messenger->wait_queue_p = true;
- /* WAIT_QUEUE = THREAD. */
- thread->wait_queue_head = 1;
+ /* WAIT_QUEUE = MESSENGER. */
+ messenger->wait_queue_head = 1;
folio_object_wait_queue_set (objects_folio (activity, object),
objects_folio_offset (object),
- object_oid ((struct object *) thread));
+ object_oid ((struct object *) messenger));
+
+ object_wait_queue_check (activity, messenger);
+}
+
+void
+object_wait_queue_enqueue (struct activity *activity,
+ struct object *object, struct messenger *messenger)
+{
+ debug (5, "Enqueueing " OID_FMT " on %p",
+ OID_PRINTF (object_to_object_desc ((struct object *) messenger)->oid),
+ object);
+
+ object_wait_queue_check (activity, messenger);
+
+ assert (! messenger->wait_queue_p);
+
+ struct messenger *oldtail = object_wait_queue_tail (activity, object);
+ if (oldtail)
+ {
+ /* HEAD->PREV = MESSENGER. */
+ struct messenger *head = object_wait_queue_head (activity, object);
+ head->wait_queue.prev = object_oid ((struct object *) messenger);
+
+ assert (oldtail->wait_queue_tail);
+
+ /* MESSENGER->PREV = OLDTAIL. */
+ messenger->wait_queue.prev = object_oid ((struct object *) oldtail);
+
+ /* OLDTAIL->NEXT = MESSENGER. */
+ oldtail->wait_queue_tail = 0;
+ oldtail->wait_queue.next = object_oid ((struct object *) messenger);
+
+ /* MESSENGER->NEXT = OBJECT. */
+ messenger->wait_queue.next = object_oid (object);
+
+ messenger->wait_queue_head = 0;
+ messenger->wait_queue_tail = 1;
+ }
+ else
+ /* Empty list. */
+ {
+ folio_object_wait_queue_p_set (objects_folio (activity, object),
+ objects_folio_offset (object),
+ true);
+
+ /* MESSENGER->PREV = MESSENGER. */
+ messenger->wait_queue.prev = object_oid ((struct object *) messenger);
+
+ /* MESSENGER->NEXT = OBJECT. */
+ messenger->wait_queue_tail = 1;
+ messenger->wait_queue.next = object_oid (object);
+
+ /* WAIT_QUEUE = MESSENGER. */
+ messenger->wait_queue_head = 1;
+ folio_object_wait_queue_set (objects_folio (activity, object),
+ objects_folio_offset (object),
+ object_oid ((struct object *) messenger));
+ }
+
+ messenger->wait_queue_p = true;
- object_wait_queue_check (activity, thread);
+ object_wait_queue_check (activity, messenger);
}
-/* Dequeue thread THREAD from its wait queue. */
+/* Unlink messenger MESSENGER from its wait queue. */
void
-object_wait_queue_dequeue (struct activity *activity, struct thread *thread)
+object_wait_queue_unlink (struct activity *activity,
+ struct messenger *messenger)
{
debug (5, "Removing " OID_FMT,
- OID_PRINTF (object_to_object_desc ((struct object *) thread)->oid));
+ OID_PRINTF (object_to_object_desc ((struct object *) messenger)->oid));
- assert (thread->wait_queue_p);
+ assert (messenger->wait_queue_p);
- object_wait_queue_check (activity, thread);
+ object_wait_queue_check (activity, messenger);
- if (thread->wait_queue_tail)
- /* THREAD is the tail. THREAD->NEXT must be the object on which
+ if (messenger->wait_queue_tail)
+ /* MESSENGER is the tail. MESSENGER->NEXT must be the object on which
we are queued. */
{
struct object *object;
- object = object_find (activity, thread->wait_queue.next,
+ object = object_find (activity, messenger->wait_queue.next,
OBJECT_POLICY_DEFAULT);
assert (object);
assert (folio_object_wait_queue_p (objects_folio (activity, object),
objects_folio_offset (object)));
- assert (object_wait_queue_tail (activity, object) == thread);
+ assert (object_wait_queue_tail (activity, object) == messenger);
- if (thread->wait_queue_head)
- /* THREAD is also the head and thus the only item on the
+ if (messenger->wait_queue_head)
+ /* MESSENGER is also the head and thus the only item on the
list. */
{
- assert (object_find (activity, thread->wait_queue.prev,
+ assert (object_find (activity, messenger->wait_queue.prev,
OBJECT_POLICY_DEFAULT)
- == (struct object *) thread);
+ == (struct object *) messenger);
folio_object_wait_queue_p_set (objects_folio (activity, object),
objects_folio_offset (object),
false);
}
else
- /* THREAD is not also the head. */
+ /* MESSENGER is not also the head. */
{
- struct thread *head = object_wait_queue_head (activity, object);
+ struct messenger *head = object_wait_queue_head (activity, object);
/* HEAD->PREV == TAIL. */
assert (object_find (activity, head->wait_queue.prev,
OBJECT_POLICY_DEFAULT)
- == (struct object *) thread);
+ == (struct object *) messenger);
/* HEAD->PREV = TAIL->PREV. */
- head->wait_queue.prev = thread->wait_queue.prev;
+ head->wait_queue.prev = messenger->wait_queue.prev;
/* TAIL->PREV->NEXT = OBJECT. */
- struct thread *prev;
- prev = (struct thread *) object_find (activity,
- thread->wait_queue.prev,
+ struct messenger *prev;
+ prev = (struct messenger *) object_find (activity,
+ messenger->wait_queue.prev,
OBJECT_POLICY_DEFAULT);
assert (prev);
- assert (object_type ((struct object *) prev) == cap_thread);
+ assert (object_type ((struct object *) prev) == cap_messenger);
prev->wait_queue_tail = 1;
- prev->wait_queue.next = thread->wait_queue.next;
+ prev->wait_queue.next = messenger->wait_queue.next;
}
}
else
- /* THREAD is not the tail. */
+ /* MESSENGER is not the tail. */
{
- struct thread *next = object_wait_queue_next (activity, thread);
+ struct messenger *next = object_wait_queue_next (activity, messenger);
assert (next);
- struct object *p = object_find (activity, thread->wait_queue.prev,
+ struct object *p = object_find (activity, messenger->wait_queue.prev,
OBJECT_POLICY_DEFAULT);
assert (p);
- assert (object_type (p) == cap_thread);
- struct thread *prev = (struct thread *) p;
+ assert (object_type (p) == cap_messenger);
+ struct messenger *prev = (struct messenger *) p;
- if (thread->wait_queue_head)
- /* THREAD is the head. */
+ if (messenger->wait_queue_head)
+ /* MESSENGER is the head. */
{
- /* THREAD->PREV is the tail, TAIL->NEXT the object. */
- struct thread *tail = prev;
+ /* MESSENGER->PREV is the tail, TAIL->NEXT the object. */
+ struct messenger *tail = prev;
struct object *object = object_find (activity, tail->wait_queue.next,
OBJECT_POLICY_DEFAULT);
assert (object);
- assert (object_wait_queue_head (activity, object) == thread);
+ assert (object_wait_queue_head (activity, object) == messenger);
- /* OBJECT->WAIT_QUEUE = THREAD->NEXT. */
+ /* OBJECT->WAIT_QUEUE = MESSENGER->NEXT. */
next->wait_queue_head = 1;
folio_object_wait_queue_set (objects_folio (activity, object),
objects_folio_offset (object),
- thread->wait_queue.next);
+ messenger->wait_queue.next);
}
else
- /* THREAD is neither the head nor the tail. */
+ /* MESSENGER is neither the head nor the tail. */
{
- /* THREAD->PREV->NEXT = THREAD->NEXT. */
- prev->wait_queue.next = thread->wait_queue.next;
+ /* MESSENGER->PREV->NEXT = MESSENGER->NEXT. */
+ prev->wait_queue.next = messenger->wait_queue.next;
}
- /* THREAD->NEXT->PREV = THREAD->PREV. */
- next->wait_queue.prev = thread->wait_queue.prev;
+ /* MESSENGER->NEXT->PREV = MESSENGER->PREV. */
+ next->wait_queue.prev = messenger->wait_queue.prev;
}
- thread->wait_queue_p = false;
+ messenger->wait_queue_p = false;
#ifndef NDEBUG
- if (thread->wait_reason == THREAD_WAIT_FUTEX)
- futex_waiter_list_unlink (&futex_waiters, thread);
+ if (messenger->wait_reason == MESSENGER_WAIT_FUTEX)
+ futex_waiter_list_unlink (&futex_waiters, messenger);
#endif
- object_wait_queue_check (activity, thread);
+ object_wait_queue_check (activity, messenger);
}
diff --git a/viengoos/object.h b/viengoos/object.h
index 077bd7e..47f36b2 100644
--- a/viengoos/object.h
+++ b/viengoos/object.h
@@ -536,56 +536,63 @@ extern void folio_policy (struct activity *activity,
struct folio_policy *out);
/* Return the first waiter queued on object OBJECT. */
-extern struct thread *object_wait_queue_head (struct activity *activity,
- struct object *object);
+extern struct messenger *object_wait_queue_head (struct activity *activity,
+ struct object *object);
/* Return the last waiter queued on object OBJECT. */
-extern struct thread *object_wait_queue_tail (struct activity *activity,
- struct object *object);
+extern struct messenger *object_wait_queue_tail (struct activity *activity,
+ struct object *object);
-/* Return the waiter following THREAD. */
-extern struct thread *object_wait_queue_next (struct activity *activity,
- struct thread *thread);
+/* Return the waiter following MESSENGER. */
+extern struct messenger *object_wait_queue_next (struct activity *activity,
+ struct messenger *messenger);
-/* Return the waiter preceding THREAD. */
-extern struct thread *object_wait_queue_prev (struct activity *activity,
- struct thread *thread);
+/* Return the waiter preceding MESSENGER. */
+extern struct messenger *object_wait_queue_prev (struct activity *activity,
+ struct messenger *messenger);
-/* Enqueue thread on object OBJECT's wait queue. */
+/* Push the messenger MESSENGER onto object OBJECT's wait queue (i.e.,
+ add it to the front of the wait queue). */
+extern void object_wait_queue_push (struct activity *activity,
+ struct object *object,
+ struct messenger *messenger);
+
+/* Enqueue the messenger MESSENGER on object OBJECT's wait queue
+ (i.e., add it to the end of the wait queue). */
extern void object_wait_queue_enqueue (struct activity *activity,
struct object *object,
- struct thread *thread);
+ struct messenger *messenger);
-/* Dequeue thread THREAD from its wait queue. */
-extern void object_wait_queue_dequeue (struct activity *activity,
- struct thread *thread);
+/* Unlink messenger MESSENGER from its wait queue. */
+extern void object_wait_queue_unlink (struct activity *activity,
+ struct messenger *messenger);
-/* Iterate over each thread waiting on the object at IDX in FOLIO. It
+/* Iterate over each messenger waiting on the object at IDX in FOLIO. It
is safe to call object_wait_queue_dequeue. */
#define folio_object_wait_queue_for_each(__owqfe_activity, \
__owqfe_folio, __owqfe_idx, \
- __owqfe_thread) \
- for (struct thread *__owqfe_next \
- = (struct thread *) \
+ __owqfe_messenger) \
+ for (struct messenger *__owqfe_next \
+ = (struct messenger *) \
(folio_object_wait_queue_p (__owqfe_folio, __owqfe_idx) \
? object_find (__owqfe_activity, \
folio_object_wait_queue (__owqfe_folio, \
__owqfe_idx), \
OBJECT_POLICY_VOID) \
: NULL); \
- (__owqfe_thread = __owqfe_next) \
+ (__owqfe_messenger = __owqfe_next) \
&& ((__owqfe_next = object_wait_queue_next (__owqfe_activity, \
- __owqfe_thread)) \
+ __owqfe_messenger)) \
|| 1); /* do nothing. */)
#define object_wait_queue_for_each(__owqfe_activity, __owqfe_object, \
- __owqfe_thread) \
- for (struct thread *__owqfe_next \
+ __owqfe_messenger) \
+ for (struct messenger *__owqfe_next \
= object_wait_queue_head (__owqfe_activity, __owqfe_object); \
- (__owqfe_thread = __owqfe_next) \
+ (__owqfe_messenger = __owqfe_next) \
&& ((__owqfe_next = object_wait_queue_next (__owqfe_activity, \
- __owqfe_thread)) \
+ __owqfe_messenger)) \
|| 1); /* do nothing. */)
#endif
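The counterpart of the destruction wake-up loop added to folio_object_alloc above is code that parks a reply messenger on an object.  The following sketch is illustrative only and not part of the patch; it assumes that setting WAIT_REASON before enqueueing is the only bookkeeping required, whereas the real RM_object_reply_on_destruction handler may do more.

#include "messenger.h"
#include "object.h"

/* Illustrative sketch: park the messenger REPLY on OBJECT until the
   object is destroyed.  When folio_object_alloc later deallocates
   OBJECT, its wake-up loop finds REPLY with wait_reason ==
   MESSENGER_WAIT_DESTROY and sends the destruction reply carrying
   the object's return code.  */
static void
reply_on_destruction_sketch (struct activity *activity,
                             struct object *object,
                             struct messenger *reply)
{
  reply->wait_reason = MESSENGER_WAIT_DESTROY;
  object_wait_queue_enqueue (activity, object, reply);
}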
diff --git a/viengoos/pager.c b/viengoos/pager.c
index 65ccb5f..8b3b038 100644
--- a/viengoos/pager.c
+++ b/viengoos/pager.c
@@ -23,8 +23,9 @@
#include "activity.h"
#include "object.h"
#include "pager.h"
-#include "thread.h"
#include "profile.h"
+#include "messenger.h"
+#include "thread.h"
int pager_min_alloc_before_next_collect;
@@ -484,14 +485,13 @@ pager_collect (int goal)
bool need_reclaim = true;
- struct thread *thread;
- object_wait_queue_for_each (victim, (struct object *) victim,
- thread)
- if (thread->wait_reason == THREAD_WAIT_ACTIVITY_INFO
- && (thread->wait_reason_arg & activity_info_pressure))
+ struct messenger *m;
+ object_wait_queue_for_each (victim, (struct object *) victim, m)
+ if (m->wait_reason == MESSENGER_WAIT_ACTIVITY_INFO
+ && (m->wait_reason_arg & activity_info_pressure))
break;
- if (thread)
+ if (m)
{
debug (5, DEBUG_BOLD ("Requesting that " OBJECT_NAME_FMT " free "
"%d pages.")
@@ -528,12 +528,12 @@ pager_collect (int goal)
object_wait_queue_for_each (victim,
(struct object *) victim,
- thread)
- if (thread->wait_reason == THREAD_WAIT_ACTIVITY_INFO
- && (thread->wait_reason_arg & activity_info_pressure))
+ m)
+ if (m->wait_reason == MESSENGER_WAIT_ACTIVITY_INFO
+ && (m->wait_reason_arg & activity_info_pressure))
{
- object_wait_queue_dequeue (victim, thread);
- rm_activity_info_reply (thread->tid, info);
+ object_wait_queue_unlink (victim, m);
+ rm_activity_info_reply (root_activity, m, info);
}
}
diff --git a/viengoos/rm.h b/viengoos/rm.h
index 03754e1..bef530e 100644
--- a/viengoos/rm.h
+++ b/viengoos/rm.h
@@ -27,6 +27,7 @@
#include <hurd/thread.h>
#include <hurd/activity.h>
#include <hurd/futex.h>
+#include <l4/message.h>
enum rm_method_id
{
@@ -55,32 +56,30 @@ rm_method_id_string (int id)
return "folio_free";
case RM_folio_object_alloc:
return "folio_object_alloc";
+ case RM_folio_policy:
+ return "folio_policy";
case RM_cap_copy:
return "cap_copy";
case RM_cap_rubout:
return "cap_rubout";
case RM_cap_read:
return "cap_read";
- case RM_object_slot_copy_out:
- return "object_slot_copy_out";
- case RM_object_slot_copy_in:
- return "object_slot_copy_in";
- case RM_object_slot_read:
- return "object_slot_read";
case RM_object_discarded_clear:
return "object_discarded_clear";
case RM_object_discard:
return "object_discard";
case RM_object_status:
return "object_status";
+ case RM_object_reply_on_destruction:
+ return "object_reply_on_destruction";
case RM_object_name:
return "object_name";
- case RM_exception_collect:
- return "exception_collect";
case RM_thread_exregs:
return "thread_exregs";
- case RM_thread_wait_object_destroyed:
- return "thread_wait_object_destroyed";
+ case RM_thread_id:
+ return "thread_id";
+ case RM_thread_activation_collect:
+ return "thread_activation_collect";
case RM_activity_policy:
return "activity_policy";
case RM_activity_info:
@@ -94,12 +93,6 @@ rm_method_id_string (int id)
#define RPC_STUB_PREFIX rm
#define RPC_ID_PREFIX RM
-#undef RPC_TARGET_NEED_ARG
-#define RPC_TARGET \
- ({ \
- extern struct hurd_startup_data *__hurd_startup_data; \
- __hurd_startup_data->rm; \
- })
#include <hurd/rpc.h>
@@ -107,20 +100,29 @@ struct io_buffer
{
/* The length. */
unsigned char len;
- char data[127];
+ char data[(L4_NUM_BRS - 2) * sizeof (uintptr_t)];
};
/* Echo the character CHR on the manager console. */
-RPC_SIMPLE(write, 1, 0, struct io_buffer, io)
+RPC(write, 1, 0, 0, struct io_buffer, io)
/* Read up to MAX characters from the console's input device. */
-RPC(read, 1, 1, int, max, struct io_buffer, io)
+RPC(read, 1, 1, 0,
+ int, max, struct io_buffer, io)
/* Dump the address space rooted at ROOT. */
-RPC(as_dump, 2, 0, addr_t, principal, addr_t, root)
-
-/* Fault up to the MIN (15, COUNT) pages starting at START. */
-RPC(fault, 3, 1, addr_t, principal, uintptr_t, start, int, count,
+RPC(as_dump, 0, 0, 0,
+ /* cap_t, principal, cap_t, object */)
+
+/* Fault up to COUNT pages starting at START. Returns the number
+ actually faulted in OCOUNT. */
+RPC(fault, 2, 1, 0,
+ /* cap_t, principal, cap_t thread, */
+ uintptr_t, start, int, count,
+ /* Out: */
int, ocount)
+#undef RPC_STUB_PREFIX
+#undef RPC_ID_PREFIX
+
#endif
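The RPC declarations above gained an additional count argument.  Reading the write, read and fault declarations together, the three counts appear to be the number of in arguments, the number of out arguments, and the number of returned capabilities; the authoritative definition is in hurd/rpc.h.  Under that assumption, a hypothetical additional method would be declared as follows.  console_pending does not exist in this patch, is purely illustrative, and would also need an RM_console_pending entry in enum rm_method_id.

/* Hypothetical example: report how many characters are pending on
   the console input device.  One in argument, one out argument, no
   returned capabilities.  */
RPC(console_pending, 1, 1, 0,
    int, flags,
    /* Out: */
    int, pending)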
diff --git a/viengoos/server.c b/viengoos/server.c
index 530577a..544a6c4 100644
--- a/viengoos/server.c
+++ b/viengoos/server.c
@@ -28,6 +28,7 @@
#include <hurd/futex.h>
#include <hurd/trace.h>
#include <hurd/as.h>
+#include <hurd/ipc.h>
#include "server.h"
@@ -38,6 +39,7 @@
#include "object.h"
#include "thread.h"
#include "activity.h"
+#include "messenger.h"
#include "viengoos.h"
#include "profile.h"
@@ -48,7 +50,7 @@ struct futex_waiter_list futex_waiters;
#ifndef NDEBUG
struct trace_buffer rpc_trace = TRACE_BUFFER_INIT ("rpcs", 0,
- false, false, false);
+ true, false, false);
/* Like debug but also prints the method id and saves to the trace
buffer if level is less than or equal to 4. */
@@ -59,11 +61,14 @@ struct trace_buffer rpc_trace = TRACE_BUFFER_INIT ("rpcs", 0,
trace_buffer_add (&rpc_trace, "(%x %s %d) " format, \
thread->tid, \
l4_is_pagefault (msg_tag) ? "pagefault" \
- : rm_method_id_string (label), label, \
+ : label == 8194 ? "IPC" \
+ : rm_method_id_string (label), \
+ label, \
##args); \
debug (level, "(%x %s:%d %d) " format, \
thread->tid, l4_is_pagefault (msg_tag) ? "pagefault" \
- : rm_method_id_string (label), __LINE__, label, \
+ : label == 8194 ? "IPC" : rm_method_id_string (label), \
+ __LINE__, label, \
##args); \
} \
while (0)
@@ -72,7 +77,8 @@ struct trace_buffer rpc_trace = TRACE_BUFFER_INIT ("rpcs", 0,
# define DEBUG(level, format, args...) \
debug (level, "(%x %s:%d %d) " format, \
thread->tid, l4_is_pagefault (msg_tag) ? "pagefault" \
- : rm_method_id_string (label), __LINE__, label, \
+ : label == 8194 ? "IPC" : rm_method_id_string (label), \
+ __LINE__, label, \
##args)
#endif
@@ -140,11 +146,11 @@ server_loop (void)
{
debug (0, "No IPCs for some time. Deadlock?");
- struct thread *thread;
- while ((thread = futex_waiter_list_head (&futex_waiters)))
+ struct messenger *messenger;
+ while ((messenger = futex_waiter_list_head (&futex_waiters)))
{
- object_wait_queue_dequeue (root_activity, thread);
- rpc_error_reply (thread->tid, EDEADLK);
+ object_wait_queue_unlink (root_activity, messenger);
+ rpc_error_reply (root_activity, messenger, EDEADLK);
}
trace_buffer_dump (&rpc_trace, 0);
@@ -166,7 +172,7 @@ server_loop (void)
#endif
l4_msg_store (msg_tag, msg);
- l4_word_t label;
+ uintptr_t label;
label = l4_label (msg_tag);
/* By default we reply to the sender. */
@@ -233,15 +239,15 @@ server_loop (void)
Thus, it is difficult to incorporate it into the case
switch below. */
{
- l4_word_t access;
- l4_word_t ip;
- l4_word_t fault = l4_pagefault (msg_tag, &access, &ip);
+ uintptr_t access;
+ uintptr_t ip;
+ uintptr_t fault = l4_pagefault (msg_tag, &access, &ip);
bool write_fault = !! (access & L4_FPAGE_WRITABLE);
- DEBUG (4, "%s fault at %x (ip = %x)",
+ DEBUG (4, "%s fault at %x (ip: %x)",
write_fault ? "Write" : "Read", fault, ip);
- l4_word_t page_addr = fault & ~(PAGESIZE - 1);
+ uintptr_t page_addr = fault & ~(PAGESIZE - 1);
struct cap cap;
bool writable;
@@ -264,7 +270,7 @@ server_loop (void)
if (! writable && cap.discardable)
{
- debug (5, "Ignoring discardable predicate for cap designating "
+ DEBUG (4, "Ignoring discardable predicate for cap designating "
OID_FMT " (%s)",
OID_PRINTF (cap.oid), cap_type_string (cap.type));
cap.discardable = false;
@@ -287,7 +293,7 @@ server_loop (void)
{
if (folio_object_discarded (folio, object))
{
- debug (5, OID_FMT " (%s) was discarded",
+ DEBUG (4, OID_FMT " (%s) was discarded",
OID_PRINTF (cap.oid),
cap_type_string (folio_object_type (folio,
object)));
@@ -296,7 +302,7 @@ server_loop (void)
discarded = true;
- debug (5, "Raising discarded fault at %x", page_addr);
+ DEBUG (5, "Raising discarded fault at %x", page_addr);
}
}
}
@@ -308,30 +314,29 @@ server_loop (void)
ip, fault, write_fault ? 'w' : 'r',
discarded ? " discarded" : "");
- l4_word_t c = _L4_XCHG_REGS_DELIVER;
+ uintptr_t c = _L4_XCHG_REGS_DELIVER;
l4_thread_id_t targ = thread->tid;
- l4_word_t sp = 0;
- l4_word_t dummy = 0;
+ uintptr_t sp = 0;
+ uintptr_t dummy = 0;
_L4_exchange_registers (&targ, &c,
&sp, &dummy, &dummy, &dummy, &dummy);
- struct exception_info info;
+ struct activation_fault_info info;
info.access = access;
info.type = write_fault ? cap_page : cap_rpage;
info.discarded = discarded;
- l4_msg_t msg;
- exception_fault_send_marshal (&msg, PTR_TO_ADDR (fault),
- sp, ip, info);
-
- thread_raise_exception (activity, thread, &msg);
+ activation_fault_send_marshal (reply_buffer, PTR_TO_ADDR (fault),
+ sp, ip, info, ADDR_VOID);
+ thread_raise_exception (activity, thread, reply_buffer);
continue;
}
- DEBUG (4, "%s fault at " DEBUG_BOLD ("%x") " (ip=%x), replying with %p(r%s)",
- write_fault ? "Write" : "Read", page_addr, ip, page,
+ DEBUG (4, "%s fault at " DEBUG_BOLD ("%x") " (ip=%x), "
+ "replying with %p(r%s)",
+ write_fault ? "Write" : "Read", fault, ip, page,
writable ? "w" : "");
object_to_object_desc (page)->mapped = true;
@@ -405,19 +410,18 @@ server_loop (void)
struct activity *principal;
- /* If ERR_ is not 0, create a message indicating an error with the
- error code ERR_. Go to the start of the server loop. */
+ /* Create a message indicating an error with the error code ERR_.
+ Go to the start of the server loop. */
#define REPLY(err_) \
do \
{ \
if (err_) \
- { \
- DEBUG (1, DEBUG_BOLD ("Returning error %d"), err_); \
- l4_msg_clear (msg); \
- l4_msg_put_word (msg, 0, (err_)); \
- l4_msg_set_untyped_words (msg, 1); \
- do_reply = 1; \
- } \
+ DEBUG (1, DEBUG_BOLD ("Returning error %d to %x"), \
+ err_, from); \
+ l4_msg_clear (msg); \
+ l4_msg_put_word (msg, 0, (err_)); \
+ l4_msg_set_untyped_words (msg, 1); \
+ do_reply = 1; \
goto out; \
} \
while (0)
@@ -498,24 +502,26 @@ server_loop (void)
error_t OBJECT_ (struct cap *root,
addr_t addr, int type, bool require_writable,
- struct object **objectp)
+ struct object **objectp, bool *writable)
{
- bool writable = true;
+ bool w = true;
struct cap cap;
- cap = as_object_lookup_rel (principal, root, addr, type,
- require_writable ? &writable : NULL);
+ cap = as_object_lookup_rel (principal, root, addr, type, &w);
if (type != -1 && ! cap_types_compatible (cap.type, type))
{
- DEBUG (4, "Addr 0x%llx/%d does not reference object of "
+ DEBUG (0, "Addr 0x%llx/%d does not reference object of "
"type %s but %s",
addr_prefix (addr), addr_depth (addr),
cap_type_string (type), cap_type_string (cap.type));
return ENOENT;
}
- if (require_writable && ! writable)
+ if (writable)
+ *writable = w;
+
+ if (require_writable && ! w)
{
- DEBUG (4, "Addr " ADDR_FMT " not writable",
+ DEBUG (0, "Addr " ADDR_FMT " not writable",
ADDR_PRINTF (addr));
return EPERM;
}
@@ -523,19 +529,20 @@ server_loop (void)
*objectp = cap_to_object (principal, &cap);
if (! *objectp)
{
- DEBUG (4, "Addr " ADDR_FMT " contains a dangling pointer: "
- CAP_FMT,
- ADDR_PRINTF (addr), CAP_PRINTF (&cap));
+ do_debug (4)
+ DEBUG (0, "Addr " ADDR_FMT " contains a dangling pointer: "
+ CAP_FMT,
+ ADDR_PRINTF (addr), CAP_PRINTF (&cap));
return ENOENT;
}
return 0;
}
-#define OBJECT(root_, addr_, type_, require_writable_) \
+#define OBJECT(root_, addr_, type_, require_writable_, writablep_) \
({ \
struct object *OBJECT_ret; \
error_t err = OBJECT_ (root_, addr_, type_, require_writable_, \
- &OBJECT_ret); \
+ &OBJECT_ret, writablep_); \
if (err) \
REPLY (err); \
OBJECT_ret; \
@@ -557,7 +564,7 @@ server_loop (void)
thread if it matches the guard exactly. */ \
struct object *t_; \
error_t err = OBJECT_ (&thread->aspace, root_addr_, \
- cap_thread, true, &t_); \
+ cap_thread, true, &t_, NULL); \
if (! err) \
root_ = &((struct thread *) t_)->aspace; \
else \
@@ -568,46 +575,6 @@ server_loop (void)
root_; \
})
- if (label == RM_write)
- {
- struct io_buffer buffer;
- err = rm_write_send_unmarshal (&msg, &buffer);
- if (! err)
- {
- int i;
- for (i = 0; i < buffer.len; i ++)
- putchar (buffer.data[i]);
- }
-
- /* No reply needed. */
- do_reply = 0;
- continue;
- }
- else if (label == RM_read)
- {
- int max;
- err = rm_read_send_unmarshal (&msg, &max);
- if (err)
- {
- DEBUG (0, "Read error!");
- REPLY (EINVAL);
- }
-
- struct io_buffer buffer;
- buffer.len = 0;
-
- if (max > 0)
- {
- buffer.len = 1;
- buffer.data[0] = getchar ();
- }
-
- rm_read_reply_marshal (&msg, buffer);
- continue;
- }
-
- do_reply = 1;
-
/* Return the next word. */
#define ARG(word_) l4_msg_word (msg, word_);
@@ -628,19 +595,224 @@ server_loop (void)
#define ARG_ADDR(word_) ((addr_t) { ARG64(word_) })
+ if (label == 2132)
+ /* write. */
+ {
+ int len = msg[1];
+ char *buffer = (char *) &msg[2];
+ buffer[len] = 0;
+ s_printf ("%s", buffer);
+ continue;
+ }
+
+ if (label != 8194)
+ {
+ DEBUG (0, "Invalid label: %d", label);
+ continue;
+ }
+
+ int i = 0;
+ uintptr_t flags = ARG (i);
+ i ++;
+ addr_t recv_activity = ARG_ADDR (i);
+ i += ARG64_WORDS;
+ addr_t recv_messenger = ARG_ADDR (i);
+ i += ARG64_WORDS;
+ addr_t recv_buf = ARG_ADDR (i);
+ i += ARG64_WORDS;
+ addr_t recv_inline_cap = ARG_ADDR (i);
+ i += ARG64_WORDS;
+
+ addr_t send_activity = ARG_ADDR (i);
+ i += ARG64_WORDS;
+ addr_t target_messenger = ARG_ADDR (i);
+ i += ARG64_WORDS;
+
+ addr_t send_messenger = ARG_ADDR (i);
+ i += ARG64_WORDS;
+ addr_t send_buf = ARG_ADDR (i);
+ i += ARG64_WORDS;
+
+ uintptr_t inline_word1 = ARG (i);
+ i ++;
+ uintptr_t inline_word2 = ARG (i);
+ i ++;
+ addr_t inline_cap = ARG_ADDR (i);
+
+#ifndef NDEBUG
+ /* Get the label early to improve debugging output in case the
+ target is invalid. */
+ if ((flags & VG_IPC_SEND))
+ {
+ if ((flags & VG_IPC_SEND_INLINE))
+ label = inline_word1;
+ else
+ {
+ principal = activity;
+
+ struct cap cap = CAP_VOID;
+ if (! ADDR_IS_VOID (send_buf))
+ /* Caller provided a send buffer. */
+ CAP_ (&thread->aspace, send_buf, cap_page, true, &cap);
+ else
+ {
+ struct object *object = NULL;
+ OBJECT_ (&thread->aspace, send_messenger,
+ cap_messenger, true, &object, NULL);
+ if (object)
+ cap = ((struct messenger *) object)->buffer;
+ }
+
+ struct vg_message *message;
+ message = (struct vg_message *) cap_to_object (principal,
+ &cap);
+ if (message)
+ label = vg_message_word (message, 0);
+ }
+ }
+#endif
+
+ DEBUG (4, "flags: %s%s%s%s%s%s %s%s%s%s%s%s %s %s%s%s%s(%x),"
+ "recv (" ADDR_FMT ", " ADDR_FMT ", " ADDR_FMT "), "
+ "send (" ADDR_FMT ", " ADDR_FMT ", " ADDR_FMT ", " ADDR_FMT "), "
+ "inline (" ADDR_FMT "; %x, %x, " ADDR_FMT ")",
+ (flags & VG_IPC_RECEIVE) ? "R" : "-",
+ (flags & VG_IPC_RECEIVE_NONBLOCKING) ? "N" : "B",
+ (flags & VG_IPC_RECEIVE_ACTIVATE) ? "A" : "-",
+ (flags & VG_IPC_RECEIVE_SET_THREAD_TO_CALLER) ? "T" : "-",
+ (flags & VG_IPC_RECEIVE_SET_ASROOT_TO_CALLERS) ? "A" : "-",
+ (flags & VG_IPC_RECEIVE_INLINE) ? "I" : "-",
+ (flags & VG_IPC_SEND) ? "S" : "-",
+ (flags & VG_IPC_SEND_NONBLOCKING) ? "N" : "B",
+ (flags & VG_IPC_SEND_ACTIVATE) ? "A" : "-",
+ (flags & VG_IPC_SEND_SET_THREAD_TO_CALLER) ? "T" : "-",
+ (flags & VG_IPC_SEND_SET_ASROOT_TO_CALLERS) ? "A" : "-",
+ (flags & VG_IPC_SEND_INLINE) ? "I" : "-",
+ (flags & VG_IPC_RETURN) ? "R" : "-",
+ (flags & VG_IPC_RECEIVE_INLINE_CAP1) ? "C" : "-",
+ (flags & VG_IPC_SEND_INLINE_WORD1) ? "1" : "-",
+ (flags & VG_IPC_SEND_INLINE_WORD2) ? "2" : "-",
+ (flags & VG_IPC_SEND_INLINE_CAP1) ? "C" : "-",
+ flags,
+ ADDR_PRINTF (recv_activity), ADDR_PRINTF (recv_messenger),
+ ADDR_PRINTF (recv_buf),
+ ADDR_PRINTF (send_activity), ADDR_PRINTF (target_messenger),
+ ADDR_PRINTF (send_messenger), ADDR_PRINTF (send_buf),
+ ADDR_PRINTF (recv_inline_cap),
+ inline_word1, inline_word2, ADDR_PRINTF (inline_cap));
+
+ if ((flags & VG_IPC_RECEIVE))
+ /* IPC includes a receive phase. */
+ {
+ principal = activity;
+ if (! ADDR_IS_VOID (recv_activity))
+ {
+ principal = (struct activity *) OBJECT (&thread->aspace,
+ recv_activity,
+ cap_activity, false,
+ NULL);
+ if (! principal)
+ {
+ DEBUG (0, "Invalid receive activity.");
+ REPLY (ENOENT);
+ }
+ }
+
+ struct messenger *messenger
+ = (struct messenger *) OBJECT (&thread->aspace,
+ recv_messenger, cap_messenger,
+ true, NULL);
+ if (! messenger)
+ {
+ DEBUG (0, "IPC includes receive phase, however, "
+ "no receive messenger provided.");
+ REPLY (EINVAL);
+ }
+
+ if ((flags & VG_IPC_RECEIVE_INLINE))
+ {
+ messenger->out_of_band = false;
+ if ((flags & VG_IPC_RECEIVE_INLINE_CAP1))
+ messenger->inline_caps[0] = recv_inline_cap;
+ }
+ else
+ {
+ messenger->out_of_band = true;
+ if (unlikely (! ADDR_IS_VOID (recv_buf)))
+ /* Associate RECV_BUF with RECV_MESSENGER. */
+ messenger->buffer = CAP (&thread->aspace, recv_buf,
+ cap_page, true);
+ }
+
+ if (unlikely ((flags & VG_IPC_RECEIVE_SET_THREAD_TO_CALLER)))
+ messenger->thread = object_to_cap ((struct object *) thread);
+
+ if (unlikely ((flags & VG_IPC_RECEIVE_SET_ASROOT_TO_CALLERS)))
+ messenger->as_root = thread->aspace;
+
+ messenger->activate_on_receive = (flags & VG_IPC_RECEIVE_ACTIVATE);
+
+ /* See if there is a messenger trying to send to
+ MESSENGER. */
+ struct messenger *sender;
+ object_wait_queue_for_each (principal,
+ (struct object *) messenger, sender)
+ if (sender->wait_reason == MESSENGER_WAIT_TRANSFER_MESSAGE)
+ /* There is. Transfer SENDER's message to MESSENGER. */
+ {
+ object_wait_queue_unlink (principal, sender);
+
+ assert (messenger->blocked);
+ messenger->blocked = 0;
+ bool ret = messenger_message_transfer (principal,
+ messenger, sender,
+ true);
+ assert (ret);
+
+ break;
+ }
+
+ if (! sender)
+ /* There was no sender waiting. */
+ {
+ if ((flags & VG_IPC_RECEIVE_NONBLOCKING))
+ /* The receive phase is non-blocking. */
+ REPLY (EWOULDBLOCK);
+ else
+ /* Unblock MESSENGER. */
+ messenger->blocked = 0;
+ }
+ }
+
+ if (! (flags & VG_IPC_SEND))
+ /* No send phase. */
+ {
+ if ((flags & VG_IPC_RETURN))
+ /* But a return phase. */
+ REPLY (0);
+
+ continue;
+ }
+
+ /* Send phase. */
+
+ if ((flags & VG_IPC_SEND_INLINE))
+ label = inline_word1;
+
principal = activity;
- addr_t principal_addr = ARG_ADDR (0);
struct cap principal_cap;
- if (! ADDR_IS_VOID (principal_addr))
+ if (! ADDR_IS_VOID (send_activity))
{
+ /* We need the cap below, otherwise, we could just use
+ OBJECT. */
principal_cap = CAP (&thread->aspace,
- principal_addr, cap_activity, false);
+ send_activity, cap_activity, false);
principal = (struct activity *) cap_to_object (principal,
&principal_cap);
if (! principal)
{
DEBUG (4, "Dangling pointer at " ADDR_FMT,
- ADDR_PRINTF (principal_addr));
+ ADDR_PRINTF (send_activity));
REPLY (ENOENT);
}
}
@@ -650,26 +822,243 @@ server_loop (void)
principal = activity;
}
+ struct messenger *source
+ = (struct messenger *) OBJECT (&thread->aspace,
+ send_messenger, cap_messenger,
+ true, NULL);
+ if (unlikely (! source))
+ {
+ DEBUG (0, "Source not valid.");
+ REPLY (ENOENT);
+ }
+
+ if (unlikely (! ADDR_IS_VOID (send_buf)))
+ source->buffer = CAP (&thread->aspace, send_buf, cap_page, true);
+
+ if (unlikely ((flags & VG_IPC_SEND_SET_THREAD_TO_CALLER)))
+ source->thread = object_to_cap ((struct object *) thread);
+
+ if (unlikely ((flags & VG_IPC_SEND_SET_ASROOT_TO_CALLERS)))
+ source->as_root = thread->aspace;
+
+ source->activate_on_send = (flags & VG_IPC_SEND_ACTIVATE);
+
+ bool target_writable = true;
+ struct object *target;
+ /* We special case VOID to mean the current thread. */
+ if (ADDR_IS_VOID (target_messenger))
+ target = (struct object *) thread;
+ else
+ target = OBJECT (&thread->aspace, target_messenger, -1, false,
+ &target_writable);
+ if (! target)
+ {
+ DEBUG (0, "Target not valid.");
+ REPLY (ENOENT);
+ }
+
+ if (object_type (target) == cap_messenger && ! target_writable)
+ /* TARGET is a weak reference to a messenger. Forward the
+ message. */
+ {
+ DEBUG (5, "IPC: " OID_FMT " -> " OID_FMT,
+ OID_PRINTF (object_oid ((struct object *) source)),
+ OID_PRINTF (object_oid ((struct object *) target)));
+
+ if ((flags & VG_IPC_SEND_INLINE))
+ {
+ source->out_of_band = false;
+ source->inline_words[0] = inline_word1;
+ source->inline_words[1] = inline_word2;
+ source->inline_caps[0] = inline_cap;
+
+ if ((flags & VG_IPC_SEND_INLINE_WORD1)
+ && (flags & VG_IPC_SEND_INLINE_WORD2))
+ source->inline_word_count = 2;
+ else if ((flags & VG_IPC_SEND_INLINE_WORD1))
+ source->inline_word_count = 1;
+ else
+ source->inline_word_count = 0;
+
+ if ((flags & VG_IPC_SEND_INLINE_CAP1))
+ source->inline_cap_count = 1;
+ else
+ source->inline_cap_count = 0;
+ }
+ else
+ source->out_of_band = true;
+
+ if (messenger_message_transfer (principal,
+ (struct messenger *) target,
+ source,
+ ! (flags & VG_IPC_SEND_NONBLOCKING)))
+ /* The messenger has been enqueued. */
+ {
+ if ((flags & VG_IPC_RETURN))
+ REPLY (0);
+ continue;
+ }
+ else
+ REPLY (ETIMEDOUT);
+ }
+
+ /* TARGET designates a kernel implemented object. Implement
+ it. */
+
+ /* The reply messenger (if any). */
+ struct messenger *reply = NULL;
+
+      /* We are now far enough along that we should not reply to the
+	 caller but to TARGET.  Set up our handy REPLY macro to do so. */
+#undef REPLY
+  /* Send a reply indicating an error with the error code ERR_.
+ Go to the start of the server loop. */
+#define REPLY(err_) \
+ do \
+ { \
+ if (err_) \
+ DEBUG (0, DEBUG_BOLD ("Returning error %d"), err_); \
+ if (reply) \
+ if (rpc_error_reply (principal, reply, err_)) \
+ DEBUG (0, DEBUG_BOLD ("Failed to send reply")); \
+ goto out; \
+ } \
+ while (0)
+
+
+ struct vg_message *message;
+ if ((flags & VG_IPC_SEND_INLINE))
+ {
+ message = reply_buffer;
+ vg_message_clear (message);
+ if ((flags & VG_IPC_SEND_INLINE_WORD1))
+ vg_message_append_word (message, inline_word1);
+ if ((flags & VG_IPC_SEND_INLINE_WORD2))
+ vg_message_append_word (message, inline_word2);
+ if ((flags & VG_IPC_SEND_INLINE_CAP1))
+ vg_message_append_cap (message, inline_cap);
+ }
+ else
+ {
+ if (source->buffer.type != cap_page)
+ {
+ DEBUG (0, "Sender user-buffer has wrong type: %s",
+ cap_type_string (source->buffer.type));
+ REPLY (EINVAL);
+ }
+ message = (struct vg_message *) cap_to_object (principal,
+ &source->buffer);
+ if (! message)
+ {
+	      DEBUG (0, "Sender user-buffer (type %s) is a dangling pointer",
+ cap_type_string (source->buffer.type));
+ REPLY (EINVAL);
+ }
+ }
+
+ label = vg_message_word (message, 0);
+
+ do_debug (5)
+ {
+ DEBUG (0, "");
+ vg_message_dump (message);
+ }
+
+ /* Extract the reply messenger (if any). */
+ if (vg_message_cap_count (message) > 0)
+      /* We only look for a messenger here.  We know that any reply
+	 that a kernel object generates and sends to another kernel
+	 object will just result in a discarded EINVAL. */
+ reply = (struct messenger *)
+ OBJECT (&thread->aspace,
+ vg_message_cap (message,
+ vg_message_cap_count (message) - 1),
+ cap_rmessenger, false, NULL);
+
+ /* There are a number of methods that look up an object relative
+ to the invoked object. Generate an appropriate root for
+ them. */
+ struct cap target_root_cap;
+ struct cap *target_root;
+ if (likely (target == (struct object *) thread))
+ target_root = &thread->aspace;
+ else if (object_type (target) == cap_thread)
+ target_root = &((struct thread *) target)->aspace;
+ else
+ {
+ target_root_cap = object_to_cap (target);
+ target_root = &target_root_cap;
+ }
+
+ DEBUG (4, OID_FMT " %s(%llx) -> " OID_FMT " %s(%llx)",
+ OID_PRINTF (object_oid ((struct object *) source)),
+ cap_type_string (object_type ((struct object *) source)),
+ source->id,
+ OID_PRINTF (object_oid ((struct object *) target)),
+ cap_type_string (object_type (target)),
+ object_type (target) == cap_messenger
+ ? ((struct messenger *) target)->id : 0);
+ if (reply)
+ DEBUG (4, "reply to: " OID_FMT "(%llx)",
+ OID_PRINTF (object_oid ((struct object *) reply)),
+ reply->id);
+
switch (label)
{
+ case RM_write:
+ {
+ struct io_buffer buffer;
+ err = rm_write_send_unmarshal (message, &buffer, NULL);
+ if (! err)
+ {
+ int i;
+ for (i = 0; i < buffer.len; i ++)
+ putchar (buffer.data[i]);
+ }
+
+ rm_write_reply (activity, reply);
+ break;
+ }
+ case RM_read:
+ {
+ int max;
+ err = rm_read_send_unmarshal (message, &max, NULL);
+ if (err)
+ {
+ DEBUG (0, "Read error!");
+ REPLY (EINVAL);
+ }
+
+ struct io_buffer buffer;
+ buffer.len = 0;
+
+ if (max > 0)
+ {
+ buffer.len = 1;
+ buffer.data[0] = getchar ();
+ }
+
+ rm_read_reply (activity, reply, buffer);
+ break;
+ }
+
case RM_fault:
{
uintptr_t start;
int max;
- err = rm_fault_send_unmarshal (&msg, &principal_addr,
- &start, &max);
+ err = rm_fault_send_unmarshal (message, &start, &max, NULL);
if (err)
REPLY (err);
- DEBUG (4, "(%p, %d)", start, max);
+ DEBUG (4, "(%p, %d)", (void *) start, max);
start &= ~(PAGESIZE - 1);
- rm_fault_reply_marshal (&msg, 0);
+ rm_fault_reply (activity, reply, 0);
int limit = (L4_NUM_MRS - 1
- l4_untyped_words (l4_msg_msg_tag (msg)))
- * sizeof (l4_word_t) / sizeof (l4_map_item_t);
+ * sizeof (uintptr_t) / sizeof (l4_map_item_t);
if (max > limit)
max = limit;
@@ -716,7 +1105,7 @@ server_loop (void)
l4_untyped_words (l4_msg_msg_tag (msg)),
l4_typed_words (l4_msg_msg_tag (msg)));
- rm_fault_reply_marshal (&msg, count);
+ rm_fault_reply (activity, reply, count);
int i;
for (i = 0; i < count; i ++)
l4_msg_append_map_item (msg, map_items[i]);
@@ -726,82 +1115,80 @@ server_loop (void)
case RM_folio_alloc:
{
- addr_t folio_addr;
- struct folio_policy policy;
+ if (object_type (target) != cap_activity_control)
+ {
+ DEBUG (0, "target " ADDR_FMT " not an activity but a %s",
+ ADDR_PRINTF (target_messenger),
+ cap_type_string (object_type (target)));
+ REPLY (EINVAL);
+ }
+
+ struct activity *activity = (struct activity *) target;
- err = rm_folio_alloc_send_unmarshal (&msg, &principal_addr,
- &folio_addr, &policy);
+ struct folio_policy policy;
+ err = rm_folio_alloc_send_unmarshal (message, &policy, NULL);
if (err)
REPLY (err);
- DEBUG (4, "(" ADDR_FMT ")", ADDR_PRINTF (folio_addr));
-
- struct cap *folio_slot = SLOT (&thread->aspace, folio_addr);
+ DEBUG (4, "(" ADDR_FMT ")", ADDR_PRINTF (target_messenger));
- struct folio *folio = folio_alloc (principal, policy);
+ struct folio *folio = folio_alloc (activity, policy);
if (! folio)
REPLY (ENOMEM);
- bool r = cap_set (principal, folio_slot,
- object_to_cap ((struct object *) folio));
- assert (r);
-
- rm_folio_alloc_reply_marshal (&msg);
+ rm_folio_alloc_reply (principal, reply,
+ object_to_cap ((struct object *) folio));
break;
}
case RM_folio_free:
{
- addr_t folio_addr;
+ if (object_type (target) != cap_folio)
+ REPLY (EINVAL);
+
+ struct folio *folio = (struct folio *) target;
- err = rm_folio_free_send_unmarshal (&msg, &principal_addr,
- &folio_addr);
+ err = rm_folio_free_send_unmarshal (message, NULL);
if (err)
REPLY (err);
- DEBUG (4, "(" ADDR_FMT ")", ADDR_PRINTF (folio_addr));
+ DEBUG (4, "(" ADDR_FMT ")", ADDR_PRINTF (target_messenger));
- struct folio *folio = (struct folio *) OBJECT (&thread->aspace,
- folio_addr,
- cap_folio, true);
folio_free (principal, folio);
- rm_folio_free_reply_marshal (&msg);
+ rm_folio_free_reply (activity, reply);
break;
}
case RM_folio_object_alloc:
{
- addr_t folio_addr;
+ if (object_type (target) != cap_folio)
+ REPLY (EINVAL);
+
+ struct folio *folio = (struct folio *) target;
+
uint32_t idx;
uint32_t type;
struct object_policy policy;
uintptr_t return_code;
- addr_t object_addr;
- addr_t object_weak_addr;
-
- err = rm_folio_object_alloc_send_unmarshal (&msg, &principal_addr,
- &folio_addr, &idx,
- &type, &policy,
- &return_code,
- &object_addr,
- &object_weak_addr);
+
+ err = rm_folio_object_alloc_send_unmarshal (message,
+ &idx, &type, &policy,
+ &return_code, NULL);
if (err)
REPLY (err);
- DEBUG (4, "(" ADDR_FMT ", %d, %s, (%s, %d), %d, "
- ADDR_FMT", "ADDR_FMT")",
-
- ADDR_PRINTF (folio_addr), idx, cap_type_string (type),
+ DEBUG (4, "(" ADDR_FMT ", %d (" ADDR_FMT "), %s, (%s, %d), %d)",
+ ADDR_PRINTF (target_messenger), idx,
+ addr_depth (target_messenger) + FOLIO_OBJECTS_LOG2
+ <= ADDR_BITS
+ ? ADDR_PRINTF (addr_extend (target_messenger,
+ idx, FOLIO_OBJECTS_LOG2))
+ : ADDR_PRINTF (ADDR_VOID),
+ cap_type_string (type),
policy.discardable ? "discardable" : "precious",
policy.priority,
- return_code,
- ADDR_PRINTF (object_addr),
- ADDR_PRINTF (object_weak_addr));
-
- struct folio *folio = (struct folio *) OBJECT (&thread->aspace,
- folio_addr,
- cap_folio, true);
+ return_code);
if (idx >= FOLIO_OBJECTS)
REPLY (EINVAL);
@@ -809,202 +1196,61 @@ server_loop (void)
if (! (CAP_TYPE_MIN <= type && type <= CAP_TYPE_MAX))
REPLY (EINVAL);
- struct cap *object_slot = NULL;
- if (! ADDR_IS_VOID (object_addr))
- object_slot = SLOT (&thread->aspace, object_addr);
-
- struct cap *object_weak_slot = NULL;
- if (! ADDR_IS_VOID (object_weak_addr))
- object_weak_slot = SLOT (&thread->aspace, object_weak_addr);
-
struct cap cap;
cap = folio_object_alloc (principal,
folio, idx, type, policy, return_code);
- if (type != cap_void)
- {
- if (object_slot)
- {
- bool r = cap_set (principal, object_slot, cap);
- assert (r);
- }
- if (object_weak_slot)
- {
- bool r = cap_set (principal, object_weak_slot, cap);
- assert (r);
- object_weak_slot->type
- = cap_type_weaken (object_weak_slot->type);
- }
- }
+ struct cap weak = cap;
+ weak.type = cap_type_weaken (cap.type);
- rm_folio_object_alloc_reply_marshal (&msg);
+ rm_folio_object_alloc_reply (activity, reply, cap, weak);
break;
}
case RM_folio_policy:
{
- addr_t folio_addr;
- l4_word_t flags;
+ if (object_type (target) != cap_folio)
+ REPLY (EINVAL);
+
+ struct folio *folio = (struct folio *) target;
+
+ uintptr_t flags;
struct folio_policy in, out;
- err = rm_folio_policy_send_unmarshal (&msg, &principal_addr,
- &folio_addr,
- &flags, &in);
+ err = rm_folio_policy_send_unmarshal (message, &flags, &in, NULL);
if (err)
REPLY (err);
DEBUG (4, "(" ADDR_FMT ", %d)",
- ADDR_PRINTF (folio_addr), flags);
-
- struct folio *folio = (struct folio *) OBJECT (&thread->aspace,
- folio_addr,
- cap_folio, true);
+ ADDR_PRINTF (target_messenger), flags);
folio_policy (principal, folio, flags, in, &out);
- rm_folio_policy_reply_marshal (&msg, out);
+ rm_folio_policy_reply (activity, reply, out);
break;
}
- case RM_object_slot_copy_out:
+ case RM_cap_copy:
{
addr_t source_as_addr;
addr_t source_addr;
struct cap source;
addr_t target_as_addr;
addr_t target_addr;
- struct cap *target;
- uint32_t idx;
uint32_t flags;
struct cap_properties properties;
- struct cap object_cap;
- struct object *object;
-
- err = rm_object_slot_copy_out_send_unmarshal
- (&msg, &principal_addr, &source_as_addr, &source_addr, &idx,
- &target_as_addr, &target_addr, &flags, &properties);
- if (err)
- REPLY (err);
-
- DEBUG (4, "(" ADDR_FMT "@" ADDR_FMT "+%d -> "
- ADDR_FMT "@" ADDR_FMT ", %s%s%s%s%s%s, %s/%d %lld/%d %d/%d",
-
- ADDR_PRINTF (source_as_addr), ADDR_PRINTF (source_addr),
- idx,
- ADDR_PRINTF (target_as_addr), ADDR_PRINTF (target_addr),
-
- CAP_COPY_COPY_ADDR_TRANS_SUBPAGE & flags
- ? "copy subpage/" : "",
- CAP_COPY_COPY_ADDR_TRANS_GUARD & flags
- ? "copy trans guard/" : "",
- CAP_COPY_COPY_SOURCE_GUARD & flags
- ? "copy src guard/" : "",
- CAP_COPY_WEAKEN & flags ? "weak/" : "",
- CAP_COPY_DISCARDABLE_SET & flags ? "discardable/" : "",
- CAP_COPY_PRIORITY_SET & flags ? "priority" : "",
-
- properties.policy.discardable ? "discardable" : "precious",
- properties.policy.priority,
- CAP_ADDR_TRANS_GUARD (properties.addr_trans),
- CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans),
- CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans),
- CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans));
-
- struct cap *root = ROOT (source_as_addr);
- object_cap = CAP (root, source_addr, -1, false);
-
- root = ROOT (target_as_addr);
-
- goto get_slot;
-
- case RM_object_slot_copy_in:
- err = rm_object_slot_copy_in_send_unmarshal
- (&msg, &principal_addr, &target_as_addr, &target_addr, &idx,
- &source_as_addr, &source_addr, &flags, &properties);
- if (err)
- REPLY (err);
-
- DEBUG (4, "(" ADDR_FMT "@" ADDR_FMT "+%d <- "
- ADDR_FMT "@" ADDR_FMT ", %s%s%s%s%s%s, %s/%d %lld/%d %d/%d",
-
- ADDR_PRINTF (target_as_addr), ADDR_PRINTF (target_addr),
- idx,
- ADDR_PRINTF (source_as_addr), ADDR_PRINTF (source_addr),
-
- CAP_COPY_COPY_ADDR_TRANS_SUBPAGE & flags
- ? "copy subpage/" : "",
- CAP_COPY_COPY_ADDR_TRANS_GUARD & flags
- ? "copy trans guard/" : "",
- CAP_COPY_COPY_SOURCE_GUARD & flags
- ? "copy src guard/" : "",
- CAP_COPY_WEAKEN & flags ? "weak/" : "",
- CAP_COPY_DISCARDABLE_SET & flags ? "discardable/" : "",
- CAP_COPY_PRIORITY_SET & flags ? "priority" : "",
-
- properties.policy.discardable ? "discardable" : "precious",
- properties.policy.priority,
- CAP_ADDR_TRANS_GUARD (properties.addr_trans),
- CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans),
- CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans),
- CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans));
-
- root = ROOT (target_as_addr);
- object_cap = CAP (root, target_addr, -1, true);
-
- root = ROOT (source_as_addr);
-
- get_slot:
- if (idx >= cap_type_num_slots[object_cap.type])
- REPLY (EINVAL);
-
- if (object_cap.type == cap_cappage
- || object_cap.type == cap_rcappage)
- /* Ensure that IDX falls within the subpage. */
- {
- if (idx >= CAP_SUBPAGE_SIZE (&object_cap))
- {
- DEBUG (1, "index (%d) >= subpage size (%d)",
- idx, CAP_SUBPAGE_SIZE (&object_cap));
- REPLY (EINVAL);
- }
-
- idx += CAP_SUBPAGE_OFFSET (&object_cap);
- }
-
- object = cap_to_object (principal, &object_cap);
- if (! object)
- {
- DEBUG (1, CAP_FMT " maps to void", CAP_PRINTF (&object_cap));
- REPLY (EINVAL);
- }
-
- if (label == RM_object_slot_copy_out)
- {
- source = ((struct cap *) object)[idx];
- target = SLOT (root, target_addr);
- }
- else
- {
- source = CAP (root, source_addr, -1, false);
- target = &((struct cap *) object)[idx];
- }
-
- goto cap_copy_body;
-
- case RM_cap_copy:
- err = rm_cap_copy_send_unmarshal (&msg,
- &principal_addr,
- &target_as_addr, &target_addr,
+ err = rm_cap_copy_send_unmarshal (message,
+ &target_addr,
&source_as_addr, &source_addr,
- &flags, &properties);
+ &flags, &properties, NULL);
if (err)
REPLY (err);
DEBUG (4, "(" ADDR_FMT "@" ADDR_FMT " <- "
ADDR_FMT "@" ADDR_FMT ", %s%s%s%s%s%s, %s/%d %lld/%d %d/%d",
- ADDR_PRINTF (target_as_addr), ADDR_PRINTF (target_addr),
+ ADDR_PRINTF (target_messenger), ADDR_PRINTF (target_addr),
ADDR_PRINTF (source_as_addr), ADDR_PRINTF (source_addr),
CAP_COPY_COPY_ADDR_TRANS_SUBPAGE & flags
@@ -1024,13 +1270,11 @@ server_loop (void)
CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans),
CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans));
- root = ROOT (target_as_addr);
- target = SLOT (root, target_addr);
-
- root = ROOT (source_as_addr);
- source = CAP (root, source_addr, -1, false);
+ struct cap *target;
+ target = SLOT (target_root, target_addr);
- cap_copy_body:;
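+ /* The target slot has been resolved above; TARGET_ROOT is now
+ reused to look up the source capability. */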
+ target_root = ROOT (source_as_addr);
+ source = CAP (target_root, source_addr, -1, false);
if ((flags & ~(CAP_COPY_COPY_ADDR_TRANS_SUBPAGE
| CAP_COPY_COPY_ADDR_TRANS_GUARD
@@ -1091,165 +1335,98 @@ server_loop (void)
}
}
- switch (label)
- {
- case RM_object_slot_copy_out:
- rm_object_slot_copy_out_reply_marshal (&msg);
- break;
- case RM_object_slot_copy_in:
- rm_object_slot_copy_in_reply_marshal (&msg);
- break;
- case RM_cap_copy:
- rm_cap_copy_reply_marshal (&msg);
+ rm_cap_copy_reply (activity, reply);
#if 0
- /* XXX: Surprisingly, it appears that this may be
- more expensive than just faulting the pages
- normally. This needs more investivation. */
- if (ADDR_IS_VOID (target_as_addr)
- && cap_types_compatible (target->type, cap_page)
- && CAP_GUARD_BITS (target) == 0
- && addr_depth (target_addr) == ADDR_BITS - PAGESIZE_LOG2)
- /* The target address space is the caller's. The target
- object appears to be a page. It seems to be
- installed at a point where it would appear in the
- hardware address space. If this is really the case,
- then we can map it now and save a fault later. */
- {
- profile_region ("cap_copy-prefault");
-
- struct cap cap = *target;
- if (target->type == cap_rpage)
- cap.discardable = false;
+ /* XXX: Surprisingly, it appears that this may be
+ more expensive than just faulting the pages
+ normally. This needs more investigation. */
+ if (ADDR_IS_VOID (target_as_addr)
+ && cap_types_compatible (target->type, cap_page)
+ && CAP_GUARD_BITS (target) == 0
+ && addr_depth (target_addr) == ADDR_BITS - PAGESIZE_LOG2)
+ /* The target address space is the caller's. The target
+ object appears to be a page. It seems to be
+ installed at a point where it would appear in the
+ hardware address space. If this is really the case,
+ then we can map it now and save a fault later. */
+ {
+ profile_region ("cap_copy-prefault");
- struct object *page = cap_to_object_soft (principal, &cap);
- if (page)
- {
- object_to_object_desc (page)->mapped = true;
+ struct cap cap = *target;
+ if (target->type == cap_rpage)
+ cap.discardable = false;
- l4_fpage_t fpage
- = l4_fpage ((uintptr_t) page, PAGESIZE);
- fpage = l4_fpage_add_rights (fpage, L4_FPAGE_READABLE);
- if (cap.type == cap_page)
- fpage = l4_fpage_add_rights (fpage,
- L4_FPAGE_WRITABLE);
+ struct object *page = cap_to_object_soft (principal, &cap);
+ if (page)
+ {
+ object_to_object_desc (page)->mapped = true;
- l4_word_t page_addr = addr_prefix (target_addr);
+ l4_fpage_t fpage
+ = l4_fpage ((uintptr_t) page, PAGESIZE);
+ fpage = l4_fpage_add_rights (fpage, L4_FPAGE_READABLE);
+ if (cap.type == cap_page)
+ fpage = l4_fpage_add_rights (fpage,
+ L4_FPAGE_WRITABLE);
- l4_map_item_t map_item = l4_map_item (fpage, page_addr);
+ uintptr_t page_addr = addr_prefix (target_addr);
- l4_msg_append_map_item (msg, map_item);
- }
+ l4_map_item_t map_item = l4_map_item (fpage, page_addr);
- profile_region_end ();
+ l4_msg_append_map_item (msg, map_item);
}
-#endif
- break;
+ profile_region_end ();
}
+#endif
+
break;
}
case RM_cap_rubout:
{
- addr_t target_as_addr;
- addr_t target_addr;
+ addr_t addr;
- err = rm_cap_rubout_send_unmarshal (&msg,
- &principal_addr,
- &target_as_addr,
- &target_addr);
+ err = rm_cap_rubout_send_unmarshal (message, &addr, NULL);
if (err)
REPLY (err);
DEBUG (4, ADDR_FMT "@" ADDR_FMT,
- ADDR_PRINTF (target_as_addr),
- ADDR_PRINTF (target_addr));
-
- struct cap *root = ROOT (target_as_addr);
-
- /* We don't look up the argument directly as we need to
- respect any subpag specification for cappages. */
- struct cap *target = SLOT (root, target_addr);
-
- cap_shootdown (principal, target);
-
- memset (target, 0, sizeof (*target));
-
- rm_cap_rubout_reply_marshal (&msg);
- break;
- }
-
- case RM_object_slot_read:
- {
- addr_t root_addr;
- addr_t source_addr;
- uint32_t idx;
-
- err = rm_object_slot_read_send_unmarshal (&msg,
- &principal_addr,
- &root_addr,
- &source_addr, &idx);
- if (err)
- REPLY (err);
-
- DEBUG (4, ADDR_FMT "@" ADDR_FMT "+%d",
- ADDR_PRINTF (root_addr), ADDR_PRINTF (source_addr), idx);
-
- struct cap *root = ROOT (root_addr);
+ ADDR_PRINTF (target_messenger),
+ ADDR_PRINTF (addr));
/* We don't look up the argument directly as we need to
- respect any subpag specification for cappages. */
- struct cap source = CAP (root, source_addr, -1, false);
+ respect any subpage specification for cappages. */
+ struct cap *slot = SLOT (target_root, addr);
- struct object *object = cap_to_object (principal, &source);
- if (! object)
- REPLY (EINVAL);
+ cap_shootdown (principal, slot);
- if (idx >= cap_type_num_slots[source.type])
- REPLY (EINVAL);
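+ /* Clear the slot now that any mappings derived from it have
+ been shot down. */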
+ memset (slot, 0, sizeof (*slot));
- if (source.type == cap_cappage || source.type == cap_rcappage)
- /* Ensure that idx falls within the subpage. */
- {
- if (idx >= CAP_SUBPAGE_SIZE (&source))
- REPLY (EINVAL);
-
- idx += CAP_SUBPAGE_OFFSET (&source);
- }
-
- source = ((struct cap *) object)[idx];
-
- rm_object_slot_read_reply_marshal (&msg, source.type,
- CAP_PROPERTIES_GET (source));
+ rm_cap_rubout_reply (activity, reply);
break;
}
case RM_cap_read:
{
- addr_t root_addr;
addr_t cap_addr;
- err = rm_cap_read_send_unmarshal (&msg, &principal_addr,
- &root_addr,
- &cap_addr);
+ err = rm_cap_read_send_unmarshal (message, &cap_addr, NULL);
if (err)
REPLY (err);
DEBUG (4, ADDR_FMT "@" ADDR_FMT,
- ADDR_PRINTF (root_addr), ADDR_PRINTF (cap_addr));
+ ADDR_PRINTF (target_messenger), ADDR_PRINTF (cap_addr));
- struct cap *root = ROOT (root_addr);
-
- struct cap cap = CAP (root, cap_addr, -1, false);
+ struct cap cap = CAP (target_root, cap_addr, -1, false);
/* Even if CAP.TYPE is not void, the cap may not designate
an object. Looking up the object will set CAP.TYPE to
cap_void if this is the case. */
if (cap.type != cap_void)
cap_to_object (principal, &cap);
- rm_cap_read_reply_marshal (&msg, cap.type,
- CAP_PROPERTIES_GET (cap));
+ rm_cap_read_reply (activity, reply, cap.type,
+ CAP_PROPERTIES_GET (cap));
break;
}
@@ -1258,15 +1435,16 @@ server_loop (void)
addr_t object_addr;
err = rm_object_discarded_clear_send_unmarshal
- (&msg, &principal_addr, &object_addr);
+ (message, &object_addr, NULL);
if (err)
REPLY (err);
DEBUG (4, ADDR_FMT, ADDR_PRINTF (object_addr));
- /* We can't look up the object here as object_lookup
- returns NULL if the object's discardable bit is
- set! Instead, we lookup the capability. */
+ /* We can't look up the object using OBJECT, as object_lookup
+ returns NULL if the object's discardable bit is set!
+ Instead, we look up the capability, find the object's
+ folio and then clear its discarded bit. */
struct cap cap = CAP (&thread->aspace, object_addr, -1, true);
if (cap.type == cap_void)
REPLY (ENOENT);
@@ -1285,7 +1463,7 @@ server_loop (void)
bool was_discarded = folio_object_discarded (folio, idx);
folio_object_discarded_set (folio, idx, false);
- rm_object_discarded_clear_reply_marshal (&msg);
+ rm_object_discarded_clear_reply (activity, reply);
#if 0
/* XXX: Surprisingly, it appears that this may be more
@@ -1313,7 +1491,7 @@ server_loop (void)
L4_FPAGE_READABLE
| L4_FPAGE_WRITABLE);
- l4_word_t page_addr = addr_prefix (object_addr);
+ uintptr_t page_addr = addr_prefix (object_addr);
l4_map_item_t map_item = l4_map_item (fpage, page_addr);
@@ -1334,87 +1512,32 @@ server_loop (void)
case RM_object_discard:
{
- addr_t object_addr;
-
- err = rm_object_discard_send_unmarshal
- (&msg, &principal_addr, &object_addr);
+ err = rm_object_discard_send_unmarshal (message, NULL);
if (err)
REPLY (err);
- DEBUG (4, ADDR_FMT, ADDR_PRINTF (object_addr));
+ DEBUG (4, ADDR_FMT, ADDR_PRINTF (target_messenger));
- /* We can't look up the object here as object_lookup
- returns NULL if the object's discardable bit is
- set! Instead, we lookup the capability. */
- struct cap cap = CAP (&thread->aspace, object_addr, -1, true);
- if (cap.type == cap_void)
- REPLY (ENOENT);
- if (cap_type_weak_p (cap.type))
- REPLY (EPERM);
-
- struct folio *folio;
- int offset;
-
- struct object *object = cap_to_object_soft (principal, &cap);
- if (object)
- {
- struct object_desc *desc = object_to_object_desc (object);
-
- folio = objects_folio (principal, object);
- offset = objects_folio_offset (object);
-
- ACTIVITY_STATS (desc->activity)->discarded ++;
-
- memory_object_destroy (principal, object);
- memory_frame_free ((uintptr_t) object);
-
- /* Consistent with the API, we do NOT set the discarded
- bit. */
-
- folio_object_content_set (folio, offset, false);
-
- assertx (! cap_to_object_soft (principal, &cap),
- ADDR_FMT ": " CAP_FMT,
- ADDR_PRINTF (object_addr), CAP_PRINTF (&cap));
- }
- else
- /* The object is not in memory, however, we can still
- clear it's content bit. */
- {
- offset = (cap.oid % (1 + FOLIO_OBJECTS)) - 1;
- oid_t foid = cap.oid - offset - 1;
-
- folio = (struct folio *)
- object_find (activity, foid, OBJECT_POLICY_VOID);
-
- if (folio_object_version (folio, offset) != cap.version)
- /* Or not, seems the object is gone! */
- REPLY (ENOENT);
- }
+ struct folio *folio = objects_folio (principal, target);
- folio_object_content_set (folio, offset, false);
+ folio_object_content_set (folio,
+ objects_folio_offset (target), false);
- rm_object_discard_reply_marshal (&msg);
+ rm_object_discard_reply (activity, reply);
break;
}
case RM_object_status:
{
- addr_t object_addr;
bool clear;
-
- err = rm_object_status_send_unmarshal
- (&msg, &principal_addr, &object_addr, &clear);
+ err = rm_object_status_send_unmarshal (message, &clear, NULL);
if (err)
REPLY (err);
DEBUG (4, ADDR_FMT ", %sclear",
- ADDR_PRINTF (object_addr), clear ? "" : "no ");
+ ADDR_PRINTF (target_messenger), clear ? "" : "no ");
- struct object *object = OBJECT (&thread->aspace,
- object_addr, -1, true);
-
- struct object_desc *desc = object_to_object_desc (object);
+ struct object_desc *desc = object_to_object_desc (target);
uintptr_t status = (desc->user_referenced ? object_referenced : 0)
| (desc->user_dirty ? object_dirty : 0);
@@ -1424,103 +1547,120 @@ server_loop (void)
desc->user_dirty = 0;
}
- rm_object_status_reply_marshal (&msg, status);
+ rm_object_status_reply (activity, reply, status);
break;
}
case RM_object_name:
{
- addr_t object_addr;
struct object_name name;
+ err = rm_object_name_send_unmarshal (message, &name, NULL);
- err = rm_object_name_send_unmarshal
- (&msg, &principal_addr, &object_addr, &name);
-
- struct object *object = OBJECT (&thread->aspace,
- object_addr, -1, false);
-
- if (object_type (object) == cap_activity_control)
+ if (object_type (target) == cap_activity_control)
{
- struct activity *a = (struct activity *) object;
+ struct activity *a = (struct activity *) target;
memcpy (a->name.name, name.name, sizeof (name));
a->name.name[sizeof (a->name.name) - 1] = 0;
}
- else if (object_type (object) == cap_thread)
+ else if (object_type (target) == cap_thread)
{
- struct thread *t = (struct thread *) object;
+ struct thread *t = (struct thread *) target;
memcpy (t->name.name, name.name, sizeof (name));
t->name.name[sizeof (t->name.name) - 1] = 0;
}
- rm_object_name_reply_marshal (&msg);
+ rm_object_name_reply (activity, reply);
break;
}
case RM_thread_exregs:
{
+ if (object_type (target) != cap_thread)
+ REPLY (EINVAL);
+ struct thread *t = (struct thread *) target;
+
struct hurd_thread_exregs_in in;
- addr_t target;
- l4_word_t control;
- err = rm_thread_exregs_send_unmarshal (&msg,
- &principal_addr, &target,
- &control, &in);
+ uintptr_t control;
+ addr_t aspace_addr;
+ addr_t activity_addr;
+ addr_t utcb_addr;
+ addr_t exception_messenger_addr;
+ err = rm_thread_exregs_send_unmarshal
+ (message, &control, &in,
+ &aspace_addr, &activity_addr, &utcb_addr,
+ &exception_messenger_addr,
+ NULL);
if (err)
REPLY (err);
- DEBUG (4, ADDR_FMT, ADDR_PRINTF (target));
-
- struct thread *t
- = (struct thread *) OBJECT (&thread->aspace,
- target, cap_thread, true);
-
- struct cap *aspace = NULL;
- struct cap aspace_cap;
+ int d = 4;
+ DEBUG (d, "%s%s" ADDR_FMT "(%x): %s%s%s%s %s%s%s%s %s%s%s %s%s",
+ t->name.name[0] ? t->name.name : "",
+ t->name.name[0] ? ": " : "",
+ ADDR_PRINTF (target_messenger), t->tid,
+ (control & HURD_EXREGS_SET_UTCB) ? "U" : "-",
+ (control & HURD_EXREGS_SET_EXCEPTION_MESSENGER) ? "E" : "-",
+ (control & HURD_EXREGS_SET_ASPACE) ? "R" : "-",
+ (control & HURD_EXREGS_SET_ACTIVITY) ? "A" : "-",
+ (control & HURD_EXREGS_SET_SP) ? "S" : "-",
+ (control & HURD_EXREGS_SET_IP) ? "I" : "-",
+ (control & HURD_EXREGS_SET_EFLAGS) ? "F" : "-",
+ (control & HURD_EXREGS_SET_USER_HANDLE) ? "U" : "-",
+ (control & _L4_XCHG_REGS_CANCEL_RECV) ? "R" : "-",
+ (control & _L4_XCHG_REGS_CANCEL_SEND) ? "S" : "-",
+ (control & _L4_XCHG_REGS_CANCEL_IPC) ? "I" : "-",
+ (control & _L4_XCHG_REGS_HALT) ? "H" : "-",
+ (control & _L4_XCHG_REGS_SET_HALT) ? "Y" : "N");
+
+ if ((control & HURD_EXREGS_SET_UTCB))
+ DEBUG (d, "utcb: " ADDR_FMT, ADDR_PRINTF (utcb_addr));
+ if ((control & HURD_EXREGS_SET_EXCEPTION_MESSENGER))
+ DEBUG (d, "exception messenger: " ADDR_FMT,
+ ADDR_PRINTF (exception_messenger_addr));
+ if ((control & HURD_EXREGS_SET_ASPACE))
+ DEBUG (d, "aspace: " ADDR_FMT, ADDR_PRINTF (aspace_addr));
+ if ((control & HURD_EXREGS_SET_ACTIVITY))
+ DEBUG (d, "activity: " ADDR_FMT, ADDR_PRINTF (activity_addr));
+ if ((control & HURD_EXREGS_SET_SP))
+ DEBUG (d, "sp: %p", (void *) in.sp);
+ if ((control & HURD_EXREGS_SET_IP))
+ DEBUG (d, "ip: %p", (void *) in.ip);
+ if ((control & HURD_EXREGS_SET_EFLAGS))
+ DEBUG (d, "eflags: %p", (void *) in.eflags);
+ if ((control & HURD_EXREGS_SET_USER_HANDLE))
+ DEBUG (d, "user_handle: %p", (void *) in.user_handle);
+
+ struct cap aspace = CAP_VOID;
if ((HURD_EXREGS_SET_ASPACE & control))
- {
- aspace_cap = CAP (&thread->aspace, in.aspace, -1, false);
- aspace = &aspace_cap;
- }
+ aspace = CAP (&thread->aspace, aspace_addr, -1, false);
- struct cap *a = NULL;
- struct cap a_cap;
+ struct cap a = CAP_VOID;
if ((HURD_EXREGS_SET_ACTIVITY & control))
{
- if (ADDR_IS_VOID (in.activity))
- a = &thread->activity;
+ /* XXX: Remove this hack... */
+ if (ADDR_IS_VOID (activity_addr))
+ a = thread->activity;
else
- {
- a_cap = CAP (&thread->aspace,
- in.activity, cap_activity, false);
- a = &a_cap;
- }
- }
-
- struct cap *exception_page = NULL;
- struct cap exception_page_cap;
- if ((HURD_EXREGS_SET_EXCEPTION_PAGE & control))
- {
- exception_page_cap = CAP (&thread->aspace,
- in.exception_page, cap_page, true);
- exception_page = &exception_page_cap;
+ a = CAP (&thread->aspace,
+ activity_addr, cap_activity, false);
}
- struct cap *aspace_out = NULL;
- if ((HURD_EXREGS_GET_REGS & control)
- && ! ADDR_IS_VOID (in.aspace_out))
- aspace_out = SLOT (&thread->aspace, in.aspace_out);
+ struct cap utcb = CAP_VOID;
+ if ((HURD_EXREGS_SET_UTCB & control))
+ utcb = CAP (&thread->aspace, utcb_addr, cap_page, true);
- struct cap *activity_out = NULL;
- if ((HURD_EXREGS_GET_REGS & control)
- && ! ADDR_IS_VOID (in.activity_out))
- activity_out = SLOT (&thread->aspace, in.activity_out);
+ struct cap exception_messenger = CAP_VOID;
+ if ((HURD_EXREGS_SET_EXCEPTION_MESSENGER & control))
+ exception_messenger
+ = CAP (&thread->aspace, exception_messenger_addr,
+ cap_rmessenger, false);
- struct cap *exception_page_out = NULL;
- if ((HURD_EXREGS_GET_REGS & control)
- && ! ADDR_IS_VOID (in.exception_page_out))
- exception_page_out = SLOT (&thread->aspace,
- in.exception_page_out);
+ struct cap aspace_out = thread->aspace;
+ struct cap activity_out = thread->activity;
+ struct cap utcb_out = thread->utcb;
+ struct cap exception_messenger_out = thread->exception_messenger;
struct hurd_thread_exregs_out out;
out.sp = in.sp;
@@ -1530,56 +1670,94 @@ server_loop (void)
err = thread_exregs (principal, t, control,
aspace, in.aspace_cap_properties_flags,
- in.aspace_cap_properties, a, exception_page,
+ in.aspace_cap_properties,
+ a, utcb, exception_messenger,
&out.sp, &out.ip,
- &out.eflags, &out.user_handle,
- aspace_out, activity_out,
- exception_page_out);
+ &out.eflags, &out.user_handle);
if (err)
REPLY (err);
- rm_thread_exregs_reply_marshal (&msg, out);
+ rm_thread_exregs_reply (activity, reply, out,
+ aspace_out, activity_out,
+ utcb_out, exception_messenger_out);
break;
}
- case RM_thread_wait_object_destroyed:
+ case RM_thread_id:
{
- addr_t addr;
- err = rm_thread_wait_object_destroyed_send_unmarshal
- (&msg, &principal_addr, &addr);
+ if (object_type (target) != cap_thread)
+ REPLY (EINVAL);
+ struct thread *t = (struct thread *) target;
+
+ err = rm_thread_id_send_unmarshal (message, NULL);
if (err)
REPLY (err);
- DEBUG (4, ADDR_FMT, ADDR_PRINTF (addr));
+ rm_thread_id_reply (activity, reply, t->tid);
+ break;
+ }
+
+ case RM_object_reply_on_destruction:
+ {
+ err = rm_object_reply_on_destruction_send_unmarshal (message,
+ NULL);
+ if (err)
+ REPLY (err);
- struct object *object = OBJECT (&thread->aspace, addr, -1, true);
+ DEBUG (4, ADDR_FMT, ADDR_PRINTF (target_messenger));
- thread->wait_reason = THREAD_WAIT_DESTROY;
- object_wait_queue_enqueue (principal, object, thread);
+ reply->wait_reason = MESSENGER_WAIT_DESTROY;
+ object_wait_queue_enqueue (principal, target, reply);
- do_reply = 0;
break;
}
case RM_activity_policy:
{
+ if (object_type (target) != cap_activity_control)
+ {
+ DEBUG (0, "expects an activity, not a %s",
+ cap_type_string (object_type (target)));
+ REPLY (EINVAL);
+ }
+ struct activity *activity = (struct activity *) target;
+
uintptr_t flags;
struct activity_policy in;
- err = rm_activity_policy_send_unmarshal (&msg, &principal_addr,
- &flags, &in);
+ err = rm_activity_policy_send_unmarshal (message, &flags, &in,
+ NULL);
if (err)
REPLY (err);
- DEBUG (4, "");
-
- if (principal_cap.type != cap_activity_control
+ int d = 4;
+ DEBUG (d, "(%s) child: %s%s; sibling: %s%s; storage: %s",
+ target_writable ? "strong" : "weak",
+ (flags & ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET) ? "P" : "-",
+ (flags & ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET) ? "W" : "-",
+ (flags & ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET)
+ ? "P" : "-",
+ (flags & ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET) ? "W" : "-",
+ (flags & ACTIVITY_POLICY_STORAGE_SET) ? "P" : "-");
+
+ if ((flags & ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET))
+ DEBUG (d, "Child priority: %d", in.child_rel.priority);
+ if ((flags & ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET))
+ DEBUG (d, "Child weight: %d", in.child_rel.weight);
+ if ((flags & ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET))
+ DEBUG (d, "Sibling priority: %d", in.sibling_rel.priority);
+ if ((flags & ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET))
+ DEBUG (d, "Sibling weight: %d", in.sibling_rel.weight);
+ if ((flags & ACTIVITY_POLICY_STORAGE_SET))
+ DEBUG (d, "Storage: %d", in.folios);
+
+ if (! target_writable
&& (flags & (ACTIVITY_POLICY_STORAGE_SET
| ACTIVITY_POLICY_CHILD_REL_SET)))
REPLY (EPERM);
- rm_activity_policy_reply_marshal (&msg, principal->policy);
+ rm_activity_policy_reply (principal, reply, activity->policy);
if ((flags & (ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET
| ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET
@@ -1602,7 +1780,7 @@ server_loop (void)
if ((flags & ACTIVITY_POLICY_STORAGE_SET))
p.folios = in.folios;
- activity_policy_update (principal, p);
+ activity_policy_update (activity, p);
}
break;
@@ -1610,32 +1788,36 @@ server_loop (void)
case RM_activity_info:
{
+ if (object_type (target) != cap_activity_control)
+ REPLY (EINVAL);
+ struct activity *activity = (struct activity *) target;
+
uintptr_t flags;
uintptr_t until_period;
- err = rm_activity_info_send_unmarshal (&msg, &principal_addr,
- &flags,
- &until_period);
+ err = rm_activity_info_send_unmarshal (message,
+ &flags, &until_period,
+ NULL);
if (err)
REPLY (err);
- int period = principal->current_period - 1;
+ int period = activity->current_period - 1;
if (period < 0)
period = (ACTIVITY_STATS_PERIODS + 1) + period;
DEBUG (4, OBJECT_NAME_FMT ": %s%s%s(%d), "
"period: %d (current: %d)",
- OBJECT_NAME_PRINTF ((struct object *) principal),
+ OBJECT_NAME_PRINTF ((struct object *) activity),
flags & activity_info_stats ? "stats" : "",
(flags == (activity_info_pressure|activity_info_stats))
? ", " : "",
flags & activity_info_pressure ? "pressure" : "",
flags,
- until_period, principal->stats[period].period);
+ until_period, activity->stats[period].period);
if ((flags & activity_info_stats)
- && principal->stats[period].period > 0
- && principal->stats[period].period >= until_period)
+ && activity->stats[period].period > 0
+ && activity->stats[period].period >= until_period)
/* Return the available statistics. */
{
/* XXX: Only return valid stat buffers. */
@@ -1645,29 +1827,25 @@ server_loop (void)
int i;
for (i = 0; i < ACTIVITY_STATS_PERIODS; i ++)
{
- period = principal->current_period - 1 - i;
+ period = activity->current_period - 1 - i;
if (period < 0)
period = (ACTIVITY_STATS_PERIODS + 1) + period;
- info.stats.stats[i] = principal->stats[period];
+ info.stats.stats[i] = activity->stats[period];
}
info.stats.count = ACTIVITY_STATS_PERIODS;
- rm_activity_info_reply_marshal (&msg, info);
+ rm_activity_info_reply (principal, reply, info);
}
else if (flags)
/* Queue thread on the activity. */
{
- thread->wait_reason = THREAD_WAIT_ACTIVITY_INFO;
- thread->wait_reason_arg = flags;
- thread->wait_reason_arg2 = until_period;
+ reply->wait_reason = MESSENGER_WAIT_ACTIVITY_INFO;
+ reply->wait_reason_arg = flags;
+ reply->wait_reason_arg2 = until_period;
- object_wait_queue_enqueue (principal,
- (struct object *) principal,
- thread);
-
- do_reply = 0;
+ object_wait_queue_enqueue (principal, target, reply);
}
else
REPLY (EINVAL);
@@ -1675,36 +1853,30 @@ server_loop (void)
break;
}
- case RM_exception_collect:
+ case RM_thread_activation_collect:
{
- /* We don't expect a principal. */
- err = rm_exception_collect_send_unmarshal (&msg, &principal_addr);
+ if (object_type (target) != cap_thread)
+ REPLY (EINVAL);
+
+ err = rm_thread_activation_collect_send_unmarshal (message, NULL);
if (err)
REPLY (err);
- panic ("Collecting exception: %x", from);
-#warning exception_collect not implemented
-
- /* XXX: Implement me. */
+ thread_deliver_pending (principal, (struct thread *) target);
+ rm_thread_activation_collect_reply (principal, reply);
break;
}
case RM_as_dump:
{
- addr_t root_addr;
- err = rm_as_dump_send_unmarshal (&msg, &principal_addr,
- &root_addr);
+ err = rm_as_dump_send_unmarshal (message, NULL);
if (err)
REPLY (err);
- DEBUG (4, "");
-
- struct cap *root = ROOT (root_addr);
-
- as_dump_from (principal, root, "");
+ as_dump_from (principal, target_root, "");
- rm_as_dump_reply_marshal (&msg);
+ rm_as_dump_reply (activity, reply);
break;
}
@@ -1716,22 +1888,22 @@ server_loop (void)
int to_requeue, struct object *object2, int offset2)
{
int count = 0;
- struct thread *t;
+ struct messenger *m;
- object_wait_queue_for_each (principal, object1, t)
- if (t->wait_reason == THREAD_WAIT_FUTEX
- && t->wait_reason_arg == offset1)
+ object_wait_queue_for_each (principal, object1, m)
+ if (m->wait_reason == MESSENGER_WAIT_FUTEX
+ && m->wait_reason_arg == offset1)
/* Got a match. */
{
if (count < to_wake)
{
- object_wait_queue_dequeue (principal, t);
+ object_wait_queue_unlink (principal, m);
- debug (5, "Waking thread %x", t->tid);
+ debug (5, "Waking messenger");
- err = rm_futex_reply (t->tid, 0);
+ err = rm_futex_reply (principal, m, 0);
if (err)
- panic ("Error futex waking %x: %d", t->tid, err);
+ panic ("Error futex waking: %d", err);
count ++;
@@ -1740,10 +1912,10 @@ server_loop (void)
}
else
{
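+ /* The wake quota is exhausted; requeue the messenger on
+ OBJECT2 at OFFSET2 instead of waking it. */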
- object_wait_queue_dequeue (principal, t);
+ object_wait_queue_unlink (principal, m);
- t->wait_reason_arg = offset2;
- object_wait_queue_enqueue (principal, object2, t);
+ m->wait_reason_arg = offset2;
+ object_wait_queue_enqueue (principal, object2, m);
count ++;
@@ -1763,10 +1935,10 @@ server_loop (void)
void *addr2;
union futex_val3 val3;
- err = rm_futex_send_unmarshal (&msg, &principal_addr,
+ err = rm_futex_send_unmarshal (message,
&addr1, &op, &val1,
&timeout, &val2,
- &addr2, &val3);
+ &addr2, &val3, NULL);
if (err)
REPLY (err);
@@ -1790,14 +1962,12 @@ server_loop (void)
char *mode = "unknown";
- struct object *page = cap_to_object (principal,
- &thread->exception_page);
+ struct object *page = cap_to_object (principal, &thread->utcb);
if (page && object_type (page) == cap_page)
{
- struct exception_page *exception_page
- = (struct exception_page *) page;
+ struct vg_utcb *utcb = (struct vg_utcb *) page;
- if (exception_page->activated_mode)
+ if (utcb->activated_mode)
mode = "activated";
else
mode = "normal";
@@ -1821,7 +1991,7 @@ server_loop (void)
addr_t addr = addr_chop (PTR_TO_ADDR (addr1), PAGESIZE_LOG2);
struct object *object1 = OBJECT (&thread->aspace,
- addr, cap_page, true);
+ addr, cap_page, true, NULL);
int offset1 = (uintptr_t) addr1 & (PAGESIZE - 1);
int *vaddr1 = (void *) object1 + offset1;
@@ -1834,17 +2004,15 @@ server_loop (void)
if (timeout)
panic ("Timeouts not yet supported");
- thread->wait_reason = THREAD_WAIT_FUTEX;
- thread->wait_reason_arg = offset1;
+ reply->wait_reason = MESSENGER_WAIT_FUTEX;
+ reply->wait_reason_arg = offset1;
- object_wait_queue_enqueue (principal, object1, thread);
+ object_wait_queue_enqueue (principal, object1, reply);
#ifndef NDEBUG
- futex_waiter_list_enqueue (&futex_waiters, thread);
+ futex_waiter_list_enqueue (&futex_waiters, reply);
#endif
- /* Don't reply. */
- do_reply = 0;
break;
case FUTEX_WAKE:
@@ -1856,13 +2024,13 @@ server_loop (void)
REPLY (EINVAL);
int count = wake (val1, object1, offset1, 0, 0, 0);
- rm_futex_reply_marshal (&msg, count);
+ rm_futex_reply (activity, reply, count);
break;
case FUTEX_WAKE_OP:
addr = addr_chop (PTR_TO_ADDR (addr2), PAGESIZE_LOG2);
struct object *object2 = OBJECT (&thread->aspace,
- addr, cap_page, true);
+ addr, cap_page, true, NULL);
int offset2 = (uintptr_t) addr2 & (PAGESIZE - 1);
int *vaddr2 = (void *) object2 + offset2;
@@ -1914,7 +2082,7 @@ server_loop (void)
if (comparison)
count += wake (val2.value, object2, offset2, 0, 0, 0);
- rm_futex_reply_marshal (&msg, 0);
+ rm_futex_reply (activity, reply, 0);
break;
case FUTEX_CMP_REQUEUE:
@@ -1929,23 +2097,51 @@ server_loop (void)
/* Get the second object. */
addr = addr_chop (PTR_TO_ADDR (addr2), PAGESIZE_LOG2);
- object2 = OBJECT (&thread->aspace, addr, cap_page, true);
+ object2 = OBJECT (&thread->aspace, addr, cap_page, true, NULL);
offset2 = (uintptr_t) addr2 & (PAGESIZE - 1);
count = wake (val1, object1, offset1,
val2.value, object2, offset2);
- rm_futex_reply_marshal (&msg, count);
+ rm_futex_reply (activity, reply, count);
break;
}
break;
}
+ case VG_messenger_id:
+ {
+ if (object_type (target) != cap_messenger || ! target_writable)
+ REPLY (EINVAL);
+ struct messenger *m = (struct messenger *) target;
+
+ uint64_t id;
+ err = vg_messenger_id_send_unmarshal (message, &id, NULL);
+ if (err)
+ REPLY (EINVAL);
+
+ uint64_t old = m->id;
+ m->id = id;
+
+ vg_messenger_id_reply (principal, reply, old);
+
+ break;
+ }
+
default:
/* XXX: Don't panic when running production code. */
DEBUG (1, "Didn't handle message from %x.%x with label %d",
l4_thread_no (from), l4_version (from), label);
}
+
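+ /* VG_IPC_RETURN is set: acknowledge the request by replying with
+ a single zero word. */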
+ if ((flags & VG_IPC_RETURN))
+ {
+ l4_msg_clear (msg);
+ l4_msg_put_word (msg, 0, 0);
+ l4_msg_set_untyped_words (msg, 1);
+ do_reply = 1;
+ }
+
out:;
}
diff --git a/viengoos/thread.c b/viengoos/thread.c
index 6ce0197..bdba441 100644
--- a/viengoos/thread.c
+++ b/viengoos/thread.c
@@ -26,12 +26,14 @@
#include <hurd/exceptions.h>
#include <hurd/thread.h>
#include <bit-array.h>
+#include <backtrace.h>
#include "cap.h"
#include "object.h"
#include "thread.h"
#include "activity.h"
#include "zalloc.h"
+#include "messenger.h"
#include <hurd/trace.h>
#define THREAD_VERSION 2
@@ -89,7 +91,7 @@ thread_init (struct thread *thread)
size_t size = PAGESIZE * 10;
void *buffer = (void *) zalloc (size);
if (! buffer)
- panic ("Failed to allocate memory for thread has.");
+ panic ("Failed to allocate memory for thread hash.");
hurd_ihash_init_with_buffer (&tid_to_thread, false, HURD_IHASH_NO_LOCP,
buffer, size);
@@ -132,10 +134,6 @@ thread_deinit (struct activity *activity, struct thread *thread)
if (thread->commissioned)
thread_decommission (thread);
- if (thread->wait_queue_p)
- /* THREAD is attached to a wait queue. Detach it. */
- object_wait_queue_dequeue (activity, thread);
-
/* Free the thread id. */
bit_dealloc (thread_ids,
l4_thread_no (thread->tid) - THREAD_ID_BASE);
@@ -254,15 +252,14 @@ control_to_string (l4_word_t control, char string[33])
error_t
thread_exregs (struct activity *principal,
- struct thread *thread, l4_word_t control,
- struct cap *aspace,
- l4_word_t flags, struct cap_properties properties,
- struct cap *activity,
- struct cap *exception_page,
- l4_word_t *sp, l4_word_t *ip,
- l4_word_t *eflags, l4_word_t *user_handle,
- struct cap *aspace_out, struct cap *activity_out,
- struct cap *exception_page_out)
+ struct thread *thread, uintptr_t control,
+ struct cap aspace,
+ uintptr_t flags, struct cap_properties properties,
+ struct cap activity,
+ struct cap utcb,
+ struct cap exception_messenger,
+ uintptr_t *sp, uintptr_t *ip,
+ uintptr_t *eflags, uintptr_t *user_handle)
{
if ((control & ~(HURD_EXREGS_SET_REGS
| HURD_EXREGS_GET_REGS
@@ -274,36 +271,26 @@ thread_exregs (struct activity *principal,
return EINVAL;
}
- if ((control & HURD_EXREGS_GET_REGS) && aspace_out)
- cap_copy (principal,
- ADDR_VOID, aspace_out, ADDR_VOID,
- ADDR_VOID, thread->aspace, ADDR_VOID);
-
if ((control & HURD_EXREGS_SET_ASPACE))
cap_copy_x (principal,
ADDR_VOID, &thread->aspace, ADDR_VOID,
- ADDR_VOID, *aspace, ADDR_VOID,
+ ADDR_VOID, aspace, ADDR_VOID,
flags, properties);
- if ((control & HURD_EXREGS_GET_REGS) && activity_out)
- cap_copy (principal,
- ADDR_VOID, activity_out, ADDR_VOID,
- ADDR_VOID, thread->activity, ADDR_VOID);
-
if ((control & HURD_EXREGS_SET_ACTIVITY))
cap_copy (principal,
ADDR_VOID, &thread->activity, ADDR_VOID,
- ADDR_VOID, *activity, ADDR_VOID);
+ ADDR_VOID, activity, ADDR_VOID);
- if ((control & HURD_EXREGS_GET_REGS) && exception_page_out)
+ if ((control & HURD_EXREGS_SET_UTCB))
cap_copy (principal,
- ADDR_VOID, exception_page_out, ADDR_VOID,
- ADDR_VOID, thread->exception_page, ADDR_VOID);
+ ADDR_VOID, &thread->utcb, ADDR_VOID,
+ ADDR_VOID, utcb, ADDR_VOID);
- if ((control & HURD_EXREGS_SET_EXCEPTION_PAGE))
+ if ((control & HURD_EXREGS_SET_EXCEPTION_MESSENGER))
cap_copy (principal,
- ADDR_VOID, &thread->exception_page, ADDR_VOID,
- ADDR_VOID, *exception_page, ADDR_VOID);
+ ADDR_VOID, &thread->exception_messenger, ADDR_VOID,
+ ADDR_VOID, exception_messenger, ADDR_VOID);
if (thread->commissioned)
{
@@ -443,23 +430,29 @@ thread_exregs (struct activity *principal,
return 0;
}
-void
-thread_raise_exception (struct activity *activity,
- struct thread *thread,
- l4_msg_t *msg)
+bool
+thread_activate (struct activity *activity,
+ struct thread *thread,
+ struct messenger *messenger,
+ bool may_block)
{
- l4_word_t ip = 0;
- l4_word_t sp = 0;
+ assert (messenger);
+ assert (object_type ((struct object *) messenger) == cap_messenger);
+
+
+ uintptr_t ip = 0;
+ uintptr_t sp = 0;
{
- l4_word_t c = _L4_XCHG_REGS_DELIVER;
+ uintptr_t c = _L4_XCHG_REGS_DELIVER;
l4_thread_id_t targ = thread->tid;
- l4_word_t dummy = 0;
+ uintptr_t dummy = 0;
_L4_exchange_registers (&targ, &c,
&sp, &ip, &dummy, &dummy, &dummy);
}
- struct object *page = cap_to_object (activity, &thread->exception_page);
- if (! page)
+ struct vg_utcb *utcb
+ = (struct vg_utcb *) cap_to_object (activity, &thread->utcb);
+ if (! utcb)
{
#ifndef NDEBUG
extern struct trace_buffer rpc_trace;
@@ -468,36 +461,50 @@ thread_raise_exception (struct activity *activity,
do_debug (4)
as_dump_from (activity, &thread->aspace, "");
- debug (0, "Malformed thread (%x): no exception page (ip: %x, sp: %x)",
+ debug (0, "Malformed thread (%x): no utcb (ip: %x, sp: %x)",
thread->tid, ip, sp);
- return;
+ return false;
}
- if (object_type (page) != cap_page)
+ if (object_type ((struct object *) utcb) != cap_page)
{
- debug (0, "Malformed thread: exception page slot contains a %s, "
- "not a cap_page",
- cap_type_string (object_type (page)));
- return;
+ debug (0, "Malformed thread: utcb slot contains a %s, not a page",
+ cap_type_string (object_type ((struct object *) utcb)));
+ return false;
}
- struct exception_page *exception_page = (struct exception_page *) page;
-
- if (exception_page->activated_mode)
+ if (utcb->activated_mode)
{
debug (0, "Deferring exception delivery: thread in activated mode!"
"(sp: %x, ip: %x)", sp, ip);
- /* XXX: Sure, we could note that an exception is pending but we
- need to queue the event. */
- // exception_page->pending_message = 1;
+ if (! may_block)
+ return false;
- return;
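+ /* THREAD is in activated mode: enqueue MESSENGER on the thread
+ and flag the pending message so it can be delivered later (see
+ thread_deliver_pending). */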
+ object_wait_queue_enqueue (activity,
+ (struct object *) thread, messenger);
+ messenger->wait_reason = MESSENGER_WAIT_TRANSFER_MESSAGE;
+
+ utcb->pending_message = 1;
+
+ return true;
}
- /* Copy the message. */
- memcpy (&exception_page->exception, msg,
- (1 + l4_untyped_words (l4_msg_msg_tag (*msg))) * sizeof (l4_word_t));
+ debug (5, "Activating %x (ip: %p; sp: %p)",
+ thread->tid, (void *) ip, (void *) sp);
+
+ utcb->protected_payload = messenger->protected_payload;
+ utcb->messenger_id = messenger->id;
+
+ if (! messenger->out_of_band)
+ {
+ memcpy (utcb->inline_words, messenger->inline_words,
+ messenger->inline_word_count * sizeof (uintptr_t));
+ memcpy (utcb->inline_caps, messenger->inline_caps,
+ messenger->inline_cap_count * sizeof (addr_t));
+ utcb->inline_word_count = messenger->inline_word_count;
+ utcb->inline_cap_count = messenger->inline_cap_count;
+ }
l4_word_t c = HURD_EXREGS_STOP | _L4_XCHG_REGS_DELIVER
| _L4_XCHG_REGS_CANCEL_SEND | _L4_XCHG_REGS_CANCEL_RECV;
@@ -521,7 +528,7 @@ thread_raise_exception (struct activity *activity,
int err = l4_error_code ();
debug (0, "Failed to exregs %x: %s (%d)",
thread->tid, l4_strerror (err), err);
- return;
+ return false;
}
do_debug (4)
{
@@ -531,28 +538,28 @@ thread_raise_exception (struct activity *activity,
thread->tid, string, c);
}
- exception_page->saved_thread_state = c;
+ utcb->saved_thread_state = c;
- exception_page->activated_mode = 1;
+ utcb->activated_mode = 1;
- if (exception_page->exception_handler_ip <= ip
- && ip < exception_page->exception_handler_end)
+ if (utcb->activation_handler_ip <= ip
+ && ip < utcb->activation_handler_end)
/* Thread is transitioning. Don't save sp and ip. */
{
- debug (4, "Fault while interrupt in transition (ip: %x)!",
+ debug (0, "Fault while interrupt in transition (ip: %x)!",
ip);
- exception_page->interrupt_in_transition = 1;
+ utcb->interrupt_in_transition = 1;
}
else
{
- exception_page->interrupt_in_transition = 0;
- exception_page->saved_sp = sp;
- exception_page->saved_ip = ip;
+ utcb->interrupt_in_transition = 0;
+ utcb->saved_sp = sp;
+ utcb->saved_ip = ip;
}
c = HURD_EXREGS_START | _L4_XCHG_REGS_SET_SP | _L4_XCHG_REGS_SET_IP;
- sp = exception_page->exception_handler_sp;
- ip = exception_page->exception_handler_ip;
+ sp = utcb->activation_handler_sp;
+ ip = utcb->activation_handler_ip;
targ = thread->tid;
do_debug (4)
{
@@ -571,7 +578,7 @@ thread_raise_exception (struct activity *activity,
int err = l4_error_code ();
debug (0, "Failed to exregs %x: %s (%d)",
thread->tid, l4_strerror (err), err);
- return;
+ return false;
}
do_debug (4)
{
@@ -580,4 +587,73 @@ thread_raise_exception (struct activity *activity,
debug (0, "exregs on %x returned control: %s (%x)",
thread->tid, string, c);
}
+
+ return true;
+}
+
+void
+thread_raise_exception (struct activity *activity,
+ struct thread *thread,
+ struct vg_message *message)
+{
+ struct messenger *handler
+ = (struct messenger *) cap_to_object (activity,
+ &thread->exception_messenger);
+ if (! handler)
+ {
+ backtrace_print ();
+ debug (0, "Thread %x has no exception handler.", thread->tid);
+ }
+ else if (object_type ((struct object *) handler) != cap_messenger)
+ debug (0, "%s is not a valid exception handler.",
+ cap_type_string (object_type ((struct object *) handler)));
+ else
+ {
+ if (! messenger_message_load (activity, handler, message))
+ debug (0, "Failed to deliver exception to thread's exception handler.");
+ return;
+ }
+}
+
+void
+thread_deliver_pending (struct activity *activity,
+ struct thread *thread)
+{
+ struct vg_utcb *utcb
+ = (struct vg_utcb *) cap_to_object (activity, &thread->utcb);
+ if (! utcb)
+ {
+ debug (0, "Malformed thread (%x): no utcb",
+ thread->tid);
+ return;
+ }
+
+ if (object_type ((struct object *) utcb) != cap_page)
+ {
+ debug (0, "Malformed thread: utcb slot contains a %s, not a page",
+ cap_type_string (object_type ((struct object *) utcb)));
+ return;
+ }
+
+ if (utcb->activated_mode)
+ {
+ debug (0, "Deferring exception delivery: thread in activated mode!");
+ return;
+ }
+
+
+ struct messenger *m;
+ object_wait_queue_for_each (activity, (struct object *) thread, m)
+ if (m->wait_reason == MESSENGER_WAIT_TRANSFER_MESSAGE)
+ {
+ object_wait_queue_unlink (activity, m);
+ m->wait_reason = MESSENGER_WAIT_TRANSFER_MESSAGE;
+
+ bool ret = thread_activate (activity, thread, m, false);
+ assert (ret);
+
+ return;
+ }
+
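+ /* No messenger is waiting to transfer a message; clear the
+ pending flag. */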
+ utcb->pending_message = 0;
}
diff --git a/viengoos/thread.h b/viengoos/thread.h
index 728bf7b..3bcb91d 100644
--- a/viengoos/thread.h
+++ b/viengoos/thread.h
@@ -23,35 +23,12 @@
#include <l4.h>
#include <errno.h>
-
-#include "list.h"
+#include <hurd/cap.h>
+#include <hurd/thread.h>
/* Forward. */
-struct folio;
struct activity;
-/* Number of capability slots at the start of the thread
- structure. */
-enum
- {
- THREAD_SLOTS = 3,
- };
-
-enum
- {
- /* THREAD is blocked on an object wait for a futex.
- WAIT_REASON_ARG holds the byte offset in the object on which it
- is waiting. */
- THREAD_WAIT_FUTEX,
- /* THREAD is blocked on an object waiting for the object to be
- destroyed. */
- THREAD_WAIT_DESTROY,
- /* THREAD is blocked on an activity waiting for information. The
- type of information is stored in wait_reason_arg. The period
- in wait_reason_arg2. */
- THREAD_WAIT_ACTIVITY_INFO,
- };
-
struct thread
{
/* User accessible fields. */
@@ -63,10 +40,14 @@ struct thread
this thread's storage is allocated!) */
struct cap activity;
- /* Capability identifying a page to use to store exceptions. */
- struct cap exception_page;
+ /* A capability designating a messenger to which to deliver
+ exceptions. */
+ struct cap exception_messenger;
- /* Non-user accessible fields. */
+ /* A capability designating the page that contains the thread's UTCB. */
+ struct cap utcb;
+
+ /* Non-user-accessible fields. */
/* Allocated thread id. */
l4_thread_id_t tid;
@@ -82,48 +63,9 @@ struct thread
/* Whether the thread has been commissioned (a tid allocated). */
uint32_t commissioned : 1;
- /* Whether the object is attached to a wait queue. (This is
- different from the value of folio_object_wait_queue_p which
- specifies if there are objects on this thread's wait queue.) */
- bool wait_queue_p;
-
- /* Whether this thread is the head of the wait queue. If so,
- WAIT_QUEUE.PREV designates the object. */
- uint32_t wait_queue_head : 1;
-
- /* Whether this thread is the tail of the wait queue. If so,
- WAIT_QUEUE.NEXT designates the object. */
- uint32_t wait_queue_tail : 1;
-
- /* The event the thread is interested in. */
- uint32_t wait_reason : 28;
- /* More information about the reason. */
- uint32_t wait_reason_arg;
- uint32_t wait_reason_arg2;
-
- /* The object the thread is waiting on. Only meaningful if
- WAIT_QUEUE_P is true. */
- struct
- {
- /* We don't need versioning as we automatically collect on object
- destruction. */
- oid_t next;
- oid_t prev;
- } wait_queue;
-
-#ifndef NDEBUG
- struct list_node futex_waiter_node;
-#endif
-
struct object_name name;
};
-#ifndef NDEBUG
-LIST_CLASS(futex_waiter, struct thread, futex_waiter_node, true)
-/* List of threads waiting on a futex. */
-extern struct futex_waiter_list futex_waiters;
-#endif
-
/* The hardwired base of the UTCB (2.5GB). */
#define UTCB_AREA_BASE (0xA0000000)
/* The size of the UTCB. */
@@ -152,21 +94,33 @@ extern void thread_decommission (struct thread *thread);
USER_HANDLER are as per l4_exchange_regs, however, the caller may
not set the pager. */
extern error_t thread_exregs (struct activity *principal,
- struct thread *thread, l4_word_t control,
- struct cap *aspace,
- l4_word_t flags, struct cap_properties properties,
- struct cap *activity,
- struct cap *exception_page,
- l4_word_t *sp, l4_word_t *ip,
- l4_word_t *eflags, l4_word_t *user_handle,
- struct cap *aspace_out,
- struct cap *activity_out,
- struct cap *exception_page_out);
-
-/* Send thread THREAD an exception. */
+ struct thread *thread, uintptr_t control,
+ struct cap aspace,
+ uintptr_t flags, struct cap_properties properties,
+ struct cap activity,
+ struct cap utcb,
+ struct cap exception_messenger,
+ uintptr_t *sp, uintptr_t *ip,
+ uintptr_t *eflags, uintptr_t *user_handle);
+
+/* Deliver the message carried by messenger MESSENGER to thread
+ THREAD. If THREAD is not in activated mode, activate it. Returns
+ whether the message was delivered or the messenger was enqueued on
+ the thread. */
+extern bool thread_activate (struct activity *activity,
+ struct thread *thread,
+ struct messenger *messenger,
+ bool may_block);
+
+/* Send thread THREAD's exception messenger the exception described by
+ MESSAGE. If this would block, silently discards MESSAGE. */
extern void thread_raise_exception (struct activity *activity,
struct thread *thread,
- l4_msg_t *msg);
+ struct vg_message *message);
+
+/* Deliver a pending message, if any and if possible. */
+extern void thread_deliver_pending (struct activity *activity,
+ struct thread *thread);
/* Given the L4 thread id THREADID, find the associated thread. */
extern struct thread *thread_lookup (l4_thread_id_t threadid);