summaryrefslogtreecommitdiff
path: root/libviengoos
diff options
context:
space:
mode:
authorNeal H. Walfield <neal@gnu.org>2008-12-17 13:11:52 +0100
committerNeal H. Walfield <neal@gnu.org>2008-12-17 13:11:52 +0100
commitca828476fdb64ad4615bc6f580c2a451e78fcc1c (patch)
tree320c2e89e34bbcd0ba82b64728f831b8536cc286 /libviengoos
parentf0d7e0e0ee0cb0362ee79739922a76faef17917c (diff)
Move viengoos header files to libviengoos.
/ 2008-12-17 Neal H. Walfield <neal@gnu.org> * libviengoos: New directory. * Makefile.am (SUBDIRS): Add libviengoos. * configure.ac: Include libviengoos/headers.m4. Generate libviengoos/Makefile. hurd/ 2008-12-17 Neal H. Walfield <neal@gnu.org> * activity.h: Move to ../libviengoos/viengoos. Update references. * addr-trans.h: Likewise. * addr.h: Likewise. * cap.h: Likewise. * folio.h: Likewise. * futex.h: Likewise. * ipc.h: Likewise. * message.h: Likewise. * messenger.h: Likewise. * rpc.h: Likewise. * thread.h: Move viengoos specific bits... * ../libviengoos/viengoos/thread.h: ... to this new file. * thread.h (struct hurd_utcb): New structure. * exceptions.h (hurd_utcb): Use a struct hurd_utcb *, not a struct vg_utcb *. Update users. (hurd_activation_state_alloc): Likewise. (hurd_activation_state_free): Likewise. (hurd_activation_handler_activated): Likewise. (hurd_activation_handler_normal): Likewise. * t-addr-trans.c: Move to ../libviengoos. * t-addr.c: Likewise. * t-rpc.c: Likewise. * Makefile.am (TESTS, check_PROGRAMS, t_addr_CPPFLAGS) (t_addr_SOURCES, t_addr_trans_CPPFLAGS, t_addr_trans_SOURCES) (t_rpc_CPPFLAGS, t_rpc_SOURCES): Move from this file... * ../libviengoos/Makefile.am. * Makefile.am (includehurd_HEADERS): Don't mention activity.h, addr-trans.h, addr.h, cap.h, folio.h, futex.h, ipc.h, message.h or messenger.h. * headers.m4: Don't create a link to hurd/addr.h, hurd/addr-trans.h, hurd/cap.h, hurd/folio.h, hurd/rpc.h, hurd/activity.h, hurd/futex.h, hurd/message.h, hurd/messenger.h or hurd/ipc.h. libviengoos/ 2008-12-17 Neal H. Walfield <neal@gnu.org> * Makefile.am: New file. * headers.m4: Likewise. * t-addr-trans.c: Move from ../hurd. * t-addr.c: Likewise. * t-rpc.c: Likewise. * viengoos.h: Likewise. * viengoos/activity.h: Likewise. * viengoos/addr-trans.h: Likewise. * viengoos/addr.h: Likewise. * viengoos/cap.h: Likewise. * viengoos/folio.h: Likewise. * viengoos/futex.h: Likewise. * viengoos/ipc.h: Likewise. * viengoos/message.h: Likewise. 
* viengoos/messenger.h: Likewise. * viengoos/rpc.h: Likewise. * viengoos/misc.h: Moved from ../viengoos/rm.h. * viengoos/thread.h: New file split from ../hurd/thread.h. libpthread/ 2008-12-17 Neal H. Walfield <neal@gnu.org> * Makefile.am: New file. * headers.m4: Likewise. * t-addr-trans.c: Move from ../hurd. * t-addr.c: Likewise. * t-rpc.c: Likewise. * viengoos.h: Likewise. * viengoos/activity.h: Likewise. * viengoos/addr-trans.h: Likewise. * viengoos/addr.h: Likewise. * viengoos/cap.h: Likewise. * viengoos/folio.h: Likewise. * viengoos/futex.h: Likewise. * viengoos/ipc.h: Likewise. * viengoos/message.h: Likewise. * viengoos/messenger.h: Likewise. * viengoos/rpc.h: Likewise. * viengoos/misc.h: Moved from ../viengoos/rm.h. * viengoos/thread.h: New file split from ../hurd/thread.h. viengoos/ 2008-12-17 Neal H. Walfield <neal@gnu.org> * rm.h: Move from here... * ../libviengoos/viengoos/misc.h: ... to here. Update users. * headers.m4: Don't link rm.h to hurd/rm.h. * Makefile.am (viengoos_SOURCES): Remove rm.h.
Diffstat (limited to 'libviengoos')
-rw-r--r--libviengoos/ChangeLog21
-rw-r--r--libviengoos/Makefile.am40
-rw-r--r--libviengoos/headers.m428
-rw-r--r--libviengoos/t-addr-trans.c84
-rw-r--r--libviengoos/t-addr.c74
-rw-r--r--libviengoos/t-rpc.c185
-rw-r--r--libviengoos/viengoos.h1
-rw-r--r--libviengoos/viengoos/activity.h254
-rw-r--r--libviengoos/viengoos/addr-trans.h190
-rw-r--r--libviengoos/viengoos/addr.h179
-rw-r--r--libviengoos/viengoos/cap.h730
-rw-r--r--libviengoos/viengoos/folio.h441
-rw-r--r--libviengoos/viengoos/futex.h219
-rw-r--r--libviengoos/viengoos/ipc.h297
-rw-r--r--libviengoos/viengoos/message.h229
-rw-r--r--libviengoos/viengoos/messenger.h87
-rw-r--r--libviengoos/viengoos/misc.h128
-rw-r--r--libviengoos/viengoos/rpc.h1054
-rw-r--r--libviengoos/viengoos/thread.h281
19 files changed, 4522 insertions, 0 deletions
diff --git a/libviengoos/ChangeLog b/libviengoos/ChangeLog
new file mode 100644
index 0000000..1e3bbc5
--- /dev/null
+++ b/libviengoos/ChangeLog
@@ -0,0 +1,21 @@
+2008-12-17 Neal H. Walfield <neal@gnu.org>
+
+ * Makefile.am: New file.
+ * headers.m4: Likewise.
+ * t-addr-trans.c: Move from ../hurd.
+ * t-addr.c: Likewise.
+ * t-rpc.c: Likewise.
+ * viengoos.h: Likewise.
+ * viengoos/activity.h: Likewise.
+ * viengoos/addr-trans.h: Likewise.
+ * viengoos/addr.h: Likewise.
+ * viengoos/cap.h: Likewise.
+ * viengoos/folio.h: Likewise.
+ * viengoos/futex.h: Likewise.
+ * viengoos/ipc.h: Likewise.
+ * viengoos/message.h: Likewise.
+ * viengoos/messenger.h: Likewise.
+ * viengoos/rpc.h: Likewise.
+ * viengoos/misc.h: Moved from ../viengoos/rm.h.
+ * viengoos/thread.h: New file split from ../hurd/thread.h.
+
diff --git a/libviengoos/Makefile.am b/libviengoos/Makefile.am
new file mode 100644
index 0000000..6e7b1c4
--- /dev/null
+++ b/libviengoos/Makefile.am
@@ -0,0 +1,40 @@
+# Makefile.am - Makefile template for libviengoos.
+# Copyright (C) 2008 Free Software Foundation, Inc.
+# Written by Neal H. Walfield
+#
+# This file is part of the GNU Hurd.
+#
+# The GNU Hurd is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# The GNU Hurd is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+
+viengoos_headers = addr.h addr-trans.h cap.h \
+ thread.h folio.h activity.h futex.h messenger.h \
+ message.h ipc.h \
+ rpc.h \
+ misc.h
+
+nobase_include_HEADERS = viengoos.h \
+ $(addprefix viengoos/, $(viengoos_headers))
+
+TESTS = t-addr t-addr-trans t-rpc
+check_PROGRAMS = $(TESTS)
+
+t_addr_CPPFLAGS = $(CHECK_CPPFLAGS)
+t_addr_SOURCES = t-addr.c
+
+t_addr_trans_CPPFLAGS = $(CHECK_CPPFLAGS)
+t_addr_trans_SOURCES = t-addr-trans.c
+
+t_rpc_CPPFLAGS = $(CHECK_CPPFLAGS)
+t_rpc_SOURCES = t-rpc.c
diff --git a/libviengoos/headers.m4 b/libviengoos/headers.m4
new file mode 100644
index 0000000..834985f
--- /dev/null
+++ b/libviengoos/headers.m4
@@ -0,0 +1,28 @@
+# headers.m4 - Autoconf snippets to install links for header files.
+# Copyright 2008 Free Software Foundation, Inc.
+# Written by Neal H. Walfield
+#
+# This file is free software; as a special exception the author gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY, to the extent permitted by law; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+AC_CONFIG_LINKS([
+ sysroot/include/viengoos.h:libviengoos/viengoos.h
+ sysroot/include/viengoos/addr.h:libviengoos/viengoos/addr.h
+ sysroot/include/viengoos/addr-trans.h:libviengoos/viengoos/addr-trans.h
+ sysroot/include/viengoos/cap.h:libviengoos/viengoos/cap.h
+ sysroot/include/viengoos/thread.h:libviengoos/viengoos/thread.h
+ sysroot/include/viengoos/folio.h:libviengoos/viengoos/folio.h
+ sysroot/include/viengoos/activity.h:libviengoos/viengoos/activity.h
+ sysroot/include/viengoos/futex.h:libviengoos/viengoos/futex.h
+ sysroot/include/viengoos/messenger.h:libviengoos/viengoos/messenger.h
+ sysroot/include/viengoos/message.h:libviengoos/viengoos/message.h
+ sysroot/include/viengoos/ipc.h:libviengoos/viengoos/ipc.h
+ sysroot/include/viengoos/rpc.h:libviengoos/viengoos/rpc.h
+ sysroot/include/viengoos/misc.h:libviengoos/viengoos/misc.h
+])
+
diff --git a/libviengoos/t-addr-trans.c b/libviengoos/t-addr-trans.c
new file mode 100644
index 0000000..c3607ad
--- /dev/null
+++ b/libviengoos/t-addr-trans.c
@@ -0,0 +1,84 @@
+/* t-addr-trans.c - Test the implementation of the address translation functions.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "stddef.h"
+#include "addr-trans.h"
+
+int output_debug;
+char *program_name = "t-addr-trans";
+
+int
+main (int argc, char *argv[])
+{
+ printf ("Checking CAP_ADDR_TRANS_SET_GUARD_SUBPAGE... ");
+
+ struct cap_addr_trans cap_addr_trans;
+
+ bool r;
+ int subpage_bits;
+ for (subpage_bits = 0; subpage_bits < 16; subpage_bits ++)
+ {
+ int subpages = 1 << subpage_bits;
+ int subpage_size_log2 = 8 - subpage_bits;
+ int subpage_size = 1 << subpage_size_log2;
+
+ memset (&cap_addr_trans, 0, sizeof (cap_addr_trans));
+
+ r = CAP_ADDR_TRANS_SET_SUBPAGE (&cap_addr_trans, 0, subpages);
+ assert (r == (subpage_bits <= 8));
+ if (subpage_bits >= 8)
+ continue;
+
+ assert (CAP_ADDR_TRANS_SUBPAGES (cap_addr_trans) == subpages);
+ assert (CAP_ADDR_TRANS_SUBPAGE_SIZE (cap_addr_trans) == subpage_size);
+ assert (CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 (cap_addr_trans)
+ == subpage_size_log2);
+
+ int gdepth;
+ for (gdepth = 0; gdepth < sizeof (uintptr_t) * 8; gdepth ++)
+ {
+ int guard_bits;
+ for (guard_bits = 0; guard_bits < sizeof (uintptr_t) * 8; guard_bits ++)
+ {
+ int guard = (1 << guard_bits) - 1;
+ r = CAP_ADDR_TRANS_SET_GUARD (&cap_addr_trans, guard, gdepth);
+ if (guard_bits <= gdepth
+ && (guard_bits + subpage_bits
+ <= CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS))
+ {
+ assert (r);
+ assert (CAP_ADDR_TRANS_GUARD_BITS (cap_addr_trans)
+ == gdepth);
+ assert (CAP_ADDR_TRANS_GUARD (cap_addr_trans) == guard);
+ }
+ else
+ assert (! r);
+ }
+ }
+ }
+
+ printf ("ok\n");
+
+ return 0;
+}
diff --git a/libviengoos/t-addr.c b/libviengoos/t-addr.c
new file mode 100644
index 0000000..1c2494d
--- /dev/null
+++ b/libviengoos/t-addr.c
@@ -0,0 +1,74 @@
+/* t-addr.c - Test the implementation of the various addr functions.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <hurd/stddef.h>
+#include <hurd/types.h>
+#include <viengoos/addr.h>
+#include <assert.h>
+#include <l4/math.h>
+
+char *program_name = "t-addr";
+int output_debug = 0;
+
+int
+main (int argc, char *argv[])
+{
+ addr_t addr;
+ int i, j;
+
+ printf ("Checking ADDR... ");
+ for (i = 0; i < ADDR_BITS; i ++)
+ {
+ addr = ADDR (1ULL << i, ADDR_BITS - i);
+ debug (1, "%llx/%d =? %llx/%d\n",
+ 1ULL << i, ADDR_BITS - i,
+ addr_prefix (addr), addr_depth (addr));
+ assert (addr_depth (addr) == ADDR_BITS - i);
+ assert (addr_prefix (addr) == 1ull << i);
+ }
+ printf ("ok.\n");
+
+ printf ("Checking addr_extend... ");
+ addr = ADDR (0, 0);
+ for (i = 1; i < ADDR_BITS; i ++)
+ {
+ addr = addr_extend (addr, 1, 1);
+ assert (addr_depth (addr) == i);
+ assert (l4_msb64 (addr_prefix (addr)) == ADDR_BITS);
+ assert (l4_lsb64 (addr_prefix (addr)) == ADDR_BITS - i + 1);
+ }
+ printf ("ok.\n");
+
+ printf ("Checking addr_extract... ");
+ addr = ADDR (0, 0);
+ for (i = 0; i < ADDR_BITS; i ++)
+ {
+ addr = ADDR (((1ULL << i) - 1) << (ADDR_BITS - i), i);
+
+ for (j = 0; j <= i; j ++)
+ {
+ l4_uint64_t idx = addr_extract (addr, j);
+ assert (idx == (1ULL << j) - 1);
+ }
+ }
+ printf ("ok.\n");
+
+ return 0;
+}
diff --git a/libviengoos/t-rpc.c b/libviengoos/t-rpc.c
new file mode 100644
index 0000000..0f40fe9
--- /dev/null
+++ b/libviengoos/t-rpc.c
@@ -0,0 +1,185 @@
+#include <stdbool.h>
+#include <stdint.h>
+#include <assert.h>
+
+char *program_name = "t-rpc";
+int output_debug = 1;
+
+#define RPC_STUB_PREFIX rpc
+#define RPC_ID_PREFIX RPC
+
+#include <viengoos/rpc.h>
+
+/* Exception message ids. */
+enum
+ {
+ RPC_noargs = 0x1ABE100,
+ RPC_onein,
+ RPC_oneout,
+ RPC_onlyin,
+ RPC_onlyout,
+ RPC_mix,
+ RPC_caps,
+ };
+
+struct foo
+{
+ int a;
+ char b;
+};
+
+RPC(noargs, 0, 0, 0)
+RPC(onein, 1, 0, 0, uint32_t, arg)
+RPC(oneout, 0, 1, 0, uint32_t, arg)
+RPC(onlyin, 4, 0, 0, uint32_t, arg, uint32_t, idx, struct foo, foo, bool, p)
+RPC(onlyout, 0, 4, 0, uint32_t, arg, uint32_t, idx, struct foo, foo, bool, p)
+RPC(mix, 2, 3, 0, uint32_t, arg, uint32_t, idx,
+ struct foo, foo, bool, p, int, i)
+RPC(caps, 3, 2, 2,
+ /* In: */
+ int, i, cap_t, c, struct foo, foo,
+ /* Out: */
+ int, a, int, b, cap_t, x, cap_t, y)
+
+#undef RPC_STUB_PREFIX
+#undef RPC_ID_PREFIX
+
+int
+main (int argc, char *argv[])
+{
+ printf ("Checking RPC... ");
+
+ error_t err;
+ struct vg_message *msg;
+
+
+#define REPLY ADDR (0x1000, ADDR_BITS - 12)
+ addr_t reply = REPLY;
+
+ msg = malloc (sizeof (*msg));
+ rpc_noargs_send_marshal (msg, REPLY);
+ err = rpc_noargs_send_unmarshal (msg, &reply);
+ assert (! err);
+ assert (ADDR_EQ (reply, REPLY));
+ free (msg);
+
+ msg = malloc (sizeof (*msg));
+ rpc_noargs_reply_marshal (msg);
+ err = rpc_noargs_reply_unmarshal (msg);
+ assert (err == 0);
+ free (msg);
+
+
+ msg = malloc (sizeof (*msg));
+#define VALUE 0xfde8963a
+ uint32_t arg = VALUE;
+ uint32_t arg_out;
+
+ rpc_onein_send_marshal (msg, arg, REPLY);
+ err = rpc_onein_send_unmarshal (msg, &arg_out, &reply);
+ assert (! err);
+ assert (arg_out == VALUE);
+ assert (ADDR_EQ (reply, REPLY));
+ free (msg);
+
+ msg = malloc (sizeof (*msg));
+ rpc_onein_reply_marshal (msg);
+ err = rpc_onein_reply_unmarshal (msg);
+ assert (! err);
+ free (msg);
+
+ msg = malloc (sizeof (*msg));
+ rpc_oneout_send_marshal (msg, REPLY);
+ err = rpc_oneout_send_unmarshal (msg, &reply);
+ assert (! err);
+ assert (ADDR_EQ (reply, REPLY));
+ free (msg);
+
+ msg = malloc (sizeof (*msg));
+ rpc_oneout_reply_marshal (msg, arg);
+ err = rpc_oneout_reply_unmarshal (msg, &arg_out);
+ assert (! err);
+ assert (arg_out == VALUE);
+ free (msg);
+
+ msg = malloc (sizeof (*msg));
+
+ struct foo foo;
+ foo.a = 1 << 31;
+ foo.b = 'l';
+ uint32_t idx_out;
+ struct foo foo_out;
+ bool p_out;
+
+ rpc_onlyin_send_marshal (msg, 0x1234567, 0xABC, foo, true, REPLY);
+ err = rpc_onlyin_send_unmarshal (msg, &arg_out, &idx_out, &foo_out, &p_out,
+ &reply);
+ assert (! err);
+ assert (arg_out == 0x1234567);
+ assert (idx_out == 0xABC);
+ assert (foo_out.a == foo.a);
+ assert (foo_out.b == foo.b);
+ assert (p_out == true);
+ assert (ADDR_EQ (reply, REPLY));
+ free (msg);
+
+ msg = malloc (sizeof (*msg));
+ rpc_onlyin_reply_marshal (msg);
+ err = rpc_onlyin_reply_unmarshal (msg);
+ assert (! err);
+ free (msg);
+
+ msg = malloc (sizeof (*msg));
+ rpc_onlyout_send_marshal (msg, REPLY);
+ err = rpc_onlyout_send_unmarshal (msg, &reply);
+ assert (! err);
+ assert (ADDR_EQ (reply, REPLY));
+ free (msg);
+
+ msg = malloc (sizeof (*msg));
+ rpc_onlyout_reply_marshal (msg, 0x1234567, 321, foo, true);
+ err = rpc_onlyout_reply_unmarshal (msg, &arg_out, &idx_out,
+ &foo_out, &p_out);
+ assert (! err);
+ assert (arg_out == 0x1234567);
+ assert (idx_out == 321);
+ assert (foo_out.a == foo.a);
+ assert (foo_out.b == foo.b);
+ assert (p_out == true);
+ free (msg);
+
+
+ msg = malloc (sizeof (*msg));
+ rpc_mix_send_marshal (msg, arg, 456789, REPLY);
+ err = rpc_mix_send_unmarshal (msg, &arg_out, &idx_out, &reply);
+ assert (! err);
+ assert (arg_out == arg);
+ assert (idx_out == 456789);
+ assert (ADDR_EQ (reply, REPLY));
+ free (msg);
+
+ msg = malloc (sizeof (*msg));
+ int i_out = 0;
+ rpc_mix_reply_marshal (msg, foo, false, 4200042);
+ err = rpc_mix_reply_unmarshal (msg, &foo_out, &p_out, &i_out);
+ assert (! err);
+ assert (foo_out.a == foo.a);
+ assert (foo_out.b == foo.b);
+ assert (p_out == false);
+ assert (i_out == 4200042);
+ free (msg);
+
+ msg = malloc (sizeof (*msg));
+ rpc_caps_send_marshal (msg, 54, ADDR (1, ADDR_BITS), foo, REPLY);
+ addr_t addr;
+ err = rpc_caps_send_unmarshal (msg, &i_out, &addr, &foo_out, &reply);
+ assert (! err);
+ assert (i_out == 54);
+ assert (ADDR_EQ (addr, ADDR (1, ADDR_BITS)));
+ assert (foo_out.a == foo.a);
+ assert (foo_out.b == foo.b);
+ free (msg);
+
+ printf ("ok\n");
+ return 0;
+}
diff --git a/libviengoos/viengoos.h b/libviengoos/viengoos.h
new file mode 100644
index 0000000..bc734ad
--- /dev/null
+++ b/libviengoos/viengoos.h
@@ -0,0 +1 @@
+#include <viengoos/thread.h>
diff --git a/libviengoos/viengoos/activity.h b/libviengoos/viengoos/activity.h
new file mode 100644
index 0000000..a180527
--- /dev/null
+++ b/libviengoos/viengoos/activity.h
@@ -0,0 +1,254 @@
+/* activity.h - Activity definitions.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _VIENGOOS_ACTIVITY_H
+#define _VIENGOOS_ACTIVITY_H 1
+
+#include <stdint.h>
+
+enum
+ {
+ RM_activity_policy = 700,
+ RM_activity_info,
+ };
+
+struct activity_memory_policy
+{
+ uint16_t priority;
+ uint16_t weight;
+};
+
+#define ACTIVITY_MEMORY_POLICY(__amp_priority, __amp_weight) \
+ (struct activity_memory_policy) { __amp_priority, __amp_weight }
+#define ACTIVITY_MEMORY_POLICY_VOID ACTIVITY_MEMORY_POLICY(0, 0)
+
+struct activity_policy
+{
+ /* This policy is typically set by the parent to reflect how
+ available memory should be distributed among its immediate
+ children. It may only be set via an activity control
+ capability. */
+ struct activity_memory_policy sibling_rel;
+
+ /* This policy is typically set by the activity user and controls
+ how the memory allocated *directly* to this activity is managed
+ relative to the memory allocated to this activity's children.
+ That is, if the activity has been chosen as a victim, this
+ provides a policy to determine whether the memory allocated
+ directly to the activity or that to a child activity should be
+ evicted. */
+ struct activity_memory_policy child_rel;
+
+ /* Number of folios. Zero means no limit. (This does not mean that
+ there is no limit, just that this activity does not impose a
+ limit. The parent activity, for instance, may impose a limit.)
+ May only be set via an activity control capability. */
+ uint32_t folios;
+};
+
+/* Activity statistics. These are approximate and in some cases
+ represent averages. */
+#define ACTIVITY_STATS_PERIODS 2
+struct activity_stats
+{
+ /* The period during which this statistic was generated. */
+ uint32_t period;
+
+ /* The maximum number of frames this activity could currently
+ allocate assuming other allocations do not otherwise change.
+ This implies stealing from others. */
+ uint32_t available;
+ uint32_t available_local;
+
+ /* Log2 the maximum amount of memory (in pages) that the user of
+ this activity ought to allocate in the next few seconds. If
+ negative, the amount of memory the activity ought to consider
+ freeing. */
+ int8_t damping_factor;
+
+ /* If pressure is non-zero, then this activity is causing PRESSURE.
+
+ PRESSURE is calculated as follows: if
+
+ 1) this activity is within its entitlement
+ 2) its working set is significantly smaller than its allocation
+ (as determined by the size of inactive relative to active), and
+ 3) other activities are being held back (i.e., paging) due to this
+ activity,
+
+ then this represents the amount of memory it would be nice to see
+ this activity free. This activity will not be penalized by the
+ system if it does not yield memory. However, if the activity has
+ memory which is yielding a low return, it would be friendly of it
+ to return it. */
+ uint8_t pressure;
+ uint8_t pressure_local;
+
+ /* The number of clean and dirty frames that are accounted to this
+ activity. (Does not include frames scheduled for eviction.) The
+ total number of frames accounted to this activity is thus CLEAN +
+ DIRTY. */
+ uint32_t clean;
+ uint32_t dirty;
+ /* Number of frames pending eviction. */
+ uint32_t pending_eviction;
+
+
+ /* Based on recency information, the number of active frames
+ accounted to this activity and its children. The number of
+ inactive frames is approximately CLEAN + DIRTY - ACTIVE. */
+ uint32_t active;
+ /* Likewise, but excluding its children. */
+ uint32_t active_local;
+
+ /* Number of frames that were inactive in the last period that
+ become active in this period. */
+ uint32_t became_active;
+ /* Number of frames that were active in the last period that
+ become inactive in this period. */
+ uint32_t became_inactive;
+
+
+ /* Number of frames that were not accounted to this activity in the
+ last period and are now accounted to it. */
+ uint32_t claimed;
+ /* Number of frames that were accounted to this activity in the last
+ period and are no longer accounted to it. */
+ uint32_t disowned;
+
+ /* The number of frames that this activity referenced but which are
+ accounted to some other activity. */
+ uint32_t freeloading;
+ /* The sum of the references by other processes to the frames that
+ are accounted to this activity. (A single frame may account
+ for multiple references.) */
+ uint32_t freeloaded;
+
+
+ /* Number of frames that were accounted to this activity and
+ scheduled for eviction. */
+ uint32_t evicted;
+ /* Number of frames that were accounted to this activity (not its
+ children), had the discarded bit set, and were discarded. */
+ uint32_t discarded;
+ /* Number of frames paged-in on behalf of this activity. This does
+ not include pages marked empty that do not require disk
+ activity. */
+ uint32_t pagedin;
+ /* Number of frames that were referenced before being completely
+ freed. (If evicted is significant and saved approximates
+ evicted, then the process is trashing.) */
+ uint32_t saved;
+};
+
+#define ACTIVITY_POLICY(__ap_sibling_rel, __ap_child_rel, __ap_storage) \
+ (struct activity_policy) { __ap_sibling_rel, __ap_child_rel, __ap_storage }
+#define ACTIVITY_POLICY_VOID \
+ ACTIVITY_POLICY(ACTIVITY_MEMORY_POLICY_VOID, \
+ ACTIVITY_MEMORY_POLICY_VOID, \
+ 0)
+
+#define RPC_STUB_PREFIX rm
+#define RPC_ID_PREFIX RM
+
+#include <viengoos/rpc.h>
+
+enum
+{
+ ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET = 1 << 0,
+ ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET = 1 << 1,
+ ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET = 1 << 2,
+ ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET = 1 << 3,
+ ACTIVITY_POLICY_STORAGE_SET = 1 << 4,
+
+ ACTIVITY_POLICY_CHILD_REL_SET = (ACTIVITY_POLICY_CHILD_REL_PRIORITY_SET
+ | ACTIVITY_POLICY_CHILD_REL_WEIGHT_SET),
+
+ ACTIVITY_POLICY_SIBLING_REL_SET = (ACTIVITY_POLICY_SIBLING_REL_PRIORITY_SET
+ | ACTIVITY_POLICY_SIBLING_REL_WEIGHT_SET),
+};
+
+/* Get ACTIVITY's policy and set according to FLAGS and IN. */
+RPC (activity_policy, 2, 1, 0,
+ /* cap_t principal, cap_t activity */
+ uintptr_t, flags, struct activity_policy, in,
+ /* Out: */
+ struct activity_policy, out);
+
+enum
+ {
+ /* Return statistics. */
+ activity_info_stats = 1 << 0,
+ /* Asynchronous change in availability. */
+ activity_info_pressure = 1 << 1,
+ };
+
+struct activity_info
+{
+ /* The returned event. */
+ uintptr_t event;
+ union
+ {
+ /* If EVENT is activity_info_stats. */
+ struct
+ {
+ /* The number of samples. */
+ int count;
+ /* Samples are ordered by recency with the youngest towards the
+ start of the buffer. */
+ struct activity_stats stats[ACTIVITY_STATS_PERIODS];
+ } stats;
+
+ /* If EVENT is activity_info_pressure. */
+ struct
+ {
+ /* The number of pages the caller should try to free (negative)
+ or may allocate (positive). */
+ int amount;
+ } pressure;
+ };
+};
+
+/* Return some information about the activity ACTIVITY. FLAGS is a
+ bit-wise or of events the caller is interested. Only one event
+ will be returned.
+
+ If FLAGS contains activity_info_stats, may return the next
+ statistic that comes at or after UNTIL_PERIOD. (This can be used
+ to register a callback that is sent when the statistics are next
+ available. For example, call with UNTIL_PERIOD equal to 0 to get
+ the current statistics and then examine the period field. Use this
+ as the base for the next call.)
+
+ If FLAGS contains activity_info_pressure, may return an upcall
+ indicating that the activity must free some memory or will be
+ subject to paging. In this case, the activity should try to free
+ at least the indicated number of pages as quickly as possible. */
+RPC (activity_info, 2, 1, 0,
+ /* cap_t principal, cap_t activity, */
+ uintptr_t, flags, uintptr_t, until_period,
+ /* Out: */
+ struct activity_info, info)
+
+#undef RPC_STUB_PREFIX
+#undef RPC_ID_PREFIX
+#undef RPC_TARGET
+
+#endif
diff --git a/libviengoos/viengoos/addr-trans.h b/libviengoos/viengoos/addr-trans.h
new file mode 100644
index 0000000..de49ce7
--- /dev/null
+++ b/libviengoos/viengoos/addr-trans.h
@@ -0,0 +1,190 @@
+/* addr-trans.h - Address translation functions.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _VIENGOOS_ADDR_TRANS_H
+#define _VIENGOOS_ADDR_TRANS_H
+
+#include <stdint.h>
+#include <hurd/stddef.h>
+#include <hurd/math.h>
+
+/* Capabilities have two primary functions: they designate objects and
+ they participate in address translation. This structure controls
+ how the page table walker translates bits when passing through this
+ capability. */
+
+#define CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS 22
+#define CAP_ADDR_TRANS_SUBPAGES_BITS 4
+#define CAP_ADDR_TRANS_GDEPTH_BITS 6
+
+struct cap_addr_trans
+{
+ union
+ {
+ struct
+ {
+ /* The value of the guard and the subpage to use.
+
+ A capability page is partitioned into 2^SUBPAGES_LOG2 subpages.
+ This value determines the number of subpage index bits and
+ maximum number of guard bits. The number of subpage index bits
+ is SUBPAGES_LOG2 and the number of guard bits is the remainder
+ (the guard lies in the upper bits; the subpage in the lower).
+
+ If SUBPAGES_LOG2 is 0, there is a single subpage (covering the
+ entire page). This implies that there are no subpage bits (the
+ only valid offset is 0) and 22 possible guard bits. If
+ SUBPAGES_LOG2 is 8, there are 256 subpages, 8 subpage bits and a
+ maximum of 22-8=14 guard bits. */
+ uint32_t guard_subpage: CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS;
+ /* The log2 of the subpages. The size of a subpage is thus 2^(8 -
+ SUBPAGES_LOG2). Values of SUBPAGES_LOG2 other than 0 are only
+ allowed for cap pages. */
+ uint32_t subpages_log2: CAP_ADDR_TRANS_SUBPAGES_BITS;
+ /* Number of significant guard bits. The value of the GUARD is zero
+ extended if GDEPTH is greater than the number of available guard
+ bits. */
+ uint32_t gdepth: CAP_ADDR_TRANS_GDEPTH_BITS;
+ };
+ uint32_t raw;
+ };
+};
+
+#define CAP_ADDR_TRANS_INIT { { .raw = 0 } }
+#define CAP_ADDR_TRANS_VOID (struct cap_addr_trans) { { .raw = 0 } }
+
+/* The log2 number of subpages. */
+#define CAP_ADDR_TRANS_SUBPAGES_LOG2(cap_addr_trans_) \
+ ((cap_addr_trans_).subpages_log2)
+
+/* The number of subpages. */
+#define CAP_ADDR_TRANS_SUBPAGES(cap_addr_trans_) \
+ (1 << CAP_ADDR_TRANS_SUBPAGES_LOG2((cap_addr_trans_)))
+
+/* The designated subpage. */
+#define CAP_ADDR_TRANS_SUBPAGE(cap_addr_trans_) \
+ ((cap_addr_trans_).guard_subpage \
+ & (CAP_ADDR_TRANS_SUBPAGES ((cap_addr_trans_)) - 1))
+
+/* The log2 of the size of the named subpage (in capability
+ units). */
+#define CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2(cap_addr_trans_) \
+ (8 - (cap_addr_trans_).subpages_log2)
+
+/* The number of caps addressed by this capability. */
+#define CAP_ADDR_TRANS_SUBPAGE_SIZE(cap_addr_trans_) \
+ (1 << CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 ((cap_addr_trans_)))
+
+/* The offset in capability units (with respect to the start of the
+ capability page) of the first capability in the designated
+ sub-page. */
+#define CAP_ADDR_TRANS_SUBPAGE_OFFSET(cap_addr_trans_) \
+ (CAP_ADDR_TRANS_SUBPAGE ((cap_addr_trans_)) \
+ * CAP_ADDR_TRANS_SUBPAGE_SIZE ((cap_addr_trans_)))
+
+/* The number of guard bits. */
+#define CAP_ADDR_TRANS_GUARD_BITS(cap_addr_trans_) ((cap_addr_trans_).gdepth)
+
+/* The value of the guard. */
+#define CAP_ADDR_TRANS_GUARD(cap_addr_trans_) \
+ ((uint64_t) ((cap_addr_trans_).guard_subpage \
+ >> (cap_addr_trans_).subpages_log2))
+
+#define CATSGST_(test_, format, args...) \
+ if (! (test_)) \
+ { \
+ r_ = false; \
+ debug (1, format, ##args); \
+ }
+
+/* Set CAP_ADDR_TRANS_P_'s guard and the subpage. Returns true on success
+ (parameters valid), false otherwise. */
+#define CAP_ADDR_TRANS_SET_GUARD_SUBPAGE(cap_addr_trans_p_, guard_, gdepth_, \
+ subpage_, subpages_) \
+ ({ bool r_ = true; \
+ /* There must be at least 1 subpage. */ \
+ CATSGST_ (((subpages_) > 0), \
+ "subpages_ (%d) must be at least 1\n", (subpages_)); \
+ CATSGST_ (((subpages_) & ((subpages_) - 1)) == 0, \
+ "SUBPAGES_ (%d) must be a power of 2\n", (subpages_)); \
+ int subpages_log2_ = vg_msb ((subpages_)) - 1; \
+ CATSGST_ (subpages_log2_ <= 8, \
+ "maximum subpages is 256 (%d)\n", (subpages_)); \
+ CATSGST_ (0 <= (subpage_) && (subpage_) < (subpages_), \
+ "subpage (%d) must be between 0 and SUBPAGES_ (%d) - 1\n", \
+ (subpage_), (subpages_)); \
+ \
+ /* The number of required guard bits. */ \
+ int gbits_ = vg_msb64 ((guard_)); \
+ CATSGST_ (gbits_ <= (gdepth_), \
+ "Significant guard bits (%d) must be less than depth (%d)\n", \
+ gbits_, (gdepth_)); \
+ CATSGST_ (gbits_ + subpages_log2_ <= CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS, \
+ "Significant guard bits (%d) plus subpage bits (%d) > %d\n", \
+ gbits_, subpages_log2_, CAP_ADDR_TRANS_GUARD_SUBPAGE_BITS); \
+ \
+ if (r_) \
+ { \
+ (cap_addr_trans_p_)->subpages_log2 = subpages_log2_; \
+ (cap_addr_trans_p_)->gdepth = (gdepth_); \
+ (cap_addr_trans_p_)->guard_subpage \
+ = ((guard_) << subpages_log2_) | (subpage_); \
+ } \
+ r_; \
+ })
+
+/* Set *CAP_ADDR_TRANS_P_'s guard. Returns true on success (parameters
+ valid), false otherwise. */
+#define CAP_ADDR_TRANS_SET_GUARD(cap_addr_trans_p_, guard_, gdepth_) \
+ ({ int subpage_ = CAP_ADDR_TRANS_SUBPAGE (*(cap_addr_trans_p_)); \
+ int subpages_ = CAP_ADDR_TRANS_SUBPAGES (*(cap_addr_trans_p_)); \
+ CAP_ADDR_TRANS_SET_GUARD_SUBPAGE ((cap_addr_trans_p_), \
+ (guard_), (gdepth_), \
+ (subpage_), (subpages_)); \
+ })
+
+/* Set *CAP_ADDR_TRANS_P_'s subpage. Returns true on success (parameters
+ valid), false otherwise. */
+#define CAP_ADDR_TRANS_SET_SUBPAGE(cap_addr_trans_p_, subpage_, subpages_) \
+ ({ int gdepth_ = CAP_ADDR_TRANS_GUARD_BITS (*(cap_addr_trans_p_)); \
+ int guard_ = CAP_ADDR_TRANS_GUARD (*(cap_addr_trans_p_)); \
+ CAP_ADDR_TRANS_SET_GUARD_SUBPAGE ((cap_addr_trans_p_), \
+ (guard_), (gdepth_), \
+ (subpage_), (subpages_)); \
+ })
+
+/* Returns whether the capability address CAP_ADDR_TRANS is well-formed. */
+#define CAP_ADDR_TRANS_VALID(cap_addr_trans) \
+ ({ bool r_ = true; \
+ CATSGST_ (CAP_ADDR_TRANS_GUARD_BITS (cap_addr_trans) <= WORDSIZE, \
+ "Invalid guard depth (%d)", \
+ CAP_ADDR_TRANS_GUARD_BITS (cap_addr_trans)); \
+ CATSGST_ (CAP_ADDR_TRANS_SUBPAGES_LOG2 (cap_addr_trans) <= 8, \
+ "Invalid number of subpages (%d)", \
+ CAP_ADDR_TRANS_SUBPAGES (cap_addr_trans)); \
+ CATSGST_ (vg_msb (CAP_ADDR_TRANS_GUARD (cap_addr_trans)) \
+ <= CAP_ADDR_TRANS_GUARD_BITS (cap_addr_trans), \
+ "Significant guard bits (%d) exceeds guard depth (%d)", \
+ vg_msb (CAP_ADDR_TRANS_GUARD (cap_addr_trans)), \
+ CAP_ADDR_TRANS_GUARD_BITS (cap_addr_trans)); \
+ r_; \
+ })
+
+#endif
diff --git a/libviengoos/viengoos/addr.h b/libviengoos/viengoos/addr.h
new file mode 100644
index 0000000..badfcd9
--- /dev/null
+++ b/libviengoos/viengoos/addr.h
@@ -0,0 +1,179 @@
+/* addr.h - Address definitions.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _VIENGOOS_ADDR_H
+#define _VIENGOOS_ADDR_H 1
+
+#include <hurd/types.h>
+#include <hurd/math.h>
+#include <stdint.h>
+
+#include <assert.h>
+
+/* Addresses are 64-bits wide and translate up to 63 bits. They are
+ composed of a depth and a prefix that is depth bits wide.
+
+ The 64-bit field is packed as follows: the upper bits are encoded
+ in binary and represent the prefix, these are followed by a single
+ bit that is on, which is followed by a number encoded in unary.
+ The value of the unary number is 63 - depth. This allows easy
+ calculation of the depth and extraction of the prefix. Thus,
+ given:
+
+ xxxxx100
+
+ The unary value is 2 yielding a depth of 63 - 2 = 61. These bits
+ are encoded in the upper DEPTH bits of the field.
+
+ Leaves thus have a 1 in the least significant bit and nodes a
+ 0. */
+struct addr
+{
+ uint64_t raw;
+};
+#define ADDR_BITS 63
+/* Client-side capability handle. */
+typedef struct addr addr_t;
+
+#define ADDR_FMT "%llx/%d"
+#define ADDR_PRINTF(addr_) addr_prefix ((addr_)), addr_depth ((addr_))
+
+/* Create an address given a prefix and a depth. */
+#define ADDR(prefix_, depth_) \
+ ({ \
+ uint64_t p_ = (prefix_); \
+ uint64_t d_ = (depth_); \
+ assert (0 <= d_ && d_ <= ADDR_BITS); \
+ assert ((p_ & ((1 << (ADDR_BITS - d_)) - 1)) == 0); \
+ assert (p_ < (1ULL << ADDR_BITS)); \
+ (struct addr) { (p_ << 1ULL) | (1ULL << (ADDR_BITS - d_)) }; \
+ })
+
+/* Create an address given a prefix and a depth. Appropriate for use
+ as an initializer. */
+#define ADDR_INIT(prefix_, depth_) \
+ { .raw = ((((prefix_) << 1) | 1) << (ADDR_BITS - (depth_))) }
+
+#define ADDR_VOID ((struct addr) { 0ULL })
+#define ADDR_EQ(a, b) (a.raw == b.raw)
+#define ADDR_IS_VOID(a) (ADDR_EQ (a, ADDR_VOID))
+
+/* Return ADDR_'s depth. */
+static inline int
+addr_depth (addr_t addr)
+{
+ return ADDR_BITS - (vg_lsb64 (addr.raw) - 1);
+}
+
+/* Return ADDR's prefix. */
+static inline uint64_t
+addr_prefix (addr_t addr)
+{
+ /* (Clear the boundary bit and shift right 1.) */
+ return (addr.raw & ~(1ULL << (ADDR_BITS - addr_depth (addr)))) >> 1;
+}
+
+/* Extend the address ADDR by concatenating the lowest DEPTH bits of
+ PREFIX. */
+#if 0
+static inline addr_t
+addr_extend (addr_t addr, uint64_t prefix, int depth)
+{
+ assertx (depth >= 0, "depth: %d", depth);
+ assertx (addr_depth (addr) + depth <= ADDR_BITS,
+ "addr: " ADDR_FMT "; depth: %d", ADDR_PRINTF (addr), depth);
+ assertx (prefix < (1ULL << depth),
+ "prefix: %llx; depth: %lld", prefix, 1ULL << depth);
+ return ADDR (addr_prefix (addr)
+ | (prefix << (ADDR_BITS - addr_depth (addr) - depth)),
+ addr_depth (addr) + depth);
+}
+#else
+#define addr_extend(addr_, prefix_, depth_) \
+ ({ \
+ addr_t a__ = (addr_); \
+ uint64_t p__ = (prefix_); \
+ int d__ = (depth_); \
+ assertx (d__ >= 0, "depth: %d", d__); \
+ assertx (addr_depth ((a__)) + (d__) <= ADDR_BITS, \
+ "addr: " ADDR_FMT "; depth: %d", ADDR_PRINTF (a__), d__); \
+ assertx (p__ < (1ULL << d__), \
+ "prefix: %llx; depth: %lld", p__, 1ULL << d__); \
+ ADDR (addr_prefix ((a__)) \
+ | ((p__) << (ADDR_BITS - addr_depth ((a__)) - (d__))), \
+ addr_depth ((a__)) + (d__)); \
+ })
+#endif
+
+/* Decrease the depth of ADDR by DEPTH. */
+static inline addr_t
+addr_chop (addr_t addr, int depth)
+{
+ int d = addr_depth (addr) - depth;
+ assert (d >= 0);
+
+ return ADDR (addr_prefix (addr) & ~((1ULL << (ADDR_BITS - d)) - 1), d);
+}
+
+/* Return the last WIDTH bits of address's ADDR prefix. */
+static inline uint64_t
+addr_extract (addr_t addr, int width)
+{
+ assert (width <= addr_depth (addr));
+
+ return (addr_prefix (addr) >> (ADDR_BITS - addr_depth (addr)))
+ & ((1ULL << width) - 1);
+}
+
+/* Convert an address to a pointer. The address must name an object
+ mapped in the machine data instruction accessible part of the
+ address space. */
+#define ADDR_TO_PTR(addr_) \
+ ({ \
+ assert (addr_prefix ((addr_)) < ((uintptr_t) -1)); \
+ assert (addr_depth ((addr_)) == ADDR_BITS); \
+ (void *) (uintptr_t) addr_prefix ((addr_)); \
+ })
+
+/* Convert a pointer to an address. */
+#define PTR_TO_ADDR(ptr_) \
+ (ADDR ((uintptr_t) (ptr_), ADDR_BITS))
+
+/* Return the address of the page that would contain pointer PTR_. */
+#define PTR_TO_PAGE(ptr_) \
+ addr_chop (ADDR ((uintptr_t) (ptr_), ADDR_BITS), PAGESIZE_LOG2)
+
+static inline addr_t
+addr_add (addr_t addr, uint64_t count)
+{
+ int w = ADDR_BITS - addr_depth (addr);
+
+ return ADDR (addr_prefix (addr) + (count << w),
+ addr_depth (addr));
+}
+
+static inline addr_t
+addr_sub (addr_t addr, uint64_t count)
+{
+ return addr_add (addr, - count);
+}
+
+#endif
diff --git a/libviengoos/viengoos/cap.h b/libviengoos/viengoos/cap.h
new file mode 100644
index 0000000..c15d66f
--- /dev/null
+++ b/libviengoos/viengoos/cap.h
@@ -0,0 +1,730 @@
+/* cap.h - Capability definitions.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _VIENGOOS_CAP_H
+#define _VIENGOOS_CAP_H 1
+
+#include <hurd/types.h>
+#include <hurd/stddef.h>
+#include <viengoos/addr.h>
+#include <viengoos/addr-trans.h>
+#include <hurd/startup.h>
+#include <hurd/error.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+/* Capabilities.
+
+ Capabilities have three functions: a capability can designate an
+ object, it can participate in address translation, and it can be
+ used to control how the designated object should be managed. */
+
+/* The types of objects designated by capabilities. */
+enum cap_type
+ {
+#define CAP_TYPE_MIN cap_void
+ cap_void,
+ cap_page,
+ cap_rpage,
+ cap_cappage,
+ cap_rcappage,
+ cap_folio,
+ cap_activity,
+ cap_activity_control,
+ cap_thread,
+ cap_messenger,
+ cap_rmessenger,
+ cap_type_count,
+#define CAP_TYPE_MAX (cap_type_count - 1)
+ };
+
+static inline const char *
+cap_type_string (enum cap_type type)
+{
+ switch (type)
+ {
+ case cap_void:
+ return "void";
+ case cap_page:
+ return "page";
+ case cap_rpage:
+ return "rpage";
+ case cap_cappage:
+ return "cappage";
+ case cap_rcappage:
+ return "rcappage";
+ case cap_folio:
+ return "folio";
+ case cap_activity:
+ return "activity";
+ case cap_activity_control:
+ return "activity_control";
+ case cap_thread:
+ return "thread";
+ case cap_messenger:
+ return "messenger";
+ case cap_rmessenger:
+ return "rmessenger";
+ default:
+ return "unknown cap type";
+ };
+}
+
+/* Return whether two types are compatible in the sense that two caps
+ with the given types can designate the same object. */
+static inline bool
+cap_types_compatible (enum cap_type a, enum cap_type b)
+{
+ if (a == b)
+ return true;
+
+ if (a == cap_page && b == cap_rpage)
+ return true;
+ if (a == cap_rpage && b == cap_page)
+ return true;
+
+ if (a == cap_cappage && b == cap_rcappage)
+ return true;
+ if (a == cap_rcappage && b == cap_cappage)
+ return true;
+
+ if (a == cap_activity && b == cap_activity_control)
+ return true;
+ if (a == cap_activity_control && b == cap_activity)
+ return true;
+
+ if (a == cap_messenger && b == cap_rmessenger)
+ return true;
+ if (a == cap_rmessenger && b == cap_messenger)
+ return true;
+
+ return false;
+}
+
+/* Returns whether TYPE corresponds to a weak type. */
+static inline bool
+cap_type_weak_p (enum cap_type type)
+{
+ switch (type)
+ {
+ case cap_rpage:
+ case cap_rcappage:
+ case cap_activity:
+ case cap_rmessenger:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/* Returns the weakened type corresponding to TYPE. If type is
+ already a weak type, returns TYPE. */
+static inline enum cap_type
+cap_type_weaken (enum cap_type type)
+{
+ switch (type)
+ {
+ case cap_page:
+ case cap_rpage:
+ return cap_rpage;
+
+ case cap_cappage:
+ case cap_rcappage:
+ return cap_rcappage;
+
+ case cap_activity_control:
+ case cap_activity:
+ return cap_activity;
+
+ case cap_messenger:
+ case cap_rmessenger:
+ return cap_rmessenger;
+
+ default:
+ return cap_void;
+ }
+}
+
+/* Returns the strong type corresponding to TYPE. If type is already
+ a strong type, returns TYPE. */
+static inline enum cap_type
+cap_type_strengthen (enum cap_type type)
+{
+ switch (type)
+ {
+ case cap_page:
+ case cap_rpage:
+ return cap_page;
+
+ case cap_cappage:
+ case cap_rcappage:
+ return cap_cappage;
+
+ case cap_activity_control:
+ case cap_activity:
+ return cap_activity_control;
+
+ case cap_messenger:
+ case cap_rmessenger:
+ return cap_messenger;
+
+ default:
+ return type;
+ }
+}
+
+/* Object policy. */
+
+/* The object priority is a signed 7-bit number (-64 -> 63). A lower
+ numeric value corresponds to a lower priority. */
+#define OBJECT_PRIORITY_BITS 7
+#define OBJECT_PRIORITY_LEVELS (1 << OBJECT_PRIORITY_BITS)
+#define OBJECT_PRIORITY_MIN (-(1 << (OBJECT_PRIORITY_BITS - 1)))
+#define OBJECT_PRIORITY_DEFAULT (0)
+#define OBJECT_PRIORITY_MAX ((1 << (OBJECT_PRIORITY_BITS - 1)) - 1)
+
+struct object_policy
+{
+ union
+ {
+ struct
+ {
+ /* Whether a page is discardable (if so and the page is not
+ zero, trying to read the page from disk generates a first
+	 fault). */
+ int8_t discardable : 1;
+
+      /* An object's priority.  It can be used to override LRU
+ eviction. When a memory object is to be evicted, we select
+ the object with the lowest priority (higher value = lower
+ priority). */
+ int8_t priority : OBJECT_PRIORITY_BITS;
+ };
+ uint8_t raw;
+ };
+};
+
+#define OBJECT_POLICY_INIT { { raw: 0 } }
+#define OBJECT_POLICY(__op_discardable, __op_priority) \
+ (struct object_policy) { { { (__op_discardable), (__op_priority) } } }
+/* The default object policy: not discardable, managed by LRU. */
+#define OBJECT_POLICY_VOID \
+ OBJECT_POLICY (false, OBJECT_PRIORITY_DEFAULT)
+/* Synonym for OBJECT_POLICY_VOID. */
+#define OBJECT_POLICY_DEFAULT OBJECT_POLICY_VOID
+
+/* Capability properties. */
+
+struct cap_properties
+{
+ struct object_policy policy;
+ struct cap_addr_trans addr_trans;
+};
+
+#define CAP_PROPERTIES_INIT \
+ { OBJECT_POLICY_INIT, CAP_ADDR_TRANS_INIT }
+#define CAP_PROPERTIES(__op_object_policy, __op_addr_trans) \
+ (struct cap_properties) { __op_object_policy, __op_addr_trans }
+#define CAP_PROPERTIES_VOID \
+ CAP_PROPERTIES (OBJECT_POLICY_INIT, CAP_ADDR_TRANS_INIT)
+#define CAP_PROPERTIES_DEFAULT CAP_PROPERTIES_VOID
+
+/* Capability representation. */
+
+#ifdef RM_INTERN
+/* An OID corresponds to a page on a volume. Only the least 54 bits
+ are significant. */
+typedef uint64_t oid_t;
+#define OID_FMT "0x%llx"
+#define OID_PRINTF(__op_oid) ((oid_t) (__op_oid))
+#endif
+
+#define CAP_VERSION_BITS 20
+#define CAP_TYPE_BITS 6
+
+struct cap
+{
+#ifdef RM_INTERN
+ /* For a description of how versioning works, refer to the comment
+ titled "Object versioning" in object.h. */
+ uint32_t version : CAP_VERSION_BITS;
+ /* Whether the capability is weak. */
+ uint32_t weak_p : 1;
+
+ /* Whether the designated object may be discarded. */
+ uint32_t discardable : 1;
+ /* The designated object's priority. */
+ int32_t priority : OBJECT_PRIORITY_BITS;
+
+ struct cap_addr_trans addr_trans;
+
+ uint64_t type : CAP_TYPE_BITS;
+
+ /* If the capability designates an object, the object id. */
+ uint64_t oid : 64 - CAP_TYPE_BITS;
+#else
+ /* The shadow object (only for cappages and folios). */
+ struct object *shadow;
+
+ uint32_t discardable : 1;
+ int32_t priority : OBJECT_PRIORITY_BITS;
+
+ uint32_t type : CAP_TYPE_BITS;
+
+ uint32_t pad0 : 32 - 1 - OBJECT_PRIORITY_BITS - CAP_TYPE_BITS;
+
+ /* This capability's address description. */
+ struct cap_addr_trans addr_trans;
+#endif
+};
+
+#define CAP_VOID ((struct cap) { .type = cap_void })
+
+/* Return CAP's policy. */
+#define CAP_POLICY_GET(__cpg_cap) \
+ OBJECT_POLICY ((__cpg_cap).discardable, (__cpg_cap).priority)
+/* Set CAP's policy to POLICY. */
+#define CAP_POLICY_SET(__cps_cap, __cps_policy) \
+ do \
+ { \
+ (__cps_cap)->discardable = (__cps_policy).discardable; \
+ (__cps_cap)->priority = (__cps_policy).priority; \
+ } \
+ while (0)
+
+/* Return CAP's properties. */
+#define CAP_PROPERTIES_GET(__cpg_cap) \
+ CAP_PROPERTIES (CAP_POLICY_GET (__cpg_cap), \
+ (__cpg_cap).addr_trans)
+/* Set *CAP's properties to PROPERTIES. */
+#define CAP_PROPERTIES_SET(__cps_cap, __cps_properties) \
+ do \
+ { \
+ CAP_POLICY_SET (__cps_cap, (__cps_properties).policy); \
+ (__cps_cap)->addr_trans = (__cps_properties).addr_trans; \
+ } \
+ while (0)
+
+/* Convenience macros for printing capabilities. */
+
+#ifdef RM_INTERN
+#define CAP_FMT "{ " OID_FMT ".%d:%s %llx/%d; %d/%d }"
+#define CAP_PRINTF(cap) \
+ OID_PRINTF ((cap)->oid), (cap)->version, cap_type_string ((cap)->type), \
+ CAP_GUARD ((cap)), CAP_GUARD_BITS ((cap)), \
+ CAP_SUBPAGE ((cap)), CAP_SUBPAGES ((cap))
+#else
+#define CAP_FMT "{ %s %llx/%d; %d/%d }"
+#define CAP_PRINTF(cap) \
+ cap_type_string ((cap)->type), \
+ CAP_GUARD ((cap)), CAP_GUARD_BITS ((cap)), \
+ CAP_SUBPAGE ((cap)), CAP_SUBPAGES ((cap))
+#endif
+
+/* Accessors corresponding to the CAP_ADDR_TRANS macros. */
+#define CAP_SUBPAGES_LOG2(cap_) \
+ CAP_ADDR_TRANS_SUBPAGES_LOG2((cap_)->addr_trans)
+#define CAP_SUBPAGES(cap_) CAP_ADDR_TRANS_SUBPAGES ((cap_)->addr_trans)
+#define CAP_SUBPAGE(cap_) CAP_ADDR_TRANS_SUBPAGE((cap_)->addr_trans)
+#define CAP_SUBPAGE_SIZE_LOG2(cap_) \
+ CAP_ADDR_TRANS_SUBPAGE_SIZE_LOG2 ((cap_)->addr_trans)
+#define CAP_SUBPAGE_SIZE(cap_) \
+ CAP_ADDR_TRANS_SUBPAGE_SIZE ((cap_)->addr_trans)
+#define CAP_SUBPAGE_OFFSET(cap_) \
+ CAP_ADDR_TRANS_SUBPAGE_OFFSET((cap_)->addr_trans)
+#define CAP_GUARD_BITS(cap_) CAP_ADDR_TRANS_GUARD_BITS((cap_)->addr_trans)
+#define CAP_GUARD(cap_) CAP_ADDR_TRANS_GUARD((cap_)->addr_trans)
+
+/* NB: Only updates the shadow guard; NOT the capability. If the
+ latter behavior is desired, use cap_copy_x instead. */
+#define CAP_SET_GUARD_SUBPAGE(cap_, guard_, gdepth_, subpage_, subpages_) \
+ ({ bool r_ = true; \
+ if ((subpages_) != 1 \
+ && ! ((cap_)->type == cap_cappage || (cap_)->type == cap_rcappage)) \
+ { \
+ debug (1, "Subpages are only allow for cappages."); \
+ r_ = false; \
+ } \
+ if (r_) \
+ r_ = CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&(cap_)->addr_trans, \
+ (guard_), (gdepth_), \
+ (subpage_), (subpages_)); \
+ r_; \
+ })
+
+#define CAP_SET_GUARD(cap_, guard_, gdepth_) \
+ CAP_SET_GUARD_SUBPAGE ((cap_), (guard_), (gdepth_), \
+ CAP_SUBPAGE ((cap_)), CAP_SUBPAGES ((cap_)))
+#define CAP_SET_SUBPAGE(cap_, subpage_, subpages_) \
+ CAP_SET_GUARD_SUBPAGE ((cap_), CAP_GUARD (cap_), CAP_GUARD_BITS (cap_), \
+ (subpage_), (subpages_))
+
+/* Capability-related methods. */
+
+#define RPC_STUB_PREFIX rm
+#define RPC_ID_PREFIX RM
+
+#include <viengoos/rpc.h>
+
+enum
+ {
+ RM_cap_copy = 300,
+ RM_cap_rubout,
+ RM_cap_read,
+
+ RM_object_discarded_clear = 400,
+ RM_object_discard,
+ RM_object_status,
+ RM_object_reply_on_destruction,
+ RM_object_name,
+ };
+
+enum
+{
+ /* Use subpage in CAP_ADDR_TRANS (must be a subset of subpage in
+ SOURCE). */
+ CAP_COPY_COPY_ADDR_TRANS_SUBPAGE = 1 << 0,
+  /* Use the guard in CAP_ADDR_TRANS, not the guard in TARGET.  */
+ CAP_COPY_COPY_ADDR_TRANS_GUARD = 1 << 1,
+ /* Use guard in SOURCE. */
+ CAP_COPY_COPY_SOURCE_GUARD = 1 << 2,
+
+ /* When copying the capability copies a weakened reference. */
+ CAP_COPY_WEAKEN = 1 << 3,
+
+ /* Set the discardable bit on the capability. */
+ CAP_COPY_DISCARDABLE_SET = 1 << 4,
+
+ /* Set the priority of the object. */
+ CAP_COPY_PRIORITY_SET = 1 << 5,
+};
+
+/* Copy the capability in capability slot SOURCE to the slot at ADDR
+ in the object OBJECT. If OBJECT is ADDR_VOID, then the calling
+ thread's address space root is used.
+
+ By default, preserves SOURCE's subpage specification and copies
+ TARGET's guard and policy.
+
+   If CAP_COPY_COPY_ADDR_TRANS_SUBPAGE is set, then uses the subpage
+ specification in CAP_PROPERTIES. If CAP_COPY_COPY_ADDR_TRANS_GUARD
+ is set, uses the guard description in CAP_PROPERTIES.
+
+ If CAP_COPY_COPY_SOURCE_GUARD is set, uses the guard description in
+ source. Otherwise, preserves the guard in TARGET.
+
+ If CAP_COPY_WEAKEN is set, saves a weakened version of SOURCE
+ (e.g., if SOURCE's type is cap_page, a cap_rpage is saved).
+
+ If CAP_COPY_DISCARDABLE_SET is set, then sets the discardable bit
+ based on the value in PROPERTIES. Otherwise, copies SOURCE's
+ value.
+
+ If CAP_COPY_PRIORITY_SET is set, then sets the priority based on
+ the value in properties. Otherwise, copies SOURCE's value. */
+RPC(cap_copy, 5, 0, 0,
+ /* cap_t activity, cap_t object, */ addr_t, addr,
+ cap_t, source_object, addr_t, source_addr,
+ uintptr_t, flags, struct cap_properties, properties)
+
+/* Overwrite the capability slot at ADDR in the object OBJECT with a
+ void capability. */
+RPC(cap_rubout, 1, 0, 0,
+ /* cap_t activity, cap_t object, */ addr_t, addr)
+
+/* Returns the public bits of the capability at address ADDR in OBJECT
+ in TYPE and CAP_PROPERTIES. */
+RPC(cap_read, 1, 2, 0,
+ /* cap_t activity, cap_t object, */ addr_t, addr,
+ /* Out: */
+ uintptr_t, type, struct cap_properties, properties)
+
+/* Clear the discarded bit of the object at ADDR in object OBJECT. */
+RPC(object_discarded_clear, 1, 0, 0,
+ /* cap_t activity, cap_t object, */ addr_t, addr)
+
+/* If the object designated by OBJECT is in memory, discard it.
+ OBJECT must have write authority. This does not set the object's
+ discarded bit and thus does not result in a fault. Instead, the
+ next access will see, e.g., zero-filled memory. */
+RPC(object_discard, 0, 0, 0
+ /* cap_t activity, cap_t object, */)
+
+enum
+{
+ object_dirty = 1 << 0,
+ object_referenced = 1 << 1,
+};
+
+/* Returns whether OBJECT is dirty.  If CLEAR is set, the dirty bit is
+   cleared.  An object's dirty bit is set when the object is modified.
+   (Note: this is not the state of a frame but an indication of
+   whether the object has been modified since the last time the
+   dirty bit was cleared.)  */
+RPC (object_status, 1, 1, 0,
+ /* addr_t activity, addr_t object, */ bool, clear,
+ uintptr_t, status)
+
+/* Returns the object's return code in RETURN_CODE on object
+ destruction. */
+RPC (object_reply_on_destruction, 0, 1, 0,
+ /* cap_t principal, cap_t object, */
+ /* Out: */
+ uintptr_t, return_code);
+
+struct object_name
+{
+ char name[12];
+};
+
+/* Give object OBJECT a name. This is only used for debugging
+ purposes and is only supported by some objects, in particular,
+ activities and threads. */
+RPC (object_name, 1, 0, 0,
+ /* cap_t activity, cap_t object, */ struct object_name, name);
+
+
+#undef RPC_STUB_PREFIX
+#undef RPC_ID_PREFIX
+
+/* An object. */
+
+/* The number of capabilities per page. */
+enum
+ {
+ CAPPAGE_SLOTS = PAGESIZE / 16,
+ };
+/* The log2 of the number of capabilities per page. */
+enum
+ {
+ CAPPAGE_SLOTS_LOG2 = PAGESIZE_LOG2 - 4,
+ };
+
+struct object
+{
+ union
+ {
+ char data[PAGESIZE];
+ struct cap caps[CAPPAGE_SLOTS];
+ };
+};
+
+#ifdef RM_INTERN
+typedef struct activity *activity_t;
+#else
+typedef addr_t activity_t;
+#endif
+
+#ifndef RM_INTERN
+/* Return the address of cap CAP's shadow object. */
+static inline void *
+cap_get_shadow (const struct cap *cap)
+{
+ return cap->shadow;
+}
+
+/* Set CAP's shadow object to SHADOW. */
+static inline void
+cap_set_shadow (struct cap *cap, void *shadow)
+{
+ cap->shadow = shadow;
+}
+#endif
+
+/* Given cap CAP, return the corresponding object, or NULL, if there
+ is none. */
+#ifdef RM_INTERN
+extern struct object *cap_to_object (activity_t activity, struct cap *cap);
+#else
+static inline struct object *
+cap_to_object (activity_t activity, struct cap *cap)
+{
+ return cap_get_shadow (cap);
+}
+#endif
+
+/* Wrapper for the cap_copy method. Also updates shadow
+ capabilities. */
+static inline bool
+cap_copy_x (activity_t activity,
+ addr_t target_address_space, struct cap *target, addr_t target_addr,
+ addr_t source_address_space, struct cap source, addr_t source_addr,
+ int flags, struct cap_properties properties)
+{
+ /* By default, we preserve SOURCE's subpage specification. */
+ int subpage = CAP_SUBPAGE (&source);
+ int subpages = CAP_SUBPAGES (&source);
+
+ if ((flags & CAP_COPY_COPY_ADDR_TRANS_SUBPAGE))
+ /* Copy the subpage descriptor from PROPERTIES.ADDR_TRANS. */
+ {
+ if (CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans) != 1
+ && (source.type != cap_cappage
+ && source.type != cap_rcappage))
+ /* A subpage descriptor is only valid for
+ cappages. */
+ {
+ debug (1, "subpages (%d) specified for non-cappage "
+ "cap " CAP_FMT,
+ CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans),
+ CAP_PRINTF (&source));
+ return false;
+ }
+
+ if (!
+ (/* Start of PROPERTIES.ADDR_TRANS must be at or after start of
+ SOURCE. */
+ subpage * (256 / subpages)
+ <= (CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans) *
+ (256 / CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans)))
+ /* End of PROPERTIES.ADDR_TRANS must be before or at end of
+ SOURCE. */
+ && (((CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans) + 1) *
+ (256 / CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans)))
+ <= (subpage + 1) * (256 / subpages))))
+ /* The subpage descriptor does not narrow the
+ rights. */
+ {
+ debug (1, "specified subpage (%d/%d) not a subset "
+ " of source " CAP_FMT,
+ CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans),
+ CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans),
+ CAP_PRINTF (&source));
+ return false;
+ }
+
+ subpage = CAP_ADDR_TRANS_SUBPAGE (properties.addr_trans);
+ subpages = CAP_ADDR_TRANS_SUBPAGES (properties.addr_trans);
+ }
+
+ /* By default, we preserve the guard in TARGET. */
+ int guard = CAP_GUARD (target);
+ int gbits = CAP_GUARD_BITS (target);
+
+ if ((flags & CAP_COPY_COPY_ADDR_TRANS_GUARD))
+ /* Copy guard from PROPERTIES.ADDR_TRANS. */
+ {
+ guard = CAP_ADDR_TRANS_GUARD (properties.addr_trans);
+ gbits = CAP_ADDR_TRANS_GUARD_BITS (properties.addr_trans);
+ }
+ else if ((flags & CAP_COPY_COPY_SOURCE_GUARD))
+ /* Copy guard from SOURCE. */
+ {
+ guard = CAP_GUARD (&source);
+ gbits = CAP_GUARD_BITS (&source);
+ }
+
+ int type = source.type;
+ if ((flags & CAP_COPY_WEAKEN))
+ type = cap_type_weaken (type);
+
+#ifdef RM_INTERN
+ /* Changing a capability can change how addresses are translated.
+ In this case, we need to shoot down all cached translations. */
+ bool changes_translation = false;
+
+ if (target->oid != source.oid)
+ {
+ debug (5, "OID mismatch, changes translation");
+ changes_translation = true;
+ }
+ else if (target->version != source.version)
+ {
+ debug (5, "Version mismatch, changes translation");
+ changes_translation = true;
+ }
+
+ if (subpage != CAP_SUBPAGE (target) || subpages != CAP_SUBPAGES (target))
+ {
+ debug (5, "Subpage specification differs %d/%d -> %d/%d.",
+ subpage, subpages, CAP_SUBPAGE (target), CAP_SUBPAGES (target));
+ changes_translation = true;
+ }
+
+ if (guard != CAP_GUARD (target)
+ || gbits != CAP_GUARD_BITS (target))
+ {
+ debug (5, "Guard changed invalidating translation "
+ "0x%x/%d -> %llx/%d",
+ guard, gbits, CAP_GUARD (target), CAP_GUARD_BITS (target));
+ changes_translation = true;
+ }
+
+ if (type != target->type)
+ {
+ debug (5, "Type changed, invalidating translation");
+ changes_translation = true;
+ }
+
+ if (changes_translation)
+ {
+ extern void cap_shootdown (struct activity *activity, struct cap *cap);
+
+ debug (5, "Translation changed: " CAP_FMT " -> " CAP_FMT,
+ CAP_PRINTF (target), CAP_PRINTF (&source));
+
+ cap_shootdown (activity, target);
+ }
+#endif
+
+ if (! CAP_ADDR_TRANS_SET_GUARD_SUBPAGE (&properties.addr_trans,
+ guard, gbits,
+ subpage, subpages))
+ return false;
+
+#ifndef RM_INTERN
+ assert (! ADDR_IS_VOID (target_addr));
+ assert (! ADDR_IS_VOID (source_addr));
+
+ error_t err = rm_cap_copy (activity, target_address_space, target_addr,
+ source_address_space, source_addr,
+ flags, properties);
+ assert (err == 0);
+#endif
+
+ *target = source;
+ target->addr_trans = properties.addr_trans;
+ target->type = type;
+
+ if ((flags & CAP_COPY_DISCARDABLE_SET))
+ target->discardable = properties.policy.discardable;
+
+ if ((flags & CAP_COPY_PRIORITY_SET))
+ target->priority = properties.policy.priority;
+
+ return true;
+}
+
+/* Copy the capability SOURCE to capability TARGET. Preserves
+ SOURCE's subpage specification and TARGET's guard. Copies SOURCE's
+ policy. */
+static inline bool
+cap_copy (activity_t activity,
+ addr_t target_as, struct cap *target, addr_t target_addr,
+ addr_t source_as, struct cap source, addr_t source_addr)
+{
+ return cap_copy_x (activity, target_as, target, target_addr,
+ source_as, source, source_addr,
+ CAP_COPY_DISCARDABLE_SET | CAP_COPY_PRIORITY_SET,
+ CAP_PROPERTIES_GET (source));
+}
+
+#endif
diff --git a/libviengoos/viengoos/folio.h b/libviengoos/viengoos/folio.h
new file mode 100644
index 0000000..4172e5d
--- /dev/null
+++ b/libviengoos/viengoos/folio.h
@@ -0,0 +1,441 @@
+/* folio.h - Folio definitions.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _VIENGOOS_FOLIO_H
+#define _VIENGOOS_FOLIO_H 1
+
+#include <stdint.h>
+#include <bit-array.h>
+#include <viengoos/addr.h>
+#include <viengoos/cap.h>
+
+/* Number of user objects per folio. */
+enum
+ {
+ FOLIO_OBJECTS = 128,
+ };
+enum
+ {
+ FOLIO_OBJECTS_LOG2 = 7,
+ };
+
+/* User settable folio policy. */
+
+/* The range of valid folio priorities. A lower numerical value
+ corresponds to a lower priority. */
+#define FOLIO_PRIORITY_BITS 15
+#define FOLIO_PRIORITY_MIN (-(1 << (FOLIO_PRIORITY_BITS - 1)))
+#define FOLIO_PRIORITY_LRU (0)
+#define FOLIO_PRIORITY_MAX ((1 << (FOLIO_PRIORITY_BITS - 1)) - 1)
+
+/* The folio group range. */
+#define FOLIO_GROUP_BITS 15
+#define FOLIO_GROUP_NONE 0
+#define FOLIO_GROUP_MIN 0
+#define FOLIO_GROUP_MAX ((1 << FOLIO_BITS) - 1)
+
+struct folio_policy
+{
+ union
+ {
+ struct
+ {
+ /* Whether a folio is discardable. If an activity reaches it
+ quota, rather than returning an out of memory error, the
+ system may reclaim storage with the discardable bit set. It
+ performs the equivalent of calling folio_free on the
+ folio. */
+ int32_t discardable : 1;
+
+      /* The following are only used if DISCARDABLE is true.  */
+
+ /* Folios can belong to a group. When one folio is discarded,
+ all folios in that group are discarded, unless GROUP is
+ FOLIO_GROUP_NONE. */
+ uint32_t group : FOLIO_GROUP_BITS;
+
+ /* By default, the system tries to discard folios according to
+ an LRU policy. This can be overridden using this field. In
+ this case, folios from the lowest priority group are
+ discarded. */
+ int32_t priority : FOLIO_PRIORITY_BITS;
+ };
+ uint32_t raw;
+ };
+};
+
+#define FOLIO_POLICY_INIT { { raw: 0 } }
+#define FOLIO_POLICY_VOID (struct folio_policy) FOLIO_POLICY_INIT
+/* The default policy is not discardable. */
+#define FOLIO_POLICY_DEFAULT FOLIO_POLICY_VOID
+
+/* The format of the first page of a folio. This page is followed (on
+ disk) by FOLIO_OBJECTS pages. */
+struct folio
+{
+#ifdef RM_INTERN
+ /* Folios are the unit of storage accounting. Every folio belongs
+ to exactly one activity. To track what folios belong to a
+ particular activity, each folio is attached to a doubly-linked
+ list originating at its owner activity. */
+ struct cap activity;
+ struct cap next;
+ struct cap prev;
+
+ /* The storage policy. */
+ struct folio_policy policy;
+
+ struct
+ {
+    /* The disk version of each object in the folio.  */
+ uint32_t version : CAP_VERSION_BITS;
+
+ /* Whether a page has any content (i.e., if it is not
+ uninitialized). */
+ uint32_t content : 1;
+
+ /* The object's memory policy when accessed via the folio. */
+ uint32_t discardable : 1;
+ int32_t priority : OBJECT_PRIORITY_BITS;
+ } misc[1 + FOLIO_OBJECTS];
+
+ /* The type. */
+ uint8_t types[FOLIO_OBJECTS];
+
+  /* Bit array indicating whether an object has a non-empty wait
+ queue. */
+ uint8_t wait_queues_p[(1 + FOLIO_OBJECTS + (8 - 1)) / 8];
+
+ uint8_t discarded[(FOLIO_OBJECTS + (8 - 1)) / 8];
+
+ /* User reference and dirty bits. Optionally reset on read. Set
+ respectively when an object is referenced or modified. Flushing
+ the object to disk does not clear this. */
+ uint8_t dirty[(1 + FOLIO_OBJECTS + (8 - 1)) / 8];
+ uint8_t referenced[(1 + FOLIO_OBJECTS + (8 - 1)) / 8];
+
+ /* Head of the list of objects waiting for some event on this
+ object. An element of this array is only valid if the
+ corresponding element of WAIT_QUEUES_P is true. The list is a
+ circular list. HEAD->PREV points to the tail. TAIL->NEXT points
+ to the OBJECT (NOT HEAD). */
+ oid_t wait_queues[1 + FOLIO_OBJECTS];
+
+ uint64_t checksums[1 + FOLIO_OBJECTS][2];
+#else
+ /* User-space folio. */
+ struct cap objects[FOLIO_OBJECTS];
+#endif
+};
+
+#ifdef RM_INTERN
+typedef struct folio *folio_t;
+#else
+typedef addr_t folio_t;
+#endif
+
+/* OBJECT is from -1 to FOLIO_OBJECTS. */
+static inline enum cap_type
+folio_object_type (struct folio *folio, int object)
+{
+#ifdef RM_INTERN
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ if (object == -1)
+ return cap_folio;
+ return folio->types[object];
+#else
+ assert (object >= 0 && object < FOLIO_OBJECTS);
+ return folio->objects[object].type;
+#endif
+}
+
+static inline void
+folio_object_type_set (struct folio *folio, int object, enum cap_type type)
+{
+ assert (object >= 0 && object < FOLIO_OBJECTS);
+
+#ifdef RM_INTERN
+ folio->types[object] = type;
+#else
+ folio->objects[object].type = type;
+#endif
+}
+
+static inline struct object_policy
+folio_object_policy (struct folio *folio, int object)
+{
+ struct object_policy policy;
+
+#ifdef RM_INTERN
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ policy.discardable = folio->misc[object + 1].discardable;
+ policy.priority = folio->misc[object + 1].priority;
+#else
+ assert (object >= 0 && object < FOLIO_OBJECTS);
+
+ policy.discardable = folio->objects[object].discardable;
+ policy.priority = folio->objects[object].priority;
+#endif
+
+ return policy;
+}
+
+static inline void
+folio_object_policy_set (struct folio *folio, int object,
+ struct object_policy policy)
+{
+#ifdef RM_INTERN
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ folio->misc[object + 1].discardable = policy.discardable;
+ folio->misc[object + 1].priority = policy.priority;
+#else
+ assert (object >= 0 && object < FOLIO_OBJECTS);
+
+ folio->objects[object].discardable = policy.discardable;
+ folio->objects[object].priority = policy.priority;
+#endif
+}
+
+#ifdef RM_INTERN
+#include <bit-array.h>
+
+static inline bool
+folio_object_wait_queue_p (struct folio *folio, int object)
+{
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ return bit_test (folio->wait_queues_p, object + 1);
+}
+
+static inline void
+folio_object_wait_queue_p_set (struct folio *folio, int object,
+ bool valid)
+{
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ bit_set_to (folio->wait_queues_p, sizeof (folio->wait_queues_p),
+ object + 1, valid);
+}
+
+static inline oid_t
+folio_object_wait_queue (struct folio *folio, int object)
+{
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ return folio->wait_queues[object + 1];
+}
+
+static inline void
+folio_object_wait_queue_set (struct folio *folio, int object,
+ oid_t head)
+{
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ folio->wait_queues[object + 1] = head;
+}
+
+static inline uint32_t
+folio_object_version (struct folio *folio, int object)
+{
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ return folio->misc[object + 1].version;
+}
+
+static inline void
+folio_object_version_set (struct folio *folio, int object,
+ uint32_t version)
+{
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ folio->misc[object + 1].version = version;
+}
+
+static inline bool
+folio_object_content (struct folio *folio, int object)
+{
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ return folio->misc[object + 1].content;
+}
+
+static inline void
+folio_object_content_set (struct folio *folio, int object,
+ bool content)
+{
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ folio->misc[object + 1].content = content;
+}
+
+static inline bool
+folio_object_discarded (struct folio *folio, int object)
+{
+ assert (object >= 0 && object < FOLIO_OBJECTS);
+
+ return bit_test (folio->discarded, object);
+}
+
+static inline void
+folio_object_discarded_set (struct folio *folio, int object, bool valid)
+{
+ assert (object >= 0 && object < FOLIO_OBJECTS);
+
+ bit_set_to (folio->discarded, sizeof (folio->discarded),
+ object, valid);
+}
+
+static inline bool
+folio_object_referenced (struct folio *folio, int object)
+{
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ return bit_test (folio->referenced, object + 1);
+}
+
+static inline void
+folio_object_referenced_set (struct folio *folio, int object, bool p)
+{
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ bit_set_to (folio->referenced, sizeof (folio->referenced), object + 1, p);
+}
+
+static inline bool
+folio_object_dirty (struct folio *folio, int object)
+{
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ return bit_test (folio->dirty, object + 1);
+}
+
+static inline void
+folio_object_dirty_set (struct folio *folio, int object, bool p)
+{
+ assert (object >= -1 && object < FOLIO_OBJECTS);
+
+ bit_set_to (folio->dirty, sizeof (folio->dirty), object + 1, p);
+}
+#endif /* RM_INTERN */
+
+/* Return a cap designating folio FOLIO's OBJECT'th object. */
+#ifdef RM_INTERN
+/* This needs to be a macro as we use object_to_object_desc which is
+ made available by object.h but object.h includes this file. */
+#define folio_object_cap(__foc_folio, __foc_object) \
+ ({ \
+ struct cap __foc_cap; \
+ \
+ __foc_cap.type = folio_object_type (__foc_folio, __foc_object); \
+ __foc_cap.version = folio_object_version (__foc_folio, \
+ __foc_object); \
+ \
+ struct cap_properties __foc_cap_properties \
+ = CAP_PROPERTIES (folio_object_policy (__foc_folio, __foc_object), \
+ CAP_ADDR_TRANS_VOID); \
+ CAP_PROPERTIES_SET (&__foc_cap, __foc_cap_properties); \
+ \
+ __foc_cap.oid \
+ = object_to_object_desc ((struct object *) __foc_folio)->oid \
+ + 1 + __foc_object; \
+ \
+ __foc_cap; \
+ })
+#else
+static inline struct cap
+folio_object_cap (struct folio *folio, int object)
+{
+ assert (0 <= object && object < FOLIO_OBJECTS);
+ return folio->objects[object];
+}
+#endif
+
+#define RPC_STUB_PREFIX rm
+#define RPC_ID_PREFIX RM
+
+#include <viengoos/rpc.h>
+
+enum
+ {
+ RM_folio_alloc = 200,
+ RM_folio_free,
+ RM_folio_object_alloc,
+ RM_folio_policy
+ };
+
+/* Allocate a folio against ACTIVITY. Return a capability in the
+ caller's cspace in slot FOLIO. POLICY specifies the storage
+ policy. */
+RPC(folio_alloc, 1, 0, 1,
+ /* cap_t, principal, cap_t, activity, */
+ struct folio_policy, policy, cap_t, folio)
+
+/* Free the folio designated by FOLIO. */
+RPC(folio_free, 0, 0, 0
+ /* cap_t, principal, cap_t, folio */)
+
+/* Destroys the INDEXth object in folio FOLIO and allocate in its
+ place an object of type TYPE. If TYPE is CAP_VOID, any existing
+ object is destroyed, however, no object is instantiated in its
+ place. POLICY specifies the object's policy when accessed via the
+ folio. If an object is destroyed and there are waiters, they are
+ passed the return code RETURN_CODE.
+
+ Returns a capability to the allocated object in OBJECT. Returns a
+ weak capability to the object in OBJECT_WEAK. */
+RPC(folio_object_alloc, 4, 0, 2,
+ /* cap_t, principal, cap_t, folio, */
+ uintptr_t, index, uintptr_t, type,
+ struct object_policy, policy, uintptr_t, return_code,
+ /* Out: */
+ cap_t, object, cap_t, object_weak)
+
+/* Flags for folio_policy. */
+enum
+{
+ FOLIO_POLICY_DELIVER = 1 << 0,
+
+ FOLIO_POLICY_DISCARDABLE_SET = 1 << 1,
+ FOLIO_POLICY_GROUP_SET = 1 << 2,
+ FOLIO_POLICY_PRIORITY_SET = 1 << 3,
+
+ FOLIO_POLICY_SET = (FOLIO_POLICY_DISCARDABLE_SET
+ | FOLIO_POLICY_GROUP_SET
+ | FOLIO_POLICY_PRIORITY_SET)
+};
+
+/* Get and set the management policy for folio FOLIO.
+
+ If FOLIO_POLICY_DELIVER is set in FLAGS, then return FOLIO's
+ current paging policy in OLD. Then, if any of the set flags are
+ set, set the corresponding values based on the value of POLICY. */
+RPC(folio_policy, 2, 1, 0,
+ /* cap_t, principal, cap_t, folio, */
+ uintptr_t, flags, struct folio_policy, policy,
+ /* Out: */
+ struct folio_policy, old)
+
+#undef RPC_STUB_PREFIX
+#undef RPC_ID_PREFIX
+
+#endif
diff --git a/libviengoos/viengoos/futex.h b/libviengoos/viengoos/futex.h
new file mode 100644
index 0000000..3f77b6d
--- /dev/null
+++ b/libviengoos/viengoos/futex.h
@@ -0,0 +1,219 @@
+/* futex.h - Futex definitions.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _HURD_FUTEX_H
+#define _HURD_FUTEX_H 1
+
+#include <viengoos/addr.h>
+#include <hurd/startup.h>
+#include <hurd/error.h>
+#include <stdbool.h>
+#define __need_timespec
+#include <time.h>
+
+/* The interface to the kernel futex implementation. This is only
+ here because glibc really wants futexes. If this project gets
+ sufficient momentum, the kernel futex implementation should be
+ replaced with a more microkernel friendly approach to locks. */
+
+enum
+ {
+ RM_futex = 800,
+ };
+
+#define RPC_STUB_PREFIX rm
+#define RPC_ID_PREFIX RM
+
+#include <viengoos/rpc.h>
+
+/* Operations. */
+enum
+ {
+ FUTEX_WAIT,
+ FUTEX_WAKE,
+ FUTEX_WAKE_OP,
+ FUTEX_CMP_REQUEUE,
+#if 0
+ /* We don't support these operations. The first is deprecated and
+ the second requires FDs which the kernel doesn't support.
+ Although we could return EOPNOTSUPP, commenting them out
+ catches any uses at compile-time. */
+ FUTEX_REQUEUE,
+ FUTEX_FD,
+#endif
+ };
+
+enum
+ {
+ FUTEX_OP_SET = 0,
+ FUTEX_OP_ADD = 1,
+ FUTEX_OP_OR = 2,
+ FUTEX_OP_ANDN = 3,
+ FUTEX_OP_XOR = 4
+ };
+
+enum
+ {
+ FUTEX_OP_CMP_EQ = 0,
+ FUTEX_OP_CMP_NE = 1,
+ FUTEX_OP_CMP_LT = 2,
+ FUTEX_OP_CMP_LE = 3,
+ FUTEX_OP_CMP_GT = 4,
+ FUTEX_OP_CMP_GE = 5
+ };
+
+union futex_val2
+{
+ struct timespec timespec;
+ int value;
+};
+
+union futex_val3
+{
+ int value;
+ struct
+ {
+ int cmparg: 12;
+ int oparg: 12;
+ int cmp: 4;
+ int op: 4;
+ };
+};
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE \
+ (union futex_val3) { { 1, 0, FUTEX_OP_CMP_GT, FUTEX_OP_SET } }
+
+RPC (futex, 7, 1, 0,
+ /* cap_t principal, cap_t thread, */
+ void *, addr1, int, op, int, val1,
+ bool, timeout, union futex_val2, val2,
+ void *, addr2, union futex_val3, val3,
+ /* Out: */
+ long, out);
+
+#undef RPC_STUB_PREFIX
+#undef RPC_ID_PREFIX
+
+#ifndef RM_INTERN
+#include <errno.h>
+
+struct futex_return
+{
+ error_t err;
+ long ret;
+};
+
+static inline struct futex_return
+__attribute__((always_inline))
+futex_using (struct hurd_message_buffer *mb,
+ void *addr1, int op, int val1, struct timespec *timespec,
+ void *addr2, int val3)
+{
+ union futex_val2 val2;
+ if (timespec)
+ val2.timespec = *timespec;
+ else
+ __builtin_memset (&val2, 0, sizeof (val2));
+
+ error_t err;
+ long ret = 0; /* Elide gcc warning. */
+ if (mb)
+ err = rm_futex_using (mb,
+ ADDR_VOID, ADDR_VOID,
+ addr1, op, val1, !! timespec, val2, addr2,
+ (union futex_val3) val3, &ret);
+ else
+ err = rm_futex (ADDR_VOID, ADDR_VOID,
+ addr1, op, val1, !! timespec, val2, addr2,
+ (union futex_val3) val3, &ret);
+ return (struct futex_return) { err, ret };
+}
+
+/* Standard futex signatures. See futex documentation, e.g., Futexes
+ are Tricky by Ulrich Drepper. */
+static inline struct futex_return
+__attribute__((always_inline))
+futex (void *addr1, int op, int val1, struct timespec *timespec,
+ void *addr2, int val3)
+{
+ return futex_using (NULL, addr1, op, val1, timespec, addr2, val3);
+}
+
+
+/* If *F is VAL, wait until woken. */
+static inline long
+__attribute__((always_inline))
+futex_wait_using (struct hurd_message_buffer *mb, int *f, int val)
+{
+ struct futex_return ret;
+ ret = futex_using (mb, f, FUTEX_WAIT, val, NULL, 0, 0);
+ if (ret.err)
+ {
+ errno = ret.err;
+ return -1;
+ }
+ return ret.ret;
+}
+
+static inline long
+__attribute__((always_inline))
+futex_wait (int *f, int val)
+{
+ return futex_wait_using (NULL, f, val);
+}
+
+
+/* If *F is VAL, wait until woken. */
+static inline long
+__attribute__((always_inline))
+futex_timed_wait (int *f, int val, struct timespec *timespec)
+{
+ struct futex_return ret;
+ ret = futex (f, FUTEX_WAIT, val, timespec, 0, 0);
+ if (ret.err)
+ {
+ errno = ret.err;
+ return -1;
+ }
+ return ret.ret;
+}
+
+
+/* Signal NWAKE waiters waiting on futex F. */
+static inline long
+__attribute__((always_inline))
+futex_wake_using (struct hurd_message_buffer *mb, int *f, int nwake)
+{
+ struct futex_return ret;
+ ret = futex_using (mb, f, FUTEX_WAKE, nwake, NULL, 0, 0);
+ if (ret.err)
+ {
+ errno = ret.err;
+ return -1;
+ }
+ return ret.ret;
+}
+
+static inline long
+__attribute__((always_inline))
+futex_wake (int *f, int nwake)
+{
+ return futex_wake_using (NULL, f, nwake);
+}
+#endif /* !RM_INTERN */
+
+#endif
diff --git a/libviengoos/viengoos/ipc.h b/libviengoos/viengoos/ipc.h
new file mode 100644
index 0000000..67c2bad
--- /dev/null
+++ b/libviengoos/viengoos/ipc.h
@@ -0,0 +1,297 @@
+/* ipc.h - Interprocess communication interface.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _VIENGOOS_IPC_H
+#define _VIENGOOS_IPC_H 1
+
+#include <stdint.h>
+#include <errno.h>
+#include <viengoos/addr.h>
+#include <hurd/stddef.h>
+#include <viengoos/message.h>
+#include <assert.h>
+#include <hurd/startup.h>
+
+#ifdef USE_L4
+#include <l4.h>
+#endif
+
+/* IPC flags. */
+enum
+ {
+ /* IPC includes a receive phase. */
+ VG_IPC_RECEIVE = 1 << 0,
+ /* Don't unblock the receive buffer if there is no message queued
+ for delivery. */
+ VG_IPC_RECEIVE_NONBLOCKING = 1 << 1,
+ /* Activate the thread on message receipt. */
+ VG_IPC_RECEIVE_ACTIVATE = 1 << 2,
+ /* Set the receive messenger's thread to the caller. */
+ VG_IPC_RECEIVE_SET_THREAD_TO_CALLER = 1 << 3,
+ /* Set the receive messenger's address space root to the
+ caller's. */
+ VG_IPC_RECEIVE_SET_ASROOT_TO_CALLERS = 1 << 4,
+ /* Whether to receive the message inline. */
+ VG_IPC_RECEIVE_INLINE = 1 << 5,
+ /* Whether to receive any capabilities inline when receiving a
+ message inline (i.e., when VG_IPC_RECEIVE_INLINE is set). */
+ VG_IPC_RECEIVE_INLINE_CAP1 = 1 << 6,
+
+ /* IPC includes a send phase. */
+ VG_IPC_SEND = 1 << 7,
+ /* If the object is blocked, return EWOULDBLOCK. */
+ VG_IPC_SEND_NONBLOCKING = 1 << 8,
+ /* Activate the thread on message transfer. */
+ VG_IPC_SEND_ACTIVATE = 1 << 9,
+ /* Set the send messenger's thread to the caller. */
+ VG_IPC_SEND_SET_THREAD_TO_CALLER = 1 << 10,
+ /* Set the send messenger's address space root to the
+ caller's. */
+ VG_IPC_SEND_SET_ASROOT_TO_CALLERS = 1 << 11,
+ /* Whether to send the message inline. */
+ VG_IPC_SEND_INLINE = 1 << 12,
+
+ /* Which inline data to transfer when sending a message. Inline
+ data is ignored if the send buffer is not ADDR_VOID. */
+ VG_IPC_SEND_INLINE_WORD1 = 1 << 13,
+ VG_IPC_SEND_INLINE_WORD2 = 1 << 14,
+ VG_IPC_SEND_INLINE_CAP1 = 1 << 15,
+
+
+ /* The IPC includes a return phase. */
+ VG_IPC_RETURN = 1 << 16,
+
+ };
+
+#ifndef RM_INTERN
+/* An IPC consists of three phases: the receive phase, the send phase
+ and the return phase. All three phases are optional. Each phase
+ is executed after the previous phase has completed. If a phase
+ does not complete successfully, the phase is aborted and the
+ remaining phases are not executed.
+
+
+ RECEIVE PHASE
+
+ If FLAGS contains VG_IPC_RECEIVE, the IPC includes a receive phase.
+
+ If RECV_BUF is not ADDR_VOID, associates RECV_BUF with
+ RECV_MESSENGER.
+
+ If FLAGS contains VG_IPC_RECEIVE_NONBLOCKING:
+
+ Unblocks RECV_MESSENGER if RECV_MESSENGER has a messenger waiting
+ to deliver a message. Otherwise, returns EWOULDBLOCK.
+
+ Otherwise:
+
+ Unblocks RECV_MESSENGER.
+
+ Resources are charged to RECV_ACTIVITY.
+
+ If VG_IPC_RECEIVE_ACTIVATE is set, an activation is sent to the
+ thread associated with RECV_MESSENGER when RECV_MESSENGER receives
+ a message.
+
+
+ SEND PHASE
+
+ If FLAGS contains VG_IPC_SEND, the IPC includes a send phase.
+
+ If SEND_MESSENGER is ADDR_VOID, an implicit messenger is allocated
+ and VG_IPC_SEND_NONBLOCKING is assumed to be on.
+
+ If SEND_BUF is not ADDR_VOID, associates SEND_BUF with
+ SEND_MESSENGER. Otherwise, associates inline data (INLINE_WORD1,
+ INLINE_WORD2 and INLINE_CAP) according to the inline flags with
+ SEND_MESSENGER.
+
+ If FLAGS contains VG_IPC_SEND_NONBLOCKING:
+
+ If TARGET_MESSENGER is blocked, returns ETIMEDOUT.
+
+ Otherwise:
+
+ Blocks SEND_MESSENGER and enqueues it on TARGET_MESSENGER.
+
+ When TARGET_MESSENGER becomes unblocked, SEND_MESSENGER delivers
+ its message to TARGET_MESSENGER.
+
+ Resources are charged to SEND_ACTIVITY.
+
+ If VG_IPC_SEND_ACTIVATE is set, an activation is sent to the thread
+ associated with SEND_MESSENGER when SEND_MESSENGER's message is
+ transferred to TARGET_MESSENGER (or, when TARGET_MESSENGER is
+ destroyed).
+
+
+ RETURN PHASE
+
+ If FLAGS contains VG_IPC_RETURN, the IPC returns. Otherwise, the
+ calling thread is suspended until it is next activated. */
+static inline error_t
+vg_ipc_full (uintptr_t flags,
+ addr_t recv_activity, addr_t recv_messenger, addr_t recv_buf,
+ addr_t recv_inline_cap,
+ addr_t send_activity, addr_t target_messenger,
+ addr_t send_messenger, addr_t send_buf,
+ uintptr_t send_inline_word1, uintptr_t send_inline_word2,
+ addr_t send_inline_cap)
+{
+ error_t err = 0;
+
+#ifdef USE_L4
+ l4_msg_tag_t tag = l4_niltag;
+ l4_msg_tag_set_label (&tag, 8194);
+
+ l4_msg_t msg;
+ l4_msg_clear (msg);
+ l4_msg_set_msg_tag (msg, tag);
+
+ void msg_append_addr (addr_t addr)
+ {
+ int i;
+ for (i = 0; i < sizeof (addr_t) / sizeof (uintptr_t); i ++)
+ l4_msg_append_word (msg, ((uintptr_t *) &addr)[i]);
+ }
+
+ l4_msg_append_word (msg, flags);
+
+ msg_append_addr (recv_activity);
+ msg_append_addr (recv_messenger);
+ msg_append_addr (recv_buf);
+ msg_append_addr (recv_inline_cap);
+
+ msg_append_addr (send_activity);
+ msg_append_addr (target_messenger);
+
+ msg_append_addr (send_messenger);
+ msg_append_addr (send_buf);
+
+ l4_msg_append_word (msg, send_inline_word1);
+ l4_msg_append_word (msg, send_inline_word2);
+ msg_append_addr (send_inline_cap);
+
+ l4_msg_load (msg);
+ l4_accept (l4_map_grant_items (L4_COMPLETE_ADDRESS_SPACE));
+
+ bool call = true;
+
+ while (1)
+ {
+ extern struct hurd_startup_data *__hurd_startup_data;
+
+ if (call)
+ tag = l4_call (__hurd_startup_data->rm);
+ else
+ tag = l4_receive (__hurd_startup_data->rm);
+
+ if (likely (l4_ipc_failed (tag)))
+ {
+ if (((l4_error_code () >> 1) & 0x7) == 3)
+ {
+ if (l4_error_code () & 1)
+ /* IPC was interrupted in the receive phase, i.e., we
+ got a response. */
+ break;
+ else
+ call = false;
+ }
+ else
+ return EHOSTDOWN;
+ }
+ else
+ {
+ assert (l4_untyped_words (tag) == 1);
+ l4_msg_store (tag, msg);
+ /* Potential error performing IPC (or VG_IPC_RETURN specified). */
+ err = l4_msg_word (msg, 1);
+ break;
+ }
+ }
+#else
+# warning vg_ipc not ported to this architecture.
+#endif
+
+ return err;
+}
+
+static inline error_t
+vg_ipc (uintptr_t flags,
+ addr_t recv_activity, addr_t recv_messenger, addr_t recv_buf,
+ addr_t send_activity, addr_t target_messenger,
+ addr_t send_messenger, addr_t send_buf)
+{
+ return vg_ipc_full (flags,
+ recv_activity, recv_messenger, recv_buf, ADDR_VOID,
+ send_activity, target_messenger,
+ send_messenger, send_buf,
+ 0, 0, ADDR_VOID);
+}
+
+static inline error_t
+vg_ipc_short (uintptr_t flags,
+ addr_t recv_activity, addr_t recv_messenger, addr_t recv_cap,
+ addr_t send_activity, addr_t target_messenger,
+ addr_t send_messenger,
+ uintptr_t inline_word1, uintptr_t inline_word2,
+ addr_t inline_cap)
+{
+ return vg_ipc_full (flags,
+ recv_activity, recv_messenger, ADDR_VOID, recv_cap,
+ send_activity, target_messenger,
+ send_messenger, ADDR_VOID,
+ inline_word1, inline_word2, inline_cap);
+}
+
+static inline error_t
+vg_send (uintptr_t flags, addr_t send_activity, addr_t target_messenger,
+ addr_t send_messenger, addr_t send_buf)
+{
+ return vg_ipc_full (flags | VG_IPC_SEND | VG_IPC_SEND_ACTIVATE,
+ ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ send_activity, target_messenger,
+ send_messenger, send_buf,
+ 0, 0, ADDR_VOID);
+}
+
+static inline error_t
+vg_reply (uintptr_t flags, addr_t send_activity, addr_t target_messenger,
+ addr_t send_messenger, addr_t send_buf)
+{
+ return vg_ipc_full (flags | VG_IPC_SEND | VG_IPC_SEND_NONBLOCKING,
+ ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ send_activity, target_messenger, send_messenger, send_buf,
+ 0, 0, ADDR_VOID);
+}
+
+/* Suspend the caller until the next activation. */
+static inline error_t
+vg_suspend (void)
+{
+ return vg_ipc_full (0,
+ ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ 0, 0, ADDR_VOID);
+}
+
+#endif
+
+#endif
diff --git a/libviengoos/viengoos/message.h b/libviengoos/viengoos/message.h
new file mode 100644
index 0000000..bff1e9a
--- /dev/null
+++ b/libviengoos/viengoos/message.h
@@ -0,0 +1,229 @@
+/* message.h - Message buffer definitions.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _VIENGOOS_MESSAGE_H
+#define _VIENGOOS_MESSAGE_H 1
+
+#include <stdint.h>
+#include <assert.h>
+#include <viengoos/addr.h>
+#include <hurd/stddef.h>
+
+/* A message.
+
+ When handing a message structure to a messenger, it must start at
+ the beginning of a page and it cannot extend past the end of that
+ page. */
+struct vg_message
+{
+ union
+ {
+ struct
+ {
+ /* The number of capability addresses in the message. */
+ uint16_t cap_count;
+ /* The number of bytes of data transferred in this message. */
+ uint16_t data_count;
+
+ addr_t caps[/* cap_count */];
+ // char data[data_count];
+ };
+
+ char raw[PAGESIZE];
+ };
+};
+
+
+/* Clear the msg so that it references no capabilities and
+ contains no data. */
+static inline void
+vg_message_clear (struct vg_message *msg)
+{
+ msg->cap_count = 0;
+ msg->data_count = 0;
+}
+
+
+/* Return the number of capabilities referenced by MSG. */
+static inline int
+vg_message_cap_count (struct vg_message *msg)
+{
+ int max = (PAGESIZE - __builtin_offsetof (struct vg_message, caps))
+ / sizeof (addr_t);
+
+ int count = msg->cap_count;
+ if (count > max)
+ count = max;
+
+ return count;
+}
+
+/* Return the number of bytes of data in MSG. */
+static inline int
+vg_message_data_count (struct vg_message *msg)
+{
+ int max = PAGESIZE
+ - vg_message_cap_count (msg) * sizeof (addr_t)
+ - __builtin_offsetof (struct vg_message, caps);
+
+ int count = msg->data_count;
+ if (count > max)
+ count = max;
+
+ return count;
+}
+
+
+/* Return the start of the capability address array in msg MSG. */
+static inline addr_t *
+vg_message_caps (struct vg_message *msg)
+{
+ return msg->caps;
+}
+
+/* Return capability IDX in msg MSG. */
+static inline addr_t
+vg_message_cap (struct vg_message *msg, int idx)
+{
+ assert (idx < msg->cap_count);
+
+ return msg->caps[idx];
+}
+
+
+/* Return the start of the data in msg MSG. */
+static inline char *
+vg_message_data (struct vg_message *msg)
+{
+ return (void *) msg
+ + __builtin_offsetof (struct vg_message, caps)
+ + msg->cap_count * sizeof (addr_t);
+}
+
+/* Return data word WORD in msg MSG. */
+static inline uintptr_t
+vg_message_word (struct vg_message *msg, int word)
+{
+ assert (word < msg->data_count / sizeof (uintptr_t));
+
+ return ((uintptr_t *) vg_message_data (msg))[word];
+}
+
+
+/* Append the array of capability addresses CAPS to the msg MSG.
+ There must be sufficient room in the message buffer. */
+static inline void
+vg_message_append_caps (struct vg_message *msg, int cap_count, addr_t *caps)
+{
+ assert ((void *) vg_message_data (msg) - (void *) msg
+ + vg_message_data_count (msg) + cap_count * sizeof (*caps)
+ <= PAGESIZE);
+
+ __builtin_memmove (&msg->caps[msg->cap_count + cap_count],
+ &msg->caps[msg->cap_count],
+ msg->data_count);
+
+ __builtin_memcpy (&msg->caps[msg->cap_count],
+ caps,
+ cap_count * sizeof (addr_t));
+
+ msg->cap_count += cap_count;
+}
+
+/* Append the capability address CAP to the msg MSG. There must be
+ sufficient room in the message buffer. */
+static inline void
+vg_message_append_cap (struct vg_message *msg, addr_t cap)
+{
+ vg_message_append_caps (msg, 1, &cap);
+}
+
+
+/* Append DATA to the msg MSG. There must be sufficient room in the
+ message buffer. */
+static inline void
+vg_message_append_data (struct vg_message *msg, int bytes, char *data)
+{
+ int dstart = __builtin_offsetof (struct vg_message, caps)
+ + msg->cap_count * sizeof (addr_t);
+ int dend = dstart + msg->data_count;
+
+ int new_dend = dend + bytes;
+ assert (new_dend <= PAGESIZE);
+
+ msg->data_count += bytes;
+ __builtin_memcpy ((void *) msg + dend, data, bytes);
+}
+
+/* Append the word WORD to the msg MSG. There must be
+ sufficient room in the message buffer. */
+static inline void
+vg_message_append_word (struct vg_message *msg, uintptr_t word)
+{
+ vg_message_append_data (msg, sizeof (word), (char *) &word);
+}
+
+/* Return data word WORD in msg MSG. */
+static inline void
+vg_message_word_set (struct vg_message *msg, int pos, uintptr_t word)
+{
+ if (msg->data_count < pos * sizeof (uintptr_t))
+ msg->data_count = pos * sizeof (uintptr_t);
+
+ ((uintptr_t *) vg_message_data (msg))[pos] = word;
+}
+
+#include <s-printf.h>
+
+static inline void
+vg_message_dump (struct vg_message *message)
+{
+ s_printf ("%d bytes, %d caps\n",
+ vg_message_data_count (message),
+ vg_message_cap_count (message));
+
+ char d2h[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'A', 'B', 'C', 'D', 'E', 'F' };
+ unsigned char *data = vg_message_data (message);
+
+ int i = 0;
+ while (i < vg_message_data_count (message))
+ {
+ s_printf ("%d: ", i);
+
+ int j, k;
+ for (j = 0, k = 0;
+ i < vg_message_data_count (message) && j < 4 * 8;
+ j ++, i ++)
+ {
+ s_printf ("%c%c", d2h[data[i] >> 4], d2h[data[i] & 0xf]);
+ if (j % 4 == 3)
+ s_printf (" ");
+ }
+ s_printf ("\n");
+ }
+
+ for (i = 0; i < vg_message_cap_count (message); i ++)
+ s_printf ("cap %d: " ADDR_FMT "\n",
+ i, ADDR_PRINTF (vg_message_cap (message, i)));
+}
+
+
+#endif /* _VIENGOOS_MESSAGE_H */
diff --git a/libviengoos/viengoos/messenger.h b/libviengoos/viengoos/messenger.h
new file mode 100644
index 0000000..fbdc5ff
--- /dev/null
+++ b/libviengoos/viengoos/messenger.h
@@ -0,0 +1,87 @@
+/* messenger.h - Messenger buffer definitions.
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _VIENGOOS_MESSENGER_H
+#define _VIENGOOS_MESSENGER_H 1
+
+#include <stdint.h>
+#include <viengoos/addr.h>
+
+/* A messenger references a message buffer. It can transfer a message
+ (contained in its message buffer) to another messenger. It can
+ also receive a message from another messenger. A messenger can
+ block waiting to deliver a message to or receive a message from
+ another messenger.
+
+ To send a message, a payload is loaded into a message buffer and
+ associated with a messenger. The messenger is then enqueued on
+ another messenger. When the latter messenger is unblocked, the
+ message is delivered.
+
+ To avoid messages from being overwritten, messengers are blocked on
+ message delivery and must be explicitly unblocked before another
+ message is sent. */
+#ifdef RM_INTERN
+struct messenger;
+typedef struct messenger *vg_messenger_t;
+#else
+typedef addr_t vg_messenger_t;
+#endif
+
+#define VG_MESSENGER_INLINE_WORDS 2
+#define VG_MESSENGER_INLINE_CAPS 1
+
+/* Number of user-settable capability slots at the start of the
+ messenger structure. */
+enum
+ {
+ /* The thread to activate. */
+ VG_MESSENGER_THREAD_SLOT = 0,
+ /* The address space root relative to which all capability
+ addresses in the message buffer will be resolved. */
+ VG_MESSENGER_ASROOT_SLOT,
+ /* The associated message buffer. */
+ VG_MESSENGER_BUFFER_SLOT,
+ /* The activity that was delivered with the last message. */
+ VG_MESSENGER_ACTIVITY_SLOT,
+
+ VG_MESSENGER_SLOTS = 4,
+ };
+#define VG_MESSENGER_SLOTS_LOG2 2
+
+enum
+ {
+ VG_messenger_id = 900,
+ };
+
+#define RPC_STUB_PREFIX vg
+#define RPC_ID_PREFIX VG
+
+#include <viengoos/rpc.h>
+
+/* Set MESSENGER's ID to ID and return the old ID in OLD. */
+RPC(messenger_id, 1, 1, 0,
+ /* cap_t activity, cap_t messenger, */
+ uint64_t, id, uint64_t, old)
+
+#undef RPC_STUB_PREFIX
+#undef RPC_ID_PREFIX
+
+#endif /* _VIENGOOS_MESSENGER_H */
diff --git a/libviengoos/viengoos/misc.h b/libviengoos/viengoos/misc.h
new file mode 100644
index 0000000..e367c19
--- /dev/null
+++ b/libviengoos/viengoos/misc.h
@@ -0,0 +1,128 @@
+/* rm.h - Resource manager interface.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ The GNU Hurd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _VIENGOOS_MISC_H
+#define _VIENGOOS_MISC_H
+
+#include <hurd/startup.h>
+#include <viengoos/folio.h>
+#include <hurd/exceptions.h>
+#include <viengoos/thread.h>
+#include <viengoos/activity.h>
+#include <viengoos/futex.h>
+#include <l4/message.h>
+
+enum rm_method_id
+ {
+ RM_write = 100,
+ RM_read,
+ RM_as_dump,
+ RM_fault,
+ };
+
+static inline const char *
+rm_method_id_string (int id)
+{
+ switch (id)
+ {
+ case RM_write:
+ return "write";
+ case RM_read:
+ return "read";
+ case RM_as_dump:
+ return "as_dump";
+ case RM_fault:
+ return "fault";
+ case RM_folio_alloc:
+ return "folio_alloc";
+ case RM_folio_free:
+ return "folio_free";
+ case RM_folio_object_alloc:
+ return "folio_object_alloc";
+ case RM_folio_policy:
+ return "folio_policy";
+ case RM_cap_copy:
+ return "cap_copy";
+ case RM_cap_rubout:
+ return "cap_rubout";
+ case RM_cap_read:
+ return "cap_read";
+ case RM_object_discarded_clear:
+ return "object_discarded_clear";
+ case RM_object_discard:
+ return "object_discard";
+ case RM_object_status:
+ return "object_status";
+ case RM_object_reply_on_destruction:
+ return "object_reply_on_destruction";
+ case RM_object_name:
+ return "object_name";
+ case RM_thread_exregs:
+ return "thread_exregs";
+ case RM_thread_id:
+ return "thread_id";
+ case RM_thread_activation_collect:
+ return "thread_activation_collect";
+ case RM_activity_policy:
+ return "activity_policy";
+ case RM_activity_info:
+ return "activity_info";
+ case RM_futex:
+ return "futex";
+ default:
+ return "unknown method id";
+ }
+}
+
+#define RPC_STUB_PREFIX rm
+#define RPC_ID_PREFIX RM
+
+#include <viengoos/rpc.h>
+
+struct io_buffer
+{
+ /* The length. */
+ unsigned char len;
+ char data[(L4_NUM_BRS - 2) * sizeof (uintptr_t)];
+};
+
+/* Echo the character CHR on the manager console. */
+RPC(write, 1, 0, 0, struct io_buffer, io)
+
+/* Read up to MAX characters from the console's input device. */
+RPC(read, 1, 1, 0,
+ int, max, struct io_buffer, io)
+
+/* Dump the address space rooted at ROOT. */
+RPC(as_dump, 0, 0, 0,
+ /* cap_t, principal, cap_t, object */)
+
+/* Fault up to COUNT pages starting at START. Returns the number
+ actually faulted in OCOUNT. */
+RPC(fault, 2, 1, 0,
+ /* cap_t, principal, cap_t thread, */
+ uintptr_t, start, int, count,
+ /* Out: */
+ int, ocount)
+
+#undef RPC_STUB_PREFIX
+#undef RPC_ID_PREFIX
+
+#endif
diff --git a/libviengoos/viengoos/rpc.h b/libviengoos/viengoos/rpc.h
new file mode 100644
index 0000000..14feddd
--- /dev/null
+++ b/libviengoos/viengoos/rpc.h
@@ -0,0 +1,1054 @@
+/* rpc.h - RPC template definitions.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ This file is part of the GNU Hurd.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define RPC_CONCAT2(a,b) a##b
+#define RPC_CONCAT(a,b) RPC_CONCAT2(a,b)
+
+/* If RPC_STUB_PREFIX is defined, the prefix prepended plus an
+ underscore to all function names. If using, don't forget to #undef
+ after all uses to avoid potential redefinition errors. */
+#undef RPC_STUB_PREFIX_
+#ifndef RPC_STUB_PREFIX
+#define RPC_STUB_PREFIX_(x) x
+#else
+#define RPC_STUB_PREFIX_(name) RPC_CONCAT(RPC_STUB_PREFIX,_##name)
+#endif
+
+/* If RPC_STUB_PREFIX is defined, the prefix prepended plus an
+ underscore to all function names. If using, don't forget to #undef
+ after all uses to avoid potential redefinition errors. */
+#undef RPC_ID_PREFIX_
+#ifndef RPC_ID_PREFIX
+#define RPC_ID_PREFIX_(x) x
+#else
+#define RPC_ID_PREFIX_(name) RPC_CONCAT(RPC_ID_PREFIX,_##name)
+#endif
+
+#ifndef _HURD_RPC_H
+#define _HURD_RPC_H
+
+#include <hurd/stddef.h>
+#include <viengoos/message.h>
+#include <viengoos/ipc.h>
+#include <errno.h>
+
+#ifdef RM_INTERN
+extern struct vg_message *reply_buffer;
+
+/* We can't include messenger.h as it includes hurd/cap.h which in turn
+ includes this file. */
+struct messenger;
+struct activity;
+extern bool messenger_message_load (struct activity *activity,
+ struct messenger *target,
+ struct vg_message *message);
+#else
+# include <hurd/message-buffer.h>
+#endif
+typedef addr_t cap_t;
+
+/* First we define some cpp help macros. */
+#define CPP_IFELSE_0(when, whennot) whennot
+#define CPP_IFELSE_1(when, whennot) when
+#define CPP_IFELSE_2(when, whennot) when
+#define CPP_IFELSE_3(when, whennot) when
+#define CPP_IFELSE_4(when, whennot) when
+#define CPP_IFELSE_5(when, whennot) when
+#define CPP_IFELSE_6(when, whennot) when
+#define CPP_IFELSE_7(when, whennot) when
+#define CPP_IFELSE_8(when, whennot) when
+#define CPP_IFELSE_9(when, whennot) when
+#define CPP_IFELSE_10(when, whennot) when
+#define CPP_IFELSE_11(when, whennot) when
+#define CPP_IFELSE_12(when, whennot) when
+#define CPP_IFELSE_13(when, whennot) when
+#define CPP_IFELSE_14(when, whennot) when
+#define CPP_IFELSE_15(when, whennot) when
+#define CPP_IFELSE_16(when, whennot) when
+#define CPP_IFELSE_17(when, whennot) when
+#define CPP_IFELSE_18(when, whennot) when
+#define CPP_IFELSE_19(when, whennot) when
+#define CPP_IFELSE_20(when, whennot) when
+#define CPP_IFELSE_21(when, whennot) when
+#define CPP_IFELSE_22(when, whennot) when
+#define CPP_IFELSE_23(when, whennot) when
+#define CPP_IFELSE_24(when, whennot) when
+#define CPP_IFELSE_25(when, whennot) when
+
+#define CPP_IFELSE_(expr, when, whennot) \
+ CPP_IFELSE_##expr(when, whennot)
+#define CPP_IFELSE(expr, when, whennot) \
+ CPP_IFELSE_(expr, when, whennot)
+#define CPP_IF(expr, when) \
+ CPP_IFELSE(expr, when,)
+#define CPP_IFNOT(expr, whennot) \
+ CPP_IFELSE(expr, , whennot)
+
+#define CPP_SUCC_0 1
+#define CPP_SUCC_1 2
+#define CPP_SUCC_2 3
+#define CPP_SUCC_3 4
+#define CPP_SUCC_4 5
+#define CPP_SUCC_5 6
+#define CPP_SUCC_6 7
+#define CPP_SUCC_7 8
+#define CPP_SUCC_8 9
+#define CPP_SUCC_9 10
+#define CPP_SUCC_10 11
+#define CPP_SUCC_11 12
+#define CPP_SUCC_12 13
+#define CPP_SUCC_13 14
+#define CPP_SUCC_14 15
+#define CPP_SUCC_15 16
+#define CPP_SUCC_16 17
+#define CPP_SUCC_17 18
+#define CPP_SUCC_18 19
+#define CPP_SUCC_19 20
+#define CPP_SUCC_20 21
+#define CPP_SUCC_21 22
+#define CPP_SUCC_22 23
+#define CPP_SUCC_23 24
+#define CPP_SUCC_24 25
+#define CPP_SUCC_25 26
+
+#define CPP_SUCC_(x) CPP_SUCC_##x
+#define CPP_SUCC(x) CPP_SUCC_(x)
+
+/* We'd like to define CPP_ADD as:
+
+ #define CPP_ADD(x, y) \
+ CPP_IFELSE(y, CPP_ADD(SUCC(x), SUCC(y)), y)
+
+ This does not work as while a macro is being expanded, it becomes
+ ineligible for expansion. Thus, any references (including indirect
+ references) are not expanded. Repeated applications of a macro are,
+ however, allowed, and this is what the CPP_APPLY macro does. */
+#define CPP_APPLY1(x, y) x(y)
+#define CPP_APPLY2(x, y) x(CPP_APPLY1(x, y))
+#define CPP_APPLY3(x, y) x(CPP_APPLY2(x, y))
+#define CPP_APPLY4(x, y) x(CPP_APPLY3(x, y))
+#define CPP_APPLY5(x, y) x(CPP_APPLY4(x, y))
+#define CPP_APPLY6(x, y) x(CPP_APPLY5(x, y))
+#define CPP_APPLY7(x, y) x(CPP_APPLY6(x, y))
+#define CPP_APPLY8(x, y) x(CPP_APPLY7(x, y))
+#define CPP_APPLY9(x, y) x(CPP_APPLY8(x, y))
+#define CPP_APPLY10(x, y) x(CPP_APPLY9(x, y))
+#define CPP_APPLY11(x, y) x(CPP_APPLY10(x, y))
+#define CPP_APPLY12(x, y) x(CPP_APPLY11(x, y))
+#define CPP_APPLY13(x, y) x(CPP_APPLY12(x, y))
+#define CPP_APPLY14(x, y) x(CPP_APPLY13(x, y))
+#define CPP_APPLY15(x, y) x(CPP_APPLY14(x, y))
+#define CPP_APPLY16(x, y) x(CPP_APPLY15(x, y))
+#define CPP_APPLY17(x, y) x(CPP_APPLY16(x, y))
+#define CPP_APPLY18(x, y) x(CPP_APPLY17(x, y))
+#define CPP_APPLY19(x, y) x(CPP_APPLY18(x, y))
+#define CPP_APPLY20(x, y) x(CPP_APPLY19(x, y))
+#define CPP_APPLY21(x, y) x(CPP_APPLY20(x, y))
+#define CPP_APPLY22(x, y) x(CPP_APPLY21(x, y))
+#define CPP_APPLY23(x, y) x(CPP_APPLY22(x, y))
+#define CPP_APPLY24(x, y) x(CPP_APPLY23(x, y))
+#define CPP_APPLY25(x, y) x(CPP_APPLY24(x, y))
+
+#define CPP_ADD(x, y) \
+ CPP_IFELSE(y, CPP_APPLY##y(CPP_SUCC, x), x)
+
+/* Apply a function to each of the first n arguments.
+
+
+ CPP_FOREACH(2, CPP_SAFE_DEREF, NULL, a, b)
+
+ =>
+
+ ((a) ? *(a) : NULL), ((b) ? *(b) : NULL)
+ */
+#define CPP_FOREACH_0(func, cookie, ...)
+#define CPP_FOREACH_1(func, cookie, element, ...) func(cookie, element)
+#define CPP_FOREACH_2(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_1(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH_3(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_2(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH_4(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_3(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH_5(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_4(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH_6(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_5(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH_7(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_6(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH_8(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_7(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH_9(func, cookie, element, ...) func(cookie, element), CPP_FOREACH_8(func, cookie, __VA_ARGS__)
+
+#define CPP_FOREACH_(n, func, cookie, ...) \
+ CPP_FOREACH_##n(func, cookie, __VA_ARGS__)
+#define CPP_FOREACH(n, func, cookie, ...) \
+ CPP_FOREACH_(n, func, cookie, __VA_ARGS__)
+
+/* Used in conjunction with CPP_FOREACH. Generates C code that
+ dereferences ELEMENT if it is not NULL, otherwise, returns
+ COOKIE. */
+#define CPP_SAFE_DEREF(cookie, element) ((element) ? *(element) : (cookie))
+
+
+/* CPP treats commas specially so we have to be smart about how we
+ insert them algorithmically. For instance, this won't work:
+
+ #define COMMA ,
+ CPP_IFELSE(x, COMMA, )
+
+ To optional insert a comma, use this function instead. When the
+ result is need, invoke the result. For instance:
+
+ RPC_IF_COMMA(x) ()
+ */
+#define RPC_COMMA() ,
+#define RPC_NOCOMMA()
+#define RPC_IF_COMMA(x) CPP_IFELSE(x, RPC_COMMA, RPC_NOCOMMA)
+
+/* Append the argument __RLA_ARG, whose type is __RLA_TYPE, to the
+ message buffer MSG. */
+#define RPCLOADARG(__rla_type, __rla_arg) \
+ { \
+ if (__builtin_strcmp (#__rla_type, "cap_t") == 0) \
+ { \
+ union \
+ { \
+ __rla_type __rla_a; \
+ RPC_GRAB2 (, 1, RPC_TYPE_SHIFT (1, struct cap *, cap_t, __rla_foo)); \
+ cap_t __rla_cap; \
+ } __rla_arg2 = { (__rla_arg) }; \
+ vg_message_append_cap (msg, __rla_arg2.__rla_cap); \
+ } \
+ else \
+ { \
+ union \
+ { \
+ __rla_type __rla_a; \
+ uintptr_t __rla_raw[(sizeof (__rla_type) + sizeof (uintptr_t) - 1) \
+ / sizeof (uintptr_t)]; \
+ } __rla_arg2 = { (__rla_arg) }; \
+ int __rla_i; \
+ for (__rla_i = 0; \
+ __rla_i < sizeof (__rla_arg2) / sizeof (uintptr_t); \
+ __rla_i ++) \
+ vg_message_append_word (msg, __rla_arg2.__rla_raw[__rla_i]); \
+ } \
+ }
+
+#define RPCLOAD0(...)
+#define RPCLOAD1(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD0(__VA_ARGS__)
+#define RPCLOAD2(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD1(__VA_ARGS__)
+#define RPCLOAD3(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD2(__VA_ARGS__)
+#define RPCLOAD4(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD3(__VA_ARGS__)
+#define RPCLOAD5(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD4(__VA_ARGS__)
+#define RPCLOAD6(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD5(__VA_ARGS__)
+#define RPCLOAD7(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD6(__VA_ARGS__)
+#define RPCLOAD8(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD7(__VA_ARGS__)
+#define RPCLOAD9(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD8(__VA_ARGS__)
+#define RPCLOAD10(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD9(__VA_ARGS__)
+#define RPCLOAD11(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD10(__VA_ARGS__)
+#define RPCLOAD12(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD11(__VA_ARGS__)
+#define RPCLOAD13(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD12(__VA_ARGS__)
+#define RPCLOAD14(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD13(__VA_ARGS__)
+#define RPCLOAD15(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD14(__VA_ARGS__)
+#define RPCLOAD16(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD15(__VA_ARGS__)
+#define RPCLOAD17(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD16(__VA_ARGS__)
+#define RPCLOAD18(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD17(__VA_ARGS__)
+#define RPCLOAD19(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD18(__VA_ARGS__)
+#define RPCLOAD20(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD19(__VA_ARGS__)
+#define RPCLOAD21(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD20(__VA_ARGS__)
+#define RPCLOAD22(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD21(__VA_ARGS__)
+#define RPCLOAD23(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD22(__VA_ARGS__)
+#define RPCLOAD24(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD23(__VA_ARGS__)
+#define RPCLOAD25(__rl_type, __rl_arg, ...) RPCLOADARG(__rl_type, __rl_arg) RPCLOAD24(__VA_ARGS__)
+#define RPCLOAD_(__rl_count, ...) RPCLOAD##__rl_count (__VA_ARGS__)
+#define RPCLOAD(__rl_count, ...) RPCLOAD_ (__rl_count, __VA_ARGS__)
+
+/* Store the next argument in the message MSG whose type is __RSA_TYPE
+ in *__RSA_ARG. */
+#define RPCSTOREARG(__rsa_type, __rsa_arg) \
+ { \
+ if (__builtin_strcmp (#__rsa_type, "cap_t") == 0) \
+ { \
+ union \
+ { \
+ __rsa_type *__rsa_a; \
+ cap_t *__rsa_cap; \
+ } __rsa_arg2; \
+ __rsa_arg2.__rsa_a = __rsa_arg; \
+ if (vg_message_cap_count (msg) > __rsu_cap_idx) \
+ { \
+ if (__rsa_arg) \
+ *__rsa_arg2.__rsa_cap = vg_message_cap (msg, __rsu_cap_idx); \
+ __rsu_cap_idx ++; \
+ } \
+ else \
+ __rsu_err = EINVAL; \
+ } \
+ else \
+ { \
+ union \
+ { \
+ __rsa_type __rsa_a; \
+ uintptr_t __rsa_raw[(sizeof (__rsa_type) + sizeof (uintptr_t) - 1) \
+ / sizeof (uintptr_t)]; \
+ } __rsa_arg2; \
+ int __rsa_i; \
+ for (__rsa_i = 0; \
+ __rsa_i < sizeof (__rsa_arg2) / sizeof (uintptr_t); \
+ __rsa_i ++) \
+ if (vg_message_data_count (msg) / sizeof (uintptr_t) \
+ > __rsu_data_idx) \
+ __rsa_arg2.__rsa_raw[__rsa_i] \
+ = vg_message_word (msg, __rsu_data_idx ++); \
+ else \
+ __rsu_err = EINVAL; \
+ if (! __rsu_err && __rsa_arg) \
+ *(__rsa_arg) = __rsa_arg2.__rsa_a; \
+ } \
+ }
+
+#define RPCSTORE0(...)
+#define RPCSTORE1(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE0(__VA_ARGS__)
+#define RPCSTORE2(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE1(__VA_ARGS__)
+#define RPCSTORE3(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE2(__VA_ARGS__)
+#define RPCSTORE4(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE3(__VA_ARGS__)
+#define RPCSTORE5(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE4(__VA_ARGS__)
+#define RPCSTORE6(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE5(__VA_ARGS__)
+#define RPCSTORE7(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE6(__VA_ARGS__)
+#define RPCSTORE8(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE7(__VA_ARGS__)
+#define RPCSTORE9(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE8(__VA_ARGS__)
+#define RPCSTORE10(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE9(__VA_ARGS__)
+#define RPCSTORE11(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE10(__VA_ARGS__)
+#define RPCSTORE12(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE11(__VA_ARGS__)
+#define RPCSTORE13(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE12(__VA_ARGS__)
+#define RPCSTORE14(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE13(__VA_ARGS__)
+#define RPCSTORE15(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE14(__VA_ARGS__)
+#define RPCSTORE16(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE15(__VA_ARGS__)
+#define RPCSTORE17(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE16(__VA_ARGS__)
+#define RPCSTORE18(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE17(__VA_ARGS__)
+#define RPCSTORE19(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE18(__VA_ARGS__)
+#define RPCSTORE20(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE19(__VA_ARGS__)
+#define RPCSTORE21(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE20(__VA_ARGS__)
+#define RPCSTORE22(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE21(__VA_ARGS__)
+#define RPCSTORE23(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE22(__VA_ARGS__)
+#define RPCSTORE24(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE23(__VA_ARGS__)
+#define RPCSTORE25(__rs_type, __rs_arg, ...) \
+ RPCSTOREARG(__rs_type, __rs_arg) RPCSTORE24(__VA_ARGS__)
+
+#define RPCSTORE_(__rs_count, ...) RPCSTORE##__rs_count (__VA_ARGS__)
+#define RPCSTORE(__rs_count, ...) RPCSTORE_ (__rs_count, __VA_ARGS__)
+
+/* Marshal a request. */
+#define RPC_SEND_MARSHAL(id, icount, ...) \
+ static inline void \
+ __attribute__((always_inline)) \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _send_marshal) \
+ (struct vg_message *msg, \
+ RPC_GRAB2 (, icount, ##__VA_ARGS__) RPC_IF_COMMA(icount) () \
+ cap_t reply_messenger) \
+ { \
+ vg_message_clear (msg); \
+ /* Add the label. */ \
+ vg_message_append_word (msg, RPC_ID_PREFIX_(id)); \
+ /* Then load the arguments. */ \
+ RPCLOAD (icount, ##__VA_ARGS__); \
+ /* Finally, add the reply messenger. */ \
+ vg_message_append_cap (msg, reply_messenger); \
+ }
+
+/* Unmarshal a request. */
+#define RPC_SEND_UNMARSHAL(id, icount, ...) \
+ static inline error_t \
+ __attribute__((always_inline)) \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _send_unmarshal) \
+ (struct vg_message *msg, \
+ RPC_GRAB2 (*, icount, ##__VA_ARGS__) RPC_IF_COMMA(icount) () \
+ cap_t *reply_messenger) \
+ { \
+ uintptr_t label = 0; \
+ if (likely (vg_message_data_count (msg) >= sizeof (uintptr_t))) \
+ label = vg_message_word (msg, 0); \
+ if (label != RPC_ID_PREFIX_(id)) \
+ { \
+ debug (1, #id " has bad method id, %d, excepted %d", \
+ label, RPC_ID_PREFIX_(id)); \
+ return EINVAL; \
+ } \
+ \
+ int __rsu_data_idx __attribute__ ((unused)) = 1; \
+ int __rsu_cap_idx __attribute__ ((unused)) = 0; \
+ error_t __rsu_err = 0; \
+ RPCSTORE (icount, ##__VA_ARGS__); \
+ if (unlikely (__rsu_err \
+ || (__rsu_data_idx * sizeof (uintptr_t) \
+ != vg_message_data_count (msg) \
+ && __rsu_cap_idx + 1 != vg_message_cap_count (msg)))) \
+ { \
+ debug (1, #id " has wrong number of arguments: " \
+ "got %d bytes and %d caps; expected %d/%d", \
+ __rsu_data_idx * sizeof (uintptr_t), __rsu_cap_idx + 1, \
+ vg_message_data_count (msg), \
+ vg_message_cap_count (msg)); \
+ return EINVAL; \
+ } \
+ \
+ if (reply_messenger) \
+ *reply_messenger = vg_message_cap (msg, __rsu_cap_idx); \
+ return 0; \
+ }
+
+/* Prepare a receive buffer. */
+#ifdef RM_INTERN
+#define RPC_RECEIVE_MARSHAL(id, ret_cap_count, ...)
+#else
+#define RPC_RECEIVE_MARSHAL(id, ret_cap_count, ...) \
+ static inline void \
+ __attribute__((always_inline)) \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _receive_marshal) \
+ (struct vg_message *msg RPC_IF_COMMA(ret_cap_count) () \
+ RPC_GRAB2 (, ret_cap_count, ##__VA_ARGS__)) \
+ { \
+ vg_message_clear (msg); \
+ /* Load the arguments. */ \
+ RPCLOAD (ret_cap_count, ##__VA_ARGS__); \
+ assert (vg_message_data_count (msg) == 0); \
+ assert (vg_message_cap_count (msg) == ret_cap_count); \
+ }
+#endif
+
+/* Marshal a reply. */
+#define RPC_REPLY_MARSHAL(id, out_count, ret_cap_count, ...) \
+ static inline void \
+ __attribute__((always_inline)) \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply_marshal) \
+ (struct vg_message *msg \
+ RPC_IF_COMMA (out_count) () \
+ RPC_GRAB2 (, out_count, ##__VA_ARGS__) \
+ RPC_IF_COMMA (ret_cap_count) () \
+ RPC_GRAB2 (, ret_cap_count, \
+ RPC_TYPE_SHIFT (ret_cap_count, struct cap *, \
+ RPC_CHOP2 (out_count, __VA_ARGS__)))) \
+ { \
+ vg_message_clear (msg); \
+ \
+ /* The error code. */ \
+ vg_message_append_word (msg, 0); \
+ RPCLOAD (CPP_ADD (out_count, ret_cap_count), ##__VA_ARGS__); \
+ \
+ assert (vg_message_cap_count (msg) == ret_cap_count); \
+ }
+
+/* Unmarshal a reply. */
+#define RPC_REPLY_UNMARSHAL(id, out_count, ret_cap_count, ...) \
+ static inline error_t \
+ __attribute__((always_inline)) \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply_unmarshal) \
+ (struct vg_message *msg \
+ RPC_IF_COMMA (CPP_ADD (out_count, ret_cap_count)) () \
+ RPC_GRAB2(*, CPP_ADD (out_count, ret_cap_count), ##__VA_ARGS__)) \
+ { \
+ /* The server error code. */ \
+ error_t __rsu_err = EINVAL; \
+ if (likely (vg_message_data_count (msg) >= sizeof (uintptr_t))) \
+ __rsu_err = vg_message_word (msg, 0); \
+ if (unlikely (__rsu_err)) \
+ return __rsu_err; \
+ \
+ int __rsu_data_idx __attribute__ ((unused)) = 1; \
+ int __rsu_cap_idx __attribute__ ((unused)) = 0; \
+ RPCSTORE (CPP_ADD (out_count, ret_cap_count), ##__VA_ARGS__); \
+ if (unlikely (__rsu_err \
+ || (__rsu_data_idx * sizeof (uintptr_t) \
+ != vg_message_data_count (msg) \
+ || __rsu_cap_idx != vg_message_cap_count (msg)))) \
+ { \
+ debug (1, #id " has wrong number of arguments: " \
+ "got %d bytes and %d caps; expected %d/%d", \
+ __rsu_data_idx * sizeof (uintptr_t), __rsu_cap_idx, \
+ vg_message_data_count (msg), \
+ vg_message_cap_count (msg)); \
+ return EINVAL; \
+ } \
+ return 0; \
+ }
+
+/* RPC_ARGUMENTS takes a list of types and arguments and returns the first
+ COUNT arguments. (NB: the list may contain more than COUNT
+ arguments!).
+
+ RPC_ARGUMENTS(2, &, int, i, int, j, double, d)
+
+ =>
+
+ &i, &j
+*/
+#define RPC_ARGUMENTS0(...)
+#define RPC_ARGUMENTS1(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg RPC_ARGUMENTS0(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS2(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS1(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS3(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS2(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS4(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS3(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS5(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS4(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS6(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS5(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS7(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS6(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS8(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS7(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS9(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS8(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS10(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS9(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS11(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS10(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS12(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS11(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS13(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS12(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS14(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS13(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS15(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS14(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS16(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS15(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS17(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS16(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS18(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS17(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS19(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS18(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS20(__ra_prefix, __ra_type, __ra_arg, ...) __ra_prefix __ra_arg, RPC_ARGUMENTS19(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS_(__ra_count, __ra_prefix, ...) RPC_ARGUMENTS##__ra_count(__ra_prefix, __VA_ARGS__)
+#define RPC_ARGUMENTS(__ra_count, __ra_prefix, ...) RPC_ARGUMENTS_(__ra_count, __ra_prefix, __VA_ARGS__)
+
+/* Given a list of arguments, returns the arguments minus the first
+ COUNT **pairs** of arguments. For example:
+
+ RPC_CHOP2(1, int, i, int, j, double, d)
+
+ =>
+
+ int, j, double, d
+
+ */
+#define RPC_CHOP2_0(...) __VA_ARGS__
+#define RPC_CHOP2_1(__rc_a, __rc_b, ...) RPC_CHOP2_0(__VA_ARGS__)
+#define RPC_CHOP2_2(__rc_a, __rc_b, ...) RPC_CHOP2_1(__VA_ARGS__)
+#define RPC_CHOP2_3(__rc_a, __rc_b, ...) RPC_CHOP2_2(__VA_ARGS__)
+#define RPC_CHOP2_4(__rc_a, __rc_b, ...) RPC_CHOP2_3(__VA_ARGS__)
+#define RPC_CHOP2_5(__rc_a, __rc_b, ...) RPC_CHOP2_4(__VA_ARGS__)
+#define RPC_CHOP2_6(__rc_a, __rc_b, ...) RPC_CHOP2_5(__VA_ARGS__)
+#define RPC_CHOP2_7(__rc_a, __rc_b, ...) RPC_CHOP2_6(__VA_ARGS__)
+#define RPC_CHOP2_8(__rc_a, __rc_b, ...) RPC_CHOP2_7(__VA_ARGS__)
+#define RPC_CHOP2_9(__rc_a, __rc_b, ...) RPC_CHOP2_8(__VA_ARGS__)
+#define RPC_CHOP2_10(__rc_a, __rc_b, ...) RPC_CHOP2_9(__VA_ARGS__)
+#define RPC_CHOP2_11(__rc_a, __rc_b, ...) RPC_CHOP2_10(__VA_ARGS__)
+#define RPC_CHOP2_12(__rc_a, __rc_b, ...) RPC_CHOP2_11(__VA_ARGS__)
+#define RPC_CHOP2_13(__rc_a, __rc_b, ...) RPC_CHOP2_12(__VA_ARGS__)
+#define RPC_CHOP2_14(__rc_a, __rc_b, ...) RPC_CHOP2_13(__VA_ARGS__)
+#define RPC_CHOP2_15(__rc_a, __rc_b, ...) RPC_CHOP2_14(__VA_ARGS__)
+#define RPC_CHOP2_16(__rc_a, __rc_b, ...) RPC_CHOP2_15(__VA_ARGS__)
+#define RPC_CHOP2_17(__rc_a, __rc_b, ...) RPC_CHOP2_16(__VA_ARGS__)
+#define RPC_CHOP2_18(__rc_a, __rc_b, ...) RPC_CHOP2_17(__VA_ARGS__)
+#define RPC_CHOP2_19(__rc_a, __rc_b, ...) RPC_CHOP2_18(__VA_ARGS__)
+#define RPC_CHOP2_20(__rc_a, __rc_b, ...) RPC_CHOP2_19(__VA_ARGS__)
+#define RPC_CHOP2_21(__rc_a, __rc_b, ...) RPC_CHOP2_20(__VA_ARGS__)
+#define RPC_CHOP2_22(__rc_a, __rc_b, ...) RPC_CHOP2_21(__VA_ARGS__)
+#define RPC_CHOP2_23(__rc_a, __rc_b, ...) RPC_CHOP2_22(__VA_ARGS__)
+#define RPC_CHOP2_24(__rc_a, __rc_b, ...) RPC_CHOP2_23(__VA_ARGS__)
+#define RPC_CHOP2_25(__rc_a, __rc_b, ...) RPC_CHOP2_24(__VA_ARGS__)
+#define RPC_CHOP2_(__rc_count, ...) RPC_CHOP2_##__rc_count (__VA_ARGS__)
+#define RPC_CHOP2(__rc_count, ...) RPC_CHOP2_(__rc_count, __VA_ARGS__)
+
+/* Given a list of arguments, returns the first COUNT **pairs** of
+ arguments, the elements of each pair separated by SEP and each pair
+ separated by a comma. For example:
+
+ For example:
+
+ RPC_GRAB2(, 2, int, i, int, j, double, d)
+
+ =>
+
+ int i, int j
+*/
+#define RPC_GRAB2_0(__rg_sep, ...)
+#define RPC_GRAB2_1(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b RPC_GRAB2_0(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_2(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_1(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_3(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_2(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_4(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_3(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_5(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_4(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_6(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_5(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_7(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_6(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_8(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_7(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_9(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_8(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_10(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_9(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_11(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_10(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_12(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_11(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_13(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_12(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_14(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_13(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_15(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_14(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_16(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_15(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_17(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_16(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_18(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_17(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_19(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_18(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_20(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_19(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_21(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_20(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_22(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_21(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_23(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_22(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_24(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_23(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_25(__rg_sep, __rg_a, __rg_b, ...) __rg_a __rg_sep __rg_b, RPC_GRAB2_24(__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2_(__rg_sep, __rg_count, ...) RPC_GRAB2_##__rg_count (__rg_sep, __VA_ARGS__)
+#define RPC_GRAB2(__rg_sep, __rg_count, ...) RPC_GRAB2_(__rg_sep, __rg_count, __VA_ARGS__)
+
+#define RPC_GRAB_0(...)
+#define RPC_GRAB_1(__rg_a, ...) __rg_a RPC_GRAB_0(__VA_ARGS__)
+#define RPC_GRAB_2(__rg_a, ...) __rg_a, RPC_GRAB_1(__VA_ARGS__)
+#define RPC_GRAB_3(__rg_a, ...) __rg_a, RPC_GRAB_2(__VA_ARGS__)
+#define RPC_GRAB_4(__rg_a, ...) __rg_a, RPC_GRAB_3(__VA_ARGS__)
+#define RPC_GRAB_5(__rg_a, ...) __rg_a, RPC_GRAB_4(__VA_ARGS__)
+#define RPC_GRAB_6(__rg_a, ...) __rg_a, RPC_GRAB_5(__VA_ARGS__)
+#define RPC_GRAB_7(__rg_a, ...) __rg_a, RPC_GRAB_6(__VA_ARGS__)
+#define RPC_GRAB_8(__rg_a, ...) __rg_a, RPC_GRAB_7(__VA_ARGS__)
+#define RPC_GRAB_9(__rg_a, ...) __rg_a, RPC_GRAB_8(__VA_ARGS__)
+#define RPC_GRAB_10(__rg_a, ...) __rg_a, RPC_GRAB_9(__VA_ARGS__)
+#define RPC_GRAB_11(__rg_a, ...) __rg_a, RPC_GRAB_10(__VA_ARGS__)
+#define RPC_GRAB_12(__rg_a, ...) __rg_a, RPC_GRAB_11(__VA_ARGS__)
+#define RPC_GRAB_13(__rg_a, ...) __rg_a, RPC_GRAB_12(__VA_ARGS__)
+#define RPC_GRAB_14(__rg_a, ...) __rg_a, RPC_GRAB_13(__VA_ARGS__)
+#define RPC_GRAB_15(__rg_a, ...) __rg_a, RPC_GRAB_14(__VA_ARGS__)
+#define RPC_GRAB_16(__rg_a, ...) __rg_a, RPC_GRAB_15(__VA_ARGS__)
+#define RPC_GRAB_17(__rg_a, ...) __rg_a, RPC_GRAB_16(__VA_ARGS__)
+#define RPC_GRAB_18(__rg_a, ...) __rg_a, RPC_GRAB_17(__VA_ARGS__)
+#define RPC_GRAB_19(__rg_a, ...) __rg_a, RPC_GRAB_18(__VA_ARGS__)
+#define RPC_GRAB_20(__rg_a, ...) __rg_a, RPC_GRAB_19(__VA_ARGS__)
+#define RPC_GRAB_21(__rg_a, ...) __rg_a, RPC_GRAB_20(__VA_ARGS__)
+#define RPC_GRAB_22(__rg_a, ...) __rg_a, RPC_GRAB_21(__VA_ARGS__)
+#define RPC_GRAB_23(__rg_a, ...) __rg_a, RPC_GRAB_22(__VA_ARGS__)
+#define RPC_GRAB_24(__rg_a, ...) __rg_a, RPC_GRAB_23(__VA_ARGS__)
+#define RPC_GRAB_25(__rg_a, ...) __rg_a, RPC_GRAB_24(__VA_ARGS__)
+#define RPC_GRAB_(__rg_count, ...) RPC_GRAB_##__rg_count (__VA_ARGS__)
+#define RPC_GRAB(__rg_count, ...) RPC_GRAB_(__rg_count, __VA_ARGS__)
+
+#define RPC_TYPE_SHIFT_0(...)
+#define RPC_TYPE_SHIFT_1(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg RPC_TYPE_SHIFT_0(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_2(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_1(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_3(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_2(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_4(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_3(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_5(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_4(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_6(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_5(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_7(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_6(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_8(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_7(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_9(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_8(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_10(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_9(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_11(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_10(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_12(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_11(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_13(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_12(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_14(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_13(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_15(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_14(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_16(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_15(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_17(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_16(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_18(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_17(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_19(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_18(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_20(__ra_new_type, __ra_type, __ra_arg, ...) __ra_new_type, __ra_arg, RPC_TYPE_SHIFT_19(__ra_new_type, __VA_ARGS__)
+#define RPC_TYPE_SHIFT_(__ra_count, __ra_new_type, ...) RPC_TYPE_SHIFT_##__ra_count(__ra_new_type, __VA_ARGS__)
+#ifdef RM_INTERN
+# define RPC_TYPE_SHIFT(__ra_count, __ra_new_type, ...) RPC_TYPE_SHIFT_(__ra_count, __ra_new_type, __VA_ARGS__)
+#else
+# define RPC_TYPE_SHIFT(__ra_count, __ra_new_type, ...) __VA_ARGS__
+#endif
+
+/* Ensure that there are X pairs of arguments. */
+#define RPC_INVALID_NUMBER_OF_ARGUMENTS_
+#define RPC_EMPTY_LIST_(x) RPC_INVALID_NUMBER_OF_ARGUMENTS_##x
+#define RPC_EMPTY_LIST(x) RPC_EMPTY_LIST_(x)
+#define RPC_ENSURE_ARGS(count, ...) \
+ RPC_EMPTY_LIST (RPC_CHOP2 (count, __VA_ARGS__))
+
+#define RPC_SEND_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, ...) \
+ RPC_SEND_MARSHAL(id, in_count, ##__VA_ARGS__) \
+ RPC_SEND_UNMARSHAL(id, in_count, ##__VA_ARGS__)
+
+#define RPC_RECEIVE_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, ...) \
+ RPC_RECEIVE_MARSHAL(id, ret_cap_count, \
+ RPC_CHOP2 (CPP_ADD (in_count, out_count), \
+ ##__VA_ARGS__))
+
+#define RPC_REPLY_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, ...) \
+ RPC_REPLY_MARSHAL(id, out_count, ret_cap_count, \
+ RPC_CHOP2 (in_count, ##__VA_ARGS__)) \
+ RPC_REPLY_UNMARSHAL(id, out_count, ret_cap_count, \
+ RPC_CHOP2 (in_count, ##__VA_ARGS__))
+
+#define RPC_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, ...) \
+ RPC_ENSURE_ARGS(CPP_ADD (CPP_ADD (in_count, out_count), \
+ ret_cap_count), \
+ ##__VA_ARGS__) \
+ RPC_SEND_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, \
+ ##__VA_ARGS__) \
+ RPC_RECEIVE_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, \
+ ##__VA_ARGS__) \
+ RPC_REPLY_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, \
+ ##__VA_ARGS__)
+
+/* Send a message. __RPC_REPLY_MESSENGER designates the messenger that
+ should receive the reply. (Its buffer should have already been
+ prepared using, e.g., the corresponding receive_marshal
+ function.) */
+#ifndef RM_INTERN
+#define RPC_SEND_(postfix, id, in_count, out_count, ret_cap_count, ...) \
+ static inline error_t \
+ __attribute__((always_inline)) \
+ RPC_CONCAT(RPC_STUB_PREFIX_(id), postfix) \
+ (cap_t __rpc_activity, cap_t __rpc_object \
+ RPC_IF_COMMA (in_count) () \
+ RPC_GRAB2 (, in_count, __VA_ARGS__), \
+ cap_t __rpc_reply_messenger) \
+ { \
+ struct hurd_message_buffer *mb = hurd_message_buffer_alloc (); \
+ mb->just_free = true; \
+ \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _send_marshal) \
+ (mb->request \
+ RPC_IF_COMMA (in_count) () RPC_ARGUMENTS(in_count,, __VA_ARGS__), \
+ __rpc_reply_messenger); \
+ \
+ error_t err = vg_send (VG_IPC_SEND_SET_THREAD_TO_CALLER \
+ | VG_IPC_SEND_SET_ASROOT_TO_CALLERS, \
+ __rpc_activity, __rpc_object, \
+ mb->sender, ADDR_VOID); \
+ \
+ return err; \
+ }
+#else
+#define RPC_SEND_(postfix, id, in_count, out_count, ret_cap_count, ...)
+#endif
+
+/* Send a message. Abort if the target is not ready. */
+#ifndef RM_INTERN
+#define RPC_SEND_NONBLOCKING_(postfix, id, in_count, out_count, ret_cap_count, ...) \
+ static inline error_t \
+ __attribute__((always_inline)) \
+ RPC_CONCAT(RPC_STUB_PREFIX_(id), postfix) \
+ (cap_t __rpc_activity, cap_t __rpc_object \
+ RPC_IF_COMMA (in_count) () \
+ RPC_GRAB2 (, in_count, __VA_ARGS__), \
+ cap_t __rpc_reply_messenger) \
+ { \
+ struct hurd_message_buffer *mb = hurd_message_buffer_alloc (); \
+ \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _send_marshal) \
+ (mb->request \
+ RPC_IF_COMMA (in_count) () RPC_ARGUMENTS(in_count,, __VA_ARGS__), \
+ __rpc_reply_messenger); \
+ \
+ error_t err = vg_reply (VG_IPC_SEND_SET_THREAD_TO_CALLER \
+ | VG_IPC_SEND_SET_ASROOT_TO_CALLERS, \
+ __rpc_activity, __rpc_object, \
+ mb->sender, ADDR_VOID); \
+ \
+ hurd_message_buffer_free (mb); \
+ \
+ return err; \
+ }
+#else
+#define RPC_SEND_NONBLOCKING_(postfix, id, in_count, out_count, ret_cap_count, ...)
+#endif
+
+/* Send a message and wait for a reply. */
+#ifndef RM_INTERN
+#define RPC_(postfix, id, in_count, out_count, ret_cap_count, ...) \
+ static inline error_t \
+ __attribute__((always_inline)) \
+ RPC_CONCAT (RPC_CONCAT (RPC_STUB_PREFIX_(id), _using), postfix) \
+ (struct hurd_message_buffer *mb, \
+ addr_t __rpc_activity, \
+ addr_t __rpc_object \
+ /* In arguments. */ \
+ RPC_IF_COMMA (in_count) () \
+ RPC_GRAB2 (, in_count, __VA_ARGS__) \
+ /* Out arguments (data and caps). */ \
+ RPC_IF_COMMA (CPP_ADD (out_count, ret_cap_count)) () \
+ RPC_GRAB2 (*, CPP_ADD (out_count, ret_cap_count), \
+ RPC_CHOP2 (in_count, __VA_ARGS__))) \
+ { \
+ /* Prepare the reply buffer. */ \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _receive_marshal) \
+ (mb->reply \
+ RPC_IF_COMMA (ret_cap_count) () \
+ CPP_FOREACH(ret_cap_count, CPP_SAFE_DEREF, ADDR_VOID, \
+ RPC_ARGUMENTS (ret_cap_count, , \
+ RPC_CHOP2 (CPP_ADD (in_count, out_count), \
+ __VA_ARGS__)))); \
+ \
+ /* Then the send buffer. */ \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _send_marshal) \
+ (mb->request \
+ RPC_IF_COMMA (in_count) () \
+ RPC_ARGUMENTS (in_count,, __VA_ARGS__), \
+ mb->receiver); \
+ \
+ hurd_activation_message_register (mb); \
+ \
+ /* We will be resumed via an activation. */ \
+ error_t err = vg_ipc (VG_IPC_RECEIVE | VG_IPC_SEND \
+ | VG_IPC_RECEIVE_ACTIVATE \
+ | VG_IPC_SEND_SET_THREAD_TO_CALLER \
+ | VG_IPC_SEND_SET_ASROOT_TO_CALLERS \
+ | VG_IPC_RECEIVE_SET_THREAD_TO_CALLER \
+ | VG_IPC_RECEIVE_SET_ASROOT_TO_CALLERS, \
+ __rpc_activity, \
+ mb->receiver_strong, ADDR_VOID, \
+ __rpc_activity, __rpc_object, \
+ mb->sender, ADDR_VOID); \
+ if (err) \
+ /* Error sending the IPC. */ \
+ hurd_activation_message_unregister (mb); \
+ else \
+ err = RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply_unmarshal) \
+ (mb->reply \
+ RPC_IF_COMMA (CPP_ADD (out_count, ret_cap_count)) () \
+ RPC_ARGUMENTS (CPP_ADD (out_count, ret_cap_count),, \
+ RPC_CHOP2 (in_count, ##__VA_ARGS__))); \
+ \
+ return err; \
+ } \
+ \
+ static inline error_t \
+ __attribute__((always_inline)) \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), postfix) \
+ (addr_t __rpc_activity, \
+ addr_t __rpc_object \
+ /* In arguments. */ \
+ RPC_IF_COMMA (in_count) () \
+ RPC_GRAB2 (, in_count, __VA_ARGS__) \
+ /* Out arguments (data and caps). */ \
+ RPC_IF_COMMA (CPP_ADD (out_count, ret_cap_count)) () \
+ RPC_GRAB2 (*, CPP_ADD (out_count, ret_cap_count), \
+ RPC_CHOP2 (in_count, __VA_ARGS__))) \
+ { \
+ struct hurd_message_buffer *mb = hurd_message_buffer_alloc (); \
+ \
+ error_t err; \
+ err = RPC_CONCAT (RPC_CONCAT (RPC_STUB_PREFIX_(id), _using), postfix) \
+ (mb, __rpc_activity, __rpc_object \
+ RPC_IF_COMMA (CPP_ADD (CPP_ADD (in_count, out_count), \
+ ret_cap_count)) () \
+ RPC_ARGUMENTS (CPP_ADD (CPP_ADD (in_count, out_count), \
+ ret_cap_count),, __VA_ARGS__)); \
+ \
+ hurd_message_buffer_free (mb); \
+ \
+ return err; \
+ }
+#else
+# define RPC_(postfix, id, in_count, out_count, ret_cap_count, ...)
+#endif
+
+/* Send a reply to __RPC_TARGET. If __RPC_TARGET does not accept the
+ message immediately, abort sending. */
+#ifndef RM_INTERN
+#define RPC_REPLY_(id, in_count, out_count, ret_cap_count, ...) \
+ static inline error_t \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply) \
+ (addr_t __rpc_activity, \
+ addr_t __rpc_target \
+ /* Out data. */ \
+ RPC_IF_COMMA (out_count) () \
+ RPC_GRAB2 (, out_count, RPC_CHOP2 (in_count, ##__VA_ARGS__)) \
+ /* Return capabilities. */ \
+ RPC_IF_COMMA (ret_cap_count) () \
+ RPC_GRAB2 (, ret_cap_count, \
+ RPC_CHOP2 (CPP_ADD (in_count, out_count), \
+ ##__VA_ARGS__))) \
+ { \
+ struct hurd_message_buffer *mb = hurd_message_buffer_alloc (); \
+ \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply_marshal) \
+ (mb->request \
+ /* Out data. */ \
+ RPC_IF_COMMA (out_count) () \
+ RPC_ARGUMENTS(out_count,, RPC_CHOP2 (in_count, __VA_ARGS__)) \
+ /* Out capabilities. */ \
+ RPC_IF_COMMA (ret_cap_count) () \
+ RPC_ARGUMENTS(ret_cap_count,, \
+ RPC_CHOP2 (CPP_ADD (in_count, out_count), \
+ __VA_ARGS__))); \
+ \
+ error_t err = vg_reply (VG_IPC_SEND_SET_THREAD_TO_CALLER \
+ | VG_IPC_SEND_SET_ASROOT_TO_CALLERS, \
+ __rpc_activity, __rpc_target, \
+ mb->sender, ADDR_VOID); \
+ \
+ hurd_message_buffer_free (mb); \
+ \
+ return err; \
+ }
+#else
+#define RPC_REPLY_(id, in_count, out_count, ret_cap_count, ...) \
+ static inline error_t \
+ __attribute__((always_inline)) \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply) \
+ (struct activity *__rpc_activity, \
+ struct messenger *__rpc_target \
+ /* Out data. */ \
+ RPC_IF_COMMA (out_count) () \
+ RPC_GRAB2 (, out_count, RPC_CHOP2 (in_count, ##__VA_ARGS__)) \
+ /* Return capabilities. */ \
+ RPC_IF_COMMA (ret_cap_count) () \
+ RPC_GRAB2 (, ret_cap_count, \
+ RPC_TYPE_SHIFT (ret_cap_count, struct cap, \
+ RPC_CHOP2 (CPP_ADD (in_count, out_count), \
+ ##__VA_ARGS__)))) \
+ { \
+ RPC_CONCAT (RPC_STUB_PREFIX_(id), _reply_marshal) \
+ (reply_buffer \
+ /* Out data. */ \
+ RPC_IF_COMMA (out_count) () \
+ RPC_ARGUMENTS(out_count,, RPC_CHOP2 (in_count, ##__VA_ARGS__)) \
+ /* Out capabilities. */ \
+ RPC_IF_COMMA (ret_cap_count) () \
+ RPC_ARGUMENTS (ret_cap_count, &, \
+ RPC_CHOP2 (CPP_ADD (in_count, out_count), \
+ ##__VA_ARGS__))); \
+ \
+ bool ret = messenger_message_load (__rpc_activity, \
+ __rpc_target, reply_buffer); \
+ \
+ return ret ? 0 : EWOULDBLOCK; \
+ }
+#endif
+
+/* RPC template. ID is the method name. IN_COUNT is the number of
+ arguments. OUT_COUNT is the number of out arguments.
+ RET_CAP_COUNT is the number of capabilities that are returned. The
+ remaining arguments correspond to pairs of types and argument
+ names.
+
+ Consider:
+
+ RPC(method, 2, 1, 1,
+ // In (data and capability) parameters
+ int, foo, cap_t, bar,
+ // Out data parameters
+ int, bam,
+ // Out capabilities
+ cap_t, xyzzy)
+
+ This will generate marshalling and unmarshalling functions as well
+ as send, reply and call functions. For instance, the signature for
+ the corresponding send marshal function is:
+
+ error_t method_send_marshal (struct vg_message *message,
+ int foo, cap_t bar, cap_t reply)
+
+ that of the send unmarshal function is:
+
+ error_t method_send_unmarshal (struct vg_message *message,
+ int *foo, cap_t *bar, cap_t *reply)
+
+ that of the receive marshal function is:
+
+ error_t method_receive_marshal (struct vg_message *message,
+ cap_t xyzzy)
+
+
+ that of the reply marshal function is:
+
+ error_t method_reply_marshal (struct vg_message *message,
+ int bam, cap_t xyzzy)
+
+ that of the reply unmarshal function is:
+
+ error_t method_reply_unmarshal (struct vg_message *message,
+ int *bam, cap_t *xyzzy)
+
+ Functions to send requests and replies as well as to produce calls
+ are also generated.
+
+ error_t method_call (cap_t activity, cap_t object,
+ int foo, cap_t bar, int *bam, cap_t *xyzzy)
+
+ Note that *XYZZY must be initialized with the location of a
+ capability slot to store the returned capability. *XYZZY is set to
+ ADDR_VOID if the sender did not provide a capability.
+
+ To send a message and not wait for a reply, a function with the
+ following prototype is generated:
+
+ error_t method_send (cap_t activity, cap_t object,
+ int foo, cap_t bar,
+ cap_t reply_messenger)
+
+ To reply to a request, a function with the following prototype is
+ generated:
+
+ error_t method_reply (cap_t activity, cap_t reply_messenger,
+ int bam, cap_t xyzzy)
+*/
+
+#define RPC(id, in_count, out_count, ret_cap_count, ...) \
+ RPC_MARSHAL_GEN_(id, in_count, out_count, ret_cap_count, ##__VA_ARGS__) \
+ \
+ RPC_(, id, in_count, out_count, ret_cap_count, ##__VA_ARGS__) \
+ RPC_SEND_(_send, id, in_count, out_count, ret_cap_count, \
+ ##__VA_ARGS__) \
+ RPC_SEND_NONBLOCKING_(_send_nonblocking, \
+ id, in_count, out_count, ret_cap_count, \
+ ##__VA_ARGS__) \
+ RPC_REPLY_(id, in_count, out_count, ret_cap_count, ##__VA_ARGS__)
+
+/* Marshal a reply consisting of the error code ERR in *MSG. */
+static inline void
+__attribute__((always_inline))
+rpc_error_reply_marshal (struct vg_message *msg, error_t err)
+{
+ vg_message_clear (msg);
+ vg_message_append_word (msg, err);
+}
+
+/* Reply to the target TARGET with error code ERROR. */
+#ifdef RM_INTERN
+static inline error_t
+__attribute__((always_inline))
+rpc_error_reply (struct activity *activity, struct messenger *target,
+ error_t err)
+{
+ rpc_error_reply_marshal (reply_buffer, err);
+ bool ret = messenger_message_load (activity, target, reply_buffer);
+ return ret ? 0 : EWOULDBLOCK;
+}
+#else
+static inline error_t
+__attribute__((always_inline))
+rpc_error_reply (cap_t activity, cap_t target, error_t err)
+{
+ return vg_ipc_short (VG_IPC_SEND_NONBLOCKING | VG_IPC_SEND_INLINE
+ | VG_IPC_SEND_INLINE_WORD1,
+ ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ ADDR_VOID, target,
+ ADDR_VOID, err, 0, ADDR_VOID);
+}
+#endif
+
+#endif
diff --git a/libviengoos/viengoos/thread.h b/libviengoos/viengoos/thread.h
new file mode 100644
index 0000000..d8a69eb
--- /dev/null
+++ b/libviengoos/viengoos/thread.h
@@ -0,0 +1,281 @@
+/* thread.h - Thread definitions.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+ Written by Neal H. Walfield <neal@gnu.org>.
+
+ GNU Hurd is free software: you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation, either version 3 of the
+ License, or (at your option) any later version.
+
+ GNU Hurd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GNU Hurd. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef __have_vg_thread_id_t
+# define __have_vg_thread_id_t
+
+# ifdef USE_L4
+# include <l4.h>
+typedef l4_thread_id_t vg_thread_id_t;
+# define vg_niltid l4_nilthread
+# define VG_THREAD_ID_FMT "%x"
+# else
+# include <stdint.h>
+typedef uint64_t vg_thread_id_t;
+# define vg_niltid -1
+# define VG_THREAD_ID_FMT "%llx"
+# endif
+
+#endif /* !__have_vg_thread_id_t */
+
+#if defined(__need_vg_thread_id_t)
+# undef __need_vg_thread_id_t
+#else
+
+#ifndef _VIENGOOS_THREAD_H
+#define _VIENGOOS_THREAD_H 1
+
+#include <stdint.h>
+#include <viengoos/addr.h>
+#include <viengoos/cap.h>
+#include <viengoos/messenger.h>
+
+/* The user thread control block. */
+struct vg_utcb
+{
+ /* Generic data. */
+ struct
+ {
+ union
+ {
+ struct
+ {
+ union
+ {
+ struct
+ {
+ /* Whether the thread is in activated mode. If so, any
+ activations that arrive during this time will be queued
+ or dropped. */
+ uintptr_t activated_mode : 1;
+ /* Set by the kernel to indicate that there is a pending
+ message. */
+ uintptr_t pending_message : 1;
+ /* Set by the kernel to indicate whether the thread was
+ interrupted while the EIP is in the transition range. */
+ uintptr_t interrupt_in_transition : 1;
+ };
+ uintptr_t mode;
+ };
+
+ /* The value of the IP and SP when the thread was running. */
+ uintptr_t saved_ip;
+ uintptr_t saved_sp;
+
+ uintptr_t activation_handler_sp;
+ uintptr_t activation_handler_ip;
+ uintptr_t activation_handler_end;
+
+ /* The protected payload of the capability that invoked the
+ messenger that caused this activation. */
+ uint64_t protected_payload;
+ /* The messenger's id. */
+ uint64_t messenger_id;
+
+ uintptr_t inline_words[VG_MESSENGER_INLINE_WORDS];
+ addr_t inline_caps[VG_MESSENGER_INLINE_CAPS];
+
+ union
+ {
+ struct
+ {
+ int inline_word_count : 2;
+ int inline_cap_count : 1;
+ };
+ int inline_data : 3;
+ };
+ };
+
+ char data[256];
+ };
+ };
+
+ /* Architecture-specific data. */
+ struct
+ {
+ union
+ {
+ struct
+ {
+ };
+
+ char data[256];
+ };
+ };
+};
+
+/* A thread object's user accessible capability slots. */
+enum
+ {
+ /* Root of the address space. */
+ THREAD_ASPACE_SLOT = 0,
+ /* The activity the thread is bound to. */
+ THREAD_ACTIVITY_SLOT = 1,
+ /* The messenger to post exceptions to. */
+ THREAD_EXCEPTION_MESSENGER = 2,
+ /* The user thread control block. Must be a cap_page. */
+ THREAD_UTCB = 3,
+
+ /* Total number of capability slots in a thread object. This must
+ be a power of 2. */
+ THREAD_SLOTS = 4,
+ };
+#define THREAD_SLOTS_LOG2 2
+
+enum
+{
+ HURD_EXREGS_SET_UTCB = 0x2000,
+ HURD_EXREGS_SET_EXCEPTION_MESSENGER = 0x1000,
+ HURD_EXREGS_SET_ASPACE = 0x800,
+ HURD_EXREGS_SET_ACTIVITY = 0x400,
+ HURD_EXREGS_SET_SP = _L4_XCHG_REGS_SET_SP,
+ HURD_EXREGS_SET_IP = _L4_XCHG_REGS_SET_IP,
+ HURD_EXREGS_SET_SP_IP = _L4_XCHG_REGS_SET_SP | _L4_XCHG_REGS_SET_IP,
+ HURD_EXREGS_SET_EFLAGS = _L4_XCHG_REGS_SET_FLAGS,
+ HURD_EXREGS_SET_USER_HANDLE = _L4_XCHG_REGS_SET_USER_HANDLE,
+ HURD_EXREGS_SET_REGS = (HURD_EXREGS_SET_UTCB
+ | HURD_EXREGS_SET_EXCEPTION_MESSENGER
+ | HURD_EXREGS_SET_ASPACE
+ | HURD_EXREGS_SET_ACTIVITY
+ | HURD_EXREGS_SET_SP
+ | HURD_EXREGS_SET_IP
+ | HURD_EXREGS_SET_EFLAGS
+ | HURD_EXREGS_SET_USER_HANDLE),
+
+ HURD_EXREGS_GET_REGS = _L4_XCHG_REGS_DELIVER,
+
+ HURD_EXREGS_START = _L4_XCHG_REGS_SET_HALT,
+ HURD_EXREGS_STOP = _L4_XCHG_REGS_SET_HALT | _L4_XCHG_REGS_HALT,
+
+ HURD_EXREGS_ABORT_SEND = _L4_XCHG_REGS_CANCEL_SEND,
+ HURD_EXREGS_ABORT_RECEIVE = _L4_XCHG_REGS_CANCEL_RECV,
+ HURD_EXREGS_ABORT_IPC = HURD_EXREGS_ABORT_SEND | _L4_XCHG_REGS_CANCEL_RECV,
+};
+
+enum
+ {
+ RM_thread_exregs = 600,
+ RM_thread_id,
+ RM_thread_activation_collect,
+ };
+
+#ifdef RM_INTERN
+struct thread;
+typedef struct thread *thread_t;
+#else
+typedef addr_t thread_t;
+#endif
+
+#define RPC_STUB_PREFIX rm
+#define RPC_ID_PREFIX RM
+
+#include <viengoos/rpc.h>
+
+struct hurd_thread_exregs_in
+{
+ uintptr_t aspace_cap_properties_flags;
+ struct cap_properties aspace_cap_properties;
+
+ uintptr_t sp;
+ uintptr_t ip;
+ uintptr_t eflags;
+ uintptr_t user_handle;
+};
+
+struct hurd_thread_exregs_out
+{
+ uintptr_t sp;
+ uintptr_t ip;
+ uintptr_t eflags;
+ uintptr_t user_handle;
+};
+
+/* l4_exregs wrapper. */
+RPC (thread_exregs, 6, 1, 4,
+ /* cap_t principal, cap_t thread, */
+ uintptr_t, control, struct hurd_thread_exregs_in, in,
+ cap_t, aspace, cap_t, activity, cap_t, utcb, cap_t, exception_messenger,
+ /* Out: */
+ struct hurd_thread_exregs_out, out,
+ cap_t, aspace_out, cap_t, activity_out, cap_t, utcb_out,
+ cap_t, exception_messenger_out)
+
+static inline error_t
+thread_start (addr_t thread)
+{
+ struct hurd_thread_exregs_in in;
+ struct hurd_thread_exregs_out out;
+
+ return rm_thread_exregs (ADDR_VOID, thread,
+ HURD_EXREGS_START | HURD_EXREGS_ABORT_IPC,
+ in, ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ &out, NULL, NULL, NULL, NULL);
+}
+
+static inline error_t
+thread_start_sp_ip (addr_t thread, uintptr_t sp, uintptr_t ip)
+{
+ struct hurd_thread_exregs_in in;
+ struct hurd_thread_exregs_out out;
+
+ in.sp = sp;
+ in.ip = ip;
+
+ return rm_thread_exregs (ADDR_VOID, thread,
+ HURD_EXREGS_START | HURD_EXREGS_ABORT_IPC
+ | HURD_EXREGS_SET_SP_IP,
+ in, ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ &out, NULL, NULL, NULL, NULL);
+}
+
+static inline error_t
+thread_stop (addr_t thread)
+{
+ struct hurd_thread_exregs_in in;
+ struct hurd_thread_exregs_out out;
+
+ return rm_thread_exregs (ADDR_VOID, thread,
+ HURD_EXREGS_STOP | HURD_EXREGS_ABORT_IPC,
+ in, ADDR_VOID, ADDR_VOID, ADDR_VOID, ADDR_VOID,
+ &out, NULL, NULL, NULL, NULL);
+}
+
+/* Return the unique integer associated with thread THREAD. */
+RPC(thread_id, 0, 1, 0,
+ /* cap_t principal, cap_t thread, */
+ vg_thread_id_t, tid)
+
+/* Cause the delivery of a pending message, if any. */
+RPC(thread_activation_collect, 0, 0, 0
+ /* cap_t principal, cap_t thread */)
+
+#undef RPC_STUB_PREFIX
+#undef RPC_ID_PREFIX
+
+static inline vg_thread_id_t
+vg_myself (void)
+{
+ vg_thread_id_t tid;
+ error_t err = rm_thread_id (ADDR_VOID, ADDR_VOID, &tid);
+ if (err)
+ return vg_niltid;
+ return tid;
+}
+
+#endif /* _VIENGOOS_THREAD_H */
+#endif /* __need_vg_thread_id_t */